repo_name
string
path
string
copies
string
size
string
content
string
license
string
Blechd0se/kernel-moto-g
arch/powerpc/platforms/cell/beat_htab.c
4544
12025
/*
 * "Cell Reference Set" HTAB support.
 *
 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
 *
 * This code is based on arch/powerpc/platforms/pseries/lpar.c:
 *  Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#undef DEBUG_LOW

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/udbg.h>

#include "beat_wrapper.h"

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) do { udbg_printf(fmt); } while (0)
#else
#define DBG_LOW(fmt...) do { } while (0)
#endif

/* Serializes read-modify-write sequences on the hash table via the HV. */
static DEFINE_RAW_SPINLOCK(beat_htab_lock);

/*
 * Build a 16-bit mask of candidate slots for insertion: the high byte
 * covers the 8 slots of the primary group, the low byte the 8 slots of
 * the secondary group (derived by complementing the hash).  A bit is set
 * for every slot whose HPTE does not have HPTE_V_BOLTED set, i.e. slots
 * the hypervisor may reuse.  Returns 0 when every slot is bolted.
 */
static inline unsigned int beat_read_mask(unsigned hpte_group)
{
	unsigned long rmask = 0;
	u64 hpte_v[5];

	beat_read_htab_entries(0, hpte_group + 0, hpte_v);
	if (!(hpte_v[0] & HPTE_V_BOLTED))
		rmask |= 0x8000;
	if (!(hpte_v[1] & HPTE_V_BOLTED))
		rmask |= 0x4000;
	if (!(hpte_v[2] & HPTE_V_BOLTED))
		rmask |= 0x2000;
	if (!(hpte_v[3] & HPTE_V_BOLTED))
		rmask |= 0x1000;
	beat_read_htab_entries(0, hpte_group + 4, hpte_v);
	if (!(hpte_v[0] & HPTE_V_BOLTED))
		rmask |= 0x0800;
	if (!(hpte_v[1] & HPTE_V_BOLTED))
		rmask |= 0x0400;
	if (!(hpte_v[2] & HPTE_V_BOLTED))
		rmask |= 0x0200;
	if (!(hpte_v[3] & HPTE_V_BOLTED))
		rmask |= 0x0100;
	/* Switch to the secondary hash bucket for the low byte. */
	hpte_group = ~hpte_group & (htab_hash_mask * HPTES_PER_GROUP);
	beat_read_htab_entries(0, hpte_group + 0, hpte_v);
	if (!(hpte_v[0] & HPTE_V_BOLTED))
		rmask |= 0x80;
	if (!(hpte_v[1] & HPTE_V_BOLTED))
		rmask |= 0x40;
	if (!(hpte_v[2] & HPTE_V_BOLTED))
		rmask |= 0x20;
	if (!(hpte_v[3] & HPTE_V_BOLTED))
		rmask |= 0x10;
	beat_read_htab_entries(0, hpte_group + 4, hpte_v);
	if (!(hpte_v[0] & HPTE_V_BOLTED))
		rmask |= 0x08;
	if (!(hpte_v[1] & HPTE_V_BOLTED))
		rmask |= 0x04;
	if (!(hpte_v[2] & HPTE_V_BOLTED))
		rmask |= 0x02;
	if (!(hpte_v[3] & HPTE_V_BOLTED))
		rmask |= 0x01;
	return rmask;
}

/*
 * Insert an HPTE via the Beat hypervisor.  Secondary-bucket requests are
 * rejected (returns -1); a full group returns -1; a hypervisor failure
 * returns -2 so hash_page() does not retry forever.  On success returns
 * the slot index within the group (low 4 bits, secondary bit included).
 */
static long beat_lpar_hpte_insert(unsigned long hpte_group,
				  unsigned long va, unsigned long pa,
				  unsigned long rflags, unsigned long vflags,
				  int psize, int ssize)
{
	unsigned long lpar_rc;
	u64 hpte_v, hpte_r, slot;

	if (vflags & HPTE_V_SECONDARY)
		return -1;

	if (!(vflags & HPTE_V_BOLTED))
		DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
			"rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, va, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
		vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	if (rflags & _PAGE_NO_CACHE)
		hpte_r &= ~_PAGE_COHERENT;

	raw_spin_lock(&beat_htab_lock);
	/* Free-slot mask doubles as the HV "allowed slots" argument. */
	lpar_rc = beat_read_mask(hpte_group);
	if (lpar_rc == 0) {
		if (!(vflags & HPTE_V_BOLTED))
			DBG_LOW(" full\n");
		raw_spin_unlock(&beat_htab_lock);
		return -1;
	}

	lpar_rc = beat_insert_htab_entry(0, hpte_group, lpar_rc << 48,
		hpte_v, hpte_r, &slot);
	raw_spin_unlock(&beat_htab_lock);

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != 0)) {
		if (!(vflags & HPTE_V_BOLTED))
			DBG_LOW(" lpar err %lx\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		DBG_LOW(" -> slot: %lx\n", slot);

	/* We have to pass down the secondary bucket bit here as well */
	return (slot ^ hpte_group) & 15;
}

/*
 * HPTE eviction is not supported on this platform; always report
 * "nothing removed" so the caller falls back to other strategies.
 */
static long beat_lpar_hpte_remove(unsigned long hpte_group)
{
	DBG_LOW("hpte_remove(group=%lx)\n", hpte_group);
	return -1;
}

/*
 * Read the first doubleword (the "V" word) of the HPTE at @slot.
 * The HV reads a 4-entry group at a time, so fetch the aligned group
 * and pick out the requested entry.
 */
static unsigned long beat_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	u64 dword[5];

	lpar_rc = beat_read_htab_entries(0, slot & ~3UL, dword);

	dword0 = dword[slot&3];

	BUG_ON(lpar_rc != 0);

	return dword0;
}

/* Invalidate every HPTE in the hash table, one entry at a time. */
static void beat_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	int i;
	u64 dummy0, dummy1;

	/* TODO: Use bulk call */
	for (i = 0; i < hpte_count; i++)
		beat_write_htab_entry(0, i, 0, 0, -1UL, -1UL, &dummy0, &dummy1);
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long beat_lpar_hpte_updatepp(unsigned long slot,
				    unsigned long newpp,
				    unsigned long va,
				    int psize, int ssize, int local)
{
	unsigned long lpar_rc;
	u64 dummy0, dummy1;
	unsigned long want_v;

	want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);

	DBG_LOW("    update: "
		"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
		want_v & HPTE_V_AVPN, slot, psize, newpp);

	raw_spin_lock(&beat_htab_lock);
	dummy0 = beat_lpar_hpte_getword0(slot);
	/* Compare ignoring the low 7 software/flag bits of the V word. */
	if ((dummy0 & ~0x7FUL) != (want_v & ~0x7FUL)) {
		DBG_LOW("not found !\n");
		raw_spin_unlock(&beat_htab_lock);
		return -1;
	}

	lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, &dummy0,
					&dummy1);
	raw_spin_unlock(&beat_htab_lock);
	if (lpar_rc != 0 || dummy0 == 0) {
		DBG_LOW("not found !\n");
		return -1;
	}

	DBG_LOW("ok %lx %lx\n", dummy0, dummy1);

	BUG_ON(lpar_rc != 0);

	return 0;
}

/*
 * Linear search of both hash buckets for the HPTE matching @va/@psize.
 * Returns the slot number, negated when found in the secondary bucket,
 * or -1 when not present.  Caller holds beat_htab_lock.
 */
static long beat_lpar_hpte_find(unsigned long va, int psize)
{
	unsigned long hash;
	unsigned long i, j;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
	want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);

	for (j = 0; j < 2; j++) {
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		for (i = 0; i < HPTES_PER_GROUP; i++) {
			hpte_v = beat_lpar_hpte_getword0(slot);

			if (HPTE_V_COMPARE(hpte_v, want_v)
			    && (hpte_v & HPTE_V_VALID)
			    && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
				/* HPTE matches */
				if (j)
					slot = -slot;
				return slot;
			}
			++slot;
		}
		hash = ~hash;
	}

	return -1;
}

/*
 * Update protection bits of a bolted kernel mapping.  The entry must
 * exist (BUG otherwise) since bolted translations cannot be faulted
 * back in.
 */
static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
					  unsigned long ea,
					  int psize, int ssize)
{
	unsigned long lpar_rc, slot, vsid, va;
	u64 dummy0, dummy1;

	vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
	va = (vsid << 28) | (ea & 0x0fffffff);

	raw_spin_lock(&beat_htab_lock);
	slot = beat_lpar_hpte_find(va, psize);
	BUG_ON(slot == -1);

	lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
		&dummy0, &dummy1);
	raw_spin_unlock(&beat_htab_lock);

	BUG_ON(lpar_rc != 0);
}

/*
 * Invalidate the HPTE at @slot after verifying it still matches @va.
 * Runs with interrupts disabled since it may be called from TLB flush
 * paths; silently returns if the entry was already replaced.
 */
static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
				      int psize, int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	u64 dummy1, dummy2;
	unsigned long flags;

	DBG_LOW("    inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
		slot, va, psize, local);
	want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);

	raw_spin_lock_irqsave(&beat_htab_lock, flags);
	dummy1 = beat_lpar_hpte_getword0(slot);

	if ((dummy1 & ~0x7FUL) != (want_v & ~0x7FUL)) {
		DBG_LOW("not found !\n");
		raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
		return;
	}

	lpar_rc = beat_write_htab_entry(0, slot, 0, 0, HPTE_V_VALID, 0,
		&dummy1, &dummy2);
	raw_spin_unlock_irqrestore(&beat_htab_lock, flags);

	BUG_ON(lpar_rc != 0);
}

/* Wire up the v2 (read/modify/write style) HTAB operations. */
void __init hpte_init_beat(void)
{
	ppc_md.hpte_invalidate	= beat_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= beat_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert	= beat_lpar_hpte_insert;
	ppc_md.hpte_remove	= beat_lpar_hpte_remove;
	ppc_md.hpte_clear_all	= beat_lpar_hptab_clear;
}

/*
 * v3 variant of insert: the hypervisor picks the slot atomically, so no
 * free-slot scan or lock is needed on our side.
 */
static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
				     unsigned long va, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int ssize)
{
	unsigned long lpar_rc;
	u64 hpte_v, hpte_r, slot;

	if (vflags & HPTE_V_SECONDARY)
		return -1;

	if (!(vflags & HPTE_V_BOLTED))
		DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
			"rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, va, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
		vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	if (rflags & _PAGE_NO_CACHE)
		hpte_r &= ~_PAGE_COHERENT;

	/* insert into a non-bolted entry */
	lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r,
		HPTE_V_BOLTED, 0, &slot);
	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != 0)) {
		if (!(vflags & HPTE_V_BOLTED))
			DBG_LOW(" lpar err %lx\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		DBG_LOW(" -> slot: %lx\n", slot);

	/* We have to pass down the secondary bucket bit here as well */
	return (slot ^ hpte_group) & 15;
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
				       unsigned long newpp,
				       unsigned long va,
				       int psize, int ssize, int local)
{
	unsigned long lpar_rc;
	unsigned long want_v;
	unsigned long pss;

	want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
	/* Page-size encoding; 4K uses the "no encoding" sentinel. */
	pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;

	DBG_LOW("    update: "
		"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
		want_v & HPTE_V_AVPN, slot, psize, newpp);

	lpar_rc = beat_update_htab_permission3(0, slot, want_v, pss, 7, newpp);

	if (lpar_rc == 0xfffffff7) {
		DBG_LOW("not found !\n");
		return -1;
	}

	DBG_LOW("ok\n");

	BUG_ON(lpar_rc != 0);

	return 0;
}

/* v3 invalidate: single HV call, no lock needed on our side. */
static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va,
					 int psize, int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long pss;

	DBG_LOW("    inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
		slot, va, psize, local);
	want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
	pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;

	lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);

	/* E_busy can be valid output: page may be already replaced */
	BUG_ON(lpar_rc != 0 && lpar_rc != 0xfffffff7);
}

/* Returns 0 when the v3 bulk-clear HV call is available and succeeded. */
static int64_t _beat_lpar_hptab_clear_v3(void)
{
	return beat_clear_htab3(0);
}

static void beat_lpar_hptab_clear_v3(void)
{
	_beat_lpar_hptab_clear_v3();
}

/*
 * Wire up the v3 HTAB operations when the hypervisor supports them
 * (probed via the bulk-clear call); otherwise fall back to v2.
 */
void __init hpte_init_beat_v3(void)
{
	if (_beat_lpar_hptab_clear_v3() == 0) {
		ppc_md.hpte_invalidate	= beat_lpar_hpte_invalidate_v3;
		ppc_md.hpte_updatepp	= beat_lpar_hpte_updatepp_v3;
		ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
		ppc_md.hpte_insert	= beat_lpar_hpte_insert_v3;
		ppc_md.hpte_remove	= beat_lpar_hpte_remove;
		ppc_md.hpte_clear_all	= beat_lpar_hptab_clear_v3;
	} else {
		ppc_md.hpte_invalidate	= beat_lpar_hpte_invalidate;
		ppc_md.hpte_updatepp	= beat_lpar_hpte_updatepp;
		ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
		ppc_md.hpte_insert	= beat_lpar_hpte_insert;
		ppc_md.hpte_remove	= beat_lpar_hpte_remove;
		ppc_md.hpte_clear_all	= beat_lpar_hptab_clear;
	}
}
gpl-2.0
Dr-Shadow/android_kernel_acer_c10
arch/powerpc/platforms/85xx/mpc85xx_ads.c
4544
5458
/*
 * MPC85xx setup and early boot code plus other random bits.
 *
 * Maintained by Kumar Gala (see MAINTAINERS for contact information)
 *
 * Copyright 2005 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>

#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>

#ifdef CONFIG_CPM2
#include <asm/cpm2.h>
#include <sysdev/cpm2_pic.h>
#endif

#include "mpc85xx.h"

#ifdef CONFIG_PCI
/*
 * Hide the host bridge itself (bus 0, slot 0) from PCI enumeration;
 * everything else is scanned normally.
 */
static int mpc85xx_exclude_device(struct pci_controller *hose,
				  u_char bus, u_char devfn)
{
	if (bus == 0 && PCI_SLOT(devfn) == 0)
		return PCIBIOS_DEVICE_NOT_FOUND;
	else
		return PCIBIOS_SUCCESSFUL;
}
#endif /* CONFIG_PCI */

/* Allocate and initialize the OpenPIC, then chain the CPM2 PIC off it. */
static void __init mpc85xx_ads_pic_init(void)
{
	struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
			0, 256, " OpenPIC  ");
	BUG_ON(mpic == NULL);
	mpic_init(mpic);

	mpc85xx_cpm2_pic_init();
}

/*
 * Setup the architecture
 */
#ifdef CONFIG_CPM2
/* One CPM2 parallel-I/O pin assignment: port, pin, direction/mux flags. */
struct cpm_pin {
	int port, pin, flags;
};

/* Board pin mux table for the MPC8560 ADS: SCC1/SCC2 UARTs, FCC2/FCC3. */
static const struct cpm_pin mpc8560_ads_pins[] = {
	/* SCC1 */
	{3, 29, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY},

	/* SCC2 */
	{2, 12, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{2, 13, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{3, 26, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{3, 27, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{3, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY},

	/* FCC2 */
	{1, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 20, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 22, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 25, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 26, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{1, 30, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 31, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{2, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK14 */
	{2, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK13 */

	/* FCC3 */
	{1, 4, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 5, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 6, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 9, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 12, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 13, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 14, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 15, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{2, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK16 */
	{2, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK15 */
	{2, 27, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
};

/* Apply the pin mux table and route BRG/CLK sources to the SCCs/FCCs. */
static void __init init_ioports(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mpc8560_ads_pins); i++) {
		const struct cpm_pin *pin = &mpc8560_ads_pins[i];
		cpm2_set_pin(pin->port, pin->pin, pin->flags);
	}

	cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX);
	cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_TX);
	cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX);
	cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK15, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK16, CPM_CLK_TX);
}
#endif

/* Board setup: reset CPM2, program I/O ports, register PCI bridges. */
static void __init mpc85xx_ads_setup_arch(void)
{
#ifdef CONFIG_PCI
	struct device_node *np;
#endif

	if (ppc_md.progress)
		ppc_md.progress("mpc85xx_ads_setup_arch()", 0);

#ifdef CONFIG_CPM2
	cpm2_reset();
	init_ioports();
#endif

#ifdef CONFIG_PCI
	for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
		fsl_add_bridge(np, 1);

	ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif
}

/* /proc/cpuinfo extras: vendor, PVR/SVR registers, PLL setting. */
static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
{
	uint pvid, svid, phid1;

	pvid = mfspr(SPRN_PVR);
	svid = mfspr(SPRN_SVR);

	seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
	seq_printf(m, "SVR\t\t: 0x%x\n", svid);

	/* Display cpu Pll setting */
	phid1 = mfspr(SPRN_HID1);
	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}

machine_device_initcall(mpc85xx_ads, mpc85xx_common_publish_devices);

/*
 * Called very early, device-tree isn't unflattened
 */
static int __init mpc85xx_ads_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	return of_flat_dt_is_compatible(root, "MPC85xxADS");
}

define_machine(mpc85xx_ads) {
	.name			= "MPC85xx ADS",
	.probe			= mpc85xx_ads_probe,
	.setup_arch		= mpc85xx_ads_setup_arch,
	.init_IRQ		= mpc85xx_ads_pic_init,
	.show_cpuinfo		= mpc85xx_ads_show_cpuinfo,
	.get_irq		= mpic_get_irq,
	.restart		= fsl_rstcr_restart,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= udbg_progress,
};
gpl-2.0
Ronfante/android_kernel_xiaomi_cancro
net/tipc/name_table.c
4800
25592
/* * net/tipc/name_table.c: TIPC name table code * * Copyright (c) 2000-2006, Ericsson AB * Copyright (c) 2004-2008, 2010-2011, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "core.h" #include "config.h" #include "name_table.h" #include "name_distr.h" #include "subscr.h" #include "port.h" static int tipc_nametbl_size = 1024; /* must be a power of 2 */ /** * struct name_info - name sequence publication info * @node_list: circular list of publications made by own node * @cluster_list: circular list of publications made by own cluster * @zone_list: circular list of publications made by own zone * @node_list_size: number of entries in "node_list" * @cluster_list_size: number of entries in "cluster_list" * @zone_list_size: number of entries in "zone_list" * * Note: The zone list always contains at least one entry, since all * publications of the associated name sequence belong to it. * (The cluster and node lists may be empty.) */ struct name_info { struct list_head node_list; struct list_head cluster_list; struct list_head zone_list; u32 node_list_size; u32 cluster_list_size; u32 zone_list_size; }; /** * struct sub_seq - container for all published instances of a name sequence * @lower: name sequence lower bound * @upper: name sequence upper bound * @info: pointer to name sequence publication info */ struct sub_seq { u32 lower; u32 upper; struct name_info *info; }; /** * struct name_seq - container for all published instances of a name type * @type: 32 bit 'type' value for name sequence * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type'; * sub-sequences are sorted in ascending order * @alloc: number of sub-sequences currently in array * @first_free: array index of first unused sub-sequence entry * @ns_list: links to adjacent name sequences in hash chain * @subscriptions: list of subscriptions for this 'type' * @lock: spinlock controlling access to publication lists of all sub-sequences */ struct name_seq { u32 type; struct sub_seq *sseqs; u32 alloc; u32 first_free; struct hlist_node ns_list; struct list_head subscriptions; spinlock_t lock; }; /** * struct name_table - table containing all existing port 
name publications * @types: pointer to fixed-sized array of name sequence lists, * accessed via hashing on 'type'; name sequence lists are *not* sorted * @local_publ_count: number of publications issued by this node */ struct name_table { struct hlist_head *types; u32 local_publ_count; }; static struct name_table table; DEFINE_RWLOCK(tipc_nametbl_lock); static int hash(int x) { return x & (tipc_nametbl_size - 1); } /** * publ_create - create a publication structure */ static struct publication *publ_create(u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port_ref, u32 key) { struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC); if (publ == NULL) { warn("Publication creation failure, no memory\n"); return NULL; } publ->type = type; publ->lower = lower; publ->upper = upper; publ->scope = scope; publ->node = node; publ->ref = port_ref; publ->key = key; INIT_LIST_HEAD(&publ->local_list); INIT_LIST_HEAD(&publ->pport_list); INIT_LIST_HEAD(&publ->subscr.nodesub_list); return publ; } /** * tipc_subseq_alloc - allocate a specified number of sub-sequence structures */ static struct sub_seq *tipc_subseq_alloc(u32 cnt) { struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC); return sseq; } /** * tipc_nameseq_create - create a name sequence structure for the specified 'type' * * Allocates a single sub-sequence structure and sets it to all 0's. 
*/ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head) { struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC); struct sub_seq *sseq = tipc_subseq_alloc(1); if (!nseq || !sseq) { warn("Name sequence creation failed, no memory\n"); kfree(nseq); kfree(sseq); return NULL; } spin_lock_init(&nseq->lock); nseq->type = type; nseq->sseqs = sseq; nseq->alloc = 1; INIT_HLIST_NODE(&nseq->ns_list); INIT_LIST_HEAD(&nseq->subscriptions); hlist_add_head(&nseq->ns_list, seq_head); return nseq; } /** * nameseq_find_subseq - find sub-sequence (if any) matching a name instance * * Very time-critical, so binary searches through sub-sequence array. */ static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq, u32 instance) { struct sub_seq *sseqs = nseq->sseqs; int low = 0; int high = nseq->first_free - 1; int mid; while (low <= high) { mid = (low + high) / 2; if (instance < sseqs[mid].lower) high = mid - 1; else if (instance > sseqs[mid].upper) low = mid + 1; else return &sseqs[mid]; } return NULL; } /** * nameseq_locate_subseq - determine position of name instance in sub-sequence * * Returns index in sub-sequence array of the entry that contains the specified * instance value; if no entry contains that value, returns the position * where a new entry for it would be inserted in the array. * * Note: Similar to binary search code for locating a sub-sequence. 
*/ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance) { struct sub_seq *sseqs = nseq->sseqs; int low = 0; int high = nseq->first_free - 1; int mid; while (low <= high) { mid = (low + high) / 2; if (instance < sseqs[mid].lower) high = mid - 1; else if (instance > sseqs[mid].upper) low = mid + 1; else return mid; } return low; } /** * tipc_nameseq_insert_publ - */ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port, u32 key) { struct tipc_subscription *s; struct tipc_subscription *st; struct publication *publ; struct sub_seq *sseq; struct name_info *info; int created_subseq = 0; sseq = nameseq_find_subseq(nseq, lower); if (sseq) { /* Lower end overlaps existing entry => need an exact match */ if ((sseq->lower != lower) || (sseq->upper != upper)) { warn("Cannot publish {%u,%u,%u}, overlap error\n", type, lower, upper); return NULL; } info = sseq->info; /* Check if an identical publication already exists */ list_for_each_entry(publ, &info->zone_list, zone_list) { if ((publ->ref == port) && (publ->key == key) && (!publ->node || (publ->node == node))) return NULL; } } else { u32 inspos; struct sub_seq *freesseq; /* Find where lower end should be inserted */ inspos = nameseq_locate_subseq(nseq, lower); /* Fail if upper end overlaps into an existing entry */ if ((inspos < nseq->first_free) && (upper >= nseq->sseqs[inspos].lower)) { warn("Cannot publish {%u,%u,%u}, overlap error\n", type, lower, upper); return NULL; } /* Ensure there is space for new sub-sequence */ if (nseq->first_free == nseq->alloc) { struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2); if (!sseqs) { warn("Cannot publish {%u,%u,%u}, no memory\n", type, lower, upper); return NULL; } memcpy(sseqs, nseq->sseqs, nseq->alloc * sizeof(struct sub_seq)); kfree(nseq->sseqs); nseq->sseqs = sseqs; nseq->alloc *= 2; } info = kzalloc(sizeof(*info), GFP_ATOMIC); if (!info) { warn("Cannot publish {%u,%u,%u}, 
no memory\n", type, lower, upper); return NULL; } INIT_LIST_HEAD(&info->node_list); INIT_LIST_HEAD(&info->cluster_list); INIT_LIST_HEAD(&info->zone_list); /* Insert new sub-sequence */ sseq = &nseq->sseqs[inspos]; freesseq = &nseq->sseqs[nseq->first_free]; memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq)); memset(sseq, 0, sizeof(*sseq)); nseq->first_free++; sseq->lower = lower; sseq->upper = upper; sseq->info = info; created_subseq = 1; } /* Insert a publication: */ publ = publ_create(type, lower, upper, scope, node, port, key); if (!publ) return NULL; list_add(&publ->zone_list, &info->zone_list); info->zone_list_size++; if (in_own_cluster(node)) { list_add(&publ->cluster_list, &info->cluster_list); info->cluster_list_size++; } if (node == tipc_own_addr) { list_add(&publ->node_list, &info->node_list); info->node_list_size++; } /* * Any subscriptions waiting for notification? */ list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { tipc_subscr_report_overlap(s, publ->lower, publ->upper, TIPC_PUBLISHED, publ->ref, publ->node, created_subseq); } return publ; } /** * tipc_nameseq_remove_publ - * * NOTE: There may be cases where TIPC is asked to remove a publication * that is not in the name table. For example, if another node issues a * publication for a name sequence that overlaps an existing name sequence * the publication will not be recorded, which means the publication won't * be found when the name sequence is later withdrawn by that node. * A failed withdraw request simply returns a failure indication and lets the * caller issue any error or warning messages associated with such a problem. 
*/ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst, u32 node, u32 ref, u32 key) { struct publication *publ; struct sub_seq *sseq = nameseq_find_subseq(nseq, inst); struct name_info *info; struct sub_seq *free; struct tipc_subscription *s, *st; int removed_subseq = 0; if (!sseq) return NULL; info = sseq->info; /* Locate publication, if it exists */ list_for_each_entry(publ, &info->zone_list, zone_list) { if ((publ->key == key) && (publ->ref == ref) && (!publ->node || (publ->node == node))) goto found; } return NULL; found: /* Remove publication from zone scope list */ list_del(&publ->zone_list); info->zone_list_size--; /* Remove publication from cluster scope list, if present */ if (in_own_cluster(node)) { list_del(&publ->cluster_list); info->cluster_list_size--; } /* Remove publication from node scope list, if present */ if (node == tipc_own_addr) { list_del(&publ->node_list); info->node_list_size--; } /* Contract subseq list if no more publications for that subseq */ if (list_empty(&info->zone_list)) { kfree(info); free = &nseq->sseqs[nseq->first_free--]; memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq)); removed_subseq = 1; } /* Notify any waiting subscriptions */ list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { tipc_subscr_report_overlap(s, publ->lower, publ->upper, TIPC_WITHDRAWN, publ->ref, publ->node, removed_subseq); } return publ; } /** * tipc_nameseq_subscribe: attach a subscription, and issue * the prescribed number of events if there is any sub- * sequence overlapping with the requested sequence */ static void tipc_nameseq_subscribe(struct name_seq *nseq, struct tipc_subscription *s) { struct sub_seq *sseq = nseq->sseqs; list_add(&s->nameseq_list, &nseq->subscriptions); if (!sseq) return; while (sseq != &nseq->sseqs[nseq->first_free]) { if (tipc_subscr_overlap(s, sseq->lower, sseq->upper)) { struct publication *crs; struct name_info *info = sseq->info; int must_report = 1; 
list_for_each_entry(crs, &info->zone_list, zone_list) { tipc_subscr_report_overlap(s, sseq->lower, sseq->upper, TIPC_PUBLISHED, crs->ref, crs->node, must_report); must_report = 0; } } sseq++; } } static struct name_seq *nametbl_find_seq(u32 type) { struct hlist_head *seq_head; struct hlist_node *seq_node; struct name_seq *ns; seq_head = &table.types[hash(type)]; hlist_for_each_entry(ns, seq_node, seq_head, ns_list) { if (ns->type == type) return ns; } return NULL; }; struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port, u32 key) { struct name_seq *seq = nametbl_find_seq(type); if (lower > upper) { warn("Failed to publish illegal {%u,%u,%u}\n", type, lower, upper); return NULL; } if (!seq) seq = tipc_nameseq_create(type, &table.types[hash(type)]); if (!seq) return NULL; return tipc_nameseq_insert_publ(seq, type, lower, upper, scope, node, port, key); } struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, u32 node, u32 ref, u32 key) { struct publication *publ; struct name_seq *seq = nametbl_find_seq(type); if (!seq) return NULL; publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); if (!seq->first_free && list_empty(&seq->subscriptions)) { hlist_del_init(&seq->ns_list); kfree(seq->sseqs); kfree(seq); } return publ; } /* * tipc_nametbl_translate - perform name translation * * On entry, 'destnode' is the search domain used during translation. 
* * On exit: * - if name translation is deferred to another node/cluster/zone, * leaves 'destnode' unchanged (will be non-zero) and returns 0 * - if name translation is attempted and succeeds, sets 'destnode' * to publishing node and returns port reference (will be non-zero) * - if name translation is attempted and fails, sets 'destnode' to 0 * and returns 0 */ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) { struct sub_seq *sseq; struct name_info *info; struct publication *publ; struct name_seq *seq; u32 ref = 0; u32 node = 0; if (!tipc_in_scope(*destnode, tipc_own_addr)) return 0; read_lock_bh(&tipc_nametbl_lock); seq = nametbl_find_seq(type); if (unlikely(!seq)) goto not_found; sseq = nameseq_find_subseq(seq, instance); if (unlikely(!sseq)) goto not_found; spin_lock_bh(&seq->lock); info = sseq->info; /* Closest-First Algorithm: */ if (likely(!*destnode)) { if (!list_empty(&info->node_list)) { publ = list_first_entry(&info->node_list, struct publication, node_list); list_move_tail(&publ->node_list, &info->node_list); } else if (!list_empty(&info->cluster_list)) { publ = list_first_entry(&info->cluster_list, struct publication, cluster_list); list_move_tail(&publ->cluster_list, &info->cluster_list); } else { publ = list_first_entry(&info->zone_list, struct publication, zone_list); list_move_tail(&publ->zone_list, &info->zone_list); } } /* Round-Robin Algorithm: */ else if (*destnode == tipc_own_addr) { if (list_empty(&info->node_list)) goto no_match; publ = list_first_entry(&info->node_list, struct publication, node_list); list_move_tail(&publ->node_list, &info->node_list); } else if (in_own_cluster(*destnode)) { if (list_empty(&info->cluster_list)) goto no_match; publ = list_first_entry(&info->cluster_list, struct publication, cluster_list); list_move_tail(&publ->cluster_list, &info->cluster_list); } else { publ = list_first_entry(&info->zone_list, struct publication, zone_list); list_move_tail(&publ->zone_list, &info->zone_list); } ref = 
publ->ref; node = publ->node; no_match: spin_unlock_bh(&seq->lock); not_found: read_unlock_bh(&tipc_nametbl_lock); *destnode = node; return ref; } /** * tipc_nametbl_mc_translate - find multicast destinations * * Creates list of all local ports that overlap the given multicast address; * also determines if any off-node ports overlap. * * Note: Publications with a scope narrower than 'limit' are ignored. * (i.e. local node-scope publications mustn't receive messages arriving * from another node, even if the multcast link brought it here) * * Returns non-zero if any off-node ports overlap */ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, struct tipc_port_list *dports) { struct name_seq *seq; struct sub_seq *sseq; struct sub_seq *sseq_stop; struct name_info *info; int res = 0; read_lock_bh(&tipc_nametbl_lock); seq = nametbl_find_seq(type); if (!seq) goto exit; spin_lock_bh(&seq->lock); sseq = seq->sseqs + nameseq_locate_subseq(seq, lower); sseq_stop = seq->sseqs + seq->first_free; for (; sseq != sseq_stop; sseq++) { struct publication *publ; if (sseq->lower > upper) break; info = sseq->info; list_for_each_entry(publ, &info->node_list, node_list) { if (publ->scope <= limit) tipc_port_list_add(dports, publ->ref); } if (info->cluster_list_size != info->node_list_size) res = 1; } spin_unlock_bh(&seq->lock); exit: read_unlock_bh(&tipc_nametbl_lock); return res; } /* * tipc_nametbl_publish - add name publication to network name tables */ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, u32 scope, u32 port_ref, u32 key) { struct publication *publ; if (table.local_publ_count >= tipc_max_publications) { warn("Publication failed, local publication limit reached (%u)\n", tipc_max_publications); return NULL; } write_lock_bh(&tipc_nametbl_lock); table.local_publ_count++; publ = tipc_nametbl_insert_publ(type, lower, upper, scope, tipc_own_addr, port_ref, key); if (publ && (scope != TIPC_NODE_SCOPE)) tipc_named_publish(publ); 
write_unlock_bh(&tipc_nametbl_lock); return publ; } /** * tipc_nametbl_withdraw - withdraw name publication from network name tables */ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) { struct publication *publ; write_lock_bh(&tipc_nametbl_lock); publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); if (likely(publ)) { table.local_publ_count--; if (publ->scope != TIPC_NODE_SCOPE) tipc_named_withdraw(publ); write_unlock_bh(&tipc_nametbl_lock); list_del_init(&publ->pport_list); kfree(publ); return 1; } write_unlock_bh(&tipc_nametbl_lock); err("Unable to remove local publication\n" "(type=%u, lower=%u, ref=%u, key=%u)\n", type, lower, ref, key); return 0; } /** * tipc_nametbl_subscribe - add a subscription object to the name table */ void tipc_nametbl_subscribe(struct tipc_subscription *s) { u32 type = s->seq.type; struct name_seq *seq; write_lock_bh(&tipc_nametbl_lock); seq = nametbl_find_seq(type); if (!seq) seq = tipc_nameseq_create(type, &table.types[hash(type)]); if (seq) { spin_lock_bh(&seq->lock); tipc_nameseq_subscribe(seq, s); spin_unlock_bh(&seq->lock); } else { warn("Failed to create subscription for {%u,%u,%u}\n", s->seq.type, s->seq.lower, s->seq.upper); } write_unlock_bh(&tipc_nametbl_lock); } /** * tipc_nametbl_unsubscribe - remove a subscription object from name table */ void tipc_nametbl_unsubscribe(struct tipc_subscription *s) { struct name_seq *seq; write_lock_bh(&tipc_nametbl_lock); seq = nametbl_find_seq(s->seq.type); if (seq != NULL) { spin_lock_bh(&seq->lock); list_del_init(&s->nameseq_list); spin_unlock_bh(&seq->lock); if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) { hlist_del_init(&seq->ns_list); kfree(seq->sseqs); kfree(seq); } } write_unlock_bh(&tipc_nametbl_lock); } /** * subseq_list: print specified sub-sequence contents into the given buffer */ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth, u32 index) { char portIdStr[27]; const char *scope_str[] = {"", " 
zone", " cluster", " node"}; struct publication *publ; struct name_info *info; tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper); if (depth == 2) { tipc_printf(buf, "\n"); return; } info = sseq->info; list_for_each_entry(publ, &info->zone_list, zone_list) { sprintf(portIdStr, "<%u.%u.%u:%u>", tipc_zone(publ->node), tipc_cluster(publ->node), tipc_node(publ->node), publ->ref); tipc_printf(buf, "%-26s ", portIdStr); if (depth > 3) { tipc_printf(buf, "%-10u %s", publ->key, scope_str[publ->scope]); } if (!list_is_last(&publ->zone_list, &info->zone_list)) tipc_printf(buf, "\n%33s", " "); }; tipc_printf(buf, "\n"); } /** * nameseq_list: print specified name sequence contents into the given buffer */ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth, u32 type, u32 lowbound, u32 upbound, u32 index) { struct sub_seq *sseq; char typearea[11]; if (seq->first_free == 0) return; sprintf(typearea, "%-10u", seq->type); if (depth == 1) { tipc_printf(buf, "%s\n", typearea); return; } for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) { if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) { tipc_printf(buf, "%s ", typearea); spin_lock_bh(&seq->lock); subseq_list(sseq, buf, depth, index); spin_unlock_bh(&seq->lock); sprintf(typearea, "%10s", " "); } } } /** * nametbl_header - print name table header into the given buffer */ static void nametbl_header(struct print_buf *buf, u32 depth) { const char *header[] = { "Type ", "Lower Upper ", "Port Identity ", "Publication Scope" }; int i; if (depth > 4) depth = 4; for (i = 0; i < depth; i++) tipc_printf(buf, header[i]); tipc_printf(buf, "\n"); } /** * nametbl_list - print specified name table contents into the given buffer */ static void nametbl_list(struct print_buf *buf, u32 depth_info, u32 type, u32 lowbound, u32 upbound) { struct hlist_head *seq_head; struct hlist_node *seq_node; struct name_seq *seq; int all_types; u32 depth; u32 i; all_types = (depth_info & 
TIPC_NTQ_ALLTYPES); depth = (depth_info & ~TIPC_NTQ_ALLTYPES); if (depth == 0) return; if (all_types) { /* display all entries in name table to specified depth */ nametbl_header(buf, depth); lowbound = 0; upbound = ~0; for (i = 0; i < tipc_nametbl_size; i++) { seq_head = &table.types[i]; hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { nameseq_list(seq, buf, depth, seq->type, lowbound, upbound, i); } } } else { /* display only the sequence that matches the specified type */ if (upbound < lowbound) { tipc_printf(buf, "invalid name sequence specified\n"); return; } nametbl_header(buf, depth); i = hash(type); seq_head = &table.types[i]; hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { if (seq->type == type) { nameseq_list(seq, buf, depth, type, lowbound, upbound, i); break; } } } } #define MAX_NAME_TBL_QUERY 32768 struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space) { struct sk_buff *buf; struct tipc_name_table_query *argv; struct tlv_desc *rep_tlv; struct print_buf b; int str_len; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY)) return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY)); if (!buf) return NULL; rep_tlv = (struct tlv_desc *)buf->data; tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY); argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area); read_lock_bh(&tipc_nametbl_lock); nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type), ntohl(argv->lowbound), ntohl(argv->upbound)); read_unlock_bh(&tipc_nametbl_lock); str_len = tipc_printbuf_validate(&b); skb_put(buf, TLV_SPACE(str_len)); TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); return buf; } int tipc_nametbl_init(void) { table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head), GFP_ATOMIC); if (!table.types) return -ENOMEM; table.local_publ_count = 0; return 0; } void tipc_nametbl_stop(void) { u32 i; if (!table.types) return; /* Verify name table is 
empty, then release it */ write_lock_bh(&tipc_nametbl_lock); for (i = 0; i < tipc_nametbl_size; i++) { if (!hlist_empty(&table.types[i])) err("tipc_nametbl_stop(): hash chain %u is non-null\n", i); } kfree(table.types); table.types = NULL; write_unlock_bh(&tipc_nametbl_lock); }
gpl-2.0
whdghks913/android_kernel_pantech_ef47s
arch/x86/xen/grant-table.c
7360
4010
/****************************************************************************** * grant_table.c * x86 specific part * * Granting foreign access to our memory reservation. * * Copyright (c) 2005-2006, Christopher Clark * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> * VA Linux Systems Japan. Split out x86 specific part. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include <linux/sched.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <xen/interface/xen.h> #include <xen/page.h> #include <xen/grant_table.h> #include <asm/pgtable.h> static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } /* * This function is used to map shared frames to store grant status. It is * different from map_pte_fn above, the frames type here is uint64_t. */ static int map_pte_fn_status(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { uint64_t **frames = (uint64_t **)data; set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, void **__shared) { int rc; void *shared = *__shared; if (shared == NULL) { struct vm_struct *area = alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL); BUG_ON(area == NULL); shared = area->addr; *__shared = shared; } rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); return rc; } int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, grant_status_t **__shared) { int rc; grant_status_t *shared = *__shared; if (shared == NULL) { /* No need to pass in PTE as we are going to do it * in apply_to_page_range anyhow. 
*/ struct vm_struct *area = alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL); BUG_ON(area == NULL); shared = area->addr; *__shared = shared; } rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn_status, &frames); return rc; } void arch_gnttab_unmap(void *shared, unsigned long nr_gframes) { apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL); }
gpl-2.0
Sparhawk76/android_kernel_samsung_afyonltev1
arch/powerpc/sysdev/ipic.c
7616
20343
/* * arch/powerpc/sysdev/ipic.c * * IPIC routines implementations. * * Copyright 2005 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/slab.h> #include <linux/stddef.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/syscore_ops.h> #include <linux/device.h> #include <linux/bootmem.h> #include <linux/spinlock.h> #include <linux/fsl_devices.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/ipic.h> #include "ipic.h" static struct ipic * primary_ipic; static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip; static DEFINE_RAW_SPINLOCK(ipic_lock); static struct ipic_info ipic_info[] = { [1] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_C, .force = IPIC_SIFCR_H, .bit = 16, .prio_mask = 0, }, [2] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_C, .force = IPIC_SIFCR_H, .bit = 17, .prio_mask = 1, }, [3] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_C, .force = IPIC_SIFCR_H, .bit = 18, .prio_mask = 2, }, [4] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_C, .force = IPIC_SIFCR_H, .bit = 19, .prio_mask = 3, }, [5] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_C, .force = IPIC_SIFCR_H, .bit = 20, .prio_mask = 4, }, [6] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_C, .force = IPIC_SIFCR_H, .bit = 21, .prio_mask = 5, }, [7] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_C, .force = IPIC_SIFCR_H, .bit = 22, .prio_mask = 6, }, [8] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_C, .force = IPIC_SIFCR_H, .bit = 23, .prio_mask = 7, }, [9] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, .bit = 24, .prio_mask = 0, }, [10] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force 
= IPIC_SIFCR_H, .bit = 25, .prio_mask = 1, }, [11] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, .bit = 26, .prio_mask = 2, }, [12] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, .bit = 27, .prio_mask = 3, }, [13] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, .bit = 28, .prio_mask = 4, }, [14] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, .bit = 29, .prio_mask = 5, }, [15] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, .bit = 30, .prio_mask = 6, }, [16] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_D, .force = IPIC_SIFCR_H, .bit = 31, .prio_mask = 7, }, [17] = { .ack = IPIC_SEPNR, .mask = IPIC_SEMSR, .prio = IPIC_SMPRR_A, .force = IPIC_SEFCR, .bit = 1, .prio_mask = 5, }, [18] = { .ack = IPIC_SEPNR, .mask = IPIC_SEMSR, .prio = IPIC_SMPRR_A, .force = IPIC_SEFCR, .bit = 2, .prio_mask = 6, }, [19] = { .ack = IPIC_SEPNR, .mask = IPIC_SEMSR, .prio = IPIC_SMPRR_A, .force = IPIC_SEFCR, .bit = 3, .prio_mask = 7, }, [20] = { .ack = IPIC_SEPNR, .mask = IPIC_SEMSR, .prio = IPIC_SMPRR_B, .force = IPIC_SEFCR, .bit = 4, .prio_mask = 4, }, [21] = { .ack = IPIC_SEPNR, .mask = IPIC_SEMSR, .prio = IPIC_SMPRR_B, .force = IPIC_SEFCR, .bit = 5, .prio_mask = 5, }, [22] = { .ack = IPIC_SEPNR, .mask = IPIC_SEMSR, .prio = IPIC_SMPRR_B, .force = IPIC_SEFCR, .bit = 6, .prio_mask = 6, }, [23] = { .ack = IPIC_SEPNR, .mask = IPIC_SEMSR, .prio = IPIC_SMPRR_B, .force = IPIC_SEFCR, .bit = 7, .prio_mask = 7, }, [32] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, .bit = 0, .prio_mask = 0, }, [33] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, .bit = 1, .prio_mask = 1, }, [34] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, .bit = 2, .prio_mask = 2, }, [35] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, .bit = 3, .prio_mask = 3, }, [36] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, 
.force = IPIC_SIFCR_H, .bit = 4, .prio_mask = 4, }, [37] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, .bit = 5, .prio_mask = 5, }, [38] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, .bit = 6, .prio_mask = 6, }, [39] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_A, .force = IPIC_SIFCR_H, .bit = 7, .prio_mask = 7, }, [40] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, .bit = 8, .prio_mask = 0, }, [41] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, .bit = 9, .prio_mask = 1, }, [42] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, .bit = 10, .prio_mask = 2, }, [43] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, .bit = 11, .prio_mask = 3, }, [44] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, .bit = 12, .prio_mask = 4, }, [45] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, .bit = 13, .prio_mask = 5, }, [46] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, .bit = 14, .prio_mask = 6, }, [47] = { .mask = IPIC_SIMSR_H, .prio = IPIC_SIPRR_B, .force = IPIC_SIFCR_H, .bit = 15, .prio_mask = 7, }, [48] = { .mask = IPIC_SEMSR, .prio = IPIC_SMPRR_A, .force = IPIC_SEFCR, .bit = 0, .prio_mask = 4, }, [64] = { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_A, .force = IPIC_SIFCR_L, .bit = 0, .prio_mask = 0, }, [65] = { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_A, .force = IPIC_SIFCR_L, .bit = 1, .prio_mask = 1, }, [66] = { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_A, .force = IPIC_SIFCR_L, .bit = 2, .prio_mask = 2, }, [67] = { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_A, .force = IPIC_SIFCR_L, .bit = 3, .prio_mask = 3, }, [68] = { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_B, .force = IPIC_SIFCR_L, .bit = 4, .prio_mask = 0, }, [69] = { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_B, .force = IPIC_SIFCR_L, .bit = 5, .prio_mask = 1, }, [70] = { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_B, 
.force = IPIC_SIFCR_L, .bit = 6, .prio_mask = 2, }, [71] = { .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_B, .force = IPIC_SIFCR_L, .bit = 7, .prio_mask = 3, }, [72] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 8, }, [73] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 9, }, [74] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 10, }, [75] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 11, }, [76] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 12, }, [77] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 13, }, [78] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 14, }, [79] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 15, }, [80] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 16, }, [81] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 17, }, [82] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 18, }, [83] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 19, }, [84] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 20, }, [85] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 21, }, [86] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 22, }, [87] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 23, }, [88] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 24, }, [89] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 25, }, [90] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 26, }, [91] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 27, }, [94] = { .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 30, }, }; static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg) { return in_be32(base + (reg >> 2)); } static inline void ipic_write(volatile u32 __iomem *base, unsigned 
int reg, u32 value) { out_be32(base + (reg >> 2), value); } static inline struct ipic * ipic_from_irq(unsigned int virq) { return primary_ipic; } static void ipic_unmask_irq(struct irq_data *d) { struct ipic *ipic = ipic_from_irq(d->irq); unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 temp; raw_spin_lock_irqsave(&ipic_lock, flags); temp = ipic_read(ipic->regs, ipic_info[src].mask); temp |= (1 << (31 - ipic_info[src].bit)); ipic_write(ipic->regs, ipic_info[src].mask, temp); raw_spin_unlock_irqrestore(&ipic_lock, flags); } static void ipic_mask_irq(struct irq_data *d) { struct ipic *ipic = ipic_from_irq(d->irq); unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 temp; raw_spin_lock_irqsave(&ipic_lock, flags); temp = ipic_read(ipic->regs, ipic_info[src].mask); temp &= ~(1 << (31 - ipic_info[src].bit)); ipic_write(ipic->regs, ipic_info[src].mask, temp); /* mb() can't guarantee that masking is finished. But it does finish * for nearly all cases. */ mb(); raw_spin_unlock_irqrestore(&ipic_lock, flags); } static void ipic_ack_irq(struct irq_data *d) { struct ipic *ipic = ipic_from_irq(d->irq); unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 temp; raw_spin_lock_irqsave(&ipic_lock, flags); temp = 1 << (31 - ipic_info[src].bit); ipic_write(ipic->regs, ipic_info[src].ack, temp); /* mb() can't guarantee that ack is finished. But it does finish * for nearly all cases. */ mb(); raw_spin_unlock_irqrestore(&ipic_lock, flags); } static void ipic_mask_irq_and_ack(struct irq_data *d) { struct ipic *ipic = ipic_from_irq(d->irq); unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 temp; raw_spin_lock_irqsave(&ipic_lock, flags); temp = ipic_read(ipic->regs, ipic_info[src].mask); temp &= ~(1 << (31 - ipic_info[src].bit)); ipic_write(ipic->regs, ipic_info[src].mask, temp); temp = 1 << (31 - ipic_info[src].bit); ipic_write(ipic->regs, ipic_info[src].ack, temp); /* mb() can't guarantee that ack is finished. 
But it does finish * for nearly all cases. */ mb(); raw_spin_unlock_irqrestore(&ipic_lock, flags); } static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type) { struct ipic *ipic = ipic_from_irq(d->irq); unsigned int src = irqd_to_hwirq(d); unsigned int vold, vnew, edibit; if (flow_type == IRQ_TYPE_NONE) flow_type = IRQ_TYPE_LEVEL_LOW; /* ipic supports only low assertion and high-to-low change senses */ if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) { printk(KERN_ERR "ipic: sense type 0x%x not supported\n", flow_type); return -EINVAL; } /* ipic supports only edge mode on external interrupts */ if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) { printk(KERN_ERR "ipic: edge sense not supported on internal " "interrupts\n"); return -EINVAL; } irqd_set_trigger_type(d, flow_type); if (flow_type & IRQ_TYPE_LEVEL_LOW) { __irq_set_handler_locked(d->irq, handle_level_irq); d->chip = &ipic_level_irq_chip; } else { __irq_set_handler_locked(d->irq, handle_edge_irq); d->chip = &ipic_edge_irq_chip; } /* only EXT IRQ senses are programmable on ipic * internal IRQ senses are LEVEL_LOW */ if (src == IPIC_IRQ_EXT0) edibit = 15; else if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7) edibit = (14 - (src - IPIC_IRQ_EXT1)); else return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 
0 : -EINVAL; vold = ipic_read(ipic->regs, IPIC_SECNR); if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) { vnew = vold | (1 << edibit); } else { vnew = vold & ~(1 << edibit); } if (vold != vnew) ipic_write(ipic->regs, IPIC_SECNR, vnew); return IRQ_SET_MASK_OK_NOCOPY; } /* level interrupts and edge interrupts have different ack operations */ static struct irq_chip ipic_level_irq_chip = { .name = "IPIC", .irq_unmask = ipic_unmask_irq, .irq_mask = ipic_mask_irq, .irq_mask_ack = ipic_mask_irq, .irq_set_type = ipic_set_irq_type, }; static struct irq_chip ipic_edge_irq_chip = { .name = "IPIC", .irq_unmask = ipic_unmask_irq, .irq_mask = ipic_mask_irq, .irq_mask_ack = ipic_mask_irq_and_ack, .irq_ack = ipic_ack_irq, .irq_set_type = ipic_set_irq_type, }; static int ipic_host_match(struct irq_domain *h, struct device_node *node) { /* Exact match, unless ipic node is NULL */ return h->of_node == NULL || h->of_node == node; } static int ipic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct ipic *ipic = h->host_data; irq_set_chip_data(virq, ipic); irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq); /* Set default irq type */ irq_set_irq_type(virq, IRQ_TYPE_NONE); return 0; } static struct irq_domain_ops ipic_host_ops = { .match = ipic_host_match, .map = ipic_host_map, .xlate = irq_domain_xlate_onetwocell, }; struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) { struct ipic *ipic; struct resource res; u32 temp = 0, ret; ret = of_address_to_resource(node, 0, &res); if (ret) return NULL; ipic = kzalloc(sizeof(*ipic), GFP_KERNEL); if (ipic == NULL) return NULL; ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS, &ipic_host_ops, ipic); if (ipic->irqhost == NULL) { kfree(ipic); return NULL; } ipic->regs = ioremap(res.start, resource_size(&res)); /* init hw */ ipic_write(ipic->regs, IPIC_SICNR, 0x0); /* default priority scheme is grouped. 
If spread mode is required * configure SICFR accordingly */ if (flags & IPIC_SPREADMODE_GRP_A) temp |= SICFR_IPSA; if (flags & IPIC_SPREADMODE_GRP_B) temp |= SICFR_IPSB; if (flags & IPIC_SPREADMODE_GRP_C) temp |= SICFR_IPSC; if (flags & IPIC_SPREADMODE_GRP_D) temp |= SICFR_IPSD; if (flags & IPIC_SPREADMODE_MIX_A) temp |= SICFR_MPSA; if (flags & IPIC_SPREADMODE_MIX_B) temp |= SICFR_MPSB; ipic_write(ipic->regs, IPIC_SICFR, temp); /* handle MCP route */ temp = 0; if (flags & IPIC_DISABLE_MCP_OUT) temp = SERCR_MCPR; ipic_write(ipic->regs, IPIC_SERCR, temp); /* handle routing of IRQ0 to MCP */ temp = ipic_read(ipic->regs, IPIC_SEMSR); if (flags & IPIC_IRQ0_MCP) temp |= SEMSR_SIRQ0; else temp &= ~SEMSR_SIRQ0; ipic_write(ipic->regs, IPIC_SEMSR, temp); primary_ipic = ipic; irq_set_default_host(primary_ipic->irqhost); ipic_write(ipic->regs, IPIC_SIMSR_H, 0); ipic_write(ipic->regs, IPIC_SIMSR_L, 0); printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS, primary_ipic->regs); return ipic; } int ipic_set_priority(unsigned int virq, unsigned int priority) { struct ipic *ipic = ipic_from_irq(virq); unsigned int src = virq_to_hw(virq); u32 temp; if (priority > 7) return -EINVAL; if (src > 127) return -EINVAL; if (ipic_info[src].prio == 0) return -EINVAL; temp = ipic_read(ipic->regs, ipic_info[src].prio); if (priority < 4) { temp &= ~(0x7 << (20 + (3 - priority) * 3)); temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3); } else { temp &= ~(0x7 << (4 + (7 - priority) * 3)); temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3); } ipic_write(ipic->regs, ipic_info[src].prio, temp); return 0; } void ipic_set_highest_priority(unsigned int virq) { struct ipic *ipic = ipic_from_irq(virq); unsigned int src = virq_to_hw(virq); u32 temp; temp = ipic_read(ipic->regs, IPIC_SICFR); /* clear and set HPI */ temp &= 0x7f000000; temp |= (src & 0x7f) << 24; ipic_write(ipic->regs, IPIC_SICFR, temp); } void ipic_set_default_priority(void) { ipic_write(primary_ipic->regs, IPIC_SIPRR_A, 
IPIC_PRIORITY_DEFAULT); ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT); ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT); ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT); ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT); ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT); } void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq) { struct ipic *ipic = primary_ipic; u32 temp; temp = ipic_read(ipic->regs, IPIC_SERMR); temp |= (1 << (31 - mcp_irq)); ipic_write(ipic->regs, IPIC_SERMR, temp); } void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq) { struct ipic *ipic = primary_ipic; u32 temp; temp = ipic_read(ipic->regs, IPIC_SERMR); temp &= (1 << (31 - mcp_irq)); ipic_write(ipic->regs, IPIC_SERMR, temp); } u32 ipic_get_mcp_status(void) { return ipic_read(primary_ipic->regs, IPIC_SERMR); } void ipic_clear_mcp_status(u32 mask) { ipic_write(primary_ipic->regs, IPIC_SERMR, mask); } /* Return an interrupt vector or NO_IRQ if no interrupt is pending. 
*/ unsigned int ipic_get_irq(void) { int irq; BUG_ON(primary_ipic == NULL); #define IPIC_SIVCR_VECTOR_MASK 0x7f irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK; if (irq == 0) /* 0 --> no irq is pending */ return NO_IRQ; return irq_linear_revmap(primary_ipic->irqhost, irq); } #ifdef CONFIG_SUSPEND static struct { u32 sicfr; u32 siprr[2]; u32 simsr[2]; u32 sicnr; u32 smprr[2]; u32 semsr; u32 secnr; u32 sermr; u32 sercr; } ipic_saved_state; static int ipic_suspend(void) { struct ipic *ipic = primary_ipic; ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR); ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A); ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D); ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H); ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L); ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR); ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A); ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B); ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR); ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR); ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR); ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR); if (fsl_deep_sleep()) { /* In deep sleep, make sure there can be no * pending interrupts, as this can cause * problems on 831x. 
*/ ipic_write(ipic->regs, IPIC_SIMSR_H, 0); ipic_write(ipic->regs, IPIC_SIMSR_L, 0); ipic_write(ipic->regs, IPIC_SEMSR, 0); ipic_write(ipic->regs, IPIC_SERMR, 0); } return 0; } static void ipic_resume(void) { struct ipic *ipic = primary_ipic; ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr); ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]); ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]); ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]); ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]); ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr); ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]); ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]); ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr); ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr); ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr); ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); } #else #define ipic_suspend NULL #define ipic_resume NULL #endif static struct syscore_ops ipic_syscore_ops = { .suspend = ipic_suspend, .resume = ipic_resume, }; static int __init init_ipic_syscore(void) { if (!primary_ipic || !primary_ipic->regs) return -ENODEV; printk(KERN_DEBUG "Registering ipic system core operations\n"); register_syscore_ops(&ipic_syscore_ops); return 0; } subsys_initcall(init_ipic_syscore);
gpl-2.0
chaoskagami/android_kernel_nvidia_roth
drivers/video/backlight/kb3886_bl.c
8128
4950
/* * Backlight Driver for the KB3886 Backlight * * Copyright (c) 2007-2008 Claudio Nieder * * Based on corgi_bl.c by Richard Purdie and kb3886 driver by Robert Woerle * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/delay.h> #include <linux/dmi.h> #define KB3886_PARENT 0x64 #define KB3886_IO 0x60 #define KB3886_ADC_DAC_PWM 0xC4 #define KB3886_PWM0_WRITE 0x81 #define KB3886_PWM0_READ 0x41 static DEFINE_MUTEX(bl_mutex); static void kb3886_bl_set_intensity(int intensity) { mutex_lock(&bl_mutex); intensity = intensity&0xff; outb(KB3886_ADC_DAC_PWM, KB3886_PARENT); msleep(10); outb(KB3886_PWM0_WRITE, KB3886_IO); msleep(10); outb(intensity, KB3886_IO); mutex_unlock(&bl_mutex); } struct kb3886bl_machinfo { int max_intensity; int default_intensity; int limit_mask; void (*set_bl_intensity)(int intensity); }; static struct kb3886bl_machinfo kb3886_bl_machinfo = { .max_intensity = 0xff, .default_intensity = 0xa0, .limit_mask = 0x7f, .set_bl_intensity = kb3886_bl_set_intensity, }; static struct platform_device kb3886bl_device = { .name = "kb3886-bl", .dev = { .platform_data = &kb3886_bl_machinfo, }, .id = -1, }; static struct platform_device *devices[] __initdata = { &kb3886bl_device, }; /* * Back to driver */ static int kb3886bl_intensity; static struct backlight_device *kb3886_backlight_device; static struct kb3886bl_machinfo *bl_machinfo; static unsigned long kb3886bl_flags; #define KB3886BL_SUSPENDED 0x01 static struct dmi_system_id __initdata kb3886bl_device_table[] = { { .ident = "Sahara Touch-iT", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SDV"), DMI_MATCH(DMI_PRODUCT_NAME, "iTouch T201"), }, }, { } }; static int 
kb3886bl_send_intensity(struct backlight_device *bd) { int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; if (kb3886bl_flags & KB3886BL_SUSPENDED) intensity = 0; bl_machinfo->set_bl_intensity(intensity); kb3886bl_intensity = intensity; return 0; } #ifdef CONFIG_PM static int kb3886bl_suspend(struct platform_device *pdev, pm_message_t state) { struct backlight_device *bd = platform_get_drvdata(pdev); kb3886bl_flags |= KB3886BL_SUSPENDED; backlight_update_status(bd); return 0; } static int kb3886bl_resume(struct platform_device *pdev) { struct backlight_device *bd = platform_get_drvdata(pdev); kb3886bl_flags &= ~KB3886BL_SUSPENDED; backlight_update_status(bd); return 0; } #else #define kb3886bl_suspend NULL #define kb3886bl_resume NULL #endif static int kb3886bl_get_intensity(struct backlight_device *bd) { return kb3886bl_intensity; } static const struct backlight_ops kb3886bl_ops = { .get_brightness = kb3886bl_get_intensity, .update_status = kb3886bl_send_intensity, }; static int kb3886bl_probe(struct platform_device *pdev) { struct backlight_properties props; struct kb3886bl_machinfo *machinfo = pdev->dev.platform_data; bl_machinfo = machinfo; if (!machinfo->limit_mask) machinfo->limit_mask = -1; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = machinfo->max_intensity; kb3886_backlight_device = backlight_device_register("kb3886-bl", &pdev->dev, NULL, &kb3886bl_ops, &props); if (IS_ERR(kb3886_backlight_device)) return PTR_ERR(kb3886_backlight_device); platform_set_drvdata(pdev, kb3886_backlight_device); kb3886_backlight_device->props.power = FB_BLANK_UNBLANK; kb3886_backlight_device->props.brightness = machinfo->default_intensity; backlight_update_status(kb3886_backlight_device); return 0; } static int kb3886bl_remove(struct platform_device *pdev) { struct backlight_device *bd = platform_get_drvdata(pdev); 
backlight_device_unregister(bd); return 0; } static struct platform_driver kb3886bl_driver = { .probe = kb3886bl_probe, .remove = kb3886bl_remove, .suspend = kb3886bl_suspend, .resume = kb3886bl_resume, .driver = { .name = "kb3886-bl", }, }; static int __init kb3886_init(void) { if (!dmi_check_system(kb3886bl_device_table)) return -ENODEV; platform_add_devices(devices, ARRAY_SIZE(devices)); return platform_driver_register(&kb3886bl_driver); } static void __exit kb3886_exit(void) { platform_driver_unregister(&kb3886bl_driver); } module_init(kb3886_init); module_exit(kb3886_exit); MODULE_AUTHOR("Claudio Nieder <private@claudio.ch>"); MODULE_DESCRIPTION("Tabletkiosk Sahara Touch-iT Backlight Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("dmi:*:svnSDV:pniTouchT201:*");
gpl-2.0
lookfiresu123/my_linux-3.13.0
fs/hpfs/name.c
10944
3234
/* * linux/fs/hpfs/name.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * operations with filenames */ #include "hpfs_fn.h" static inline int not_allowed_char(unsigned char c) { return c<' ' || c=='"' || c=='*' || c=='/' || c==':' || c=='<' || c=='>' || c=='?' || c=='\\' || c=='|'; } static inline int no_dos_char(unsigned char c) { /* Characters that are allowed in HPFS but not in DOS */ return c=='+' || c==',' || c==';' || c=='=' || c=='[' || c==']'; } static inline unsigned char upcase(unsigned char *dir, unsigned char a) { if (a<128 || a==255) return a>='a' && a<='z' ? a - 0x20 : a; if (!dir) return a; return dir[a-128]; } unsigned char hpfs_upcase(unsigned char *dir, unsigned char a) { return upcase(dir, a); } static inline unsigned char locase(unsigned char *dir, unsigned char a) { if (a<128 || a==255) return a>='A' && a<='Z' ? a + 0x20 : a; if (!dir) return a; return dir[a]; } int hpfs_chk_name(const unsigned char *name, unsigned *len) { int i; if (*len > 254) return -ENAMETOOLONG; hpfs_adjust_length(name, len); if (!*len) return -EINVAL; for (i = 0; i < *len; i++) if (not_allowed_char(name[i])) return -EINVAL; if (*len == 1) if (name[0] == '.') return -EINVAL; if (*len == 2) if (name[0] == '.' && name[1] == '.') return -EINVAL; return 0; } unsigned char *hpfs_translate_name(struct super_block *s, unsigned char *from, unsigned len, int lc, int lng) { unsigned char *to; int i; if (hpfs_sb(s)->sb_chk >= 2) if (hpfs_is_name_long(from, len) != lng) { printk("HPFS: Long name flag mismatch - name "); for (i=0; i<len; i++) printk("%c", from[i]); printk(" misidentified as %s.\n", lng ? "short" : "long"); printk("HPFS: It's nothing serious. 
It could happen because of bug in OS/2.\nHPFS: Set checks=normal to disable this message.\n"); } if (!lc) return from; if (!(to = kmalloc(len, GFP_KERNEL))) { printk("HPFS: can't allocate memory for name conversion buffer\n"); return from; } for (i = 0; i < len; i++) to[i] = locase(hpfs_sb(s)->sb_cp_table,from[i]); return to; } int hpfs_compare_names(struct super_block *s, const unsigned char *n1, unsigned l1, const unsigned char *n2, unsigned l2, int last) { unsigned l = l1 < l2 ? l1 : l2; unsigned i; if (last) return -1; for (i = 0; i < l; i++) { unsigned char c1 = upcase(hpfs_sb(s)->sb_cp_table,n1[i]); unsigned char c2 = upcase(hpfs_sb(s)->sb_cp_table,n2[i]); if (c1 < c2) return -1; if (c1 > c2) return 1; } if (l1 < l2) return -1; if (l1 > l2) return 1; return 0; } int hpfs_is_name_long(const unsigned char *name, unsigned len) { int i,j; for (i = 0; i < len && name[i] != '.'; i++) if (no_dos_char(name[i])) return 1; if (!i || i > 8) return 1; if (i == len) return 0; for (j = i + 1; j < len; j++) if (name[j] == '.' || no_dos_char(name[i])) return 1; return j - i > 4; } /* OS/2 clears dots and spaces at the end of file name, so we have to */ void hpfs_adjust_length(const unsigned char *name, unsigned *len) { if (!*len) return; if (*len == 1 && name[0] == '.') return; if (*len == 2 && name[0] == '.' && name[1] == '.') return; while (*len && (name[*len - 1] == '.' || name[*len - 1] == ' ')) (*len)--; }
gpl-2.0
sakuramilk/linux-3.4.y
arch/blackfin/mach-bf561/dma.c
12224
3134
/* * the simple DMA Implementation for Blackfin * * Copyright 2007-2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <asm/blackfin.h> #include <asm/dma.h> struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = { (struct dma_register *) DMA1_0_NEXT_DESC_PTR, (struct dma_register *) DMA1_1_NEXT_DESC_PTR, (struct dma_register *) DMA1_2_NEXT_DESC_PTR, (struct dma_register *) DMA1_3_NEXT_DESC_PTR, (struct dma_register *) DMA1_4_NEXT_DESC_PTR, (struct dma_register *) DMA1_5_NEXT_DESC_PTR, (struct dma_register *) DMA1_6_NEXT_DESC_PTR, (struct dma_register *) DMA1_7_NEXT_DESC_PTR, (struct dma_register *) DMA1_8_NEXT_DESC_PTR, (struct dma_register *) DMA1_9_NEXT_DESC_PTR, (struct dma_register *) DMA1_10_NEXT_DESC_PTR, (struct dma_register *) DMA1_11_NEXT_DESC_PTR, (struct dma_register *) DMA2_0_NEXT_DESC_PTR, (struct dma_register *) DMA2_1_NEXT_DESC_PTR, (struct dma_register *) DMA2_2_NEXT_DESC_PTR, (struct dma_register *) DMA2_3_NEXT_DESC_PTR, (struct dma_register *) DMA2_4_NEXT_DESC_PTR, (struct dma_register *) DMA2_5_NEXT_DESC_PTR, (struct dma_register *) DMA2_6_NEXT_DESC_PTR, (struct dma_register *) DMA2_7_NEXT_DESC_PTR, (struct dma_register *) DMA2_8_NEXT_DESC_PTR, (struct dma_register *) DMA2_9_NEXT_DESC_PTR, (struct dma_register *) DMA2_10_NEXT_DESC_PTR, (struct dma_register *) DMA2_11_NEXT_DESC_PTR, (struct dma_register *) MDMA_D0_NEXT_DESC_PTR, (struct dma_register *) MDMA_S0_NEXT_DESC_PTR, (struct dma_register *) MDMA_D1_NEXT_DESC_PTR, (struct dma_register *) MDMA_S1_NEXT_DESC_PTR, (struct dma_register *) MDMA_D2_NEXT_DESC_PTR, (struct dma_register *) MDMA_S2_NEXT_DESC_PTR, (struct dma_register *) MDMA_D3_NEXT_DESC_PTR, (struct dma_register *) MDMA_S3_NEXT_DESC_PTR, (struct dma_register *) IMDMA_D0_NEXT_DESC_PTR, (struct dma_register *) IMDMA_S0_NEXT_DESC_PTR, (struct dma_register *) IMDMA_D1_NEXT_DESC_PTR, (struct dma_register *) IMDMA_S1_NEXT_DESC_PTR, }; EXPORT_SYMBOL(dma_io_base_addr); int 
channel2irq(unsigned int channel) { int ret_irq = -1; switch (channel) { case CH_PPI0: ret_irq = IRQ_PPI0; break; case CH_PPI1: ret_irq = IRQ_PPI1; break; case CH_SPORT0_RX: ret_irq = IRQ_SPORT0_RX; break; case CH_SPORT0_TX: ret_irq = IRQ_SPORT0_TX; break; case CH_SPORT1_RX: ret_irq = IRQ_SPORT1_RX; break; case CH_SPORT1_TX: ret_irq = IRQ_SPORT1_TX; break; case CH_SPI: ret_irq = IRQ_SPI; break; case CH_UART_RX: ret_irq = IRQ_UART_RX; break; case CH_UART_TX: ret_irq = IRQ_UART_TX; break; case CH_MEM_STREAM0_SRC: case CH_MEM_STREAM0_DEST: ret_irq = IRQ_MEM_DMA0; break; case CH_MEM_STREAM1_SRC: case CH_MEM_STREAM1_DEST: ret_irq = IRQ_MEM_DMA1; break; case CH_MEM_STREAM2_SRC: case CH_MEM_STREAM2_DEST: ret_irq = IRQ_MEM_DMA2; break; case CH_MEM_STREAM3_SRC: case CH_MEM_STREAM3_DEST: ret_irq = IRQ_MEM_DMA3; break; case CH_IMEM_STREAM0_SRC: case CH_IMEM_STREAM0_DEST: ret_irq = IRQ_IMEM_DMA0; break; case CH_IMEM_STREAM1_SRC: case CH_IMEM_STREAM1_DEST: ret_irq = IRQ_IMEM_DMA1; break; } return ret_irq; }
gpl-2.0
felipesanches/linux-sunxi
mm/kmemcheck.c
12736
2910
#include <linux/gfp.h> #include <linux/mm_types.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/kmemcheck.h> void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) { struct page *shadow; int pages; int i; pages = 1 << order; /* * With kmemcheck enabled, we need to allocate a memory area for the * shadow bits as well. */ shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order); if (!shadow) { if (printk_ratelimit()) printk(KERN_ERR "kmemcheck: failed to allocate " "shadow bitmap\n"); return; } for(i = 0; i < pages; ++i) page[i].shadow = page_address(&shadow[i]); /* * Mark it as non-present for the MMU so that our accesses to * this memory will trigger a page fault and let us analyze * the memory accesses. */ kmemcheck_hide_pages(page, pages); } void kmemcheck_free_shadow(struct page *page, int order) { struct page *shadow; int pages; int i; if (!kmemcheck_page_is_tracked(page)) return; pages = 1 << order; kmemcheck_show_pages(page, pages); shadow = virt_to_page(page[0].shadow); for(i = 0; i < pages; ++i) page[i].shadow = NULL; __free_pages(shadow, order); } void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, size_t size) { /* * Has already been memset(), which initializes the shadow for us * as well. */ if (gfpflags & __GFP_ZERO) return; /* No need to initialize the shadow of a non-tracked slab. */ if (s->flags & SLAB_NOTRACK) return; if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) { /* * Allow notracked objects to be allocated from * tracked caches. Note however that these objects * will still get page faults on access, they just * won't ever be flagged as uninitialized. If page * faults are not acceptable, the slab cache itself * should be marked NOTRACK. */ kmemcheck_mark_initialized(object, size); } else if (!s->ctor) { /* * New objects should be marked uninitialized before * they're returned to the called. 
*/ kmemcheck_mark_uninitialized(object, size); } } void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size) { /* TODO: RCU freeing is unsupported for now; hide false positives. */ if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU)) kmemcheck_mark_freed(object, size); } void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order, gfp_t gfpflags) { int pages; if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK)) return; pages = 1 << order; /* * NOTE: We choose to track GFP_ZERO pages too; in fact, they * can become uninitialized by copying uninitialized memory * into them. */ /* XXX: Can use zone->node for node? */ kmemcheck_alloc_shadow(page, order, gfpflags, -1); if (gfpflags & __GFP_ZERO) kmemcheck_mark_initialized_pages(page, pages); else kmemcheck_mark_uninitialized_pages(page, pages); }
gpl-2.0
telf/TDR_patch_series_1
drivers/net/dsa/bcm_sf2.c
193
28736
/* * Broadcom Starfighter 2 DSA switch driver * * Copyright (C) 2014, Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/list.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/phy.h> #include <linux/phy_fixed.h> #include <linux/mii.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <net/dsa.h> #include <linux/ethtool.h> #include <linux/if_bridge.h> #include <linux/brcmphy.h> #include "bcm_sf2.h" #include "bcm_sf2_regs.h" /* String, offset, and register size in bytes if different from 4 bytes */ static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = { { "TxOctets", 0x000, 8 }, { "TxDropPkts", 0x020 }, { "TxQPKTQ0", 0x030 }, { "TxBroadcastPkts", 0x040 }, { "TxMulticastPkts", 0x050 }, { "TxUnicastPKts", 0x060 }, { "TxCollisions", 0x070 }, { "TxSingleCollision", 0x080 }, { "TxMultipleCollision", 0x090 }, { "TxDeferredCollision", 0x0a0 }, { "TxLateCollision", 0x0b0 }, { "TxExcessiveCollision", 0x0c0 }, { "TxFrameInDisc", 0x0d0 }, { "TxPausePkts", 0x0e0 }, { "TxQPKTQ1", 0x0f0 }, { "TxQPKTQ2", 0x100 }, { "TxQPKTQ3", 0x110 }, { "TxQPKTQ4", 0x120 }, { "TxQPKTQ5", 0x130 }, { "RxOctets", 0x140, 8 }, { "RxUndersizePkts", 0x160 }, { "RxPausePkts", 0x170 }, { "RxPkts64Octets", 0x180 }, { "RxPkts65to127Octets", 0x190 }, { "RxPkts128to255Octets", 0x1a0 }, { "RxPkts256to511Octets", 0x1b0 }, { "RxPkts512to1023Octets", 0x1c0 }, { "RxPkts1024toMaxPktsOctets", 0x1d0 }, { "RxOversizePkts", 0x1e0 }, { "RxJabbers", 0x1f0 }, { "RxAlignmentErrors", 0x200 }, { "RxFCSErrors", 0x210 }, { "RxGoodOctets", 0x220, 8 }, { "RxDropPkts", 0x240 }, { "RxUnicastPkts", 0x250 }, { "RxMulticastPkts", 0x260 }, { 
"RxBroadcastPkts", 0x270 }, { "RxSAChanges", 0x280 }, { "RxFragments", 0x290 }, { "RxJumboPkt", 0x2a0 }, { "RxSymblErr", 0x2b0 }, { "InRangeErrCount", 0x2c0 }, { "OutRangeErrCount", 0x2d0 }, { "EEELpiEvent", 0x2e0 }, { "EEELpiDuration", 0x2f0 }, { "RxDiscard", 0x300, 8 }, { "TxQPKTQ6", 0x320 }, { "TxQPKTQ7", 0x330 }, { "TxPkts64Octets", 0x340 }, { "TxPkts65to127Octets", 0x350 }, { "TxPkts128to255Octets", 0x360 }, { "TxPkts256to511Ocets", 0x370 }, { "TxPkts512to1023Ocets", 0x380 }, { "TxPkts1024toMaxPktOcets", 0x390 }, }; #define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib) static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port, uint8_t *data) { unsigned int i; for (i = 0; i < BCM_SF2_STATS_SIZE; i++) memcpy(data + i * ETH_GSTRING_LEN, bcm_sf2_mib[i].string, ETH_GSTRING_LEN); } static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) { struct bcm_sf2_priv *priv = ds_to_priv(ds); const struct bcm_sf2_hw_stats *s; unsigned int i; u64 val = 0; u32 offset; mutex_lock(&priv->stats_mutex); /* Now fetch the per-port counters */ for (i = 0; i < BCM_SF2_STATS_SIZE; i++) { s = &bcm_sf2_mib[i]; /* Do a latched 64-bit read if needed */ offset = s->reg + CORE_P_MIB_OFFSET(port); if (s->sizeof_stat == 8) val = core_readq(priv, offset); else val = core_readl(priv, offset); data[i] = (u64)val; } mutex_unlock(&priv->stats_mutex); } static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds) { return BCM_SF2_STATS_SIZE; } static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr) { return "Broadcom Starfighter 2"; } static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) { struct bcm_sf2_priv *priv = ds_to_priv(ds); unsigned int i; u32 reg; /* Enable the IMP Port to be in the same VLAN as the other ports * on a per-port basis such that we only have Port i and IMP in * the same VLAN. 
*/ for (i = 0; i < priv->hw_params.num_ports; i++) { if (!((1 << i) & ds->phys_port_mask)) continue; reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); reg |= (1 << cpu_port); core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); } } static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = ds_to_priv(ds); u32 reg, val; /* Enable the port memories */ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); reg &= ~P_TXQ_PSM_VDD(port); core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ reg = core_readl(priv, CORE_IMP_CTL); reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); reg &= ~(RX_DIS | TX_DIS); core_writel(priv, reg, CORE_IMP_CTL); /* Enable forwarding */ core_writel(priv, SW_FWDG_EN, CORE_SWMODE); /* Enable IMP port in dumb mode */ reg = core_readl(priv, CORE_SWITCH_CTRL); reg |= MII_DUMB_FWDG_EN; core_writel(priv, reg, CORE_SWITCH_CTRL); /* Resolve which bit controls the Broadcom tag */ switch (port) { case 8: val = BRCM_HDR_EN_P8; break; case 7: val = BRCM_HDR_EN_P7; break; case 5: val = BRCM_HDR_EN_P5; break; default: val = 0; break; } /* Enable Broadcom tags for IMP port */ reg = core_readl(priv, CORE_BRCM_HDR_CTRL); reg |= val; core_writel(priv, reg, CORE_BRCM_HDR_CTRL); /* Enable reception Broadcom tag for CPU TX (switch RX) to * allow us to tag outgoing frames */ reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS); reg &= ~(1 << port); core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS); /* Enable transmission of Broadcom tags from the switch (CPU RX) to * allow delivering frames to the per-port net_devices */ reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS); reg &= ~(1 << port); core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS); /* Force link status for IMP port */ reg = core_readl(priv, CORE_STS_OVERRIDE_IMP); reg |= (MII_SW_OR | LINK_STS); core_writel(priv, reg, CORE_STS_OVERRIDE_IMP); } static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable) { struct bcm_sf2_priv 
*priv = ds_to_priv(ds); u32 reg; reg = core_readl(priv, CORE_EEE_EN_CTRL); if (enable) reg |= 1 << port; else reg &= ~(1 << port); core_writel(priv, reg, CORE_EEE_EN_CTRL); } static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) { struct bcm_sf2_priv *priv = ds_to_priv(ds); u32 reg; reg = reg_readl(priv, REG_SPHY_CNTRL); if (enable) { reg |= PHY_RESET; reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS); reg_writel(priv, reg, REG_SPHY_CNTRL); udelay(21); reg = reg_readl(priv, REG_SPHY_CNTRL); reg &= ~PHY_RESET; } else { reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET; reg_writel(priv, reg, REG_SPHY_CNTRL); mdelay(1); reg |= CK25_DIS; } reg_writel(priv, reg, REG_SPHY_CNTRL); /* Use PHY-driven LED signaling */ if (!enable) { reg = reg_readl(priv, REG_LED_CNTRL(0)); reg |= SPDLNK_SRC_SEL; reg_writel(priv, reg, REG_LED_CNTRL(0)); } } static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, struct phy_device *phy) { struct bcm_sf2_priv *priv = ds_to_priv(ds); s8 cpu_port = ds->dst[ds->index].cpu_port; u32 reg; /* Clear the memory power down */ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); reg &= ~P_TXQ_PSM_VDD(port); core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); /* Clear the Rx and Tx disable bits and set to no spanning tree */ core_writel(priv, 0, CORE_G_PCTL_PORT(port)); /* Re-enable the GPHY and re-apply workarounds */ if (port == 0 && priv->hw_params.num_gphy == 1) { bcm_sf2_gphy_enable_set(ds, true); if (phy) { /* if phy_stop() has been called before, phy * will be in halted state, and phy_start() * will call resume. * * the resume path does not configure back * autoneg settings, and since we hard reset * the phy manually here, we need to reset the * state machine also. 
*/ phy->state = PHY_READY; phy_init_hw(phy); } } /* Enable port 7 interrupts to get notified */ if (port == 7) intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF)); /* Set this port, and only this one to be in the default VLAN, * if member of a bridge, restore its membership prior to * bringing down this port. */ reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); reg &= ~PORT_VLAN_CTRL_MASK; reg |= (1 << port); reg |= priv->port_sts[port].vlan_ctl_mask; core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port)); bcm_sf2_imp_vlan_setup(ds, cpu_port); /* If EEE was enabled, restore it */ if (priv->port_sts[port].eee.eee_enabled) bcm_sf2_eee_enable_set(ds, port, true); return 0; } static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, struct phy_device *phy) { struct bcm_sf2_priv *priv = ds_to_priv(ds); u32 off, reg; if (priv->wol_ports_mask & (1 << port)) return; if (port == 7) { intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF)); intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR); } if (port == 0 && priv->hw_params.num_gphy == 1) bcm_sf2_gphy_enable_set(ds, false); if (dsa_is_cpu_port(ds, port)) off = CORE_IMP_CTL; else off = CORE_G_PCTL_PORT(port); reg = core_readl(priv, off); reg |= RX_DIS | TX_DIS; core_writel(priv, reg, off); /* Power down the port memory */ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); reg |= P_TXQ_PSM_VDD(port); core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); } /* Returns 0 if EEE was not enabled, or 1 otherwise */ static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) { struct bcm_sf2_priv *priv = ds_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; int ret; p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full); ret = phy_init_eee(phy, 0); if (ret) return 0; bcm_sf2_eee_enable_set(ds, port, true); return 1; } static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { struct bcm_sf2_priv *priv = ds_to_priv(ds); struct ethtool_eee *p = 
&priv->port_sts[port].eee; u32 reg; reg = core_readl(priv, CORE_EEE_LPI_INDICATE); e->eee_enabled = p->eee_enabled; e->eee_active = !!(reg & (1 << port)); return 0; } static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e) { struct bcm_sf2_priv *priv = ds_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; p->eee_enabled = e->eee_enabled; if (!p->eee_enabled) { bcm_sf2_eee_enable_set(ds, port, false); } else { p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); if (!p->eee_enabled) return -EOPNOTSUPP; } return 0; } /* Fast-ageing of ARL entries for a given port, equivalent to an ARL * flush for that port. */ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = ds_to_priv(ds); unsigned int timeout = 1000; u32 reg; core_writel(priv, port, CORE_FAST_AGE_PORT); reg = core_readl(priv, CORE_FAST_AGE_CTRL); reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE; core_writel(priv, reg, CORE_FAST_AGE_CTRL); do { reg = core_readl(priv, CORE_FAST_AGE_CTRL); if (!(reg & FAST_AGE_STR_DONE)) break; cpu_relax(); } while (timeout--); if (!timeout) return -ETIMEDOUT; core_writel(priv, 0, CORE_FAST_AGE_CTRL); return 0; } static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, u32 br_port_mask) { struct bcm_sf2_priv *priv = ds_to_priv(ds); unsigned int i; u32 reg, p_ctl; p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); for (i = 0; i < priv->hw_params.num_ports; i++) { if (!((1 << i) & br_port_mask)) continue; /* Add this local port to the remote port VLAN control * membership and update the remote port bitmask */ reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); reg |= 1 << port; core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); priv->port_sts[i].vlan_ctl_mask = reg; p_ctl |= 1 << i; } /* Configure the local port VLAN control membership to include * remote ports and update the local port bitmask */ core_writel(priv, p_ctl, 
CORE_PORT_VLAN_CTL_PORT(port)); priv->port_sts[port].vlan_ctl_mask = p_ctl; return 0; } static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port, u32 br_port_mask) { struct bcm_sf2_priv *priv = ds_to_priv(ds); unsigned int i; u32 reg, p_ctl; p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); for (i = 0; i < priv->hw_params.num_ports; i++) { /* Don't touch the remaining ports */ if (!((1 << i) & br_port_mask)) continue; reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); reg &= ~(1 << port); core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); priv->port_sts[port].vlan_ctl_mask = reg; /* Prevent self removal to preserve isolation */ if (port != i) p_ctl &= ~(1 << i); } core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); priv->port_sts[port].vlan_ctl_mask = p_ctl; return 0; } static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) { struct bcm_sf2_priv *priv = ds_to_priv(ds); u8 hw_state, cur_hw_state; int ret = 0; u32 reg; reg = core_readl(priv, CORE_G_PCTL_PORT(port)); cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT); switch (state) { case BR_STATE_DISABLED: hw_state = G_MISTP_DIS_STATE; break; case BR_STATE_LISTENING: hw_state = G_MISTP_LISTEN_STATE; break; case BR_STATE_LEARNING: hw_state = G_MISTP_LEARN_STATE; break; case BR_STATE_FORWARDING: hw_state = G_MISTP_FWD_STATE; break; case BR_STATE_BLOCKING: hw_state = G_MISTP_BLOCK_STATE; break; default: pr_err("%s: invalid STP state: %d\n", __func__, state); return -EINVAL; } /* Fast-age ARL entries if we are moving a port from Learning or * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening * state (hw_state) */ if (cur_hw_state != hw_state) { if (cur_hw_state >= G_MISTP_LEARN_STATE && hw_state <= G_MISTP_LISTEN_STATE) { ret = bcm_sf2_sw_fast_age_port(ds, port); if (ret) { pr_err("%s: fast-ageing failed\n", __func__); return ret; } } } reg = core_readl(priv, CORE_G_PCTL_PORT(port)); reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT); reg |= 
hw_state; core_writel(priv, reg, CORE_G_PCTL_PORT(port)); return 0; } static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) { struct bcm_sf2_priv *priv = dev_id; priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & ~priv->irq0_mask; intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); return IRQ_HANDLED; } static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id) { struct bcm_sf2_priv *priv = dev_id; priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) & ~priv->irq1_mask; intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) priv->port_sts[7].link = 1; if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) priv->port_sts[7].link = 0; return IRQ_HANDLED; } static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) { unsigned int timeout = 1000; u32 reg; reg = core_readl(priv, CORE_WATCHDOG_CTRL); reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET; core_writel(priv, reg, CORE_WATCHDOG_CTRL); do { reg = core_readl(priv, CORE_WATCHDOG_CTRL); if (!(reg & SOFTWARE_RESET)) break; usleep_range(1000, 2000); } while (timeout-- > 0); if (timeout == 0) return -ETIMEDOUT; return 0; } static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv) { intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); } static int bcm_sf2_sw_setup(struct dsa_switch *ds) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; struct bcm_sf2_priv *priv = ds_to_priv(ds); struct device_node *dn; void __iomem **base; unsigned int port; unsigned int i; u32 reg, rev; int ret; spin_lock_init(&priv->indir_lock); mutex_init(&priv->stats_mutex); /* All the interesting properties are at the parent device_node * level */ dn = ds->pd->of_node->parent; priv->irq0 = 
irq_of_parse_and_map(dn, 0); priv->irq1 = irq_of_parse_and_map(dn, 1); base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { *base = of_iomap(dn, i); if (*base == NULL) { pr_err("unable to find register: %s\n", reg_names[i]); ret = -ENOMEM; goto out_unmap; } base++; } ret = bcm_sf2_sw_rst(priv); if (ret) { pr_err("unable to software reset switch: %d\n", ret); goto out_unmap; } /* Disable all interrupts and request them */ bcm_sf2_intr_disable(priv); ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, "switch_0", priv); if (ret < 0) { pr_err("failed to request switch_0 IRQ\n"); goto out_unmap; } ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, "switch_1", priv); if (ret < 0) { pr_err("failed to request switch_1 IRQ\n"); goto out_free_irq0; } /* Reset the MIB counters */ reg = core_readl(priv, CORE_GMNCFGCFG); reg |= RST_MIB_CNT; core_writel(priv, reg, CORE_GMNCFGCFG); reg &= ~RST_MIB_CNT; core_writel(priv, reg, CORE_GMNCFGCFG); /* Get the maximum number of ports for this switch */ priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1; if (priv->hw_params.num_ports > DSA_MAX_PORTS) priv->hw_params.num_ports = DSA_MAX_PORTS; /* Assume a single GPHY setup if we can't read that property */ if (of_property_read_u32(dn, "brcm,num-gphy", &priv->hw_params.num_gphy)) priv->hw_params.num_gphy = 1; /* Enable all valid ports and disable those unused */ for (port = 0; port < priv->hw_params.num_ports; port++) { /* IMP port receives special treatment */ if ((1 << port) & ds->phys_port_mask) bcm_sf2_port_setup(ds, port, NULL); else if (dsa_is_cpu_port(ds, port)) bcm_sf2_imp_setup(ds, port); else bcm_sf2_port_disable(ds, port, NULL); } /* Include the pseudo-PHY address and the broadcast PHY address to * divert reads towards our workaround. This is only required for * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such * that we can use the regular SWITCH_MDIO master controller instead. 
* * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask * to have a 1:1 mapping between Port address and PHY address in order * to utilize the slave_mii_bus instance to read from Port PHYs. This is * not what we want here, so we initialize phys_mii_mask 0 to always * utilize the "master" MDIO bus backed by the "mdio-unimac" driver. */ if (of_machine_is_compatible("brcm,bcm7445d0")) ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); else ds->phys_mii_mask = 0; rev = reg_readl(priv, REG_SWITCH_REVISION); priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & SWITCH_TOP_REV_MASK; priv->hw_params.core_rev = (rev & SF2_REV_MASK); rev = reg_readl(priv, REG_PHY_REVISION); priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, priv->core, priv->irq0, priv->irq1); return 0; out_free_irq0: free_irq(priv->irq0, priv); out_unmap: base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { if (*base) iounmap(*base); base++; } return ret; } static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr) { return 0; } static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = ds_to_priv(ds); /* The BCM7xxx PHY driver expects to find the integrated PHY revision * in bits 15:8 and the patch level in bits 7:0 which is exactly what * the REG_PHY_REVISION register layout is. 
*/ return priv->hw_params.gphy_rev; } static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr, int regnum, u16 val) { struct bcm_sf2_priv *priv = ds_to_priv(ds); int ret = 0; u32 reg; reg = reg_readl(priv, REG_SWITCH_CNTRL); reg |= MDIO_MASTER_SEL; reg_writel(priv, reg, REG_SWITCH_CNTRL); /* Page << 8 | offset */ reg = 0x70; reg <<= 2; core_writel(priv, addr, reg); /* Page << 8 | offset */ reg = 0x80 << 8 | regnum << 1; reg <<= 2; if (op) ret = core_readl(priv, reg); else core_writel(priv, val, reg); reg = reg_readl(priv, REG_SWITCH_CNTRL); reg &= ~MDIO_MASTER_SEL; reg_writel(priv, reg, REG_SWITCH_CNTRL); return ret & 0xffff; } static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum) { /* Intercept reads from the MDIO broadcast address or Broadcom * pseudo-PHY address */ switch (addr) { case 0: case BRCM_PSEUDO_PHY_ADDR: return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0); default: return 0xffff; } } static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val) { /* Intercept writes to the MDIO broadcast address or Broadcom * pseudo-PHY address */ switch (addr) { case 0: case BRCM_PSEUDO_PHY_ADDR: bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val); break; } return 0; } static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phydev) { struct bcm_sf2_priv *priv = ds_to_priv(ds); u32 id_mode_dis = 0, port_mode; const char *str = NULL; u32 reg; switch (phydev->interface) { case PHY_INTERFACE_MODE_RGMII: str = "RGMII (no delay)"; id_mode_dis = 1; case PHY_INTERFACE_MODE_RGMII_TXID: if (!str) str = "RGMII (TX delay)"; port_mode = EXT_GPHY; break; case PHY_INTERFACE_MODE_MII: str = "MII"; port_mode = EXT_EPHY; break; case PHY_INTERFACE_MODE_REVMII: str = "Reverse MII"; port_mode = EXT_REVMII; break; default: /* All other PHYs: internal and MoCA */ goto force_link; } /* If the link is down, just disable the interface to conserve power */ if (!phydev->link) { reg = reg_readl(priv, 
REG_RGMII_CNTRL_P(port)); reg &= ~RGMII_MODE_EN; reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); goto force_link; } /* Clear id_mode_dis bit, and the existing port mode, but * make sure we enable the RGMII block for data to pass */ reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); reg &= ~ID_MODE_DIS; reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT); reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN); reg |= port_mode | RGMII_MODE_EN; if (id_mode_dis) reg |= ID_MODE_DIS; if (phydev->pause) { if (phydev->asym_pause) reg |= TX_PAUSE_EN; reg |= RX_PAUSE_EN; } reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); pr_info("Port %d configured for %s\n", port, str); force_link: /* Force link settings detected from the PHY */ reg = SW_OVERRIDE; switch (phydev->speed) { case SPEED_1000: reg |= SPDSTS_1000 << SPEED_SHIFT; break; case SPEED_100: reg |= SPDSTS_100 << SPEED_SHIFT; break; } if (phydev->link) reg |= LINK_STS; if (phydev->duplex == DUPLEX_FULL) reg |= DUPLX_MODE; core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); } static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, struct fixed_phy_status *status) { struct bcm_sf2_priv *priv = ds_to_priv(ds); u32 duplex, pause; u32 reg; duplex = core_readl(priv, CORE_DUPSTS); pause = core_readl(priv, CORE_PAUSESTS); status->link = 0; /* Port 7 is special as we do not get link status from CORE_LNKSTS, * which means that we need to force the link at the port override * level to get the data to flow. We do use what the interrupt handler * did determine before. * * For the other ports, we just force the link status, since this is * a fixed PHY device. */ if (port == 7) { status->link = priv->port_sts[port].link; /* For MoCA interfaces, also force a link down notification * since some version of the user-space daemon (mocad) use * cmd->autoneg to force the link, which messes up the PHY * state machine and make it go in PHY_FORCING state instead. 
*/ if (!status->link) netif_carrier_off(ds->ports[port]); status->duplex = 1; } else { status->link = 1; status->duplex = !!(duplex & (1 << port)); } reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port)); reg |= SW_OVERRIDE; if (status->link) reg |= LINK_STS; else reg &= ~LINK_STS; core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); if ((pause & (1 << port)) && (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) { status->asym_pause = 1; status->pause = 1; } if (pause & (1 << port)) status->pause = 1; } static int bcm_sf2_sw_suspend(struct dsa_switch *ds) { struct bcm_sf2_priv *priv = ds_to_priv(ds); unsigned int port; bcm_sf2_intr_disable(priv); /* Disable all ports physically present including the IMP * port, the other ones have already been disabled during * bcm_sf2_sw_setup */ for (port = 0; port < DSA_MAX_PORTS; port++) { if ((1 << port) & ds->phys_port_mask || dsa_is_cpu_port(ds, port)) bcm_sf2_port_disable(ds, port, NULL); } return 0; } static int bcm_sf2_sw_resume(struct dsa_switch *ds) { struct bcm_sf2_priv *priv = ds_to_priv(ds); unsigned int port; int ret; ret = bcm_sf2_sw_rst(priv); if (ret) { pr_err("%s: failed to software reset switch\n", __func__); return ret; } if (priv->hw_params.num_gphy == 1) bcm_sf2_gphy_enable_set(ds, true); for (port = 0; port < DSA_MAX_PORTS; port++) { if ((1 << port) & ds->phys_port_mask) bcm_sf2_port_setup(ds, port, NULL); else if (dsa_is_cpu_port(ds, port)) bcm_sf2_imp_setup(ds, port); } return 0; } static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { struct net_device *p = ds->dst[ds->index].master_netdev; struct bcm_sf2_priv *priv = ds_to_priv(ds); struct ethtool_wolinfo pwol; /* Get the parent device WoL settings */ p->ethtool_ops->get_wol(p, &pwol); /* Advertise the parent device supported settings */ wol->supported = pwol.supported; memset(&wol->sopass, 0, sizeof(wol->sopass)); if (pwol.wolopts & WAKE_MAGICSECURE) memcpy(&wol->sopass, pwol.sopass, 
sizeof(wol->sopass)); if (priv->wol_ports_mask & (1 << port)) wol->wolopts = pwol.wolopts; else wol->wolopts = 0; } static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { struct net_device *p = ds->dst[ds->index].master_netdev; struct bcm_sf2_priv *priv = ds_to_priv(ds); s8 cpu_port = ds->dst[ds->index].cpu_port; struct ethtool_wolinfo pwol; p->ethtool_ops->get_wol(p, &pwol); if (wol->wolopts & ~pwol.supported) return -EINVAL; if (wol->wolopts) priv->wol_ports_mask |= (1 << port); else priv->wol_ports_mask &= ~(1 << port); /* If we have at least one port enabled, make sure the CPU port * is also enabled. If the CPU port is the last one enabled, we disable * it since this configuration does not make sense. */ if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port)) priv->wol_ports_mask |= (1 << cpu_port); else priv->wol_ports_mask &= ~(1 << cpu_port); return p->ethtool_ops->set_wol(p, wol); } static struct dsa_switch_driver bcm_sf2_switch_driver = { .tag_protocol = DSA_TAG_PROTO_BRCM, .priv_size = sizeof(struct bcm_sf2_priv), .probe = bcm_sf2_sw_probe, .setup = bcm_sf2_sw_setup, .set_addr = bcm_sf2_sw_set_addr, .get_phy_flags = bcm_sf2_sw_get_phy_flags, .phy_read = bcm_sf2_sw_phy_read, .phy_write = bcm_sf2_sw_phy_write, .get_strings = bcm_sf2_sw_get_strings, .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats, .get_sset_count = bcm_sf2_sw_get_sset_count, .adjust_link = bcm_sf2_sw_adjust_link, .fixed_link_update = bcm_sf2_sw_fixed_link_update, .suspend = bcm_sf2_sw_suspend, .resume = bcm_sf2_sw_resume, .get_wol = bcm_sf2_sw_get_wol, .set_wol = bcm_sf2_sw_set_wol, .port_enable = bcm_sf2_port_setup, .port_disable = bcm_sf2_port_disable, .get_eee = bcm_sf2_sw_get_eee, .set_eee = bcm_sf2_sw_set_eee, .port_join_bridge = bcm_sf2_sw_br_join, .port_leave_bridge = bcm_sf2_sw_br_leave, .port_stp_update = bcm_sf2_sw_br_set_stp_state, }; static int __init bcm_sf2_init(void) { register_switch_driver(&bcm_sf2_switch_driver); return 
0; } module_init(bcm_sf2_init); static void __exit bcm_sf2_exit(void) { unregister_switch_driver(&bcm_sf2_switch_driver); } module_exit(bcm_sf2_exit); MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:brcm-sf2");
gpl-2.0
optimsoc/gzll-gcc
gcc/testsuite/gcc.target/i386/avx-set-v8si-5.c
193
1071
/* { dg-do run } */ /* { dg-require-effective-target avx } */ /* { dg-options "-O2 -mavx" } */ #include "avx-check.h" static __m256i __attribute__((noinline)) foo (int x, int i) { switch (i) { case 7: return _mm256_set_epi32 (x, 1, 1, 1, 1, 1, 1, 1); case 6: return _mm256_set_epi32 (1, x, 1, 1, 1, 1, 1, 1); case 5: return _mm256_set_epi32 (1, 1, x, 1, 1, 1, 1, 1); case 4: return _mm256_set_epi32 (1, 1, 1, x, 1, 1, 1, 1); case 3: return _mm256_set_epi32 (1, 1, 1, 1, x, 1, 1, 1); case 2: return _mm256_set_epi32 (1, 1, 1, 1, 1, x, 1, 1); case 1: return _mm256_set_epi32 (1, 1, 1, 1, 1, 1, x, 1); case 0: return _mm256_set_epi32 (1, 1, 1, 1, 1, 1, 1, x); default: abort (); } } static void avx_test (void) { int e = 0xabadbeef; int v[8]; union256i_d u; int i, j; for (i = 0; i < ARRAY_SIZE (v); i++) { for (j = 0; j < ARRAY_SIZE (v); j++) v[j] = 1; v[i] = e; u.x = foo (e, i); if (check_union256i_d (u, v)) abort (); } }
gpl-2.0
minipli/linux-grsec
drivers/hid/uhid.c
193
17996
/* * User-space I/O driver support for HID subsystem * Copyright (c) 2012 David Herrmann */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/atomic.h> #include <linux/compat.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/hid.h> #include <linux/input.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/uhid.h> #include <linux/wait.h> #define UHID_NAME "uhid" #define UHID_BUFSIZE 32 struct uhid_device { struct mutex devlock; bool running; __u8 *rd_data; uint rd_size; struct hid_device *hid; struct uhid_event input_buf; wait_queue_head_t waitq; spinlock_t qlock; __u8 head; __u8 tail; struct uhid_event *outq[UHID_BUFSIZE]; /* blocking GET_REPORT support; state changes protected by qlock */ struct mutex report_lock; wait_queue_head_t report_wait; bool report_running; u32 report_id; u32 report_type; struct uhid_event report_buf; struct work_struct worker; }; static struct miscdevice uhid_misc; static void uhid_device_add_worker(struct work_struct *work) { struct uhid_device *uhid = container_of(work, struct uhid_device, worker); int ret; ret = hid_add_device(uhid->hid); if (ret) { hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret); hid_destroy_device(uhid->hid); uhid->hid = NULL; uhid->running = false; } } static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev) { __u8 newhead; newhead = (uhid->head + 1) % UHID_BUFSIZE; if (newhead != uhid->tail) { uhid->outq[uhid->head] = ev; uhid->head = newhead; wake_up_interruptible(&uhid->waitq); } else { hid_warn(uhid->hid, "Output queue is full\n"); kfree(ev); } } static int uhid_queue_event(struct uhid_device *uhid, __u32 event) { unsigned 
long flags; struct uhid_event *ev; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = event; spin_lock_irqsave(&uhid->qlock, flags); uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); return 0; } static int uhid_hid_start(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; struct uhid_event *ev; unsigned long flags; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = UHID_START; if (hid->report_enum[HID_FEATURE_REPORT].numbered) ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS; if (hid->report_enum[HID_OUTPUT_REPORT].numbered) ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS; if (hid->report_enum[HID_INPUT_REPORT].numbered) ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS; spin_lock_irqsave(&uhid->qlock, flags); uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); return 0; } static void uhid_hid_stop(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; hid->claimed = 0; uhid_queue_event(uhid, UHID_STOP); } static int uhid_hid_open(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; return uhid_queue_event(uhid, UHID_OPEN); } static void uhid_hid_close(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; uhid_queue_event(uhid, UHID_CLOSE); } static int uhid_hid_parse(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; return hid_parse_report(hid, uhid->rd_data, uhid->rd_size); } /* must be called with report_lock held */ static int __uhid_report_queue_and_wait(struct uhid_device *uhid, struct uhid_event *ev, __u32 *report_id) { unsigned long flags; int ret; spin_lock_irqsave(&uhid->qlock, flags); *report_id = ++uhid->report_id; uhid->report_type = ev->type + 1; uhid->report_running = true; uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); ret = wait_event_interruptible_timeout(uhid->report_wait, !uhid->report_running || !uhid->running, 5 * HZ); if (!ret 
|| !uhid->running || uhid->report_running) ret = -EIO; else if (ret < 0) ret = -ERESTARTSYS; else ret = 0; uhid->report_running = false; return ret; } static void uhid_report_wake_up(struct uhid_device *uhid, u32 id, const struct uhid_event *ev) { unsigned long flags; spin_lock_irqsave(&uhid->qlock, flags); /* id for old report; drop it silently */ if (uhid->report_type != ev->type || uhid->report_id != id) goto unlock; if (!uhid->report_running) goto unlock; memcpy(&uhid->report_buf, ev, sizeof(*ev)); uhid->report_running = false; wake_up_interruptible(&uhid->report_wait); unlock: spin_unlock_irqrestore(&uhid->qlock, flags); } static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum, u8 *buf, size_t count, u8 rtype) { struct uhid_device *uhid = hid->driver_data; struct uhid_get_report_reply_req *req; struct uhid_event *ev; int ret; if (!uhid->running) return -EIO; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = UHID_GET_REPORT; ev->u.get_report.rnum = rnum; ev->u.get_report.rtype = rtype; ret = mutex_lock_interruptible(&uhid->report_lock); if (ret) { kfree(ev); return ret; } /* this _always_ takes ownership of @ev */ ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id); if (ret) goto unlock; req = &uhid->report_buf.u.get_report_reply; if (req->err) { ret = -EIO; } else { ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX); memcpy(buf, req->data, ret); } unlock: mutex_unlock(&uhid->report_lock); return ret; } static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum, const u8 *buf, size_t count, u8 rtype) { struct uhid_device *uhid = hid->driver_data; struct uhid_event *ev; int ret; if (!uhid->running || count > UHID_DATA_MAX) return -EIO; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = UHID_SET_REPORT; ev->u.set_report.rnum = rnum; ev->u.set_report.rtype = rtype; ev->u.set_report.size = count; memcpy(ev->u.set_report.data, buf, count); ret = 
mutex_lock_interruptible(&uhid->report_lock); if (ret) { kfree(ev); return ret; } /* this _always_ takes ownership of @ev */ ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id); if (ret) goto unlock; if (uhid->report_buf.u.set_report_reply.err) ret = -EIO; else ret = count; unlock: mutex_unlock(&uhid->report_lock); return ret; } static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) { u8 u_rtype; switch (rtype) { case HID_FEATURE_REPORT: u_rtype = UHID_FEATURE_REPORT; break; case HID_OUTPUT_REPORT: u_rtype = UHID_OUTPUT_REPORT; break; case HID_INPUT_REPORT: u_rtype = UHID_INPUT_REPORT; break; default: return -EINVAL; } switch (reqtype) { case HID_REQ_GET_REPORT: return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype); case HID_REQ_SET_REPORT: return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype); default: return -EIO; } } static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count, unsigned char report_type) { struct uhid_device *uhid = hid->driver_data; __u8 rtype; unsigned long flags; struct uhid_event *ev; switch (report_type) { case HID_FEATURE_REPORT: rtype = UHID_FEATURE_REPORT; break; case HID_OUTPUT_REPORT: rtype = UHID_OUTPUT_REPORT; break; default: return -EINVAL; } if (count < 1 || count > UHID_DATA_MAX) return -EINVAL; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = UHID_OUTPUT; ev->u.output.size = count; ev->u.output.rtype = rtype; memcpy(ev->u.output.data, buf, count); spin_lock_irqsave(&uhid->qlock, flags); uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); return count; } static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf, size_t count) { return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT); } static struct hid_ll_driver uhid_hid_driver = { .start = uhid_hid_start, .stop = uhid_hid_stop, .open = uhid_hid_open, .close = uhid_hid_close, .parse = 
uhid_hid_parse, .raw_request = uhid_hid_raw_request, .output_report = uhid_hid_output_report, }; #ifdef CONFIG_COMPAT /* Apparently we haven't stepped on these rakes enough times yet. */ struct uhid_create_req_compat { __u8 name[128]; __u8 phys[64]; __u8 uniq[64]; compat_uptr_t rd_data; __u16 rd_size; __u16 bus; __u32 vendor; __u32 product; __u32 version; __u32 country; } __attribute__((__packed__)); static int uhid_event_from_user(const char __user *buffer, size_t len, struct uhid_event *event) { if (in_compat_syscall()) { u32 type; if (get_user(type, buffer)) return -EFAULT; if (type == UHID_CREATE) { /* * This is our messed up request with compat pointer. * It is largish (more than 256 bytes) so we better * allocate it from the heap. */ struct uhid_create_req_compat *compat; compat = kzalloc(sizeof(*compat), GFP_KERNEL); if (!compat) return -ENOMEM; buffer += sizeof(type); len -= sizeof(type); if (copy_from_user(compat, buffer, min(len, sizeof(*compat)))) { kfree(compat); return -EFAULT; } /* Shuffle the data over to proper structure */ event->type = type; memcpy(event->u.create.name, compat->name, sizeof(compat->name)); memcpy(event->u.create.phys, compat->phys, sizeof(compat->phys)); memcpy(event->u.create.uniq, compat->uniq, sizeof(compat->uniq)); event->u.create.rd_data = compat_ptr(compat->rd_data); event->u.create.rd_size = compat->rd_size; event->u.create.bus = compat->bus; event->u.create.vendor = compat->vendor; event->u.create.product = compat->product; event->u.create.version = compat->version; event->u.create.country = compat->country; kfree(compat); return 0; } /* All others can be copied directly */ } if (copy_from_user(event, buffer, min(len, sizeof(*event)))) return -EFAULT; return 0; } #else static int uhid_event_from_user(const char __user *buffer, size_t len, struct uhid_event *event) { if (copy_from_user(event, buffer, min(len, sizeof(*event)))) return -EFAULT; return 0; } #endif static int uhid_dev_create2(struct uhid_device *uhid, const 
struct uhid_event *ev) { struct hid_device *hid; size_t rd_size, len; void *rd_data; int ret; if (uhid->running) return -EALREADY; rd_size = ev->u.create2.rd_size; if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE) return -EINVAL; rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL); if (!rd_data) return -ENOMEM; uhid->rd_size = rd_size; uhid->rd_data = rd_data; hid = hid_allocate_device(); if (IS_ERR(hid)) { ret = PTR_ERR(hid); goto err_free; } len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1; strncpy(hid->name, ev->u.create2.name, len); len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1; strncpy(hid->phys, ev->u.create2.phys, len); len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1; strncpy(hid->uniq, ev->u.create2.uniq, len); hid->ll_driver = &uhid_hid_driver; hid->bus = ev->u.create2.bus; hid->vendor = ev->u.create2.vendor; hid->product = ev->u.create2.product; hid->version = ev->u.create2.version; hid->country = ev->u.create2.country; hid->driver_data = uhid; hid->dev.parent = uhid_misc.this_device; uhid->hid = hid; uhid->running = true; /* Adding of a HID device is done through a worker, to allow HID drivers * which use feature requests during .probe to work, without they would * be blocked on devlock, which is held by uhid_char_write. 
*/ schedule_work(&uhid->worker); return 0; err_free: kfree(uhid->rd_data); uhid->rd_data = NULL; uhid->rd_size = 0; return ret; } static int uhid_dev_create(struct uhid_device *uhid, struct uhid_event *ev) { struct uhid_create_req orig; orig = ev->u.create; if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE) return -EINVAL; if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size)) return -EFAULT; memcpy(ev->u.create2.name, orig.name, sizeof(orig.name)); memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys)); memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq)); ev->u.create2.rd_size = orig.rd_size; ev->u.create2.bus = orig.bus; ev->u.create2.vendor = orig.vendor; ev->u.create2.product = orig.product; ev->u.create2.version = orig.version; ev->u.create2.country = orig.country; return uhid_dev_create2(uhid, ev); } static int uhid_dev_destroy(struct uhid_device *uhid) { if (!uhid->running) return -EINVAL; uhid->running = false; wake_up_interruptible(&uhid->report_wait); cancel_work_sync(&uhid->worker); hid_destroy_device(uhid->hid); kfree(uhid->rd_data); return 0; } static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev) { if (!uhid->running) return -EINVAL; hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data, min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0); return 0; } static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev) { if (!uhid->running) return -EINVAL; hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data, min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0); return 0; } static int uhid_dev_get_report_reply(struct uhid_device *uhid, struct uhid_event *ev) { if (!uhid->running) return -EINVAL; uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev); return 0; } static int uhid_dev_set_report_reply(struct uhid_device *uhid, struct uhid_event *ev) { if (!uhid->running) return -EINVAL; uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev); return 0; } static int 
uhid_char_open(struct inode *inode, struct file *file) { struct uhid_device *uhid; uhid = kzalloc(sizeof(*uhid), GFP_KERNEL); if (!uhid) return -ENOMEM; mutex_init(&uhid->devlock); mutex_init(&uhid->report_lock); spin_lock_init(&uhid->qlock); init_waitqueue_head(&uhid->waitq); init_waitqueue_head(&uhid->report_wait); uhid->running = false; INIT_WORK(&uhid->worker, uhid_device_add_worker); file->private_data = uhid; nonseekable_open(inode, file); return 0; } static int uhid_char_release(struct inode *inode, struct file *file) { struct uhid_device *uhid = file->private_data; unsigned int i; uhid_dev_destroy(uhid); for (i = 0; i < UHID_BUFSIZE; ++i) kfree(uhid->outq[i]); kfree(uhid); return 0; } static ssize_t uhid_char_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct uhid_device *uhid = file->private_data; int ret; unsigned long flags; size_t len; /* they need at least the "type" member of uhid_event */ if (count < sizeof(__u32)) return -EINVAL; try_again: if (file->f_flags & O_NONBLOCK) { if (uhid->head == uhid->tail) return -EAGAIN; } else { ret = wait_event_interruptible(uhid->waitq, uhid->head != uhid->tail); if (ret) return ret; } ret = mutex_lock_interruptible(&uhid->devlock); if (ret) return ret; if (uhid->head == uhid->tail) { mutex_unlock(&uhid->devlock); goto try_again; } else { len = min(count, sizeof(**uhid->outq)); if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) { ret = -EFAULT; } else { kfree(uhid->outq[uhid->tail]); uhid->outq[uhid->tail] = NULL; spin_lock_irqsave(&uhid->qlock, flags); uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE; spin_unlock_irqrestore(&uhid->qlock, flags); } } mutex_unlock(&uhid->devlock); return ret ? 
ret : len; } static ssize_t uhid_char_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct uhid_device *uhid = file->private_data; int ret; size_t len; /* we need at least the "type" member of uhid_event */ if (count < sizeof(__u32)) return -EINVAL; ret = mutex_lock_interruptible(&uhid->devlock); if (ret) return ret; memset(&uhid->input_buf, 0, sizeof(uhid->input_buf)); len = min(count, sizeof(uhid->input_buf)); ret = uhid_event_from_user(buffer, len, &uhid->input_buf); if (ret) goto unlock; switch (uhid->input_buf.type) { case UHID_CREATE: ret = uhid_dev_create(uhid, &uhid->input_buf); break; case UHID_CREATE2: ret = uhid_dev_create2(uhid, &uhid->input_buf); break; case UHID_DESTROY: ret = uhid_dev_destroy(uhid); break; case UHID_INPUT: ret = uhid_dev_input(uhid, &uhid->input_buf); break; case UHID_INPUT2: ret = uhid_dev_input2(uhid, &uhid->input_buf); break; case UHID_GET_REPORT_REPLY: ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf); break; case UHID_SET_REPORT_REPLY: ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf); break; default: ret = -EOPNOTSUPP; } unlock: mutex_unlock(&uhid->devlock); /* return "count" not "len" to not confuse the caller */ return ret ? 
ret : count; } static unsigned int uhid_char_poll(struct file *file, poll_table *wait) { struct uhid_device *uhid = file->private_data; poll_wait(file, &uhid->waitq, wait); if (uhid->head != uhid->tail) return POLLIN | POLLRDNORM; return 0; } static const struct file_operations uhid_fops = { .owner = THIS_MODULE, .open = uhid_char_open, .release = uhid_char_release, .read = uhid_char_read, .write = uhid_char_write, .poll = uhid_char_poll, .llseek = no_llseek, }; static struct miscdevice uhid_misc = { .fops = &uhid_fops, .minor = UHID_MINOR, .name = UHID_NAME, }; module_misc_device(uhid_misc); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>"); MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem"); MODULE_ALIAS_MISCDEV(UHID_MINOR); MODULE_ALIAS("devname:" UHID_NAME);
gpl-2.0
bossino/tq210-kernel
arch/powerpc/kernel/btext.c
961
38468
/* * Procedures for drawing on the screen early on in the boot process. * * Benjamin Herrenschmidt <benh@kernel.crashing.org> */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/init.h> #include <linux/module.h> #include <linux/memblock.h> #include <asm/sections.h> #include <asm/prom.h> #include <asm/btext.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/udbg.h> #define NO_SCROLL #ifndef NO_SCROLL static void scrollscreen(void); #endif static void draw_byte(unsigned char c, long locX, long locY); static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb); static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb); static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb); #define __force_data __attribute__((__section__(".data"))) static int g_loc_X __force_data; static int g_loc_Y __force_data; static int g_max_loc_X __force_data; static int g_max_loc_Y __force_data; static int dispDeviceRowBytes __force_data; static int dispDeviceDepth __force_data; static int dispDeviceRect[4] __force_data; static unsigned char *dispDeviceBase __force_data; static unsigned char *logicalDisplayBase __force_data; unsigned long disp_BAT[2] __initdata = {0, 0}; #define cmapsz (16*256) static unsigned char vga_font[cmapsz]; int boot_text_mapped __force_data = 0; int force_printk_to_btext = 0; #ifdef CONFIG_PPC32 /* Calc BAT values for mapping the display and store them * in disp_BAT. Those values are then used from head.S to map * the display during identify_machine() and MMU_Init() * * The display is mapped to virtual address 0xD0000000, rather * than 1:1, because some some CHRP machines put the frame buffer * in the region starting at 0xC0000000 (PAGE_OFFSET). * This mapping is temporary and will disappear as soon as the * setup done by MMU_Init() is applied. * * For now, we align the BAT and then map 8Mb on 601 and 16Mb * on other PPCs. 
This may cause trouble if the framebuffer * is really badly aligned, but I didn't encounter this case * yet. */ void __init btext_prepare_BAT(void) { unsigned long vaddr = PAGE_OFFSET + 0x10000000; unsigned long addr; unsigned long lowbits; addr = (unsigned long)dispDeviceBase; if (!addr) { boot_text_mapped = 0; return; } if (PVR_VER(mfspr(SPRN_PVR)) != 1) { /* 603, 604, G3, G4, ... */ lowbits = addr & ~0xFF000000UL; addr &= 0xFF000000UL; disp_BAT[0] = vaddr | (BL_16M<<2) | 2; disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW); } else { /* 601 */ lowbits = addr & ~0xFF800000UL; addr &= 0xFF800000UL; disp_BAT[0] = vaddr | (_PAGE_NO_CACHE | PP_RWXX) | 4; disp_BAT[1] = addr | BL_8M | 0x40; } logicalDisplayBase = (void *) (vaddr + lowbits); } #endif /* This function can be used to enable the early boot text when doing * OF booting or within bootx init. It must be followed by a btext_unmap() * call before the logical address becomes unuseable */ void __init btext_setup_display(int width, int height, int depth, int pitch, unsigned long address) { g_loc_X = 0; g_loc_Y = 0; g_max_loc_X = width / 8; g_max_loc_Y = height / 16; logicalDisplayBase = (unsigned char *)address; dispDeviceBase = (unsigned char *)address; dispDeviceRowBytes = pitch; dispDeviceDepth = depth == 15 ? 16 : depth; dispDeviceRect[0] = dispDeviceRect[1] = 0; dispDeviceRect[2] = width; dispDeviceRect[3] = height; boot_text_mapped = 1; } void __init btext_unmap(void) { boot_text_mapped = 0; } /* Here's a small text engine to use during early boot * or for debugging purposes * * todo: * * - build some kind of vgacon with it to enable early printk * - move to a separate file * - add a few video driver hooks to keep in sync with display * changes. 
*/ static void map_boot_text(void) { unsigned long base, offset, size; unsigned char *vbase; /* By default, we are no longer mapped */ boot_text_mapped = 0; if (dispDeviceBase == 0) return; base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL; offset = ((unsigned long) dispDeviceBase) - base; size = dispDeviceRowBytes * dispDeviceRect[3] + offset + dispDeviceRect[0]; vbase = __ioremap(base, size, _PAGE_NO_CACHE); if (vbase == 0) return; logicalDisplayBase = vbase + offset; boot_text_mapped = 1; } int btext_initialize(struct device_node *np) { unsigned int width, height, depth, pitch; unsigned long address = 0; const u32 *prop; prop = of_get_property(np, "linux,bootx-width", NULL); if (prop == NULL) prop = of_get_property(np, "width", NULL); if (prop == NULL) return -EINVAL; width = *prop; prop = of_get_property(np, "linux,bootx-height", NULL); if (prop == NULL) prop = of_get_property(np, "height", NULL); if (prop == NULL) return -EINVAL; height = *prop; prop = of_get_property(np, "linux,bootx-depth", NULL); if (prop == NULL) prop = of_get_property(np, "depth", NULL); if (prop == NULL) return -EINVAL; depth = *prop; pitch = width * ((depth + 7) / 8); prop = of_get_property(np, "linux,bootx-linebytes", NULL); if (prop == NULL) prop = of_get_property(np, "linebytes", NULL); if (prop && *prop != 0xffffffffu) pitch = *prop; if (pitch == 1) pitch = 0x1000; prop = of_get_property(np, "linux,bootx-addr", NULL); if (prop == NULL) prop = of_get_property(np, "address", NULL); if (prop) address = *prop; /* FIXME: Add support for PCI reg properties. Right now, only * reliable on macs */ if (address == 0) return -EINVAL; g_loc_X = 0; g_loc_Y = 0; g_max_loc_X = width / 8; g_max_loc_Y = height / 16; dispDeviceBase = (unsigned char *)address; dispDeviceRowBytes = pitch; dispDeviceDepth = depth == 15 ? 
16 : depth; dispDeviceRect[0] = dispDeviceRect[1] = 0; dispDeviceRect[2] = width; dispDeviceRect[3] = height; map_boot_text(); return 0; } int __init btext_find_display(int allow_nonstdout) { const char *name; struct device_node *np = NULL; int rc = -ENODEV; name = of_get_property(of_chosen, "linux,stdout-path", NULL); if (name != NULL) { np = of_find_node_by_path(name); if (np != NULL) { if (strcmp(np->type, "display") != 0) { printk("boot stdout isn't a display !\n"); of_node_put(np); np = NULL; } } } if (np) rc = btext_initialize(np); if (rc == 0 || !allow_nonstdout) return rc; for_each_node_by_type(np, "display") { if (of_get_property(np, "linux,opened", NULL)) { printk("trying %s ...\n", np->full_name); rc = btext_initialize(np); printk("result: %d\n", rc); } if (rc == 0) break; } return rc; } /* Calc the base address of a given point (x,y) */ static unsigned char * calc_base(int x, int y) { unsigned char *base; base = logicalDisplayBase; if (base == 0) base = dispDeviceBase; base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3); base += (y + dispDeviceRect[1]) * dispDeviceRowBytes; return base; } /* Adjust the display to a new resolution */ void btext_update_display(unsigned long phys, int width, int height, int depth, int pitch) { if (dispDeviceBase == 0) return; /* check it's the same frame buffer (within 256MB) */ if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000) return; dispDeviceBase = (__u8 *) phys; dispDeviceRect[0] = 0; dispDeviceRect[1] = 0; dispDeviceRect[2] = width; dispDeviceRect[3] = height; dispDeviceDepth = depth; dispDeviceRowBytes = pitch; if (boot_text_mapped) { iounmap(logicalDisplayBase); boot_text_mapped = 0; } map_boot_text(); g_loc_X = 0; g_loc_Y = 0; g_max_loc_X = width / 8; g_max_loc_Y = height / 16; } EXPORT_SYMBOL(btext_update_display); void btext_clearscreen(void) { unsigned int *base = (unsigned int *)calc_base(0, 0); unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * (dispDeviceDepth >> 3)) >> 2; int i,j; 
for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++) { unsigned int *ptr = base; for(j=width; j; --j) *(ptr++) = 0; base += (dispDeviceRowBytes >> 2); } } void btext_flushscreen(void) { unsigned int *base = (unsigned int *)calc_base(0, 0); unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * (dispDeviceDepth >> 3)) >> 2; int i,j; for (i=0; i < (dispDeviceRect[3] - dispDeviceRect[1]); i++) { unsigned int *ptr = base; for(j = width; j > 0; j -= 8) { __asm__ __volatile__ ("dcbst 0,%0" :: "r" (ptr)); ptr += 8; } base += (dispDeviceRowBytes >> 2); } __asm__ __volatile__ ("sync" ::: "memory"); } void btext_flushline(void) { unsigned int *base = (unsigned int *)calc_base(0, g_loc_Y << 4); unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * (dispDeviceDepth >> 3)) >> 2; int i,j; for (i=0; i < 16; i++) { unsigned int *ptr = base; for(j = width; j > 0; j -= 8) { __asm__ __volatile__ ("dcbst 0,%0" :: "r" (ptr)); ptr += 8; } base += (dispDeviceRowBytes >> 2); } __asm__ __volatile__ ("sync" ::: "memory"); } #ifndef NO_SCROLL static void scrollscreen(void) { unsigned int *src = (unsigned int *)calc_base(0,16); unsigned int *dst = (unsigned int *)calc_base(0,0); unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * (dispDeviceDepth >> 3)) >> 2; int i,j; for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++) { unsigned int *src_ptr = src; unsigned int *dst_ptr = dst; for(j=width; j; --j) *(dst_ptr++) = *(src_ptr++); src += (dispDeviceRowBytes >> 2); dst += (dispDeviceRowBytes >> 2); } for (i=0; i<16; i++) { unsigned int *dst_ptr = dst; for(j=width; j; --j) *(dst_ptr++) = 0; dst += (dispDeviceRowBytes >> 2); } } #endif /* ndef NO_SCROLL */ void btext_drawchar(char c) { int cline = 0; #ifdef NO_SCROLL int x; #endif if (!boot_text_mapped) return; switch (c) { case '\b': if (g_loc_X > 0) --g_loc_X; break; case '\t': g_loc_X = (g_loc_X & -8) + 8; break; case '\r': g_loc_X = 0; break; case '\n': g_loc_X = 0; g_loc_Y++; cline = 1; break; 
default:
		draw_byte(c, g_loc_X++, g_loc_Y);
	}

	/* wrap to the next line once past the right edge */
	if (g_loc_X >= g_max_loc_X) {
		g_loc_X = 0;
		g_loc_Y++;
		cline = 1;
	}
#ifndef NO_SCROLL
	while (g_loc_Y >= g_max_loc_Y) {
		scrollscreen();
		g_loc_Y--;
	}
#else
	/* wrap around from bottom to top of screen so we don't
	 * waste time scrolling each line.  -- paulus. */
	if (g_loc_Y >= g_max_loc_Y)
		g_loc_Y = 0;
	if (cline) {
		/* erase whatever was on the reused line */
		for (x = 0; x < g_max_loc_X; ++x)
			draw_byte(' ', x, g_loc_Y);
	}
#endif
}

/* Draw a NUL-terminated string at the cursor. */
void btext_drawstring(const char *c)
{
	if (!boot_text_mapped)
		return;
	while (*c)
		btext_drawchar(*c++);
}

/* Draw exactly @len characters (no terminator required). */
void btext_drawtext(const char *c, unsigned int len)
{
	if (!boot_text_mapped)
		return;
	while (len--)
		btext_drawchar(*c++);
}

/* Draw @v as fixed-width hex (16 digits on ppc64, 8 on ppc32) followed
 * by a trailing space. */
void btext_drawhex(unsigned long v)
{
	if (!boot_text_mapped)
		return;
#ifdef CONFIG_PPC64
	btext_drawchar(hex_asc_hi(v >> 56));
	btext_drawchar(hex_asc_lo(v >> 56));
	btext_drawchar(hex_asc_hi(v >> 48));
	btext_drawchar(hex_asc_lo(v >> 48));
	btext_drawchar(hex_asc_hi(v >> 40));
	btext_drawchar(hex_asc_lo(v >> 40));
	btext_drawchar(hex_asc_hi(v >> 32));
	btext_drawchar(hex_asc_lo(v >> 32));
#endif
	btext_drawchar(hex_asc_hi(v >> 24));
	btext_drawchar(hex_asc_lo(v >> 24));
	btext_drawchar(hex_asc_hi(v >> 16));
	btext_drawchar(hex_asc_lo(v >> 16));
	btext_drawchar(hex_asc_hi(v >> 8));
	btext_drawchar(hex_asc_lo(v >> 8));
	btext_drawchar(hex_asc_hi(v));
	btext_drawchar(hex_asc_lo(v));
	btext_drawchar(' ');
}

/* Render one 8x16 glyph of vga_font at text cell (locX, locY),
 * dispatching on the frame-buffer depth. */
static void draw_byte(unsigned char c, long locX, long locY)
{
	unsigned char *base = calc_base(locX << 3, locY << 4);
	unsigned char *font = &vga_font[((unsigned int)c) * 16];
	int rb = dispDeviceRowBytes;

	switch (dispDeviceDepth) {
	case 24:
	case 32:
		draw_byte_32(font, (unsigned int *)base, rb);
		break;
	case 15:
	case 16:
		draw_byte_16(font, (unsigned int *)base, rb);
		break;
	case 8:
		draw_byte_8(font, (unsigned int *)base, rb);
		break;
	}
}

/* Lookup table: expand a 4-bit font nibble into 4 one-byte pixels. */
static unsigned int expand_bits_8[16] = {
	0x00000000, 0x000000ff, 0x0000ff00, 0x0000ffff,
	0x00ff0000, 0x00ff00ff, 0x00ffff00, 0x00ffffff,
	0xff000000, 0xff0000ff, 0xff00ff00, 0xff00ffff,
	0xffff0000, 0xffff00ff,
0xffffff00, 0xffffffff
};

/* Lookup table: expand 2 font bits into two 16bpp pixels. */
static unsigned int expand_bits_16[4] = {
	0x00000000, 0x0000ffff, 0xffff0000, 0xffffffff
};

/* Blit one 16-row glyph at 24/32bpp: one word per pixel, fg on bg. */
static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0xFFFFFFFFUL;
	int bg = 0x00000000UL;

	for (l = 0; l < 16; ++l) {
		bits = *font++;
		/* -(bit) yields all-ones when the bit is set, 0 otherwise */
		base[0] = (-(bits >> 7) & fg) ^ bg;
		base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
		base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
		base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
		base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
		base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
		base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
		base[7] = (-(bits & 1) & fg) ^ bg;
		/* advance one scanline (rb is in bytes) */
		base = (unsigned int *) ((char *)base + rb);
	}
}

/* Blit one glyph at 15/16bpp: two pixels per 32-bit store. */
static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0xFFFFFFFFUL;
	int bg = 0x00000000UL;
	unsigned int *eb = (int *)expand_bits_16;

	for (l = 0; l < 16; ++l) {
		bits = *font++;
		base[0] = (eb[bits >> 6] & fg) ^ bg;
		base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
		base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
		base[3] = (eb[bits & 3] & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}

/* Blit one glyph at 8bpp: four pixels per 32-bit store (colour 0x0f). */
static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0x0F0F0F0FUL;
	int bg = 0x00000000UL;
	unsigned int *eb = (int *)expand_bits_8;

	for (l = 0; l < 16; ++l) {
		bits = *font++;
		base[0] = (eb[bits >> 4] & fg) ^ bg;
		base[1] = (eb[bits & 0xf] & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}

/* Standard 8x16 VGA console font bitmap, 16 bytes per glyph. */
static unsigned char vga_font[cmapsz] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
	0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
	0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
	0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
	0x3c, 0x3c,
0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd, 0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e, 0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30, 0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63, 0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb, 0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c, 0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18, 0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe, 0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0, 0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde, 0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7, 0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c, 0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18, 0x3c, 0x66, 
0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60, 0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60, 0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60, 0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 
0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60, 0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30, 0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18, 0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b, 0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c, 0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00, 0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c, 0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06, 0x0c, 0x1f, 
0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36, 0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0xc6, 
0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60, 0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,
};

/* Route very-early udbg character output through the btext console. */
void __init udbg_init_btext(void)
{
	/* If btext is enabled, we might have a BAT setup for early display,
	 * thus we do enable some very basic udbg output
	 */
	udbg_putc = btext_drawchar;
}
gpl-2.0
RolanDroid/lge_MonsterKernel-lproj
drivers/leds/leds-ot200.c
1217
3113
/* * Bachmann ot200 leds driver. * * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * Christian Gmeiner <christian.gmeiner@gmail.com> * * License: GPL as published by the FSF. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/leds.h> #include <linux/io.h> #include <linux/module.h> struct ot200_led { struct led_classdev cdev; const char *name; unsigned long port; u8 mask; }; /* * The device has three leds on the back panel (led_err, led_init and led_run) * and can handle up to seven leds on the front panel. */ static struct ot200_led leds[] = { { .name = "led_run", .port = 0x5a, .mask = BIT(0), }, { .name = "led_init", .port = 0x5a, .mask = BIT(1), }, { .name = "led_err", .port = 0x5a, .mask = BIT(2), }, { .name = "led_1", .port = 0x49, .mask = BIT(6), }, { .name = "led_2", .port = 0x49, .mask = BIT(5), }, { .name = "led_3", .port = 0x49, .mask = BIT(4), }, { .name = "led_4", .port = 0x49, .mask = BIT(3), }, { .name = "led_5", .port = 0x49, .mask = BIT(2), }, { .name = "led_6", .port = 0x49, .mask = BIT(1), }, { .name = "led_7", .port = 0x49, .mask = BIT(0), } }; static DEFINE_SPINLOCK(value_lock); /* * we need to store the current led states, as it is not * possible to read the current led state via inb(). 
*/ static u8 leds_back; static u8 leds_front; static void ot200_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { struct ot200_led *led = container_of(led_cdev, struct ot200_led, cdev); u8 *val; unsigned long flags; spin_lock_irqsave(&value_lock, flags); if (led->port == 0x49) val = &leds_front; else if (led->port == 0x5a) val = &leds_back; else BUG(); if (value == LED_OFF) *val &= ~led->mask; else *val |= led->mask; outb(*val, led->port); spin_unlock_irqrestore(&value_lock, flags); } static int __devinit ot200_led_probe(struct platform_device *pdev) { int i; int ret; for (i = 0; i < ARRAY_SIZE(leds); i++) { leds[i].cdev.name = leds[i].name; leds[i].cdev.brightness_set = ot200_led_brightness_set; ret = led_classdev_register(&pdev->dev, &leds[i].cdev); if (ret < 0) goto err; } leds_front = 0; /* turn off all front leds */ leds_back = BIT(1); /* turn on init led */ outb(leds_front, 0x49); outb(leds_back, 0x5a); return 0; err: for (i = i - 1; i >= 0; i--) led_classdev_unregister(&leds[i].cdev); return ret; } static int __devexit ot200_led_remove(struct platform_device *pdev) { int i; for (i = 0; i < ARRAY_SIZE(leds); i++) led_classdev_unregister(&leds[i].cdev); return 0; } static struct platform_driver ot200_led_driver = { .probe = ot200_led_probe, .remove = __devexit_p(ot200_led_remove), .driver = { .name = "leds-ot200", .owner = THIS_MODULE, }, }; module_platform_driver(ot200_led_driver); MODULE_AUTHOR("Sebastian A. Siewior <bigeasy@linutronix.de>"); MODULE_DESCRIPTION("ot200 LED driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:leds-ot200");
gpl-2.0
chrisc93/bullhead
drivers/staging/media/solo6x10/solo6x10-disp.c
2241
9889
/*
 * Copyright (C) 2010-2013 Bluecherry, LLC <http://www.bluecherrydvr.com>
 *
 * Original author:
 * Ben Collins <bcollins@ubuntu.com>
 *
 * Additional work by:
 * John Brooks <john.brooks@bluecherry.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>

#include "solo6x10.h"

#define SOLO_VCLK_DELAY			3
#define SOLO_PROGRESSIVE_VSIZE		1024

#define SOLO_MOT_THRESH_W		64
#define SOLO_MOT_THRESH_H		64
#define SOLO_MOT_THRESH_SIZE		8192
#define SOLO_MOT_THRESH_REAL		(SOLO_MOT_THRESH_W * SOLO_MOT_THRESH_H)
#define SOLO_MOT_FLAG_SIZE		1024
#define SOLO_MOT_FLAG_AREA		(SOLO_MOT_FLAG_SIZE * 16)

/* Program the video-input side: per-pair clock delays, active-area
 * windows, and the NTSC/PAL playback timing registers. */
static void solo_vin_config(struct solo_dev *solo_dev)
{
	solo_dev->vin_hstart = 8;
	solo_dev->vin_vstart = 2;

	solo_reg_write(solo_dev, SOLO_SYS_VCLK,
		       SOLO_VCLK_SELECT(2) |
		       SOLO_VCLK_VIN1415_DELAY(SOLO_VCLK_DELAY) |
		       SOLO_VCLK_VIN1213_DELAY(SOLO_VCLK_DELAY) |
		       SOLO_VCLK_VIN1011_DELAY(SOLO_VCLK_DELAY) |
		       SOLO_VCLK_VIN0809_DELAY(SOLO_VCLK_DELAY) |
		       SOLO_VCLK_VIN0607_DELAY(SOLO_VCLK_DELAY) |
		       SOLO_VCLK_VIN0405_DELAY(SOLO_VCLK_DELAY) |
		       SOLO_VCLK_VIN0203_DELAY(SOLO_VCLK_DELAY) |
		       SOLO_VCLK_VIN0001_DELAY(SOLO_VCLK_DELAY));

	solo_reg_write(solo_dev, SOLO_VI_ACT_I_P,
		       SOLO_VI_H_START(solo_dev->vin_hstart) |
		       SOLO_VI_V_START(solo_dev->vin_vstart) |
		       SOLO_VI_V_STOP(solo_dev->vin_vstart +
				      solo_dev->video_vsize));
	solo_reg_write(solo_dev, SOLO_VI_ACT_I_S,
		       SOLO_VI_H_START(solo_dev->vout_hstart) |
		       SOLO_VI_V_START(solo_dev->vout_vstart) |
		       SOLO_VI_V_STOP(solo_dev->vout_vstart +
				      solo_dev->video_vsize));
	solo_reg_write(solo_dev, SOLO_VI_ACT_P, SOLO_VI_H_START(0) |
		       SOLO_VI_V_START(1) |
		       SOLO_VI_V_STOP(SOLO_PROGRESSIVE_VSIZE));

	solo_reg_write(solo_dev, SOLO_VI_CH_FORMAT,
		       SOLO_VI_FD_SEL_MASK(0) | SOLO_VI_PROG_MASK(0));

	/* On 6110, initialize mosaic darkness strength */
	if (solo_dev->type == SOLO_DEV_6010)
		solo_reg_write(solo_dev, SOLO_VI_FMT_CFG, 0);
	else
		solo_reg_write(solo_dev, SOLO_VI_FMT_CFG, 16 << 22);

	solo_reg_write(solo_dev, SOLO_VI_PAGE_SW, 2);

	if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) {
		solo_reg_write(solo_dev, SOLO_VI_PB_CONFIG,
			       SOLO_VI_PB_USER_MODE);
		solo_reg_write(solo_dev, SOLO_VI_PB_RANGE_HV,
			       SOLO_VI_PB_HSIZE(858) | SOLO_VI_PB_VSIZE(246));
		solo_reg_write(solo_dev, SOLO_VI_PB_ACT_V,
			       SOLO_VI_PB_VSTART(4) |
			       SOLO_VI_PB_VSTOP(4 + 240));
	} else {
		solo_reg_write(solo_dev, SOLO_VI_PB_CONFIG,
			       SOLO_VI_PB_USER_MODE | SOLO_VI_PB_PAL);
		solo_reg_write(solo_dev, SOLO_VI_PB_RANGE_HV,
			       SOLO_VI_PB_HSIZE(864) | SOLO_VI_PB_VSIZE(294));
		solo_reg_write(solo_dev, SOLO_VI_PB_ACT_V,
			       SOLO_VI_PB_VSTART(4) |
			       SOLO_VI_PB_VSTOP(4 + 288));
	}
	solo_reg_write(solo_dev, SOLO_VI_PB_ACT_H, SOLO_VI_PB_HSTART(16) |
		       SOLO_VI_PB_HSTOP(16 + 720));
}

/* Blank the hardware cursor and give it neutral position/colours. */
static void solo_vout_config_cursor(struct solo_dev *dev)
{
	int i;

	/* Load (blank) cursor bitmap mask (2bpp) */
	for (i = 0; i < 20; i++)
		solo_reg_write(dev, SOLO_VO_CURSOR_MASK(i), 0);

	solo_reg_write(dev, SOLO_VO_CURSOR_POS, 0);
	solo_reg_write(dev, SOLO_VO_CURSOR_CLR,
		       (0x80 << 24) | (0x80 << 16) | (0x10 << 8) | 0x80);
	solo_reg_write(dev, SOLO_VO_CURSOR_CLR2, (0xe0 << 8) | 0x80);
}

/* Program the video-output side: encoder format, active window, border
 * and background colours, then enable the display and all channels. */
static void solo_vout_config(struct solo_dev *solo_dev)
{
	solo_dev->vout_hstart = 6;
	solo_dev->vout_vstart = 8;

	solo_reg_write(solo_dev, SOLO_VO_FMT_ENC,
		       solo_dev->video_type | SOLO_VO_USER_COLOR_SET_NAV |
		       SOLO_VO_USER_COLOR_SET_NAH | SOLO_VO_NA_COLOR_Y(0) |
		       SOLO_VO_NA_COLOR_CB(0) | SOLO_VO_NA_COLOR_CR(0));

	solo_reg_write(solo_dev, SOLO_VO_ACT_H,
		       SOLO_VO_H_START(solo_dev->vout_hstart) |
		       SOLO_VO_H_STOP(solo_dev->vout_hstart +
				      solo_dev->video_hsize));
	solo_reg_write(solo_dev, SOLO_VO_ACT_V,
		       SOLO_VO_V_START(solo_dev->vout_vstart) |
		       SOLO_VO_V_STOP(solo_dev->vout_vstart +
				      solo_dev->video_vsize));

	solo_reg_write(solo_dev, SOLO_VO_RANGE_HV,
		       SOLO_VO_H_LEN(solo_dev->video_hsize) |
		       SOLO_VO_V_LEN(solo_dev->video_vsize));

	/* Border & background colors */
	solo_reg_write(solo_dev, SOLO_VO_BORDER_LINE_COLOR,
		       (0xa0 << 24) | (0x88 << 16) | (0xa0 << 8) | 0x88);
	solo_reg_write(solo_dev, SOLO_VO_BORDER_FILL_COLOR,
		       (0x10 << 24) | (0x8f << 16) | (0x10 << 8) | 0x8f);
	solo_reg_write(solo_dev, SOLO_VO_BKG_COLOR,
		       (16 << 24) | (128 << 16) | (16 << 8) | 128);

	solo_reg_write(solo_dev, SOLO_VO_DISP_ERASE, SOLO_VO_DISP_ERASE_ON);
	solo_reg_write(solo_dev, SOLO_VI_WIN_SW, 0);
	solo_reg_write(solo_dev, SOLO_VO_ZOOM_CTRL, 0);
	solo_reg_write(solo_dev, SOLO_VO_FREEZE_CTRL, 0);

	solo_reg_write(solo_dev, SOLO_VO_DISP_CTRL, SOLO_VO_DISP_ON |
		       SOLO_VO_DISP_ERASE_COUNT(8) |
		       SOLO_VO_DISP_BASE(SOLO_DISP_EXT_ADDR));

	solo_vout_config_cursor(solo_dev);

	/* Enable channels we support */
	solo_reg_write(solo_dev, SOLO_VI_CH_ENA,
		       (1 << solo_dev->nr_chans) - 1);
}

/* Fill @reg_size bytes of the motion region at @off with the 16-bit
 * pattern @val, pushed to external memory via P2M DMA in 128-byte
 * chunks.  Returns 0 on success, non-zero if any DMA failed. */
static int solo_dma_vin_region(struct solo_dev *solo_dev, u32 off,
			       u16 val, int reg_size)
{
	u16 buf[64];
	int i;
	int ret = 0;

	for (i = 0; i < sizeof(buf) >> 1; i++)
		buf[i] = cpu_to_le16(val);

	for (i = 0; i < reg_size; i += sizeof(buf))
		ret |= solo_p2m_dma(solo_dev, 1, buf,
				    SOLO_MOTION_EXT_ADDR(solo_dev) + off + i,
				    sizeof(buf), 0, 0);

	return ret;
}

/* Set one global motion-detection threshold for channel @ch.
 * NOTE(review): the bound uses '>' although @ch appears 0-based — the
 * value ch == nr_chans is accepted; confirm against callers. */
int solo_set_motion_threshold(struct solo_dev *solo_dev, u8 ch, u16 val)
{
	if (ch > solo_dev->nr_chans)
		return -EINVAL;

	return solo_dma_vin_region(solo_dev, SOLO_MOT_FLAG_AREA +
				   (ch * SOLO_MOT_THRESH_SIZE * 2),
				   val, SOLO_MOT_THRESH_SIZE);
}

int solo_set_motion_block(struct solo_dev *solo_dev, u8 ch, const struct
solo_motion_thresholds *thresholds) { u32 off = SOLO_MOT_FLAG_AREA + ch * SOLO_MOT_THRESH_SIZE * 2; u16 buf[64]; int x, y; int ret = 0; memset(buf, 0, sizeof(buf)); for (y = 0; y < SOLO_MOTION_SZ; y++) { for (x = 0; x < SOLO_MOTION_SZ; x++) buf[x] = cpu_to_le16(thresholds->thresholds[y][x]); ret |= solo_p2m_dma(solo_dev, 1, buf, SOLO_MOTION_EXT_ADDR(solo_dev) + off + y * sizeof(buf), sizeof(buf), 0, 0); } return ret; } /* First 8k is motion flag (512 bytes * 16). Following that is an 8k+8k * threshold and working table for each channel. Atleast that's what the * spec says. However, this code (taken from rdk) has some mystery 8k * block right after the flag area, before the first thresh table. */ static void solo_motion_config(struct solo_dev *solo_dev) { int i; for (i = 0; i < solo_dev->nr_chans; i++) { /* Clear motion flag area */ solo_dma_vin_region(solo_dev, i * SOLO_MOT_FLAG_SIZE, 0x0000, SOLO_MOT_FLAG_SIZE); /* Clear working cache table */ solo_dma_vin_region(solo_dev, SOLO_MOT_FLAG_AREA + (i * SOLO_MOT_THRESH_SIZE * 2) + SOLO_MOT_THRESH_SIZE, 0x0000, SOLO_MOT_THRESH_SIZE); /* Set default threshold table */ solo_set_motion_threshold(solo_dev, i, SOLO_DEF_MOT_THRESH); } /* Default motion settings */ solo_reg_write(solo_dev, SOLO_VI_MOT_ADR, SOLO_VI_MOTION_EN(0) | (SOLO_MOTION_EXT_ADDR(solo_dev) >> 16)); solo_reg_write(solo_dev, SOLO_VI_MOT_CTRL, SOLO_VI_MOTION_FRAME_COUNT(3) | SOLO_VI_MOTION_SAMPLE_LENGTH(solo_dev->video_hsize / 16) /* | SOLO_VI_MOTION_INTR_START_STOP */ | SOLO_VI_MOTION_SAMPLE_COUNT(10)); solo_reg_write(solo_dev, SOLO_VI_MOTION_BORDER, 0); solo_reg_write(solo_dev, SOLO_VI_MOTION_BAR, 0); } int solo_disp_init(struct solo_dev *solo_dev) { int i; solo_dev->video_hsize = 704; if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) { solo_dev->video_vsize = 240; solo_dev->fps = 30; } else { solo_dev->video_vsize = 288; solo_dev->fps = 25; } solo_vin_config(solo_dev); solo_motion_config(solo_dev); solo_vout_config(solo_dev); for (i = 0; i < 
solo_dev->nr_chans; i++) solo_reg_write(solo_dev, SOLO_VI_WIN_ON(i), 1); return 0; } void solo_disp_exit(struct solo_dev *solo_dev) { int i; solo_reg_write(solo_dev, SOLO_VO_DISP_CTRL, 0); solo_reg_write(solo_dev, SOLO_VO_ZOOM_CTRL, 0); solo_reg_write(solo_dev, SOLO_VO_FREEZE_CTRL, 0); for (i = 0; i < solo_dev->nr_chans; i++) { solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL0(i), 0); solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL1(i), 0); solo_reg_write(solo_dev, SOLO_VI_WIN_ON(i), 0); } /* Set default border */ for (i = 0; i < 5; i++) solo_reg_write(solo_dev, SOLO_VO_BORDER_X(i), 0); for (i = 0; i < 5; i++) solo_reg_write(solo_dev, SOLO_VO_BORDER_Y(i), 0); solo_reg_write(solo_dev, SOLO_VO_BORDER_LINE_MASK, 0); solo_reg_write(solo_dev, SOLO_VO_BORDER_FILL_MASK, 0); solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_CTRL(0), 0); solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_START(0), 0); solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_STOP(0), 0); solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_CTRL(1), 0); solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_START(1), 0); solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_STOP(1), 0); }
gpl-2.0
derekcentrico/m6.kernel.3.x
lib/find_last_bit.c
3009
1139
/* find_last_bit.c: fallback find next bit implementation * * Copyright (C) 2008 IBM Corporation * Written by Rusty Russell <rusty@rustcorp.com.au> * (Inspired by David Howell's find_next_bit implementation) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/bitops.h> #include <linux/module.h> #include <asm/types.h> #include <asm/byteorder.h> #ifndef find_last_bit unsigned long find_last_bit(const unsigned long *addr, unsigned long size) { unsigned long words; unsigned long tmp; /* Start at final word. */ words = size / BITS_PER_LONG; /* Partial final word? */ if (size & (BITS_PER_LONG-1)) { tmp = (addr[words] & (~0UL >> (BITS_PER_LONG - (size & (BITS_PER_LONG-1))))); if (tmp) goto found; } while (words) { tmp = addr[--words]; if (tmp) { found: return words * BITS_PER_LONG + __fls(tmp); } } /* Not found */ return size; } EXPORT_SYMBOL(find_last_bit); #endif
gpl-2.0
BobZmotion/android_kernel_sony_msm8974
drivers/block/rbd.c
3265
59885
/* rbd.c -- Export ceph rados objects as a Linux block device based on drivers/block/osdblk.c: Copyright 2009 Red Hat, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. For usage instructions, please refer to: Documentation/ABI/testing/sysfs-bus-rbd */ #include <linux/ceph/libceph.h> #include <linux/ceph/osd_client.h> #include <linux/ceph/mon_client.h> #include <linux/ceph/decode.h> #include <linux/parser.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/blkdev.h> #include "rbd_types.h" /* * The basic unit of block I/O is a sector. It is interpreted in a * number of contexts in Linux (blk, bio, genhd), but the default is * universally 512 bytes. These symbols are just slightly more * meaningful than the bare numbers they represent. */ #define SECTOR_SHIFT 9 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT) #define RBD_DRV_NAME "rbd" #define RBD_DRV_NAME_LONG "rbd (rados block device)" #define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */ #define RBD_MAX_MD_NAME_LEN (RBD_MAX_OBJ_NAME_LEN + sizeof(RBD_SUFFIX)) #define RBD_MAX_POOL_NAME_LEN 64 #define RBD_MAX_SNAP_NAME_LEN 32 #define RBD_MAX_OPT_LEN 1024 #define RBD_SNAP_HEAD_NAME "-" /* * An RBD device name will be "rbd#", where the "rbd" comes from * RBD_DRV_NAME above, and # is a unique integer identifier. 
* MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big * enough to hold all possible device names. */ #define DEV_NAME_LEN 32 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1) #define RBD_NOTIFY_TIMEOUT_DEFAULT 10 /* * block device image metadata (in-memory version) */ struct rbd_image_header { u64 image_size; char block_name[32]; __u8 obj_order; __u8 crypt_type; __u8 comp_type; struct ceph_snap_context *snapc; size_t snap_names_len; u64 snap_seq; u32 total_snaps; char *snap_names; u64 *snap_sizes; u64 obj_version; }; struct rbd_options { int notify_timeout; }; /* * an instance of the client. multiple devices may share an rbd client. */ struct rbd_client { struct ceph_client *client; struct rbd_options *rbd_opts; struct kref kref; struct list_head node; }; /* * a request completion status */ struct rbd_req_status { int done; int rc; u64 bytes; }; /* * a collection of requests */ struct rbd_req_coll { int total; int num_done; struct kref kref; struct rbd_req_status status[0]; }; /* * a single io request */ struct rbd_request { struct request *rq; /* blk layer request */ struct bio *bio; /* cloned bio */ struct page **pages; /* list of used pages */ u64 len; int coll_index; struct rbd_req_coll *coll; }; struct rbd_snap { struct device dev; const char *name; size_t size; struct list_head node; u64 id; }; /* * a single device */ struct rbd_device { int id; /* blkdev unique id */ int major; /* blkdev assigned major */ struct gendisk *disk; /* blkdev's gendisk and rq */ struct request_queue *q; struct rbd_client *rbd_client; char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */ spinlock_t lock; /* queue lock */ struct rbd_image_header header; char obj[RBD_MAX_OBJ_NAME_LEN]; /* rbd image name */ int obj_len; char obj_md_name[RBD_MAX_MD_NAME_LEN]; /* hdr nm. 
*/ char pool_name[RBD_MAX_POOL_NAME_LEN]; int poolid; struct ceph_osd_event *watch_event; struct ceph_osd_request *watch_request; /* protects updating the header */ struct rw_semaphore header_rwsem; char snap_name[RBD_MAX_SNAP_NAME_LEN]; u32 cur_snap; /* index+1 of current snapshot within snap context 0 - for the head */ int read_only; struct list_head node; /* list of snapshots */ struct list_head snaps; /* sysfs related */ struct device dev; }; static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */ static LIST_HEAD(rbd_dev_list); /* devices */ static DEFINE_SPINLOCK(rbd_dev_list_lock); static LIST_HEAD(rbd_client_list); /* clients */ static DEFINE_SPINLOCK(rbd_client_list_lock); static int __rbd_init_snaps_header(struct rbd_device *rbd_dev); static void rbd_dev_release(struct device *dev); static ssize_t rbd_snap_add(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev, struct rbd_snap *snap); static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count); static ssize_t rbd_remove(struct bus_type *bus, const char *buf, size_t count); static struct bus_attribute rbd_bus_attrs[] = { __ATTR(add, S_IWUSR, NULL, rbd_add), __ATTR(remove, S_IWUSR, NULL, rbd_remove), __ATTR_NULL }; static struct bus_type rbd_bus_type = { .name = "rbd", .bus_attrs = rbd_bus_attrs, }; static void rbd_root_dev_release(struct device *dev) { } static struct device rbd_root_dev = { .init_name = "rbd", .release = rbd_root_dev_release, }; static struct device *rbd_get_dev(struct rbd_device *rbd_dev) { return get_device(&rbd_dev->dev); } static void rbd_put_dev(struct rbd_device *rbd_dev) { put_device(&rbd_dev->dev); } static int __rbd_update_snaps(struct rbd_device *rbd_dev); static int rbd_open(struct block_device *bdev, fmode_t mode) { struct rbd_device *rbd_dev = bdev->bd_disk->private_data; rbd_get_dev(rbd_dev); set_device_ro(bdev, rbd_dev->read_only); if ((mode & 
FMODE_WRITE) && rbd_dev->read_only) return -EROFS; return 0; } static int rbd_release(struct gendisk *disk, fmode_t mode) { struct rbd_device *rbd_dev = disk->private_data; rbd_put_dev(rbd_dev); return 0; } static const struct block_device_operations rbd_bd_ops = { .owner = THIS_MODULE, .open = rbd_open, .release = rbd_release, }; /* * Initialize an rbd client instance. * We own *opt. */ static struct rbd_client *rbd_client_create(struct ceph_options *opt, struct rbd_options *rbd_opts) { struct rbd_client *rbdc; int ret = -ENOMEM; dout("rbd_client_create\n"); rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL); if (!rbdc) goto out_opt; kref_init(&rbdc->kref); INIT_LIST_HEAD(&rbdc->node); mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); rbdc->client = ceph_create_client(opt, rbdc, 0, 0); if (IS_ERR(rbdc->client)) goto out_mutex; opt = NULL; /* Now rbdc->client is responsible for opt */ ret = ceph_open_session(rbdc->client); if (ret < 0) goto out_err; rbdc->rbd_opts = rbd_opts; spin_lock(&rbd_client_list_lock); list_add_tail(&rbdc->node, &rbd_client_list); spin_unlock(&rbd_client_list_lock); mutex_unlock(&ctl_mutex); dout("rbd_client_create created %p\n", rbdc); return rbdc; out_err: ceph_destroy_client(rbdc->client); out_mutex: mutex_unlock(&ctl_mutex); kfree(rbdc); out_opt: if (opt) ceph_destroy_options(opt); return ERR_PTR(ret); } /* * Find a ceph client with specific addr and configuration. 
*/ static struct rbd_client *__rbd_client_find(struct ceph_options *opt) { struct rbd_client *client_node; if (opt->flags & CEPH_OPT_NOSHARE) return NULL; list_for_each_entry(client_node, &rbd_client_list, node) if (ceph_compare_options(opt, client_node->client) == 0) return client_node; return NULL; } /* * mount options */ enum { Opt_notify_timeout, Opt_last_int, /* int args above */ Opt_last_string, /* string args above */ }; static match_table_t rbdopt_tokens = { {Opt_notify_timeout, "notify_timeout=%d"}, /* int args above */ /* string args above */ {-1, NULL} }; static int parse_rbd_opts_token(char *c, void *private) { struct rbd_options *rbdopt = private; substring_t argstr[MAX_OPT_ARGS]; int token, intval, ret; token = match_token(c, rbdopt_tokens, argstr); if (token < 0) return -EINVAL; if (token < Opt_last_int) { ret = match_int(&argstr[0], &intval); if (ret < 0) { pr_err("bad mount option arg (not int) " "at '%s'\n", c); return ret; } dout("got int token %d val %d\n", token, intval); } else if (token > Opt_last_int && token < Opt_last_string) { dout("got string token %d val %s\n", token, argstr[0].from); } else { dout("got token %d\n", token); } switch (token) { case Opt_notify_timeout: rbdopt->notify_timeout = intval; break; default: BUG_ON(token); } return 0; } /* * Get a ceph client with specific addr and configuration, if one does * not exist create it. 
*/ static struct rbd_client *rbd_get_client(const char *mon_addr, size_t mon_addr_len, char *options) { struct rbd_client *rbdc; struct ceph_options *opt; struct rbd_options *rbd_opts; rbd_opts = kzalloc(sizeof(*rbd_opts), GFP_KERNEL); if (!rbd_opts) return ERR_PTR(-ENOMEM); rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT; opt = ceph_parse_options(options, mon_addr, mon_addr + mon_addr_len, parse_rbd_opts_token, rbd_opts); if (IS_ERR(opt)) { kfree(rbd_opts); return ERR_CAST(opt); } spin_lock(&rbd_client_list_lock); rbdc = __rbd_client_find(opt); if (rbdc) { /* using an existing client */ kref_get(&rbdc->kref); spin_unlock(&rbd_client_list_lock); ceph_destroy_options(opt); kfree(rbd_opts); return rbdc; } spin_unlock(&rbd_client_list_lock); rbdc = rbd_client_create(opt, rbd_opts); if (IS_ERR(rbdc)) kfree(rbd_opts); return rbdc; } /* * Destroy ceph client * * Caller must hold rbd_client_list_lock. */ static void rbd_client_release(struct kref *kref) { struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref); dout("rbd_release_client %p\n", rbdc); list_del(&rbdc->node); ceph_destroy_client(rbdc->client); kfree(rbdc->rbd_opts); kfree(rbdc); } /* * Drop reference to ceph client node. If it's not referenced anymore, release * it. */ static void rbd_put_client(struct rbd_device *rbd_dev) { spin_lock(&rbd_client_list_lock); kref_put(&rbd_dev->rbd_client->kref, rbd_client_release); spin_unlock(&rbd_client_list_lock); rbd_dev->rbd_client = NULL; } /* * Destroy requests collection */ static void rbd_coll_release(struct kref *kref) { struct rbd_req_coll *coll = container_of(kref, struct rbd_req_coll, kref); dout("rbd_coll_release %p\n", coll); kfree(coll); } /* * Create a new header structure, translate header format from the on-disk * header. 
*/ static int rbd_header_from_disk(struct rbd_image_header *header, struct rbd_image_header_ondisk *ondisk, int allocated_snaps, gfp_t gfp_flags) { int i; u32 snap_count; if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) return -ENXIO; snap_count = le32_to_cpu(ondisk->snap_count); header->snapc = kmalloc(sizeof(struct ceph_snap_context) + snap_count * sizeof (*ondisk), gfp_flags); if (!header->snapc) return -ENOMEM; header->snap_names_len = le64_to_cpu(ondisk->snap_names_len); if (snap_count) { header->snap_names = kmalloc(header->snap_names_len, GFP_KERNEL); if (!header->snap_names) goto err_snapc; header->snap_sizes = kmalloc(snap_count * sizeof(u64), GFP_KERNEL); if (!header->snap_sizes) goto err_names; } else { header->snap_names = NULL; header->snap_sizes = NULL; } memcpy(header->block_name, ondisk->block_name, sizeof(ondisk->block_name)); header->image_size = le64_to_cpu(ondisk->image_size); header->obj_order = ondisk->options.order; header->crypt_type = ondisk->options.crypt_type; header->comp_type = ondisk->options.comp_type; atomic_set(&header->snapc->nref, 1); header->snap_seq = le64_to_cpu(ondisk->snap_seq); header->snapc->num_snaps = snap_count; header->total_snaps = snap_count; if (snap_count && allocated_snaps == snap_count) { for (i = 0; i < snap_count; i++) { header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id); header->snap_sizes[i] = le64_to_cpu(ondisk->snaps[i].image_size); } /* copy snapshot names */ memcpy(header->snap_names, &ondisk->snaps[i], header->snap_names_len); } return 0; err_names: kfree(header->snap_names); err_snapc: kfree(header->snapc); return -ENOMEM; } static int snap_index(struct rbd_image_header *header, int snap_num) { return header->total_snaps - snap_num; } static u64 cur_snap_id(struct rbd_device *rbd_dev) { struct rbd_image_header *header = &rbd_dev->header; if (!rbd_dev->cur_snap) return 0; return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)]; } static int snap_by_name(struct 
rbd_image_header *header, const char *snap_name, u64 *seq, u64 *size) { int i; char *p = header->snap_names; for (i = 0; i < header->total_snaps; i++) { if (!strcmp(snap_name, p)) { /* Found it. Pass back its id and/or size */ if (seq) *seq = header->snapc->snaps[i]; if (size) *size = header->snap_sizes[i]; return i; } p += strlen(p) + 1; /* Skip ahead to the next name */ } return -ENOENT; } static int rbd_header_set_snap(struct rbd_device *dev, u64 *size) { struct rbd_image_header *header = &dev->header; struct ceph_snap_context *snapc = header->snapc; int ret = -ENOENT; BUILD_BUG_ON(sizeof (dev->snap_name) < sizeof (RBD_SNAP_HEAD_NAME)); down_write(&dev->header_rwsem); if (!memcmp(dev->snap_name, RBD_SNAP_HEAD_NAME, sizeof (RBD_SNAP_HEAD_NAME))) { if (header->total_snaps) snapc->seq = header->snap_seq; else snapc->seq = 0; dev->cur_snap = 0; dev->read_only = 0; if (size) *size = header->image_size; } else { ret = snap_by_name(header, dev->snap_name, &snapc->seq, size); if (ret < 0) goto done; dev->cur_snap = header->total_snaps - ret; dev->read_only = 1; } ret = 0; done: up_write(&dev->header_rwsem); return ret; } static void rbd_header_free(struct rbd_image_header *header) { kfree(header->snapc); kfree(header->snap_names); kfree(header->snap_sizes); } /* * get the actual striped segment name, offset and length */ static u64 rbd_get_segment(struct rbd_image_header *header, const char *block_name, u64 ofs, u64 len, char *seg_name, u64 *segofs) { u64 seg = ofs >> header->obj_order; if (seg_name) snprintf(seg_name, RBD_MAX_SEG_NAME_LEN, "%s.%012llx", block_name, seg); ofs = ofs & ((1 << header->obj_order) - 1); len = min_t(u64, len, (1 << header->obj_order) - ofs); if (segofs) *segofs = ofs; return len; } static int rbd_get_num_segments(struct rbd_image_header *header, u64 ofs, u64 len) { u64 start_seg = ofs >> header->obj_order; u64 end_seg = (ofs + len - 1) >> header->obj_order; return end_seg - start_seg + 1; } /* * returns the size of an object in the image */ 
static u64 rbd_obj_bytes(struct rbd_image_header *header) { return 1 << header->obj_order; } /* * bio helpers */ static void bio_chain_put(struct bio *chain) { struct bio *tmp; while (chain) { tmp = chain; chain = chain->bi_next; bio_put(tmp); } } /* * zeros a bio chain, starting at specific offset */ static void zero_bio_chain(struct bio *chain, int start_ofs) { struct bio_vec *bv; unsigned long flags; void *buf; int i; int pos = 0; while (chain) { bio_for_each_segment(bv, chain, i) { if (pos + bv->bv_len > start_ofs) { int remainder = max(start_ofs - pos, 0); buf = bvec_kmap_irq(bv, &flags); memset(buf + remainder, 0, bv->bv_len - remainder); bvec_kunmap_irq(buf, &flags); } pos += bv->bv_len; } chain = chain->bi_next; } } /* * bio_chain_clone - clone a chain of bios up to a certain length. * might return a bio_pair that will need to be released. */ static struct bio *bio_chain_clone(struct bio **old, struct bio **next, struct bio_pair **bp, int len, gfp_t gfpmask) { struct bio *tmp, *old_chain = *old, *new_chain = NULL, *tail = NULL; int total = 0; if (*bp) { bio_pair_release(*bp); *bp = NULL; } while (old_chain && (total < len)) { tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs); if (!tmp) goto err_out; if (total + old_chain->bi_size > len) { struct bio_pair *bp; /* * this split can only happen with a single paged bio, * split_bio will BUG_ON if this is not the case */ dout("bio_chain_clone split! total=%d remaining=%d" "bi_size=%d\n", (int)total, (int)len-total, (int)old_chain->bi_size); /* split the bio. 
We'll release it either in the next call, or it will have to be released outside */ bp = bio_split(old_chain, (len - total) / SECTOR_SIZE); if (!bp) goto err_out; __bio_clone(tmp, &bp->bio1); *next = &bp->bio2; } else { __bio_clone(tmp, old_chain); *next = old_chain->bi_next; } tmp->bi_bdev = NULL; gfpmask &= ~__GFP_WAIT; tmp->bi_next = NULL; if (!new_chain) { new_chain = tail = tmp; } else { tail->bi_next = tmp; tail = tmp; } old_chain = old_chain->bi_next; total += tmp->bi_size; } BUG_ON(total < len); if (tail) tail->bi_next = NULL; *old = old_chain; return new_chain; err_out: dout("bio_chain_clone with err\n"); bio_chain_put(new_chain); return NULL; } /* * helpers for osd request op vectors. */ static int rbd_create_rw_ops(struct ceph_osd_req_op **ops, int num_ops, int opcode, u32 payload_len) { *ops = kzalloc(sizeof(struct ceph_osd_req_op) * (num_ops + 1), GFP_NOIO); if (!*ops) return -ENOMEM; (*ops)[0].op = opcode; /* * op extent offset and length will be set later on * in calc_raw_layout() */ (*ops)[0].payload_len = payload_len; return 0; } static void rbd_destroy_ops(struct ceph_osd_req_op *ops) { kfree(ops); } static void rbd_coll_end_req_index(struct request *rq, struct rbd_req_coll *coll, int index, int ret, u64 len) { struct request_queue *q; int min, max, i; dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n", coll, index, ret, len); if (!rq) return; if (!coll) { blk_end_request(rq, ret, len); return; } q = rq->q; spin_lock_irq(q->queue_lock); coll->status[index].done = 1; coll->status[index].rc = ret; coll->status[index].bytes = len; max = min = coll->num_done; while (max < coll->total && coll->status[max].done) max++; for (i = min; i<max; i++) { __blk_end_request(rq, coll->status[i].rc, coll->status[i].bytes); coll->num_done++; kref_put(&coll->kref, rbd_coll_release); } spin_unlock_irq(q->queue_lock); } static void rbd_coll_end_req(struct rbd_request *req, int ret, u64 len) { rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, 
len); } /* * Send ceph osd request */ static int rbd_do_request(struct request *rq, struct rbd_device *dev, struct ceph_snap_context *snapc, u64 snapid, const char *obj, u64 ofs, u64 len, struct bio *bio, struct page **pages, int num_pages, int flags, struct ceph_osd_req_op *ops, int num_reply, struct rbd_req_coll *coll, int coll_index, void (*rbd_cb)(struct ceph_osd_request *req, struct ceph_msg *msg), struct ceph_osd_request **linger_req, u64 *ver) { struct ceph_osd_request *req; struct ceph_file_layout *layout; int ret; u64 bno; struct timespec mtime = CURRENT_TIME; struct rbd_request *req_data; struct ceph_osd_request_head *reqhead; struct ceph_osd_client *osdc; req_data = kzalloc(sizeof(*req_data), GFP_NOIO); if (!req_data) { if (coll) rbd_coll_end_req_index(rq, coll, coll_index, -ENOMEM, len); return -ENOMEM; } if (coll) { req_data->coll = coll; req_data->coll_index = coll_index; } dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs); down_read(&dev->header_rwsem); osdc = &dev->rbd_client->client->osdc; req = ceph_osdc_alloc_request(osdc, flags, snapc, ops, false, GFP_NOIO, pages, bio); if (!req) { up_read(&dev->header_rwsem); ret = -ENOMEM; goto done_pages; } req->r_callback = rbd_cb; req_data->rq = rq; req_data->bio = bio; req_data->pages = pages; req_data->len = len; req->r_priv = req_data; reqhead = req->r_request->front.iov_base; reqhead->snapid = cpu_to_le64(CEPH_NOSNAP); strncpy(req->r_oid, obj, sizeof(req->r_oid)); req->r_oid_len = strlen(req->r_oid); layout = &req->r_file_layout; memset(layout, 0, sizeof(*layout)); layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); layout->fl_stripe_count = cpu_to_le32(1); layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); layout->fl_pg_preferred = cpu_to_le32(-1); layout->fl_pg_pool = cpu_to_le32(dev->poolid); ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno, req, ops); ceph_osdc_build_request(req, ofs, &len, ops, snapc, &mtime, req->r_oid, req->r_oid_len); 
up_read(&dev->header_rwsem); if (linger_req) { ceph_osdc_set_request_linger(osdc, req); *linger_req = req; } ret = ceph_osdc_start_request(osdc, req, false); if (ret < 0) goto done_err; if (!rbd_cb) { ret = ceph_osdc_wait_request(osdc, req); if (ver) *ver = le64_to_cpu(req->r_reassert_version.version); dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version)); ceph_osdc_put_request(req); } return ret; done_err: bio_chain_put(req_data->bio); ceph_osdc_put_request(req); done_pages: rbd_coll_end_req(req_data, ret, len); kfree(req_data); return ret; } /* * Ceph osd op callback */ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg) { struct rbd_request *req_data = req->r_priv; struct ceph_osd_reply_head *replyhead; struct ceph_osd_op *op; __s32 rc; u64 bytes; int read_op; /* parse reply */ replyhead = msg->front.iov_base; WARN_ON(le32_to_cpu(replyhead->num_ops) == 0); op = (void *)(replyhead + 1); rc = le32_to_cpu(replyhead->result); bytes = le64_to_cpu(op->extent.length); read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ); dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc); if (rc == -ENOENT && read_op) { zero_bio_chain(req_data->bio, 0); rc = 0; } else if (rc == 0 && read_op && bytes < req_data->len) { zero_bio_chain(req_data->bio, bytes); bytes = req_data->len; } rbd_coll_end_req(req_data, rc, bytes); if (req_data->bio) bio_chain_put(req_data->bio); ceph_osdc_put_request(req); kfree(req_data); } static void rbd_simple_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg) { ceph_osdc_put_request(req); } /* * Do a synchronous ceph osd operation */ static int rbd_req_sync_op(struct rbd_device *dev, struct ceph_snap_context *snapc, u64 snapid, int opcode, int flags, struct ceph_osd_req_op *orig_ops, int num_reply, const char *obj, u64 ofs, u64 len, char *buf, struct ceph_osd_request **linger_req, u64 *ver) { int ret; struct page **pages; int num_pages; struct ceph_osd_req_op *ops = orig_ops; u32 payload_len; 
num_pages = calc_pages_for(ofs , len); pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); if (IS_ERR(pages)) return PTR_ERR(pages); if (!orig_ops) { payload_len = (flags & CEPH_OSD_FLAG_WRITE ? len : 0); ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len); if (ret < 0) goto done; if ((flags & CEPH_OSD_FLAG_WRITE) && buf) { ret = ceph_copy_to_page_vector(pages, buf, ofs, len); if (ret < 0) goto done_ops; } } ret = rbd_do_request(NULL, dev, snapc, snapid, obj, ofs, len, NULL, pages, num_pages, flags, ops, 2, NULL, 0, NULL, linger_req, ver); if (ret < 0) goto done_ops; if ((flags & CEPH_OSD_FLAG_READ) && buf) ret = ceph_copy_from_page_vector(pages, buf, ofs, ret); done_ops: if (!orig_ops) rbd_destroy_ops(ops); done: ceph_release_page_vector(pages, num_pages); return ret; } /* * Do an asynchronous ceph osd operation */ static int rbd_do_op(struct request *rq, struct rbd_device *rbd_dev , struct ceph_snap_context *snapc, u64 snapid, int opcode, int flags, int num_reply, u64 ofs, u64 len, struct bio *bio, struct rbd_req_coll *coll, int coll_index) { char *seg_name; u64 seg_ofs; u64 seg_len; int ret; struct ceph_osd_req_op *ops; u32 payload_len; seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO); if (!seg_name) return -ENOMEM; seg_len = rbd_get_segment(&rbd_dev->header, rbd_dev->header.block_name, ofs, len, seg_name, &seg_ofs); payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0); ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len); if (ret < 0) goto done; /* we've taken care of segment sizes earlier when we cloned the bios. 
We should never have a segment truncated at this point */ BUG_ON(seg_len < len); ret = rbd_do_request(rq, rbd_dev, snapc, snapid, seg_name, seg_ofs, seg_len, bio, NULL, 0, flags, ops, num_reply, coll, coll_index, rbd_req_cb, 0, NULL); rbd_destroy_ops(ops); done: kfree(seg_name); return ret; } /* * Request async osd write */ static int rbd_req_write(struct request *rq, struct rbd_device *rbd_dev, struct ceph_snap_context *snapc, u64 ofs, u64 len, struct bio *bio, struct rbd_req_coll *coll, int coll_index) { return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 2, ofs, len, bio, coll, coll_index); } /* * Request async osd read */ static int rbd_req_read(struct request *rq, struct rbd_device *rbd_dev, u64 snapid, u64 ofs, u64 len, struct bio *bio, struct rbd_req_coll *coll, int coll_index) { return rbd_do_op(rq, rbd_dev, NULL, (snapid ? snapid : CEPH_NOSNAP), CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, 2, ofs, len, bio, coll, coll_index); } /* * Request sync osd read */ static int rbd_req_sync_read(struct rbd_device *dev, struct ceph_snap_context *snapc, u64 snapid, const char *obj, u64 ofs, u64 len, char *buf, u64 *ver) { return rbd_req_sync_op(dev, NULL, (snapid ? 
snapid : CEPH_NOSNAP), CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, NULL, 1, obj, ofs, len, buf, NULL, ver); } /* * Request sync osd watch */ static int rbd_req_sync_notify_ack(struct rbd_device *dev, u64 ver, u64 notify_id, const char *obj) { struct ceph_osd_req_op *ops; struct page **pages = NULL; int ret; ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); if (ret < 0) return ret; ops[0].watch.ver = cpu_to_le64(dev->header.obj_version); ops[0].watch.cookie = notify_id; ops[0].watch.flag = 0; ret = rbd_do_request(NULL, dev, NULL, CEPH_NOSNAP, obj, 0, 0, NULL, pages, 0, CEPH_OSD_FLAG_READ, ops, 1, NULL, 0, rbd_simple_req_cb, 0, NULL); rbd_destroy_ops(ops); return ret; } static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) { struct rbd_device *dev = (struct rbd_device *)data; int rc; if (!dev) return; dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name, notify_id, (int)opcode); mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); rc = __rbd_update_snaps(dev); mutex_unlock(&ctl_mutex); if (rc) pr_warning(RBD_DRV_NAME "%d got notification but failed to " " update snaps: %d\n", dev->major, rc); rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name); } /* * Request sync osd watch */ static int rbd_req_sync_watch(struct rbd_device *dev, const char *obj, u64 ver) { struct ceph_osd_req_op *ops; struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc; int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0); if (ret < 0) return ret; ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0, (void *)dev, &dev->watch_event); if (ret < 0) goto fail; ops[0].watch.ver = cpu_to_le64(ver); ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie); ops[0].watch.flag = 1; ret = rbd_req_sync_op(dev, NULL, CEPH_NOSNAP, 0, CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, ops, 1, obj, 0, 0, NULL, &dev->watch_request, NULL); if (ret < 0) goto fail_event; rbd_destroy_ops(ops); return 0; fail_event: ceph_osdc_cancel_event(dev->watch_event); 
dev->watch_event = NULL; fail: rbd_destroy_ops(ops); return ret; } /* * Request sync osd unwatch */ static int rbd_req_sync_unwatch(struct rbd_device *dev, const char *obj) { struct ceph_osd_req_op *ops; int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0); if (ret < 0) return ret; ops[0].watch.ver = 0; ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie); ops[0].watch.flag = 0; ret = rbd_req_sync_op(dev, NULL, CEPH_NOSNAP, 0, CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, ops, 1, obj, 0, 0, NULL, NULL, NULL); rbd_destroy_ops(ops); ceph_osdc_cancel_event(dev->watch_event); dev->watch_event = NULL; return ret; } struct rbd_notify_info { struct rbd_device *dev; }; static void rbd_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data) { struct rbd_device *dev = (struct rbd_device *)data; if (!dev) return; dout("rbd_notify_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name, notify_id, (int)opcode); } /* * Request sync osd notify */ static int rbd_req_sync_notify(struct rbd_device *dev, const char *obj) { struct ceph_osd_req_op *ops; struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc; struct ceph_osd_event *event; struct rbd_notify_info info; int payload_len = sizeof(u32) + sizeof(u32); int ret; ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY, payload_len); if (ret < 0) return ret; info.dev = dev; ret = ceph_osdc_create_event(osdc, rbd_notify_cb, 1, (void *)&info, &event); if (ret < 0) goto fail; ops[0].watch.ver = 1; ops[0].watch.flag = 1; ops[0].watch.cookie = event->cookie; ops[0].watch.prot_ver = RADOS_NOTIFY_VER; ops[0].watch.timeout = 12; ret = rbd_req_sync_op(dev, NULL, CEPH_NOSNAP, 0, CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, ops, 1, obj, 0, 0, NULL, NULL, NULL); if (ret < 0) goto fail_event; ret = ceph_osdc_wait_event(event, CEPH_OSD_TIMEOUT_DEFAULT); dout("ceph_osdc_wait_event returned %d\n", ret); rbd_destroy_ops(ops); return 0; fail_event: ceph_osdc_cancel_event(event); fail: rbd_destroy_ops(ops); return ret; } /* * Request sync 
osd read */ static int rbd_req_sync_exec(struct rbd_device *dev, const char *obj, const char *cls, const char *method, const char *data, int len, u64 *ver) { struct ceph_osd_req_op *ops; int cls_len = strlen(cls); int method_len = strlen(method); int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_CALL, cls_len + method_len + len); if (ret < 0) return ret; ops[0].cls.class_name = cls; ops[0].cls.class_len = (__u8)cls_len; ops[0].cls.method_name = method; ops[0].cls.method_len = (__u8)method_len; ops[0].cls.argc = 0; ops[0].cls.indata = data; ops[0].cls.indata_len = len; ret = rbd_req_sync_op(dev, NULL, CEPH_NOSNAP, 0, CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, ops, 1, obj, 0, 0, NULL, NULL, ver); rbd_destroy_ops(ops); dout("cls_exec returned %d\n", ret); return ret; } static struct rbd_req_coll *rbd_alloc_coll(int num_reqs) { struct rbd_req_coll *coll = kzalloc(sizeof(struct rbd_req_coll) + sizeof(struct rbd_req_status) * num_reqs, GFP_ATOMIC); if (!coll) return NULL; coll->total = num_reqs; kref_init(&coll->kref); return coll; } /* * block device queue callback */ static void rbd_rq_fn(struct request_queue *q) { struct rbd_device *rbd_dev = q->queuedata; struct request *rq; struct bio_pair *bp = NULL; while ((rq = blk_fetch_request(q))) { struct bio *bio; struct bio *rq_bio, *next_bio = NULL; bool do_write; int size, op_size = 0; u64 ofs; int num_segs, cur_seg = 0; struct rbd_req_coll *coll; /* peek at request from block layer */ if (!rq) break; dout("fetched request\n"); /* filter out block requests we don't understand */ if ((rq->cmd_type != REQ_TYPE_FS)) { __blk_end_request_all(rq, 0); continue; } /* deduce our operation (read, write) */ do_write = (rq_data_dir(rq) == WRITE); size = blk_rq_bytes(rq); ofs = blk_rq_pos(rq) * SECTOR_SIZE; rq_bio = rq->bio; if (do_write && rbd_dev->read_only) { __blk_end_request_all(rq, -EROFS); continue; } spin_unlock_irq(q->queue_lock); dout("%s 0x%x bytes at 0x%llx\n", do_write ? 
"write" : "read", size, blk_rq_pos(rq) * SECTOR_SIZE); num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size); coll = rbd_alloc_coll(num_segs); if (!coll) { spin_lock_irq(q->queue_lock); __blk_end_request_all(rq, -ENOMEM); continue; } do { /* a bio clone to be passed down to OSD req */ dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); op_size = rbd_get_segment(&rbd_dev->header, rbd_dev->header.block_name, ofs, size, NULL, NULL); kref_get(&coll->kref); bio = bio_chain_clone(&rq_bio, &next_bio, &bp, op_size, GFP_ATOMIC); if (!bio) { rbd_coll_end_req_index(rq, coll, cur_seg, -ENOMEM, op_size); goto next_seg; } /* init OSD command: write or read */ if (do_write) rbd_req_write(rq, rbd_dev, rbd_dev->header.snapc, ofs, op_size, bio, coll, cur_seg); else rbd_req_read(rq, rbd_dev, cur_snap_id(rbd_dev), ofs, op_size, bio, coll, cur_seg); next_seg: size -= op_size; ofs += op_size; cur_seg++; rq_bio = next_bio; } while (size > 0); kref_put(&coll->kref, rbd_coll_release); if (bp) bio_pair_release(bp); spin_lock_irq(q->queue_lock); } } /* * a queue callback. Makes sure that we don't create a bio that spans across * multiple osd objects. 
One exception would be with a single page bios, * which we handle later at bio_chain_clone */ static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd, struct bio_vec *bvec) { struct rbd_device *rbd_dev = q->queuedata; unsigned int chunk_sectors; sector_t sector; unsigned int bio_sectors; int max; chunk_sectors = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT); sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev); bio_sectors = bmd->bi_size >> SECTOR_SHIFT; max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << SECTOR_SHIFT; if (max < 0) max = 0; /* bio_add cannot handle a negative return */ if (max <= bvec->bv_len && bio_sectors == 0) return bvec->bv_len; return max; } static void rbd_free_disk(struct rbd_device *rbd_dev) { struct gendisk *disk = rbd_dev->disk; if (!disk) return; rbd_header_free(&rbd_dev->header); if (disk->flags & GENHD_FL_UP) del_gendisk(disk); if (disk->queue) blk_cleanup_queue(disk->queue); put_disk(disk); } /* * reload the ondisk the header */ static int rbd_read_header(struct rbd_device *rbd_dev, struct rbd_image_header *header) { ssize_t rc; struct rbd_image_header_ondisk *dh; int snap_count = 0; u64 ver; size_t len; /* * First reads the fixed-size header to determine the number * of snapshots, then re-reads it, along with all snapshot * records as well as their stored names. 
*/ len = sizeof (*dh); while (1) { dh = kmalloc(len, GFP_KERNEL); if (!dh) return -ENOMEM; rc = rbd_req_sync_read(rbd_dev, NULL, CEPH_NOSNAP, rbd_dev->obj_md_name, 0, len, (char *)dh, &ver); if (rc < 0) goto out_dh; rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL); if (rc < 0) { if (rc == -ENXIO) pr_warning("unrecognized header format" " for image %s", rbd_dev->obj); goto out_dh; } if (snap_count == header->total_snaps) break; snap_count = header->total_snaps; len = sizeof (*dh) + snap_count * sizeof(struct rbd_image_snap_ondisk) + header->snap_names_len; rbd_header_free(header); kfree(dh); } header->obj_version = ver; out_dh: kfree(dh); return rc; } /* * create a snapshot */ static int rbd_header_add_snap(struct rbd_device *dev, const char *snap_name, gfp_t gfp_flags) { int name_len = strlen(snap_name); u64 new_snapid; int ret; void *data, *p, *e; u64 ver; struct ceph_mon_client *monc; /* we should create a snapshot only if we're pointing at the head */ if (dev->cur_snap) return -EINVAL; monc = &dev->rbd_client->client->monc; ret = ceph_monc_create_snapid(monc, dev->poolid, &new_snapid); dout("created snapid=%lld\n", new_snapid); if (ret < 0) return ret; data = kmalloc(name_len + 16, gfp_flags); if (!data) return -ENOMEM; p = data; e = data + name_len + 16; ceph_encode_string_safe(&p, e, snap_name, name_len, bad); ceph_encode_64_safe(&p, e, new_snapid, bad); ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add", data, p - data, &ver); kfree(data); if (ret < 0) return ret; dev->header.snapc->seq = new_snapid; return 0; bad: return -ERANGE; } static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev) { struct rbd_snap *snap; while (!list_empty(&rbd_dev->snaps)) { snap = list_first_entry(&rbd_dev->snaps, struct rbd_snap, node); __rbd_remove_snap_dev(rbd_dev, snap); } } /* * only read the first part of the ondisk header, without the snaps info */ static int __rbd_update_snaps(struct rbd_device *rbd_dev) { int ret; struct rbd_image_header h; 
u64 snap_seq; int follow_seq = 0; ret = rbd_read_header(rbd_dev, &h); if (ret < 0) return ret; /* resized? */ set_capacity(rbd_dev->disk, h.image_size / SECTOR_SIZE); down_write(&rbd_dev->header_rwsem); snap_seq = rbd_dev->header.snapc->seq; if (rbd_dev->header.total_snaps && rbd_dev->header.snapc->snaps[0] == snap_seq) /* pointing at the head, will need to follow that if head moves */ follow_seq = 1; kfree(rbd_dev->header.snapc); kfree(rbd_dev->header.snap_names); kfree(rbd_dev->header.snap_sizes); rbd_dev->header.total_snaps = h.total_snaps; rbd_dev->header.snapc = h.snapc; rbd_dev->header.snap_names = h.snap_names; rbd_dev->header.snap_names_len = h.snap_names_len; rbd_dev->header.snap_sizes = h.snap_sizes; if (follow_seq) rbd_dev->header.snapc->seq = rbd_dev->header.snapc->snaps[0]; else rbd_dev->header.snapc->seq = snap_seq; ret = __rbd_init_snaps_header(rbd_dev); up_write(&rbd_dev->header_rwsem); return ret; } static int rbd_init_disk(struct rbd_device *rbd_dev) { struct gendisk *disk; struct request_queue *q; int rc; u64 segment_size; u64 total_size = 0; /* contact OSD, request size info about the object being mapped */ rc = rbd_read_header(rbd_dev, &rbd_dev->header); if (rc) return rc; /* no need to lock here, as rbd_dev is not registered yet */ rc = __rbd_init_snaps_header(rbd_dev); if (rc) return rc; rc = rbd_header_set_snap(rbd_dev, &total_size); if (rc) return rc; /* create gendisk info */ rc = -ENOMEM; disk = alloc_disk(RBD_MINORS_PER_MAJOR); if (!disk) goto out; snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d", rbd_dev->id); disk->major = rbd_dev->major; disk->first_minor = 0; disk->fops = &rbd_bd_ops; disk->private_data = rbd_dev; /* init rq */ rc = -ENOMEM; q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock); if (!q) goto out_disk; /* We use the default size, but let's be explicit about it. 
*/ blk_queue_physical_block_size(q, SECTOR_SIZE); /* set io sizes to object size */ segment_size = rbd_obj_bytes(&rbd_dev->header); blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); blk_queue_max_segment_size(q, segment_size); blk_queue_io_min(q, segment_size); blk_queue_io_opt(q, segment_size); blk_queue_merge_bvec(q, rbd_merge_bvec); disk->queue = q; q->queuedata = rbd_dev; rbd_dev->disk = disk; rbd_dev->q = q; /* finally, announce the disk to the world */ set_capacity(disk, total_size / SECTOR_SIZE); add_disk(disk); pr_info("%s: added with size 0x%llx\n", disk->disk_name, (unsigned long long)total_size); return 0; out_disk: put_disk(disk); out: return rc; } /* sysfs */ static struct rbd_device *dev_to_rbd_dev(struct device *dev) { return container_of(dev, struct rbd_device, dev); } static ssize_t rbd_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "%llu\n", (unsigned long long)rbd_dev->header.image_size); } static ssize_t rbd_major_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "%d\n", rbd_dev->major); } static ssize_t rbd_client_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "client%lld\n", ceph_client_id(rbd_dev->rbd_client->client)); } static ssize_t rbd_pool_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "%s\n", rbd_dev->pool_name); } static ssize_t rbd_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "%s\n", rbd_dev->obj); } static ssize_t rbd_snap_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, 
"%s\n", rbd_dev->snap_name); } static ssize_t rbd_image_refresh(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); int rc; int ret = size; mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); rc = __rbd_update_snaps(rbd_dev); if (rc < 0) ret = rc; mutex_unlock(&ctl_mutex); return ret; } static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL); static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL); static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL); static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL); static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL); static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh); static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL); static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add); static struct attribute *rbd_attrs[] = { &dev_attr_size.attr, &dev_attr_major.attr, &dev_attr_client_id.attr, &dev_attr_pool.attr, &dev_attr_name.attr, &dev_attr_current_snap.attr, &dev_attr_refresh.attr, &dev_attr_create_snap.attr, NULL }; static struct attribute_group rbd_attr_group = { .attrs = rbd_attrs, }; static const struct attribute_group *rbd_attr_groups[] = { &rbd_attr_group, NULL }; static void rbd_sysfs_dev_release(struct device *dev) { } static struct device_type rbd_device_type = { .name = "rbd", .groups = rbd_attr_groups, .release = rbd_sysfs_dev_release, }; /* sysfs - snapshots */ static ssize_t rbd_snap_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); return sprintf(buf, "%zd\n", snap->size); } static ssize_t rbd_snap_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); return sprintf(buf, "%llu\n", (unsigned long long) snap->id); } static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL); static DEVICE_ATTR(snap_id, S_IRUGO, 
rbd_snap_id_show, NULL); static struct attribute *rbd_snap_attrs[] = { &dev_attr_snap_size.attr, &dev_attr_snap_id.attr, NULL, }; static struct attribute_group rbd_snap_attr_group = { .attrs = rbd_snap_attrs, }; static void rbd_snap_dev_release(struct device *dev) { struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); kfree(snap->name); kfree(snap); } static const struct attribute_group *rbd_snap_attr_groups[] = { &rbd_snap_attr_group, NULL }; static struct device_type rbd_snap_device_type = { .groups = rbd_snap_attr_groups, .release = rbd_snap_dev_release, }; static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev, struct rbd_snap *snap) { list_del(&snap->node); device_unregister(&snap->dev); } static int rbd_register_snap_dev(struct rbd_device *rbd_dev, struct rbd_snap *snap, struct device *parent) { struct device *dev = &snap->dev; int ret; dev->type = &rbd_snap_device_type; dev->parent = parent; dev->release = rbd_snap_dev_release; dev_set_name(dev, "snap_%s", snap->name); ret = device_register(dev); return ret; } static int __rbd_add_snap_dev(struct rbd_device *rbd_dev, int i, const char *name, struct rbd_snap **snapp) { int ret; struct rbd_snap *snap = kzalloc(sizeof(*snap), GFP_KERNEL); if (!snap) return -ENOMEM; snap->name = kstrdup(name, GFP_KERNEL); snap->size = rbd_dev->header.snap_sizes[i]; snap->id = rbd_dev->header.snapc->snaps[i]; if (device_is_registered(&rbd_dev->dev)) { ret = rbd_register_snap_dev(rbd_dev, snap, &rbd_dev->dev); if (ret < 0) goto err; } *snapp = snap; return 0; err: kfree(snap->name); kfree(snap); return ret; } /* * search for the previous snap in a null delimited string list */ const char *rbd_prev_snap_name(const char *name, const char *start) { if (name < start + 2) return NULL; name -= 2; while (*name) { if (name == start) return start; name--; } return name + 1; } /* * compare the old list of snapshots that we have to what's in the header * and update it accordingly. 
Note that the header holds the snapshots * in a reverse order (from newest to oldest) and we need to go from * older to new so that we don't get a duplicate snap name when * doing the process (e.g., removed snapshot and recreated a new * one with the same name. */ static int __rbd_init_snaps_header(struct rbd_device *rbd_dev) { const char *name, *first_name; int i = rbd_dev->header.total_snaps; struct rbd_snap *snap, *old_snap = NULL; int ret; struct list_head *p, *n; first_name = rbd_dev->header.snap_names; name = first_name + rbd_dev->header.snap_names_len; list_for_each_prev_safe(p, n, &rbd_dev->snaps) { u64 cur_id; old_snap = list_entry(p, struct rbd_snap, node); if (i) cur_id = rbd_dev->header.snapc->snaps[i - 1]; if (!i || old_snap->id < cur_id) { /* old_snap->id was skipped, thus was removed */ __rbd_remove_snap_dev(rbd_dev, old_snap); continue; } if (old_snap->id == cur_id) { /* we have this snapshot already */ i--; name = rbd_prev_snap_name(name, first_name); continue; } for (; i > 0; i--, name = rbd_prev_snap_name(name, first_name)) { if (!name) { WARN_ON(1); return -EINVAL; } cur_id = rbd_dev->header.snapc->snaps[i]; /* snapshot removal? 
handle it above */ if (cur_id >= old_snap->id) break; /* a new snapshot */ ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap); if (ret < 0) return ret; /* note that we add it backward so using n and not p */ list_add(&snap->node, n); p = &snap->node; } } /* we're done going over the old snap list, just add what's left */ for (; i > 0; i--) { name = rbd_prev_snap_name(name, first_name); if (!name) { WARN_ON(1); return -EINVAL; } ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap); if (ret < 0) return ret; list_add(&snap->node, &rbd_dev->snaps); } return 0; } static int rbd_bus_add_dev(struct rbd_device *rbd_dev) { int ret; struct device *dev; struct rbd_snap *snap; mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); dev = &rbd_dev->dev; dev->bus = &rbd_bus_type; dev->type = &rbd_device_type; dev->parent = &rbd_root_dev; dev->release = rbd_dev_release; dev_set_name(dev, "%d", rbd_dev->id); ret = device_register(dev); if (ret < 0) goto out; list_for_each_entry(snap, &rbd_dev->snaps, node) { ret = rbd_register_snap_dev(rbd_dev, snap, &rbd_dev->dev); if (ret < 0) break; } out: mutex_unlock(&ctl_mutex); return ret; } static void rbd_bus_del_dev(struct rbd_device *rbd_dev) { device_unregister(&rbd_dev->dev); } static int rbd_init_watch_dev(struct rbd_device *rbd_dev) { int ret, rc; do { ret = rbd_req_sync_watch(rbd_dev, rbd_dev->obj_md_name, rbd_dev->header.obj_version); if (ret == -ERANGE) { mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); rc = __rbd_update_snaps(rbd_dev); mutex_unlock(&ctl_mutex); if (rc < 0) return rc; } } while (ret == -ERANGE); return ret; } static atomic64_t rbd_id_max = ATOMIC64_INIT(0); /* * Get a unique rbd identifier for the given new rbd_dev, and add * the rbd_dev to the global list. The minimum rbd id is 1. 
*/ static void rbd_id_get(struct rbd_device *rbd_dev) { rbd_dev->id = atomic64_inc_return(&rbd_id_max); spin_lock(&rbd_dev_list_lock); list_add_tail(&rbd_dev->node, &rbd_dev_list); spin_unlock(&rbd_dev_list_lock); } /* * Remove an rbd_dev from the global list, and record that its * identifier is no longer in use. */ static void rbd_id_put(struct rbd_device *rbd_dev) { struct list_head *tmp; int rbd_id = rbd_dev->id; int max_id; BUG_ON(rbd_id < 1); spin_lock(&rbd_dev_list_lock); list_del_init(&rbd_dev->node); /* * If the id being "put" is not the current maximum, there * is nothing special we need to do. */ if (rbd_id != atomic64_read(&rbd_id_max)) { spin_unlock(&rbd_dev_list_lock); return; } /* * We need to update the current maximum id. Search the * list to find out what it is. We're more likely to find * the maximum at the end, so search the list backward. */ max_id = 0; list_for_each_prev(tmp, &rbd_dev_list) { struct rbd_device *rbd_dev; rbd_dev = list_entry(tmp, struct rbd_device, node); if (rbd_id > max_id) max_id = rbd_id; } spin_unlock(&rbd_dev_list_lock); /* * The max id could have been updated by rbd_id_get(), in * which case it now accurately reflects the new maximum. * Be careful not to overwrite the maximum value in that * case. */ atomic64_cmpxchg(&rbd_id_max, rbd_id, max_id); } /* * Skips over white space at *buf, and updates *buf to point to the * first found non-space character (if any). Returns the length of * the token (string of non-white space characters) found. Note * that *buf must be terminated with '\0'. */ static inline size_t next_token(const char **buf) { /* * These are the characters that produce nonzero for * isspace() in the "C" and "POSIX" locales. */ const char *spaces = " \f\n\r\t\v"; *buf += strspn(*buf, spaces); /* Find start of token */ return strcspn(*buf, spaces); /* Return token length */ } /* * Finds the next token in *buf, and if the provided token buffer is * big enough, copies the found token into it. 
The result, if * copied, is guaranteed to be terminated with '\0'. Note that *buf * must be terminated with '\0' on entry. * * Returns the length of the token found (not including the '\0'). * Return value will be 0 if no token is found, and it will be >= * token_size if the token would not fit. * * The *buf pointer will be updated to point beyond the end of the * found token. Note that this occurs even if the token buffer is * too small to hold it. */ static inline size_t copy_token(const char **buf, char *token, size_t token_size) { size_t len; len = next_token(buf); if (len < token_size) { memcpy(token, *buf, len); *(token + len) = '\0'; } *buf += len; return len; } /* * This fills in the pool_name, obj, obj_len, snap_name, obj_len, * rbd_dev, rbd_md_name, and name fields of the given rbd_dev, based * on the list of monitor addresses and other options provided via * /sys/bus/rbd/add. */ static int rbd_add_parse_args(struct rbd_device *rbd_dev, const char *buf, const char **mon_addrs, size_t *mon_addrs_size, char *options, size_t options_size) { size_t len; /* The first four tokens are required */ len = next_token(&buf); if (!len) return -EINVAL; *mon_addrs_size = len + 1; *mon_addrs = buf; buf += len; len = copy_token(&buf, options, options_size); if (!len || len >= options_size) return -EINVAL; len = copy_token(&buf, rbd_dev->pool_name, sizeof (rbd_dev->pool_name)); if (!len || len >= sizeof (rbd_dev->pool_name)) return -EINVAL; len = copy_token(&buf, rbd_dev->obj, sizeof (rbd_dev->obj)); if (!len || len >= sizeof (rbd_dev->obj)) return -EINVAL; /* We have the object length in hand, save it. */ rbd_dev->obj_len = len; BUILD_BUG_ON(RBD_MAX_MD_NAME_LEN < RBD_MAX_OBJ_NAME_LEN + sizeof (RBD_SUFFIX)); sprintf(rbd_dev->obj_md_name, "%s%s", rbd_dev->obj, RBD_SUFFIX); /* * The snapshot name is optional, but it's an error if it's * too long. If no snapshot is supplied, fill in the default. 
*/ len = copy_token(&buf, rbd_dev->snap_name, sizeof (rbd_dev->snap_name)); if (!len) memcpy(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME, sizeof (RBD_SNAP_HEAD_NAME)); else if (len >= sizeof (rbd_dev->snap_name)) return -EINVAL; return 0; } static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count) { struct rbd_device *rbd_dev; const char *mon_addrs = NULL; size_t mon_addrs_size = 0; char *options = NULL; struct ceph_osd_client *osdc; int rc = -ENOMEM; if (!try_module_get(THIS_MODULE)) return -ENODEV; rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL); if (!rbd_dev) goto err_nomem; options = kmalloc(count, GFP_KERNEL); if (!options) goto err_nomem; /* static rbd_device initialization */ spin_lock_init(&rbd_dev->lock); INIT_LIST_HEAD(&rbd_dev->node); INIT_LIST_HEAD(&rbd_dev->snaps); init_rwsem(&rbd_dev->header_rwsem); init_rwsem(&rbd_dev->header_rwsem); /* generate unique id: find highest unique id, add one */ rbd_id_get(rbd_dev); /* Fill in the device name, now that we have its id. */ BUILD_BUG_ON(DEV_NAME_LEN < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->id); /* parse add command */ rc = rbd_add_parse_args(rbd_dev, buf, &mon_addrs, &mon_addrs_size, options, count); if (rc) goto err_put_id; rbd_dev->rbd_client = rbd_get_client(mon_addrs, mon_addrs_size - 1, options); if (IS_ERR(rbd_dev->rbd_client)) { rc = PTR_ERR(rbd_dev->rbd_client); goto err_put_id; } /* pick the pool */ osdc = &rbd_dev->rbd_client->client->osdc; rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name); if (rc < 0) goto err_out_client; rbd_dev->poolid = rc; /* register our block device */ rc = register_blkdev(0, rbd_dev->name); if (rc < 0) goto err_out_client; rbd_dev->major = rc; rc = rbd_bus_add_dev(rbd_dev); if (rc) goto err_out_blkdev; /* * At this point cleanup in the event of an error is the job * of the sysfs code (initiated by rbd_bus_del_dev()). * * Set up and announce blkdev mapping. 
*/ rc = rbd_init_disk(rbd_dev); if (rc) goto err_out_bus; rc = rbd_init_watch_dev(rbd_dev); if (rc) goto err_out_bus; return count; err_out_bus: /* this will also clean up rest of rbd_dev stuff */ rbd_bus_del_dev(rbd_dev); kfree(options); return rc; err_out_blkdev: unregister_blkdev(rbd_dev->major, rbd_dev->name); err_out_client: rbd_put_client(rbd_dev); err_put_id: rbd_id_put(rbd_dev); err_nomem: kfree(options); kfree(rbd_dev); dout("Error adding device %s\n", buf); module_put(THIS_MODULE); return (ssize_t) rc; } static struct rbd_device *__rbd_get_dev(unsigned long id) { struct list_head *tmp; struct rbd_device *rbd_dev; spin_lock(&rbd_dev_list_lock); list_for_each(tmp, &rbd_dev_list) { rbd_dev = list_entry(tmp, struct rbd_device, node); if (rbd_dev->id == id) { spin_unlock(&rbd_dev_list_lock); return rbd_dev; } } spin_unlock(&rbd_dev_list_lock); return NULL; } static void rbd_dev_release(struct device *dev) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); if (rbd_dev->watch_request) { struct ceph_client *client = rbd_dev->rbd_client->client; ceph_osdc_unregister_linger_request(&client->osdc, rbd_dev->watch_request); } if (rbd_dev->watch_event) rbd_req_sync_unwatch(rbd_dev, rbd_dev->obj_md_name); rbd_put_client(rbd_dev); /* clean up and free blkdev */ rbd_free_disk(rbd_dev); unregister_blkdev(rbd_dev->major, rbd_dev->name); /* done with the id, and with the rbd_dev */ rbd_id_put(rbd_dev); kfree(rbd_dev); /* release module ref */ module_put(THIS_MODULE); } static ssize_t rbd_remove(struct bus_type *bus, const char *buf, size_t count) { struct rbd_device *rbd_dev = NULL; int target_id, rc; unsigned long ul; int ret = count; rc = strict_strtoul(buf, 10, &ul); if (rc) return rc; /* convert to int; abort if we lost anything in the conversion */ target_id = (int) ul; if (target_id != ul) return -EINVAL; mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); rbd_dev = __rbd_get_dev(target_id); if (!rbd_dev) { ret = -ENOENT; goto done; } __rbd_remove_all_snaps(rbd_dev); 
rbd_bus_del_dev(rbd_dev); done: mutex_unlock(&ctl_mutex); return ret; } static ssize_t rbd_snap_add(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); int ret; char *name = kmalloc(count + 1, GFP_KERNEL); if (!name) return -ENOMEM; snprintf(name, count, "%s", buf); mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); ret = rbd_header_add_snap(rbd_dev, name, GFP_KERNEL); if (ret < 0) goto err_unlock; ret = __rbd_update_snaps(rbd_dev); if (ret < 0) goto err_unlock; /* shouldn't hold ctl_mutex when notifying.. notify might trigger a watch callback that would need to get that mutex */ mutex_unlock(&ctl_mutex); /* make a best effort, don't error if failed */ rbd_req_sync_notify(rbd_dev, rbd_dev->obj_md_name); ret = count; kfree(name); return ret; err_unlock: mutex_unlock(&ctl_mutex); kfree(name); return ret; } /* * create control files in sysfs * /sys/bus/rbd/... */ static int rbd_sysfs_init(void) { int ret; ret = device_register(&rbd_root_dev); if (ret < 0) return ret; ret = bus_register(&rbd_bus_type); if (ret < 0) device_unregister(&rbd_root_dev); return ret; } static void rbd_sysfs_cleanup(void) { bus_unregister(&rbd_bus_type); device_unregister(&rbd_root_dev); } int __init rbd_init(void) { int rc; rc = rbd_sysfs_init(); if (rc) return rc; pr_info("loaded " RBD_DRV_NAME_LONG "\n"); return 0; } void __exit rbd_exit(void) { rbd_sysfs_cleanup(); } module_init(rbd_init); module_exit(rbd_exit); MODULE_AUTHOR("Sage Weil <sage@newdream.net>"); MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>"); MODULE_DESCRIPTION("rados block device"); /* following authorship retained from original osdblk.c */ MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>"); MODULE_LICENSE("GPL");
gpl-2.0
777jon/g3_kernel
tools/perf/util/gtk/browser.c
4801
4204
#include "../evlist.h"
#include "../cache.h"
#include "../evsel.h"
#include "../sort.h"
#include "../hist.h"
#include "gtk.h"

#include <signal.h>

/* Upper bound on list-store columns: 1 overhead column + sort entries. */
#define MAX_COLUMNS 32

/* Initialize GTK; fallback_to_pager is unused in the GTK front end. */
void perf_gtk_setup_browser(int argc, const char *argv[],
			    bool fallback_to_pager __used)
{
	gtk_init(&argc, (char ***)&argv);
}

/* Leave the GTK main loop; wait_for_ok is unused here. */
void perf_gtk_exit_browser(bool wait_for_ok __used)
{
	gtk_main_quit();
}

/* Fatal-signal handler: report the signal and shut the UI down. */
static void perf_gtk_signal(int sig)
{
	psignal(sig, "perf");
	gtk_main_quit();
}

/* Resize the window to 3/4 of the monitor it currently occupies. */
static void perf_gtk_resize_window(GtkWidget *window)
{
	GdkRectangle rect;
	GdkScreen *screen;
	int monitor;
	int height;
	int width;

	screen = gtk_widget_get_screen(window);

	monitor = gdk_screen_get_monitor_at_window(screen, window->window);

	gdk_screen_get_monitor_geometry(screen, monitor, &rect);

	width = rect.width * 3 / 4;
	height = rect.height * 3 / 4;

	gtk_window_resize(GTK_WINDOW(window), width, height);
}

/*
 * Build a GtkTreeView for @hists inside @window: one "Overhead (%)"
 * column plus one text column per non-elided sort entry, then one row
 * per unfiltered hist entry.
 *
 * NOTE(review): nr_cols is not checked against MAX_COLUMNS before
 * indexing col_types[] — with more than 31 sort entries this would
 * overflow the array.  Confirm the sort list is bounded elsewhere.
 */
static void perf_gtk_show_hists(GtkWidget *window, struct hists *hists)
{
	GType col_types[MAX_COLUMNS];
	GtkCellRenderer *renderer;
	struct sort_entry *se;
	GtkListStore *store;
	struct rb_node *nd;
	u64 total_period;
	GtkWidget *view;
	int col_idx;
	int nr_cols;

	nr_cols = 0;

	/* The percentage column */
	col_types[nr_cols++] = G_TYPE_STRING;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		col_types[nr_cols++] = G_TYPE_STRING;
	}

	store = gtk_list_store_newv(nr_cols, col_types);

	view = gtk_tree_view_new();

	renderer = gtk_cell_renderer_text_new();

	col_idx = 0;

	/* The percentage column */
	gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
						    -1, "Overhead (%)",
						    renderer, "text",
						    col_idx++, NULL);

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
							    -1, se->se_header,
							    renderer, "text",
							    col_idx++, NULL);
	}

	gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));

	/* the view now holds the only reference to the store */
	g_object_unref(GTK_TREE_MODEL(store));

	total_period = hists->stats.total_period;

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		GtkTreeIter iter;
		double percent;
		char s[512];

		if (h->filtered)
			continue;

		gtk_list_store_append(store, &iter);

		col_idx = 0;

		percent = (h->period * 100.0) / total_period;

		snprintf(s, ARRAY_SIZE(s), "%.2f", percent);

		gtk_list_store_set(store, &iter, col_idx++, s, -1);

		list_for_each_entry(se, &hist_entry__sort_list, list) {
			if (se->elide)
				continue;

			se->se_snprintf(h, s, ARRAY_SIZE(s),
					hists__col_len(hists, se->se_width_idx));
			gtk_list_store_set(store, &iter, col_idx++, s, -1);
		}
	}

	gtk_container_add(GTK_CONTAINER(window), view);
}

/*
 * Top-level GTK report browser: one notebook tab per event in @evlist,
 * each showing that event's hists in a scrolled window.  Blocks in
 * gtk_main() until the window is closed or a fatal signal arrives.
 * The timer/arg/delay_secs refresh parameters are unused here.
 */
int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
				  const char *help __used,
				  void (*timer) (void *arg)__used,
				  void *arg __used, int delay_secs __used)
{
	struct perf_evsel *pos;
	GtkWidget *notebook;
	GtkWidget *window;

	/* make sure the UI exits cleanly on crashes and interrupts */
	signal(SIGSEGV, perf_gtk_signal);
	signal(SIGFPE, perf_gtk_signal);
	signal(SIGINT, perf_gtk_signal);
	signal(SIGQUIT, perf_gtk_signal);
	signal(SIGTERM, perf_gtk_signal);

	window = gtk_window_new(GTK_WINDOW_TOPLEVEL);

	gtk_window_set_title(GTK_WINDOW(window), "perf report");

	g_signal_connect(window, "delete_event", gtk_main_quit, NULL);

	notebook = gtk_notebook_new();

	list_for_each_entry(pos, &evlist->entries, node) {
		struct hists *hists = &pos->hists;
		const char *evname = event_name(pos);
		GtkWidget *scrolled_window;
		GtkWidget *tab_label;

		scrolled_window = gtk_scrolled_window_new(NULL, NULL);

		gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
					       GTK_POLICY_AUTOMATIC,
					       GTK_POLICY_AUTOMATIC);

		perf_gtk_show_hists(scrolled_window, hists);

		tab_label = gtk_label_new(evname);

		gtk_notebook_append_page(GTK_NOTEBOOK(notebook),
					 scrolled_window, tab_label);
	}

	gtk_container_add(GTK_CONTAINER(window), notebook);

	gtk_widget_show_all(window);

	perf_gtk_resize_window(window);

	gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);

	gtk_main();

	return 0;
}
gpl-2.0
mereck/os-fork
net/802/hippi.c
4801
6052
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * HIPPI-type device handling. * * Version: @(#)hippi.c 1.0.0 05/29/97 * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Florian La Roche, <rzsfl@rz.uni-sb.de> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Jes Sorensen, <Jes.Sorensen@cern.ch> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/hippidevice.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <net/arp.h> #include <net/sock.h> #include <asm/uaccess.h> /* * Create the HIPPI MAC header for an arbitrary protocol layer * * saddr=NULL means use device source address * daddr=NULL means leave destination address (eg unresolved arp) */ static int hippi_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN); struct hippi_cb *hcb = (struct hippi_cb *) skb->cb; if (!len){ len = skb->len - HIPPI_HLEN; printk("hippi_header(): length not supplied\n"); } /* * Due to the stupidity of the little endian byte-order we * have to set the fp field this way. 
*/ hip->fp.fixed = htonl(0x04800018); hip->fp.d2_size = htonl(len + 8); hip->le.fc = 0; hip->le.double_wide = 0; /* only HIPPI 800 for the time being */ hip->le.message_type = 0; /* Data PDU */ hip->le.dest_addr_type = 2; /* 12 bit SC address */ hip->le.src_addr_type = 2; /* 12 bit SC address */ memcpy(hip->le.src_switch_addr, dev->dev_addr + 3, 3); memset(&hip->le.reserved, 0, 16); hip->snap.dsap = HIPPI_EXTENDED_SAP; hip->snap.ssap = HIPPI_EXTENDED_SAP; hip->snap.ctrl = HIPPI_UI_CMD; hip->snap.oui[0] = 0x00; hip->snap.oui[1] = 0x00; hip->snap.oui[2] = 0x00; hip->snap.ethertype = htons(type); if (daddr) { memcpy(hip->le.dest_switch_addr, daddr + 3, 3); memcpy(&hcb->ifield, daddr + 2, 4); return HIPPI_HLEN; } hcb->ifield = 0; return -((int)HIPPI_HLEN); } /* * Rebuild the HIPPI MAC header. This is called after an ARP has * completed on this sk_buff. We now let ARP fill in the other fields. */ static int hippi_rebuild_header(struct sk_buff *skb) { struct hippi_hdr *hip = (struct hippi_hdr *)skb->data; /* * Only IP is currently supported */ if(hip->snap.ethertype != htons(ETH_P_IP)) { printk(KERN_DEBUG "%s: unable to resolve type %X addresses.\n",skb->dev->name,ntohs(hip->snap.ethertype)); return 0; } /* * We don't support dynamic ARP on HIPPI, but we use the ARP * static ARP tables to hold the I-FIELDs. */ return arp_find(hip->le.daddr, skb); } /* * Determine the packet's protocol ID. */ __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev) { struct hippi_hdr *hip; /* * This is actually wrong ... question is if we really should * set the raw address here. */ skb->dev = dev; skb_reset_mac_header(skb); hip = (struct hippi_hdr *)skb_mac_header(skb); skb_pull(skb, HIPPI_HLEN); /* * No fancy promisc stuff here now. */ return hip->snap.ethertype; } EXPORT_SYMBOL(hippi_type_trans); int hippi_change_mtu(struct net_device *dev, int new_mtu) { /* * HIPPI's got these nice large MTUs. 
*/ if ((new_mtu < 68) || (new_mtu > 65280)) return -EINVAL; dev->mtu = new_mtu; return 0; } EXPORT_SYMBOL(hippi_change_mtu); /* * For HIPPI we will actually use the lower 4 bytes of the hardware * address as the I-FIELD rather than the actual hardware address. */ int hippi_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); return 0; } EXPORT_SYMBOL(hippi_mac_addr); int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) { /* Never send broadcast/multicast ARP messages */ p->mcast_probes = 0; /* In IPv6 unicast probes are valid even on NBMA, * because they are encapsulated in normal IPv6 protocol. * Should be a generic flag. */ if (p->tbl->family != AF_INET6) p->ucast_probes = 0; return 0; } EXPORT_SYMBOL(hippi_neigh_setup_dev); static const struct header_ops hippi_header_ops = { .create = hippi_header, .rebuild = hippi_rebuild_header, }; static void hippi_setup(struct net_device *dev) { dev->header_ops = &hippi_header_ops; /* * We don't support HIPPI `ARP' for the time being, and probably * never will unless someone else implements it. However we * still need a fake ARPHRD to make ifconfig and friends play ball. */ dev->type = ARPHRD_HIPPI; dev->hard_header_len = HIPPI_HLEN; dev->mtu = 65280; dev->addr_len = HIPPI_ALEN; dev->tx_queue_len = 25 /* 5 */; memset(dev->broadcast, 0xFF, HIPPI_ALEN); /* * HIPPI doesn't support broadcast+multicast and we only use * static ARP tables. ARP is disabled by hippi_neigh_setup_dev. */ dev->flags = 0; } /** * alloc_hippi_dev - Register HIPPI device * @sizeof_priv: Size of additional driver-private structure to be allocated * for this HIPPI device * * Fill in the fields of the device structure with HIPPI-generic values. * * Constructs a new net device, complete with a private data area of * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for * this private data area. 
*/ struct net_device *alloc_hippi_dev(int sizeof_priv) { return alloc_netdev(sizeof_priv, "hip%d", hippi_setup); } EXPORT_SYMBOL(alloc_hippi_dev);
gpl-2.0
maqiangddb/Android_kernel
drivers/media/dvb/mantis/mantis_dma.c
5057
6480
/*
	Mantis PCI bridge driver

	Copyright (C) Manu Abraham (abraham.manu@gmail.com)

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the Free Software
	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kernel.h>
#include <asm/page.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>

#include <asm/irq.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>

#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"

#include "mantis_common.h"
#include "mantis_reg.h"
#include "mantis_dma.h"

/* RISC instruction opcodes and helpers for the on-card DMA engine */
#define RISC_WRITE		(0x01 << 28)
#define RISC_JUMP		(0x07 << 28)
#define RISC_IRQ		(0x01 << 24)

#define RISC_STATUS(status)	((((~status) & 0x0f) << 20) | ((status & 0x0f) << 16))
#define RISC_FLUSH(risc_pos)		(risc_pos = 0)
#define RISC_INSTR(risc_pos, opcode)	(mantis->risc_cpu[risc_pos++] = cpu_to_le32(opcode))

#define MANTIS_BUF_SIZE		(64 * 1024)
#define MANTIS_BLOCK_BYTES      (MANTIS_BUF_SIZE / 4)
#define MANTIS_DMA_TR_BYTES     (2 * 1024) /* upper limit: 4095 bytes. */
#define MANTIS_BLOCK_COUNT	(MANTIS_BUF_SIZE / MANTIS_BLOCK_BYTES)

#define MANTIS_DMA_TR_UNITS     (MANTIS_BLOCK_BYTES / MANTIS_DMA_TR_BYTES)
/* MANTIS_BUF_SIZE / MANTIS_DMA_TR_UNITS must not exceed MANTIS_RISC_SIZE (4k RISC cmd buffer) */
#define MANTIS_RISC_SIZE	PAGE_SIZE /* RISC program must fit here. */

/*
 * Free the consistent DMA data buffer and RISC program buffer, if
 * allocated.  Safe to call repeatedly; pointers are NULLed after free.
 */
int mantis_dma_exit(struct mantis_pci *mantis)
{
	if (mantis->buf_cpu) {
		dprintk(MANTIS_ERROR, 1,
			"DMA=0x%lx cpu=0x%p size=%d",
			(unsigned long) mantis->buf_dma,
			 mantis->buf_cpu, MANTIS_BUF_SIZE);

		pci_free_consistent(mantis->pdev, MANTIS_BUF_SIZE,
				    mantis->buf_cpu, mantis->buf_dma);

		mantis->buf_cpu = NULL;
	}
	if (mantis->risc_cpu) {
		dprintk(MANTIS_ERROR, 1,
			"RISC=0x%lx cpu=0x%p size=%lx",
			(unsigned long) mantis->risc_dma,
			mantis->risc_cpu, MANTIS_RISC_SIZE);

		pci_free_consistent(mantis->pdev, MANTIS_RISC_SIZE,
				    mantis->risc_cpu, mantis->risc_dma);

		mantis->risc_cpu = NULL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mantis_dma_exit);

/*
 * Allocate the 64 KiB consistent data buffer and the page-sized RISC
 * program buffer.  On partial failure, previously allocated buffers
 * are released via mantis_dma_exit().  Returns 0 or -ENOMEM.
 */
static inline int mantis_alloc_buffers(struct mantis_pci *mantis)
{
	if (!mantis->buf_cpu) {
		mantis->buf_cpu = pci_alloc_consistent(mantis->pdev,
						       MANTIS_BUF_SIZE,
						       &mantis->buf_dma);
		if (!mantis->buf_cpu) {
			dprintk(MANTIS_ERROR, 1,
				"DMA buffer allocation failed");

			goto err;
		}
		dprintk(MANTIS_ERROR, 1,
			"DMA=0x%lx cpu=0x%p size=%d",
			(unsigned long) mantis->buf_dma,
			mantis->buf_cpu, MANTIS_BUF_SIZE);
	}
	if (!mantis->risc_cpu) {
		mantis->risc_cpu = pci_alloc_consistent(mantis->pdev,
							MANTIS_RISC_SIZE,
							&mantis->risc_dma);

		if (!mantis->risc_cpu) {
			dprintk(MANTIS_ERROR, 1,
				"RISC program allocation failed");
			mantis_dma_exit(mantis);

			goto err;
		}
		dprintk(MANTIS_ERROR, 1,
			"RISC=0x%lx cpu=0x%p size=%lx",
			(unsigned long) mantis->risc_dma,
			mantis->risc_cpu, MANTIS_RISC_SIZE);
	}

	return 0;
err:
	dprintk(MANTIS_ERROR, 1, "Out of memory (?) .....");
	return -ENOMEM;
}

/*
 * Set up DMA buffers for the bridge.
 *
 * NOTE(review): `err` is initialised to 0 and never reassigned, so the
 * failure path (`goto err; ... return err;`) still returns 0 — callers
 * cannot detect the allocation failure.  Looks like a bug; presumably
 * this should return the mantis_alloc_buffers() result or -ENOMEM.
 */
int mantis_dma_init(struct mantis_pci *mantis)
{
	int err = 0;

	dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
	if (mantis_alloc_buffers(mantis) < 0) {
		dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");

		/* Stop RISC Engine */
		mmwrite(0, MANTIS_DMA_CTL);

		goto err;
	}

	return 0;
err:
	return err;
}
EXPORT_SYMBOL_GPL(mantis_dma_init);

/*
 * Emit the circular RISC program: one WRITE instruction per DMA
 * transfer unit (the first of each block also raising an IRQ with the
 * block number encoded in the status bits), terminated by a JUMP back
 * to the program start.
 */
static inline void mantis_risc_program(struct mantis_pci *mantis)
{
	u32 buf_pos = 0;
	u32 line, step;
	u32 risc_pos;

	dprintk(MANTIS_DEBUG, 1, "Mantis create RISC program");
	RISC_FLUSH(risc_pos);

	dprintk(MANTIS_DEBUG, 1, "risc len lines %u, bytes per line %u, bytes per DMA tr %u",
		MANTIS_BLOCK_COUNT, MANTIS_BLOCK_BYTES, MANTIS_DMA_TR_BYTES);

	for (line = 0; line < MANTIS_BLOCK_COUNT; line++) {
		for (step = 0; step < MANTIS_DMA_TR_UNITS; step++) {
			dprintk(MANTIS_DEBUG, 1, "RISC PROG line=[%d], step=[%d]", line, step);
			if (step == 0) {
				RISC_INSTR(risc_pos, RISC_WRITE	|
					   RISC_IRQ	|
					   RISC_STATUS(line) |
					   MANTIS_DMA_TR_BYTES);
			} else {
				RISC_INSTR(risc_pos, RISC_WRITE | MANTIS_DMA_TR_BYTES);
			}
			RISC_INSTR(risc_pos, mantis->buf_dma + buf_pos);
			buf_pos += MANTIS_DMA_TR_BYTES;
		  }
	}
	RISC_INSTR(risc_pos, RISC_JUMP);
	RISC_INSTR(risc_pos, mantis->risc_dma);
}

/*
 * Program the RISC engine and enable FIFO/capture/RISC DMA along with
 * the RISC interrupt.  Register-write order follows the hardware's
 * expectations — do not reorder.
 */
void mantis_dma_start(struct mantis_pci *mantis)
{
	dprintk(MANTIS_DEBUG, 1, "Mantis Start DMA engine");

	mantis_risc_program(mantis);
	mmwrite(mantis->risc_dma, MANTIS_RISC_START);
	mmwrite(mmread(MANTIS_GPIF_ADDR) | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR);

	mmwrite(0, MANTIS_DMA_CTL);
	mantis->last_block = mantis->busy_block = 0;

	mmwrite(mmread(MANTIS_INT_MASK) | MANTIS_INT_RISCI, MANTIS_INT_MASK);

	mmwrite(MANTIS_FIFO_EN | MANTIS_DCAP_EN
			       | MANTIS_RISC_EN, MANTIS_DMA_CTL);

}

/*
 * Disable DMA, acknowledge pending interrupt status and mask the RISC
 * interrupts.
 */
void mantis_dma_stop(struct mantis_pci *mantis)
{
	u32 stat = 0, mask = 0;

	stat = mmread(MANTIS_INT_STAT);
	mask = mmread(MANTIS_INT_MASK);
	dprintk(MANTIS_DEBUG, 1, "Mantis Stop DMA engine");

	mmwrite((mmread(MANTIS_GPIF_ADDR) & (~(MANTIS_GPIF_HIFRDWRN))), MANTIS_GPIF_ADDR);

	mmwrite((mmread(MANTIS_DMA_CTL) & ~(MANTIS_FIFO_EN |
					    MANTIS_DCAP_EN |
					    MANTIS_RISC_EN)), MANTIS_DMA_CTL);

	mmwrite(mmread(MANTIS_INT_STAT), MANTIS_INT_STAT);

	mmwrite(mmread(MANTIS_INT_MASK) & ~(MANTIS_INT_RISCI |
					    MANTIS_INT_RISCEN), MANTIS_INT_MASK);
}

/*
 * Tasklet body (@data is the struct mantis_pci *): feed each completed
 * DMA block to the software demux, choosing the 204- or 188-byte packet
 * filter based on the hardware config, until last_block catches up with
 * the block the engine is currently filling.
 */
void mantis_dma_xfer(unsigned long data)
{
	struct mantis_pci *mantis = (struct mantis_pci *) data;
	struct mantis_hwconfig *config = mantis->hwconfig;

	while (mantis->last_block != mantis->busy_block) {
		dprintk(MANTIS_DEBUG, 1, "last block=[%d] finished block=[%d]",
			mantis->last_block, mantis->busy_block);

		(config->ts_size ? dvb_dmx_swfilter_204 : dvb_dmx_swfilter)
		(&mantis->demux, &mantis->buf_cpu[mantis->last_block * MANTIS_BLOCK_BYTES], MANTIS_BLOCK_BYTES);
		mantis->last_block = (mantis->last_block + 1) % MANTIS_BLOCK_COUNT;
	}
}
gpl-2.0
leftrepo/Owl-Kernel-for-Xperia-Sola
arch/powerpc/platforms/cell/celleb_setup.c
7617
6020
/*
 * Celleb setup code
 *
 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
 *
 * This code is based on arch/powerpc/platforms/cell/setup.c:
 *  Copyright (C) 1995  Linus Torvalds
 *  Adapted from 'alpha' version by Gary Thomas
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  Modified by PPC64 Team, IBM Corp
 *  Modified by Cell Team, IBM Deutschland Entwicklung GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#undef DEBUG

#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/of_platform.h>

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/cell-regs.h>

#include "beat_interrupt.h"
#include "beat_wrapper.h"
#include "beat.h"
#include "celleb_pci.h"
#include "interrupt.h"
#include "pervasive.h"
#include "ras.h"

/* Machine string shown in /proc/cpuinfo; overridable via boot parameter. */
static char celleb_machine_type[128] = "Celleb";

/* /proc/cpuinfo "machine" line: machine type plus the DT model string. */
static void celleb_show_cpuinfo(struct seq_file *m)
{
	struct device_node *root;
	const char *model = "";

	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	/* using "CHRP" is to trick anaconda into installing FCx into Celleb */
	seq_printf(m, "machine\t\t: %s %s\n", celleb_machine_type, model);
	of_node_put(root);
}

/* Boot-time override for the machine-type string (see comment above). */
static int __init celleb_machine_type_hack(char *ptr)
{
	strlcpy(celleb_machine_type, ptr, sizeof(celleb_machine_type));
	return 0;
}

__setup("celleb_machine_type_hack=", celleb_machine_type_hack);

/* Early-boot progress callback: print hex code and message to console. */
static void celleb_progress(char *s, unsigned short hex)
{
	printk("*** %04x : %s\n", hex, s ? s : "");
}

static void __init celleb_setup_arch_common(void)
{
	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif
}

static struct of_device_id celleb_bus_ids[] __initdata = {
	{ .type = "scc", },
	{ .type = "ioif", },	/* old style */
	{},
};

static int __init celleb_publish_devices(void)
{
	/* Publish OF platform devices for southbridge IOs */
	of_platform_bus_probe(NULL, celleb_bus_ids, NULL);

	return 0;
}
machine_device_initcall(celleb_beat, celleb_publish_devices);
machine_device_initcall(celleb_native, celleb_publish_devices);


/*
 * functions for Celleb-Beat
 */
static void __init celleb_setup_arch_beat(void)
{
#ifdef CONFIG_SPU_BASE
	spu_priv1_ops		= &spu_priv1_beat_ops;
	spu_management_ops	= &spu_management_of_ops;
#endif

	celleb_setup_arch_common();
}

/* Probe: match only when the flat DT root is compatible with "Beat". */
static int __init celleb_probe_beat(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (!of_flat_dt_is_compatible(root, "Beat"))
		return 0;

	powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS
		| FW_FEATURE_BEAT | FW_FEATURE_LPAR;
	hpte_init_beat_v3();

	return 1;
}


/*
 * functions for Celleb-native
 */
static void __init celleb_init_IRQ_native(void)
{
	iic_init_IRQ();
	spider_init_IRQ();
}

static void __init celleb_setup_arch_native(void)
{
#ifdef CONFIG_SPU_BASE
	spu_priv1_ops		= &spu_priv1_mmio_ops;
	spu_management_ops	= &spu_management_of_ops;
#endif

	cbe_regs_init();

#ifdef CONFIG_CBE_RAS
	cbe_ras_init();
#endif

#ifdef CONFIG_SMP
	smp_init_cell();
#endif

	cbe_pervasive_init();

	/* XXX: nvram initialization should be added */

	celleb_setup_arch_common();
}

/* Probe: native Celleb — "TOSHIBA,Celleb" compatible but NOT "Beat". */
static int __init celleb_probe_native(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (of_flat_dt_is_compatible(root, "Beat") ||
	    !of_flat_dt_is_compatible(root, "TOSHIBA,Celleb"))
		return 0;

	powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS;
	hpte_init_native();

	return 1;
}


/*
 * machine definitions
 */
define_machine(celleb_beat) {
	.name			= "Cell Reference Set (Beat)",
	.probe			= celleb_probe_beat,
	.setup_arch		= celleb_setup_arch_beat,
	.show_cpuinfo		= celleb_show_cpuinfo,
	.restart		= beat_restart,
	.power_off		= beat_power_off,
	.halt			= beat_halt,
	.get_rtc_time		= beat_get_rtc_time,
	.set_rtc_time		= beat_set_rtc_time,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= celleb_progress,
	.power_save		= beat_power_save,
	.nvram_size		= beat_nvram_get_size,
	.nvram_read		= beat_nvram_read,
	.nvram_write		= beat_nvram_write,
	.set_dabr		= beat_set_xdabr,
	.init_IRQ		= beatic_init_IRQ,
	.get_irq		= beatic_get_irq,
	.pci_probe_mode 	= celleb_pci_probe_mode,
	.pci_setup_phb		= celleb_setup_phb,
#ifdef CONFIG_KEXEC
	.kexec_cpu_down		= beat_kexec_cpu_down,
#endif
};

define_machine(celleb_native) {
	.name			= "Cell Reference Set (native)",
	.probe			= celleb_probe_native,
	.setup_arch		= celleb_setup_arch_native,
	.show_cpuinfo		= celleb_show_cpuinfo,
	.restart		= rtas_restart,
	.power_off		= rtas_power_off,
	.halt			= rtas_halt,
	.get_boot_time		= rtas_get_boot_time,
	.get_rtc_time		= rtas_get_rtc_time,
	.set_rtc_time		= rtas_set_rtc_time,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= celleb_progress,
	.pci_probe_mode 	= celleb_pci_probe_mode,
	.pci_setup_phb		= celleb_setup_phb,
	.init_IRQ		= celleb_init_IRQ_native,
};
gpl-2.0
ptmr3/S4_jflte-xx-_Kernel2
drivers/w1/masters/ds2482.c
8385
13395
/**
 * ds2482.c - provides i2c to w1-master bridge(s)
 * Copyright (C) 2005  Ben Gardner <bgardner@wabtec.com>
 *
 * The DS2482 is a sensor chip made by Dallas Semiconductor (Maxim).
 * It is a I2C to 1-wire bridge.
 * There are two variations: -100 and -800, which have 1 or 8 1-wire ports.
 * The complete datasheet can be obtained from MAXIM's website at:
 *   http://www.maxim-ic.com/quick_view2.cfm/qv_pk/4382
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <asm/delay.h>

#include "../w1.h"
#include "../w1_int.h"

/**
 * The DS2482 registers - there are 3 registers that are addressed by a read
 * pointer. The read pointer is set by the last command executed.
 *
 * To read the data, issue a register read for any address
 */
#define DS2482_CMD_RESET		0xF0	/* No param */
#define DS2482_CMD_SET_READ_PTR		0xE1	/* Param: DS2482_PTR_CODE_xxx */
#define DS2482_CMD_CHANNEL_SELECT	0xC3	/* Param: Channel byte - DS2482-800 only */
#define DS2482_CMD_WRITE_CONFIG		0xD2	/* Param: Config byte */
#define DS2482_CMD_1WIRE_RESET		0xB4	/* Param: None */
#define DS2482_CMD_1WIRE_SINGLE_BIT	0x87	/* Param: Bit byte (bit7) */
#define DS2482_CMD_1WIRE_WRITE_BYTE	0xA5	/* Param: Data byte */
#define DS2482_CMD_1WIRE_READ_BYTE	0x96	/* Param: None */
/* Note to read the byte, Set the ReadPtr to Data then read (any addr) */
#define DS2482_CMD_1WIRE_TRIPLET	0x78	/* Param: Dir byte (bit7) */

/* Values for DS2482_CMD_SET_READ_PTR */
#define DS2482_PTR_CODE_STATUS		0xF0
#define DS2482_PTR_CODE_DATA		0xE1
#define DS2482_PTR_CODE_CHANNEL		0xD2	/* DS2482-800 only */
#define DS2482_PTR_CODE_CONFIG		0xC3

/**
 * Configure Register bit definitions
 * The top 4 bits always read 0.
 * To write, the top nibble must be the 1's compl. of the low nibble.
 */
#define DS2482_REG_CFG_1WS		0x08
#define DS2482_REG_CFG_SPU		0x04
#define DS2482_REG_CFG_PPM		0x02
#define DS2482_REG_CFG_APU		0x01

/**
 * Write and verify codes for the CHANNEL_SELECT command (DS2482-800 only).
 * To set the channel, write the value at the index of the channel.
 * Read and compare against the corresponding value to verify the change.
 */
static const u8 ds2482_chan_wr[8] =
	{ 0xF0, 0xE1, 0xD2, 0xC3, 0xB4, 0xA5, 0x96, 0x87 };
static const u8 ds2482_chan_rd[8] =
	{ 0xB8, 0xB1, 0xAA, 0xA3, 0x9C, 0x95, 0x8E, 0x87 };

/**
 * Status Register bit definitions (read only)
 */
#define DS2482_REG_STS_DIR		0x80
#define DS2482_REG_STS_TSB		0x40
#define DS2482_REG_STS_SBR		0x20
#define DS2482_REG_STS_RST		0x10
#define DS2482_REG_STS_LL		0x08
#define DS2482_REG_STS_SD		0x04
#define DS2482_REG_STS_PPD		0x02
#define DS2482_REG_STS_1WB		0x01

static int ds2482_probe(struct i2c_client *client,
			const struct i2c_device_id *id);
static int ds2482_remove(struct i2c_client *client);

/**
 * Driver data (common to all clients)
 */
static const struct i2c_device_id ds2482_id[] = {
	{ "ds2482", 0 },
	{ }
};

static struct i2c_driver ds2482_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "ds2482",
	},
	.probe		= ds2482_probe,
	.remove		= ds2482_remove,
	.id_table	= ds2482_id,
};

/*
 * Client data (each client gets its own)
 */
struct ds2482_data;

struct ds2482_w1_chan {
	struct ds2482_data	*pdev;		/* back-pointer to the device */
	u8			channel;	/* 1-wire port index (0-7) */
	struct w1_bus_master	w1_bm;		/* registered w1 master ops */
};

struct ds2482_data {
	struct i2c_client	*client;
	struct mutex		access_lock;	/* serializes all chip access */

	/* 1-wire interface(s) */
	int			w1_count;	/* 1 or 8 */
	struct ds2482_w1_chan	w1_ch[8];

	/* per-device values */
	u8			channel;	/* currently selected channel */
	u8			read_prt;	/* see DS2482_PTR_CODE_xxx */
	u8			reg_config;
};


/**
 * Sets the read pointer.
 * @param pdev		The ds2482 client pointer
 * @param read_ptr	see DS2482_PTR_CODE_xxx above
 * @return -1 on failure, 0 on success
 */
static inline int ds2482_select_register(struct ds2482_data *pdev, u8 read_ptr)
{
	/* skip the I2C transaction if the pointer is already correct */
	if (pdev->read_prt != read_ptr) {
		if (i2c_smbus_write_byte_data(pdev->client,
					      DS2482_CMD_SET_READ_PTR,
					      read_ptr) < 0)
			return -1;

		pdev->read_prt = read_ptr;
	}
	return 0;
}

/**
 * Sends a command without a parameter
 * @param pdev	The ds2482 client pointer
 * @param cmd	DS2482_CMD_RESET,
 *		DS2482_CMD_1WIRE_RESET,
 *		DS2482_CMD_1WIRE_READ_BYTE
 * @return -1 on failure, 0 on success
 */
static inline int ds2482_send_cmd(struct ds2482_data *pdev, u8 cmd)
{
	if (i2c_smbus_write_byte(pdev->client, cmd) < 0)
		return -1;

	pdev->read_prt = DS2482_PTR_CODE_STATUS;
	return 0;
}

/**
 * Sends a command with a parameter
 * @param pdev	The ds2482 client pointer
 * @param cmd	DS2482_CMD_WRITE_CONFIG,
 *		DS2482_CMD_1WIRE_SINGLE_BIT,
 *		DS2482_CMD_1WIRE_WRITE_BYTE,
 *		DS2482_CMD_1WIRE_TRIPLET
 * @param byte	The data to send
 * @return -1 on failure, 0 on success
 */
static inline int ds2482_send_cmd_data(struct ds2482_data *pdev,
				       u8 cmd, u8 byte)
{
	if (i2c_smbus_write_byte_data(pdev->client, cmd, byte) < 0)
		return -1;

	/* all cmds leave in STATUS, except CONFIG */
	pdev->read_prt = (cmd != DS2482_CMD_WRITE_CONFIG) ?
			 DS2482_PTR_CODE_STATUS : DS2482_PTR_CODE_CONFIG;
	return 0;
}


/*
 * 1-Wire interface code
 */

#define DS2482_WAIT_IDLE_TIMEOUT	100

/**
 * Waits until the 1-wire interface is idle (not busy)
 *
 * @param pdev Pointer to the device structure
 * @return the last value read from status or -1 (failure)
 */
static int ds2482_wait_1wire_idle(struct ds2482_data *pdev)
{
	int temp = -1;
	int retries = 0;

	/* poll the status register until the 1WB (busy) bit clears */
	if (!ds2482_select_register(pdev, DS2482_PTR_CODE_STATUS)) {
		do {
			temp = i2c_smbus_read_byte(pdev->client);
		} while ((temp >= 0) && (temp & DS2482_REG_STS_1WB) &&
			 (++retries < DS2482_WAIT_IDLE_TIMEOUT));
	}

	if (retries >= DS2482_WAIT_IDLE_TIMEOUT)
		printk(KERN_ERR "%s: timeout on channel %d\n",
		       __func__, pdev->channel);

	return temp;
}

/**
 * Selects a w1 channel.
 * The 1-wire interface must be idle before calling this function.
 *
 * @param pdev		The ds2482 client pointer
 * @param channel	0-7
 * @return		-1 (failure) or 0 (success)
 */
static int ds2482_set_channel(struct ds2482_data *pdev, u8 channel)
{
	if (i2c_smbus_write_byte_data(pdev->client, DS2482_CMD_CHANNEL_SELECT,
				      ds2482_chan_wr[channel]) < 0)
		return -1;

	pdev->read_prt = DS2482_PTR_CODE_CHANNEL;
	pdev->channel = -1;
	/* read back the verification code to confirm the switch */
	if (i2c_smbus_read_byte(pdev->client) == ds2482_chan_rd[channel]) {
		pdev->channel = channel;
		return 0;
	}
	return -1;
}


/**
 * Performs the touch-bit function, which writes a 0 or 1 and reads the level.
 *
 * @param data	The ds2482 channel pointer
 * @param bit	The level to write: 0 or non-zero
 * @return	The level read: 0 or 1
 */
static u8 ds2482_w1_touch_bit(void *data, u8 bit)
{
	struct ds2482_w1_chan *pchan = data;
	struct ds2482_data    *pdev = pchan->pdev;
	int status = -1;

	mutex_lock(&pdev->access_lock);

	/* Select the channel */
	ds2482_wait_1wire_idle(pdev);
	if (pdev->w1_count > 1)
		ds2482_set_channel(pdev, pchan->channel);

	/* Send the touch command, wait until 1WB == 0, return the status */
	if (!ds2482_send_cmd_data(pdev, DS2482_CMD_1WIRE_SINGLE_BIT,
				  bit ? 0xFF : 0))
		status = ds2482_wait_1wire_idle(pdev);

	mutex_unlock(&pdev->access_lock);

	return (status & DS2482_REG_STS_SBR) ? 1 : 0;
}

/**
 * Performs the triplet function, which reads two bits and writes a bit.
 * The bit written is determined by the two reads:
 *   00 => dbit, 01 => 0, 10 => 1
 *
 * @param data	The ds2482 channel pointer
 * @param dbit	The direction to choose if both branches are valid
 * @return	b0=read1 b1=read2 b3=bit written
 */
static u8 ds2482_w1_triplet(void *data, u8 dbit)
{
	struct ds2482_w1_chan *pchan = data;
	struct ds2482_data    *pdev = pchan->pdev;
	int status = (3 << 5);

	mutex_lock(&pdev->access_lock);

	/* Select the channel */
	ds2482_wait_1wire_idle(pdev);
	if (pdev->w1_count > 1)
		ds2482_set_channel(pdev, pchan->channel);

	/* Send the triplet command, wait until 1WB == 0, return the status */
	if (!ds2482_send_cmd_data(pdev, DS2482_CMD_1WIRE_TRIPLET,
				  dbit ? 0xFF : 0))
		status = ds2482_wait_1wire_idle(pdev);

	mutex_unlock(&pdev->access_lock);

	/* Decode the status */
	return (status >> 5);
}

/**
 * Performs the write byte function.
 *
 * @param data	The ds2482 channel pointer
 * @param byte	The value to write
 */
static void ds2482_w1_write_byte(void *data, u8 byte)
{
	struct ds2482_w1_chan *pchan = data;
	struct ds2482_data    *pdev = pchan->pdev;

	mutex_lock(&pdev->access_lock);

	/* Select the channel */
	ds2482_wait_1wire_idle(pdev);
	if (pdev->w1_count > 1)
		ds2482_set_channel(pdev, pchan->channel);

	/* Send the write byte command */
	ds2482_send_cmd_data(pdev, DS2482_CMD_1WIRE_WRITE_BYTE, byte);

	mutex_unlock(&pdev->access_lock);
}

/**
 * Performs the read byte function.
 *
 * @param data	The ds2482 channel pointer
 * @return	The value read
 */
static u8 ds2482_w1_read_byte(void *data)
{
	struct ds2482_w1_chan *pchan = data;
	struct ds2482_data    *pdev = pchan->pdev;
	int result;

	mutex_lock(&pdev->access_lock);

	/* Select the channel */
	ds2482_wait_1wire_idle(pdev);
	if (pdev->w1_count > 1)
		ds2482_set_channel(pdev, pchan->channel);

	/* Send the read byte command */
	ds2482_send_cmd(pdev, DS2482_CMD_1WIRE_READ_BYTE);

	/* Wait until 1WB == 0 */
	ds2482_wait_1wire_idle(pdev);

	/* Select the data register */
	ds2482_select_register(pdev, DS2482_PTR_CODE_DATA);

	/* Read the data byte */
	result = i2c_smbus_read_byte(pdev->client);

	mutex_unlock(&pdev->access_lock);

	return result;
}


/**
 * Sends a reset on the 1-wire interface
 *
 * @param data	The ds2482 channel pointer
 * @return	0=Device present, 1=No device present or error
 */
static u8 ds2482_w1_reset_bus(void *data)
{
	struct ds2482_w1_chan *pchan = data;
	struct ds2482_data    *pdev = pchan->pdev;
	int err;
	u8 retval = 1;

	mutex_lock(&pdev->access_lock);

	/* Select the channel */
	ds2482_wait_1wire_idle(pdev);
	if (pdev->w1_count > 1)
		ds2482_set_channel(pdev, pchan->channel);

	/* Send the reset command */
	err = ds2482_send_cmd(pdev, DS2482_CMD_1WIRE_RESET);
	if (err >= 0) {
		/* Wait until the reset is complete */
		err = ds2482_wait_1wire_idle(pdev);
		/* PPD (presence pulse detect) set => a slave answered */
		retval = !(err & DS2482_REG_STS_PPD);

		/* If the chip did reset since detect, re-config it */
		if (err & DS2482_REG_STS_RST)
			ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
					     0xF0);
	}

	mutex_unlock(&pdev->access_lock);

	return retval;
}


/*
 * Probe: verify the chip answers like a DS2482, detect the -800
 * (8-port) variant, and register one w1 bus master per port.
 */
static int ds2482_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct ds2482_data *data;
	int err = -ENODEV;
	int temp1;
	int idx;

	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
				     I2C_FUNC_SMBUS_BYTE))
		return -ENODEV;

	if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) {
		err = -ENOMEM;
		goto exit;
	}

	data->client = client;
	i2c_set_clientdata(client, data);

	/* Reset the device (sets the read_ptr to status) */
	if (ds2482_send_cmd(data, DS2482_CMD_RESET) < 0) {
		dev_warn(&client->dev, "DS2482 reset failed.\n");
		goto exit_free;
	}

	/* Sleep at least 525ns to allow the reset to complete */
	ndelay(525);

	/* Read the status byte - only reset bit and line should be set */
	temp1 = i2c_smbus_read_byte(client);
	if (temp1 != (DS2482_REG_STS_LL | DS2482_REG_STS_RST)) {
		dev_warn(&client->dev, "DS2482 reset status "
			 "0x%02X - not a DS2482\n", temp1);
		goto exit_free;
	}

	/* Detect the 8-port version */
	data->w1_count = 1;
	if (ds2482_set_channel(data, 7) == 0)
		data->w1_count = 8;

	/* Set all config items to 0 (off) */
	ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG, 0xF0);

	mutex_init(&data->access_lock);

	/* Register 1-wire interface(s) */
	for (idx = 0; idx < data->w1_count; idx++) {
		data->w1_ch[idx].pdev = data;
		data->w1_ch[idx].channel = idx;

		/* Populate all the w1 bus master stuff */
		data->w1_ch[idx].w1_bm.data       = &data->w1_ch[idx];
		data->w1_ch[idx].w1_bm.read_byte  = ds2482_w1_read_byte;
		data->w1_ch[idx].w1_bm.write_byte = ds2482_w1_write_byte;
		data->w1_ch[idx].w1_bm.touch_bit  = ds2482_w1_touch_bit;
		data->w1_ch[idx].w1_bm.triplet    = ds2482_w1_triplet;
		data->w1_ch[idx].w1_bm.reset_bus  = ds2482_w1_reset_bus;

		err = w1_add_master_device(&data->w1_ch[idx].w1_bm);
		if (err) {
			data->w1_ch[idx].pdev = NULL;
			goto exit_w1_remove;
		}
	}

	return 0;

exit_w1_remove:
	/* unwind any masters registered before the failure */
	for (idx = 0; idx < data->w1_count; idx++) {
		if (data->w1_ch[idx].pdev != NULL)
			w1_remove_master_device(&data->w1_ch[idx].w1_bm);
	}
exit_free:
	kfree(data);
exit:
	return err;
}

static int ds2482_remove(struct i2c_client *client)
{
	struct ds2482_data   *data = i2c_get_clientdata(client);
	int idx;

	/* Unregister the 1-wire bridge(s) */
	for (idx = 0; idx < data->w1_count; idx++) {
		if (data->w1_ch[idx].pdev != NULL)
			w1_remove_master_device(&data->w1_ch[idx].w1_bm);
	}

	/* Free the memory */
	kfree(data);
	return 0;
}

static int __init sensors_ds2482_init(void)
{
	return i2c_add_driver(&ds2482_driver);
}

static void __exit sensors_ds2482_exit(void)
{
	i2c_del_driver(&ds2482_driver);
}

MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
MODULE_DESCRIPTION("DS2482 driver");
MODULE_LICENSE("GPL");

module_init(sensors_ds2482_init);
module_exit(sensors_ds2482_exit);
gpl-2.0
maqiangddb/Android_kernel
drivers/block/paride/dstr.c
15553
5143
/* dstr.c (c) 1997-8 Grant R. Guenther <grant@torque.net> Under the terms of the GNU General Public License. dstr.c is a low-level protocol driver for the DataStor EP2000 parallel to IDE adapter chip. */ /* Changes: 1.01 GRG 1998.05.06 init_proto, release_proto */ #define DSTR_VERSION "1.01" #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/wait.h> #include <asm/io.h> #include "paride.h" /* mode codes: 0 nybble reads, 8-bit writes 1 8-bit reads and writes 2 8-bit EPP mode 3 EPP-16 4 EPP-32 */ #define j44(a,b) (((a>>3)&0x07)|((~a>>4)&0x08)|((b<<1)&0x70)|((~b)&0x80)) #define P1 w2(5);w2(0xd);w2(5);w2(4); #define P2 w2(5);w2(7);w2(5);w2(4); #define P3 w2(6);w2(4);w2(6);w2(4); /* cont = 0 - access the IDE register file cont = 1 - access the IDE command set */ static int cont_map[2] = { 0x20, 0x40 }; static int dstr_read_regr( PIA *pi, int cont, int regr ) { int a, b, r; r = regr + cont_map[cont]; w0(0x81); P1; if (pi->mode) { w0(0x11); } else { w0(1); } P2; w0(r); P1; switch (pi->mode) { case 0: w2(6); a = r1(); w2(4); w2(6); b = r1(); w2(4); return j44(a,b); case 1: w0(0); w2(0x26); a = r0(); w2(4); return a; case 2: case 3: case 4: w2(0x24); a = r4(); w2(4); return a; } return -1; } static void dstr_write_regr( PIA *pi, int cont, int regr, int val ) { int r; r = regr + cont_map[cont]; w0(0x81); P1; if (pi->mode >= 2) { w0(0x11); } else { w0(1); } P2; w0(r); P1; switch (pi->mode) { case 0: case 1: w0(val); w2(5); w2(7); w2(5); w2(4); break; case 2: case 3: case 4: w4(val); break; } } #define CCP(x) w0(0xff);w2(0xc);w2(4);\ w0(0xaa);w0(0x55);w0(0);w0(0xff);w0(0x87);w0(0x78);\ w0(x);w2(5);w2(4); static void dstr_connect ( PIA *pi ) { pi->saved_r0 = r0(); pi->saved_r2 = r2(); w2(4); CCP(0xe0); w0(0xff); } static void dstr_disconnect ( PIA *pi ) { CCP(0x30); w0(pi->saved_r0); w2(pi->saved_r2); } static void dstr_read_block( PIA *pi, char * buf, int count ) { int k, a, b; w0(0x81); 
P1; if (pi->mode) { w0(0x19); } else { w0(9); } P2; w0(0x82); P1; P3; w0(0x20); P1; switch (pi->mode) { case 0: for (k=0;k<count;k++) { w2(6); a = r1(); w2(4); w2(6); b = r1(); w2(4); buf[k] = j44(a,b); } break; case 1: w0(0); for (k=0;k<count;k++) { w2(0x26); buf[k] = r0(); w2(0x24); } w2(4); break; case 2: w2(0x24); for (k=0;k<count;k++) buf[k] = r4(); w2(4); break; case 3: w2(0x24); for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w(); w2(4); break; case 4: w2(0x24); for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l(); w2(4); break; } } static void dstr_write_block( PIA *pi, char * buf, int count ) { int k; w0(0x81); P1; if (pi->mode) { w0(0x19); } else { w0(9); } P2; w0(0x82); P1; P3; w0(0x20); P1; switch (pi->mode) { case 0: case 1: for (k=0;k<count;k++) { w2(5); w0(buf[k]); w2(7); } w2(5); w2(4); break; case 2: w2(0xc5); for (k=0;k<count;k++) w4(buf[k]); w2(0xc4); break; case 3: w2(0xc5); for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]); w2(0xc4); break; case 4: w2(0xc5); for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]); w2(0xc4); break; } } static void dstr_log_adapter( PIA *pi, char * scratch, int verbose ) { char *mode_string[5] = {"4-bit","8-bit","EPP-8", "EPP-16","EPP-32"}; printk("%s: dstr %s, DataStor EP2000 at 0x%x, ", pi->device,DSTR_VERSION,pi->port); printk("mode %d (%s), delay %d\n",pi->mode, mode_string[pi->mode],pi->delay); } static struct pi_protocol dstr = { .owner = THIS_MODULE, .name = "dstr", .max_mode = 5, .epp_first = 2, .default_delay = 1, .max_units = 1, .write_regr = dstr_write_regr, .read_regr = dstr_read_regr, .write_block = dstr_write_block, .read_block = dstr_read_block, .connect = dstr_connect, .disconnect = dstr_disconnect, .log_adapter = dstr_log_adapter, }; static int __init dstr_init(void) { return paride_register(&dstr); } static void __exit dstr_exit(void) { paride_unregister(&dstr); } MODULE_LICENSE("GPL"); module_init(dstr_init) module_exit(dstr_exit)
gpl-2.0
TeamFahQ/kernel_linux-4.2.6-lz
drivers/gpu/drm/i915/intel_fbdev.c
194
24170
/* * Copyright © 2007 David Airlie * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * David Airlie */ #include <linux/async.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/console.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/sysrq.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/vga_switcheroo.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_fb_helper.h> #include "intel_drv.h" #include <drm/i915_drm.h> #include "i915_drv.h" static int intel_fbdev_set_par(struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct intel_fbdev *ifbdev = container_of(fb_helper, struct intel_fbdev, helper); int ret; ret = drm_fb_helper_set_par(info); if (ret == 0) { /* * FIXME: fbdev presumes that all callbacks also work from * atomic contexts and relies on that for emergency oops * printing. KMS totally doesn't do that and the locking here is * by far not the only place this goes wrong. Ignore this for * now until we solve this for real. */ mutex_lock(&fb_helper->dev->struct_mutex); ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj, true); mutex_unlock(&fb_helper->dev->struct_mutex); } return ret; } static int intel_fbdev_blank(int blank, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct intel_fbdev *ifbdev = container_of(fb_helper, struct intel_fbdev, helper); int ret; ret = drm_fb_helper_blank(blank, info); if (ret == 0) { /* * FIXME: fbdev presumes that all callbacks also work from * atomic contexts and relies on that for emergency oops * printing. KMS totally doesn't do that and the locking here is * by far not the only place this goes wrong. Ignore this for * now until we solve this for real. 
*/ mutex_lock(&fb_helper->dev->struct_mutex); intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT); mutex_unlock(&fb_helper->dev->struct_mutex); } return ret; } static int intel_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct intel_fbdev *ifbdev = container_of(fb_helper, struct intel_fbdev, helper); int ret; ret = drm_fb_helper_pan_display(var, info); if (ret == 0) { /* * FIXME: fbdev presumes that all callbacks also work from * atomic contexts and relies on that for emergency oops * printing. KMS totally doesn't do that and the locking here is * by far not the only place this goes wrong. Ignore this for * now until we solve this for real. */ mutex_lock(&fb_helper->dev->struct_mutex); intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT); mutex_unlock(&fb_helper->dev->struct_mutex); } return ret; } static struct fb_ops intelfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = intel_fbdev_set_par, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_pan_display = intel_fbdev_pan_display, .fb_blank = intel_fbdev_blank, .fb_setcmap = drm_fb_helper_setcmap, .fb_debug_enter = drm_fb_helper_debug_enter, .fb_debug_leave = drm_fb_helper_debug_leave, }; static int intelfb_alloc(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct intel_fbdev *ifbdev = container_of(helper, struct intel_fbdev, helper); struct drm_framebuffer *fb; struct drm_device *dev = helper->dev; struct drm_mode_fb_cmd2 mode_cmd = {}; struct drm_i915_gem_object *obj; int size, ret; /* we don't do packed 24bpp */ if (sizes->surface_bpp == 24) sizes->surface_bpp = 32; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(sizes->surface_bpp, 8), 64); mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 
sizes->surface_depth); size = mode_cmd.pitches[0] * mode_cmd.height; size = PAGE_ALIGN(size); obj = i915_gem_object_create_stolen(dev, size); if (obj == NULL) obj = i915_gem_alloc_object(dev, size); if (!obj) { DRM_ERROR("failed to allocate framebuffer\n"); ret = -ENOMEM; goto out; } fb = __intel_framebuffer_create(dev, &mode_cmd, obj); if (IS_ERR(fb)) { ret = PTR_ERR(fb); goto out_unref; } /* Flush everything out, we'll be doing GTT only from now on */ ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL); if (ret) { DRM_ERROR("failed to pin obj: %d\n", ret); goto out_fb; } ifbdev->fb = to_intel_framebuffer(fb); return 0; out_fb: drm_framebuffer_remove(fb); out_unref: drm_gem_object_unreference(&obj->base); out: return ret; } static int intelfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct intel_fbdev *ifbdev = container_of(helper, struct intel_fbdev, helper); struct intel_framebuffer *intel_fb = ifbdev->fb; struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct fb_info *info; struct drm_framebuffer *fb; struct drm_i915_gem_object *obj; int size, ret; bool prealloc = false; mutex_lock(&dev->struct_mutex); if (intel_fb && (sizes->fb_width > intel_fb->base.width || sizes->fb_height > intel_fb->base.height)) { DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d)," " releasing it\n", intel_fb->base.width, intel_fb->base.height, sizes->fb_width, sizes->fb_height); drm_framebuffer_unreference(&intel_fb->base); intel_fb = ifbdev->fb = NULL; } if (!intel_fb || WARN_ON(!intel_fb->obj)) { DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); ret = intelfb_alloc(helper, sizes); if (ret) goto out_unlock; intel_fb = ifbdev->fb; } else { DRM_DEBUG_KMS("re-using BIOS fb\n"); prealloc = true; sizes->fb_width = intel_fb->base.width; sizes->fb_height = intel_fb->base.height; } obj = intel_fb->obj; size = obj->base.size; info = framebuffer_alloc(0, &dev->pdev->dev); if (!info) { ret = 
-ENOMEM; goto out_unpin; } info->par = helper; fb = &ifbdev->fb->base; ifbdev->helper.fb = fb; ifbdev->helper.fbdev = info; strcpy(info->fix.id, "inteldrmfb"); info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &intelfb_ops; ret = fb_alloc_cmap(&info->cmap, 256, 0); if (ret) { ret = -ENOMEM; goto out_unpin; } /* setup aperture base/size for vesafb takeover */ info->apertures = alloc_apertures(1); if (!info->apertures) { ret = -ENOMEM; goto out_unpin; } info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); info->fix.smem_len = size; info->screen_base = ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), size); if (!info->screen_base) { ret = -ENOSPC; goto out_unpin; } info->screen_size = size; /* This driver doesn't need a VT switch to restore the mode on resume */ info->skip_vt_switch = true; drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); /* If the object is shmemfs backed, it will have given us zeroed pages. * If the object is stolen however, it will be full of whatever * garbage was left in there. 
*/ if (ifbdev->fb->obj->stolen && !prealloc) memset_io(info->screen_base, 0, info->screen_size); /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width, fb->height, i915_gem_obj_ggtt_offset(obj), obj); mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(dev->pdev, info); return 0; out_unpin: i915_gem_object_ggtt_unpin(obj); drm_gem_object_unreference(&obj->base); out_unlock: mutex_unlock(&dev->struct_mutex); return ret; } /** Sets the color ramps on behalf of RandR */ static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); intel_crtc->lut_r[regno] = red >> 8; intel_crtc->lut_g[regno] = green >> 8; intel_crtc->lut_b[regno] = blue >> 8; } static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); *red = intel_crtc->lut_r[regno] << 8; *green = intel_crtc->lut_g[regno] << 8; *blue = intel_crtc->lut_b[regno] << 8; } static struct drm_fb_helper_crtc * intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc) { int i; for (i = 0; i < fb_helper->crtc_count; i++) if (fb_helper->crtc_info[i].mode_set.crtc == crtc) return &fb_helper->crtc_info[i]; return NULL; } /* * Try to read the BIOS display configuration and use it for the initial * fb configuration. * * The BIOS or boot loader will generally create an initial display * configuration for us that includes some set of active pipes and displays. * This routine tries to figure out which pipes and connectors are active * and stuffs them into the crtcs and modes array given to us by the * drm_fb_helper code. 
* * The overall sequence is: * intel_fbdev_init - from driver load * intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data * drm_fb_helper_init - build fb helper structs * drm_fb_helper_single_add_all_connectors - more fb helper structs * intel_fbdev_initial_config - apply the config * drm_fb_helper_initial_config - call ->probe then register_framebuffer() * drm_setup_crtcs - build crtc config for fbdev * intel_fb_initial_config - find active connectors etc * drm_fb_helper_single_fb_probe - set up fbdev * intelfb_create - re-use or alloc fb, build out fbdev structs * * Note that we don't make special consideration whether we could actually * switch to the selected modes without a full modeset. E.g. when the display * is in VGA mode we need to recalculate watermarks and set a new high-res * framebuffer anyway. */ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, struct drm_fb_helper_crtc **crtcs, struct drm_display_mode **modes, struct drm_fb_offset *offsets, bool *enabled, int width, int height) { struct drm_device *dev = fb_helper->dev; int i, j; bool *save_enabled; bool fallback = true; int num_connectors_enabled = 0; int num_connectors_detected = 0; uint64_t conn_configured = 0, mask; int pass = 0; save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), GFP_KERNEL); if (!save_enabled) return false; memcpy(save_enabled, enabled, dev->mode_config.num_connector); mask = (1 << fb_helper->connector_count) - 1; retry: for (i = 0; i < fb_helper->connector_count; i++) { struct drm_fb_helper_connector *fb_conn; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_fb_helper_crtc *new_crtc; fb_conn = fb_helper->connector_info[i]; connector = fb_conn->connector; if (conn_configured & (1 << i)) continue; if (pass == 0 && !connector->has_tile) continue; if (connector->status == connector_status_connected) num_connectors_detected++; if (!enabled[i]) { DRM_DEBUG_KMS("connector %s not enabled, skipping\n", 
connector->name); conn_configured |= (1 << i); continue; } if (connector->force == DRM_FORCE_OFF) { DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n", connector->name); enabled[i] = false; continue; } encoder = connector->encoder; if (!encoder || WARN_ON(!encoder->crtc)) { if (connector->force > DRM_FORCE_OFF) goto bail; DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", connector->name); enabled[i] = false; conn_configured |= (1 << i); continue; } num_connectors_enabled++; new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc); /* * Make sure we're not trying to drive multiple connectors * with a single CRTC, since our cloning support may not * match the BIOS. */ for (j = 0; j < fb_helper->connector_count; j++) { if (crtcs[j] == new_crtc) { DRM_DEBUG_KMS("fallback: cloned configuration\n"); goto bail; } } DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n", connector->name); /* go for command line mode first */ modes[i] = drm_pick_cmdline_mode(fb_conn, width, height); /* try for preferred next */ if (!modes[i]) { DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n", connector->name, connector->has_tile); modes[i] = drm_has_preferred_mode(fb_conn, width, height); } /* No preferred mode marked by the EDID? Are there any modes? */ if (!modes[i] && !list_empty(&connector->modes)) { DRM_DEBUG_KMS("using first mode listed on connector %s\n", connector->name); modes[i] = list_first_entry(&connector->modes, struct drm_display_mode, head); } /* last resort: use current mode */ if (!modes[i]) { /* * IMPORTANT: We want to use the adjusted mode (i.e. * after the panel fitter upscaling) as the initial * config, not the input mode, which is what crtc->mode * usually contains. But since our current fastboot * code puts a mode derived from the post-pfit timings * into crtc->mode this works out correctly. We don't * use hwmode anywhere right now, so use it for this * since the fb helper layer wants a pointer to * something we own. 
*/ DRM_DEBUG_KMS("looking for current mode on connector %s\n", connector->name); intel_mode_from_pipe_config(&encoder->crtc->hwmode, to_intel_crtc(encoder->crtc)->config); modes[i] = &encoder->crtc->hwmode; } crtcs[i] = new_crtc; DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", connector->name, pipe_name(to_intel_crtc(encoder->crtc)->pipe), encoder->crtc->base.id, modes[i]->hdisplay, modes[i]->vdisplay, modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); fallback = false; conn_configured |= (1 << i); } if ((conn_configured & mask) != mask) { pass++; goto retry; } /* * If the BIOS didn't enable everything it could, fall back to have the * same user experiencing of lighting up as much as possible like the * fbdev helper library. */ if (num_connectors_enabled != num_connectors_detected && num_connectors_enabled < INTEL_INFO(dev)->num_pipes) { DRM_DEBUG_KMS("fallback: Not all outputs enabled\n"); DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled, num_connectors_detected); fallback = true; } if (fallback) { bail: DRM_DEBUG_KMS("Not using firmware configuration\n"); memcpy(enabled, save_enabled, dev->mode_config.num_connector); kfree(save_enabled); return false; } kfree(save_enabled); return true; } static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { .initial_config = intel_fb_initial_config, .gamma_set = intel_crtc_fb_gamma_set, .gamma_get = intel_crtc_fb_gamma_get, .fb_probe = intelfb_create, }; static void intel_fbdev_destroy(struct drm_device *dev, struct intel_fbdev *ifbdev) { if (ifbdev->helper.fbdev) { struct fb_info *info = ifbdev->helper.fbdev; unregister_framebuffer(info); iounmap(info->screen_base); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } drm_fb_helper_fini(&ifbdev->helper); drm_framebuffer_unregister_private(&ifbdev->fb->base); drm_framebuffer_remove(&ifbdev->fb->base); } /* * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible. 
* The core display code will have read out the current plane configuration, * so we use that to figure out if there's an object for us to use as the * fb, and if so, we re-use it for the fbdev configuration. * * Note we only support a single fb shared across pipes for boot (mostly for * fbcon), so we just find the biggest and use that. */ static bool intel_fbdev_init_bios(struct drm_device *dev, struct intel_fbdev *ifbdev) { struct intel_framebuffer *fb = NULL; struct drm_crtc *crtc; struct intel_crtc *intel_crtc; struct intel_initial_plane_config *plane_config = NULL; unsigned int max_size = 0; if (!i915.fastboot) return false; /* Find the largest fb */ for_each_crtc(dev, crtc) { intel_crtc = to_intel_crtc(crtc); if (!intel_crtc->active || !crtc->primary->fb) { DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n", pipe_name(intel_crtc->pipe)); continue; } if (intel_crtc->plane_config.size > max_size) { DRM_DEBUG_KMS("found possible fb from plane %c\n", pipe_name(intel_crtc->pipe)); plane_config = &intel_crtc->plane_config; fb = to_intel_framebuffer(crtc->primary->fb); max_size = plane_config->size; } } if (!fb) { DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n"); goto out; } /* Now make sure all the pipes will fit into it */ for_each_crtc(dev, crtc) { unsigned int cur_size; intel_crtc = to_intel_crtc(crtc); if (!intel_crtc->active) { DRM_DEBUG_KMS("pipe %c not active, skipping\n", pipe_name(intel_crtc->pipe)); continue; } DRM_DEBUG_KMS("checking plane %c for BIOS fb\n", pipe_name(intel_crtc->pipe)); /* * See if the plane fb we found above will fit on this * pipe. Note we need to use the selected fb's pitch and bpp * rather than the current pipe's, since they differ. 
*/ cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay; cur_size = cur_size * fb->base.bits_per_pixel / 8; if (fb->base.pitches[0] < cur_size) { DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n", pipe_name(intel_crtc->pipe), cur_size, fb->base.pitches[0]); plane_config = NULL; fb = NULL; break; } cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay; cur_size = intel_fb_align_height(dev, cur_size, fb->base.pixel_format, fb->base.modifier[0]); cur_size *= fb->base.pitches[0]; DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n", pipe_name(intel_crtc->pipe), intel_crtc->config->base.adjusted_mode.crtc_hdisplay, intel_crtc->config->base.adjusted_mode.crtc_vdisplay, fb->base.bits_per_pixel, cur_size); if (cur_size > max_size) { DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n", pipe_name(intel_crtc->pipe), cur_size, max_size); plane_config = NULL; fb = NULL; break; } DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n", pipe_name(intel_crtc->pipe), max_size, cur_size); } if (!fb) { DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n"); goto out; } ifbdev->preferred_bpp = fb->base.bits_per_pixel; ifbdev->fb = fb; drm_framebuffer_reference(&ifbdev->fb->base); /* Final pass to check if any active pipes don't have fbs */ for_each_crtc(dev, crtc) { intel_crtc = to_intel_crtc(crtc); if (!intel_crtc->active) continue; WARN(!crtc->primary->fb, "re-used BIOS config but lost an fb on crtc %d\n", crtc->base.id); } DRM_DEBUG_KMS("using BIOS fb for initial console\n"); return true; out: return false; } static void intel_fbdev_suspend_worker(struct work_struct *work) { intel_fbdev_set_suspend(container_of(work, struct drm_i915_private, fbdev_suspend_work)->dev, FBINFO_STATE_RUNNING, true); } int intel_fbdev_init(struct drm_device *dev) { struct intel_fbdev *ifbdev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0)) return -ENODEV; ifbdev = kzalloc(sizeof(struct 
intel_fbdev), GFP_KERNEL); if (ifbdev == NULL) return -ENOMEM; drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); if (!intel_fbdev_init_bios(dev, ifbdev)) ifbdev->preferred_bpp = 32; ret = drm_fb_helper_init(dev, &ifbdev->helper, INTEL_INFO(dev)->num_pipes, 4); if (ret) { kfree(ifbdev); return ret; } dev_priv->fbdev = ifbdev; INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker); drm_fb_helper_single_add_all_connectors(&ifbdev->helper); return 0; } void intel_fbdev_initial_config(void *data, async_cookie_t cookie) { struct drm_i915_private *dev_priv = data; struct intel_fbdev *ifbdev = dev_priv->fbdev; /* Due to peculiar init order wrt to hpd handling this is separate. */ drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp); } void intel_fbdev_fini(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (!dev_priv->fbdev) return; flush_work(&dev_priv->fbdev_suspend_work); async_synchronize_full(); intel_fbdev_destroy(dev, dev_priv->fbdev); kfree(dev_priv->fbdev); dev_priv->fbdev = NULL; } void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_fbdev *ifbdev = dev_priv->fbdev; struct fb_info *info; if (!ifbdev) return; info = ifbdev->helper.fbdev; if (synchronous) { /* Flush any pending work to turn the console on, and then * wait to turn it off. It must be synchronous as we are * about to suspend or unload the driver. * * Note that from within the work-handler, we cannot flush * ourselves, so only flush outstanding work upon suspend! */ if (state != FBINFO_STATE_RUNNING) flush_work(&dev_priv->fbdev_suspend_work); console_lock(); } else { /* * The console lock can be pretty contented on resume due * to all the printk activity. Try to keep it out of the hot * path of resume if possible. 
*/ WARN_ON(state != FBINFO_STATE_RUNNING); if (!console_trylock()) { /* Don't block our own workqueue as this can * be run in parallel with other i915.ko tasks. */ schedule_work(&dev_priv->fbdev_suspend_work); return; } } /* On resume from hibernation: If the object is shmemfs backed, it has * been restored from swap. If the object is stolen however, it will be * full of whatever garbage was left in there. */ if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen) memset_io(info->screen_base, 0, info->screen_size); fb_set_suspend(info, state); console_unlock(); } void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (dev_priv->fbdev) drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); } void intel_fbdev_restore_mode(struct drm_device *dev) { int ret; struct drm_i915_private *dev_priv = dev->dev_private; if (!dev_priv->fbdev) return; ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper); if (ret) DRM_DEBUG("failed to restore crtc mode\n"); }
gpl-2.0
moko365/sensorbox-devkit8000-kernel
drivers/w1/slaves/w1_ds2760.c
194
4394
/* * 1-Wire implementation for the ds2760 chip * * Copyright © 2004-2005, Szabolcs Gyurko <szabolcs.gyurko@tlt.hu> * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is * preserved in its entirety in all copies and derived works. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/types.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/idr.h> #include "../w1.h" #include "../w1_int.h" #include "../w1_family.h" #include "w1_ds2760.h" static int w1_ds2760_io(struct device *dev, char *buf, int addr, size_t count, int io) { struct w1_slave *sl = container_of(dev, struct w1_slave, dev); if (!dev) return 0; mutex_lock(&sl->master->mutex); if (addr > DS2760_DATA_SIZE || addr < 0) { count = 0; goto out; } if (addr + count > DS2760_DATA_SIZE) count = DS2760_DATA_SIZE - addr; if (!w1_reset_select_slave(sl)) { if (!io) { w1_write_8(sl->master, W1_DS2760_READ_DATA); w1_write_8(sl->master, addr); count = w1_read_block(sl->master, buf, count); } else { w1_write_8(sl->master, W1_DS2760_WRITE_DATA); w1_write_8(sl->master, addr); w1_write_block(sl->master, buf, count); /* XXX w1_write_block returns void, not n_written */ } } out: mutex_unlock(&sl->master->mutex); return count; } int w1_ds2760_read(struct device *dev, char *buf, int addr, size_t count) { return w1_ds2760_io(dev, buf, addr, count, 0); } int w1_ds2760_write(struct device *dev, char *buf, int addr, size_t count) { return w1_ds2760_io(dev, buf, addr, count, 1); } static ssize_t w1_ds2760_read_bin(struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); return w1_ds2760_read(dev, buf, off, count); } static struct bin_attribute w1_ds2760_bin_attr = { .attr = { .name = "w1_slave", .mode = S_IRUGO, }, .size = DS2760_DATA_SIZE, .read = w1_ds2760_read_bin, }; static DEFINE_IDR(bat_idr); static 
DEFINE_MUTEX(bat_idr_lock); static int new_bat_id(void) { int ret; while (1) { int id; ret = idr_pre_get(&bat_idr, GFP_KERNEL); if (ret == 0) return -ENOMEM; mutex_lock(&bat_idr_lock); ret = idr_get_new(&bat_idr, NULL, &id); mutex_unlock(&bat_idr_lock); if (ret == 0) { ret = id & MAX_ID_MASK; break; } else if (ret == -EAGAIN) { continue; } else { break; } } return ret; } static void release_bat_id(int id) { mutex_lock(&bat_idr_lock); idr_remove(&bat_idr, id); mutex_unlock(&bat_idr_lock); } static int w1_ds2760_add_slave(struct w1_slave *sl) { int ret; int id; struct platform_device *pdev; id = new_bat_id(); if (id < 0) { ret = id; goto noid; } pdev = platform_device_alloc("ds2760-battery", id); if (!pdev) { ret = -ENOMEM; goto pdev_alloc_failed; } pdev->dev.parent = &sl->dev; ret = platform_device_add(pdev); if (ret) goto pdev_add_failed; ret = sysfs_create_bin_file(&sl->dev.kobj, &w1_ds2760_bin_attr); if (ret) goto bin_attr_failed; dev_set_drvdata(&sl->dev, pdev); goto success; bin_attr_failed: pdev_add_failed: platform_device_unregister(pdev); pdev_alloc_failed: release_bat_id(id); noid: success: return ret; } static void w1_ds2760_remove_slave(struct w1_slave *sl) { struct platform_device *pdev = dev_get_drvdata(&sl->dev); int id = pdev->id; platform_device_unregister(pdev); release_bat_id(id); sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2760_bin_attr); } static struct w1_family_ops w1_ds2760_fops = { .add_slave = w1_ds2760_add_slave, .remove_slave = w1_ds2760_remove_slave, }; static struct w1_family w1_ds2760_family = { .fid = W1_FAMILY_DS2760, .fops = &w1_ds2760_fops, }; static int __init w1_ds2760_init(void) { printk(KERN_INFO "1-Wire driver for the DS2760 battery monitor " " chip - (c) 2004-2005, Szabolcs Gyurko\n"); idr_init(&bat_idr); return w1_register_family(&w1_ds2760_family); } static void __exit w1_ds2760_exit(void) { w1_unregister_family(&w1_ds2760_family); idr_destroy(&bat_idr); } EXPORT_SYMBOL(w1_ds2760_read); EXPORT_SYMBOL(w1_ds2760_write); 
module_init(w1_ds2760_init); module_exit(w1_ds2760_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>"); MODULE_DESCRIPTION("1-wire Driver Dallas 2760 battery monitor chip");
gpl-2.0
beyondhenry/uboot20120401
board/freescale/mpc8315erdb/mpc8315erdb.c
194
6178
/* * Copyright (C) 2007 Freescale Semiconductor, Inc. * * Author: Scott Wood <scottwood@freescale.com> * Dave Liu <daveliu@freescale.com> * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS for A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <common.h> #include <hwconfig.h> #include <i2c.h> #include <libfdt.h> #include <fdt_support.h> #include <pci.h> #include <mpc83xx.h> #include <netdev.h> #include <asm/io.h> #include <ns16550.h> #include <nand.h> DECLARE_GLOBAL_DATA_PTR; int board_early_init_f(void) { volatile immap_t *im = (immap_t *)CONFIG_SYS_IMMR; if (im->pmc.pmccr1 & PMCCR1_POWER_OFF) gd->flags |= GD_FLG_SILENT; return 0; } #ifndef CONFIG_NAND_SPL static u8 read_board_info(void) { u8 val8; i2c_set_bus_num(0); if (i2c_read(CONFIG_SYS_I2C_PCF8574A_ADDR, 0, 0, &val8, 1) == 0) return val8; else return 0; } int checkboard(void) { static const char * const rev_str[] = { "0.0", "0.1", "1.0", "1.1", "<unknown>", }; u8 info; int i; info = read_board_info(); i = (!info) ? 
4: info & 0x03; printf("Board: Freescale MPC8315ERDB Rev %s\n", rev_str[i]); return 0; } static struct pci_region pci_regions[] = { { bus_start: CONFIG_SYS_PCI_MEM_BASE, phys_start: CONFIG_SYS_PCI_MEM_PHYS, size: CONFIG_SYS_PCI_MEM_SIZE, flags: PCI_REGION_MEM | PCI_REGION_PREFETCH }, { bus_start: CONFIG_SYS_PCI_MMIO_BASE, phys_start: CONFIG_SYS_PCI_MMIO_PHYS, size: CONFIG_SYS_PCI_MMIO_SIZE, flags: PCI_REGION_MEM }, { bus_start: CONFIG_SYS_PCI_IO_BASE, phys_start: CONFIG_SYS_PCI_IO_PHYS, size: CONFIG_SYS_PCI_IO_SIZE, flags: PCI_REGION_IO } }; static struct pci_region pcie_regions_0[] = { { .bus_start = CONFIG_SYS_PCIE1_MEM_BASE, .phys_start = CONFIG_SYS_PCIE1_MEM_PHYS, .size = CONFIG_SYS_PCIE1_MEM_SIZE, .flags = PCI_REGION_MEM, }, { .bus_start = CONFIG_SYS_PCIE1_IO_BASE, .phys_start = CONFIG_SYS_PCIE1_IO_PHYS, .size = CONFIG_SYS_PCIE1_IO_SIZE, .flags = PCI_REGION_IO, }, }; static struct pci_region pcie_regions_1[] = { { .bus_start = CONFIG_SYS_PCIE2_MEM_BASE, .phys_start = CONFIG_SYS_PCIE2_MEM_PHYS, .size = CONFIG_SYS_PCIE2_MEM_SIZE, .flags = PCI_REGION_MEM, }, { .bus_start = CONFIG_SYS_PCIE2_IO_BASE, .phys_start = CONFIG_SYS_PCIE2_IO_PHYS, .size = CONFIG_SYS_PCIE2_IO_SIZE, .flags = PCI_REGION_IO, }, }; void pci_init_board(void) { volatile immap_t *immr = (volatile immap_t *)CONFIG_SYS_IMMR; volatile sysconf83xx_t *sysconf = &immr->sysconf; volatile clk83xx_t *clk = (volatile clk83xx_t *)&immr->clk; volatile law83xx_t *pci_law = immr->sysconf.pcilaw; volatile law83xx_t *pcie_law = sysconf->pcielaw; struct pci_region *reg[] = { pci_regions }; struct pci_region *pcie_reg[] = { pcie_regions_0, pcie_regions_1, }; /* Enable all 3 PCI_CLK_OUTPUTs. 
*/ clk->occr |= 0xe0000000; /* * Configure PCI Local Access Windows */ pci_law[0].bar = CONFIG_SYS_PCI_MEM_PHYS & LAWBAR_BAR; pci_law[0].ar = LBLAWAR_EN | LBLAWAR_512MB; pci_law[1].bar = CONFIG_SYS_PCI_IO_PHYS & LAWBAR_BAR; pci_law[1].ar = LBLAWAR_EN | LBLAWAR_1MB; mpc83xx_pci_init(1, reg); /* Configure the clock for PCIE controller */ clrsetbits_be32(&clk->sccr, SCCR_PCIEXP1CM | SCCR_PCIEXP2CM, SCCR_PCIEXP1CM_1 | SCCR_PCIEXP2CM_1); /* Deassert the resets in the control register */ out_be32(&sysconf->pecr1, 0xE0008000); out_be32(&sysconf->pecr2, 0xE0008000); udelay(2000); /* Configure PCI Express Local Access Windows */ out_be32(&pcie_law[0].bar, CONFIG_SYS_PCIE1_BASE & LAWBAR_BAR); out_be32(&pcie_law[0].ar, LBLAWAR_EN | LBLAWAR_512MB); out_be32(&pcie_law[1].bar, CONFIG_SYS_PCIE2_BASE & LAWBAR_BAR); out_be32(&pcie_law[1].ar, LBLAWAR_EN | LBLAWAR_512MB); mpc83xx_pcie_init(2, pcie_reg); } #if defined(CONFIG_OF_BOARD_SETUP) void fdt_tsec1_fixup(void *fdt, bd_t *bd) { const char disabled[] = "disabled"; const char *path; int ret; if (hwconfig_arg_cmp("board_type", "tsec1")) { return; } else if (!hwconfig_arg_cmp("board_type", "ulpi")) { printf("NOTICE: No or unknown board_type hwconfig specified.\n" " Assuming board with TSEC1.\n"); return; } ret = fdt_path_offset(fdt, "/aliases"); if (ret < 0) { printf("WARNING: can't find /aliases node\n"); return; } path = fdt_getprop(fdt, ret, "ethernet0", NULL); if (!path) { printf("WARNING: can't find ethernet0 alias\n"); return; } do_fixup_by_path(fdt, path, "status", disabled, sizeof(disabled), 1); } void ft_board_setup(void *blob, bd_t *bd) { ft_cpu_setup(blob, bd); #ifdef CONFIG_PCI ft_pci_setup(blob, bd); #endif fdt_fixup_dr_usb(blob, bd); fdt_tsec1_fixup(blob, bd); } #endif int board_eth_init(bd_t *bis) { cpu_eth_init(bis); /* Initialize TSECs first */ return pci_eth_init(bis); } #else /* CONFIG_NAND_SPL */ int checkboard(void) { puts("Board: Freescale MPC8315ERDB\n"); return 0; } void board_init_f(ulong bootflag) { 
board_early_init_f(); NS16550_init((NS16550_t)(CONFIG_SYS_IMMR + 0x4500), CONFIG_SYS_NS16550_CLK / 16 / CONFIG_BAUDRATE); puts("NAND boot... "); init_timebase(); initdram(0); relocate_code(CONFIG_SYS_NAND_U_BOOT_RELOC + 0x10000, (gd_t *)gd, CONFIG_SYS_NAND_U_BOOT_RELOC); } void board_init_r(gd_t *gd, ulong dest_addr) { nand_boot(); } void putc(char c) { if (gd->flags & GD_FLG_SILENT) return; if (c == '\n') NS16550_putc((NS16550_t)(CONFIG_SYS_IMMR + 0x4500), '\r'); NS16550_putc((NS16550_t)(CONFIG_SYS_IMMR + 0x4500), c); } #endif /* CONFIG_NAND_SPL */
gpl-2.0
MassStash/htc_pme_kernel_sense_6.0
drivers/net/appletalk/cops.c
1986
29348
/* cops.c: LocalTalk driver for Linux. * * Authors: * - Jay Schulist <jschlst@samba.org> * * With more than a little help from; * - Alan Cox <alan@lxorguk.ukuu.org.uk> * * Derived from: * - skeleton.c: A network driver outline for linux. * Written 1993-94 by Donald Becker. * - ltpc.c: A driver for the LocalTalk PC card. * Written by Bradford W. Johnson. * * Copyright 1993 United States Government as represented by the * Director, National Security Agency. * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * Changes: * 19970608 Alan Cox Allowed dual card type support * Can set board type in insmod * Hooks for cops_setup routine * (not yet implemented). * 19971101 Jay Schulist Fixes for multiple lt* devices. * 19980607 Steven Hirsch Fixed the badly broken support * for Tangent type cards. Only * tested on Daystar LT200. Some * cleanup of formatting and program * logic. Added emacs 'local-vars' * setup for Jay's brace style. * 20000211 Alan Cox Cleaned up for softnet */ static const char *version = "cops.c:v0.04 6/7/98 Jay Schulist <jschlst@samba.org>\n"; /* * Sources: * COPS Localtalk SDK. This provides almost all of the information * needed. */ /* * insmod/modprobe configurable stuff. * - IO Port, choose one your card supports or 0 if you dare. * - IRQ, also choose one your card supports or nothing and let * the driver figure it out. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/if_ltalk.h> #include <linux/delay.h> /* For udelay() */ #include <linux/atalk.h> #include <linux/spinlock.h> #include <linux/bitops.h> #include <linux/jiffies.h> #include <asm/io.h> #include <asm/dma.h> #include "cops.h" /* Our Stuff */ #include "cops_ltdrv.h" /* Firmware code for Tangent type cards. */ #include "cops_ffdrv.h" /* Firmware code for Dayna type cards. */ /* * The name of the card. Is used for messages and in the requests for * io regions, irqs and dma channels */ static const char *cardname = "cops"; #ifdef CONFIG_COPS_DAYNA static int board_type = DAYNA; /* Module exported */ #else static int board_type = TANGENT; #endif static int io = 0x240; /* Default IO for Dayna */ static int irq = 5; /* Default IRQ */ /* * COPS Autoprobe information. * Right now if port address is right but IRQ is not 5 this will * return a 5 no matter what since we will still get a status response. * Need one more additional check to narrow down after we have gotten * the ioaddr. But since only other possible IRQs is 3 and 4 so no real * hurry on this. I *STRONGLY* recommend using IRQ 5 for your card with * this driver. * * This driver has 2 modes and they are: Dayna mode and Tangent mode. * Each mode corresponds with the type of card. It has been found * that there are 2 main types of cards and all other cards are * the same and just have different names or only have minor differences * such as more IO ports. As this driver is tested it will * become more clear on exactly what cards are supported. The driver * defaults to using Dayna mode. 
To change the drivers mode, simply * select Dayna or Tangent mode when configuring the kernel. * * This driver should support: * TANGENT driver mode: * Tangent ATB-II, Novell NL-1000, Daystar Digital LT-200, * COPS LT-1 * DAYNA driver mode: * Dayna DL2000/DaynaTalk PC (Half Length), COPS LT-95, * Farallon PhoneNET PC III, Farallon PhoneNET PC II * Other cards possibly supported mode unknown though: * Dayna DL2000 (Full length), COPS LT/M (Micro-Channel) * * Cards NOT supported by this driver but supported by the ltpc.c * driver written by Bradford W. Johnson <johns393@maroon.tc.umn.edu> * Farallon PhoneNET PC * Original Apple LocalTalk PC card * * N.B. * * The Daystar Digital LT200 boards do not support interrupt-driven * IO. You must specify 'irq=0xff' as a module parameter to invoke * polled mode. I also believe that the port probing logic is quite * dangerous at best and certainly hopeless for a polled card. Best to * specify both. - Steve H. * */ /* * Zero terminated list of IO ports to probe. */ static unsigned int ports[] = { 0x240, 0x340, 0x200, 0x210, 0x220, 0x230, 0x260, 0x2A0, 0x300, 0x310, 0x320, 0x330, 0x350, 0x360, 0 }; /* * Zero terminated list of IRQ ports to probe. */ static int cops_irqlist[] = { 5, 4, 3, 0 }; static struct timer_list cops_timer; /* use 0 for production, 1 for verification, 2 for debug, 3 for verbose debug */ #ifndef COPS_DEBUG #define COPS_DEBUG 1 #endif static unsigned int cops_debug = COPS_DEBUG; /* The number of low I/O ports used by the card. */ #define COPS_IO_EXTENT 8 /* Information that needs to be kept for each board. */ struct cops_local { int board; /* Holds what board type is. */ int nodeid; /* Set to 1 once have nodeid. */ unsigned char node_acquire; /* Node ID when acquired. */ struct atalk_addr node_addr; /* Full node address */ spinlock_t lock; /* RX/TX lock */ }; /* Index to functions, as function prototypes. 
*/ static int cops_probe1 (struct net_device *dev, int ioaddr); static int cops_irq (int ioaddr, int board); static int cops_open (struct net_device *dev); static int cops_jumpstart (struct net_device *dev); static void cops_reset (struct net_device *dev, int sleep); static void cops_load (struct net_device *dev); static int cops_nodeid (struct net_device *dev, int nodeid); static irqreturn_t cops_interrupt (int irq, void *dev_id); static void cops_poll (unsigned long ltdev); static void cops_timeout(struct net_device *dev); static void cops_rx (struct net_device *dev); static netdev_tx_t cops_send_packet (struct sk_buff *skb, struct net_device *dev); static void set_multicast_list (struct net_device *dev); static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); static int cops_close (struct net_device *dev); static void cleanup_card(struct net_device *dev) { if (dev->irq) free_irq(dev->irq, dev); release_region(dev->base_addr, COPS_IO_EXTENT); } /* * Check for a network adaptor of this type, and return '0' iff one exists. * If dev->base_addr == 0, probe all likely locations. * If dev->base_addr in [1..0x1ff], always return failure. * otherwise go with what we pass in. */ struct net_device * __init cops_probe(int unit) { struct net_device *dev; unsigned *port; int base_addr; int err = 0; dev = alloc_ltalkdev(sizeof(struct cops_local)); if (!dev) return ERR_PTR(-ENOMEM); if (unit >= 0) { sprintf(dev->name, "lt%d", unit); netdev_boot_setup_check(dev); irq = dev->irq; base_addr = dev->base_addr; } else { base_addr = dev->base_addr = io; } if (base_addr > 0x1ff) { /* Check a single specified location. */ err = cops_probe1(dev, base_addr); } else if (base_addr != 0) { /* Don't probe at all. */ err = -ENXIO; } else { /* FIXME Does this really work for cards which generate irq? * It's definitely N.G. for polled Tangent. sh * Dayna cards don't autoprobe well at all, but if your card is * at IRQ 5 & IO 0x240 we find it every time. 
;) JS */ for (port = ports; *port && cops_probe1(dev, *port) < 0; port++) ; if (!*port) err = -ENODEV; } if (err) goto out; err = register_netdev(dev); if (err) goto out1; return dev; out1: cleanup_card(dev); out: free_netdev(dev); return ERR_PTR(err); } static const struct net_device_ops cops_netdev_ops = { .ndo_open = cops_open, .ndo_stop = cops_close, .ndo_start_xmit = cops_send_packet, .ndo_tx_timeout = cops_timeout, .ndo_do_ioctl = cops_ioctl, .ndo_set_rx_mode = set_multicast_list, }; /* * This is the real probe routine. Linux has a history of friendly device * probes on the ISA bus. A good device probes avoids doing writes, and * verifies that the correct device exists and functions. */ static int __init cops_probe1(struct net_device *dev, int ioaddr) { struct cops_local *lp; static unsigned version_printed; int board = board_type; int retval; if(cops_debug && version_printed++ == 0) printk("%s", version); /* Grab the region so no one else tries to probe our ioports. */ if (!request_region(ioaddr, COPS_IO_EXTENT, dev->name)) return -EBUSY; /* * Since this board has jumpered interrupts, allocate the interrupt * vector now. There is no point in waiting since no other device * can use the interrupt, and this marks the irq as busy. Jumpered * interrupts are typically not reported by the boards, and we must * used AutoIRQ to find them. */ dev->irq = irq; switch (dev->irq) { case 0: /* COPS AutoIRQ routine */ dev->irq = cops_irq(ioaddr, board); if (dev->irq) break; /* No IRQ found on this port, fallthrough */ case 1: retval = -EINVAL; goto err_out; /* Fixup for users that don't know that IRQ 2 is really * IRQ 9, or don't know which one to set. */ case 2: dev->irq = 9; break; /* Polled operation requested. Although irq of zero passed as * a parameter tells the init routines to probe, we'll * overload it to denote polled operation at runtime. */ case 0xff: dev->irq = 0; break; default: break; } /* Reserve any actual interrupt. 
*/ if (dev->irq) { retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev); if (retval) goto err_out; } dev->base_addr = ioaddr; lp = netdev_priv(dev); spin_lock_init(&lp->lock); /* Copy local board variable to lp struct. */ lp->board = board; dev->netdev_ops = &cops_netdev_ops; dev->watchdog_timeo = HZ * 2; /* Tell the user where the card is and what mode we're in. */ if(board==DAYNA) printk("%s: %s at %#3x, using IRQ %d, in Dayna mode.\n", dev->name, cardname, ioaddr, dev->irq); if(board==TANGENT) { if(dev->irq) printk("%s: %s at %#3x, IRQ %d, in Tangent mode\n", dev->name, cardname, ioaddr, dev->irq); else printk("%s: %s at %#3x, using polled IO, in Tangent mode.\n", dev->name, cardname, ioaddr); } return 0; err_out: release_region(ioaddr, COPS_IO_EXTENT); return retval; } static int __init cops_irq (int ioaddr, int board) { /* * This does not use the IRQ to determine where the IRQ is. We just * assume that when we get a correct status response that it's the IRQ. * This really just verifies the IO port but since we only have access * to such a small number of IRQs (5, 4, 3) this is not bad. * This will probably not work for more than one card. */ int irqaddr=0; int i, x, status; if(board==DAYNA) { outb(0, ioaddr+DAYNA_RESET); inb(ioaddr+DAYNA_RESET); mdelay(333); } if(board==TANGENT) { inb(ioaddr); outb(0, ioaddr); outb(0, ioaddr+TANG_RESET); } for(i=0; cops_irqlist[i] !=0; i++) { irqaddr = cops_irqlist[i]; for(x = 0xFFFF; x>0; x --) /* wait for response */ { if(board==DAYNA) { status = (inb(ioaddr+DAYNA_CARD_STATUS)&3); if(status == 1) return irqaddr; } if(board==TANGENT) { if((inb(ioaddr+TANG_CARD_STATUS)& TANG_TX_READY) !=0) return irqaddr; } } } return 0; /* no IRQ found */ } /* * Open/initialize the board. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. 
*/ static int cops_open(struct net_device *dev) { struct cops_local *lp = netdev_priv(dev); if(dev->irq==0) { /* * I don't know if the Dayna-style boards support polled * operation. For now, only allow it for Tangent. */ if(lp->board==TANGENT) /* Poll 20 times per second */ { init_timer(&cops_timer); cops_timer.function = cops_poll; cops_timer.data = (unsigned long)dev; cops_timer.expires = jiffies + HZ/20; add_timer(&cops_timer); } else { printk(KERN_WARNING "%s: No irq line set\n", dev->name); return -EAGAIN; } } cops_jumpstart(dev); /* Start the card up. */ netif_start_queue(dev); return 0; } /* * This allows for a dynamic start/restart of the entire card. */ static int cops_jumpstart(struct net_device *dev) { struct cops_local *lp = netdev_priv(dev); /* * Once the card has the firmware loaded and has acquired * the nodeid, if it is reset it will lose it all. */ cops_reset(dev,1); /* Need to reset card before load firmware. */ cops_load(dev); /* Load the firmware. */ /* * If atalkd already gave us a nodeid we will use that * one again, else we wait for atalkd to give us a nodeid * in cops_ioctl. This may cause a problem if someone steals * our nodeid while we are resetting. */ if(lp->nodeid == 1) cops_nodeid(dev,lp->node_acquire); return 0; } static void tangent_wait_reset(int ioaddr) { int timeout=0; while(timeout++ < 5 && (inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0) mdelay(1); /* Wait 1 second */ } /* * Reset the LocalTalk board. */ static void cops_reset(struct net_device *dev, int sleep) { struct cops_local *lp = netdev_priv(dev); int ioaddr=dev->base_addr; if(lp->board==TANGENT) { inb(ioaddr); /* Clear request latch. */ outb(0,ioaddr); /* Clear the TANG_TX_READY flop. */ outb(0, ioaddr+TANG_RESET); /* Reset the adapter. 
*/ tangent_wait_reset(ioaddr); outb(0, ioaddr+TANG_CLEAR_INT); } if(lp->board==DAYNA) { outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */ inb(ioaddr+DAYNA_RESET); /* Clear the reset */ if (sleep) msleep(333); else mdelay(333); } netif_wake_queue(dev); } static void cops_load (struct net_device *dev) { struct ifreq ifr; struct ltfirmware *ltf= (struct ltfirmware *)&ifr.ifr_ifru; struct cops_local *lp = netdev_priv(dev); int ioaddr=dev->base_addr; int length, i = 0; strcpy(ifr.ifr_name,"lt0"); /* Get card's firmware code and do some checks on it. */ #ifdef CONFIG_COPS_DAYNA if(lp->board==DAYNA) { ltf->length=sizeof(ffdrv_code); ltf->data=ffdrv_code; } else #endif #ifdef CONFIG_COPS_TANGENT if(lp->board==TANGENT) { ltf->length=sizeof(ltdrv_code); ltf->data=ltdrv_code; } else #endif { printk(KERN_INFO "%s; unsupported board type.\n", dev->name); return; } /* Check to make sure firmware is correct length. */ if(lp->board==DAYNA && ltf->length!=5983) { printk(KERN_WARNING "%s: Firmware is not length of FFDRV.BIN.\n", dev->name); return; } if(lp->board==TANGENT && ltf->length!=2501) { printk(KERN_WARNING "%s: Firmware is not length of DRVCODE.BIN.\n", dev->name); return; } if(lp->board==DAYNA) { /* * We must wait for a status response * with the DAYNA board. */ while(++i<65536) { if((inb(ioaddr+DAYNA_CARD_STATUS)&3)==1) break; } if(i==65536) return; } /* * Upload the firmware and kick. Byte-by-byte works nicely here. */ i=0; length = ltf->length; while(length--) { outb(ltf->data[i], ioaddr); i++; } if(cops_debug > 1) printk("%s: Uploaded firmware - %d bytes of %d bytes.\n", dev->name, i, ltf->length); if(lp->board==DAYNA) /* Tell Dayna to run the firmware code. */ outb(1, ioaddr+DAYNA_INT_CARD); else /* Tell Tang to run the firmware code. */ inb(ioaddr); if(lp->board==TANGENT) { tangent_wait_reset(ioaddr); inb(ioaddr); /* Clear initial ready signal. */ } } /* * Get the LocalTalk Nodeid from the card. We can suggest * any nodeid 1-254. 
The card will try and get that exact * address else we can specify 0 as the nodeid and the card * will autoprobe for a nodeid. */ static int cops_nodeid (struct net_device *dev, int nodeid) { struct cops_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; if(lp->board == DAYNA) { /* Empty any pending adapter responses. */ while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0) { outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupts. */ if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST) cops_rx(dev); /* Kick any packets waiting. */ schedule(); } outb(2, ioaddr); /* Output command packet length as 2. */ outb(0, ioaddr); outb(LAP_INIT, ioaddr); /* Send LAP_INIT command byte. */ outb(nodeid, ioaddr); /* Suggest node address. */ } if(lp->board == TANGENT) { /* Empty any pending adapter responses. */ while(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY) { outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupt. */ cops_rx(dev); /* Kick out packets waiting. */ schedule(); } /* Not sure what Tangent does if nodeid picked is used. */ if(nodeid == 0) /* Seed. */ nodeid = jiffies&0xFF; /* Get a random try */ outb(2, ioaddr); /* Command length LSB */ outb(0, ioaddr); /* Command length MSB */ outb(LAP_INIT, ioaddr); /* Send LAP_INIT byte */ outb(nodeid, ioaddr); /* LAP address hint. */ outb(0xFF, ioaddr); /* Int. level to use */ } lp->node_acquire=0; /* Set nodeid holder to 0. */ while(lp->node_acquire==0) /* Get *True* nodeid finally. */ { outb(0, ioaddr+COPS_CLEAR_INT); /* Clear any interrupt. */ if(lp->board == DAYNA) { if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST) cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */ } if(lp->board == TANGENT) { if(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY) cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */ } schedule(); } if(cops_debug > 1) printk(KERN_DEBUG "%s: Node ID %d has been acquired.\n", dev->name, lp->node_acquire); lp->nodeid=1; /* Set got nodeid to 1. 
*/ return 0; } /* * Poll the Tangent type cards to see if we have work. */ static void cops_poll(unsigned long ltdev) { int ioaddr, status; int boguscount = 0; struct net_device *dev = (struct net_device *)ltdev; del_timer(&cops_timer); if(dev == NULL) return; /* We've been downed */ ioaddr = dev->base_addr; do { status=inb(ioaddr+TANG_CARD_STATUS); if(status & TANG_RX_READY) cops_rx(dev); if(status & TANG_TX_READY) netif_wake_queue(dev); status = inb(ioaddr+TANG_CARD_STATUS); } while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY))); /* poll 20 times per second */ cops_timer.expires = jiffies + HZ/20; add_timer(&cops_timer); } /* * The typical workload of the driver: * Handle the network interface interrupts. */ static irqreturn_t cops_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct cops_local *lp; int ioaddr, status; int boguscount = 0; ioaddr = dev->base_addr; lp = netdev_priv(dev); if(lp->board==DAYNA) { do { outb(0, ioaddr + COPS_CLEAR_INT); status=inb(ioaddr+DAYNA_CARD_STATUS); if((status&0x03)==DAYNA_RX_REQUEST) cops_rx(dev); netif_wake_queue(dev); } while(++boguscount < 20); } else { do { status=inb(ioaddr+TANG_CARD_STATUS); if(status & TANG_RX_READY) cops_rx(dev); if(status & TANG_TX_READY) netif_wake_queue(dev); status=inb(ioaddr+TANG_CARD_STATUS); } while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY))); } return IRQ_HANDLED; } /* * We have a good packet(s), get it/them out of the buffers. */ static void cops_rx(struct net_device *dev) { int pkt_len = 0; int rsp_type = 0; struct sk_buff *skb = NULL; struct cops_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; int boguscount = 0; unsigned long flags; spin_lock_irqsave(&lp->lock, flags); if(lp->board==DAYNA) { outb(0, ioaddr); /* Send out Zero length. */ outb(0, ioaddr); outb(DATA_READ, ioaddr); /* Send read command out. */ /* Wait for DMA to turn around. 
*/ while(++boguscount<1000000) { barrier(); if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_READY) break; } if(boguscount==1000000) { printk(KERN_WARNING "%s: DMA timed out.\n",dev->name); spin_unlock_irqrestore(&lp->lock, flags); return; } } /* Get response length. */ if(lp->board==DAYNA) pkt_len = inb(ioaddr) & 0xFF; else pkt_len = inb(ioaddr) & 0x00FF; pkt_len |= (inb(ioaddr) << 8); /* Input IO code. */ rsp_type=inb(ioaddr); /* Malloc up new buffer. */ skb = dev_alloc_skb(pkt_len); if(skb == NULL) { printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; while(pkt_len--) /* Discard packet */ inb(ioaddr); spin_unlock_irqrestore(&lp->lock, flags); return; } skb->dev = dev; skb_put(skb, pkt_len); skb->protocol = htons(ETH_P_LOCALTALK); insb(ioaddr, skb->data, pkt_len); /* Eat the Data */ if(lp->board==DAYNA) outb(1, ioaddr+DAYNA_INT_CARD); /* Interrupt the card */ spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */ /* Check for bad response length */ if(pkt_len < 0 || pkt_len > MAX_LLAP_SIZE) { printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n", dev->name, pkt_len); dev->stats.tx_errors++; dev_kfree_skb_any(skb); return; } /* Set nodeid and then get out. */ if(rsp_type == LAP_INIT_RSP) { /* Nodeid taken from received packet. */ lp->node_acquire = skb->data[0]; dev_kfree_skb_any(skb); return; } /* One last check to make sure we have a good packet. */ if(rsp_type != LAP_RESPONSE) { printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type); dev->stats.tx_errors++; dev_kfree_skb_any(skb); return; } skb_reset_mac_header(skb); /* Point to entire packet. */ skb_pull(skb,3); skb_reset_transport_header(skb); /* Point to data (Skip header). */ /* Update the counters. */ dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; /* Send packet to a higher place. 
*/ netif_rx(skb); } static void cops_timeout(struct net_device *dev) { struct cops_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; dev->stats.tx_errors++; if(lp->board==TANGENT) { if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0) printk(KERN_WARNING "%s: No TX complete interrupt.\n", dev->name); } printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name); cops_jumpstart(dev); /* Restart the card. */ dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); } /* * Make the card transmit a LocalTalk packet. */ static netdev_tx_t cops_send_packet(struct sk_buff *skb, struct net_device *dev) { struct cops_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; unsigned long flags; /* * Block a timer-based transmit from overlapping. */ netif_stop_queue(dev); spin_lock_irqsave(&lp->lock, flags); if(lp->board == DAYNA) /* Wait for adapter transmit buffer. */ while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0) cpu_relax(); if(lp->board == TANGENT) /* Wait for adapter transmit buffer. */ while((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0) cpu_relax(); /* Output IO length. */ outb(skb->len, ioaddr); if(lp->board == DAYNA) outb(skb->len >> 8, ioaddr); else outb((skb->len >> 8)&0x0FF, ioaddr); /* Output IO code. */ outb(LAP_WRITE, ioaddr); if(lp->board == DAYNA) /* Check the transmit buffer again. */ while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0); outsb(ioaddr, skb->data, skb->len); /* Send out the data. */ if(lp->board==DAYNA) /* Dayna requires you kick the card */ outb(1, ioaddr+DAYNA_INT_CARD); spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */ /* Done sending packet, update counters and cleanup. */ dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; dev_kfree_skb (skb); return NETDEV_TX_OK; } /* * Dummy function to keep the Appletalk layer happy. 
*/ static void set_multicast_list(struct net_device *dev) { if(cops_debug >= 3) printk("%s: set_multicast_list executed\n", dev->name); } /* * System ioctls for the COPS LocalTalk card. */ static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct cops_local *lp = netdev_priv(dev); struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr; struct atalk_addr *aa = &lp->node_addr; switch(cmd) { case SIOCSIFADDR: /* Get and set the nodeid and network # atalkd wants. */ cops_nodeid(dev, sa->sat_addr.s_node); aa->s_net = sa->sat_addr.s_net; aa->s_node = lp->node_acquire; /* Set broardcast address. */ dev->broadcast[0] = 0xFF; /* Set hardware address. */ dev->dev_addr[0] = aa->s_node; dev->addr_len = 1; return 0; case SIOCGIFADDR: sa->sat_addr.s_net = aa->s_net; sa->sat_addr.s_node = aa->s_node; return 0; default: return -EOPNOTSUPP; } } /* * The inverse routine to cops_open(). */ static int cops_close(struct net_device *dev) { struct cops_local *lp = netdev_priv(dev); /* If we were running polled, yank the timer. */ if(lp->board==TANGENT && dev->irq==0) del_timer(&cops_timer); netif_stop_queue(dev); return 0; } #ifdef MODULE static struct net_device *cops_dev; MODULE_LICENSE("GPL"); module_param(io, int, 0); module_param(irq, int, 0); module_param(board_type, int, 0); static int __init cops_module_init(void) { if (io == 0) printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", cardname); cops_dev = cops_probe(-1); return PTR_ERR_OR_ZERO(cops_dev); } static void __exit cops_module_exit(void) { unregister_netdev(cops_dev); cleanup_card(cops_dev); free_netdev(cops_dev); } module_init(cops_module_init); module_exit(cops_module_exit); #endif /* MODULE */
gpl-2.0
Borkata/adam-nv-3.1
arch/mips/netlogic/xlr/xlr_console.c
2754
1910
/* * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights * reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the NetLogic * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/types.h> #include <asm/netlogic/xlr/iomap.h> void prom_putchar(char c) { nlm_reg_t *mmio; mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); while (netlogic_read_reg(mmio, 0x5) == 0) ; netlogic_write_reg(mmio, 0x0, c); }
gpl-2.0
aapav01/android_kernel_samsung_ms013g-2
drivers/gpu/drm/radeon/radeon_combios.c
3266
102307
/* * Copyright 2004 ATI Technologies Inc., Markham, Ontario * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher */ #include "drmP.h" #include "radeon_drm.h" #include "radeon.h" #include "atom.h" #ifdef CONFIG_PPC_PMAC /* not sure which of these are needed */ #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #endif /* CONFIG_PPC_PMAC */ /* from radeon_encoder.c */ extern uint32_t radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac); extern void radeon_link_encoder_connector(struct drm_device *dev); /* from radeon_connector.c */ extern void radeon_add_legacy_connector(struct drm_device *dev, uint32_t connector_id, uint32_t supported_device, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, uint16_t connector_object_id, struct radeon_hpd *hpd); /* from radeon_legacy_encoder.c */ extern void radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device); /* old legacy ATI BIOS routines */ /* COMBIOS table offsets */ enum radeon_combios_table_offset { /* absolute offset tables */ COMBIOS_ASIC_INIT_1_TABLE, COMBIOS_BIOS_SUPPORT_TABLE, COMBIOS_DAC_PROGRAMMING_TABLE, COMBIOS_MAX_COLOR_DEPTH_TABLE, COMBIOS_CRTC_INFO_TABLE, COMBIOS_PLL_INFO_TABLE, COMBIOS_TV_INFO_TABLE, COMBIOS_DFP_INFO_TABLE, COMBIOS_HW_CONFIG_INFO_TABLE, COMBIOS_MULTIMEDIA_INFO_TABLE, COMBIOS_TV_STD_PATCH_TABLE, COMBIOS_LCD_INFO_TABLE, COMBIOS_MOBILE_INFO_TABLE, COMBIOS_PLL_INIT_TABLE, COMBIOS_MEM_CONFIG_TABLE, COMBIOS_SAVE_MASK_TABLE, COMBIOS_HARDCODED_EDID_TABLE, COMBIOS_ASIC_INIT_2_TABLE, COMBIOS_CONNECTOR_INFO_TABLE, COMBIOS_DYN_CLK_1_TABLE, COMBIOS_RESERVED_MEM_TABLE, COMBIOS_EXT_TMDS_INFO_TABLE, COMBIOS_MEM_CLK_INFO_TABLE, COMBIOS_EXT_DAC_INFO_TABLE, COMBIOS_MISC_INFO_TABLE, COMBIOS_CRT_INFO_TABLE, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE, COMBIOS_COMPONENT_VIDEO_INFO_TABLE, COMBIOS_FAN_SPEED_INFO_TABLE, COMBIOS_OVERDRIVE_INFO_TABLE, COMBIOS_OEM_INFO_TABLE, COMBIOS_DYN_CLK_2_TABLE, COMBIOS_POWER_CONNECTOR_INFO_TABLE, COMBIOS_I2C_INFO_TABLE, /* 
relative offset tables */ COMBIOS_ASIC_INIT_3_TABLE, /* offset from misc info */ COMBIOS_ASIC_INIT_4_TABLE, /* offset from misc info */ COMBIOS_DETECTED_MEM_TABLE, /* offset from misc info */ COMBIOS_ASIC_INIT_5_TABLE, /* offset from misc info */ COMBIOS_RAM_RESET_TABLE, /* offset from mem config */ COMBIOS_POWERPLAY_INFO_TABLE, /* offset from mobile info */ COMBIOS_GPIO_INFO_TABLE, /* offset from mobile info */ COMBIOS_LCD_DDC_INFO_TABLE, /* offset from mobile info */ COMBIOS_TMDS_POWER_TABLE, /* offset from mobile info */ COMBIOS_TMDS_POWER_ON_TABLE, /* offset from tmds power */ COMBIOS_TMDS_POWER_OFF_TABLE, /* offset from tmds power */ }; enum radeon_combios_ddc { DDC_NONE_DETECTED, DDC_MONID, DDC_DVI, DDC_VGA, DDC_CRT2, DDC_LCD, DDC_GPIO, }; enum radeon_combios_connector { CONNECTOR_NONE_LEGACY, CONNECTOR_PROPRIETARY_LEGACY, CONNECTOR_CRT_LEGACY, CONNECTOR_DVI_I_LEGACY, CONNECTOR_DVI_D_LEGACY, CONNECTOR_CTV_LEGACY, CONNECTOR_STV_LEGACY, CONNECTOR_UNSUPPORTED_LEGACY }; const int legacy_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_DVID, DRM_MODE_CONNECTOR_VGA, DRM_MODE_CONNECTOR_DVII, DRM_MODE_CONNECTOR_DVID, DRM_MODE_CONNECTOR_Composite, DRM_MODE_CONNECTOR_SVIDEO, DRM_MODE_CONNECTOR_Unknown, }; static uint16_t combios_get_table_offset(struct drm_device *dev, enum radeon_combios_table_offset table) { struct radeon_device *rdev = dev->dev_private; int rev; uint16_t offset = 0, check_offset; if (!rdev->bios) return 0; switch (table) { /* absolute offset tables */ case COMBIOS_ASIC_INIT_1_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0xc); if (check_offset) offset = check_offset; break; case COMBIOS_BIOS_SUPPORT_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x14); if (check_offset) offset = check_offset; break; case COMBIOS_DAC_PROGRAMMING_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x2a); if (check_offset) offset = check_offset; break; case COMBIOS_MAX_COLOR_DEPTH_TABLE: check_offset = 
RBIOS16(rdev->bios_header_start + 0x2c); if (check_offset) offset = check_offset; break; case COMBIOS_CRTC_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x2e); if (check_offset) offset = check_offset; break; case COMBIOS_PLL_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x30); if (check_offset) offset = check_offset; break; case COMBIOS_TV_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x32); if (check_offset) offset = check_offset; break; case COMBIOS_DFP_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x34); if (check_offset) offset = check_offset; break; case COMBIOS_HW_CONFIG_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x36); if (check_offset) offset = check_offset; break; case COMBIOS_MULTIMEDIA_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x38); if (check_offset) offset = check_offset; break; case COMBIOS_TV_STD_PATCH_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x3e); if (check_offset) offset = check_offset; break; case COMBIOS_LCD_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x40); if (check_offset) offset = check_offset; break; case COMBIOS_MOBILE_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x42); if (check_offset) offset = check_offset; break; case COMBIOS_PLL_INIT_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x46); if (check_offset) offset = check_offset; break; case COMBIOS_MEM_CONFIG_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x48); if (check_offset) offset = check_offset; break; case COMBIOS_SAVE_MASK_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x4a); if (check_offset) offset = check_offset; break; case COMBIOS_HARDCODED_EDID_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x4c); if (check_offset) offset = check_offset; break; case COMBIOS_ASIC_INIT_2_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x4e); if (check_offset) offset = check_offset; break; case 
COMBIOS_CONNECTOR_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x50); if (check_offset) offset = check_offset; break; case COMBIOS_DYN_CLK_1_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x52); if (check_offset) offset = check_offset; break; case COMBIOS_RESERVED_MEM_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x54); if (check_offset) offset = check_offset; break; case COMBIOS_EXT_TMDS_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x58); if (check_offset) offset = check_offset; break; case COMBIOS_MEM_CLK_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x5a); if (check_offset) offset = check_offset; break; case COMBIOS_EXT_DAC_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x5c); if (check_offset) offset = check_offset; break; case COMBIOS_MISC_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x5e); if (check_offset) offset = check_offset; break; case COMBIOS_CRT_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x60); if (check_offset) offset = check_offset; break; case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x62); if (check_offset) offset = check_offset; break; case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x64); if (check_offset) offset = check_offset; break; case COMBIOS_FAN_SPEED_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x66); if (check_offset) offset = check_offset; break; case COMBIOS_OVERDRIVE_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x68); if (check_offset) offset = check_offset; break; case COMBIOS_OEM_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x6a); if (check_offset) offset = check_offset; break; case COMBIOS_DYN_CLK_2_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x6c); if (check_offset) offset = check_offset; break; case COMBIOS_POWER_CONNECTOR_INFO_TABLE: check_offset = 
RBIOS16(rdev->bios_header_start + 0x6e); if (check_offset) offset = check_offset; break; case COMBIOS_I2C_INFO_TABLE: check_offset = RBIOS16(rdev->bios_header_start + 0x70); if (check_offset) offset = check_offset; break; /* relative offset tables */ case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ check_offset = combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE); if (check_offset) { rev = RBIOS8(check_offset); if (rev > 0) { check_offset = RBIOS16(check_offset + 0x3); if (check_offset) offset = check_offset; } } break; case COMBIOS_ASIC_INIT_4_TABLE: /* offset from misc info */ check_offset = combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE); if (check_offset) { rev = RBIOS8(check_offset); if (rev > 0) { check_offset = RBIOS16(check_offset + 0x5); if (check_offset) offset = check_offset; } } break; case COMBIOS_DETECTED_MEM_TABLE: /* offset from misc info */ check_offset = combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE); if (check_offset) { rev = RBIOS8(check_offset); if (rev > 0) { check_offset = RBIOS16(check_offset + 0x7); if (check_offset) offset = check_offset; } } break; case COMBIOS_ASIC_INIT_5_TABLE: /* offset from misc info */ check_offset = combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE); if (check_offset) { rev = RBIOS8(check_offset); if (rev == 2) { check_offset = RBIOS16(check_offset + 0x9); if (check_offset) offset = check_offset; } } break; case COMBIOS_RAM_RESET_TABLE: /* offset from mem config */ check_offset = combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE); if (check_offset) { while (RBIOS8(check_offset++)); check_offset += 2; if (check_offset) offset = check_offset; } break; case COMBIOS_POWERPLAY_INFO_TABLE: /* offset from mobile info */ check_offset = combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE); if (check_offset) { check_offset = RBIOS16(check_offset + 0x11); if (check_offset) offset = check_offset; } break; case COMBIOS_GPIO_INFO_TABLE: /* offset from mobile info */ check_offset = 
combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE); if (check_offset) { check_offset = RBIOS16(check_offset + 0x13); if (check_offset) offset = check_offset; } break; case COMBIOS_LCD_DDC_INFO_TABLE: /* offset from mobile info */ check_offset = combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE); if (check_offset) { check_offset = RBIOS16(check_offset + 0x15); if (check_offset) offset = check_offset; } break; case COMBIOS_TMDS_POWER_TABLE: /* offset from mobile info */ check_offset = combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE); if (check_offset) { check_offset = RBIOS16(check_offset + 0x17); if (check_offset) offset = check_offset; } break; case COMBIOS_TMDS_POWER_ON_TABLE: /* offset from tmds power */ check_offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE); if (check_offset) { check_offset = RBIOS16(check_offset + 0x2); if (check_offset) offset = check_offset; } break; case COMBIOS_TMDS_POWER_OFF_TABLE: /* offset from tmds power */ check_offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE); if (check_offset) { check_offset = RBIOS16(check_offset + 0x4); if (check_offset) offset = check_offset; } break; default: break; } return offset; } bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) { int edid_info, size; struct edid *edid; unsigned char *raw; edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); if (!edid_info) return false; raw = rdev->bios + edid_info; size = EDID_LENGTH * (raw[0x7e] + 1); edid = kmalloc(size, GFP_KERNEL); if (edid == NULL) return false; memcpy((unsigned char *)edid, raw, size); if (!drm_edid_is_valid(edid)) { kfree(edid); return false; } rdev->mode_info.bios_hardcoded_edid = edid; rdev->mode_info.bios_hardcoded_edid_size = size; return true; } /* this is used for atom LCDs as well */ struct edid * radeon_bios_get_hardcoded_edid(struct radeon_device *rdev) { struct edid *edid; if (rdev->mode_info.bios_hardcoded_edid) { edid = 
kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); if (edid) { memcpy((unsigned char *)edid, (unsigned char *)rdev->mode_info.bios_hardcoded_edid, rdev->mode_info.bios_hardcoded_edid_size); return edid; } } return NULL; } static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev, enum radeon_combios_ddc ddc, u32 clk_mask, u32 data_mask) { struct radeon_i2c_bus_rec i2c; int ddc_line = 0; /* ddc id = mask reg * DDC_NONE_DETECTED = none * DDC_DVI = RADEON_GPIO_DVI_DDC * DDC_VGA = RADEON_GPIO_VGA_DDC * DDC_LCD = RADEON_GPIOPAD_MASK * DDC_GPIO = RADEON_MDGPIO_MASK * r1xx * DDC_MONID = RADEON_GPIO_MONID * DDC_CRT2 = RADEON_GPIO_CRT2_DDC * r200 * DDC_MONID = RADEON_GPIO_MONID * DDC_CRT2 = RADEON_GPIO_DVI_DDC * r300/r350 * DDC_MONID = RADEON_GPIO_DVI_DDC * DDC_CRT2 = RADEON_GPIO_DVI_DDC * rv2xx/rv3xx * DDC_MONID = RADEON_GPIO_MONID * DDC_CRT2 = RADEON_GPIO_MONID * rs3xx/rs4xx * DDC_MONID = RADEON_GPIOPAD_MASK * DDC_CRT2 = RADEON_GPIO_MONID */ switch (ddc) { case DDC_NONE_DETECTED: default: ddc_line = 0; break; case DDC_DVI: ddc_line = RADEON_GPIO_DVI_DDC; break; case DDC_VGA: ddc_line = RADEON_GPIO_VGA_DDC; break; case DDC_LCD: ddc_line = RADEON_GPIOPAD_MASK; break; case DDC_GPIO: ddc_line = RADEON_MDGPIO_MASK; break; case DDC_MONID: if (rdev->family == CHIP_RS300 || rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) ddc_line = RADEON_GPIOPAD_MASK; else if (rdev->family == CHIP_R300 || rdev->family == CHIP_R350) { ddc_line = RADEON_GPIO_DVI_DDC; ddc = DDC_DVI; } else ddc_line = RADEON_GPIO_MONID; break; case DDC_CRT2: if (rdev->family == CHIP_R200 || rdev->family == CHIP_R300 || rdev->family == CHIP_R350) { ddc_line = RADEON_GPIO_DVI_DDC; ddc = DDC_DVI; } else if (rdev->family == CHIP_RS300 || rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) ddc_line = RADEON_GPIO_MONID; else if (rdev->family >= CHIP_RV350) { ddc_line = RADEON_GPIO_MONID; ddc = DDC_MONID; } else ddc_line = RADEON_GPIO_CRT2_DDC; break; } if (ddc_line == 
RADEON_GPIOPAD_MASK) { i2c.mask_clk_reg = RADEON_GPIOPAD_MASK; i2c.mask_data_reg = RADEON_GPIOPAD_MASK; i2c.a_clk_reg = RADEON_GPIOPAD_A; i2c.a_data_reg = RADEON_GPIOPAD_A; i2c.en_clk_reg = RADEON_GPIOPAD_EN; i2c.en_data_reg = RADEON_GPIOPAD_EN; i2c.y_clk_reg = RADEON_GPIOPAD_Y; i2c.y_data_reg = RADEON_GPIOPAD_Y; } else if (ddc_line == RADEON_MDGPIO_MASK) { i2c.mask_clk_reg = RADEON_MDGPIO_MASK; i2c.mask_data_reg = RADEON_MDGPIO_MASK; i2c.a_clk_reg = RADEON_MDGPIO_A; i2c.a_data_reg = RADEON_MDGPIO_A; i2c.en_clk_reg = RADEON_MDGPIO_EN; i2c.en_data_reg = RADEON_MDGPIO_EN; i2c.y_clk_reg = RADEON_MDGPIO_Y; i2c.y_data_reg = RADEON_MDGPIO_Y; } else { i2c.mask_clk_reg = ddc_line; i2c.mask_data_reg = ddc_line; i2c.a_clk_reg = ddc_line; i2c.a_data_reg = ddc_line; i2c.en_clk_reg = ddc_line; i2c.en_data_reg = ddc_line; i2c.y_clk_reg = ddc_line; i2c.y_data_reg = ddc_line; } if (clk_mask && data_mask) { /* system specific masks */ i2c.mask_clk_mask = clk_mask; i2c.mask_data_mask = data_mask; i2c.a_clk_mask = clk_mask; i2c.a_data_mask = data_mask; i2c.en_clk_mask = clk_mask; i2c.en_data_mask = data_mask; i2c.y_clk_mask = clk_mask; i2c.y_data_mask = data_mask; } else if ((ddc_line == RADEON_GPIOPAD_MASK) || (ddc_line == RADEON_MDGPIO_MASK)) { /* default gpiopad masks */ i2c.mask_clk_mask = (0x20 << 8); i2c.mask_data_mask = 0x80; i2c.a_clk_mask = (0x20 << 8); i2c.a_data_mask = 0x80; i2c.en_clk_mask = (0x20 << 8); i2c.en_data_mask = 0x80; i2c.y_clk_mask = (0x20 << 8); i2c.y_data_mask = 0x80; } else { /* default masks for ddc pads */ i2c.mask_clk_mask = RADEON_GPIO_MASK_1; i2c.mask_data_mask = RADEON_GPIO_MASK_0; i2c.a_clk_mask = RADEON_GPIO_A_1; i2c.a_data_mask = RADEON_GPIO_A_0; i2c.en_clk_mask = RADEON_GPIO_EN_1; i2c.en_data_mask = RADEON_GPIO_EN_0; i2c.y_clk_mask = RADEON_GPIO_Y_1; i2c.y_data_mask = RADEON_GPIO_Y_0; } switch (rdev->family) { case CHIP_R100: case CHIP_RV100: case CHIP_RS100: case CHIP_RV200: case CHIP_RS200: case CHIP_RS300: switch (ddc_line) { case 
RADEON_GPIO_DVI_DDC: i2c.hw_capable = true; break; default: i2c.hw_capable = false; break; } break; case CHIP_R200: switch (ddc_line) { case RADEON_GPIO_DVI_DDC: case RADEON_GPIO_MONID: i2c.hw_capable = true; break; default: i2c.hw_capable = false; break; } break; case CHIP_RV250: case CHIP_RV280: switch (ddc_line) { case RADEON_GPIO_VGA_DDC: case RADEON_GPIO_DVI_DDC: case RADEON_GPIO_CRT2_DDC: i2c.hw_capable = true; break; default: i2c.hw_capable = false; break; } break; case CHIP_R300: case CHIP_R350: switch (ddc_line) { case RADEON_GPIO_VGA_DDC: case RADEON_GPIO_DVI_DDC: i2c.hw_capable = true; break; default: i2c.hw_capable = false; break; } break; case CHIP_RV350: case CHIP_RV380: case CHIP_RS400: case CHIP_RS480: switch (ddc_line) { case RADEON_GPIO_VGA_DDC: case RADEON_GPIO_DVI_DDC: i2c.hw_capable = true; break; case RADEON_GPIO_MONID: /* hw i2c on RADEON_GPIO_MONID doesn't seem to work * reliably on some pre-r4xx hardware; not sure why. */ i2c.hw_capable = false; break; default: i2c.hw_capable = false; break; } break; default: i2c.hw_capable = false; break; } i2c.mm_i2c = false; i2c.i2c_id = ddc; i2c.hpd = RADEON_HPD_NONE; if (ddc_line) i2c.valid = true; else i2c.valid = false; return i2c; } void radeon_combios_i2c_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; struct radeon_i2c_bus_rec i2c; /* actual hw pads * r1xx/rs2xx/rs3xx * 0x60, 0x64, 0x68, 0x6c, gpiopads, mm * r200 * 0x60, 0x64, 0x68, mm * r300/r350 * 0x60, 0x64, mm * rv2xx/rv3xx/rs4xx * 0x60, 0x64, 0x68, gpiopads, mm */ /* 0x60 */ i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC"); /* 0x64 */ i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC"); /* mm i2c */ i2c.valid = true; i2c.hw_capable = true; i2c.mm_i2c = true; i2c.i2c_id = 0xa0; rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C"); if (rdev->family == CHIP_R300 || rdev->family == CHIP_R350) { /* only 
2 sw i2c pads */ } else if (rdev->family == CHIP_RS300 || rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { u16 offset; u8 id, blocks, clk, data; int i; /* 0x68 */ i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); if (offset) { blocks = RBIOS8(offset + 2); for (i = 0; i < blocks; i++) { id = RBIOS8(offset + 3 + (i * 5) + 0); if (id == 136) { clk = RBIOS8(offset + 3 + (i * 5) + 3); data = RBIOS8(offset + 3 + (i * 5) + 4); /* gpiopad */ i2c = combios_setup_i2c_bus(rdev, DDC_MONID, (1 << clk), (1 << data)); rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); break; } } } } else if ((rdev->family == CHIP_R200) || (rdev->family >= CHIP_R300)) { /* 0x68 */ i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); } else { /* 0x68 */ i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); /* 0x6c */ i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC"); } } bool radeon_combios_get_clock_info(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; uint16_t pll_info; struct radeon_pll *p1pll = &rdev->clock.p1pll; struct radeon_pll *p2pll = &rdev->clock.p2pll; struct radeon_pll *spll = &rdev->clock.spll; struct radeon_pll *mpll = &rdev->clock.mpll; int8_t rev; uint16_t sclk, mclk; pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); if (pll_info) { rev = RBIOS8(pll_info); /* pixel clocks */ p1pll->reference_freq = RBIOS16(pll_info + 0xe); p1pll->reference_div = RBIOS16(pll_info + 0x10); p1pll->pll_out_min = RBIOS32(pll_info + 0x12); p1pll->pll_out_max = RBIOS32(pll_info + 0x16); p1pll->lcd_pll_out_min = p1pll->pll_out_min; p1pll->lcd_pll_out_max = p1pll->pll_out_max; if (rev > 9) { p1pll->pll_in_min = RBIOS32(pll_info + 0x36); 
p1pll->pll_in_max = RBIOS32(pll_info + 0x3a); } else { p1pll->pll_in_min = 40; p1pll->pll_in_max = 500; } *p2pll = *p1pll; /* system clock */ spll->reference_freq = RBIOS16(pll_info + 0x1a); spll->reference_div = RBIOS16(pll_info + 0x1c); spll->pll_out_min = RBIOS32(pll_info + 0x1e); spll->pll_out_max = RBIOS32(pll_info + 0x22); if (rev > 10) { spll->pll_in_min = RBIOS32(pll_info + 0x48); spll->pll_in_max = RBIOS32(pll_info + 0x4c); } else { /* ??? */ spll->pll_in_min = 40; spll->pll_in_max = 500; } /* memory clock */ mpll->reference_freq = RBIOS16(pll_info + 0x26); mpll->reference_div = RBIOS16(pll_info + 0x28); mpll->pll_out_min = RBIOS32(pll_info + 0x2a); mpll->pll_out_max = RBIOS32(pll_info + 0x2e); if (rev > 10) { mpll->pll_in_min = RBIOS32(pll_info + 0x5a); mpll->pll_in_max = RBIOS32(pll_info + 0x5e); } else { /* ??? */ mpll->pll_in_min = 40; mpll->pll_in_max = 500; } /* default sclk/mclk */ sclk = RBIOS16(pll_info + 0xa); mclk = RBIOS16(pll_info + 0x8); if (sclk == 0) sclk = 200 * 100; if (mclk == 0) mclk = 200 * 100; rdev->clock.default_sclk = sclk; rdev->clock.default_mclk = mclk; if (RBIOS32(pll_info + 0x16)) rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16); else rdev->clock.max_pixel_clock = 35000; /* might need something asic specific */ return true; } return false; } bool radeon_combios_sideport_present(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; u16 igp_info; /* sideport is AMD only */ if (rdev->family == CHIP_RS400) return false; igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE); if (igp_info) { if (RBIOS16(igp_info + 0x4)) return true; } return false; } static const uint32_t default_primarydac_adj[CHIP_LAST] = { 0x00000808, /* r100 */ 0x00000808, /* rv100 */ 0x00000808, /* rs100 */ 0x00000808, /* rv200 */ 0x00000808, /* rs200 */ 0x00000808, /* r200 */ 0x00000808, /* rv250 */ 0x00000000, /* rs300 */ 0x00000808, /* rv280 */ 0x00000808, /* r300 */ 0x00000808, /* r350 */ 0x00000808, /* rv350 */ 
0x00000808, /* rv380 */ 0x00000808, /* r420 */ 0x00000808, /* r423 */ 0x00000808, /* rv410 */ 0x00000000, /* rs400 */ 0x00000000, /* rs480 */ }; static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev, struct radeon_encoder_primary_dac *p_dac) { p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family]; return; } struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; uint16_t dac_info; uint8_t rev, bg, dac; struct radeon_encoder_primary_dac *p_dac = NULL; int found = 0; p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL); if (!p_dac) return NULL; /* check CRT table */ dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); if (dac_info) { rev = RBIOS8(dac_info) & 0x3; if (rev < 2) { bg = RBIOS8(dac_info + 0x2) & 0xf; dac = (RBIOS8(dac_info + 0x2) >> 4) & 0xf; p_dac->ps2_pdac_adj = (bg << 8) | (dac); } else { bg = RBIOS8(dac_info + 0x2) & 0xf; dac = RBIOS8(dac_info + 0x3) & 0xf; p_dac->ps2_pdac_adj = (bg << 8) | (dac); } /* if the values are all zeros, use the table */ if (p_dac->ps2_pdac_adj) found = 1; } if (!found) /* fallback to defaults */ radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); return p_dac; } enum radeon_tv_std radeon_combios_get_tv_info(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; uint16_t tv_info; enum radeon_tv_std tv_std = TV_STD_NTSC; tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); if (tv_info) { if (RBIOS8(tv_info + 6) == 'T') { switch (RBIOS8(tv_info + 7) & 0xf) { case 1: tv_std = TV_STD_NTSC; DRM_DEBUG_KMS("Default TV standard: NTSC\n"); break; case 2: tv_std = TV_STD_PAL; DRM_DEBUG_KMS("Default TV standard: PAL\n"); break; case 3: tv_std = TV_STD_PAL_M; DRM_DEBUG_KMS("Default TV standard: PAL-M\n"); break; case 4: tv_std = TV_STD_PAL_60; DRM_DEBUG_KMS("Default TV standard: PAL-60\n"); break; case 
5: tv_std = TV_STD_NTSC_J; DRM_DEBUG_KMS("Default TV standard: NTSC-J\n"); break; case 6: tv_std = TV_STD_SCART_PAL; DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n"); break; default: tv_std = TV_STD_NTSC; DRM_DEBUG_KMS ("Unknown TV standard; defaulting to NTSC\n"); break; } switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) { case 0: DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n"); break; case 1: DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n"); break; case 2: DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n"); break; case 3: DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n"); break; default: break; } } } return tv_std; } static const uint32_t default_tvdac_adj[CHIP_LAST] = { 0x00000000, /* r100 */ 0x00280000, /* rv100 */ 0x00000000, /* rs100 */ 0x00880000, /* rv200 */ 0x00000000, /* rs200 */ 0x00000000, /* r200 */ 0x00770000, /* rv250 */ 0x00290000, /* rs300 */ 0x00560000, /* rv280 */ 0x00780000, /* r300 */ 0x00770000, /* r350 */ 0x00780000, /* rv350 */ 0x00780000, /* rv380 */ 0x01080000, /* r420 */ 0x01080000, /* r423 */ 0x01080000, /* rv410 */ 0x00780000, /* rs400 */ 0x00780000, /* rs480 */ }; static void radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev, struct radeon_encoder_tv_dac *tv_dac) { tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family]; if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250)) tv_dac->ps2_tvdac_adj = 0x00880000; tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; return; } struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; uint16_t dac_info; uint8_t rev, bg, dac; struct radeon_encoder_tv_dac *tv_dac = NULL; int found = 0; tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); if (!tv_dac) return NULL; /* first check TV table */ dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); if (dac_info) { rev = RBIOS8(dac_info + 
0x3); if (rev > 4) { bg = RBIOS8(dac_info + 0xc) & 0xf; dac = RBIOS8(dac_info + 0xd) & 0xf; tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20); bg = RBIOS8(dac_info + 0xe) & 0xf; dac = RBIOS8(dac_info + 0xf) & 0xf; tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20); bg = RBIOS8(dac_info + 0x10) & 0xf; dac = RBIOS8(dac_info + 0x11) & 0xf; tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); /* if the values are all zeros, use the table */ if (tv_dac->ps2_tvdac_adj) found = 1; } else if (rev > 1) { bg = RBIOS8(dac_info + 0xc) & 0xf; dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20); bg = RBIOS8(dac_info + 0xd) & 0xf; dac = (RBIOS8(dac_info + 0xd) >> 4) & 0xf; tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20); bg = RBIOS8(dac_info + 0xe) & 0xf; dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); /* if the values are all zeros, use the table */ if (tv_dac->ps2_tvdac_adj) found = 1; } tv_dac->tv_std = radeon_combios_get_tv_info(rdev); } if (!found) { /* then check CRT table */ dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); if (dac_info) { rev = RBIOS8(dac_info) & 0x3; if (rev < 2) { bg = RBIOS8(dac_info + 0x3) & 0xf; dac = (RBIOS8(dac_info + 0x3) >> 4) & 0xf; tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20); tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; /* if the values are all zeros, use the table */ if (tv_dac->ps2_tvdac_adj) found = 1; } else { bg = RBIOS8(dac_info + 0x4) & 0xf; dac = RBIOS8(dac_info + 0x5) & 0xf; tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20); tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; /* if the values are all zeros, use the table */ if (tv_dac->ps2_tvdac_adj) found = 1; } } else { DRM_INFO("No TV DAC info found in BIOS\n"); } } if (!found) /* fallback to defaults */ radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac); return tv_dac; } static struct 
radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct radeon_device *rdev) { struct radeon_encoder_lvds *lvds = NULL; uint32_t fp_vert_stretch, fp_horz_stretch; uint32_t ppll_div_sel, ppll_val; uint32_t lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL); if (!lvds) return NULL; fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH); fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH); /* These should be fail-safe defaults, fingers crossed */ lvds->panel_pwr_delay = 200; lvds->panel_vcc_delay = 2000; lvds->lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); lvds->panel_digon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) & 0xf; lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf; if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE) lvds->native_mode.vdisplay = ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >> RADEON_VERT_PANEL_SHIFT) + 1; else lvds->native_mode.vdisplay = (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1; if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE) lvds->native_mode.hdisplay = (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >> RADEON_HORZ_PANEL_SHIFT) + 1) * 8; else lvds->native_mode.hdisplay = ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8; if ((lvds->native_mode.hdisplay < 640) || (lvds->native_mode.vdisplay < 480)) { lvds->native_mode.hdisplay = 640; lvds->native_mode.vdisplay = 480; } ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3; ppll_val = RREG32_PLL(RADEON_PPLL_DIV_0 + ppll_div_sel); if ((ppll_val & 0x000707ff) == 0x1bb) lvds->use_bios_dividers = false; else { lvds->panel_ref_divider = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff; lvds->panel_post_divider = (ppll_val >> 16) & 0x7; lvds->panel_fb_divider = ppll_val & 0x7ff; if ((lvds->panel_ref_divider != 0) && (lvds->panel_fb_divider > 3)) lvds->use_bios_dividers = true; } lvds->panel_vcc_delay = 200; DRM_INFO("Panel info derived from registers\n"); DRM_INFO("Panel Size %dx%d\n", 
lvds->native_mode.hdisplay, lvds->native_mode.vdisplay); return lvds; } struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; uint16_t lcd_info; uint32_t panel_setup; char stmp[30]; int tmp, i; struct radeon_encoder_lvds *lvds = NULL; lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE); if (lcd_info) { lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL); if (!lvds) return NULL; for (i = 0; i < 24; i++) stmp[i] = RBIOS8(lcd_info + i + 1); stmp[24] = 0; DRM_INFO("Panel ID String: %s\n", stmp); lvds->native_mode.hdisplay = RBIOS16(lcd_info + 0x19); lvds->native_mode.vdisplay = RBIOS16(lcd_info + 0x1b); DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay, lvds->native_mode.vdisplay); lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000); lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; lvds->panel_blon_delay = (RBIOS16(lcd_info + 0x38) >> 4) & 0xf; lvds->panel_ref_divider = RBIOS16(lcd_info + 0x2e); lvds->panel_post_divider = RBIOS8(lcd_info + 0x30); lvds->panel_fb_divider = RBIOS16(lcd_info + 0x31); if ((lvds->panel_ref_divider != 0) && (lvds->panel_fb_divider > 3)) lvds->use_bios_dividers = true; panel_setup = RBIOS32(lcd_info + 0x39); lvds->lvds_gen_cntl = 0xff00; if (panel_setup & 0x1) lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_FORMAT; if ((panel_setup >> 4) & 0x1) lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_TYPE; switch ((panel_setup >> 8) & 0x7) { case 0: lvds->lvds_gen_cntl |= RADEON_LVDS_NO_FM; break; case 1: lvds->lvds_gen_cntl |= RADEON_LVDS_2_GREY; break; case 2: lvds->lvds_gen_cntl |= RADEON_LVDS_4_GREY; break; default: break; } if ((panel_setup >> 16) & 0x1) lvds->lvds_gen_cntl |= RADEON_LVDS_FP_POL_LOW; if ((panel_setup >> 17) & 0x1) lvds->lvds_gen_cntl |= RADEON_LVDS_LP_POL_LOW; if 
((panel_setup >> 18) & 0x1) lvds->lvds_gen_cntl |= RADEON_LVDS_DTM_POL_LOW; if ((panel_setup >> 23) & 0x1) lvds->lvds_gen_cntl |= RADEON_LVDS_BL_CLK_SEL; lvds->lvds_gen_cntl |= (panel_setup & 0xf0000000); for (i = 0; i < 32; i++) { tmp = RBIOS16(lcd_info + 64 + i * 2); if (tmp == 0) break; if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { lvds->native_mode.htotal = lvds->native_mode.hdisplay + (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + (RBIOS8(tmp + 23) * 8); lvds->native_mode.vtotal = lvds->native_mode.vdisplay + (RBIOS16(tmp + 24) - RBIOS16(tmp + 26)); lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + ((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26)); lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + ((RBIOS16(tmp + 28) & 0xf800) >> 11); lvds->native_mode.clock = RBIOS16(tmp + 9) * 10; lvds->native_mode.flags = 0; /* set crtc values */ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); } } } else { DRM_INFO("No panel info found in BIOS\n"); lvds = radeon_legacy_get_lvds_info_from_regs(rdev); } if (lvds) encoder->native_mode = lvds->native_mode; return lvds; } static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = { {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_R100 */ {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_RV100 */ {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RS100 */ {{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_RV200 */ {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_RS200 */ {{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_R200 */ {{15500, 0x81b}, {0xffffffff, 0x83f}, {0, 0}, {0, 0}}, /* CHIP_RV250 */ {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RS300 */ {{13000, 0x400f4}, {15000, 0x400f7}, {0xffffffff, 
0x40111}, {0, 0}}, /* CHIP_RV280 */ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R300 */ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R350 */ {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RV350 */ {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RV380 */ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */ { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS400 */ { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS480 */ }; bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, struct radeon_encoder_int_tmds *tmds) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; int i; for (i = 0; i < 4; i++) { tmds->tmds_pll[i].value = default_tmds_pll[rdev->family][i].value; tmds->tmds_pll[i].freq = default_tmds_pll[rdev->family][i].freq; } return true; } bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, struct radeon_encoder_int_tmds *tmds) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; uint16_t tmds_info; int i, n; uint8_t ver; tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); if (tmds_info) { ver = RBIOS8(tmds_info); DRM_DEBUG_KMS("DFP table revision: %d\n", ver); if (ver == 3) { n = RBIOS8(tmds_info + 5) + 1; if (n > 4) n = 4; for (i = 0; i < n; i++) { tmds->tmds_pll[i].value = RBIOS32(tmds_info + i * 10 + 0x08); tmds->tmds_pll[i].freq = RBIOS16(tmds_info + i * 10 + 0x10); DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n", tmds->tmds_pll[i].freq, tmds->tmds_pll[i].value); } } else if (ver == 4) { int stride = 0; n = RBIOS8(tmds_info + 5) + 1; if (n > 4) n = 4; for (i = 0; i < n; i++) { tmds->tmds_pll[i].value = RBIOS32(tmds_info + stride + 0x08); tmds->tmds_pll[i].freq = RBIOS16(tmds_info + stride + 0x10); if (i == 0) stride += 10; 
else stride += 6; DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n", tmds->tmds_pll[i].freq, tmds->tmds_pll[i].value); } } } else { DRM_INFO("No TMDS info found in BIOS\n"); return false; } return true; } bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder, struct radeon_encoder_ext_tmds *tmds) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_i2c_bus_rec i2c_bus; /* default for macs */ i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); /* XXX some macs have duallink chips */ switch (rdev->mode_info.connector_table) { case CT_POWERBOOK_EXTERNAL: case CT_MINI_EXTERNAL: default: tmds->dvo_chip = DVO_SIL164; tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */ break; } return true; } bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder, struct radeon_encoder_ext_tmds *tmds) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; uint16_t offset; uint8_t ver; enum radeon_combios_ddc gpio; struct radeon_i2c_bus_rec i2c_bus; tmds->i2c_bus = NULL; if (rdev->flags & RADEON_IS_IGP) { i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); tmds->dvo_chip = DVO_SIL164; tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */ } else { offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); if (offset) { ver = RBIOS8(offset); DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver); tmds->slave_addr = RBIOS8(offset + 4 + 2); tmds->slave_addr >>= 1; /* 7 bit addressing */ gpio = RBIOS8(offset + 4 + 3); if (gpio == DDC_LCD) { /* MM i2c */ i2c_bus.valid = true; i2c_bus.hw_capable = true; i2c_bus.mm_i2c = true; i2c_bus.i2c_id = 0xa0; } else i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0); tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); } } if (!tmds->i2c_bus) { DRM_INFO("No valid Ext TMDS info found in BIOS\n"); return 
false; } return true; } bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; struct radeon_i2c_bus_rec ddc_i2c; struct radeon_hpd hpd; rdev->mode_info.connector_table = radeon_connector_table; if (rdev->mode_info.connector_table == CT_NONE) { #ifdef CONFIG_PPC_PMAC if (of_machine_is_compatible("PowerBook3,3")) { /* powerbook with VGA */ rdev->mode_info.connector_table = CT_POWERBOOK_VGA; } else if (of_machine_is_compatible("PowerBook3,4") || of_machine_is_compatible("PowerBook3,5")) { /* powerbook with internal tmds */ rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL; } else if (of_machine_is_compatible("PowerBook5,1") || of_machine_is_compatible("PowerBook5,2") || of_machine_is_compatible("PowerBook5,3") || of_machine_is_compatible("PowerBook5,4") || of_machine_is_compatible("PowerBook5,5")) { /* powerbook with external single link tmds (sil164) */ rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; } else if (of_machine_is_compatible("PowerBook5,6")) { /* powerbook with external dual or single link tmds */ rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; } else if (of_machine_is_compatible("PowerBook5,7") || of_machine_is_compatible("PowerBook5,8") || of_machine_is_compatible("PowerBook5,9")) { /* PowerBook6,2 ? */ /* powerbook with external dual link tmds (sil1178?) 
*/ rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; } else if (of_machine_is_compatible("PowerBook4,1") || of_machine_is_compatible("PowerBook4,2") || of_machine_is_compatible("PowerBook4,3") || of_machine_is_compatible("PowerBook6,3") || of_machine_is_compatible("PowerBook6,5") || of_machine_is_compatible("PowerBook6,7")) { /* ibook */ rdev->mode_info.connector_table = CT_IBOOK; } else if (of_machine_is_compatible("PowerMac4,4")) { /* emac */ rdev->mode_info.connector_table = CT_EMAC; } else if (of_machine_is_compatible("PowerMac10,1")) { /* mini with internal tmds */ rdev->mode_info.connector_table = CT_MINI_INTERNAL; } else if (of_machine_is_compatible("PowerMac10,2")) { /* mini with external tmds */ rdev->mode_info.connector_table = CT_MINI_EXTERNAL; } else if (of_machine_is_compatible("PowerMac12,1")) { /* PowerMac8,1 ? */ /* imac g5 isight */ rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; } else if ((rdev->pdev->device == 0x4a48) && (rdev->pdev->subsystem_vendor == 0x1002) && (rdev->pdev->subsystem_device == 0x4a48)) { /* Mac X800 */ rdev->mode_info.connector_table = CT_MAC_X800; } else if ((of_machine_is_compatible("PowerMac7,2") || of_machine_is_compatible("PowerMac7,3")) && (rdev->pdev->device == 0x4150) && (rdev->pdev->subsystem_vendor == 0x1002) && (rdev->pdev->subsystem_device == 0x4150)) { /* Mac G5 tower 9600 */ rdev->mode_info.connector_table = CT_MAC_G5_9600; } else #endif /* CONFIG_PPC_PMAC */ #ifdef CONFIG_PPC64 if (ASIC_IS_RN50(rdev)) rdev->mode_info.connector_table = CT_RN50_POWER; else #endif rdev->mode_info.connector_table = CT_GENERIC; } switch (rdev->mode_info.connector_table) { case CT_GENERIC: DRM_INFO("Connector Table: %d (generic)\n", rdev->mode_info.connector_table); /* these are the most common settings */ if (rdev->flags & RADEON_SINGLE_CRTC) { /* VGA - primary dac */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, 
ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, CONNECTOR_OBJECT_ID_VGA, &hpd); } else if (rdev->flags & RADEON_IS_MOBILITY) { /* LVDS */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_LCD1_SUPPORT, 0), ATOM_DEVICE_LCD1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, CONNECTOR_OBJECT_ID_LVDS, &hpd); /* VGA - primary dac */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, CONNECTOR_OBJECT_ID_VGA, &hpd); } else { /* DVI-I - tv dac, int tmds */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_1; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, &hpd); /* VGA - primary dac */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, CONNECTOR_OBJECT_ID_VGA, &hpd); } if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) { /* TV - tv dac */ ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, 
ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, CONNECTOR_OBJECT_ID_SVIDEO, &hpd); } break; case CT_IBOOK: DRM_INFO("Connector Table: %d (ibook)\n", rdev->mode_info.connector_table); /* LVDS */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_LCD1_SUPPORT, 0), ATOM_DEVICE_LCD1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, CONNECTOR_OBJECT_ID_LVDS, &hpd); /* VGA - TV DAC */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, CONNECTOR_OBJECT_ID_VGA, &hpd); /* TV - TV DAC */ ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; case CT_POWERBOOK_EXTERNAL: DRM_INFO("Connector Table: %d (powerbook external tmds)\n", rdev->mode_info.connector_table); /* LVDS */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_LCD1_SUPPORT, 0), ATOM_DEVICE_LCD1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, CONNECTOR_OBJECT_ID_LVDS, &hpd); /* DVI-I - primary dac, ext tmds */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_2; /* ??? 
*/ radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP2_SUPPORT, 0), ATOM_DEVICE_DFP2_SUPPORT); radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); /* XXX some are SL */ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, &hpd); /* TV - TV DAC */ ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; case CT_POWERBOOK_INTERNAL: DRM_INFO("Connector Table: %d (powerbook internal tmds)\n", rdev->mode_info.connector_table); /* LVDS */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_LCD1_SUPPORT, 0), ATOM_DEVICE_LCD1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, CONNECTOR_OBJECT_ID_LVDS, &hpd); /* DVI-I - primary dac, int tmds */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_1; /* ??? 
*/ radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, &hpd); /* TV - TV DAC */ ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; case CT_POWERBOOK_VGA: DRM_INFO("Connector Table: %d (powerbook vga)\n", rdev->mode_info.connector_table); /* LVDS */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_LCD1_SUPPORT, 0), ATOM_DEVICE_LCD1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, CONNECTOR_OBJECT_ID_LVDS, &hpd); /* VGA - primary dac */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, CONNECTOR_OBJECT_ID_VGA, &hpd); /* TV - TV DAC */ ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; case CT_MINI_EXTERNAL: DRM_INFO("Connector Table: %d (mini external tmds)\n", rdev->mode_info.connector_table); /* DVI-I - tv dac, ext tmds */ ddc_i2c 
= combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); hpd.hpd = RADEON_HPD_2; /* ??? */ radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP2_SUPPORT, 0), ATOM_DEVICE_DFP2_SUPPORT); radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); /* XXX are any DL? */ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, &hpd); /* TV - TV DAC */ ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; case CT_MINI_INTERNAL: DRM_INFO("Connector Table: %d (mini internal tmds)\n", rdev->mode_info.connector_table); /* DVI-I - tv dac, int tmds */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); hpd.hpd = RADEON_HPD_1; /* ??? 
*/ radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, &hpd); /* TV - TV DAC */ ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; case CT_IMAC_G5_ISIGHT: DRM_INFO("Connector Table: %d (imac g5 isight)\n", rdev->mode_info.connector_table); /* DVI-D - int tmds */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); hpd.hpd = RADEON_HPD_1; /* ??? */ radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT, DRM_MODE_CONNECTOR_DVID, &ddc_i2c, CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D, &hpd); /* VGA - tv dac */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, CONNECTOR_OBJECT_ID_VGA, &hpd); /* TV - TV DAC */ ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; case CT_EMAC: DRM_INFO("Connector Table: %d (emac)\n", rdev->mode_info.connector_table); /* VGA - primary dac */ 
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, CONNECTOR_OBJECT_ID_VGA, &hpd); /* VGA - tv dac */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, CONNECTOR_OBJECT_ID_VGA, &hpd); /* TV - TV DAC */ ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; case CT_RN50_POWER: DRM_INFO("Connector Table: %d (rn50-power)\n", rdev->mode_info.connector_table); /* VGA - primary dac */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, CONNECTOR_OBJECT_ID_VGA, &hpd); ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, CONNECTOR_OBJECT_ID_VGA, &hpd); break; case CT_MAC_X800: DRM_INFO("Connector Table: %d (mac x800)\n", rdev->mode_info.connector_table); /* DVI - primary dac, internal tmds */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_1; /* 
??? */ radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, &hpd); /* DVI - tv dac, dvo */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); hpd.hpd = RADEON_HPD_2; /* ??? */ radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP2_SUPPORT, 0), ATOM_DEVICE_DFP2_SUPPORT); radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, &hpd); break; case CT_MAC_G5_9600: DRM_INFO("Connector Table: %d (mac g5 9600)\n", rdev->mode_info.connector_table); /* DVI - tv dac, dvo */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_1; /* ??? */ radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP2_SUPPORT, 0), ATOM_DEVICE_DFP2_SUPPORT); radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, &hpd); /* ADC - primary dac, internal tmds */ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_2; /* ??? 
*/ radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, &hpd); /* TV - TV DAC */ ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; default: DRM_INFO("Connector table: %d (invalid)\n", rdev->mode_info.connector_table); return false; } radeon_link_encoder_connector(dev); return true; } static bool radeon_apply_legacy_quirks(struct drm_device *dev, int bios_index, enum radeon_combios_connector *legacy_connector, struct radeon_i2c_bus_rec *ddc_i2c, struct radeon_hpd *hpd) { /* Certain IBM chipset RN50s have a BIOS reporting two VGAs, one with VGA DDC and one with CRT2 DDC. 
- kill the CRT2 DDC one */ if (dev->pdev->device == 0x515e && dev->pdev->subsystem_vendor == 0x1014) { if (*legacy_connector == CONNECTOR_CRT_LEGACY && ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC) return false; } /* X300 card with extra non-existent DVI port */ if (dev->pdev->device == 0x5B60 && dev->pdev->subsystem_vendor == 0x17af && dev->pdev->subsystem_device == 0x201e && bios_index == 2) { if (*legacy_connector == CONNECTOR_DVI_I_LEGACY) return false; } return true; } static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev) { /* Acer 5102 has non-existent TV port */ if (dev->pdev->device == 0x5975 && dev->pdev->subsystem_vendor == 0x1025 && dev->pdev->subsystem_device == 0x009f) return false; /* HP dc5750 has non-existent TV port */ if (dev->pdev->device == 0x5974 && dev->pdev->subsystem_vendor == 0x103c && dev->pdev->subsystem_device == 0x280a) return false; /* MSI S270 has non-existent TV port */ if (dev->pdev->device == 0x5955 && dev->pdev->subsystem_vendor == 0x1462 && dev->pdev->subsystem_device == 0x0131) return false; return true; } static uint16_t combios_check_dl_dvi(struct drm_device *dev, int is_dvi_d) { struct radeon_device *rdev = dev->dev_private; uint32_t ext_tmds_info; if (rdev->flags & RADEON_IS_IGP) { if (is_dvi_d) return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; else return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; } ext_tmds_info = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); if (ext_tmds_info) { uint8_t rev = RBIOS8(ext_tmds_info); uint8_t flags = RBIOS8(ext_tmds_info + 4 + 5); if (rev >= 3) { if (is_dvi_d) return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; else return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; } else { if (flags & 1) { if (is_dvi_d) return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; else return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; } } } if (is_dvi_d) return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; else return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; } bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) { 
struct radeon_device *rdev = dev->dev_private; uint32_t conn_info, entry, devices; uint16_t tmp, connector_object_id; enum radeon_combios_ddc ddc_type; enum radeon_combios_connector connector; int i = 0; struct radeon_i2c_bus_rec ddc_i2c; struct radeon_hpd hpd; conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE); if (conn_info) { for (i = 0; i < 4; i++) { entry = conn_info + 2 + i * 2; if (!RBIOS16(entry)) break; tmp = RBIOS16(entry); connector = (tmp >> 12) & 0xf; ddc_type = (tmp >> 8) & 0xf; ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0); switch (connector) { case CONNECTOR_PROPRIETARY_LEGACY: case CONNECTOR_DVI_I_LEGACY: case CONNECTOR_DVI_D_LEGACY: if ((tmp >> 4) & 0x1) hpd.hpd = RADEON_HPD_2; else hpd.hpd = RADEON_HPD_1; break; default: hpd.hpd = RADEON_HPD_NONE; break; } if (!radeon_apply_legacy_quirks(dev, i, &connector, &ddc_i2c, &hpd)) continue; switch (connector) { case CONNECTOR_PROPRIETARY_LEGACY: if ((tmp >> 4) & 0x1) devices = ATOM_DEVICE_DFP2_SUPPORT; else devices = ATOM_DEVICE_DFP1_SUPPORT; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum (dev, devices, 0), devices); radeon_add_legacy_connector(dev, i, devices, legacy_connector_convert [connector], &ddc_i2c, CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D, &hpd); break; case CONNECTOR_CRT_LEGACY: if (tmp & 0x1) { devices = ATOM_DEVICE_CRT2_SUPPORT; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum (dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); } else { devices = ATOM_DEVICE_CRT1_SUPPORT; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum (dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); } radeon_add_legacy_connector(dev, i, devices, legacy_connector_convert [connector], &ddc_i2c, CONNECTOR_OBJECT_ID_VGA, &hpd); break; case CONNECTOR_DVI_I_LEGACY: devices = 0; if (tmp & 0x1) { devices |= ATOM_DEVICE_CRT2_SUPPORT; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum (dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); } else { 
devices |= ATOM_DEVICE_CRT1_SUPPORT; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum (dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); } if ((tmp >> 4) & 0x1) { devices |= ATOM_DEVICE_DFP2_SUPPORT; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum (dev, ATOM_DEVICE_DFP2_SUPPORT, 0), ATOM_DEVICE_DFP2_SUPPORT); connector_object_id = combios_check_dl_dvi(dev, 0); } else { devices |= ATOM_DEVICE_DFP1_SUPPORT; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum (dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; } radeon_add_legacy_connector(dev, i, devices, legacy_connector_convert [connector], &ddc_i2c, connector_object_id, &hpd); break; case CONNECTOR_DVI_D_LEGACY: if ((tmp >> 4) & 0x1) { devices = ATOM_DEVICE_DFP2_SUPPORT; connector_object_id = combios_check_dl_dvi(dev, 1); } else { devices = ATOM_DEVICE_DFP1_SUPPORT; connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; } radeon_add_legacy_encoder(dev, radeon_get_encoder_enum (dev, devices, 0), devices); radeon_add_legacy_connector(dev, i, devices, legacy_connector_convert [connector], &ddc_i2c, connector_object_id, &hpd); break; case CONNECTOR_CTV_LEGACY: case CONNECTOR_STV_LEGACY: radeon_add_legacy_encoder(dev, radeon_get_encoder_enum (dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); radeon_add_legacy_connector(dev, i, ATOM_DEVICE_TV1_SUPPORT, legacy_connector_convert [connector], &ddc_i2c, CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; default: DRM_ERROR("Unknown connector type: %d\n", connector); continue; } } } else { uint16_t tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); if (tmds_info) { DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n"); radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); 
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_1; radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_DFP1_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, &hpd); } else { uint16_t crt_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n"); if (crt_info) { radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, CONNECTOR_OBJECT_ID_VGA, &hpd); } else { DRM_DEBUG_KMS("No connector info found\n"); return false; } } } if (rdev->flags & RADEON_IS_MOBILITY || rdev->flags & RADEON_IS_IGP) { uint16_t lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE); if (lcd_info) { uint16_t lcd_ddc_info = combios_get_table_offset(dev, COMBIOS_LCD_DDC_INFO_TABLE); radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, ATOM_DEVICE_LCD1_SUPPORT, 0), ATOM_DEVICE_LCD1_SUPPORT); if (lcd_ddc_info) { ddc_type = RBIOS8(lcd_ddc_info + 2); switch (ddc_type) { case DDC_LCD: ddc_i2c = combios_setup_i2c_bus(rdev, DDC_LCD, RBIOS32(lcd_ddc_info + 3), RBIOS32(lcd_ddc_info + 7)); radeon_i2c_add(rdev, &ddc_i2c, "LCD"); break; case DDC_GPIO: ddc_i2c = combios_setup_i2c_bus(rdev, DDC_GPIO, RBIOS32(lcd_ddc_info + 3), RBIOS32(lcd_ddc_info + 7)); radeon_i2c_add(rdev, &ddc_i2c, "LCD"); break; default: ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0); break; } DRM_DEBUG_KMS("LCD DDC Info Table found!\n"); } else ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_connector(dev, 5, ATOM_DEVICE_LCD1_SUPPORT, DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, CONNECTOR_OBJECT_ID_LVDS, &hpd); } } /* check TV table */ if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) { uint32_t tv_info = 
combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); if (tv_info) { if (RBIOS8(tv_info + 6) == 'T') { if (radeon_apply_legacy_tv_quirks(dev)) { hpd.hpd = RADEON_HPD_NONE; ddc_i2c.valid = false; radeon_add_legacy_encoder(dev, radeon_get_encoder_enum (dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); radeon_add_legacy_connector(dev, 6, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, CONNECTOR_OBJECT_ID_SVIDEO, &hpd); } } } } radeon_link_encoder_connector(dev); return true; } static const char *thermal_controller_names[] = { "NONE", "lm63", "adm1032", }; void radeon_combios_get_power_modes(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; u16 offset, misc, misc2 = 0; u8 rev, blocks, tmp; int state_index = 0; struct radeon_i2c_bus_rec i2c_bus; rdev->pm.default_power_state_index = -1; /* allocate 2 power states */ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); if (rdev->pm.power_state) { /* allocate 1 clock mode per state */ rdev->pm.power_state[0].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); rdev->pm.power_state[1].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); if (!rdev->pm.power_state[0].clock_info || !rdev->pm.power_state[1].clock_info) goto pm_failed; } else goto pm_failed; /* check for a thermal chip */ offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); if (offset) { u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0; rev = RBIOS8(offset); if (rev == 0) { thermal_controller = RBIOS8(offset + 3); gpio = RBIOS8(offset + 4) & 0x3f; i2c_addr = RBIOS8(offset + 5); } else if (rev == 1) { thermal_controller = RBIOS8(offset + 4); gpio = RBIOS8(offset + 5) & 0x3f; i2c_addr = RBIOS8(offset + 6); } else if (rev == 2) { thermal_controller = RBIOS8(offset + 4); gpio = RBIOS8(offset + 5) & 0x3f; i2c_addr = RBIOS8(offset + 6); clk_bit = RBIOS8(offset + 0xa); data_bit = RBIOS8(offset + 0xb); } if 
((thermal_controller > 0) && (thermal_controller < 3)) { DRM_INFO("Possible %s thermal controller at 0x%02x\n", thermal_controller_names[thermal_controller], i2c_addr >> 1); if (gpio == DDC_LCD) { /* MM i2c */ i2c_bus.valid = true; i2c_bus.hw_capable = true; i2c_bus.mm_i2c = true; i2c_bus.i2c_id = 0xa0; } else if (gpio == DDC_GPIO) i2c_bus = combios_setup_i2c_bus(rdev, gpio, 1 << clk_bit, 1 << data_bit); else i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0); rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); if (rdev->pm.i2c_bus) { struct i2c_board_info info = { }; const char *name = thermal_controller_names[thermal_controller]; info.addr = i2c_addr >> 1; strlcpy(info.type, name, sizeof(info.type)); i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); } } } else { /* boards with a thermal chip, but no overdrive table */ /* Asus 9600xt has an f75375 on the monid bus */ if ((dev->pdev->device == 0x4152) && (dev->pdev->subsystem_vendor == 0x1043) && (dev->pdev->subsystem_device == 0xc002)) { i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); if (rdev->pm.i2c_bus) { struct i2c_board_info info = { }; const char *name = "f75375"; info.addr = 0x28; strlcpy(info.type, name, sizeof(info.type)); i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); DRM_INFO("Possible %s thermal controller at 0x%02x\n", name, info.addr); } } } if (rdev->flags & RADEON_IS_MOBILITY) { offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); if (offset) { rev = RBIOS8(offset); blocks = RBIOS8(offset + 0x2); /* power mode 0 tends to be the only valid one */ rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2); rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6); if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) goto default_mode; 
rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BATTERY; misc = RBIOS16(offset + 0x5 + 0x0); if (rev > 4) misc2 = RBIOS16(offset + 0x5 + 0xe); rdev->pm.power_state[state_index].misc = misc; rdev->pm.power_state[state_index].misc2 = misc2; if (misc & 0x4) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; if (misc & 0x8) rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = true; else rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = false; rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true; if (rev < 6) { rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg = RBIOS16(offset + 0x5 + 0xb) * 4; tmp = RBIOS8(offset + 0x5 + 0xd); rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp); } else { u8 entries = RBIOS8(offset + 0x5 + 0xb); u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc); if (entries && voltage_table_offset) { rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg = RBIOS16(voltage_table_offset) * 4; tmp = RBIOS8(voltage_table_offset + 0x2); rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp); } else rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false; } switch ((misc2 & 0x700) >> 8) { case 0: default: rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0; break; case 1: rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33; break; case 2: rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66; break; case 3: rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99; break; case 4: rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132; break; } } else rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; if (rev > 6) rdev->pm.power_state[state_index].pcie_lanes = RBIOS8(offset + 0x5 + 0x10); rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; 
state_index++; } else { /* XXX figure out some good default low power mode for mobility cards w/out power tables */ } } else { /* XXX figure out some good default low power mode for desktop cards */ } default_mode: /* add the default mode */ rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_DEFAULT; rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; if ((state_index > 0) && (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO)) rdev->pm.power_state[state_index].clock_info[0].voltage = rdev->pm.power_state[0].clock_info[0].voltage; else rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; rdev->pm.power_state[state_index].pcie_lanes = 16; rdev->pm.power_state[state_index].flags = 0; rdev->pm.default_power_state_index = state_index; rdev->pm.num_power_states = state_index + 1; rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; rdev->pm.current_clock_mode_index = 0; return; pm_failed: rdev->pm.default_power_state_index = state_index; rdev->pm.num_power_states = 0; rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; rdev->pm.current_clock_mode_index = 0; } void radeon_external_tmds_setup(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; if (!tmds) return; switch (tmds->dvo_chip) { case DVO_SIL164: /* sil 164 */ radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, 0x08, 0x30); radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, 0x09, 0x00); radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, 0x0a, 0x90); radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, 0x0c, 0x89); 
radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, 0x08, 0x3b); break; case DVO_SIL1178: /* sil 1178 - untested */ /* * 0x0f, 0x44 * 0x0f, 0x4c * 0x0e, 0x01 * 0x0a, 0x80 * 0x09, 0x30 * 0x0c, 0xc9 * 0x0d, 0x70 * 0x08, 0x32 * 0x08, 0x33 */ break; default: break; } } bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint16_t offset; uint8_t blocks, slave_addr, rev; uint32_t index, id; uint32_t reg, val, and_mask, or_mask; struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; if (!tmds) return false; if (rdev->flags & RADEON_IS_IGP) { offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE); rev = RBIOS8(offset); if (offset) { rev = RBIOS8(offset); if (rev > 1) { blocks = RBIOS8(offset + 3); index = offset + 4; while (blocks > 0) { id = RBIOS16(index); index += 2; switch (id >> 13) { case 0: reg = (id & 0x1fff) * 4; val = RBIOS32(index); index += 4; WREG32(reg, val); break; case 2: reg = (id & 0x1fff) * 4; and_mask = RBIOS32(index); index += 4; or_mask = RBIOS32(index); index += 4; val = RREG32(reg); val = (val & and_mask) | or_mask; WREG32(reg, val); break; case 3: val = RBIOS16(index); index += 2; udelay(val); break; case 4: val = RBIOS16(index); index += 2; mdelay(val); break; case 6: slave_addr = id & 0xff; slave_addr >>= 1; /* 7 bit addressing */ index++; reg = RBIOS8(index); index++; val = RBIOS8(index); index++; radeon_i2c_put_byte(tmds->i2c_bus, slave_addr, reg, val); break; default: DRM_ERROR("Unknown id %d\n", id >> 13); break; } blocks--; } return true; } } } else { offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); if (offset) { index = offset + 10; id = RBIOS16(index); while (id != 0xffff) { index += 2; switch (id >> 13) { case 0: reg = (id & 0x1fff) * 4; val = RBIOS32(index); WREG32(reg, val); break; case 2: reg = (id & 0x1fff) * 4; and_mask 
= RBIOS32(index); index += 4; or_mask = RBIOS32(index); index += 4; val = RREG32(reg); val = (val & and_mask) | or_mask; WREG32(reg, val); break; case 4: val = RBIOS16(index); index += 2; udelay(val); break; case 5: reg = id & 0x1fff; and_mask = RBIOS32(index); index += 4; or_mask = RBIOS32(index); index += 4; val = RREG32_PLL(reg); val = (val & and_mask) | or_mask; WREG32_PLL(reg, val); break; case 6: reg = id & 0x1fff; val = RBIOS8(index); index += 1; radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, reg, val); break; default: DRM_ERROR("Unknown id %d\n", id >> 13); break; } id = RBIOS16(index); } return true; } } return false; } static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset) { struct radeon_device *rdev = dev->dev_private; if (offset) { while (RBIOS16(offset)) { uint16_t cmd = ((RBIOS16(offset) & 0xe000) >> 13); uint32_t addr = (RBIOS16(offset) & 0x1fff); uint32_t val, and_mask, or_mask; uint32_t tmp; offset += 2; switch (cmd) { case 0: val = RBIOS32(offset); offset += 4; WREG32(addr, val); break; case 1: val = RBIOS32(offset); offset += 4; WREG32(addr, val); break; case 2: and_mask = RBIOS32(offset); offset += 4; or_mask = RBIOS32(offset); offset += 4; tmp = RREG32(addr); tmp &= and_mask; tmp |= or_mask; WREG32(addr, tmp); break; case 3: and_mask = RBIOS32(offset); offset += 4; or_mask = RBIOS32(offset); offset += 4; tmp = RREG32(addr); tmp &= and_mask; tmp |= or_mask; WREG32(addr, tmp); break; case 4: val = RBIOS16(offset); offset += 2; udelay(val); break; case 5: val = RBIOS16(offset); offset += 2; switch (addr) { case 8: while (val--) { if (! 
(RREG32_PLL (RADEON_CLK_PWRMGT_CNTL) & RADEON_MC_BUSY)) break; } break; case 9: while (val--) { if ((RREG32(RADEON_MC_STATUS) & RADEON_MC_IDLE)) break; } break; default: break; } break; default: break; } } } } static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset) { struct radeon_device *rdev = dev->dev_private; if (offset) { while (RBIOS8(offset)) { uint8_t cmd = ((RBIOS8(offset) & 0xc0) >> 6); uint8_t addr = (RBIOS8(offset) & 0x3f); uint32_t val, shift, tmp; uint32_t and_mask, or_mask; offset++; switch (cmd) { case 0: val = RBIOS32(offset); offset += 4; WREG32_PLL(addr, val); break; case 1: shift = RBIOS8(offset) * 8; offset++; and_mask = RBIOS8(offset) << shift; and_mask |= ~(0xff << shift); offset++; or_mask = RBIOS8(offset) << shift; offset++; tmp = RREG32_PLL(addr); tmp &= and_mask; tmp |= or_mask; WREG32_PLL(addr, tmp); break; case 2: case 3: tmp = 1000; switch (addr) { case 1: udelay(150); break; case 2: mdelay(1); break; case 3: while (tmp--) { if (! (RREG32_PLL (RADEON_CLK_PWRMGT_CNTL) & RADEON_MC_BUSY)) break; } break; case 4: while (tmp--) { if (RREG32_PLL (RADEON_CLK_PWRMGT_CNTL) & RADEON_DLL_READY) break; } break; case 5: tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL); if (tmp & RADEON_CG_NO1_DEBUG_0) { #if 0 uint32_t mclk_cntl = RREG32_PLL (RADEON_MCLK_CNTL); mclk_cntl &= 0xffff0000; /*mclk_cntl |= 0x00001111;*//* ??? 
*/ WREG32_PLL(RADEON_MCLK_CNTL, mclk_cntl); mdelay(10); #endif WREG32_PLL (RADEON_CLK_PWRMGT_CNTL, tmp & ~RADEON_CG_NO1_DEBUG_0); mdelay(10); } break; default: break; } break; default: break; } } } } static void combios_parse_ram_reset_table(struct drm_device *dev, uint16_t offset) { struct radeon_device *rdev = dev->dev_private; uint32_t tmp; if (offset) { uint8_t val = RBIOS8(offset); while (val != 0xff) { offset++; if (val == 0x0f) { uint32_t channel_complete_mask; if (ASIC_IS_R300(rdev)) channel_complete_mask = R300_MEM_PWRUP_COMPLETE; else channel_complete_mask = RADEON_MEM_PWRUP_COMPLETE; tmp = 20000; while (tmp--) { if ((RREG32(RADEON_MEM_STR_CNTL) & channel_complete_mask) == channel_complete_mask) break; } } else { uint32_t or_mask = RBIOS16(offset); offset += 2; tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG); tmp &= RADEON_SDRAM_MODE_MASK; tmp |= or_mask; WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp); or_mask = val << 24; tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG); tmp &= RADEON_B3MEM_RESET_MASK; tmp |= or_mask; WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp); } val = RBIOS8(offset); } } } static uint32_t combios_detect_ram(struct drm_device *dev, int ram, int mem_addr_mapping) { struct radeon_device *rdev = dev->dev_private; uint32_t mem_cntl; uint32_t mem_size; uint32_t addr = 0; mem_cntl = RREG32(RADEON_MEM_CNTL); if (mem_cntl & RV100_HALF_MODE) ram /= 2; mem_size = ram; mem_cntl &= ~(0xff << 8); mem_cntl |= (mem_addr_mapping & 0xff) << 8; WREG32(RADEON_MEM_CNTL, mem_cntl); RREG32(RADEON_MEM_CNTL); /* sdram reset ? */ /* something like this???? 
*/ while (ram--) { addr = ram * 1024 * 1024; /* write to each page */ WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER); WREG32(RADEON_MM_DATA, 0xdeadbeef); /* read back and verify */ WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER); if (RREG32(RADEON_MM_DATA) != 0xdeadbeef) return 0; } return mem_size; } static void combios_write_ram_size(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; uint8_t rev; uint16_t offset; uint32_t mem_size = 0; uint32_t mem_cntl = 0; /* should do something smarter here I guess... */ if (rdev->flags & RADEON_IS_IGP) return; /* first check detected mem table */ offset = combios_get_table_offset(dev, COMBIOS_DETECTED_MEM_TABLE); if (offset) { rev = RBIOS8(offset); if (rev < 3) { mem_cntl = RBIOS32(offset + 1); mem_size = RBIOS16(offset + 5); if ((rdev->family < CHIP_R200) && !ASIC_IS_RN50(rdev)) WREG32(RADEON_MEM_CNTL, mem_cntl); } } if (!mem_size) { offset = combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE); if (offset) { rev = RBIOS8(offset - 1); if (rev < 1) { if ((rdev->family < CHIP_R200) && !ASIC_IS_RN50(rdev)) { int ram = 0; int mem_addr_mapping = 0; while (RBIOS8(offset)) { ram = RBIOS8(offset); mem_addr_mapping = RBIOS8(offset + 1); if (mem_addr_mapping != 0x25) ram *= 2; mem_size = combios_detect_ram(dev, ram, mem_addr_mapping); if (mem_size) break; offset += 2; } } else mem_size = RBIOS8(offset); } else { mem_size = RBIOS8(offset); mem_size *= 2; /* convert to MB */ } } } mem_size *= (1024 * 1024); /* convert to bytes */ WREG32(RADEON_CONFIG_MEMSIZE, mem_size); } void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable) { uint16_t dyn_clk_info = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); if (dyn_clk_info) combios_parse_pll_table(dev, dyn_clk_info); } void radeon_combios_asic_init(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; uint16_t table; /* port hardcoded mac stuff from radeonfb */ if (rdev->bios == NULL) return; /* ASIC INIT 1 */ table = 
combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE); if (table) combios_parse_mmio_table(dev, table); /* PLL INIT */ table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE); if (table) combios_parse_pll_table(dev, table); /* ASIC INIT 2 */ table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE); if (table) combios_parse_mmio_table(dev, table); if (!(rdev->flags & RADEON_IS_IGP)) { /* ASIC INIT 4 */ table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_4_TABLE); if (table) combios_parse_mmio_table(dev, table); /* RAM RESET */ table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE); if (table) combios_parse_ram_reset_table(dev, table); /* ASIC INIT 3 */ table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_3_TABLE); if (table) combios_parse_mmio_table(dev, table); /* write CONFIG_MEMSIZE */ combios_write_ram_size(dev); } /* quirk for rs4xx HP nx6125 laptop to make it resume * - it hangs on resume inside the dynclk 1 table. */ if (rdev->family == CHIP_RS480 && rdev->pdev->subsystem_vendor == 0x103c && rdev->pdev->subsystem_device == 0x308b) return; /* quirk for rs4xx HP dv5000 laptop to make it resume * - it hangs on resume inside the dynclk 1 table. */ if (rdev->family == CHIP_RS480 && rdev->pdev->subsystem_vendor == 0x103c && rdev->pdev->subsystem_device == 0x30a4) return; /* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume * - it hangs on resume inside the dynclk 1 table. 
*/ if (rdev->family == CHIP_RS480 && rdev->pdev->subsystem_vendor == 0x103c && rdev->pdev->subsystem_device == 0x30ae) return; /* DYN CLK 1 */ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); if (table) combios_parse_pll_table(dev, table); } void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; uint32_t bios_0_scratch, bios_6_scratch, bios_7_scratch; bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH); bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); bios_7_scratch = RREG32(RADEON_BIOS_7_SCRATCH); /* let the bios control the backlight */ bios_0_scratch &= ~RADEON_DRIVER_BRIGHTNESS_EN; /* tell the bios not to handle mode switching */ bios_6_scratch |= (RADEON_DISPLAY_SWITCHING_DIS | RADEON_ACC_MODE_CHANGE); /* tell the bios a driver is loaded */ bios_7_scratch |= RADEON_DRV_LOADED; WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch); WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch); WREG32(RADEON_BIOS_7_SCRATCH, bios_7_scratch); } void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t bios_6_scratch; bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); if (lock) bios_6_scratch |= RADEON_DRIVER_CRITICAL; else bios_6_scratch &= ~RADEON_DRIVER_CRITICAL; WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch); } void radeon_combios_connected_scratch_regs(struct drm_connector *connector, struct drm_encoder *encoder, bool connected) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t bios_4_scratch = RREG32(RADEON_BIOS_4_SCRATCH); uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH); if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) { if (connected) { 
DRM_DEBUG_KMS("TV1 connected\n"); /* fix me */ bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO; /*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */ bios_5_scratch |= RADEON_TV1_ON; bios_5_scratch |= RADEON_ACC_REQ_TV1; } else { DRM_DEBUG_KMS("TV1 disconnected\n"); bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK; bios_5_scratch &= ~RADEON_TV1_ON; bios_5_scratch &= ~RADEON_ACC_REQ_TV1; } } if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("LCD1 connected\n"); bios_4_scratch |= RADEON_LCD1_ATTACHED; bios_5_scratch |= RADEON_LCD1_ON; bios_5_scratch |= RADEON_ACC_REQ_LCD1; } else { DRM_DEBUG_KMS("LCD1 disconnected\n"); bios_4_scratch &= ~RADEON_LCD1_ATTACHED; bios_5_scratch &= ~RADEON_LCD1_ON; bios_5_scratch &= ~RADEON_ACC_REQ_LCD1; } } if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("CRT1 connected\n"); bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR; bios_5_scratch |= RADEON_CRT1_ON; bios_5_scratch |= RADEON_ACC_REQ_CRT1; } else { DRM_DEBUG_KMS("CRT1 disconnected\n"); bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK; bios_5_scratch &= ~RADEON_CRT1_ON; bios_5_scratch &= ~RADEON_ACC_REQ_CRT1; } } if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("CRT2 connected\n"); bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR; bios_5_scratch |= RADEON_CRT2_ON; bios_5_scratch |= RADEON_ACC_REQ_CRT2; } else { DRM_DEBUG_KMS("CRT2 disconnected\n"); bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK; bios_5_scratch &= ~RADEON_CRT2_ON; bios_5_scratch &= ~RADEON_ACC_REQ_CRT2; } } if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("DFP1 connected\n"); bios_4_scratch |= RADEON_DFP1_ATTACHED; bios_5_scratch |= 
RADEON_DFP1_ON; bios_5_scratch |= RADEON_ACC_REQ_DFP1; } else { DRM_DEBUG_KMS("DFP1 disconnected\n"); bios_4_scratch &= ~RADEON_DFP1_ATTACHED; bios_5_scratch &= ~RADEON_DFP1_ON; bios_5_scratch &= ~RADEON_ACC_REQ_DFP1; } } if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("DFP2 connected\n"); bios_4_scratch |= RADEON_DFP2_ATTACHED; bios_5_scratch |= RADEON_DFP2_ON; bios_5_scratch |= RADEON_ACC_REQ_DFP2; } else { DRM_DEBUG_KMS("DFP2 disconnected\n"); bios_4_scratch &= ~RADEON_DFP2_ATTACHED; bios_5_scratch &= ~RADEON_DFP2_ON; bios_5_scratch &= ~RADEON_ACC_REQ_DFP2; } } WREG32(RADEON_BIOS_4_SCRATCH, bios_4_scratch); WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch); } void radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH); if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) { bios_5_scratch &= ~RADEON_TV1_CRTC_MASK; bios_5_scratch |= (crtc << RADEON_TV1_CRTC_SHIFT); } if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) { bios_5_scratch &= ~RADEON_CRT1_CRTC_MASK; bios_5_scratch |= (crtc << RADEON_CRT1_CRTC_SHIFT); } if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) { bios_5_scratch &= ~RADEON_CRT2_CRTC_MASK; bios_5_scratch |= (crtc << RADEON_CRT2_CRTC_SHIFT); } if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) { bios_5_scratch &= ~RADEON_LCD1_CRTC_MASK; bios_5_scratch |= (crtc << RADEON_LCD1_CRTC_SHIFT); } if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) { bios_5_scratch &= ~RADEON_DFP1_CRTC_MASK; bios_5_scratch |= (crtc << RADEON_DFP1_CRTC_SHIFT); } if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) { bios_5_scratch &= ~RADEON_DFP2_CRTC_MASK; bios_5_scratch |= (crtc << RADEON_DFP2_CRTC_SHIFT); } 
WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch); } void radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) { if (on) bios_6_scratch |= RADEON_TV_DPMS_ON; else bios_6_scratch &= ~RADEON_TV_DPMS_ON; } if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { if (on) bios_6_scratch |= RADEON_CRT_DPMS_ON; else bios_6_scratch &= ~RADEON_CRT_DPMS_ON; } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (on) bios_6_scratch |= RADEON_LCD_DPMS_ON; else bios_6_scratch &= ~RADEON_LCD_DPMS_ON; } if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (on) bios_6_scratch |= RADEON_DFP_DPMS_ON; else bios_6_scratch &= ~RADEON_DFP_DPMS_ON; } WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch); }
gpl-2.0
cristianomatos/kernel-stock-mako
arch/arm/mach-tegra/hotplug.c
4546
2562
/* * linux/arch/arm/mach-realview/hotplug.c * * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/smp.h> #include <asm/cacheflush.h> #include <asm/cp15.h> static inline void cpu_enter_lowpower(void) { unsigned int v; flush_cache_all(); asm volatile( " mcr p15, 0, %1, c7, c5, 0\n" " mcr p15, 0, %1, c7, c10, 4\n" /* * Turn off coherency */ " mrc p15, 0, %0, c1, c0, 1\n" " bic %0, %0, #0x20\n" " mcr p15, 0, %0, c1, c0, 1\n" " mrc p15, 0, %0, c1, c0, 0\n" " bic %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "r" (0), "Ir" (CR_C) : "cc"); } static inline void cpu_leave_lowpower(void) { unsigned int v; asm volatile( "mrc p15, 0, %0, c1, c0, 0\n" " orr %0, %0, %1\n" " mcr p15, 0, %0, c1, c0, 0\n" " mrc p15, 0, %0, c1, c0, 1\n" " orr %0, %0, #0x20\n" " mcr p15, 0, %0, c1, c0, 1\n" : "=&r" (v) : "Ir" (CR_C) : "cc"); } static inline void platform_do_lowpower(unsigned int cpu, int *spurious) { /* * there is no power-control hardware on this platform, so all * we can do is put the core into WFI; this is safe as the calling * code will have already disabled interrupts */ for (;;) { /* * here's the WFI */ asm(".word 0xe320f003\n" : : : "memory", "cc"); /*if (pen_release == cpu) {*/ /* * OK, proper wakeup, we're done */ break; /*}*/ /* * Getting here, means that we have come out of WFI without * having been woken up - this shouldn't happen * * Just note it happening - when we're woken, we can report * its occurrence. 
*/ (*spurious)++; } } int platform_cpu_kill(unsigned int cpu) { return 1; } /* * platform-specific code to shutdown a CPU * * Called with IRQs disabled */ void platform_cpu_die(unsigned int cpu) { int spurious = 0; /* * we're ready for shutdown now, so do it */ cpu_enter_lowpower(); platform_do_lowpower(cpu, &spurious); /* * bring this CPU back into the world of cache * coherency, and then restore interrupts */ cpu_leave_lowpower(); if (spurious) pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); } int platform_cpu_disable(unsigned int cpu) { /* * we don't allow CPU 0 to be shutdown (it is still too special * e.g. clock tick interrupts) */ return cpu == 0 ? -EPERM : 0; }
gpl-2.0
chevanlol360/android_kernel_lge_vu2u
drivers/net/wireless/rtlwifi/rtl8192se/sw.c
4802
14646
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../core.h" #include "../pci.h" #include "../base.h" #include "../pci.h" #include "reg.h" #include "def.h" #include "phy.h" #include "dm.h" #include "fw.h" #include "hw.h" #include "sw.h" #include "trx.h" #include "led.h" #include <linux/module.h> static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); /*close ASPM for AMD defaultly */ rtlpci->const_amdpci_aspm = 0; /* * ASPM PS mode. * 0 - Disable ASPM, * 1 - Enable ASPM without Clock Req, * 2 - Enable ASPM with Clock Req, * 3 - Alwyas Enable ASPM with Clock Req, * 4 - Always Enable ASPM without Clock Req. 
* set defult to RTL8192CE:3 RTL8192E:2 * */ rtlpci->const_pci_aspm = 2; /*Setting for PCI-E device */ rtlpci->const_devicepci_aspm_setting = 0x03; /*Setting for PCI-E bridge */ rtlpci->const_hostpci_aspm_setting = 0x02; /* * In Hw/Sw Radio Off situation. * 0 - Default, * 1 - From ASPM setting without low Mac Pwr, * 2 - From ASPM setting with low Mac Pwr, * 3 - Bus D3 * set default to RTL8192CE:0 RTL8192SE:2 */ rtlpci->const_hwsw_rfoff_d3 = 2; /* * This setting works for those device with * backdoor ASPM setting such as EPHY setting. * 0 - Not support ASPM, * 1 - Support ASPM, * 2 - According to chipset. */ rtlpci->const_support_pciaspm = 2; } static void rtl92se_fw_cb(const struct firmware *firmware, void *context) { struct ieee80211_hw *hw = context; struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); struct rt_firmware *pfirmware = NULL; int err; RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "Firmware callback routine entered!\n"); complete(&rtlpriv->firmware_loading_complete); if (!firmware) { pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name); rtlpriv->max_fw_size = 0; return; } if (firmware->size > rtlpriv->max_fw_size) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Firmware is too big!\n"); rtlpriv->max_fw_size = 0; release_firmware(firmware); return; } pfirmware = (struct rt_firmware *)rtlpriv->rtlhal.pfirmware; memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size); pfirmware->sz_fw_tmpbufferlen = firmware->size; release_firmware(firmware); err = ieee80211_register_hw(hw); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't register mac80211 hw\n"); return; } else { rtlpriv->mac80211.mac80211_registered = 1; } rtlpci->irq_alloc = 1; set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); /*init rfkill */ rtl_init_rfkill(hw); } static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = 
rtl_pcidev(rtl_pcipriv(hw)); int err = 0; u16 earlyrxthreshold = 7; rtlpriv->dm.dm_initialgain_enable = true; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = false; rtlpriv->dm.thermalvalue = 0; rtlpriv->dm.useramask = true; /* compatible 5G band 91se just 2.4G band & smsp */ rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; rtlpriv->rtlhal.bandset = BAND_ON_2_4G; rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; rtlpci->transmit_config = 0; rtlpci->receive_config = RCR_APPFCS | RCR_APWRMGT | /*RCR_ADD3 |*/ RCR_AMF | RCR_ADF | RCR_APP_MIC | RCR_APP_ICV | RCR_AICV | /* Accept ICV error, CRC32 Error */ RCR_ACRC32 | RCR_AB | /* Accept Broadcast, Multicast */ RCR_AM | /* Accept Physical match */ RCR_APM | /* Accept Destination Address packets */ /*RCR_AAP |*/ RCR_APP_PHYST_STAFF | /* Accept PHY status */ RCR_APP_PHYST_RXFF | (earlyrxthreshold << RCR_FIFO_OFFSET); rtlpci->irq_mask[0] = (u32) (IMR_ROK | IMR_VODOK | IMR_VIDOK | IMR_BEDOK | IMR_BKDOK | IMR_HCCADOK | IMR_MGNTDOK | IMR_COMDOK | IMR_HIGHDOK | IMR_BDOK | IMR_RXCMDOK | /*IMR_TIMEOUT0 |*/ IMR_RDU | IMR_RXFOVW | IMR_BCNINT /*| IMR_TXFOVW*/ /*| IMR_TBDOK | IMR_TBDER*/); rtlpci->irq_mask[1] = (u32) 0; rtlpci->shortretry_limit = 0x30; rtlpci->longretry_limit = 0x30; rtlpci->first_init = true; /* for debug level */ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; if (!rtlpriv->psc.inactiveps) pr_info("Power Save off (module option)\n"); if (!rtlpriv->psc.fwctrl_lps) pr_info("FW Power Save off (module option)\n"); rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; /* for ASPM, you can close aspm through * set const_support_pciaspm = 0 */ rtl92s_init_aspm_vars(hw); if (rtlpriv->psc.reg_fwctrl_lps == 1) rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; else if 
(rtlpriv->psc.reg_fwctrl_lps == 2) rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 3) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(sizeof(struct rt_firmware)); if (!rtlpriv->rtlhal.pfirmware) return 1; rtlpriv->max_fw_size = RTL8190_MAX_RAW_FIRMWARE_CODE_SIZE; pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n" "Loading firmware %s\n", rtlpriv->cfg->fw_name); /* request fw */ err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl92se_fw_cb); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to request firmware!\n"); return 1; } return err; } static void rtl92s_deinit_sw_vars(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->rtlhal.pfirmware) { vfree(rtlpriv->rtlhal.pfirmware); rtlpriv->rtlhal.pfirmware = NULL; } } static struct rtl_hal_ops rtl8192se_hal_ops = { .init_sw_vars = rtl92s_init_sw_vars, .deinit_sw_vars = rtl92s_deinit_sw_vars, .read_eeprom_info = rtl92se_read_eeprom_info, .interrupt_recognized = rtl92se_interrupt_recognized, .hw_init = rtl92se_hw_init, .hw_disable = rtl92se_card_disable, .hw_suspend = rtl92se_suspend, .hw_resume = rtl92se_resume, .enable_interrupt = rtl92se_enable_interrupt, .disable_interrupt = rtl92se_disable_interrupt, .set_network_type = rtl92se_set_network_type, .set_chk_bssid = rtl92se_set_check_bssid, .set_qos = rtl92se_set_qos, .set_bcn_reg = rtl92se_set_beacon_related_registers, .set_bcn_intv = rtl92se_set_beacon_interval, .update_interrupt_mask = rtl92se_update_interrupt_mask, .get_hw_reg = rtl92se_get_hw_reg, .set_hw_reg = rtl92se_set_hw_reg, .update_rate_tbl = rtl92se_update_hal_rate_tbl, .fill_tx_desc = rtl92se_tx_fill_desc, .fill_tx_cmddesc = rtl92se_tx_fill_cmddesc, .query_rx_desc = rtl92se_rx_query_desc, .set_channel_access = rtl92se_update_channel_access_setting, .radio_onoff_checking = rtl92se_gpio_radio_on_off_checking, .set_bw_mode = 
rtl92s_phy_set_bw_mode, .switch_channel = rtl92s_phy_sw_chnl, .dm_watchdog = rtl92s_dm_watchdog, .scan_operation_backup = rtl92s_phy_scan_operation_backup, .set_rf_power_state = rtl92s_phy_set_rf_power_state, .led_control = rtl92se_led_control, .set_desc = rtl92se_set_desc, .get_desc = rtl92se_get_desc, .tx_polling = rtl92se_tx_polling, .enable_hw_sec = rtl92se_enable_hw_security_config, .set_key = rtl92se_set_key, .init_sw_leds = rtl92se_init_sw_leds, .get_bbreg = rtl92s_phy_query_bb_reg, .set_bbreg = rtl92s_phy_set_bb_reg, .get_rfreg = rtl92s_phy_query_rf_reg, .set_rfreg = rtl92s_phy_set_rf_reg, }; static struct rtl_mod_params rtl92se_mod_params = { .sw_crypto = false, .inactiveps = true, .swctrl_lps = true, .fwctrl_lps = false, .debug = DBG_EMERG, }; /* Because memory R/W bursting will cause system hang/crash * for 92se, so we don't read back after every write action */ static struct rtl_hal_cfg rtl92se_hal_cfg = { .bar_id = 1, .write_readback = false, .name = "rtl92s_pci", .fw_name = "rtlwifi/rtl8192sefw.bin", .ops = &rtl8192se_hal_ops, .mod_params = &rtl92se_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN, .maps[SYS_CLK] = SYS_CLKR, .maps[MAC_RCR_AM] = RCR_AM, .maps[MAC_RCR_AB] = RCR_AB, .maps[MAC_RCR_ACRC32] = RCR_ACRC32, .maps[MAC_RCR_ACF] = RCR_ACF, .maps[MAC_RCR_AAP] = RCR_AAP, .maps[EFUSE_TEST] = REG_EFUSE_TEST, .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, .maps[EFUSE_CLK] = REG_EFUSE_CLK, .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL, .maps[EFUSE_PWC_EV12V] = 0, /* nouse for 8192se */ .maps[EFUSE_FEN_ELDR] = 0, /* nouse for 8192se */ .maps[EFUSE_LOADER_CLK_EN] = 0,/* nouse for 8192se */ .maps[EFUSE_ANA8M] = EFUSE_ANA8M, .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE_92S, .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION, .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN, .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES, .maps[RWCAM] = REG_RWCAM, .maps[WCAMI] = REG_WCAMI, .maps[RCAMO] = REG_RCAMO, .maps[CAMDBG] = 
REG_CAMDBG, .maps[SECR] = REG_SECR, .maps[SEC_CAM_NONE] = CAM_NONE, .maps[SEC_CAM_WEP40] = CAM_WEP40, .maps[SEC_CAM_TKIP] = CAM_TKIP, .maps[SEC_CAM_AES] = CAM_AES, .maps[SEC_CAM_WEP104] = CAM_WEP104, .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6, .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5, .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4, .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3, .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2, .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1, .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7, .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6, .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5, .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4, .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3, .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2, .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1, .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2, .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1, .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, .maps[RTL_IMR_BcnInt] = IMR_BCNINT, .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, .maps[RTL_IMR_RDU] = IMR_RDU, .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, .maps[RTL_IMR_BDOK] = IMR_BDOK, .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK, .maps[RTL_IMR_TBDER] = IMR_TBDER, .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK, .maps[RTL_IMR_COMDOK] = IMR_COMDOK, .maps[RTL_IMR_TBDOK] = IMR_TBDOK, .maps[RTL_IMR_BKDOK] = IMR_BKDOK, .maps[RTL_IMR_BEDOK] = IMR_BEDOK, .maps[RTL_IMR_VIDOK] = IMR_VIDOK, .maps[RTL_IMR_VODOK] = IMR_VODOK, .maps[RTL_IMR_ROK] = IMR_ROK, .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER), .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M, .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M, .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M, .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M, .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M, .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M, .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M, .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M, .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M, .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M, .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M, 
.maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M, .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7, .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, }; static struct pci_device_id rtl92se_pci_ids[] __devinitdata = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8192, rtl92se_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8171, rtl92se_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8172, rtl92se_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8173, rtl92se_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8174, rtl92se_hal_cfg)}, {}, }; MODULE_DEVICE_TABLE(pci, rtl92se_pci_ids); MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek 8192S/8191S 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8192sefw.bin"); module_param_named(swenc, rtl92se_mod_params.sw_crypto, bool, 0444); module_param_named(debug, rtl92se_mod_params.debug, int, 0444); module_param_named(ips, rtl92se_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); static const struct dev_pm_ops rtlwifi_pm_ops = { .suspend = rtl_pci_suspend, .resume = rtl_pci_resume, .freeze = rtl_pci_suspend, .thaw = rtl_pci_resume, .poweroff = rtl_pci_suspend, .restore = rtl_pci_resume, }; static struct pci_driver rtl92se_driver = { .name = KBUILD_MODNAME, .id_table = rtl92se_pci_ids, .probe = rtl_pci_probe, .remove = rtl_pci_disconnect, .driver.pm = &rtlwifi_pm_ops, }; 
static int __init rtl92se_module_init(void) { int ret = 0; ret = pci_register_driver(&rtl92se_driver); if (ret) RT_ASSERT(false, "No device found\n"); return ret; } static void __exit rtl92se_module_exit(void) { pci_unregister_driver(&rtl92se_driver); } module_init(rtl92se_module_init); module_exit(rtl92se_module_exit);
gpl-2.0
wzhy90/sony_msm8x60
drivers/gpu/drm/exynos/exynos_drm_crtc.c
4802
11865
/* exynos_drm_crtc.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * Authors: * Inki Dae <inki.dae@samsung.com> * Joonyoung Shim <jy0922.shim@samsung.com> * Seung-Woo Kim <sw0312.kim@samsung.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include "drmP.h" #include "drm_crtc_helper.h" #include "exynos_drm_crtc.h" #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" #include "exynos_drm_encoder.h" #include "exynos_drm_gem.h" #define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\ drm_crtc) /* * Exynos specific crtc structure. * * @drm_crtc: crtc object. * @overlay: contain information common to display controller and hdmi and * contents of this overlay object would be copied to sub driver size. 
* @pipe: a crtc index created at load() with a new crtc object creation * and the crtc object would be set to private->crtc array * to get a crtc object corresponding to this pipe from private->crtc * array when irq interrupt occured. the reason of using this pipe is that * drm framework doesn't support multiple irq yet. * we can refer to the crtc to current hardware interrupt occured through * this pipe value. * @dpms: store the crtc dpms value */ struct exynos_drm_crtc { struct drm_crtc drm_crtc; struct exynos_drm_overlay overlay; unsigned int pipe; unsigned int dpms; }; static void exynos_drm_crtc_apply(struct drm_crtc *crtc) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); struct exynos_drm_overlay *overlay = &exynos_crtc->overlay; exynos_drm_fn_encoder(crtc, overlay, exynos_drm_encoder_crtc_mode_set); exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, exynos_drm_encoder_crtc_commit); } int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, struct drm_framebuffer *fb, struct drm_display_mode *mode, struct exynos_drm_crtc_pos *pos) { struct exynos_drm_gem_buf *buffer; unsigned int actual_w; unsigned int actual_h; int nr = exynos_drm_format_num_buffers(fb->pixel_format); int i; for (i = 0; i < nr; i++) { buffer = exynos_drm_fb_buffer(fb, i); if (!buffer) { DRM_LOG_KMS("buffer is null\n"); return -EFAULT; } overlay->dma_addr[i] = buffer->dma_addr; overlay->vaddr[i] = buffer->kvaddr; DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n", i, (unsigned long)overlay->vaddr[i], (unsigned long)overlay->dma_addr[i]); } actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w); actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h); /* set drm framebuffer data. */ overlay->fb_x = pos->fb_x; overlay->fb_y = pos->fb_y; overlay->fb_width = fb->width; overlay->fb_height = fb->height; overlay->bpp = fb->bits_per_pixel; overlay->pitch = fb->pitches[0]; overlay->pixel_format = fb->pixel_format; /* set overlay range to be displayed. 
*/ overlay->crtc_x = pos->crtc_x; overlay->crtc_y = pos->crtc_y; overlay->crtc_width = actual_w; overlay->crtc_height = actual_h; /* set drm mode data. */ overlay->mode_width = mode->hdisplay; overlay->mode_height = mode->vdisplay; overlay->refresh = mode->vrefresh; overlay->scan_flag = mode->flags; DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)", overlay->crtc_x, overlay->crtc_y, overlay->crtc_width, overlay->crtc_height); return 0; } static int exynos_drm_crtc_update(struct drm_crtc *crtc) { struct exynos_drm_crtc *exynos_crtc; struct exynos_drm_overlay *overlay; struct exynos_drm_crtc_pos pos; struct drm_display_mode *mode = &crtc->mode; struct drm_framebuffer *fb = crtc->fb; if (!mode || !fb) return -EINVAL; exynos_crtc = to_exynos_crtc(crtc); overlay = &exynos_crtc->overlay; memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos)); /* it means the offset of framebuffer to be displayed. */ pos.fb_x = crtc->x; pos.fb_y = crtc->y; /* OSD position to be displayed. */ pos.crtc_x = 0; pos.crtc_y = 0; pos.crtc_w = fb->width - crtc->x; pos.crtc_h = fb->height - crtc->y; return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos); } static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode); if (exynos_crtc->dpms == mode) { DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n"); return; } mutex_lock(&dev->struct_mutex); switch (mode) { case DRM_MODE_DPMS_ON: exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms); exynos_crtc->dpms = mode; break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms); exynos_crtc->dpms = mode; break; default: DRM_ERROR("unspecified mode %d\n", mode); break; } mutex_unlock(&dev->struct_mutex); } static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) { 
DRM_DEBUG_KMS("%s\n", __FILE__); /* drm framework doesn't check NULL. */ } static void exynos_drm_crtc_commit(struct drm_crtc *crtc) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); DRM_DEBUG_KMS("%s\n", __FILE__); /* * when set_crtc is requested from user or at booting time, * crtc->commit would be called without dpms call so if dpms is * no power on then crtc->dpms should be called * with DRM_MODE_DPMS_ON for the hardware power to be on. */ if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) { int mode = DRM_MODE_DPMS_ON; /* * enable hardware(power on) to all encoders hdmi connected * to current crtc. */ exynos_drm_crtc_dpms(crtc, mode); /* * enable dma to all encoders connected to current crtc and * lcd panel. */ exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_dpms_from_crtc); } exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, exynos_drm_encoder_crtc_commit); } static bool exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { DRM_DEBUG_KMS("%s\n", __FILE__); /* drm framework doesn't check NULL */ return true; } static int exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { DRM_DEBUG_KMS("%s\n", __FILE__); /* * copy the mode data adjusted by mode_fixup() into crtc->mode * so that hardware can be seet to proper mode. 
*/ memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode)); return exynos_drm_crtc_update(crtc); } static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { int ret; DRM_DEBUG_KMS("%s\n", __FILE__); ret = exynos_drm_crtc_update(crtc); if (ret) return ret; exynos_drm_crtc_apply(crtc); return ret; } static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc) { DRM_DEBUG_KMS("%s\n", __FILE__); /* drm framework doesn't check NULL */ } static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { .dpms = exynos_drm_crtc_dpms, .prepare = exynos_drm_crtc_prepare, .commit = exynos_drm_crtc_commit, .mode_fixup = exynos_drm_crtc_mode_fixup, .mode_set = exynos_drm_crtc_mode_set, .mode_set_base = exynos_drm_crtc_mode_set_base, .load_lut = exynos_drm_crtc_load_lut, }; static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event) { struct drm_device *dev = crtc->dev; struct exynos_drm_private *dev_priv = dev->dev_private; struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); struct drm_framebuffer *old_fb = crtc->fb; int ret = -EINVAL; DRM_DEBUG_KMS("%s\n", __FILE__); mutex_lock(&dev->struct_mutex); if (event) { /* * the pipe from user always is 0 so we can set pipe number * of current owner to event. */ event->pipe = exynos_crtc->pipe; ret = drm_vblank_get(dev, exynos_crtc->pipe); if (ret) { DRM_DEBUG("failed to acquire vblank counter\n"); list_del(&event->base.link); goto out; } list_add_tail(&event->base.link, &dev_priv->pageflip_event_list); crtc->fb = fb; ret = exynos_drm_crtc_update(crtc); if (ret) { crtc->fb = old_fb; drm_vblank_put(dev, exynos_crtc->pipe); list_del(&event->base.link); goto out; } /* * the values related to a buffer of the drm framebuffer * to be applied should be set at here. because these values * first, are set to shadow registers and then to * real registers at vsync front porch period. 
*/ exynos_drm_crtc_apply(crtc); } out: mutex_unlock(&dev->struct_mutex); return ret; } static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); struct exynos_drm_private *private = crtc->dev->dev_private; DRM_DEBUG_KMS("%s\n", __FILE__); private->crtc[exynos_crtc->pipe] = NULL; drm_crtc_cleanup(crtc); kfree(exynos_crtc); } static struct drm_crtc_funcs exynos_crtc_funcs = { .set_config = drm_crtc_helper_set_config, .page_flip = exynos_drm_crtc_page_flip, .destroy = exynos_drm_crtc_destroy, }; struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev, struct drm_crtc *crtc) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); return &exynos_crtc->overlay; } int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr) { struct exynos_drm_crtc *exynos_crtc; struct exynos_drm_private *private = dev->dev_private; struct drm_crtc *crtc; DRM_DEBUG_KMS("%s\n", __FILE__); exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL); if (!exynos_crtc) { DRM_ERROR("failed to allocate exynos crtc\n"); return -ENOMEM; } exynos_crtc->pipe = nr; exynos_crtc->dpms = DRM_MODE_DPMS_OFF; exynos_crtc->overlay.zpos = DEFAULT_ZPOS; crtc = &exynos_crtc->drm_crtc; private->crtc[nr] = crtc; drm_crtc_init(dev, crtc, &exynos_crtc_funcs); drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs); return 0; } int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc) { struct exynos_drm_private *private = dev->dev_private; struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(private->crtc[crtc]); DRM_DEBUG_KMS("%s\n", __FILE__); if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) return -EPERM; exynos_drm_fn_encoder(private->crtc[crtc], &crtc, exynos_drm_enable_vblank); return 0; } void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc) { struct exynos_drm_private *private = dev->dev_private; struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(private->crtc[crtc]); DRM_DEBUG_KMS("%s\n", 
__FILE__); if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) return; exynos_drm_fn_encoder(private->crtc[crtc], &crtc, exynos_drm_disable_vblank); }
gpl-2.0
AOKP/kernel_sony_msm8974ab
drivers/mfd/stmpe-i2c.c
5058
2647
/* * ST Microelectronics MFD: stmpe's i2c client specific driver * * Copyright (C) ST-Ericsson SA 2010 * Copyright (C) ST Microelectronics SA 2011 * * License Terms: GNU General Public License, version 2 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics */ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include "stmpe.h" static int i2c_reg_read(struct stmpe *stmpe, u8 reg) { struct i2c_client *i2c = stmpe->client; return i2c_smbus_read_byte_data(i2c, reg); } static int i2c_reg_write(struct stmpe *stmpe, u8 reg, u8 val) { struct i2c_client *i2c = stmpe->client; return i2c_smbus_write_byte_data(i2c, reg, val); } static int i2c_block_read(struct stmpe *stmpe, u8 reg, u8 length, u8 *values) { struct i2c_client *i2c = stmpe->client; return i2c_smbus_read_i2c_block_data(i2c, reg, length, values); } static int i2c_block_write(struct stmpe *stmpe, u8 reg, u8 length, const u8 *values) { struct i2c_client *i2c = stmpe->client; return i2c_smbus_write_i2c_block_data(i2c, reg, length, values); } static struct stmpe_client_info i2c_ci = { .read_byte = i2c_reg_read, .write_byte = i2c_reg_write, .read_block = i2c_block_read, .write_block = i2c_block_write, }; static int __devinit stmpe_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { i2c_ci.data = (void *)id; i2c_ci.irq = i2c->irq; i2c_ci.client = i2c; i2c_ci.dev = &i2c->dev; return stmpe_probe(&i2c_ci, id->driver_data); } static int __devexit stmpe_i2c_remove(struct i2c_client *i2c) { struct stmpe *stmpe = dev_get_drvdata(&i2c->dev); return stmpe_remove(stmpe); } static const struct i2c_device_id stmpe_i2c_id[] = { { "stmpe610", STMPE610 }, { "stmpe801", STMPE801 }, { "stmpe811", STMPE811 }, { "stmpe1601", STMPE1601 }, { "stmpe2401", STMPE2401 }, { "stmpe2403", STMPE2403 }, { } }; MODULE_DEVICE_TABLE(i2c, stmpe_id); static struct i2c_driver 
stmpe_i2c_driver = { .driver.name = "stmpe-i2c", .driver.owner = THIS_MODULE, #ifdef CONFIG_PM .driver.pm = &stmpe_dev_pm_ops, #endif .probe = stmpe_i2c_probe, .remove = __devexit_p(stmpe_i2c_remove), .id_table = stmpe_i2c_id, }; static int __init stmpe_init(void) { return i2c_add_driver(&stmpe_i2c_driver); } subsys_initcall(stmpe_init); static void __exit stmpe_exit(void) { i2c_del_driver(&stmpe_i2c_driver); } module_exit(stmpe_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("STMPE MFD I2C Interface Driver"); MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
gpl-2.0
olexiyt/telechips-linux
kernel/cpu_pm.c
5058
6635
/* * Copyright (C) 2011 Google, Inc. * * Author: * Colin Cross <ccross@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/cpu_pm.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/spinlock.h> #include <linux/syscore_ops.h> static DEFINE_RWLOCK(cpu_pm_notifier_lock); static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain); static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls) { int ret; ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL, nr_to_call, nr_calls); return notifier_to_errno(ret); } /** * cpu_pm_register_notifier - register a driver with cpu_pm * @nb: notifier block to register * * Add a driver to a list of drivers that are notified about * CPU and CPU cluster low power entry and exit. * * This function may sleep, and has the same return conditions as * raw_notifier_chain_register. */ int cpu_pm_register_notifier(struct notifier_block *nb) { unsigned long flags; int ret; write_lock_irqsave(&cpu_pm_notifier_lock, flags); ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb); write_unlock_irqrestore(&cpu_pm_notifier_lock, flags); return ret; } EXPORT_SYMBOL_GPL(cpu_pm_register_notifier); /** * cpu_pm_unregister_notifier - unregister a driver with cpu_pm * @nb: notifier block to be unregistered * * Remove a driver from the CPU PM notifier list. * * This function may sleep, and has the same return conditions as * raw_notifier_chain_unregister. 
*/ int cpu_pm_unregister_notifier(struct notifier_block *nb) { unsigned long flags; int ret; write_lock_irqsave(&cpu_pm_notifier_lock, flags); ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb); write_unlock_irqrestore(&cpu_pm_notifier_lock, flags); return ret; } EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier); /** * cpm_pm_enter - CPU low power entry notifier * * Notifies listeners that a single CPU is entering a low power state that may * cause some blocks in the same power domain as the cpu to reset. * * Must be called on the affected CPU with interrupts disabled. Platform is * responsible for ensuring that cpu_pm_enter is not called twice on the same * CPU before cpu_pm_exit is called. Notified drivers can include VFP * co-processor, interrupt controller and it's PM extensions, local CPU * timers context save/restore which shouldn't be interrupted. Hence it * must be called with interrupts disabled. * * Return conditions are same as __raw_notifier_call_chain. */ int cpu_pm_enter(void) { int nr_calls; int ret = 0; read_lock(&cpu_pm_notifier_lock); ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls); if (ret) /* * Inform listeners (nr_calls - 1) about failure of CPU PM * PM entry who are notified earlier to prepare for it. */ cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL); read_unlock(&cpu_pm_notifier_lock); return ret; } EXPORT_SYMBOL_GPL(cpu_pm_enter); /** * cpm_pm_exit - CPU low power exit notifier * * Notifies listeners that a single CPU is exiting a low power state that may * have caused some blocks in the same power domain as the cpu to reset. * * Notified drivers can include VFP co-processor, interrupt controller * and it's PM extensions, local CPU timers context save/restore which * shouldn't be interrupted. Hence it must be called with interrupts disabled. * * Return conditions are same as __raw_notifier_call_chain. 
*/ int cpu_pm_exit(void) { int ret; read_lock(&cpu_pm_notifier_lock); ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL); read_unlock(&cpu_pm_notifier_lock); return ret; } EXPORT_SYMBOL_GPL(cpu_pm_exit); /** * cpm_cluster_pm_enter - CPU cluster low power entry notifier * * Notifies listeners that all cpus in a power domain are entering a low power * state that may cause some blocks in the same power domain to reset. * * Must be called after cpu_pm_enter has been called on all cpus in the power * domain, and before cpu_pm_exit has been called on any cpu in the power * domain. Notified drivers can include VFP co-processor, interrupt controller * and it's PM extensions, local CPU timers context save/restore which * shouldn't be interrupted. Hence it must be called with interrupts disabled. * * Must be called with interrupts disabled. * * Return conditions are same as __raw_notifier_call_chain. */ int cpu_cluster_pm_enter(void) { int nr_calls; int ret = 0; read_lock(&cpu_pm_notifier_lock); ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls); if (ret) /* * Inform listeners (nr_calls - 1) about failure of CPU cluster * PM entry who are notified earlier to prepare for it. */ cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL); read_unlock(&cpu_pm_notifier_lock); return ret; } EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter); /** * cpm_cluster_pm_exit - CPU cluster low power exit notifier * * Notifies listeners that all cpus in a power domain are exiting form a * low power state that may have caused some blocks in the same power domain * to reset. * * Must be called after cpu_pm_exit has been called on all cpus in the power * domain, and before cpu_pm_exit has been called on any cpu in the power * domain. Notified drivers can include VFP co-processor, interrupt controller * and it's PM extensions, local CPU timers context save/restore which * shouldn't be interrupted. Hence it must be called with interrupts disabled. 
* * Return conditions are same as __raw_notifier_call_chain. */ int cpu_cluster_pm_exit(void) { int ret; read_lock(&cpu_pm_notifier_lock); ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL); read_unlock(&cpu_pm_notifier_lock); return ret; } EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit); #ifdef CONFIG_PM static int cpu_pm_suspend(void) { int ret; ret = cpu_pm_enter(); if (ret) return ret; ret = cpu_cluster_pm_enter(); return ret; } static void cpu_pm_resume(void) { cpu_cluster_pm_exit(); cpu_pm_exit(); } static struct syscore_ops cpu_pm_syscore_ops = { .suspend = cpu_pm_suspend, .resume = cpu_pm_resume, }; static int cpu_pm_init(void) { register_syscore_ops(&cpu_pm_syscore_ops); return 0; } core_initcall(cpu_pm_init); #endif
gpl-2.0
MoKee/android_kernel_oppo_find7a
arch/sparc/mm/srmmu.c
7106
70399
/* * srmmu.c: SRMMU specific routines for memory management. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org) */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/bootmem.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/kdebug.h> #include <linux/log2.h> #include <linux/gfp.h> #include <asm/bitext.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/vaddrs.h> #include <asm/traps.h> #include <asm/smp.h> #include <asm/mbus.h> #include <asm/cache.h> #include <asm/oplib.h> #include <asm/asi.h> #include <asm/msi.h> #include <asm/mmu_context.h> #include <asm/io-unit.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> /* Now the cpu specific definitions. 
*/ #include <asm/viking.h> #include <asm/mxcc.h> #include <asm/ross.h> #include <asm/tsunami.h> #include <asm/swift.h> #include <asm/turbosparc.h> #include <asm/leon.h> #include <asm/btfixup.h> enum mbus_module srmmu_modtype; static unsigned int hwbug_bitmask; int vac_cache_size; int vac_line_size; extern struct resource sparc_iomap; extern unsigned long last_valid_pfn; extern unsigned long page_kernel; static pgd_t *srmmu_swapper_pg_dir; #ifdef CONFIG_SMP #define FLUSH_BEGIN(mm) #define FLUSH_END #else #define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) { #define FLUSH_END } #endif BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long) #define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page) int flush_page_for_dma_global = 1; #ifdef CONFIG_SMP BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long) #define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page) #endif char *srmmu_name; ctxd_t *srmmu_ctx_table_phys; static ctxd_t *srmmu_context_table; int viking_mxcc_present; static DEFINE_SPINLOCK(srmmu_context_spinlock); static int is_hypersparc; /* * In general all page table modifications should use the V8 atomic * swap instruction. This insures the mmu and the cpu are in sync * with respect to ref/mod bits in the page tables. */ static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value) { __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr)); return value; } static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval) { srmmu_swap((unsigned long *)ptep, pte_val(pteval)); } /* The very generic SRMMU page table operations. 
*/ static inline int srmmu_device_memory(unsigned long x) { return ((x & 0xF0000000) != 0); } static int srmmu_cache_pagetables; /* these will be initialized in srmmu_nocache_calcsize() */ static unsigned long srmmu_nocache_size; static unsigned long srmmu_nocache_end; /* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */ #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4) /* The context table is a nocache user with the biggest alignment needs. */ #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS) void *srmmu_nocache_pool; void *srmmu_nocache_bitmap; static struct bit_map srmmu_nocache_map; static unsigned long srmmu_pte_pfn(pte_t pte) { if (srmmu_device_memory(pte_val(pte))) { /* Just return something that will cause * pfn_valid() to return false. This makes * copy_one_pte() to just directly copy to * PTE over. */ return ~0UL; } return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4); } static struct page *srmmu_pmd_page(pmd_t pmd) { if (srmmu_device_memory(pmd_val(pmd))) BUG(); return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4)); } static inline unsigned long srmmu_pgd_page(pgd_t pgd) { return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); } static inline int srmmu_pte_none(pte_t pte) { return !(pte_val(pte) & 0xFFFFFFF); } static inline int srmmu_pte_present(pte_t pte) { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); } static inline void srmmu_pte_clear(pte_t *ptep) { srmmu_set_pte(ptep, __pte(0)); } static inline int srmmu_pmd_none(pmd_t pmd) { return !(pmd_val(pmd) & 0xFFFFFFF); } static inline int srmmu_pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; } static inline int srmmu_pmd_present(pmd_t pmd) { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } static inline void srmmu_pmd_clear(pmd_t *pmdp) { int i; for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0)); } static 
inline int srmmu_pgd_none(pgd_t pgd) { return !(pgd_val(pgd) & 0xFFFFFFF); } static inline int srmmu_pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; } static inline int srmmu_pgd_present(pgd_t pgd) { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } static inline void srmmu_pgd_clear(pgd_t * pgdp) { srmmu_set_pte((pte_t *)pgdp, __pte(0)); } static inline pte_t srmmu_pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_WRITE);} static inline pte_t srmmu_pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_DIRTY);} static inline pte_t srmmu_pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_REF);} static inline pte_t srmmu_pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | SRMMU_WRITE);} static inline pte_t srmmu_pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | SRMMU_DIRTY);} static inline pte_t srmmu_pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | SRMMU_REF);} /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
*/ static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot) { return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); } static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot) { return __pte(((page) >> 4) | pgprot_val(pgprot)); } static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space) { return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); } /* XXX should we hyper_flush_whole_icache here - Anton */ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) { srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); } static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp) { srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); } static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep) { unsigned long ptp; /* Physical address, shifted right by 4 */ int i; ptp = __nocache_pa((unsigned long) ptep) >> 4; for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); } } static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep) { unsigned long ptp; /* Physical address, shifted right by 4 */ int i; ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */ for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); } } static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot) { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); } /* to find an entry in a top-level page table... */ static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); } /* Find an entry in the second-level page table.. 
*/ static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address) { return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1)); } /* Find an entry in the third-level page table.. */ static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address) { void *pte; pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4); return (pte_t *) pte + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); } static unsigned long srmmu_swp_type(swp_entry_t entry) { return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK; } static unsigned long srmmu_swp_offset(swp_entry_t entry) { return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK; } static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset) { return (swp_entry_t) { (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT }; } /* * size: bytes to allocate in the nocache area. * align: bytes, number to align at. * Returns the virtual address of the allocated area. 
*/ static unsigned long __srmmu_get_nocache(int size, int align) { int offset; if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { printk("Size 0x%x too small for nocache request\n", size); size = SRMMU_NOCACHE_BITMAP_SHIFT; } if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) { printk("Size 0x%x unaligned int nocache request\n", size); size += SRMMU_NOCACHE_BITMAP_SHIFT-1; } BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX); offset = bit_map_string_get(&srmmu_nocache_map, size >> SRMMU_NOCACHE_BITMAP_SHIFT, align >> SRMMU_NOCACHE_BITMAP_SHIFT); if (offset == -1) { printk("srmmu: out of nocache %d: %d/%d\n", size, (int) srmmu_nocache_size, srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); return 0; } return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT)); } static unsigned long srmmu_get_nocache(int size, int align) { unsigned long tmp; tmp = __srmmu_get_nocache(size, align); if (tmp) memset((void *)tmp, 0, size); return tmp; } static void srmmu_free_nocache(unsigned long vaddr, int size) { int offset; if (vaddr < SRMMU_NOCACHE_VADDR) { printk("Vaddr %lx is smaller than nocache base 0x%lx\n", vaddr, (unsigned long)SRMMU_NOCACHE_VADDR); BUG(); } if (vaddr+size > srmmu_nocache_end) { printk("Vaddr %lx is bigger than nocache end 0x%lx\n", vaddr, srmmu_nocache_end); BUG(); } if (!is_power_of_2(size)) { printk("Size 0x%x is not a power of 2\n", size); BUG(); } if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { printk("Size 0x%x is too small\n", size); BUG(); } if (vaddr & (size-1)) { printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size); BUG(); } offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT; size = size >> SRMMU_NOCACHE_BITMAP_SHIFT; bit_map_clear(&srmmu_nocache_map, offset, size); } static void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end); extern unsigned long probe_memory(void); /* in fault.c */ /* * Reserve nocache dynamically proportionally to the amount of * system RAM. 
-- Tomas Szepe <szepe@pinerecords.com>, June 2002 */ static void srmmu_nocache_calcsize(void) { unsigned long sysmemavail = probe_memory() / 1024; int srmmu_nocache_npages; srmmu_nocache_npages = sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256; /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */ // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256; if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES) srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES; /* anything above 1280 blows up */ if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES) srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES; srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE; srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size; } static void __init srmmu_nocache_init(void) { unsigned int bitmap_bits; pgd_t *pgd; pmd_t *pmd; pte_t *pte; unsigned long paddr, vaddr; unsigned long pteval; bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT; srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX, 0UL); memset(srmmu_nocache_pool, 0, srmmu_nocache_size); srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE); init_mm.pgd = srmmu_swapper_pg_dir; srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end); paddr = __pa((unsigned long)srmmu_nocache_pool); vaddr = SRMMU_NOCACHE_VADDR; while (vaddr < srmmu_nocache_end) { pgd = pgd_offset_k(vaddr); pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr); pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr); pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV); if (srmmu_cache_pagetables) pteval |= SRMMU_CACHE; srmmu_set_pte(__nocache_fix(pte), __pte(pteval)); vaddr += PAGE_SIZE; paddr += PAGE_SIZE; } flush_cache_all(); flush_tlb_all(); } static 
inline pgd_t *srmmu_get_pgd_fast(void) { pgd_t *pgd = NULL; pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); if (pgd) { pgd_t *init = pgd_offset_k(0); memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); } return pgd; } static void srmmu_free_pgd_fast(pgd_t *pgd) { srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE); } static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address) { return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); } static void srmmu_pmd_free(pmd_t * pmd) { srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE); } /* * Hardware needs alignment to 256 only, but we align to whole page size * to reduce fragmentation problems due to the buddy principle. * XXX Provide actual fragmentation statistics in /proc. * * Alignments up to the page size are the same for physical and virtual * addresses of the nocache area. 
*/ static pte_t * srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE); } static pgtable_t srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address) { unsigned long pte; struct page *page; if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0) return NULL; page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT ); pgtable_page_ctor(page); return page; } static void srmmu_free_pte_fast(pte_t *pte) { srmmu_free_nocache((unsigned long)pte, PTE_SIZE); } static void srmmu_pte_free(pgtable_t pte) { unsigned long p; pgtable_page_dtor(pte); p = (unsigned long)page_address(pte); /* Cached address (for test) */ if (p == 0) BUG(); p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */ p = (unsigned long) __nocache_va(p); /* Nocached virtual */ srmmu_free_nocache(p, PTE_SIZE); } /* */ static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) { struct ctx_list *ctxp; ctxp = ctx_free.next; if(ctxp != &ctx_free) { remove_from_ctx_list(ctxp); add_to_used_ctxlist(ctxp); mm->context = ctxp->ctx_number; ctxp->ctx_mm = mm; return; } ctxp = ctx_used.next; if(ctxp->ctx_mm == old_mm) ctxp = ctxp->next; if(ctxp == &ctx_used) panic("out of mmu contexts"); flush_cache_mm(ctxp->ctx_mm); flush_tlb_mm(ctxp->ctx_mm); remove_from_ctx_list(ctxp); add_to_used_ctxlist(ctxp); ctxp->ctx_mm->context = NO_CONTEXT; ctxp->ctx_mm = mm; mm->context = ctxp->ctx_number; } static inline void free_context(int context) { struct ctx_list *ctx_old; ctx_old = ctx_list_pool + context; remove_from_ctx_list(ctx_old); add_to_free_ctxlist(ctx_old); } static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu) { if(mm->context == NO_CONTEXT) { spin_lock(&srmmu_context_spinlock); alloc_context(old_mm, mm); spin_unlock(&srmmu_context_spinlock); srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); } if (sparc_cpu_model == sparc_leon) 
leon_switch_mm(); if (is_hypersparc) hyper_flush_whole_icache(); srmmu_set_context(mm->context); } /* Low level IO area allocation on the SRMMU. */ static inline void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type) { pgd_t *pgdp; pmd_t *pmdp; pte_t *ptep; unsigned long tmp; physaddr &= PAGE_MASK; pgdp = pgd_offset_k(virt_addr); pmdp = srmmu_pmd_offset(pgdp, virt_addr); ptep = srmmu_pte_offset(pmdp, virt_addr); tmp = (physaddr >> 4) | SRMMU_ET_PTE; /* * I need to test whether this is consistent over all * sun4m's. The bus_type represents the upper 4 bits of * 36-bit physical address on the I/O space lines... */ tmp |= (bus_type << 28); tmp |= SRMMU_PRIV; __flush_page_to_ram(virt_addr); srmmu_set_pte(ptep, __pte(tmp)); } static void srmmu_mapiorange(unsigned int bus, unsigned long xpa, unsigned long xva, unsigned int len) { while (len != 0) { len -= PAGE_SIZE; srmmu_mapioaddr(xpa, xva, bus); xva += PAGE_SIZE; xpa += PAGE_SIZE; } flush_tlb_all(); } static inline void srmmu_unmapioaddr(unsigned long virt_addr) { pgd_t *pgdp; pmd_t *pmdp; pte_t *ptep; pgdp = pgd_offset_k(virt_addr); pmdp = srmmu_pmd_offset(pgdp, virt_addr); ptep = srmmu_pte_offset(pmdp, virt_addr); /* No need to flush uncacheable page. */ srmmu_pte_clear(ptep); } static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len) { while (len != 0) { len -= PAGE_SIZE; srmmu_unmapioaddr(virt_addr); virt_addr += PAGE_SIZE; } flush_tlb_all(); } /* * On the SRMMU we do not have the problems with limited tlb entries * for mapping kernel pages, so we just take things from the free page * pool. As a side effect we are putting a little too much pressure * on the gfp() subsystem. This setup also makes the logic of the * iommu mapping code a lot easier as we can transparently handle * mappings on the kernel stack without any special code as we did * need on the sun4c. 
*/ static struct thread_info *srmmu_alloc_thread_info_node(int node) { struct thread_info *ret; ret = (struct thread_info *)__get_free_pages(GFP_KERNEL, THREAD_INFO_ORDER); #ifdef CONFIG_DEBUG_STACK_USAGE if (ret) memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER); #endif /* DEBUG_STACK_USAGE */ return ret; } static void srmmu_free_thread_info(struct thread_info *ti) { free_pages((unsigned long)ti, THREAD_INFO_ORDER); } /* tsunami.S */ extern void tsunami_flush_cache_all(void); extern void tsunami_flush_cache_mm(struct mm_struct *mm); extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page); extern void tsunami_flush_page_to_ram(unsigned long page); extern void tsunami_flush_page_for_dma(unsigned long page); extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); extern void tsunami_flush_tlb_all(void); extern void tsunami_flush_tlb_mm(struct mm_struct *mm); extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); extern void tsunami_setup_blockops(void); /* * Workaround, until we find what's going on with Swift. When low on memory, * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find * out it is already in page tables/ fault again on the same instruction. * I really don't understand it, have checked it and contexts * are right, flush_tlb_all is done as well, and it faults again... * Strange. -jj * * The following code is a deadwood that may be necessary when * we start to make precise page flushes again. 
--zaitcev */ static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep) { #if 0 static unsigned long last; unsigned int val; /* unsigned int n; */ if (address == last) { val = srmmu_hwprobe(address); if (val != 0 && pte_val(*ptep) != val) { printk("swift_update_mmu_cache: " "addr %lx put %08x probed %08x from %p\n", address, pte_val(*ptep), val, __builtin_return_address(0)); srmmu_flush_whole_tlb(); } } last = address; #endif } /* swift.S */ extern void swift_flush_cache_all(void); extern void swift_flush_cache_mm(struct mm_struct *mm); extern void swift_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page); extern void swift_flush_page_to_ram(unsigned long page); extern void swift_flush_page_for_dma(unsigned long page); extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); extern void swift_flush_tlb_all(void); extern void swift_flush_tlb_mm(struct mm_struct *mm); extern void swift_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); #if 0 /* P3: deadwood to debug precise flushes on Swift. */ void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { int cctx, ctx1; page &= PAGE_MASK; if ((ctx1 = vma->vm_mm->context) != -1) { cctx = srmmu_get_context(); /* Is context # ever different from current context? P3 */ if (cctx != ctx1) { printk("flush ctx %02x curr %02x\n", ctx1, cctx); srmmu_set_context(ctx1); swift_flush_page(page); __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (page), "i" (ASI_M_FLUSH_PROBE)); srmmu_set_context(cctx); } else { /* Rm. prot. bits from virt. c. 
*/ /* swift_flush_cache_all(); */ /* swift_flush_cache_page(vma, page); */ swift_flush_page(page); __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (page), "i" (ASI_M_FLUSH_PROBE)); /* same as above: srmmu_flush_tlb_page() */ } } } #endif /* * The following are all MBUS based SRMMU modules, and therefore could * be found in a multiprocessor configuration. On the whole, these * chips seems to be much more touchy about DVMA and page tables * with respect to cache coherency. */ /* Cypress flushes. */ static void cypress_flush_cache_all(void) { volatile unsigned long cypress_sucks; unsigned long faddr, tagval; flush_user_windows(); for(faddr = 0; faddr < 0x10000; faddr += 0x20) { __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" : "=r" (tagval) : "r" (faddr), "r" (0x40000), "i" (ASI_M_DATAC_TAG)); /* If modified and valid, kick it. */ if((tagval & 0x60) == 0x60) cypress_sucks = *(unsigned long *)(0xf0020000 + faddr); } } static void cypress_flush_cache_mm(struct mm_struct *mm) { register unsigned long a, b, c, d, e, f, g; unsigned long flags, faddr; int octx; FLUSH_BEGIN(mm) flush_user_windows(); local_irq_save(flags); octx = srmmu_get_context(); srmmu_set_context(mm->context); a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; faddr = (0x10000 - 0x100); goto inside; do { faddr -= 0x100; inside: __asm__ __volatile__("sta %%g0, [%0] %1\n\t" "sta %%g0, [%0 + %2] %1\n\t" "sta %%g0, [%0 + %3] %1\n\t" "sta %%g0, [%0 + %4] %1\n\t" "sta %%g0, [%0 + %5] %1\n\t" "sta %%g0, [%0 + %6] %1\n\t" "sta %%g0, [%0 + %7] %1\n\t" "sta %%g0, [%0 + %8] %1\n\t" : : "r" (faddr), "i" (ASI_M_FLUSH_CTX), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f), "r" (g)); } while(faddr); srmmu_set_context(octx); local_irq_restore(flags); FLUSH_END } static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; register unsigned long a, b, c, d, e, f, g; unsigned long flags, faddr; int octx; 
FLUSH_BEGIN(mm) flush_user_windows(); local_irq_save(flags); octx = srmmu_get_context(); srmmu_set_context(mm->context); a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; start &= SRMMU_REAL_PMD_MASK; while(start < end) { faddr = (start + (0x10000 - 0x100)); goto inside; do { faddr -= 0x100; inside: __asm__ __volatile__("sta %%g0, [%0] %1\n\t" "sta %%g0, [%0 + %2] %1\n\t" "sta %%g0, [%0 + %3] %1\n\t" "sta %%g0, [%0 + %4] %1\n\t" "sta %%g0, [%0 + %5] %1\n\t" "sta %%g0, [%0 + %6] %1\n\t" "sta %%g0, [%0 + %7] %1\n\t" "sta %%g0, [%0 + %8] %1\n\t" : : "r" (faddr), "i" (ASI_M_FLUSH_SEG), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f), "r" (g)); } while (faddr != start); start += SRMMU_REAL_PMD_SIZE; } srmmu_set_context(octx); local_irq_restore(flags); FLUSH_END } static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page) { register unsigned long a, b, c, d, e, f, g; struct mm_struct *mm = vma->vm_mm; unsigned long flags, line; int octx; FLUSH_BEGIN(mm) flush_user_windows(); local_irq_save(flags); octx = srmmu_get_context(); srmmu_set_context(mm->context); a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; page &= PAGE_MASK; line = (page + PAGE_SIZE) - 0x100; goto inside; do { line -= 0x100; inside: __asm__ __volatile__("sta %%g0, [%0] %1\n\t" "sta %%g0, [%0 + %2] %1\n\t" "sta %%g0, [%0 + %3] %1\n\t" "sta %%g0, [%0 + %4] %1\n\t" "sta %%g0, [%0 + %5] %1\n\t" "sta %%g0, [%0 + %6] %1\n\t" "sta %%g0, [%0 + %7] %1\n\t" "sta %%g0, [%0 + %8] %1\n\t" : : "r" (line), "i" (ASI_M_FLUSH_PAGE), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f), "r" (g)); } while(line != page); srmmu_set_context(octx); local_irq_restore(flags); FLUSH_END } /* Cypress is copy-back, at least that is how we configure it. 
*/ static void cypress_flush_page_to_ram(unsigned long page) { register unsigned long a, b, c, d, e, f, g; unsigned long line; a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; page &= PAGE_MASK; line = (page + PAGE_SIZE) - 0x100; goto inside; do { line -= 0x100; inside: __asm__ __volatile__("sta %%g0, [%0] %1\n\t" "sta %%g0, [%0 + %2] %1\n\t" "sta %%g0, [%0 + %3] %1\n\t" "sta %%g0, [%0 + %4] %1\n\t" "sta %%g0, [%0 + %5] %1\n\t" "sta %%g0, [%0 + %6] %1\n\t" "sta %%g0, [%0 + %7] %1\n\t" "sta %%g0, [%0 + %8] %1\n\t" : : "r" (line), "i" (ASI_M_FLUSH_PAGE), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f), "r" (g)); } while(line != page); } /* Cypress is also IO cache coherent. */ static void cypress_flush_page_for_dma(unsigned long page) { } /* Cypress has unified L2 VIPT, from which both instructions and data * are stored. It does not have an onboard icache of any sort, therefore * no flush is necessary. */ static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) { } static void cypress_flush_tlb_all(void) { srmmu_flush_whole_tlb(); } static void cypress_flush_tlb_mm(struct mm_struct *mm) { FLUSH_BEGIN(mm) __asm__ __volatile__( "lda [%0] %3, %%g5\n\t" "sta %2, [%0] %3\n\t" "sta %%g0, [%1] %4\n\t" "sta %%g5, [%0] %3\n" : /* no outputs */ : "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context), "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE) : "g5"); FLUSH_END } static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; unsigned long size; FLUSH_BEGIN(mm) start &= SRMMU_PGDIR_MASK; size = SRMMU_PGDIR_ALIGN(end) - start; __asm__ __volatile__( "lda [%0] %5, %%g5\n\t" "sta %1, [%0] %5\n" "1:\n\t" "subcc %3, %4, %3\n\t" "bne 1b\n\t" " sta %%g0, [%2 + %3] %6\n\t" "sta %%g5, [%0] %5\n" : /* no outputs */ : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200), "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE) : 
"g5", "cc"); FLUSH_END } static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { struct mm_struct *mm = vma->vm_mm; FLUSH_BEGIN(mm) __asm__ __volatile__( "lda [%0] %3, %%g5\n\t" "sta %1, [%0] %3\n\t" "sta %%g0, [%2] %4\n\t" "sta %%g5, [%0] %3\n" : /* no outputs */ : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK), "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE) : "g5"); FLUSH_END } /* viking.S */ extern void viking_flush_cache_all(void); extern void viking_flush_cache_mm(struct mm_struct *mm); extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page); extern void viking_flush_page_to_ram(unsigned long page); extern void viking_flush_page_for_dma(unsigned long page); extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr); extern void viking_flush_page(unsigned long page); extern void viking_mxcc_flush_page(unsigned long page); extern void viking_flush_tlb_all(void); extern void viking_flush_tlb_mm(struct mm_struct *mm); extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); extern void sun4dsmp_flush_tlb_all(void); extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm); extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); /* hypersparc.S */ extern void hypersparc_flush_cache_all(void); extern void hypersparc_flush_cache_mm(struct mm_struct *mm); extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page); extern void hypersparc_flush_page_to_ram(unsigned long page); extern 
void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

/* Fatal: report a failed early page-table allocation to the PROM and halt. */
static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

/*
 * Pre-MMU variant: populate the pgd/pmd skeleton for [start, end) one
 * PMD_SIZE step at a time, allocating zeroed pmd/pte tables from the
 * nocache pool as needed.  All table stores go through __nocache_fix(),
 * which presumably remaps the nocache region to an address that is
 * writable before the new tables are live -- TODO confirm against
 * the __nocache_fix definition elsewhere in this file.
 */
static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			/* No pmd table yet for this pgd slot: allocate one. */
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			/* No pte table yet for this pmd slot: allocate one. */
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		/* Guard against 32-bit address wrap before advancing. */
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/*
 * Post-MMU variant of the above: identical walk, but the page tables
 * are accessed directly (no __nocache_fix translation).
 */
static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
							    SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			srmmu_pmd_set(pmdp, ptep);
		}
		/* Guard against 32-bit address wrap before advancing. */
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 *
 * Copies the PROM's live translations for [start, end] into our own
 * page tables by hardware-probing (srmmu_hwprobe) each address and
 * detecting whether the PROM used a pgd-level (what == 2), pmd-level
 * (what == 1) or normal (what == 0) mapping.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if(!(prompte = srmmu_hwprobe(start))) {
			/* PROM has no mapping here; try the next page. */
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		/* Aligned to a pmd boundary and the last page of the pmd
		 * probes to the same pte value => one big pmd-level pte. */
		if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}

		/* Same test one level up: a pgd-level pte. */
		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			   prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if(what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
							    SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		if(what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

/* Kernel large-page PTE: cacheable, privileged, valid. */
#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	/* phys_base >> 4: SRMMU ptes hold the physical page number field
	 * shifted, per the KERNEL_PTE layout above. */
	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE.
*/ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry) { unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK); unsigned long vstart = (vbase & SRMMU_PGDIR_MASK); unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes); /* Map "low" memory only */ const unsigned long min_vaddr = PAGE_OFFSET; const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM; if (vstart < min_vaddr || vstart >= max_vaddr) return vstart; if (vend > max_vaddr || vend < min_vaddr) vend = max_vaddr; while(vstart < vend) { do_large_mapping(vstart, pstart); vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE; } return vstart; } static inline void memprobe_error(char *msg) { prom_printf(msg); prom_printf("Halting now...\n"); prom_halt(); } static inline void map_kernel(void) { int i; if (phys_base > 0) { do_large_mapping(PAGE_OFFSET, phys_base); } for (i = 0; sp_banks[i].num_bytes != 0; i++) { map_spbank((unsigned long)__va(sp_banks[i].base_addr), i); } BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE); } /* Paging initialization on the Sparc Reference MMU. */ extern void sparc_context_init(int); void (*poke_srmmu)(void) __cpuinitdata = NULL; extern unsigned long bootmem_init(unsigned long *pages_avail); void __init srmmu_paging_init(void) { int i; phandle cpunode; char node_str[128]; pgd_t *pgd; pmd_t *pmd; pte_t *pte; unsigned long pages_avail; sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */ if (sparc_cpu_model == sun4d) num_contexts = 65536; /* We know it is Viking */ else { /* Find the number of contexts on the srmmu. 
*/ cpunode = prom_getchild(prom_root_node); num_contexts = 0; while(cpunode != 0) { prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); if(!strcmp(node_str, "cpu")) { num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8); break; } cpunode = prom_getsibling(cpunode); } } if(!num_contexts) { prom_printf("Something wrong, can't find cpu node in paging_init.\n"); prom_halt(); } pages_avail = 0; last_valid_pfn = bootmem_init(&pages_avail); srmmu_nocache_calcsize(); srmmu_nocache_init(); srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE)); map_kernel(); /* ctx table has to be physically aligned to its size */ srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t)); srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table); for(i = 0; i < num_contexts; i++) srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir); flush_cache_all(); srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys); #ifdef CONFIG_SMP /* Stop from hanging here... 
*/ local_flush_tlb_all(); #else flush_tlb_all(); #endif poke_srmmu(); srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END); srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END); srmmu_allocate_ptable_skeleton( __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP); srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END); pgd = pgd_offset_k(PKMAP_BASE); pmd = srmmu_pmd_offset(pgd, PKMAP_BASE); pte = srmmu_pte_offset(pmd, PKMAP_BASE); pkmap_page_table = pte; flush_cache_all(); flush_tlb_all(); sparc_context_init(num_contexts); kmap_init(); { unsigned long zones_size[MAX_NR_ZONES]; unsigned long zholes_size[MAX_NR_ZONES]; unsigned long npages; int znum; for (znum = 0; znum < MAX_NR_ZONES; znum++) zones_size[znum] = zholes_size[znum] = 0; npages = max_low_pfn - pfn_base; zones_size[ZONE_DMA] = npages; zholes_size[ZONE_DMA] = npages - pages_avail; npages = highend_pfn - max_low_pfn; zones_size[ZONE_HIGHMEM] = npages; zholes_size[ZONE_HIGHMEM] = npages - calc_highpages(); free_area_init_node(0, zones_size, pfn_base, zholes_size); } } static void srmmu_mmu_info(struct seq_file *m) { seq_printf(m, "MMU type\t: %s\n" "contexts\t: %d\n" "nocache total\t: %ld\n" "nocache used\t: %d\n", srmmu_name, num_contexts, srmmu_nocache_size, srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); } static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte) { } static void srmmu_destroy_context(struct mm_struct *mm) { if(mm->context != NO_CONTEXT) { flush_cache_mm(mm); srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir); flush_tlb_mm(mm); spin_lock(&srmmu_context_spinlock); free_context(mm->context); spin_unlock(&srmmu_context_spinlock); mm->context = NO_CONTEXT; } } /* Init various srmmu chip types. 
*/
/* Fatal fallback when no known SRMMU implementation matches. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

/*
 * Read the virtually-addressed cache geometry (line size, total size)
 * from the PROM cpu nodes.  On SMP the largest cache size and smallest
 * line size seen across cpus win (conservative for flushing).
 */
static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if(!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if(vac_cache_size > max_size)
				max_size = vac_cache_size;
			if(vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if(nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

/*
 * HyperSparc hardware setup: flush, enable cache/write-buffer modes in
 * the MMU control register, enable the instruction cache via the ICR,
 * then clear any pending fault status by reading faddr/fstatus.
 */
static void __cpuinit poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	/* Reads are for their side effect: clear latched fault state. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

/* Install the HyperSparc cache/TLB operation vectors via BTFIXUP. */
static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

/*
 * Cypress hardware setup: scrub or write back the data cache tags
 * (depending on whether the cache was already enabled), clear fault
 * state, then enable cache and copy-back mode.
 */
static void __cpuinit poke_cypress(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long faddr, tagval;
	volatile unsigned long cypress_sucks;
	volatile unsigned long clear;

	/* Reads are for their side effect: clear latched fault state. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	if (!(mreg & CYPRESS_CENABLE)) {
		/* Cache off: just invalidate every tag. */
		for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
			__asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
					     "sta %%g0, [%0] %2\n\t" : :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));
		}
	} else {
		/* Cache on: read each tag; force write-back of dirty lines. */
		for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
			__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
					     "=r" (tagval) :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));

			/* If modified and valid, kick it. */
			if((tagval & 0x60) == 0x60)
				cypress_sucks = *(unsigned long *)
						(0xf0020000 + faddr);
		}
	}

	/* And one more, for our good neighbor, Mr. Broken Cypress. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
	srmmu_set_mmureg(mreg);
}

/* Operation vectors shared by all Cypress variants. */
static void __init init_cypress_common(void)
{
	init_vac_layout();

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_cypress;
}

static void __init init_cypress_604(void)
{
	srmmu_name = "ROSS Cypress-604(UP)";
	srmmu_modtype = Cypress;
	init_cypress_common();
}

/* MP Cypress-605: record per-revision hardware bug workarounds. */
static void __init init_cypress_605(unsigned long mrev)
{
	srmmu_name = "ROSS Cypress-605(MP)";
	if(mrev == 0xe) {
		srmmu_modtype = Cypress_vE;
		hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
	} else {
		if(mrev == 0xd) {
			srmmu_modtype = Cypress_vD;
			hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
		} else {
			srmmu_modtype = Cypress;
		}
	}
	init_cypress_common();
}

/* Swift hardware setup: flush, enable I/D caches, disable branch folding. */
static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, if can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}

#define SWIFT_MASKID_ADDR  0x10003018
/*
 * Identify the Swift revision by reading the mask-ID register via
 * ASI_M_BYPASS, record per-revision hardware bugs, and install the
 * Swift operation vectors.
 */
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch(swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);

	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

/* TurboSparc cache ops: the chip's combined I+D flash clear does it all. */
static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	/* Icache flush only matters for executable mappings. */
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	/* Only flush if the page actually has a hardware translation. */
	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

/* No-op: nothing special needed for signal-insn coherency here. */
static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

/* TurboSparc TLB ops: only a whole-TLB flush is used at every scope. */
static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

/* TurboSparc hardware setup: configure cache-control and MMU registers. */
static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else...
*/
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	/* Low 3 bits of ccreg select the secondary/external cache mode. */
	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg (ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */

	srmmu_set_mmureg(mreg);
}

/* Install the TurboSparc cache/TLB operation vectors via BTFIXUP. */
static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;

	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_turbosparc;
}

/* Tsunami hardware setup: flush both caches, enable I/D, clear ITD. */
static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

/* Install the Tsunami cache/TLB operation vectors via BTFIXUP. */
static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;

	BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

/*
 * Viking hardware setup.  With an MXCC external cache controller,
 * enable its error/prefetch/multiple-command bits and cache page
 * tables; without one, disable table caching and (on SMP, for cpus
 * after the first) turn off mixed-command mode in the breakpoint reg.
 */
static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if(viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC.
*/
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if(smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}

/*
 * Install Viking operation vectors.  The MMODE bit distinguishes a
 * bare Viking (needs the DMA-snoop workaround) from a Viking with an
 * MXCC cache controller (can cache page tables, no snoop bug).
 */
static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if(mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);

		/*
		 * We need this to make sure old viking takes no hits
		 * on it's cache for dma snoops to workaround the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);

		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;

		srmmu_cache_pagetables = 1;

		/* MXCC vikings lack the DMA snooping bug. */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
	}

	BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);

#ifdef CONFIG_SMP
	/* sun4d SMP boxes use the cross-call TLB flush variants. */
	if (sparc_cpu_model == sun4d) {
		BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
	} else
#endif
	{
		BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
	}

	BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);

	poke_srmmu = poke_viking;
}

#ifdef CONFIG_SPARC_LEON

/* LEON needs no per-chip hardware pokes; intentionally empty. */
void __init poke_leonsparc(void)
{
}

/* Install the LEON operation vectors (whole-cache/whole-TLB flushes). */
void __init init_leon(void)
{

	srmmu_name = "LEON";

	BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
			BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
			BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);

	poke_srmmu = poke_leonsparc;

	srmmu_cache_pagetables = 0;

	leon_flush_during_switch = leon_flush_needed();
}
#endif

/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	/* Implementation/version fields live in the top byte of both the
	 * MMU control register and the PSR. */
	mreg = srmmu_get_mmureg();
	psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if(mod_typ == 1) {
		switch(mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		case 0:
		case 2:
			/* Uniprocessor Cypress */
			init_cypress_604();
			break;
		case 10:
		case 11:
		case 12:
			/* _REALLY OLD_ Cypress MP chips... */
		case 13:
		case 14:
		case 15:
			/* MP Cypress mmu/cache-controller */
			init_cypress_605(mod_rev);
			break;
		default:
			/* Some other Cypress revision, assume a 605. */
			init_cypress_605(mod_rev);
			break;
		}
		return;
	}

	/*
	 * Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if(psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Look if it is not a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu.
*/
	if(psr_typ == 4 &&
	   ((psr_vers == 0) ||
	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}

/* don't laugh, static pagetables */
static void srmmu_check_pgt_cache(int low, int high)
{
}

/* Patch sites in the window trap assembly (spwin/fwin/tsetup/rtrap). */
extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
	tsetup_mmu_patchme, rtrap_mmu_patchme;

/* SRMMU-specific stack-check routines the patch sites should branch to. */
extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
	tsetup_srmmu_stackchk, srmmu_rett_stackchk;

extern unsigned long srmmu_fault;

/* Overwrite the instruction at INSN with a branch to DEST. */
#define PATCH_BRANCH(insn, dest) do { \
		iaddr = &(insn); \
		daddr = &(dest); \
		*iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
	} while(0)

/*
 * Rewrite the window trap handlers and the text/data fault trap table
 * slots to branch to the SRMMU variants.
 */
static void __init patch_window_trap_handlers(void)
{
	unsigned long *iaddr, *daddr;

	PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
	PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
	PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
	PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
}

#ifdef CONFIG_SMP
/* Local cross-calls.
*/
/*
 * SMP wrapper: cross-call the per-CPU DMA page flush to all other CPUs,
 * then perform it locally as well.
 */
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
	local_flush_page_for_dma(page);
}
#endif

/* Encode a file page offset as a non-present, file-backed SRMMU PTE. */
static pte_t srmmu_pgoff_to_pte(unsigned long pgoff)
{
	return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
}

/* Inverse of srmmu_pgoff_to_pte(): recover the page offset from a file PTE. */
static unsigned long srmmu_pte_to_pgoff(pte_t pte)
{
	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}

/* Clear the cacheable bit so the resulting mapping is uncached. */
static pgprot_t srmmu_pgprot_noncached(pgprot_t prot)
{
	prot &= ~__pgprot(SRMMU_CACHE);
	return prot;
}

/* Load up routines and constants for sun4m and sun4d mmu */
void __init ld_mmu_srmmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);
	extern void ___xchg32_sun4md(void);

	/* Page-table geometry constants for the SRMMU layout. */
	BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
	BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
	BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);
	BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
	BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);

	/* Page protection encodings. */
	BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
	PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
	BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
	BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
	BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
	page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);

	/* Functions */
	BTFIXUPSET_CALL(pgprot_noncached, srmmu_pgprot_noncached, BTFIXUPCALL_NORM);
#ifndef CONFIG_SMP
	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif
	BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
	BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);

	/* pte/pmd/pgd predicate and clear operations. */
	BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);

	BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);

	BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);

	/* PTE construction and table linkage. */
	BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);

	BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
	BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);

	/* Page-table allocation/free helpers. */
	BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);

	/* PTE status bits, patched in as immediates where possible. */
	BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
	BTFIXUPSET_HALF(pte_filei, SRMMU_FILE);
	BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
	BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
	BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
	BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
	BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);

	/* Swap-entry encode/decode. */
	BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(alloc_thread_info_node, srmmu_alloc_thread_info_node, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgoff_to_pte, srmmu_pgoff_to_pte, BTFIXUPCALL_NORM);

	/* Probe the actual SRMMU implementation, then patch the window
	 * trap handlers for SRMMU stack checking. */
	get_srmmu_type();
	patch_window_trap_handlers();

#ifdef CONFIG_SMP
	/* El switcheroo: save the current (UP) flush entry points as the
	 * local_* variants, then install the cross-calling SMP versions. */
	BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
	BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
	BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
	BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
	BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
	BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
	BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
	BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
	BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
	BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
	BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);

	BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
	/* sun4d and LEON keep per-CPU TLB flushes local. */
	if (sparc_cpu_model != sun4d && sparc_cpu_model != sparc_leon) {
		BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);

	if (poke_srmmu == poke_viking) {
		/* Avoid unnecessary cross calls. */
		BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
		BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
		BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
		BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
		BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
		BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
		BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
	}
#endif

	/* Pick the DMA mapping layer and bring up SMP for this machine. */
	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else if (sparc_cpu_model == sparc_leon)
		leon_init_smp();
	else
		sun4m_init_smp();
#endif
}
gpl-2.0
loongson-community/linux_loongson
sound/drivers/portman2x4.c
195
25841
/* * Driver for Midiman Portman2x4 parallel port midi interface * * Copyright (c) by Levent Guendogdu <levon@feature-it.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * ChangeLog * Jan 24 2007 Matthias Koenig <mkoenig@suse.de> * - cleanup and rewrite * Sep 30 2004 Tobias Gehrig <tobias@gehrig.tk> * - source code cleanup * Sep 03 2004 Tobias Gehrig <tobias@gehrig.tk> * - fixed compilation problem with alsa 1.0.6a (removed MODULE_CLASSES, * MODULE_PARM_SYNTAX and changed MODULE_DEVICES to * MODULE_SUPPORTED_DEVICE) * Mar 24 2004 Tobias Gehrig <tobias@gehrig.tk> * - added 2.6 kernel support * Mar 18 2004 Tobias Gehrig <tobias@gehrig.tk> * - added parport_unregister_driver to the startup routine if the driver fails to detect a portman * - added support for all 4 output ports in portman_putmidi * Mar 17 2004 Tobias Gehrig <tobias@gehrig.tk> * - added checks for opened input device in interrupt handler * Feb 20 2004 Tobias Gehrig <tobias@gehrig.tk> * - ported from alsa 0.5 to 1.0 */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/parport.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #include <sound/control.h> #define CARD_NAME "Portman 2x4" #define DRIVER_NAME "portman" #define PLATFORM_DRIVER "snd_portman2x4" static int 
index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; static struct platform_device *platform_devices[SNDRV_CARDS]; static int device_count; module_param_array(index, int, NULL, S_IRUGO); MODULE_PARM_DESC(index, "Index value for " CARD_NAME " soundcard."); module_param_array(id, charp, NULL, S_IRUGO); MODULE_PARM_DESC(id, "ID string for " CARD_NAME " soundcard."); module_param_array(enable, bool, NULL, S_IRUGO); MODULE_PARM_DESC(enable, "Enable " CARD_NAME " soundcard."); MODULE_AUTHOR("Levent Guendogdu, Tobias Gehrig, Matthias Koenig"); MODULE_DESCRIPTION("Midiman Portman2x4"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Midiman,Portman2x4}}"); /********************************************************************* * Chip specific *********************************************************************/ #define PORTMAN_NUM_INPUT_PORTS 2 #define PORTMAN_NUM_OUTPUT_PORTS 4 struct portman { spinlock_t reg_lock; struct snd_card *card; struct snd_rawmidi *rmidi; struct pardevice *pardev; int pardev_claimed; int open_count; int mode[PORTMAN_NUM_INPUT_PORTS]; struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS]; }; static int portman_free(struct portman *pm) { kfree(pm); return 0; } static int __devinit portman_create(struct snd_card *card, struct pardevice *pardev, struct portman **rchip) { struct portman *pm; *rchip = NULL; pm = kzalloc(sizeof(struct portman), GFP_KERNEL); if (pm == NULL) return -ENOMEM; /* Init chip specific data */ spin_lock_init(&pm->reg_lock); pm->card = card; pm->pardev = pardev; *rchip = pm; return 0; } /********************************************************************* * HW related constants *********************************************************************/ /* Standard PC parallel port status register equates. */ #define PP_STAT_BSY 0x80 /* Busy status. Inverted. */ #define PP_STAT_ACK 0x40 /* Acknowledge. Non-Inverted. 
*/ #define PP_STAT_POUT 0x20 /* Paper Out. Non-Inverted. */ #define PP_STAT_SEL 0x10 /* Select. Non-Inverted. */ #define PP_STAT_ERR 0x08 /* Error. Non-Inverted. */ /* Standard PC parallel port command register equates. */ #define PP_CMD_IEN 0x10 /* IRQ Enable. Non-Inverted. */ #define PP_CMD_SELI 0x08 /* Select Input. Inverted. */ #define PP_CMD_INIT 0x04 /* Init Printer. Non-Inverted. */ #define PP_CMD_FEED 0x02 /* Auto Feed. Inverted. */ #define PP_CMD_STB 0x01 /* Strobe. Inverted. */ /* Parallel Port Command Register as implemented by PCP2x4. */ #define INT_EN PP_CMD_IEN /* Interrupt enable. */ #define STROBE PP_CMD_STB /* Command strobe. */ /* The parallel port command register field (b1..b3) selects the * various "registers" within the PC/P 2x4. These are the internal * address of these "registers" that must be written to the parallel * port command register. */ #define RXDATA0 (0 << 1) /* PCP RxData channel 0. */ #define RXDATA1 (1 << 1) /* PCP RxData channel 1. */ #define GEN_CTL (2 << 1) /* PCP General Control Register. */ #define SYNC_CTL (3 << 1) /* PCP Sync Control Register. */ #define TXDATA0 (4 << 1) /* PCP TxData channel 0. */ #define TXDATA1 (5 << 1) /* PCP TxData channel 1. */ #define TXDATA2 (6 << 1) /* PCP TxData channel 2. */ #define TXDATA3 (7 << 1) /* PCP TxData channel 3. */ /* Parallel Port Status Register as implemented by PCP2x4. */ #define ESTB PP_STAT_POUT /* Echoed strobe. */ #define INT_REQ PP_STAT_ACK /* Input data int request. */ #define BUSY PP_STAT_ERR /* Interface Busy. */ /* Parallel Port Status Register BUSY and SELECT lines are multiplexed * between several functions. Depending on which 2x4 "register" is * currently selected (b1..b3), the BUSY and SELECT lines are * assigned as follows: * * SELECT LINE: A3 A2 A1 * -------- */ #define RXAVAIL PP_STAT_SEL /* Rx Available, channel 0. 0 0 0 */ // RXAVAIL1 PP_STAT_SEL /* Rx Available, channel 1. 0 0 1 */ #define SYNC_STAT PP_STAT_SEL /* Reserved - Sync Status. 
0 1 0 */ // /* Reserved. 0 1 1 */ #define TXEMPTY PP_STAT_SEL /* Tx Empty, channel 0. 1 0 0 */ // TXEMPTY1 PP_STAT_SEL /* Tx Empty, channel 1. 1 0 1 */ // TXEMPTY2 PP_STAT_SEL /* Tx Empty, channel 2. 1 1 0 */ // TXEMPTY3 PP_STAT_SEL /* Tx Empty, channel 3. 1 1 1 */ /* BUSY LINE: A3 A2 A1 * -------- */ #define RXDATA PP_STAT_BSY /* Rx Input Data, channel 0. 0 0 0 */ // RXDATA1 PP_STAT_BSY /* Rx Input Data, channel 1. 0 0 1 */ #define SYNC_DATA PP_STAT_BSY /* Reserved - Sync Data. 0 1 0 */ /* Reserved. 0 1 1 */ #define DATA_ECHO PP_STAT_BSY /* Parallel Port Data Echo. 1 0 0 */ #define A0_ECHO PP_STAT_BSY /* Address 0 Echo. 1 0 1 */ #define A1_ECHO PP_STAT_BSY /* Address 1 Echo. 1 1 0 */ #define A2_ECHO PP_STAT_BSY /* Address 2 Echo. 1 1 1 */ #define PORTMAN2X4_MODE_INPUT_TRIGGERED 0x01 /********************************************************************* * Hardware specific functions *********************************************************************/ static inline void portman_write_command(struct portman *pm, u8 value) { parport_write_control(pm->pardev->port, value); } static inline u8 portman_read_command(struct portman *pm) { return parport_read_control(pm->pardev->port); } static inline u8 portman_read_status(struct portman *pm) { return parport_read_status(pm->pardev->port); } static inline u8 portman_read_data(struct portman *pm) { return parport_read_data(pm->pardev->port); } static inline void portman_write_data(struct portman *pm, u8 value) { parport_write_data(pm->pardev->port, value); } static void portman_write_midi(struct portman *pm, int port, u8 mididata) { int command = ((port + 4) << 1); /* Get entering data byte and port number in BL and BH respectively. * Set up Tx Channel address field for use with PP Cmd Register. * Store address field in BH register. * Inputs: AH = Output port number (0..3). * AL = Data byte. 
* command = TXDATA0 | INT_EN; * Align port num with address field (b1...b3), * set address for TXDatax, Strobe=0 */ command |= INT_EN; /* Disable interrupts so that the process is not interrupted, then * write the address associated with the current Tx channel to the * PP Command Reg. Do not set the Strobe signal yet. */ do { portman_write_command(pm, command); /* While the address lines settle, write parallel output data to * PP Data Reg. This has no effect until Strobe signal is asserted. */ portman_write_data(pm, mididata); /* If PCP channel's TxEmpty is set (TxEmpty is read through the PP * Status Register), then go write data. Else go back and wait. */ } while ((portman_read_status(pm) & TXEMPTY) != TXEMPTY); /* TxEmpty is set. Maintain PC/P destination address and assert * Strobe through the PP Command Reg. This will Strobe data into * the PC/P transmitter and set the PC/P BUSY signal. */ portman_write_command(pm, command | STROBE); /* Wait for strobe line to settle and echo back through hardware. * Once it has echoed back, assume that the address and data lines * have settled! */ while ((portman_read_status(pm) & ESTB) == 0) cpu_relax(); /* Release strobe and immediately re-allow interrupts. */ portman_write_command(pm, command); while ((portman_read_status(pm) & ESTB) == ESTB) cpu_relax(); /* PC/P BUSY is now set. We must wait until BUSY resets itself. * We'll reenable ints while we're waiting. */ while ((portman_read_status(pm) & BUSY) == BUSY) cpu_relax(); /* Data sent. */ } /* * Read MIDI byte from port * Attempt to read input byte from specified hardware input port (0..). * Return -1 if no data */ static int portman_read_midi(struct portman *pm, int port) { unsigned char midi_data = 0; unsigned char cmdout; /* Saved address+IE bit. */ /* Make sure clocking edge is down before starting... */ portman_write_data(pm, 0); /* Make sure edge is down. */ /* Set destination address to PCP. */ cmdout = (port << 1) | INT_EN; /* Address + IE + No Strobe. 
*/ portman_write_command(pm, cmdout); while ((portman_read_status(pm) & ESTB) == ESTB) cpu_relax(); /* Wait for strobe echo. */ /* After the address lines settle, check multiplexed RxAvail signal. * If data is available, read it. */ if ((portman_read_status(pm) & RXAVAIL) == 0) return -1; /* No data. */ /* Set the Strobe signal to enable the Rx clocking circuitry. */ portman_write_command(pm, cmdout | STROBE); /* Write address+IE+Strobe. */ while ((portman_read_status(pm) & ESTB) == 0) cpu_relax(); /* Wait for strobe echo. */ /* The first data bit (msb) is already sitting on the input line. */ midi_data = (portman_read_status(pm) & 128); portman_write_data(pm, 1); /* Cause rising edge, which shifts data. */ /* Data bit 6. */ portman_write_data(pm, 0); /* Cause falling edge while data settles. */ midi_data |= (portman_read_status(pm) >> 1) & 64; portman_write_data(pm, 1); /* Cause rising edge, which shifts data. */ /* Data bit 5. */ portman_write_data(pm, 0); /* Cause falling edge while data settles. */ midi_data |= (portman_read_status(pm) >> 2) & 32; portman_write_data(pm, 1); /* Cause rising edge, which shifts data. */ /* Data bit 4. */ portman_write_data(pm, 0); /* Cause falling edge while data settles. */ midi_data |= (portman_read_status(pm) >> 3) & 16; portman_write_data(pm, 1); /* Cause rising edge, which shifts data. */ /* Data bit 3. */ portman_write_data(pm, 0); /* Cause falling edge while data settles. */ midi_data |= (portman_read_status(pm) >> 4) & 8; portman_write_data(pm, 1); /* Cause rising edge, which shifts data. */ /* Data bit 2. */ portman_write_data(pm, 0); /* Cause falling edge while data settles. */ midi_data |= (portman_read_status(pm) >> 5) & 4; portman_write_data(pm, 1); /* Cause rising edge, which shifts data. */ /* Data bit 1. */ portman_write_data(pm, 0); /* Cause falling edge while data settles. */ midi_data |= (portman_read_status(pm) >> 6) & 2; portman_write_data(pm, 1); /* Cause rising edge, which shifts data. */ /* Data bit 0. 
*/ portman_write_data(pm, 0); /* Cause falling edge while data settles. */ midi_data |= (portman_read_status(pm) >> 7) & 1; portman_write_data(pm, 1); /* Cause rising edge, which shifts data. */ portman_write_data(pm, 0); /* Return data clock low. */ /* De-assert Strobe and return data. */ portman_write_command(pm, cmdout); /* Output saved address+IE. */ /* Wait for strobe echo. */ while ((portman_read_status(pm) & ESTB) == ESTB) cpu_relax(); return (midi_data & 255); /* Shift back and return value. */ } /* * Checks if any input data on the given channel is available * Checks RxAvail */ static int portman_data_avail(struct portman *pm, int channel) { int command = INT_EN; switch (channel) { case 0: command |= RXDATA0; break; case 1: command |= RXDATA1; break; } /* Write hardware (assumme STROBE=0) */ portman_write_command(pm, command); /* Check multiplexed RxAvail signal */ if ((portman_read_status(pm) & RXAVAIL) == RXAVAIL) return 1; /* Data available */ /* No Data available */ return 0; } /* * Flushes any input */ static void portman_flush_input(struct portman *pm, unsigned char port) { /* Local variable for counting things */ unsigned int i = 0; unsigned char command = 0; switch (port) { case 0: command = RXDATA0; break; case 1: command = RXDATA1; break; default: snd_printk(KERN_WARNING "portman_flush_input() Won't flush port %i\n", port); return; } /* Set address for specified channel in port and allow to settle. */ portman_write_command(pm, command); /* Assert the Strobe and wait for echo back. */ portman_write_command(pm, command | STROBE); /* Wait for ESTB */ while ((portman_read_status(pm) & ESTB) == 0) cpu_relax(); /* Output clock cycles to the Rx circuitry. */ portman_write_data(pm, 0); /* Flush 250 bits... */ for (i = 0; i < 250; i++) { portman_write_data(pm, 1); portman_write_data(pm, 0); } /* Deassert the Strobe signal of the port and wait for it to settle. 
*/ portman_write_command(pm, command | INT_EN); /* Wait for settling */ while ((portman_read_status(pm) & ESTB) == ESTB) cpu_relax(); } static int portman_probe(struct parport *p) { /* Initialize the parallel port data register. Will set Rx clocks * low in case we happen to be addressing the Rx ports at this time. */ /* 1 */ parport_write_data(p, 0); /* Initialize the parallel port command register, thus initializing * hardware handshake lines to midi box: * * Strobe = 0 * Interrupt Enable = 0 */ /* 2 */ parport_write_control(p, 0); /* Check if Portman PC/P 2x4 is out there. */ /* 3 */ parport_write_control(p, RXDATA0); /* Write Strobe=0 to command reg. */ /* Check for ESTB to be clear */ /* 4 */ if ((parport_read_status(p) & ESTB) == ESTB) return 1; /* CODE 1 - Strobe Failure. */ /* Set for RXDATA0 where no damage will be done. */ /* 5 */ parport_write_control(p, RXDATA0 + STROBE); /* Write Strobe=1 to command reg. */ /* 6 */ if ((parport_read_status(p) & ESTB) != ESTB) return 1; /* CODE 1 - Strobe Failure. */ /* 7 */ parport_write_control(p, 0); /* Reset Strobe=0. */ /* Check if Tx circuitry is functioning properly. If initialized * unit TxEmpty is false, send out char and see if if goes true. */ /* 8 */ parport_write_control(p, TXDATA0); /* Tx channel 0, strobe off. */ /* If PCP channel's TxEmpty is set (TxEmpty is read through the PP * Status Register), then go write data. Else go back and wait. */ /* 9 */ if ((parport_read_status(p) & TXEMPTY) == 0) return 2; /* Return OK status. 
*/ return 0; } static int portman_device_init(struct portman *pm) { portman_flush_input(pm, 0); portman_flush_input(pm, 1); return 0; } /********************************************************************* * Rawmidi *********************************************************************/ static int snd_portman_midi_open(struct snd_rawmidi_substream *substream) { return 0; } static int snd_portman_midi_close(struct snd_rawmidi_substream *substream) { return 0; } static void snd_portman_midi_input_trigger(struct snd_rawmidi_substream *substream, int up) { struct portman *pm = substream->rmidi->private_data; unsigned long flags; spin_lock_irqsave(&pm->reg_lock, flags); if (up) pm->mode[substream->number] |= PORTMAN2X4_MODE_INPUT_TRIGGERED; else pm->mode[substream->number] &= ~PORTMAN2X4_MODE_INPUT_TRIGGERED; spin_unlock_irqrestore(&pm->reg_lock, flags); } static void snd_portman_midi_output_trigger(struct snd_rawmidi_substream *substream, int up) { struct portman *pm = substream->rmidi->private_data; unsigned long flags; unsigned char byte; spin_lock_irqsave(&pm->reg_lock, flags); if (up) { while ((snd_rawmidi_transmit(substream, &byte, 1) == 1)) portman_write_midi(pm, substream->number, byte); } spin_unlock_irqrestore(&pm->reg_lock, flags); } static struct snd_rawmidi_ops snd_portman_midi_output = { .open = snd_portman_midi_open, .close = snd_portman_midi_close, .trigger = snd_portman_midi_output_trigger, }; static struct snd_rawmidi_ops snd_portman_midi_input = { .open = snd_portman_midi_open, .close = snd_portman_midi_close, .trigger = snd_portman_midi_input_trigger, }; /* Create and initialize the rawmidi component */ static int __devinit snd_portman_rawmidi_create(struct snd_card *card) { struct portman *pm = card->private_data; struct snd_rawmidi *rmidi; struct snd_rawmidi_substream *substream; int err; err = snd_rawmidi_new(card, CARD_NAME, 0, PORTMAN_NUM_OUTPUT_PORTS, PORTMAN_NUM_INPUT_PORTS, &rmidi); if (err < 0) return err; rmidi->private_data = pm; 
strcpy(rmidi->name, CARD_NAME); rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX; pm->rmidi = rmidi; /* register rawmidi ops */ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_portman_midi_output); snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_portman_midi_input); /* name substreams */ /* output */ list_for_each_entry(substream, &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT].substreams, list) { sprintf(substream->name, "Portman2x4 %d", substream->number+1); } /* input */ list_for_each_entry(substream, &rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT].substreams, list) { pm->midi_input[substream->number] = substream; sprintf(substream->name, "Portman2x4 %d", substream->number+1); } return err; } /********************************************************************* * parport stuff *********************************************************************/ static void snd_portman_interrupt(void *userdata) { unsigned char midivalue = 0; struct portman *pm = ((struct snd_card*)userdata)->private_data; spin_lock(&pm->reg_lock); /* While any input data is waiting */ while ((portman_read_status(pm) & INT_REQ) == INT_REQ) { /* If data available on channel 0, read it and stuff it into the queue. */ if (portman_data_avail(pm, 0)) { /* Read Midi */ midivalue = portman_read_midi(pm, 0); /* put midi into queue... */ if (pm->mode[0] & PORTMAN2X4_MODE_INPUT_TRIGGERED) snd_rawmidi_receive(pm->midi_input[0], &midivalue, 1); } /* If data available on channel 1, read it and stuff it into the queue. */ if (portman_data_avail(pm, 1)) { /* Read Midi */ midivalue = portman_read_midi(pm, 1); /* put midi into queue... 
*/ if (pm->mode[1] & PORTMAN2X4_MODE_INPUT_TRIGGERED) snd_rawmidi_receive(pm->midi_input[1], &midivalue, 1); } } spin_unlock(&pm->reg_lock); } static int __devinit snd_portman_probe_port(struct parport *p) { struct pardevice *pardev; int res; pardev = parport_register_device(p, DRIVER_NAME, NULL, NULL, NULL, 0, NULL); if (!pardev) return -EIO; if (parport_claim(pardev)) { parport_unregister_device(pardev); return -EIO; } res = portman_probe(p); parport_release(pardev); parport_unregister_device(pardev); return res ? -EIO : 0; } static void __devinit snd_portman_attach(struct parport *p) { struct platform_device *device; device = platform_device_alloc(PLATFORM_DRIVER, device_count); if (!device) return; /* Temporary assignment to forward the parport */ platform_set_drvdata(device, p); if (platform_device_add(device) < 0) { platform_device_put(device); return; } /* Since we dont get the return value of probe * We need to check if device probing succeeded or not */ if (!platform_get_drvdata(device)) { platform_device_unregister(device); return; } /* register device in global table */ platform_devices[device_count] = device; device_count++; } static void snd_portman_detach(struct parport *p) { /* nothing to do here */ } static struct parport_driver portman_parport_driver = { .name = "portman2x4", .attach = snd_portman_attach, .detach = snd_portman_detach }; /********************************************************************* * platform stuff *********************************************************************/ static void snd_portman_card_private_free(struct snd_card *card) { struct portman *pm = card->private_data; struct pardevice *pardev = pm->pardev; if (pardev) { if (pm->pardev_claimed) parport_release(pardev); parport_unregister_device(pardev); } portman_free(pm); } static int __devinit snd_portman_probe(struct platform_device *pdev) { struct pardevice *pardev; struct parport *p; int dev = pdev->id; struct snd_card *card = NULL; struct portman *pm = NULL; int 
err; p = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) return -ENOENT; if ((err = snd_portman_probe_port(p)) < 0) return err; card = snd_card_new(index[dev], id[dev], THIS_MODULE, 0); if (card == NULL) { snd_printd("Cannot create card\n"); return -ENOMEM; } strcpy(card->driver, DRIVER_NAME); strcpy(card->shortname, CARD_NAME); sprintf(card->longname, "%s at 0x%lx, irq %i", card->shortname, p->base, p->irq); pardev = parport_register_device(p, /* port */ DRIVER_NAME, /* name */ NULL, /* preempt */ NULL, /* wakeup */ snd_portman_interrupt, /* ISR */ PARPORT_DEV_EXCL, /* flags */ (void *)card); /* private */ if (pardev == NULL) { snd_printd("Cannot register pardevice\n"); err = -EIO; goto __err; } if ((err = portman_create(card, pardev, &pm)) < 0) { snd_printd("Cannot create main component\n"); parport_unregister_device(pardev); goto __err; } card->private_data = pm; card->private_free = snd_portman_card_private_free; if ((err = snd_portman_rawmidi_create(card)) < 0) { snd_printd("Creating Rawmidi component failed\n"); goto __err; } /* claim parport */ if (parport_claim(pardev)) { snd_printd("Cannot claim parport 0x%lx\n", pardev->port->base); err = -EIO; goto __err; } pm->pardev_claimed = 1; /* init device */ if ((err = portman_device_init(pm)) < 0) goto __err; platform_set_drvdata(pdev, card); snd_card_set_dev(card, &pdev->dev); /* At this point card will be usable */ if ((err = snd_card_register(card)) < 0) { snd_printd("Cannot register card\n"); goto __err; } snd_printk(KERN_INFO "Portman 2x4 on 0x%lx\n", p->base); return 0; __err: snd_card_free(card); return err; } static int __devexit snd_portman_remove(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); if (card) snd_card_free(card); return 0; } static struct platform_driver snd_portman_driver = { .probe = snd_portman_probe, .remove = __devexit_p(snd_portman_remove), .driver = { .name = PLATFORM_DRIVER } }; 
/*********************************************************************
 * module init stuff
 *********************************************************************/

/*
 * Tear down everything module init / parport attach set up: unregister
 * every platform device created in snd_portman_attach(), then the
 * platform driver and the parport driver themselves.
 */
static void snd_portman_unregister_all(void)
{
	int i;

	for (i = 0; i < SNDRV_CARDS; ++i) {
		if (platform_devices[i]) {
			platform_device_unregister(platform_devices[i]);
			platform_devices[i] = NULL;
		}
	}
	platform_driver_unregister(&snd_portman_driver);
	parport_unregister_driver(&portman_parport_driver);
}

/*
 * Module entry point.  The platform driver must be registered first;
 * registering the parport driver then calls snd_portman_attach() for
 * each present port, which creates platform devices and bumps
 * device_count.  If no device was detected, undo everything and
 * report -ENODEV.
 */
static int __init snd_portman_module_init(void)
{
	int err;

	if ((err = platform_driver_register(&snd_portman_driver)) < 0)
		return err;

	if (parport_register_driver(&portman_parport_driver) != 0) {
		/* Parport registration failed: roll back the platform driver. */
		platform_driver_unregister(&snd_portman_driver);
		return -EIO;
	}

	if (device_count == 0) {
		/* No hardware found by any attach callback. */
		snd_portman_unregister_all();
		return -ENODEV;
	}

	return 0;
}

/* Module exit point: unregister all devices and both drivers. */
static void __exit snd_portman_module_exit(void)
{
	snd_portman_unregister_all();
}

module_init(snd_portman_module_init);
module_exit(snd_portman_module_exit);
gpl-2.0
dwengen/linux
drivers/iio/adc/viperboard_adc.c
195
3816
/* * Nano River Technologies viperboard IIO ADC driver * * (C) 2012 by Lemonage GmbH * Author: Lars Poeschel <poeschel@lemonage.de> * All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/usb.h> #include <linux/iio/iio.h> #include <linux/mfd/viperboard.h> #define VPRBRD_ADC_CMD_GET 0x00 struct vprbrd_adc_msg { u8 cmd; u8 chan; u8 val; } __packed; struct vprbrd_adc { struct vprbrd *vb; }; #define VPRBRD_ADC_CHANNEL(_index) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = _index, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ } static struct iio_chan_spec const vprbrd_adc_iio_channels[] = { VPRBRD_ADC_CHANNEL(0), VPRBRD_ADC_CHANNEL(1), VPRBRD_ADC_CHANNEL(2), VPRBRD_ADC_CHANNEL(3), }; static int vprbrd_iio_read_raw(struct iio_dev *iio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long info) { int ret, error = 0; struct vprbrd_adc *adc = iio_priv(iio_dev); struct vprbrd *vb = adc->vb; struct vprbrd_adc_msg *admsg = (struct vprbrd_adc_msg *)vb->buf; switch (info) { case IIO_CHAN_INFO_RAW: mutex_lock(&vb->lock); admsg->cmd = VPRBRD_ADC_CMD_GET; admsg->chan = chan->channel; admsg->val = 0x00; ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC, VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, admsg, sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS); if (ret != sizeof(struct vprbrd_adc_msg)) { dev_err(&iio_dev->dev, "usb send error on adc read\n"); error = -EREMOTEIO; } ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC, VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, admsg, 
sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS); *val = admsg->val; mutex_unlock(&vb->lock); if (ret != sizeof(struct vprbrd_adc_msg)) { dev_err(&iio_dev->dev, "usb recv error on adc read\n"); error = -EREMOTEIO; } if (error) goto error; return IIO_VAL_INT; default: error = -EINVAL; break; } error: return error; } static const struct iio_info vprbrd_adc_iio_info = { .read_raw = &vprbrd_iio_read_raw, .driver_module = THIS_MODULE, }; static int vprbrd_adc_probe(struct platform_device *pdev) { struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent); struct vprbrd_adc *adc; struct iio_dev *indio_dev; int ret; /* registering iio */ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc)); if (!indio_dev) { dev_err(&pdev->dev, "failed allocating iio device\n"); return -ENOMEM; } adc = iio_priv(indio_dev); adc->vb = vb; indio_dev->name = "viperboard adc"; indio_dev->dev.parent = &pdev->dev; indio_dev->info = &vprbrd_adc_iio_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = vprbrd_adc_iio_channels; indio_dev->num_channels = ARRAY_SIZE(vprbrd_adc_iio_channels); ret = devm_iio_device_register(&pdev->dev, indio_dev); if (ret) { dev_err(&pdev->dev, "could not register iio (adc)"); return ret; } return 0; } static struct platform_driver vprbrd_adc_driver = { .driver = { .name = "viperboard-adc", .owner = THIS_MODULE, }, .probe = vprbrd_adc_probe, }; module_platform_driver(vprbrd_adc_driver); MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>"); MODULE_DESCRIPTION("IIO ADC driver for Nano River Techs Viperboard"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:viperboard-adc");
gpl-2.0
shubhangi-shrivastava/drm-intel-nightly
sound/core/seq/oss/seq_oss_writeq.c
451
4151
/* * OSS compatible sequencer driver * * seq_oss_writeq.c - write queue and sync * * Copyright (C) 1998,99 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "seq_oss_writeq.h" #include "seq_oss_event.h" #include "seq_oss_timer.h" #include <sound/seq_oss_legacy.h> #include "../seq_lock.h" #include "../seq_clientmgr.h" #include <linux/wait.h> #include <linux/slab.h> /* * create a write queue record */ struct seq_oss_writeq * snd_seq_oss_writeq_new(struct seq_oss_devinfo *dp, int maxlen) { struct seq_oss_writeq *q; struct snd_seq_client_pool pool; if ((q = kzalloc(sizeof(*q), GFP_KERNEL)) == NULL) return NULL; q->dp = dp; q->maxlen = maxlen; spin_lock_init(&q->sync_lock); q->sync_event_put = 0; q->sync_time = 0; init_waitqueue_head(&q->sync_sleep); memset(&pool, 0, sizeof(pool)); pool.client = dp->cseq; pool.output_pool = maxlen; pool.output_room = maxlen / 2; snd_seq_oss_control(dp, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool); return q; } /* * delete the write queue */ void snd_seq_oss_writeq_delete(struct seq_oss_writeq *q) { if (q) { snd_seq_oss_writeq_clear(q); /* to be sure */ kfree(q); } } /* * reset the write queue */ void snd_seq_oss_writeq_clear(struct seq_oss_writeq *q) { struct snd_seq_remove_events reset; memset(&reset, 0, sizeof(reset)); reset.remove_mode = SNDRV_SEQ_REMOVE_OUTPUT; 
/* remove all */ snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_REMOVE_EVENTS, &reset); /* wake up sleepers if any */ snd_seq_oss_writeq_wakeup(q, 0); } /* * wait until the write buffer has enough room */ int snd_seq_oss_writeq_sync(struct seq_oss_writeq *q) { struct seq_oss_devinfo *dp = q->dp; abstime_t time; time = snd_seq_oss_timer_cur_tick(dp->timer); if (q->sync_time >= time) return 0; /* already finished */ if (! q->sync_event_put) { struct snd_seq_event ev; union evrec *rec; /* put echoback event */ memset(&ev, 0, sizeof(ev)); ev.flags = 0; ev.type = SNDRV_SEQ_EVENT_ECHO; ev.time.tick = time; /* echo back to itself */ snd_seq_oss_fill_addr(dp, &ev, dp->addr.client, dp->addr.port); rec = (union evrec *)&ev.data; rec->t.code = SEQ_SYNCTIMER; rec->t.time = time; q->sync_event_put = 1; snd_seq_kernel_client_enqueue_blocking(dp->cseq, &ev, NULL, 0, 0); } wait_event_interruptible_timeout(q->sync_sleep, ! q->sync_event_put, HZ); if (signal_pending(current)) /* interrupted - return 0 to finish sync */ q->sync_event_put = 0; if (! 
q->sync_event_put || q->sync_time >= time) return 0; return 1; } /* * wake up sync - echo event was catched */ void snd_seq_oss_writeq_wakeup(struct seq_oss_writeq *q, abstime_t time) { unsigned long flags; spin_lock_irqsave(&q->sync_lock, flags); q->sync_time = time; q->sync_event_put = 0; wake_up(&q->sync_sleep); spin_unlock_irqrestore(&q->sync_lock, flags); } /* * return the unused pool size */ int snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq *q) { struct snd_seq_client_pool pool; pool.client = q->dp->cseq; snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool); return pool.output_free; } /* * set output threshold size from ioctl */ void snd_seq_oss_writeq_set_output(struct seq_oss_writeq *q, int val) { struct snd_seq_client_pool pool; pool.client = q->dp->cseq; snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool); pool.output_room = val; snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool); }
gpl-2.0
fengguoqing/linux3.10-mini2440
drivers/video/backlight/omap1_bl.c
2243
4819
/* * Backlight driver for OMAP based boards. * * Copyright (c) 2006 Andrzej Zaborowski <balrog@zabor.org> * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This package is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this package; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/slab.h> #include <linux/platform_data/omap1_bl.h> #include <mach/hardware.h> #include <mach/mux.h> #define OMAPBL_MAX_INTENSITY 0xff struct omap_backlight { int powermode; int current_intensity; struct device *dev; struct omap_backlight_config *pdata; }; static inline void omapbl_send_intensity(int intensity) { omap_writeb(intensity, OMAP_PWL_ENABLE); } static inline void omapbl_send_enable(int enable) { omap_writeb(enable, OMAP_PWL_CLK_ENABLE); } static void omapbl_blank(struct omap_backlight *bl, int mode) { if (bl->pdata->set_power) bl->pdata->set_power(bl->dev, mode); switch (mode) { case FB_BLANK_NORMAL: case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: case FB_BLANK_POWERDOWN: omapbl_send_intensity(0); omapbl_send_enable(0); break; case FB_BLANK_UNBLANK: omapbl_send_intensity(bl->current_intensity); omapbl_send_enable(1); break; } } #ifdef CONFIG_PM_SLEEP static int omapbl_suspend(struct device *dev) { struct backlight_device *bl_dev = dev_get_drvdata(dev); struct omap_backlight *bl 
= bl_get_data(bl_dev); omapbl_blank(bl, FB_BLANK_POWERDOWN); return 0; } static int omapbl_resume(struct device *dev) { struct backlight_device *bl_dev = dev_get_drvdata(dev); struct omap_backlight *bl = bl_get_data(bl_dev); omapbl_blank(bl, bl->powermode); return 0; } #endif static int omapbl_set_power(struct backlight_device *dev, int state) { struct omap_backlight *bl = bl_get_data(dev); omapbl_blank(bl, state); bl->powermode = state; return 0; } static int omapbl_update_status(struct backlight_device *dev) { struct omap_backlight *bl = bl_get_data(dev); if (bl->current_intensity != dev->props.brightness) { if (bl->powermode == FB_BLANK_UNBLANK) omapbl_send_intensity(dev->props.brightness); bl->current_intensity = dev->props.brightness; } if (dev->props.fb_blank != bl->powermode) omapbl_set_power(dev, dev->props.fb_blank); return 0; } static int omapbl_get_intensity(struct backlight_device *dev) { struct omap_backlight *bl = bl_get_data(dev); return bl->current_intensity; } static const struct backlight_ops omapbl_ops = { .get_brightness = omapbl_get_intensity, .update_status = omapbl_update_status, }; static int omapbl_probe(struct platform_device *pdev) { struct backlight_properties props; struct backlight_device *dev; struct omap_backlight *bl; struct omap_backlight_config *pdata = pdev->dev.platform_data; if (!pdata) return -ENXIO; bl = devm_kzalloc(&pdev->dev, sizeof(struct omap_backlight), GFP_KERNEL); if (unlikely(!bl)) return -ENOMEM; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = OMAPBL_MAX_INTENSITY; dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops, &props); if (IS_ERR(dev)) return PTR_ERR(dev); bl->powermode = FB_BLANK_POWERDOWN; bl->current_intensity = 0; bl->pdata = pdata; bl->dev = &pdev->dev; platform_set_drvdata(pdev, dev); omap_cfg_reg(PWL); /* Conflicts with UART3 */ dev->props.fb_blank = FB_BLANK_UNBLANK; dev->props.brightness = pdata->default_intensity; 
omapbl_update_status(dev); dev_info(&pdev->dev, "OMAP LCD backlight initialised\n"); return 0; } static int omapbl_remove(struct platform_device *pdev) { struct backlight_device *dev = platform_get_drvdata(pdev); backlight_device_unregister(dev); return 0; } static SIMPLE_DEV_PM_OPS(omapbl_pm_ops, omapbl_suspend, omapbl_resume); static struct platform_driver omapbl_driver = { .probe = omapbl_probe, .remove = omapbl_remove, .driver = { .name = "omap-bl", .pm = &omapbl_pm_ops, }, }; module_platform_driver(omapbl_driver); MODULE_AUTHOR("Andrzej Zaborowski <balrog@zabor.org>"); MODULE_DESCRIPTION("OMAP LCD Backlight driver"); MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/android_kernel_hardkernel_odroidc1
drivers/bluetooth/dtl1_cs.c
2243
13044
/* * * A driver for Nokia Connectivity Card DTL-1 devices * * Copyright (C) 2001-2002 Marcel Holtmann <marcel@holtmann.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation; * * Software distributed under the License is distributed on an "AS * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or * implied. See the License for the specific language governing * rights and limitations under the License. * * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/spinlock.h> #include <linux/moduleparam.h> #include <linux/skbuff.h> #include <linux/string.h> #include <linux/serial.h> #include <linux/serial_reg.h> #include <linux/bitops.h> #include <asm/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> /* ======================== Module parameters ======================== */ MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth driver for Nokia Connectivity Card DTL-1"); MODULE_LICENSE("GPL"); /* ======================== Local structures ======================== */ typedef struct dtl1_info_t { struct pcmcia_device *p_dev; struct hci_dev *hdev; spinlock_t lock; /* For serializing operations */ unsigned long flowmask; /* HCI flow mask */ int ri_latch; struct sk_buff_head txq; unsigned long tx_state; unsigned long rx_state; unsigned long rx_count; struct sk_buff *rx_skb; } dtl1_info_t; static 
int dtl1_config(struct pcmcia_device *link); /* Transmit states */ #define XMIT_SENDING 1 #define XMIT_WAKEUP 2 #define XMIT_WAITING 8 /* Receiver States */ #define RECV_WAIT_NSH 0 #define RECV_WAIT_DATA 1 typedef struct { u8 type; u8 zero; u16 len; } __packed nsh_t; /* Nokia Specific Header */ #define NSHL 4 /* Nokia Specific Header Length */ /* ======================== Interrupt handling ======================== */ static int dtl1_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) { int actual = 0; /* Tx FIFO should be empty */ if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) return 0; /* Fill FIFO with current frame */ while ((fifo_size-- > 0) && (actual < len)) { /* Transmit next byte */ outb(buf[actual], iobase + UART_TX); actual++; } return actual; } static void dtl1_write_wakeup(dtl1_info_t *info) { if (!info) { BT_ERR("Unknown device"); return; } if (test_bit(XMIT_WAITING, &(info->tx_state))) { set_bit(XMIT_WAKEUP, &(info->tx_state)); return; } if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) { set_bit(XMIT_WAKEUP, &(info->tx_state)); return; } do { unsigned int iobase = info->p_dev->resource[0]->start; register struct sk_buff *skb; int len; clear_bit(XMIT_WAKEUP, &(info->tx_state)); if (!pcmcia_dev_present(info->p_dev)) return; if (!(skb = skb_dequeue(&(info->txq)))) break; /* Send frame */ len = dtl1_write(iobase, 32, skb->data, skb->len); if (len == skb->len) { set_bit(XMIT_WAITING, &(info->tx_state)); kfree_skb(skb); } else { skb_pull(skb, len); skb_queue_head(&(info->txq), skb); } info->hdev->stat.byte_tx += len; } while (test_bit(XMIT_WAKEUP, &(info->tx_state))); clear_bit(XMIT_SENDING, &(info->tx_state)); } static void dtl1_control(dtl1_info_t *info, struct sk_buff *skb) { u8 flowmask = *(u8 *)skb->data; int i; printk(KERN_INFO "Bluetooth: Nokia control data ="); for (i = 0; i < skb->len; i++) { printk(" %02x", skb->data[i]); } printk("\n"); /* transition to active state */ if (((info->flowmask & 0x07) == 0) && ((flowmask & 0x07) != 
0)) { clear_bit(XMIT_WAITING, &(info->tx_state)); dtl1_write_wakeup(info); } info->flowmask = flowmask; kfree_skb(skb); } static void dtl1_receive(dtl1_info_t *info) { unsigned int iobase; nsh_t *nsh; int boguscount = 0; if (!info) { BT_ERR("Unknown device"); return; } iobase = info->p_dev->resource[0]->start; do { info->hdev->stat.byte_rx++; /* Allocate packet */ if (info->rx_skb == NULL) if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) { BT_ERR("Can't allocate mem for new packet"); info->rx_state = RECV_WAIT_NSH; info->rx_count = NSHL; return; } *skb_put(info->rx_skb, 1) = inb(iobase + UART_RX); nsh = (nsh_t *)info->rx_skb->data; info->rx_count--; if (info->rx_count == 0) { switch (info->rx_state) { case RECV_WAIT_NSH: info->rx_state = RECV_WAIT_DATA; info->rx_count = nsh->len + (nsh->len & 0x0001); break; case RECV_WAIT_DATA: bt_cb(info->rx_skb)->pkt_type = nsh->type; /* remove PAD byte if it exists */ if (nsh->len & 0x0001) { info->rx_skb->tail--; info->rx_skb->len--; } /* remove NSH */ skb_pull(info->rx_skb, NSHL); switch (bt_cb(info->rx_skb)->pkt_type) { case 0x80: /* control data for the Nokia Card */ dtl1_control(info, info->rx_skb); break; case 0x82: case 0x83: case 0x84: /* send frame to the HCI layer */ info->rx_skb->dev = (void *) info->hdev; bt_cb(info->rx_skb)->pkt_type &= 0x0f; hci_recv_frame(info->rx_skb); break; default: /* unknown packet */ BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type); kfree_skb(info->rx_skb); break; } info->rx_state = RECV_WAIT_NSH; info->rx_count = NSHL; info->rx_skb = NULL; break; } } /* Make sure we don't stay here too long */ if (boguscount++ > 32) break; } while (inb(iobase + UART_LSR) & UART_LSR_DR); } static irqreturn_t dtl1_interrupt(int irq, void *dev_inst) { dtl1_info_t *info = dev_inst; unsigned int iobase; unsigned char msr; int boguscount = 0; int iir, lsr; irqreturn_t r = IRQ_NONE; if (!info || !info->hdev) /* our irq handler is shared */ return IRQ_NONE; 
iobase = info->p_dev->resource[0]->start; spin_lock(&(info->lock)); iir = inb(iobase + UART_IIR) & UART_IIR_ID; while (iir) { r = IRQ_HANDLED; /* Clear interrupt */ lsr = inb(iobase + UART_LSR); switch (iir) { case UART_IIR_RLSI: BT_ERR("RLSI"); break; case UART_IIR_RDI: /* Receive interrupt */ dtl1_receive(info); break; case UART_IIR_THRI: if (lsr & UART_LSR_THRE) { /* Transmitter ready for data */ dtl1_write_wakeup(info); } break; default: BT_ERR("Unhandled IIR=%#x", iir); break; } /* Make sure we don't stay here too long */ if (boguscount++ > 100) break; iir = inb(iobase + UART_IIR) & UART_IIR_ID; } msr = inb(iobase + UART_MSR); if (info->ri_latch ^ (msr & UART_MSR_RI)) { info->ri_latch = msr & UART_MSR_RI; clear_bit(XMIT_WAITING, &(info->tx_state)); dtl1_write_wakeup(info); r = IRQ_HANDLED; } spin_unlock(&(info->lock)); return r; } /* ======================== HCI interface ======================== */ static int dtl1_hci_open(struct hci_dev *hdev) { set_bit(HCI_RUNNING, &(hdev->flags)); return 0; } static int dtl1_hci_flush(struct hci_dev *hdev) { dtl1_info_t *info = hci_get_drvdata(hdev); /* Drop TX queue */ skb_queue_purge(&(info->txq)); return 0; } static int dtl1_hci_close(struct hci_dev *hdev) { if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) return 0; dtl1_hci_flush(hdev); return 0; } static int dtl1_hci_send_frame(struct sk_buff *skb) { dtl1_info_t *info; struct hci_dev *hdev = (struct hci_dev *)(skb->dev); struct sk_buff *s; nsh_t nsh; if (!hdev) { BT_ERR("Frame for unknown HCI device (hdev=NULL)"); return -ENODEV; } info = hci_get_drvdata(hdev); switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; nsh.type = 0x81; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; nsh.type = 0x82; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; nsh.type = 0x83; break; default: return -EILSEQ; }; nsh.zero = 0; nsh.len = skb->len; s = bt_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC); if (!s) return -ENOMEM; skb_reserve(s, NSHL); 
skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len); if (skb->len & 0x0001) *skb_put(s, 1) = 0; /* PAD */ /* Prepend skb with Nokia frame header and queue */ memcpy(skb_push(s, NSHL), &nsh, NSHL); skb_queue_tail(&(info->txq), s); dtl1_write_wakeup(info); kfree_skb(skb); return 0; } static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg) { return -ENOIOCTLCMD; } /* ======================== Card services HCI interaction ======================== */ static int dtl1_open(dtl1_info_t *info) { unsigned long flags; unsigned int iobase = info->p_dev->resource[0]->start; struct hci_dev *hdev; spin_lock_init(&(info->lock)); skb_queue_head_init(&(info->txq)); info->rx_state = RECV_WAIT_NSH; info->rx_count = NSHL; info->rx_skb = NULL; set_bit(XMIT_WAITING, &(info->tx_state)); /* Initialize HCI device */ hdev = hci_alloc_dev(); if (!hdev) { BT_ERR("Can't allocate HCI device"); return -ENOMEM; } info->hdev = hdev; hdev->bus = HCI_PCCARD; hci_set_drvdata(hdev, info); SET_HCIDEV_DEV(hdev, &info->p_dev->dev); hdev->open = dtl1_hci_open; hdev->close = dtl1_hci_close; hdev->flush = dtl1_hci_flush; hdev->send = dtl1_hci_send_frame; hdev->ioctl = dtl1_hci_ioctl; spin_lock_irqsave(&(info->lock), flags); /* Reset UART */ outb(0, iobase + UART_MCR); /* Turn off interrupts */ outb(0, iobase + UART_IER); /* Initialize UART */ outb(UART_LCR_WLEN8, iobase + UART_LCR); /* Reset DLAB */ outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR); info->ri_latch = inb(info->p_dev->resource[0]->start + UART_MSR) & UART_MSR_RI; /* Turn on interrupts */ outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER); spin_unlock_irqrestore(&(info->lock), flags); /* Timeout before it is safe to send the first HCI packet */ msleep(2000); /* Register HCI device */ if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); info->hdev = NULL; hci_free_dev(hdev); return -ENODEV; } return 0; } static int dtl1_close(dtl1_info_t *info) { 
unsigned long flags; unsigned int iobase = info->p_dev->resource[0]->start; struct hci_dev *hdev = info->hdev; if (!hdev) return -ENODEV; dtl1_hci_close(hdev); spin_lock_irqsave(&(info->lock), flags); /* Reset UART */ outb(0, iobase + UART_MCR); /* Turn off interrupts */ outb(0, iobase + UART_IER); spin_unlock_irqrestore(&(info->lock), flags); hci_unregister_dev(hdev); hci_free_dev(hdev); return 0; } static int dtl1_probe(struct pcmcia_device *link) { dtl1_info_t *info; /* Create new info device */ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->p_dev = link; link->priv = info; link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; return dtl1_config(link); } static void dtl1_detach(struct pcmcia_device *link) { dtl1_info_t *info = link->priv; dtl1_close(info); pcmcia_disable_device(link); } static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data) { if ((p_dev->resource[1]->end) || (p_dev->resource[1]->end < 8)) return -ENODEV; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; return pcmcia_request_io(p_dev); } static int dtl1_config(struct pcmcia_device *link) { dtl1_info_t *info = link->priv; int ret; /* Look for a generic full-sized window */ link->resource[0]->end = 8; ret = pcmcia_loop_config(link, dtl1_confcheck, NULL); if (ret) goto failed; ret = pcmcia_request_irq(link, dtl1_interrupt); if (ret) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; ret = dtl1_open(info); if (ret) goto failed; return 0; failed: dtl1_detach(link); return ret; } static const struct pcmcia_device_id dtl1_ids[] = { PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-1", 0xe1bfdd64, 0xe168480d), PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-4", 0xe1bfdd64, 0x9102bc82), PCMCIA_DEVICE_PROD_ID12("Socket", "CF", 0xb38bcc2e, 0x44ebf863), PCMCIA_DEVICE_PROD_ID12("Socket", "CF+ Personal Network Card", 0xb38bcc2e, 0xe732bae3), PCMCIA_DEVICE_NULL }; 
MODULE_DEVICE_TABLE(pcmcia, dtl1_ids); static struct pcmcia_driver dtl1_driver = { .owner = THIS_MODULE, .name = "dtl1_cs", .probe = dtl1_probe, .remove = dtl1_detach, .id_table = dtl1_ids, }; module_pcmcia_driver(dtl1_driver);
gpl-2.0
mcrosson/samsung_kernel_comanche
drivers/staging/westbridge/astoria/gadget/cyasgadget.c
2499
55199
/* cyangadget.c - Linux USB Gadget driver file for the Cypress West Bridge ## =========================== ## Copyright (C) 2010 Cypress Semiconductor ## ## This program is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License ## as published by the Free Software Foundation; either version 2 ## of the License, or (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., 51 Franklin Street, Fifth Floor ## Boston, MA 02110-1301, USA. ## =========================== */ /* * Cypress West Bridge high/full speed usb device controller code * Based on the Netchip 2280 device controller by David Brownell * in the linux 2.6.10 kernel * * linux/drivers/usb/gadget/net2280.c */ /* * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) * Copyright (C) 2003 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330 * Boston, MA 02111-1307 USA */ #include "cyasgadget.h" #define CY_AS_DRIVER_DESC "cypress west bridge usb gadget" #define CY_AS_DRIVER_VERSION "REV B" #define DMA_ADDR_INVALID (~(dma_addr_t)0) static const char cy_as_driver_name[] = "cy_astoria_gadget"; static const char cy_as_driver_desc[] = CY_AS_DRIVER_DESC; static const char cy_as_ep0name[] = "EP0"; static const char *cy_as_ep_names[] = { cy_as_ep0name, "EP1", "EP2", "EP3", "EP4", "EP5", "EP6", "EP7", "EP8", "EP9", "EP10", "EP11", "EP12", "EP13", "EP14", "EP15" }; /* forward declarations */ static void cyas_ep_reset( struct cyasgadget_ep *an_ep); static int cyasgadget_fifo_status( struct usb_ep *_ep); static void cyasgadget_stallcallback( cy_as_device_handle h, cy_as_return_status_t status, uint32_t tag, cy_as_funct_c_b_type cbtype, void *cbdata); /* variables */ static cyasgadget *cy_as_gadget_controller; static int append_mtp; module_param(append_mtp, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(append_mtp, "west bridge to append descriptors for mtp 0=no 1=yes"); static int msc_enum_bus_0; module_param(msc_enum_bus_0, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(msc_enum_bus_0, "west bridge to enumerate bus 0 as msc 0=no 1=yes"); static int msc_enum_bus_1; module_param(msc_enum_bus_1, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(msc_enum_bus_1, "west bridge to enumerate bus 1 as msc 0=no 1=yes"); /* all Callbacks are placed in this subsection*/ static void cy_as_gadget_usb_event_callback( cy_as_device_handle h, cy_as_usb_event ev, void *evdata ) { cyasgadget *cy_as_dev; #ifndef WESTBRIDGE_NDEBUG struct usb_ctrlrequest *ctrlreq; #endif /* cy_as_dev = container_of(h, cyasgadget, dev_handle); */ cy_as_dev = cy_as_gadget_controller; switch (ev) { case cy_as_event_usb_suspend: #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( 
"<1>_cy_as_event_usb_suspend received\n"); #endif cy_as_dev->driver->suspend(&cy_as_dev->gadget); break; case cy_as_event_usb_resume: #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "<1>_cy_as_event_usb_resume received\n"); #endif cy_as_dev->driver->resume(&cy_as_dev->gadget); break; case cy_as_event_usb_reset: #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "<1>_cy_as_event_usb_reset received\n"); #endif break; case cy_as_event_usb_speed_change: #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "<1>_cy_as_event_usb_speed_change received\n"); #endif break; case cy_as_event_usb_set_config: #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "<1>_cy_as_event_usb_set_config received\n"); #endif break; case cy_as_event_usb_setup_packet: #ifndef WESTBRIDGE_NDEBUG ctrlreq = (struct usb_ctrlrequest *)evdata; cy_as_hal_print_message("<1>_cy_as_event_usb_setup_packet " "received" "bRequestType=0x%x," "bRequest=0x%x," "wValue=x%x," "wIndex=0x%x," "wLength=0x%x,", ctrlreq->bRequestType, ctrlreq->bRequest, ctrlreq->wValue, ctrlreq->wIndex, ctrlreq->wLength ); #endif cy_as_dev->outsetupreq = 0; if ((((uint8_t *)evdata)[0] & USB_DIR_IN) == USB_DIR_OUT) cy_as_dev->outsetupreq = 1; cy_as_dev->driver->setup(&cy_as_dev->gadget, (struct usb_ctrlrequest *)evdata); break; case cy_as_event_usb_status_packet: #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "<1>_cy_as_event_usb_status_packet received\n"); #endif break; case cy_as_event_usb_inquiry_before: #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "<1>_cy_as_event_usb_inquiry_before received\n"); #endif break; case cy_as_event_usb_inquiry_after: #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "<1>_cy_as_event_usb_inquiry_after received\n"); #endif break; case cy_as_event_usb_start_stop: #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message( "<1>_cy_as_event_usb_start_stop received\n"); #endif break; default: break; } } static void cy_as_gadget_mtp_event_callback( cy_as_device_handle handle, cy_as_mtp_event evtype, void 
*evdata) {
	/* NOTE(review): tail of the MTP event callback whose signature
	 * begins before this chunk.  Latches send/get-object completion
	 * data from West Bridge into the single gadget controller so the
	 * ioctl path (CYASGADGET_GETMTPSTATUS) can report it later. */
	cyasgadget *dev = cy_as_gadget_controller;
	(void) handle;
	switch (evtype) {
	case cy_as_mtp_send_object_complete: {
		cy_as_mtp_send_object_complete_data *send_obj_data =
			(cy_as_mtp_send_object_complete_data *) evdata;
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"<6>MTP EVENT: send_object_complete\n");
		cy_as_hal_print_message(
			"<6>_bytes sent = %d\n_send status = %d",
			send_obj_data->byte_count, send_obj_data->status);
#endif
		dev->tmtp_send_complete_data.byte_count =
			send_obj_data->byte_count;
		dev->tmtp_send_complete_data.status = send_obj_data->status;
		dev->tmtp_send_complete_data.transaction_id =
			send_obj_data->transaction_id;
		dev->tmtp_send_complete = cy_true;
		break;
	}
	case cy_as_mtp_get_object_complete: {
		cy_as_mtp_get_object_complete_data *get_obj_data =
			(cy_as_mtp_get_object_complete_data *) evdata;
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"<6>MTP EVENT: get_object_complete\n");
		cy_as_hal_print_message(
			"<6>_bytes got = %d\n_get status = %d",
			get_obj_data->byte_count, get_obj_data->status);
#endif
		dev->tmtp_get_complete_data.byte_count =
			get_obj_data->byte_count;
		dev->tmtp_get_complete_data.status = get_obj_data->status;
		dev->tmtp_get_complete = cy_true;
		break;
	}
	case cy_as_mtp_block_table_needed: {
		dev->tmtp_need_new_blk_tbl = cy_true;
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"<6>MTP EVENT: cy_as_mtp_block_table_needed\n");
#endif
		break;
	}
	default:
		break;
	}
}

/* Completion callback for an ep0 OUT (setup data stage) read: pops the
 * head request off the endpoint queue and completes it to the gadget
 * driver with the transfer result. */
static void cyasgadget_setupreadcallback(
			cy_as_device_handle h,
			cy_as_end_point_number_t ep,
			uint32_t count,
			void *buf,
			cy_as_return_status_t status)
{
	cyasgadget_ep *an_ep;
	cyasgadget_req *an_req;
	cyasgadget *cy_as_dev;
	unsigned stopped;
	unsigned long flags;
	(void)buf;

	cy_as_dev = cy_as_gadget_controller;
	/* gadget driver already unbound: nothing to complete to */
	if (cy_as_dev->driver == NULL)
		return;

	an_ep = &cy_as_dev->an_gadget_ep[ep];
	spin_lock_irqsave(&cy_as_dev->lock, flags);
	stopped = an_ep->stopped;
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message(
		"%s: ep=%d, count=%d, " "status=%d\n",
		__func__, ep, count, status);
#endif
	/* NOTE(review): assumes the queue is non-empty here -- TODO
	 * confirm a callback can never fire after a dequeue emptied
	 * the list. */
	an_req = list_entry(an_ep->queue.next, cyasgadget_req, queue);
	list_del_init(&an_req->queue);

	if (status == CY_AS_ERROR_SUCCESS)
		an_req->req.status = 0;
	else
		an_req->req.status = -status;
	an_req->req.actual = count;

	/* mark the ep stopped while the completion handler runs, then
	 * restore the saved state */
	an_ep->stopped = 1;
	spin_unlock_irqrestore(&cy_as_dev->lock, flags);
	an_req->req.complete(&an_ep->usb_ep_inst, &an_req->req);
	an_ep->stopped = stopped;
}

/*called when the write of a setup packet has been completed*/
static void cyasgadget_setupwritecallback(
			cy_as_device_handle h,
			cy_as_end_point_number_t ep,
			uint32_t count,
			void *buf,
			cy_as_return_status_t status
			)
{
	cyasgadget_ep *an_ep;
	cyasgadget_req *an_req;
	cyasgadget *cy_as_dev;
	unsigned stopped;
	unsigned long flags;
	(void)buf;

#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("<1>%s called status=0x%x\n",
		__func__, status);
#endif
	cy_as_dev = cy_as_gadget_controller;
	if (cy_as_dev->driver == NULL)
		return;

	an_ep = &cy_as_dev->an_gadget_ep[ep];
	spin_lock_irqsave(&cy_as_dev->lock, flags);
	stopped = an_ep->stopped;
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("setup_write_callback: ep=%d, "
		"count=%d, status=%d\n", ep, count, status);
#endif
	an_req = list_entry(an_ep->queue.next, cyasgadget_req, queue);
	list_del_init(&an_req->queue);

	an_req->req.actual = count;
	/* NOTE(review): status is reported as success unconditionally;
	 * the setup-read path above maps a failure to -status --
	 * inconsistent error reporting. */
	an_req->req.status = 0;

	an_ep->stopped = 1;
	spin_unlock_irqrestore(&cy_as_dev->lock, flags);
	an_req->req.complete(&an_ep->usb_ep_inst, &an_req->req);
	an_ep->stopped = stopped;
}

/* called when a read operation has completed.*/
static void cyasgadget_readcallback(
			cy_as_device_handle h,
			cy_as_end_point_number_t ep,
			uint32_t count,
			void *buf,
			cy_as_return_status_t status
			)
{
	cyasgadget_ep *an_ep;
	cyasgadget_req *an_req;
	cyasgadget *cy_as_dev;
	unsigned stopped;
	cy_as_return_status_t ret;
	unsigned long flags;
	(void)h;
	(void)buf;

	cy_as_dev = cy_as_gadget_controller;
	if (cy_as_dev->driver == NULL)
		return;

	an_ep = &cy_as_dev->an_gadget_ep[ep];
	stopped = an_ep->stopped;
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("%s: ep=%d, count=%d, status=%d\n",
		__func__, ep, count, status);
#endif
	/* a cancelled transfer was already unwound by the dequeue path */
	if (status == CY_AS_ERROR_CANCELED)
		return;

	spin_lock_irqsave(&cy_as_dev->lock, flags);
	an_req = list_entry(an_ep->queue.next, cyasgadget_req, queue);
	list_del_init(&an_req->queue);

	if (status == CY_AS_ERROR_SUCCESS)
		an_req->req.status = 0;
	else
		an_req->req.status = -status;
	an_req->complete = 1;
	an_req->req.actual = count;

	an_ep->stopped = 1;
	spin_unlock_irqrestore(&cy_as_dev->lock, flags);
	an_req->req.complete(&an_ep->usb_ep_inst, &an_req->req);
	an_ep->stopped = stopped;

	/* We need to call ReadAsync on this end-point
	 * again, so as to not miss any data packets. */
	if (!an_ep->stopped) {
		spin_lock_irqsave(&cy_as_dev->lock, flags);
		an_req = 0;
		if (!list_empty(&an_ep->queue))
			an_req = list_entry(an_ep->queue.next,
				cyasgadget_req, queue);
		spin_unlock_irqrestore(&cy_as_dev->lock, flags);

		/* re-arm only a request that has not been submitted yet */
		if ((an_req) && (an_req->req.status == -EINPROGRESS)) {
			ret = cy_as_usb_read_data_async(cy_as_dev->dev_handle,
				an_ep->num, cy_false, an_req->req.length,
				an_req->req.buf, cyasgadget_readcallback);
			if (ret != CY_AS_ERROR_SUCCESS)
				cy_as_hal_print_message("<1>_cy_as_gadget: "
					"cy_as_usb_read_data_async failed "
					"with error code %d\n", ret);
			else
				an_req->req.status = -EALREADY;
		}
	}
}

/* function is called when a usb write operation has completed*/
static void cyasgadget_writecallback(
			cy_as_device_handle h,
			cy_as_end_point_number_t ep,
			uint32_t count,
			void *buf,
			cy_as_return_status_t status
			)
{
	cyasgadget_ep *an_ep;
	cyasgadget_req *an_req;
	cyasgadget *cy_as_dev;
	unsigned stopped = 0;
	cy_as_return_status_t ret;
	unsigned long flags;
	(void)h;
	(void)buf;

	cy_as_dev = cy_as_gadget_controller;
	if (cy_as_dev->driver == NULL)
		return;

	an_ep = &cy_as_dev->an_gadget_ep[ep];
	if (status == CY_AS_ERROR_CANCELED)
		return;
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("%s: ep=%d, count=%d, status=%d\n",
		__func__, ep, count, status);
#endif
	spin_lock_irqsave(&cy_as_dev->lock, flags);
	an_req = list_entry(an_ep->queue.next, cyasgadget_req,
queue);
	list_del_init(&an_req->queue);
	an_req->req.actual = count;

	/* Verify the status value before setting req.status to zero */
	if (status == CY_AS_ERROR_SUCCESS)
		an_req->req.status = 0;
	else
		an_req->req.status = -status;

	an_ep->stopped = 1;
	spin_unlock_irqrestore(&cy_as_dev->lock, flags);
	an_req->req.complete(&an_ep->usb_ep_inst, &an_req->req);
	an_ep->stopped = stopped;

	/* We need to call WriteAsync on this end-point again,
	   so as to not miss any data packets. */
	if (!an_ep->stopped) {
		spin_lock_irqsave(&cy_as_dev->lock, flags);
		an_req = 0;
		if (!list_empty(&an_ep->queue))
			an_req = list_entry(an_ep->queue.next,
				cyasgadget_req, queue);
		spin_unlock_irqrestore(&cy_as_dev->lock, flags);

		if ((an_req) && (an_req->req.status == -EINPROGRESS)) {
			ret = cy_as_usb_write_data_async(cy_as_dev->dev_handle,
				an_ep->num, an_req->req.length,
				an_req->req.buf, cy_false,
				cyasgadget_writecallback);
			if (ret != CY_AS_ERROR_SUCCESS)
				cy_as_hal_print_message("<1>_cy_as_gadget: "
					"cy_as_usb_write_data_async "
					"failed with error code %d\n", ret);
			else
				an_req->req.status = -EALREADY;
		}
	}
}

/* Completion callback for set/clear-stall requests: debug report only. */
static void cyasgadget_stallcallback(
			cy_as_device_handle h,
			cy_as_return_status_t status,
			uint32_t tag,
			cy_as_funct_c_b_type cbtype,
			void *cbdata
			)
{
#ifndef WESTBRIDGE_NDEBUG
	if (status != CY_AS_ERROR_SUCCESS)
		cy_as_hal_print_message("<1>_set/_clear stall "
			"failed with status %d\n", status);
#endif
}

/*******************************************************************/
/* All usb_ep_ops (cyasgadget_ep_ops) are placed in this subsection*/
/*******************************************************************/

/* usb_ep_ops.enable: validate the gadget driver's descriptor against
 * the endpoint configuration that was pre-programmed into West Bridge
 * by cyas_usb_reinit() (type, direction, enabled), and clear any
 * pending stall.  Returns 0 or a negative errno. */
static int cyasgadget_enable(
			struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc
			)
{
	cyasgadget *an_dev;
	cyasgadget_ep *an_ep;
	u32 max, tmp;
	unsigned long flags;

	/* NOTE(review): an_ep is derived from _ep before the !_ep check
	 * below -- container_of on a NULL pointer is then dereferenced
	 * via an_ep->desc. */
	an_ep = container_of(_ep, cyasgadget_ep, usb_ep_inst);
	if (!_ep || !desc || an_ep->desc ||
			_ep->name == cy_as_ep0name ||
			desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	an_dev = an_ep->dev;
	if (!an_dev->driver ||
			an_dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	max = le16_to_cpu(desc->wMaxPacketSize) & 0x1fff;

	spin_lock_irqsave(&an_dev->lock, flags);
	_ep->maxpacket = max & 0x7ff;
	an_ep->desc = desc;

	/* ep_reset() has already been called */
	an_ep->stopped = 0;
	an_ep->out_overflow = 0;

	/* hardware config was fixed at init time; enable only verifies */
	if (an_ep->cyepconfig.enabled != cy_true) {
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("<1>_cy_as_gadget: "
			"cy_as_usb_end_point_config EP %s mismatch "
			"on enabled\n", an_ep->usb_ep_inst.name);
#endif
		spin_unlock_irqrestore(&an_dev->lock, flags);
		return -EINVAL;
	}

	tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
	an_ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
	spin_unlock_irqrestore(&an_dev->lock, flags);

	/* transfer type requested by the gadget driver must match the
	 * pre-programmed West Bridge endpoint type */
	switch (tmp) {
	case USB_ENDPOINT_XFER_ISOC:
		if (an_ep->cyepconfig.type != cy_as_usb_iso) {
#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("<1>_cy_as_gadget: "
				"cy_as_usb_end_point_config EP %s mismatch "
				"on type %d %d\n", an_ep->usb_ep_inst.name,
				an_ep->cyepconfig.type, cy_as_usb_iso);
#endif
			return -EINVAL;
		}
		break;
	case USB_ENDPOINT_XFER_INT:
		if (an_ep->cyepconfig.type != cy_as_usb_int) {
#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("<1>_cy_as_gadget: "
				"cy_as_usb_end_point_config EP %s mismatch "
				"on type %d %d\n", an_ep->usb_ep_inst.name,
				an_ep->cyepconfig.type, cy_as_usb_int);
#endif
			return -EINVAL;
		}
		break;
	default:
		if (an_ep->cyepconfig.type != cy_as_usb_bulk) {
#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("<1>_cy_as_gadget: "
				"cy_as_usb_end_point_config EP %s mismatch "
				"on type %d %d\n", an_ep->usb_ep_inst.name,
				an_ep->cyepconfig.type, cy_as_usb_bulk);
#endif
			return -EINVAL;
		}
		break;
	}

	tmp = desc->bEndpointAddress;
	an_ep->is_in = (tmp & USB_DIR_IN) != 0;

	/* direction must match the pre-programmed config as well */
	if ((an_ep->cyepconfig.dir == cy_as_usb_in) && (!an_ep->is_in)) {
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("<1>_cy_as_gadget: "
			"cy_as_usb_end_point_config EP %s mismatch "
			"on dir %d %d\n", an_ep->usb_ep_inst.name,
			an_ep->cyepconfig.dir, cy_as_usb_in);
#endif
		return -EINVAL;
	} else if ((an_ep->cyepconfig.dir == cy_as_usb_out) &&
			(an_ep->is_in)) {
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("<1>_cy_as_gadget: "
			"cy_as_usb_end_point_config EP %s mismatch "
			"on dir %d %d\n", an_ep->usb_ep_inst.name,
			an_ep->cyepconfig.dir, cy_as_usb_out);
#endif
		return -EINVAL;
	}

	cy_as_usb_clear_stall(an_dev->dev_handle, an_ep->num,
		cyasgadget_stallcallback, 0);

	cy_as_hal_print_message("%s enabled %s (ep%d-%d) max %04x\n",
		__func__, _ep->name, an_ep->num, tmp, max);

	return 0;
}

/* usb_ep_ops.disable: reset the endpoint bookkeeping under the lock. */
static int cyasgadget_disable(
			struct usb_ep *_ep
			)
{
	cyasgadget_ep *an_ep;
	unsigned long flags;

	an_ep = container_of(_ep, cyasgadget_ep, usb_ep_inst);
	if (!_ep || !an_ep->desc || _ep->name == cy_as_ep0name)
		return -EINVAL;

	spin_lock_irqsave(&an_ep->dev->lock, flags);
	cyas_ep_reset(an_ep);
	spin_unlock_irqrestore(&an_ep->dev->lock, flags);

	return 0;
}

/* usb_ep_ops.alloc_request: allocate a zeroed request wrapper. */
static struct usb_request *cyasgadget_alloc_request(
			struct usb_ep *_ep,
			gfp_t gfp_flags
			)
{
	cyasgadget_ep *an_ep;
	cyasgadget_req *an_req;

	if (!_ep)
		return NULL;

	an_ep = container_of(_ep, cyasgadget_ep, usb_ep_inst);

	an_req = kzalloc(sizeof(cyasgadget_req), gfp_flags);
	if (!an_req)
		return NULL;

	an_req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&an_req->queue);

	return &an_req->req;
}

/* usb_ep_ops.free_request: release a request wrapper. */
static void cyasgadget_free_request(
			struct usb_ep *_ep,
			struct usb_request *_req
			)
{
	cyasgadget_req *an_req;

	if (!_ep || !_req)
		return;

	an_req = container_of(_req, cyasgadget_req, req);
	kfree(an_req);
}

/* Load a packet into the fifo we use for usb IN transfers.
 * works for all endpoints.
*/
static int cyasgadget_queue(
			struct usb_ep *_ep,
			struct usb_request *_req,
			gfp_t gfp_flags
			)
{
	cyasgadget_req *as_req;
	cyasgadget_ep *as_ep;
	cyasgadget *cy_as_dev;
	unsigned long flags;
	cy_as_return_status_t ret = 0;

	/* NOTE(review): as_req is computed from _req before the !_req
	 * check and then dereferenced via list_empty() in the same
	 * condition -- crashes if a caller ever passes _req == NULL. */
	as_req = container_of(_req, cyasgadget_req, req);
	if (!_req || !_req->complete || !_req->buf ||
			!list_empty(&as_req->queue))
		return -EINVAL;

	as_ep = container_of(_ep, cyasgadget_ep, usb_ep_inst);
	if (!_ep || (!as_ep->desc && (as_ep->num != 0)))
		return -EINVAL;

	cy_as_dev = as_ep->dev;
	if (!cy_as_dev->driver ||
			cy_as_dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&cy_as_dev->lock, flags);
	_req->status = -EINPROGRESS;
	_req->actual = 0;
	spin_unlock_irqrestore(&cy_as_dev->lock, flags);

	/* Call Async functions */
	if (as_ep->is_in) {
		/* IN endpoint: submit the write immediately */
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("<1>_cy_as_gadget: "
			"cy_as_usb_write_data_async being called "
			"on ep %d\n", as_ep->num);
#endif
		ret = cy_as_usb_write_data_async(cy_as_dev->dev_handle,
			as_ep->num, _req->length, _req->buf, cy_false,
			cyasgadget_writecallback);
		if (ret != CY_AS_ERROR_SUCCESS)
			cy_as_hal_print_message("<1>_cy_as_gadget: "
				"cy_as_usb_write_data_async failed with "
				"error code %d\n", ret);
		else
			_req->status = -EALREADY;
	} else if (as_ep->num == 0) {
		/* ep0: direction depends on the pending setup request */
		/*
		ret = cy_as_usb_write_data_async(cy_as_dev->dev_handle,
		as_ep->num, _req->length, _req->buf,
		cy_false, cyasgadget_setupwritecallback);

		if (ret != CY_AS_ERROR_SUCCESS)
			cy_as_hal_print_message("<1>_cy_as_gadget: "
			"cy_as_usb_write_data_async failed with error "
			"code %d\n", ret);
		*/
		if ((cy_as_dev->outsetupreq) && (_req->length)) {
#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("<1>_cy_as_gadget: "
				"cy_as_usb_read_data_async "
				"being called on ep %d\n", as_ep->num);
#endif
			ret = cy_as_usb_read_data_async (
				cy_as_dev->dev_handle,
				as_ep->num, cy_true, _req->length, _req->buf,
				cyasgadget_setupreadcallback);

			if (ret != CY_AS_ERROR_SUCCESS)
				cy_as_hal_print_message("<1>_cy_as_gadget: "
					"cy_as_usb_read_data_async failed with "
					"error code %d\n", ret);
		} else {
#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("<1>_cy_as_gadget: "
				"cy_as_usb_write_data_async "
				"being called on ep %d\n", as_ep->num);
#endif
			ret = cy_as_usb_write_data_async(cy_as_dev->dev_handle,
				as_ep->num, _req->length, _req->buf, cy_false,
				cyasgadget_setupwritecallback);
			if (ret != CY_AS_ERROR_SUCCESS)
				cy_as_hal_print_message("<1>_cy_as_gadget: "
					"cy_as_usb_write_data_async failed with "
					"error code %d\n", ret);
		}
	} else if (list_empty(&as_ep->queue)) {
		/* OUT endpoint with idle queue: start the read now;
		 * otherwise the completion callback will re-arm it */
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("<1>_cy_as_gadget: "
			"cy_as_usb_read_data_async being called since "
			"ep queue empty%d\n", ret);
#endif
		ret = cy_as_usb_read_data_async(cy_as_dev->dev_handle,
			as_ep->num, cy_false, _req->length, _req->buf,
			cyasgadget_readcallback);
		if (ret != CY_AS_ERROR_SUCCESS)
			cy_as_hal_print_message("<1>_cy_as_gadget: "
				"cy_as_usb_read_data_async failed with error "
				"code %d\n", ret);
		else
			_req->status = -EALREADY;
	}

	spin_lock_irqsave(&cy_as_dev->lock, flags);
	if (as_req)
		list_add_tail(&as_req->queue, &as_ep->queue);
	spin_unlock_irqrestore(&cy_as_dev->lock, flags);

	return 0;
}

/* dequeue request */
static int cyasgadget_dequeue(
			struct usb_ep *_ep,
			struct usb_request *_req
			)
{
	cyasgadget_ep *an_ep;
	cyasgadget *dev;

	an_ep = container_of(_ep, cyasgadget_ep, usb_ep_inst);
	dev = an_ep->dev;
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("<1>%s called\n", __func__);
#endif
	/* NOTE(review): cancels the pending async transfer but never
	 * unlinks _req from the endpoint queue nor completes it with
	 * -ECONNRESET as the gadget API expects -- TODO confirm. */
	cy_as_usb_cancel_async(dev->dev_handle, an_ep->num);

	return 0;
}

/* usb_ep_ops.set_halt: stall (value != 0) or un-stall the endpoint via
 * the West Bridge API; refuses while transfers are queued. */
static int cyasgadget_set_halt(
			struct usb_ep *_ep,
			int value
			)
{
	cyasgadget_ep *an_ep;
	int retval = 0;
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("<1>%s called\n", __func__);
#endif
	an_ep = container_of(_ep, cyasgadget_ep, usb_ep_inst);
	if (!_ep || (!an_ep->desc && an_ep->num != 0))
		return -EINVAL;

	if (!an_ep->dev->driver ||
			an_ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* isochronous endpoints cannot be halted per the USB spec */
	if (an_ep->desc /* not ep0 */ &&
			(an_ep->desc->bmAttributes & 0x03)
			== USB_ENDPOINT_XFER_ISOC)
		return
			-EINVAL;

	if (!list_empty(&an_ep->queue))
		retval = -EAGAIN;
	else if (an_ep->is_in && value &&
			cyasgadget_fifo_status(_ep) != 0)
		retval = -EAGAIN;
	else {
		if (value) {
			cy_as_usb_set_stall(an_ep->dev->dev_handle,
				an_ep->num, cyasgadget_stallcallback, 0);
		} else {
			cy_as_usb_clear_stall(an_ep->dev->dev_handle,
				an_ep->num, cyasgadget_stallcallback, 0);
		}
	}

	return retval;
}

/* usb_ep_ops.fifo_status stub: always reports an empty fifo */
static int cyasgadget_fifo_status(
			struct usb_ep *_ep
			)
{
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("<1>%s called\n", __func__);
#endif
	return 0;
}

/* usb_ep_ops.fifo_flush stub: no-op */
static void cyasgadget_fifo_flush(
			struct usb_ep *_ep
			)
{
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("<1>%s called\n", __func__);
#endif
}

static struct usb_ep_ops cyasgadget_ep_ops = {
	.enable		= cyasgadget_enable,
	.disable	= cyasgadget_disable,
	.alloc_request	= cyasgadget_alloc_request,
	.free_request	= cyasgadget_free_request,
	.queue		= cyasgadget_queue,
	.dequeue	= cyasgadget_dequeue,
	.set_halt	= cyasgadget_set_halt,
	.fifo_status	= cyasgadget_fifo_status,
	.fifo_flush	= cyasgadget_fifo_flush,
};

/*************************************************************/
/*This subsection contains all usb_gadget_ops cyasgadget_ops */
/*************************************************************/

/* usb_gadget_ops.get_frame stub */
static int cyasgadget_get_frame(
			struct usb_gadget *_gadget
			)
{
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("<1>%s called\n", __func__);
#endif
	return 0;
}

/* usb_gadget_ops.wakeup stub */
static int cyasgadget_wakeup(
			struct usb_gadget *_gadget
			)
{
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("<1>%s called\n", __func__);
#endif
	return 0;
}

/* usb_gadget_ops.set_selfpowered stub */
static int cyasgadget_set_selfpowered(
			struct usb_gadget *_gadget,
			int value
			)
{
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("<1>%s called\n", __func__);
#endif
	return 0;
}

/* usb_gadget_ops.pullup: soft-connect/disconnect from the USB bus */
static int cyasgadget_pullup(
			struct usb_gadget *_gadget,
			int is_on
			)
{
	struct cyasgadget *cy_as_dev;
	unsigned long flags;
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("<1>%s called\n", __func__);
#endif
	if (!_gadget)
		return -ENODEV;

	cy_as_dev = container_of(_gadget,
cyasgadget, gadget);
	spin_lock_irqsave(&cy_as_dev->lock, flags);
	cy_as_dev->softconnect = (is_on != 0);
	if (is_on)
		cy_as_usb_connect(cy_as_dev->dev_handle, 0, 0);
	else
		cy_as_usb_disconnect(cy_as_dev->dev_handle, 0, 0);
	spin_unlock_irqrestore(&cy_as_dev->lock, flags);

	return 0;
}

/* usb_gadget_ops.ioctl: implements the MTP "turbo" transfer commands
 * (status query, init/cancel of send/get-object jobs that map a host
 * file straight to a West Bridge block table via a_ops->bmap). */
static int cyasgadget_ioctl(
			struct usb_gadget *_gadget,
			unsigned code,
			unsigned long param
			)
{
	int err = 0;
	int retval = 0;
	int ret_stat = 0;
	cyasgadget *dev = cy_as_gadget_controller;

#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("<1>%s called, code=%d, param=%ld\n",
		__func__, code, param);
#endif

	/*
	 * extract the type and number bitfields, and don't decode
	 * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
	 */
	if (_IOC_TYPE(code) != CYASGADGET_IOC_MAGIC) {
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s, bad magic number = 0x%x\n",
			__func__, _IOC_TYPE(code));
#endif
		return -ENOTTY;
	}
	if (_IOC_NR(code) > CYASGADGET_IOC_MAXNR) {
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s, bad ioctl code = 0x%x\n",
			__func__, _IOC_NR(code));
#endif
		return -ENOTTY;
	}

	/*
	 * the direction is a bitmask, and VERIFY_WRITE catches R/W
	 * transfers.  `Type' is user-oriented, while
	 * access_ok is kernel-oriented, so the concept of "read" and
	 * "write" is reversed
	 */
	if (_IOC_DIR(code) & _IOC_READ)
		err = !access_ok(VERIFY_WRITE,
			(void __user *)param, _IOC_SIZE(code));
	else if (_IOC_DIR(code) & _IOC_WRITE)
		err = !access_ok(VERIFY_READ,
			(void __user *)param, _IOC_SIZE(code));
	if (err) {
		cy_as_hal_print_message("%s, bad ioctl dir = 0x%x\n",
			__func__, _IOC_DIR(code));
		return -EFAULT;
	}

	switch (code) {
	case CYASGADGET_GETMTPSTATUS: {
		/* copy the latched MTP completion state to user space */
		cy_as_gadget_ioctl_tmtp_status *usr_d =
			(cy_as_gadget_ioctl_tmtp_status *)param;
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: got CYASGADGET_GETMTPSTATUS\n",
			__func__);
#endif
		retval = __put_user(dev->tmtp_send_complete,
			(uint32_t __user *)(&(usr_d->tmtp_send_complete)));
		retval = __put_user(dev->tmtp_get_complete,
			(uint32_t __user *)(&(usr_d->tmtp_get_complete)));
		retval = __put_user(dev->tmtp_need_new_blk_tbl,
			(uint32_t __user *)(&(usr_d->tmtp_need_new_blk_tbl)));

		if (copy_to_user((&(usr_d->tmtp_send_complete_data)),
			(&(dev->tmtp_send_complete_data)),
			sizeof(cy_as_gadget_ioctl_send_object)))
			return -EFAULT;

		if (copy_to_user((&(usr_d->tmtp_get_complete_data)),
			(&(dev->tmtp_get_complete_data)),
			sizeof(cy_as_gadget_ioctl_get_object)))
			return -EFAULT;
		break;
	}
	case CYASGADGET_CLEARTMTPSTATUS: {
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s got CYASGADGET_CLEARTMTPSTATUS\n",
			__func__);
#endif
		dev->tmtp_send_complete = 0;
		dev->tmtp_get_complete = 0;
		dev->tmtp_need_new_blk_tbl = 0;
		break;
	}
	case CYASGADGET_INITSOJ: {
		/* init send-object: pre-allocate the target file's block
		 * mapping and hand it to West Bridge */
		cy_as_gadget_ioctl_i_s_o_j_d k_d;
		cy_as_gadget_ioctl_i_s_o_j_d *usr_d =
			(cy_as_gadget_ioctl_i_s_o_j_d *)param;
		cy_as_mtp_block_table blk_table;
		struct scatterlist sg;
		char *alloc_filename;
		struct file *file_to_allocate;

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s got CYASGADGET_INITSOJ\n",
			__func__);
#endif
		memset(&blk_table, 0, sizeof(blk_table));

		/* Get user argument structure */
		if (copy_from_user(&k_d, usr_d,
			sizeof(cy_as_gadget_ioctl_i_s_o_j_d)))
			return -EFAULT;

		/* better use fixed size buff*/
		/* NOTE(review): k_d.name_length comes from user space
		 * unchecked -- should be range-limited before kmalloc
		 * and copy_from_user. */
		alloc_filename = kmalloc(k_d.name_length + 1, GFP_KERNEL);
		if (alloc_filename == NULL)
			return -ENOMEM;

		/* get the filename */
		if (copy_from_user(alloc_filename, k_d.file_name,
			k_d.name_length + 1)) {
#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s: CYASGADGET_INITSOJ, "
				"copy file name from user space failed\n",
				__func__);
#endif
			kfree(alloc_filename);
			return -EFAULT;
		}

		file_to_allocate = filp_open(alloc_filename, O_RDWR, 0);
		if (!IS_ERR(file_to_allocate)) {
			struct address_space *mapping =
				file_to_allocate->f_mapping;
			const struct address_space_operations *a_ops =
				mapping->a_ops;
			struct inode *inode = mapping->host;
			struct inode *alloc_inode =
				file_to_allocate->f_path.dentry->d_inode;
			uint32_t num_clusters = 0;
			struct buffer_head bh;
			struct kstat stat;
			int nr_pages = 0;
			int ret_stat = 0;

#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s: fhandle is OK, "
				"calling vfs_getattr\n", __func__);
#endif

			ret_stat = vfs_getattr(file_to_allocate->f_path.mnt,
				file_to_allocate->f_path.dentry, &stat);
#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s: returned from "
				"vfs_getattr() stat->blksize=0x%lx\n",
				__func__, stat.blksize);
#endif

			/* TODO: get this from disk properties
			 * (from blockdevice)*/
#define SECTOR_SIZE 512
			/* round the byte count up to whole sectors */
			if (stat.blksize != 0) {
				num_clusters =
					(k_d.num_bytes) / SECTOR_SIZE;
				if (((k_d.num_bytes) % SECTOR_SIZE) != 0)
					num_clusters++;
			} else {
				goto initsoj_safe_exit;
			}

			bh.b_state = 0;
			bh.b_blocknr = 0;
			/* block size is arbitrary , we'll use sector size*/
			bh.b_size = SECTOR_SIZE;

			/* clear dirty pages in page cache
			 * (if were any allocated) */
			nr_pages = (k_d.num_bytes) / (PAGE_CACHE_SIZE);
			if (((k_d.num_bytes) % (PAGE_CACHE_SIZE)) != 0)
				nr_pages++;

#ifndef WESTBRIDGE_NDEBUG
			/*check out how many pages where actually allocated */
			if (mapping->nrpages != nr_pages)
				cy_as_hal_print_message("%s mpage_cleardirty "
					"mapping->nrpages %d != num_pages %d\n",
					__func__, (int) mapping->nrpages,
					nr_pages);

			cy_as_hal_print_message("%s: calling "
				"mpage_cleardirty() "
				"for %d pages\n", __func__, nr_pages);
#endif
			ret_stat = mpage_cleardirty(mapping, nr_pages);

			/*fill up the the block table from the addr mapping */
			if (a_ops->bmap) {
				int8_t blk_table_idx = -1;
				uint32_t file_block_idx = 0;
				uint32_t last_blk_addr_map = 0,
					curr_blk_addr_map = 0;

#ifndef WESTBRIDGE_NDEBUG
				if (alloc_inode->i_bytes == 0)
					cy_as_hal_print_message(
						"%s: alloc_inode->ibytes =0\n",
						__func__);
#endif

				/* iterate through the list of
				 * blocks (not clusters)*/
				for (file_block_idx = 0;
					file_block_idx < num_clusters
					/*inode->i_bytes*/;
					file_block_idx++) {

					/* returns starting sector number */
					curr_blk_addr_map =
						a_ops->bmap(mapping,
							file_block_idx);

					/*no valid mapping*/
					if (curr_blk_addr_map == 0) {
#ifndef WESTBRIDGE_NDEBUG
						cy_as_hal_print_message(
							"%s:hit invalid "
							"mapping\n", __func__);
#endif
						break;
					} else if (curr_blk_addr_map !=
						(last_blk_addr_map + 1) ||
						(blk_table.num_blocks
						[blk_table_idx] == 65535)) {
						/* next table entry */
						blk_table_idx++;

						/* starting sector of a
						 * scattered cluster*/
						blk_table.start_blocks
							[blk_table_idx] =
							curr_blk_addr_map;

						/* ++ num of blocks in cur
						 * table entry*/
						blk_table.num_blocks
							[blk_table_idx]++;
#ifndef WESTBRIDGE_NDEBUG
						if (file_block_idx != 0)
							cy_as_hal_print_message(
								"<*> next table "
								"entry:%d required\n",
								blk_table_idx);
#endif
					} else {
						/*add contiguous block*/
						blk_table.num_blocks
							[blk_table_idx]++;
					} /*if (curr_blk_addr_map == 0)*/

					last_blk_addr_map = curr_blk_addr_map;
				} /* end for (file_block_idx = 0;
					file_block_idx < inode->i_bytes;) */

#ifndef WESTBRIDGE_NDEBUG
				/*print result for verification*/
				{
					int i;
					cy_as_hal_print_message(
						"%s: print block table "
						"mapping:\n", __func__);
					for (i = 0; i <= blk_table_idx; i++) {
						cy_as_hal_print_message(
							"<1> %d 0x%x 0x%x\n",
							i,
							blk_table.start_blocks[i],
							blk_table.num_blocks[i]);
					}
				}
#endif
				/* copy the block table to user
				 * space (for debug purposes) */
				retval = __put_user(
					blk_table.start_blocks[blk_table_idx],
					(uint32_t __user *)
					(&(usr_d->blk_addr_p)));

				retval = __put_user(
					blk_table.num_blocks[blk_table_idx],
					(uint32_t __user *)
					(&(usr_d->blk_count_p)));

				blk_table_idx++;
				retval = __put_user(blk_table_idx,
					(uint32_t __user *)
					(&(usr_d->item_count)));
			} /*end if (a_ops->bmap)*/

			filp_close(file_to_allocate, NULL);

			dev->tmtp_send_complete = 0;
			dev->tmtp_need_new_blk_tbl = 0;

#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
				"%s: calling cy_as_mtp_init_send_object()\n",
				__func__);
#endif
			sg_init_one(&sg, &blk_table, sizeof(blk_table));
			ret_stat = cy_as_mtp_init_send_object(dev->dev_handle,
				(cy_as_mtp_block_table *)&sg,
				k_d.num_bytes, 0, 0);
#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s: returned from "
				"cy_as_mtp_init_send_object()\n", __func__);
#endif
		}
#ifndef WESTBRIDGE_NDEBUG
		else {
			cy_as_hal_print_message(
				"%s: failed to allocate the file %s\n",
				__func__, alloc_filename);
		} /* end if (file_to_allocate)*/
#endif
		kfree(alloc_filename);
		/* NOTE(review): the goto above jumps past kfree(), so
		 * alloc_filename leaks on the blksize==0 path. */
initsoj_safe_exit:
		ret_stat = 0;
		retval = __put_user(ret_stat,
			(uint32_t __user *)(&(usr_d->ret_val)));
		break;
	}
	case CYASGADGET_INITGOJ: {
		/* init get-object: map an existing file's blocks and hand
		 * the table to West Bridge for a direct read */
		cy_as_gadget_ioctl_i_g_o_j_d k_d;
		cy_as_gadget_ioctl_i_g_o_j_d *usr_d =
			(cy_as_gadget_ioctl_i_g_o_j_d *)param;
		cy_as_mtp_block_table blk_table;
		struct scatterlist sg;
		char *map_filename;
		struct file *file_to_map;

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: got CYASGADGET_INITGOJ\n", __func__);
#endif
		memset(&blk_table, 0, sizeof(blk_table));

		/* Get user argument sturcutre */
		if (copy_from_user(&k_d, usr_d,
			sizeof(cy_as_gadget_ioctl_i_g_o_j_d)))
			return -EFAULT;

		map_filename = kmalloc(k_d.name_length + 1, GFP_KERNEL);
		if (map_filename == NULL)
			return -ENOMEM;

		if (copy_from_user(map_filename, k_d.file_name,
			k_d.name_length + 1)) {
#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s: copy file name from "
				"user space failed\n", __func__);
#endif
			kfree(map_filename);
			return -EFAULT;
		}

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("<*>%s: opening %s for kernel "
			"mode access map\n", __func__, map_filename);
#endif

		file_to_map = filp_open(map_filename, O_RDWR, 0);
		/* NOTE(review): BUG -- filp_open() returns an ERR_PTR,
		 * never NULL, so this check should be !IS_ERR(file_to_map)
		 * as in the INITSOJ case above; on open failure the error
		 * pointer is dereferenced below. */
		if (file_to_map) {
			struct address_space *mapping =
				file_to_map->f_mapping;
			const struct address_space_operations *a_ops =
				mapping->a_ops;
			struct inode *inode = mapping->host;
			int8_t blk_table_idx = -1;
			uint32_t file_block_idx = 0;
			uint32_t last_blk_addr_map = 0,
				curr_blk_addr_map = 0;

			/*verify operation exists*/
			if (a_ops->bmap) {
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"<*>%s: bmap found, i_bytes=0x%x, "
					"i_size=0x%x, i_blocks=0x%x\n",
					__func__, inode->i_bytes,
					(unsigned int) inode->i_size,
					(unsigned int) inode->i_blocks);
#endif
				k_d.num_bytes = inode->i_size;
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"<*>%s: k_d.num_bytes=0x%x\n",
					__func__, k_d.num_bytes);
#endif

				for (file_block_idx = 0;
					file_block_idx < inode->i_size;
					file_block_idx++) {

					curr_blk_addr_map =
						a_ops->bmap(mapping,
							file_block_idx);

					if (curr_blk_addr_map == 0) {
						/*no valid mapping*/
#ifndef WESTBRIDGE_NDEBUG
						cy_as_hal_print_message(
							"%s: no valid "
							"mapping\n", __func__);
#endif
						break;
					} else if (curr_blk_addr_map !=
						(last_blk_addr_map + 1)) {
						/*non-contiguous break*/
						blk_table_idx++;
						blk_table.start_blocks
							[blk_table_idx] =
							curr_blk_addr_map;
						blk_table.num_blocks
							[blk_table_idx]++;
#ifndef WESTBRIDGE_NDEBUG
						cy_as_hal_print_message(
							"%s: found non-"
							"contiguous break",
							__func__);
#endif
					} else {
						/*add contiguous block*/
						blk_table.num_blocks
							[blk_table_idx]++;
					}

					last_blk_addr_map = curr_blk_addr_map;
				}

				/*print result for verification*/
#ifndef WESTBRIDGE_NDEBUG
				{
					int i = 0;
					for (i = 0; i <= blk_table_idx; i++) {
						cy_as_hal_print_message(
							"%s %d 0x%x 0x%x\n",
							__func__, i,
							blk_table.start_blocks[i],
							blk_table.num_blocks[i]);
					}
				}
#endif
			} else {
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s: could not find "
					"a_ops->bmap\n", __func__);
#endif
				/* NOTE(review): this early return leaks
				 * map_filename and leaves file_to_map
				 * open (no filp_close). */
				return -EFAULT;
			}

			filp_close(file_to_map, NULL);

			dev->tmtp_get_complete = 0;
			dev->tmtp_need_new_blk_tbl = 0;

			/* echo the last table entry back for debugging */
			ret_stat = __put_user(
				blk_table.start_blocks[blk_table_idx],
				(uint32_t __user *)(&(usr_d->blk_addr_p)));

			ret_stat = __put_user(
				blk_table.num_blocks[blk_table_idx],
				(uint32_t __user *)(&(usr_d->blk_count_p)));

			sg_init_one(&sg, &blk_table, sizeof(blk_table));

#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
				"%s: calling cy_as_mtp_init_get_object() "
				"start=0x%x, num =0x%x, tid=0x%x, "
				"num_bytes=0x%x\n", __func__,
				blk_table.start_blocks[0],
				blk_table.num_blocks[0],
				k_d.tid, k_d.num_bytes);
#endif
			ret_stat = cy_as_mtp_init_get_object(
				dev->dev_handle,
				(cy_as_mtp_block_table *)&sg,
				k_d.num_bytes, k_d.tid, 0, 0);
			if (ret_stat != CY_AS_ERROR_SUCCESS) {
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s: cy_as_mtp_init_get_object "
					"failed ret_stat=0x%x\n",
					__func__, ret_stat);
#endif
			}
		}
#ifndef WESTBRIDGE_NDEBUG
		else {
			cy_as_hal_print_message(
				"%s: failed to open file %s\n",
				__func__, map_filename);
		}
#endif
		kfree(map_filename);

		ret_stat = 0;
		retval = __put_user(ret_stat,
			(uint32_t __user *) (&(usr_d->ret_val)));
		break;
	}
	case CYASGADGET_CANCELSOJ: {
		/* abort an in-progress send-object job */
		cy_as_gadget_ioctl_cancel *usr_d =
			(cy_as_gadget_ioctl_cancel *)param;
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: got CYASGADGET_CANCELSOJ\n", __func__);
#endif
		ret_stat = cy_as_mtp_cancel_send_object(dev->dev_handle,
			0, 0);

		retval = __put_user(ret_stat,
			(uint32_t __user *) (&(usr_d->ret_val)));
		break;
	}
	case CYASGADGET_CANCELGOJ: {
		/* abort an in-progress get-object job */
		cy_as_gadget_ioctl_cancel *usr_d =
			(cy_as_gadget_ioctl_cancel *)param;
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: got CYASGADGET_CANCELGOJ\n",
			__func__);
#endif
		ret_stat = cy_as_mtp_cancel_get_object(dev->dev_handle,
			0, 0);

		retval = __put_user(ret_stat,
			(uint32_t __user *)(&(usr_d->ret_val)));
		break;
	}
	default: {
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: unknown ioctl received: %d\n",
			__func__, code);
		cy_as_hal_print_message("%s: known codes:\n"
			"CYASGADGET_GETMTPSTATUS=%d\n"
			"CYASGADGET_CLEARTMTPSTATUS=%d\n"
			"CYASGADGET_INITSOJ=%d\n"
			"CYASGADGET_INITGOJ=%d\n"
			"CYASGADGET_CANCELSOJ=%d\n"
			"CYASGADGET_CANCELGOJ=%d\n",
			__func__,
			CYASGADGET_GETMTPSTATUS,
			CYASGADGET_CLEARTMTPSTATUS,
			CYASGADGET_INITSOJ,
			CYASGADGET_INITGOJ,
			CYASGADGET_CANCELSOJ,
			CYASGADGET_CANCELGOJ);
#endif
		break;
	}
	}
	return 0;
}

static const struct usb_gadget_ops cyasgadget_ops = {
	.get_frame	= cyasgadget_get_frame,
	.wakeup		= cyasgadget_wakeup,
	.set_selfpowered = cyasgadget_set_selfpowered,
	.pullup		= cyasgadget_pullup,
	.ioctl		= cyasgadget_ioctl,
};

/* keeping it simple:
 * - one bus driver, initted first;
 * - one function driver, initted second
 *
 * most of the work to support multiple controllers would
 * be to associate this gadget driver with all of them, or
 * perhaps to bind specific drivers to specific devices.
*/

/* Reset an endpoint's software state to its post-init defaults. */
static void cyas_ep_reset(
			cyasgadget_ep *an_ep
			)
{
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("<1>%s called\n", __func__);
#endif
	an_ep->desc = NULL;
	INIT_LIST_HEAD(&an_ep->queue);

	an_ep->stopped = 0;
	an_ep->is_in = 0;
	an_ep->is_iso = 0;
	an_ep->usb_ep_inst.maxpacket = ~0;
	an_ep->usb_ep_inst.ops = &cyasgadget_ep_ops;
}

/* Bring the West Bridge USB block up: release the resource, start the
 * USB stack, optionally start MTP, and program the enumeration config
 * (processor-side enumeration, storage/MTP interface selection). */
static void cyas_usb_reset(
			cyasgadget *cy_as_dev
			)
{
	cy_as_return_status_t ret;
	cy_as_usb_enum_control config;
#ifndef WESTBRIDGE_NDEBUG
	cy_as_device *dev_p = (cy_as_device *)cy_as_dev->dev_handle;

	cy_as_hal_print_message("<1>%s called mtp_firmware=0x%x\n",
		__func__, dev_p->is_mtp_firmware);
#endif

	ret = cy_as_misc_release_resource(cy_as_dev->dev_handle,
		cy_as_bus_u_s_b);
	if (ret != CY_AS_ERROR_SUCCESS &&
		ret != CY_AS_ERROR_RESOURCE_NOT_OWNED) {
		cy_as_hal_print_message("<1>_cy_as_gadget: cannot "
			"release usb resource: failed with error code %d\n",
			ret);
		return;
	}

	cy_as_dev->gadget.speed = USB_SPEED_HIGH;

	ret = cy_as_usb_start(cy_as_dev->dev_handle, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS) {
		cy_as_hal_print_message("<1>_cy_as_gadget: "
			"cy_as_usb_start failed with error code %d\n", ret);
		return;
	}

	/* P port will do enumeration, not West Bridge */
	config.antioch_enumeration = cy_false;

	/* 1 2 : 1-BUS_NUM , 2:Storage_device number, SD - is bus 1*/
	/* TODO: add module param to enumerate mass storage */
	config.mass_storage_interface = 0;

	if (append_mtp) {
		ret = cy_as_mtp_start(cy_as_dev->dev_handle,
			cy_as_gadget_mtp_event_callback, 0, 0);
		/* NOTE(review): if cy_as_mtp_start fails here,
		 * config.mtp_interface and devices_to_enumerate are
		 * left uninitialized (stack garbage) before being
		 * passed to set_enum_config below -- TODO confirm. */
		if (ret == CY_AS_ERROR_SUCCESS) {
			cy_as_hal_print_message("MTP start passed, enumerating "
				"MTP interface\n");
			config.mtp_interface = append_mtp;
			/*Do not enumerate NAND storage*/
			config.devices_to_enumerate[0][0] = cy_false;
			/*enumerate SD storage as MTP*/
			config.devices_to_enumerate[1][0] = cy_true;
		}
	} else {
		cy_as_hal_print_message("MTP start not attempted, not "
			"enumerating MTP interface\n");
		config.mtp_interface = 0;
		/* enumerate mass storage based on module parameters */
		config.devices_to_enumerate[0][0] = msc_enum_bus_0;
		config.devices_to_enumerate[1][0] = msc_enum_bus_1;
	}

	ret = cy_as_usb_set_enum_config(cy_as_dev->dev_handle,
		&config, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS) {
		cy_as_hal_print_message("<1>_cy_as_gadget: "
			"cy_as_usb_set_enum_config failed with error "
			"code %d\n", ret);
		return;
	}

	cy_as_usb_set_physical_configuration(cy_as_dev->dev_handle, 1);
}

/* Rebuild the gadget's endpoint list and pre-program the West Bridge
 * endpoint hardware configuration (must happen before connect; see the
 * compatibility note below). */
static void cyas_usb_reinit(
			cyasgadget *cy_as_dev
			)
{
	int index = 0;
	cyasgadget_ep *an_ep_p;
	cy_as_return_status_t ret;
	cy_as_device *dev_p = (cy_as_device *)cy_as_dev->dev_handle;

	INIT_LIST_HEAD(&cy_as_dev->gadget.ep_list);

#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("<1>%s called, is_mtp_firmware = "
		"0x%x\n", __func__, dev_p->is_mtp_firmware);
#endif

	/* Init the end points */
	for (index = 1; index <= 15; index++) {
		an_ep_p = &cy_as_dev->an_gadget_ep[index];
		cyas_ep_reset(an_ep_p);

		an_ep_p->usb_ep_inst.name = cy_as_ep_names[index];
		an_ep_p->dev = cy_as_dev;
		an_ep_p->num = index;
		memset(&an_ep_p->cyepconfig, 0,
			sizeof(an_ep_p->cyepconfig));

		/* EP0, EPs 2,4,6,8 need not be added */
		if ((index <= 8) && (index % 2 == 0) &&
			(!dev_p->is_mtp_firmware)) {
			/* EP0 is 64 and EPs 2,4,6,8 not allowed */
			cy_as_dev->an_gadget_ep[index].fifo_size = 0;
		} else {
			if (index == 1)
				an_ep_p->fifo_size = 64;
			else
				an_ep_p->fifo_size = 512;

			list_add_tail(&an_ep_p->usb_ep_inst.ep_list,
				&cy_as_dev->gadget.ep_list);
		}
	}

	/* need to setendpointconfig before usb connect, this is not
	 * quite compatible with gadget methodology (ep_enable called
	 * by gadget after connect), therefore need to set config in
	 * initialization and verify compatibility in ep_enable,
	 * kick up error otherwise*/
	an_ep_p = &cy_as_dev->an_gadget_ep[3];
	an_ep_p->cyepconfig.enabled = cy_true;
	an_ep_p->cyepconfig.dir = cy_as_usb_out;
	an_ep_p->cyepconfig.type = cy_as_usb_bulk;
	an_ep_p->cyepconfig.size = 0;
	an_ep_p->cyepconfig.physical = 1;
	ret = cy_as_usb_set_end_point_config(an_ep_p->dev->dev_handle,
		3, &an_ep_p->cyepconfig);
	if
(ret != CY_AS_ERROR_SUCCESS) { cy_as_hal_print_message("cy_as_usb_set_end_point_config " "failed with error code %d\n", ret); } cy_as_usb_set_stall(an_ep_p->dev->dev_handle, 3, 0, 0); an_ep_p = &cy_as_dev->an_gadget_ep[5]; an_ep_p->cyepconfig.enabled = cy_true; an_ep_p->cyepconfig.dir = cy_as_usb_in; an_ep_p->cyepconfig.type = cy_as_usb_bulk; an_ep_p->cyepconfig.size = 0; an_ep_p->cyepconfig.physical = 2; ret = cy_as_usb_set_end_point_config(an_ep_p->dev->dev_handle, 5, &an_ep_p->cyepconfig); if (ret != CY_AS_ERROR_SUCCESS) { cy_as_hal_print_message("cy_as_usb_set_end_point_config " "failed with error code %d\n", ret); } cy_as_usb_set_stall(an_ep_p->dev->dev_handle, 5, 0, 0); an_ep_p = &cy_as_dev->an_gadget_ep[9]; an_ep_p->cyepconfig.enabled = cy_true; an_ep_p->cyepconfig.dir = cy_as_usb_in; an_ep_p->cyepconfig.type = cy_as_usb_bulk; an_ep_p->cyepconfig.size = 0; an_ep_p->cyepconfig.physical = 4; ret = cy_as_usb_set_end_point_config(an_ep_p->dev->dev_handle, 9, &an_ep_p->cyepconfig); if (ret != CY_AS_ERROR_SUCCESS) { cy_as_hal_print_message("cy_as_usb_set_end_point_config " "failed with error code %d\n", ret); } cy_as_usb_set_stall(an_ep_p->dev->dev_handle, 9, 0, 0); if (dev_p->mtp_count != 0) { /* these need to be set for compatibility with * the gadget_enable logic */ an_ep_p = &cy_as_dev->an_gadget_ep[2]; an_ep_p->cyepconfig.enabled = cy_true; an_ep_p->cyepconfig.dir = cy_as_usb_out; an_ep_p->cyepconfig.type = cy_as_usb_bulk; an_ep_p->cyepconfig.size = 0; an_ep_p->cyepconfig.physical = 0; cy_as_usb_set_stall(an_ep_p->dev->dev_handle, 2, 0, 0); an_ep_p = &cy_as_dev->an_gadget_ep[6]; an_ep_p->cyepconfig.enabled = cy_true; an_ep_p->cyepconfig.dir = cy_as_usb_in; an_ep_p->cyepconfig.type = cy_as_usb_bulk; an_ep_p->cyepconfig.size = 0; an_ep_p->cyepconfig.physical = 0; cy_as_usb_set_stall(an_ep_p->dev->dev_handle, 6, 0, 0); } cyas_ep_reset(&cy_as_dev->an_gadget_ep[0]); cy_as_dev->an_gadget_ep[0].usb_ep_inst.name = cy_as_ep_names[0]; cy_as_dev->an_gadget_ep[0].dev = 
cy_as_dev; cy_as_dev->an_gadget_ep[0].num = 0; cy_as_dev->an_gadget_ep[0].fifo_size = 64; cy_as_dev->an_gadget_ep[0].usb_ep_inst.maxpacket = 64; cy_as_dev->gadget.ep0 = &cy_as_dev->an_gadget_ep[0].usb_ep_inst; cy_as_dev->an_gadget_ep[0].stopped = 0; INIT_LIST_HEAD(&cy_as_dev->gadget.ep0->ep_list); } static void cyas_ep0_start( cyasgadget *dev ) { cy_as_return_status_t ret; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("<1>%s called\n", __func__); #endif ret = cy_as_usb_register_callback(dev->dev_handle, cy_as_gadget_usb_event_callback); if (ret != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: cy_as_usb_register_callback " "failed with error code %d\n", __func__, ret); #endif return; } ret = cy_as_usb_commit_config(dev->dev_handle, 0, 0); if (ret != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: cy_as_usb_commit_config " "failed with error code %d\n", __func__, ret); #endif return; } #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: cy_as_usb_commit_config " "message sent\n", __func__); #endif ret = cy_as_usb_connect(dev->dev_handle, 0, 0); if (ret != CY_AS_ERROR_SUCCESS) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: cy_as_usb_connect failed " "with error code %d\n", __func__, ret); #endif return; } #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s: cy_as_usb_connect message " "sent\n", __func__); #endif } /* * When a driver is successfully registered, it will receive * control requests including set_configuration(), which enables * non-control requests. then usb traffic follows until a * disconnect is reported. then a host may connect again, or * the driver might get unbound. 
*/ int usb_gadget_probe_driver(struct usb_gadget_driver *driver, int (*bind)(struct usb_gadget *)) { cyasgadget *dev = cy_as_gadget_controller; int retval; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("<1>%s called driver=0x%x\n", __func__, (unsigned int) driver); #endif /* insist on high speed support from the driver, since * "must not be used in normal operation" */ if (!driver || !bind || !driver->unbind || !driver->setup) return -EINVAL; if (!dev) return -ENODEV; if (dev->driver) return -EBUSY; /* hook up the driver ... */ dev->softconnect = 1; driver->driver.bus = NULL; dev->driver = driver; dev->gadget.dev.driver = &driver->driver; /* Do the needful */ cyas_usb_reset(dev); /* External usb */ cyas_usb_reinit(dev); /* Internal */ retval = bind(&dev->gadget); if (retval) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s bind to driver %s --> %d\n", __func__, driver->driver.name, retval); #endif dev->driver = NULL; dev->gadget.dev.driver = NULL; return retval; } /* ... then enable host detection and ep0; and we're ready * for set_configuration as well as eventual disconnect. 
*/ cyas_ep0_start(dev); return 0; } EXPORT_SYMBOL(usb_gadget_probe_driver); static void cyasgadget_nuke( cyasgadget_ep *an_ep ) { cyasgadget *dev = cy_as_gadget_controller; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("<1>%s called\n", __func__); #endif cy_as_usb_cancel_async(dev->dev_handle, an_ep->num); an_ep->stopped = 1; while (!list_empty(&an_ep->queue)) { cyasgadget_req *an_req = list_entry (an_ep->queue.next, cyasgadget_req, queue); list_del_init(&an_req->queue); an_req->req.status = -ESHUTDOWN; an_req->req.complete(&an_ep->usb_ep_inst, &an_req->req); } } static void cyasgadget_stop_activity( cyasgadget *dev, struct usb_gadget_driver *driver ) { int index; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("<1>%s called\n", __func__); #endif /* don't disconnect if it's not connected */ if (dev->gadget.speed == USB_SPEED_UNKNOWN) driver = NULL; if (spin_is_locked(&dev->lock)) spin_unlock(&dev->lock); /* Stop hardware; prevent new request submissions; * and kill any outstanding requests. 
*/ cy_as_usb_disconnect(dev->dev_handle, 0, 0); for (index = 3; index <= 7; index += 2) { cyasgadget_ep *an_ep_p = &dev->an_gadget_ep[index]; cyasgadget_nuke(an_ep_p); } for (index = 9; index <= 15; index++) { cyasgadget_ep *an_ep_p = &dev->an_gadget_ep[index]; cyasgadget_nuke(an_ep_p); } /* report disconnect; the driver is already quiesced */ if (driver) driver->disconnect(&dev->gadget); #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("cy_as_usb_disconnect returned success"); #endif /* Stop Usb */ cy_as_usb_stop(dev->dev_handle, 0, 0); #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("cy_as_usb_stop returned success"); #endif } int usb_gadget_unregister_driver( struct usb_gadget_driver *driver ) { cyasgadget *dev = cy_as_gadget_controller; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("<1>%s called\n", __func__); #endif if (!dev) return -ENODEV; if (!driver || driver != dev->driver) return -EINVAL; cyasgadget_stop_activity(dev, driver); driver->unbind(&dev->gadget); dev->gadget.dev.driver = NULL; dev->driver = NULL; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("unregistered driver '%s'\n", driver->driver.name); #endif return 0; } EXPORT_SYMBOL(usb_gadget_unregister_driver); static void cyas_gadget_release( struct device *_dev ) { cyasgadget *dev = dev_get_drvdata(_dev); #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("<1>%s called\n", __func__); #endif kfree(dev); } /* DeInitialize gadget driver */ static void cyasgadget_deinit( cyasgadget *cy_as_dev ) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("<1>_cy_as_gadget deinitialize called\n"); #endif if (!cy_as_dev) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("<1>_cy_as_gadget_deinit: " "invalid cyasgadget device\n"); #endif return; } if (cy_as_dev->driver) { /* should have been done already by driver model core */ #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("<1> cy_as_gadget: '%s' " "is still registered\n", cy_as_dev->driver->driver.name); #endif 
usb_gadget_unregister_driver(cy_as_dev->driver); } kfree(cy_as_dev); cy_as_gadget_controller = NULL; } /* Initialize gadget driver */ static int cyasgadget_initialize(void) { cyasgadget *cy_as_dev = 0; int retval = 0; #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("<1>_cy_as_gadget [V1.1] initialize called\n"); #endif if (cy_as_gadget_controller != 0) { cy_as_hal_print_message("<1> cy_as_gadget: the device has " "already been initilaized. ignoring\n"); return -EBUSY; } cy_as_dev = kzalloc(sizeof(cyasgadget), GFP_ATOMIC); if (cy_as_dev == NULL) { cy_as_hal_print_message("<1> cy_as_gadget: memory " "allocation failed\n"); return -ENOMEM; } spin_lock_init(&cy_as_dev->lock); cy_as_dev->gadget.ops = &cyasgadget_ops; cy_as_dev->gadget.is_dualspeed = 1; /* the "gadget" abstracts/virtualizes the controller */ /*strcpy(cy_as_dev->gadget.dev.bus_id, "cyasgadget");*/ cy_as_dev->gadget.dev.release = cyas_gadget_release; cy_as_dev->gadget.name = cy_as_driver_name; /* Get the device handle */ cy_as_dev->dev_handle = cyasdevice_getdevhandle(); if (0 == cy_as_dev->dev_handle) { #ifndef NDEBUG cy_as_hal_print_message("<1> cy_as_gadget: " "no west bridge device\n"); #endif retval = -EFAULT; goto done; } /* We are done now */ cy_as_gadget_controller = cy_as_dev; return 0; /* * in case of an error */ done: if (cy_as_dev) cyasgadget_deinit(cy_as_dev); return retval; } static int __init cyas_init(void) { int init_res = 0; init_res = cyasgadget_initialize(); if (init_res != 0) { printk(KERN_WARNING "<1> gadget ctl instance " "init error:%d\n", init_res); if (init_res > 0) { /* force -E/0 linux convention */ init_res = init_res * -1; } } return init_res; } module_init(cyas_init); static void __exit cyas_cleanup(void) { if (cy_as_gadget_controller != NULL) cyasgadget_deinit(cy_as_gadget_controller); } module_exit(cyas_cleanup); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(CY_AS_DRIVER_DESC); MODULE_AUTHOR("cypress semiconductor"); /*[]*/
gpl-2.0
linux-wpan/linux-wpan
drivers/leds/trigger/ledtrig-camera.c
4547
1375
/* * Camera Flash and Torch On/Off Trigger * * based on ledtrig-ide-disk.c * * Copyright 2013 Texas Instruments * * Author: Milo(Woogyom) Kim <milo.kim@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/leds.h> DEFINE_LED_TRIGGER(ledtrig_flash); DEFINE_LED_TRIGGER(ledtrig_torch); void ledtrig_flash_ctrl(bool on) { enum led_brightness brt = on ? LED_FULL : LED_OFF; led_trigger_event(ledtrig_flash, brt); } EXPORT_SYMBOL_GPL(ledtrig_flash_ctrl); void ledtrig_torch_ctrl(bool on) { enum led_brightness brt = on ? LED_FULL : LED_OFF; led_trigger_event(ledtrig_torch, brt); } EXPORT_SYMBOL_GPL(ledtrig_torch_ctrl); static int __init ledtrig_camera_init(void) { led_trigger_register_simple("flash", &ledtrig_flash); led_trigger_register_simple("torch", &ledtrig_torch); return 0; } module_init(ledtrig_camera_init); static void __exit ledtrig_camera_exit(void) { led_trigger_unregister_simple(ledtrig_torch); led_trigger_unregister_simple(ledtrig_flash); } module_exit(ledtrig_camera_exit); MODULE_DESCRIPTION("LED Trigger for Camera Flash/Torch Control"); MODULE_AUTHOR("Milo Kim"); MODULE_LICENSE("GPL");
gpl-2.0
tdro/android_kernel_kobo_macallan
drivers/staging/asus_oled/asus_oled.c
4803
19511
/* * Asus OLED USB driver * * Copyright (C) 2007,2008 Jakub Schmidtke (sjakub@gmail.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * * This module is based on usbled and asus-laptop modules. * * * Asus OLED support is based on asusoled program taken from * <http://lapsus.berlios.de/asus_oled.html>. * * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/platform_device.h> #include <linux/ctype.h> #define ASUS_OLED_VERSION "0.04-dev" #define ASUS_OLED_NAME "asus-oled" #define ASUS_OLED_UNDERSCORE_NAME "asus_oled" #define ASUS_OLED_ERROR "Asus OLED Display Error: " #define ASUS_OLED_STATIC 's' #define ASUS_OLED_ROLL 'r' #define ASUS_OLED_FLASH 'f' #define ASUS_OLED_MAX_WIDTH 1792 #define ASUS_OLED_DISP_HEIGHT 32 #define ASUS_OLED_PACKET_BUF_SIZE 256 #define USB_VENDOR_ID_ASUS 0x0b05 #define USB_DEVICE_ID_ASUS_LCM 0x1726 #define USB_DEVICE_ID_ASUS_LCM2 0x175b MODULE_AUTHOR("Jakub Schmidtke, sjakub@gmail.com"); MODULE_DESCRIPTION("Asus OLED Driver v" ASUS_OLED_VERSION); MODULE_LICENSE("GPL"); static struct class *oled_class; static int oled_num; static uint start_off; module_param(start_off, uint, 0644); MODULE_PARM_DESC(start_off, "Set to 1 to switch off OLED display after it is attached"); enum 
oled_pack_mode { PACK_MODE_G1, PACK_MODE_G50, PACK_MODE_LAST }; struct oled_dev_desc_str { uint16_t idVendor; uint16_t idProduct; /* width of display */ uint16_t devWidth; /* formula to be used while packing the picture */ enum oled_pack_mode packMode; const char *devDesc; }; /* table of devices that work with this driver */ static const struct usb_device_id id_table[] = { /* Asus G1/G2 (and variants)*/ { USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM) }, /* Asus G50V (and possibly others - G70? G71?)*/ { USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2) }, { }, }; /* parameters of specific devices */ static struct oled_dev_desc_str oled_dev_desc_table[] = { { USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM, 128, PACK_MODE_G1, "G1/G2" }, { USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2, 256, PACK_MODE_G50, "G50" }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); struct asus_oled_header { uint8_t magic1; uint8_t magic2; uint8_t flags; uint8_t value3; uint8_t buffer1; uint8_t buffer2; uint8_t value6; uint8_t value7; uint8_t value8; uint8_t padding2[7]; } __attribute((packed)); struct asus_oled_packet { struct asus_oled_header header; uint8_t bitmap[ASUS_OLED_PACKET_BUF_SIZE]; } __attribute((packed)); struct asus_oled_dev { struct usb_device *udev; uint8_t pic_mode; uint16_t dev_width; enum oled_pack_mode pack_mode; size_t height; size_t width; size_t x_shift; size_t y_shift; size_t buf_offs; uint8_t last_val; size_t buf_size; char *buf; uint8_t enabled; struct device *dev; }; static void setup_packet_header(struct asus_oled_packet *packet, char flags, char value3, char buffer1, char buffer2, char value6, char value7, char value8) { memset(packet, 0, sizeof(struct asus_oled_header)); packet->header.magic1 = 0x55; packet->header.magic2 = 0xaa; packet->header.flags = flags; packet->header.value3 = value3; packet->header.buffer1 = buffer1; packet->header.buffer2 = buffer2; packet->header.value6 = value6; packet->header.value7 = value7; packet->header.value8 = value8; } 
static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl) { int retval; int act_len; struct asus_oled_packet *packet; packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL); if (!packet) { dev_err(&odev->udev->dev, "out of memory\n"); return; } setup_packet_header(packet, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00); if (enabl) packet->bitmap[0] = 0xaf; else packet->bitmap[0] = 0xae; retval = usb_bulk_msg(odev->udev, usb_sndbulkpipe(odev->udev, 2), packet, sizeof(struct asus_oled_header) + 1, &act_len, -1); if (retval) dev_dbg(&odev->udev->dev, "retval = %d\n", retval); odev->enabled = enabl; kfree(packet); } static ssize_t set_enabled(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct asus_oled_dev *odev = usb_get_intfdata(intf); unsigned long value; if (kstrtoul(buf, 10, &value)) return -EINVAL; enable_oled(odev, value); return count; } static ssize_t class_set_enabled(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { struct asus_oled_dev *odev = (struct asus_oled_dev *) dev_get_drvdata(device); unsigned long value; if (kstrtoul(buf, 10, &value)) return -EINVAL; enable_oled(odev, value); return count; } static ssize_t get_enabled(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct asus_oled_dev *odev = usb_get_intfdata(intf); return sprintf(buf, "%d\n", odev->enabled); } static ssize_t class_get_enabled(struct device *device, struct device_attribute *attr, char *buf) { struct asus_oled_dev *odev = (struct asus_oled_dev *) dev_get_drvdata(device); return sprintf(buf, "%d\n", odev->enabled); } static void send_packets(struct usb_device *udev, struct asus_oled_packet *packet, char *buf, uint8_t p_type, size_t p_num) { size_t i; int act_len; for (i = 0; i < p_num; i++) { int retval; switch (p_type) { case ASUS_OLED_ROLL: setup_packet_header(packet, 0x40, 
0x80, p_num, i + 1, 0x00, 0x01, 0xff); break; case ASUS_OLED_STATIC: setup_packet_header(packet, 0x10 + i, 0x80, 0x01, 0x01, 0x00, 0x01, 0x00); break; case ASUS_OLED_FLASH: setup_packet_header(packet, 0x10 + i, 0x80, 0x01, 0x01, 0x00, 0x00, 0xff); break; } memcpy(packet->bitmap, buf + (ASUS_OLED_PACKET_BUF_SIZE*i), ASUS_OLED_PACKET_BUF_SIZE); retval = usb_bulk_msg(udev, usb_sndctrlpipe(udev, 2), packet, sizeof(struct asus_oled_packet), &act_len, -1); if (retval) dev_dbg(&udev->dev, "retval = %d\n", retval); } } static void send_packet(struct usb_device *udev, struct asus_oled_packet *packet, size_t offset, size_t len, char *buf, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5, uint8_t b6) { int retval; int act_len; setup_packet_header(packet, b1, b2, b3, b4, b5, b6, 0x00); memcpy(packet->bitmap, buf + offset, len); retval = usb_bulk_msg(udev, usb_sndctrlpipe(udev, 2), packet, sizeof(struct asus_oled_packet), &act_len, -1); if (retval) dev_dbg(&udev->dev, "retval = %d\n", retval); } static void send_packets_g50(struct usb_device *udev, struct asus_oled_packet *packet, char *buf) { send_packet(udev, packet, 0, 0x100, buf, 0x10, 0x00, 0x02, 0x01, 0x00, 0x01); send_packet(udev, packet, 0x100, 0x080, buf, 0x10, 0x00, 0x02, 0x02, 0x80, 0x00); send_packet(udev, packet, 0x180, 0x100, buf, 0x11, 0x00, 0x03, 0x01, 0x00, 0x01); send_packet(udev, packet, 0x280, 0x100, buf, 0x11, 0x00, 0x03, 0x02, 0x00, 0x01); send_packet(udev, packet, 0x380, 0x080, buf, 0x11, 0x00, 0x03, 0x03, 0x80, 0x00); } static void send_data(struct asus_oled_dev *odev) { size_t packet_num = odev->buf_size / ASUS_OLED_PACKET_BUF_SIZE; struct asus_oled_packet *packet; packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL); if (!packet) { dev_err(&odev->udev->dev, "out of memory\n"); return; } if (odev->pack_mode == PACK_MODE_G1) { /* When sending roll-mode data the display updated only first packet. 
I have no idea why, but when static picture is sent just before rolling picture everything works fine. */ if (odev->pic_mode == ASUS_OLED_ROLL) send_packets(odev->udev, packet, odev->buf, ASUS_OLED_STATIC, 2); /* Only ROLL mode can use more than 2 packets.*/ if (odev->pic_mode != ASUS_OLED_ROLL && packet_num > 2) packet_num = 2; send_packets(odev->udev, packet, odev->buf, odev->pic_mode, packet_num); } else if (odev->pack_mode == PACK_MODE_G50) { send_packets_g50(odev->udev, packet, odev->buf); } kfree(packet); } static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count) { odev->last_val = val; if (val == 0) { odev->buf_offs += count; return 0; } while (count-- > 0) { size_t x = odev->buf_offs % odev->width; size_t y = odev->buf_offs / odev->width; size_t i; x += odev->x_shift; y += odev->y_shift; switch (odev->pack_mode) { case PACK_MODE_G1: /* i = (x/128)*640 + 127 - x + (y/8)*128; This one for 128 is the same, but might be better for different widths? */ i = (x/odev->dev_width)*640 + odev->dev_width - 1 - x + (y/8)*odev->dev_width; break; case PACK_MODE_G50: i = (odev->dev_width - 1 - x)/8 + y*odev->dev_width/8; break; default: i = 0; printk(ASUS_OLED_ERROR "Unknown OLED Pack Mode: %d!\n", odev->pack_mode); break; } if (i >= odev->buf_size) { printk(ASUS_OLED_ERROR "Buffer overflow! 
Report a bug:" "offs: %d >= %d i: %d (x: %d y: %d)\n", (int) odev->buf_offs, (int) odev->buf_size, (int) i, (int) x, (int) y); return -EIO; } switch (odev->pack_mode) { case PACK_MODE_G1: odev->buf[i] &= ~(1<<(y%8)); break; case PACK_MODE_G50: odev->buf[i] &= ~(1<<(x%8)); break; default: /* cannot get here; stops gcc complaining*/ ; } odev->buf_offs++; } return 0; } static ssize_t odev_set_picture(struct asus_oled_dev *odev, const char *buf, size_t count) { size_t offs = 0, max_offs; if (count < 1) return 0; if (tolower(buf[0]) == 'b') { /* binary mode, set the entire memory*/ size_t i; odev->buf_size = (odev->dev_width * ASUS_OLED_DISP_HEIGHT) / 8; kfree(odev->buf); odev->buf = kmalloc(odev->buf_size, GFP_KERNEL); if (odev->buf == NULL) { odev->buf_size = 0; printk(ASUS_OLED_ERROR "Out of memory!\n"); return -ENOMEM; } memset(odev->buf, 0xff, odev->buf_size); for (i = 1; i < count && i <= 32 * 32; i++) { odev->buf[i-1] = buf[i]; odev->buf_offs = i-1; } odev->width = odev->dev_width / 8; odev->height = ASUS_OLED_DISP_HEIGHT; odev->x_shift = 0; odev->y_shift = 0; odev->last_val = 0; send_data(odev); return count; } if (buf[0] == '<') { size_t i; size_t w = 0, h = 0; size_t w_mem, h_mem; if (count < 10 || buf[2] != ':') goto error_header; switch (tolower(buf[1])) { case ASUS_OLED_STATIC: case ASUS_OLED_ROLL: case ASUS_OLED_FLASH: odev->pic_mode = buf[1]; break; default: printk(ASUS_OLED_ERROR "Wrong picture mode: '%c'.\n", buf[1]); return -EIO; break; } for (i = 3; i < count; ++i) { if (buf[i] >= '0' && buf[i] <= '9') { w = 10*w + (buf[i] - '0'); if (w > ASUS_OLED_MAX_WIDTH) goto error_width; } else if (tolower(buf[i]) == 'x') { break; } else { goto error_width; } } for (++i; i < count; ++i) { if (buf[i] >= '0' && buf[i] <= '9') { h = 10*h + (buf[i] - '0'); if (h > ASUS_OLED_DISP_HEIGHT) goto error_height; } else if (tolower(buf[i]) == '>') { break; } else { goto error_height; } } if (w < 1 || w > ASUS_OLED_MAX_WIDTH) goto error_width; if (h < 1 || h > 
ASUS_OLED_DISP_HEIGHT) goto error_height; if (i >= count || buf[i] != '>') goto error_header; offs = i+1; if (w % (odev->dev_width) != 0) w_mem = (w/(odev->dev_width) + 1)*(odev->dev_width); else w_mem = w; if (h < ASUS_OLED_DISP_HEIGHT) h_mem = ASUS_OLED_DISP_HEIGHT; else h_mem = h; odev->buf_size = w_mem * h_mem / 8; kfree(odev->buf); odev->buf = kmalloc(odev->buf_size, GFP_KERNEL); if (odev->buf == NULL) { odev->buf_size = 0; printk(ASUS_OLED_ERROR "Out of memory!\n"); return -ENOMEM; } memset(odev->buf, 0xff, odev->buf_size); odev->buf_offs = 0; odev->width = w; odev->height = h; odev->x_shift = 0; odev->y_shift = 0; odev->last_val = 0; if (odev->pic_mode == ASUS_OLED_FLASH) { if (h < ASUS_OLED_DISP_HEIGHT/2) odev->y_shift = (ASUS_OLED_DISP_HEIGHT/2 - h)/2; } else { if (h < ASUS_OLED_DISP_HEIGHT) odev->y_shift = (ASUS_OLED_DISP_HEIGHT - h)/2; } if (w < (odev->dev_width)) odev->x_shift = ((odev->dev_width) - w)/2; } max_offs = odev->width * odev->height; while (offs < count && odev->buf_offs < max_offs) { int ret = 0; if (buf[offs] == '1' || buf[offs] == '#') { ret = append_values(odev, 1, 1); if (ret < 0) return ret; } else if (buf[offs] == '0' || buf[offs] == ' ') { ret = append_values(odev, 0, 1); if (ret < 0) return ret; } else if (buf[offs] == '\n') { /* New line detected. 
Lets assume, that all characters till the end of the line were equal to the last character in this line.*/ if (odev->buf_offs % odev->width != 0) ret = append_values(odev, odev->last_val, odev->width - (odev->buf_offs % odev->width)); if (ret < 0) return ret; } offs++; } if (odev->buf_offs >= max_offs) send_data(odev); return count; error_width: printk(ASUS_OLED_ERROR "Wrong picture width specified.\n"); return -EIO; error_height: printk(ASUS_OLED_ERROR "Wrong picture height specified.\n"); return -EIO; error_header: printk(ASUS_OLED_ERROR "Wrong picture header.\n"); return -EIO; } static ssize_t set_picture(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); return odev_set_picture(usb_get_intfdata(intf), buf, count); } static ssize_t class_set_picture(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { return odev_set_picture((struct asus_oled_dev *) dev_get_drvdata(device), buf, count); } #define ASUS_OLED_DEVICE_ATTR(_file) dev_attr_asus_oled_##_file static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO, get_enabled, set_enabled); static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture); static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, class_get_enabled, class_set_enabled); static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture); static int asus_oled_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct asus_oled_dev *odev = NULL; int retval = -ENOMEM; uint16_t dev_width = 0; enum oled_pack_mode pack_mode = PACK_MODE_LAST; const struct oled_dev_desc_str *dev_desc = oled_dev_desc_table; const char *desc = NULL; if (!id) { /* Even possible? 
Just to make sure...*/ dev_err(&interface->dev, "No usb_device_id provided!\n"); return -ENODEV; } for (; dev_desc->idVendor; dev_desc++) { if (dev_desc->idVendor == id->idVendor && dev_desc->idProduct == id->idProduct) { dev_width = dev_desc->devWidth; desc = dev_desc->devDesc; pack_mode = dev_desc->packMode; break; } } if (!desc || dev_width < 1 || pack_mode == PACK_MODE_LAST) { dev_err(&interface->dev, "Missing or incomplete device description!\n"); return -ENODEV; } odev = kzalloc(sizeof(struct asus_oled_dev), GFP_KERNEL); if (odev == NULL) { dev_err(&interface->dev, "Out of memory\n"); return -ENOMEM; } odev->udev = usb_get_dev(udev); odev->pic_mode = ASUS_OLED_STATIC; odev->dev_width = dev_width; odev->pack_mode = pack_mode; odev->height = 0; odev->width = 0; odev->x_shift = 0; odev->y_shift = 0; odev->buf_offs = 0; odev->buf_size = 0; odev->last_val = 0; odev->buf = NULL; odev->enabled = 1; odev->dev = NULL; usb_set_intfdata(interface, odev); retval = device_create_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(enabled)); if (retval) goto err_files; retval = device_create_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(picture)); if (retval) goto err_files; odev->dev = device_create(oled_class, &interface->dev, MKDEV(0, 0), NULL, "oled_%d", ++oled_num); if (IS_ERR(odev->dev)) { retval = PTR_ERR(odev->dev); goto err_files; } dev_set_drvdata(odev->dev, odev); retval = device_create_file(odev->dev, &dev_attr_enabled); if (retval) goto err_class_enabled; retval = device_create_file(odev->dev, &dev_attr_picture); if (retval) goto err_class_picture; dev_info(&interface->dev, "Attached Asus OLED device: %s [width %u, pack_mode %d]\n", desc, odev->dev_width, odev->pack_mode); if (start_off) enable_oled(odev, 0); return 0; err_class_picture: device_remove_file(odev->dev, &dev_attr_picture); err_class_enabled: device_remove_file(odev->dev, &dev_attr_enabled); device_unregister(odev->dev); err_files: device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(enabled)); 
device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(picture)); usb_set_intfdata(interface, NULL); usb_put_dev(odev->udev); kfree(odev); return retval; } static void asus_oled_disconnect(struct usb_interface *interface) { struct asus_oled_dev *odev; odev = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); device_remove_file(odev->dev, &dev_attr_picture); device_remove_file(odev->dev, &dev_attr_enabled); device_unregister(odev->dev); device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(picture)); device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(enabled)); usb_put_dev(odev->udev); kfree(odev->buf); kfree(odev); dev_info(&interface->dev, "Disconnected Asus OLED device\n"); } static struct usb_driver oled_driver = { .name = ASUS_OLED_NAME, .probe = asus_oled_probe, .disconnect = asus_oled_disconnect, .id_table = id_table, }; static CLASS_ATTR_STRING(version, S_IRUGO, ASUS_OLED_UNDERSCORE_NAME " " ASUS_OLED_VERSION); static int __init asus_oled_init(void) { int retval = 0; oled_class = class_create(THIS_MODULE, ASUS_OLED_UNDERSCORE_NAME); if (IS_ERR(oled_class)) { err("Error creating " ASUS_OLED_UNDERSCORE_NAME " class"); return PTR_ERR(oled_class); } retval = class_create_file(oled_class, &class_attr_version.attr); if (retval) { err("Error creating class version file"); goto error; } retval = usb_register(&oled_driver); if (retval) { err("usb_register failed. Error number %d", retval); goto error; } return retval; error: class_destroy(oled_class); return retval; } static void __exit asus_oled_exit(void) { usb_deregister(&oled_driver); class_remove_file(oled_class, &class_attr_version.attr); class_destroy(oled_class); } module_init(asus_oled_init); module_exit(asus_oled_exit);
gpl-2.0
maxfu/android_kernel_armada_pxa1088
drivers/scsi/qla4xxx/ql4_os.c
4803
169182
/* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2010 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/iscsi_boot_sysfs.h> #include <linux/inet.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> #include "ql4_def.h" #include "ql4_version.h" #include "ql4_glbl.h" #include "ql4_dbg.h" #include "ql4_inline.h" /* * Driver version */ static char qla4xxx_version_str[40]; /* * SRB allocation cache */ static struct kmem_cache *srb_cachep; /* * Module parameter information and variables */ static int ql4xdisablesysfsboot = 1; module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xdisablesysfsboot, " Set to disable exporting boot targets to sysfs.\n" "\t\t 0 - Export boot targets\n" "\t\t 1 - Do not export boot targets (Default)"); int ql4xdontresethba; module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xdontresethba, " Don't reset the HBA for driver recovery.\n" "\t\t 0 - It will reset HBA (Default)\n" "\t\t 1 - It will NOT reset HBA"); int ql4xextended_error_logging; module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xextended_error_logging, " Option to enable extended error logging.\n" "\t\t 0 - no logging (Default)\n" "\t\t 2 - debug logging"); int ql4xenablemsix = 1; module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql4xenablemsix, " Set to enable MSI or MSI-X interrupt mechanism.\n" "\t\t 0 = enable INTx interrupt mechanism.\n" "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n" "\t\t 2 = enable MSI interrupt mechanism."); #define QL4_DEF_QDEPTH 32 static int ql4xmaxqdepth = QL4_DEF_QDEPTH; module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xmaxqdepth, " Maximum queue depth to report for target devices.\n" "\t\t Default: 32."); static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 
module_param(ql4xsess_recovery_tmo, int, S_IRUGO); MODULE_PARM_DESC(ql4xsess_recovery_tmo, " Target Session Recovery Timeout.\n" "\t\t Default: 120 sec."); static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); /* * SCSI host template entry points */ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha); /* * iSCSI template entry points */ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, enum iscsi_param param, char *buf); static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, enum iscsi_param param, char *buf); static int qla4xxx_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf); static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len); static int qla4xxx_get_iface_param(struct iscsi_iface *iface, enum iscsi_param_type param_type, int param, char *buf); static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking); static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms); static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep); static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf); static int qla4xxx_conn_start(struct iscsi_cls_conn *conn); static struct iscsi_cls_conn * qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx); static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_fd, int is_leading); static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn); static struct iscsi_cls_session * qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, uint16_t qdepth, uint32_t initial_cmdsn); static void qla4xxx_session_destroy(struct iscsi_cls_session *sess); static void qla4xxx_task_work(struct work_struct *wdata); static int qla4xxx_alloc_pdu(struct iscsi_task 
*, uint8_t); static int qla4xxx_task_xmit(struct iscsi_task *); static void qla4xxx_task_cleanup(struct iscsi_task *); static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session); static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats); static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, uint32_t iface_type, uint32_t payload_size, uint32_t pid, struct sockaddr *dst_addr); static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, uint32_t *num_entries, char *buf); static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); /* * SCSI host template entry points */ static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); static int qla4xxx_slave_alloc(struct scsi_device *device); static int qla4xxx_slave_configure(struct scsi_device *device); static void qla4xxx_slave_destroy(struct scsi_device *sdev); static umode_t ql4_attr_is_visible(int param_type, int param); static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); static struct qla4_8xxx_legacy_intr_set legacy_intr[] = QLA82XX_LEGACY_INTR_CONFIG; static struct scsi_host_template qla4xxx_driver_template = { .module = THIS_MODULE, .name = DRIVER_NAME, .proc_name = DRIVER_NAME, .queuecommand = qla4xxx_queuecommand, .eh_abort_handler = qla4xxx_eh_abort, .eh_device_reset_handler = qla4xxx_eh_device_reset, .eh_target_reset_handler = qla4xxx_eh_target_reset, .eh_host_reset_handler = qla4xxx_eh_host_reset, .eh_timed_out = qla4xxx_eh_cmd_timed_out, .slave_configure = qla4xxx_slave_configure, .slave_alloc = qla4xxx_slave_alloc, .slave_destroy = qla4xxx_slave_destroy, .this_id = -1, .cmd_per_lun = 3, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = 
SG_ALL, .max_sectors = 0xFFFF, .shost_attrs = qla4xxx_host_attrs, .host_reset = qla4xxx_host_reset, .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, }; static struct iscsi_transport qla4xxx_iscsi_transport = { .owner = THIS_MODULE, .name = DRIVER_NAME, .caps = CAP_TEXT_NEGO | CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST | CAP_DATADGST | CAP_LOGIN_OFFLOAD | CAP_MULTI_R2T, .attr_is_visible = ql4_attr_is_visible, .create_session = qla4xxx_session_create, .destroy_session = qla4xxx_session_destroy, .start_conn = qla4xxx_conn_start, .create_conn = qla4xxx_conn_create, .bind_conn = qla4xxx_conn_bind, .stop_conn = iscsi_conn_stop, .destroy_conn = qla4xxx_conn_destroy, .set_param = iscsi_set_param, .get_conn_param = qla4xxx_conn_get_param, .get_session_param = qla4xxx_session_get_param, .get_ep_param = qla4xxx_get_ep_param, .ep_connect = qla4xxx_ep_connect, .ep_poll = qla4xxx_ep_poll, .ep_disconnect = qla4xxx_ep_disconnect, .get_stats = qla4xxx_conn_get_stats, .send_pdu = iscsi_conn_send_pdu, .xmit_task = qla4xxx_task_xmit, .cleanup_task = qla4xxx_task_cleanup, .alloc_pdu = qla4xxx_alloc_pdu, .get_host_param = qla4xxx_host_get_param, .set_iface_param = qla4xxx_iface_set_param, .get_iface_param = qla4xxx_get_iface_param, .bsg_request = qla4xxx_bsg_request, .send_ping = qla4xxx_send_ping, .get_chap = qla4xxx_get_chap_list, .delete_chap = qla4xxx_delete_chap, }; static struct scsi_transport_template *qla4xxx_scsi_transport; static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, uint32_t iface_type, uint32_t payload_size, uint32_t pid, struct sockaddr *dst_addr) { struct scsi_qla_host *ha = to_qla_host(shost); struct sockaddr_in *addr; struct sockaddr_in6 *addr6; uint32_t options = 0; uint8_t ipaddr[IPv6_ADDR_LEN]; int rval; memset(ipaddr, 0, IPv6_ADDR_LEN); /* IPv4 to IPv4 */ if ((iface_type == ISCSI_IFACE_TYPE_IPV4) && (dst_addr->sa_family == AF_INET)) { addr = (struct sockaddr_in *)dst_addr; memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN); 
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 " "dest: %pI4\n", __func__, &ha->ip_config.ip_address, ipaddr)); rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, ipaddr); if (rval) rval = -EINVAL; } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) && (dst_addr->sa_family == AF_INET6)) { /* IPv6 to IPv6 */ addr6 = (struct sockaddr_in6 *)dst_addr; memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN); options |= PING_IPV6_PROTOCOL_ENABLE; /* Ping using LinkLocal address */ if ((iface_num == 0) || (iface_num == 1)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping " "src: %pI6 dest: %pI6\n", __func__, &ha->ip_config.ipv6_link_local_addr, ipaddr)); options |= PING_IPV6_LINKLOCAL_ADDR; rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, ipaddr); } else { ql4_printk(KERN_WARNING, ha, "%s: iface num = %d " "not supported\n", __func__, iface_num); rval = -ENOSYS; goto exit_send_ping; } /* * If ping using LinkLocal address fails, try ping using * IPv6 address */ if (rval != QLA_SUCCESS) { options &= ~PING_IPV6_LINKLOCAL_ADDR; if (iface_num == 0) { options |= PING_IPV6_ADDR0; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " "Ping src: %pI6 " "dest: %pI6\n", __func__, &ha->ip_config.ipv6_addr0, ipaddr)); } else if (iface_num == 1) { options |= PING_IPV6_ADDR1; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " "Ping src: %pI6 " "dest: %pI6\n", __func__, &ha->ip_config.ipv6_addr1, ipaddr)); } rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, ipaddr); if (rval) rval = -EINVAL; } } else rval = -ENOSYS; exit_send_ping: return rval; } static umode_t ql4_attr_is_visible(int param_type, int param) { switch (param_type) { case ISCSI_HOST_PARAM: switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: case ISCSI_HOST_PARAM_IPADDRESS: case ISCSI_HOST_PARAM_INITIATOR_NAME: case ISCSI_HOST_PARAM_PORT_STATE: case ISCSI_HOST_PARAM_PORT_SPEED: return S_IRUGO; default: return 0; } case ISCSI_PARAM: switch (param) { case ISCSI_PARAM_PERSISTENT_ADDRESS: case 
ISCSI_PARAM_PERSISTENT_PORT: case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_TARGET_NAME: case ISCSI_PARAM_TPGT: case ISCSI_PARAM_TARGET_ALIAS: case ISCSI_PARAM_MAX_BURST: case ISCSI_PARAM_MAX_R2T: case ISCSI_PARAM_FIRST_BURST: case ISCSI_PARAM_MAX_RECV_DLENGTH: case ISCSI_PARAM_MAX_XMIT_DLENGTH: case ISCSI_PARAM_IFACE_NAME: case ISCSI_PARAM_CHAP_OUT_IDX: case ISCSI_PARAM_CHAP_IN_IDX: case ISCSI_PARAM_USERNAME: case ISCSI_PARAM_PASSWORD: case ISCSI_PARAM_USERNAME_IN: case ISCSI_PARAM_PASSWORD_IN: return S_IRUGO; default: return 0; } case ISCSI_NET_PARAM: switch (param) { case ISCSI_NET_PARAM_IPV4_ADDR: case ISCSI_NET_PARAM_IPV4_SUBNET: case ISCSI_NET_PARAM_IPV4_GW: case ISCSI_NET_PARAM_IPV4_BOOTPROTO: case ISCSI_NET_PARAM_IFACE_ENABLE: case ISCSI_NET_PARAM_IPV6_LINKLOCAL: case ISCSI_NET_PARAM_IPV6_ADDR: case ISCSI_NET_PARAM_IPV6_ROUTER: case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: case ISCSI_NET_PARAM_VLAN_ID: case ISCSI_NET_PARAM_VLAN_PRIORITY: case ISCSI_NET_PARAM_VLAN_ENABLED: case ISCSI_NET_PARAM_MTU: case ISCSI_NET_PARAM_PORT: return S_IRUGO; default: return 0; } } return 0; } static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, uint32_t *num_entries, char *buf) { struct scsi_qla_host *ha = to_qla_host(shost); struct ql4_chap_table *chap_table; struct iscsi_chap_rec *chap_rec; int max_chap_entries = 0; int valid_chap_entries = 0; int ret = 0, i; if (is_qla8022(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else max_chap_entries = MAX_CHAP_ENTRIES_40XX; ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n", __func__, *num_entries, chap_tbl_idx); if (!buf) { ret = -ENOMEM; goto exit_get_chap_list; } chap_rec = (struct iscsi_chap_rec *) buf; mutex_lock(&ha->chap_sem); for (i = chap_tbl_idx; i < max_chap_entries; i++) { chap_table = (struct ql4_chap_table *)ha->chap_list + i; if (chap_table->cookie != 
__constant_cpu_to_le16(CHAP_VALID_COOKIE)) continue; chap_rec->chap_tbl_idx = i; strncpy(chap_rec->username, chap_table->name, ISCSI_CHAP_AUTH_NAME_MAX_LEN); strncpy(chap_rec->password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); chap_rec->password_length = chap_table->secret_len; if (chap_table->flags & BIT_7) /* local */ chap_rec->chap_type = CHAP_TYPE_OUT; if (chap_table->flags & BIT_6) /* peer */ chap_rec->chap_type = CHAP_TYPE_IN; chap_rec++; valid_chap_entries++; if (valid_chap_entries == *num_entries) break; else continue; } mutex_unlock(&ha->chap_sem); exit_get_chap_list: ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n", __func__, valid_chap_entries); *num_entries = valid_chap_entries; return ret; } static int __qla4xxx_is_chap_active(struct device *dev, void *data) { int ret = 0; uint16_t *chap_tbl_idx = (uint16_t *) data; struct iscsi_cls_session *cls_session; struct iscsi_session *sess; struct ddb_entry *ddb_entry; if (!iscsi_is_session_dev(dev)) goto exit_is_chap_active; cls_session = iscsi_dev_to_session(dev); sess = cls_session->dd_data; ddb_entry = sess->dd_data; if (iscsi_session_chkready(cls_session)) goto exit_is_chap_active; if (ddb_entry->chap_tbl_idx == *chap_tbl_idx) ret = 1; exit_is_chap_active: return ret; } static int qla4xxx_is_chap_active(struct Scsi_Host *shost, uint16_t chap_tbl_idx) { int ret = 0; ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx, __qla4xxx_is_chap_active); return ret; } static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx) { struct scsi_qla_host *ha = to_qla_host(shost); struct ql4_chap_table *chap_table; dma_addr_t chap_dma; int max_chap_entries = 0; uint32_t offset = 0; uint32_t chap_size; int ret = 0; chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); if (chap_table == NULL) return -ENOMEM; memset(chap_table, 0, sizeof(struct ql4_chap_table)); if (is_qla8022(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else 
max_chap_entries = MAX_CHAP_ENTRIES_40XX; if (chap_tbl_idx > max_chap_entries) { ret = -EINVAL; goto exit_delete_chap; } /* Check if chap index is in use. * If chap is in use don't delet chap entry */ ret = qla4xxx_is_chap_active(shost, chap_tbl_idx); if (ret) { ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot " "delete from flash\n", chap_tbl_idx); ret = -EBUSY; goto exit_delete_chap; } chap_size = sizeof(struct ql4_chap_table); if (is_qla40XX(ha)) offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size); else { offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); /* flt_chap_size is CHAP table size for both ports * so divide it by 2 to calculate the offset for second port */ if (ha->port_num == 1) offset += (ha->hw.flt_chap_size / 2); offset += (chap_tbl_idx * chap_size); } ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); if (ret != QLA_SUCCESS) { ret = -EINVAL; goto exit_delete_chap; } DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n", __le16_to_cpu(chap_table->cookie))); if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) { ql4_printk(KERN_ERR, ha, "No valid chap entry found\n"); goto exit_delete_chap; } chap_table->cookie = __constant_cpu_to_le16(0xFFFF); offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * sizeof(struct ql4_chap_table)); ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size, FLASH_OPT_RMW_COMMIT); if (ret == QLA_SUCCESS && ha->chap_list) { mutex_lock(&ha->chap_sem); /* Update ha chap_list cache */ memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx, chap_table, sizeof(struct ql4_chap_table)); mutex_unlock(&ha->chap_sem); } if (ret != QLA_SUCCESS) ret = -EINVAL; exit_delete_chap: dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); return ret; } static int qla4xxx_get_iface_param(struct iscsi_iface *iface, enum iscsi_param_type param_type, int param, char *buf) { struct Scsi_Host *shost = iscsi_iface_to_shost(iface); struct scsi_qla_host *ha = to_qla_host(shost); int len = -ENOSYS; if (param_type 
!= ISCSI_NET_PARAM) return -ENOSYS; switch (param) { case ISCSI_NET_PARAM_IPV4_ADDR: len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); break; case ISCSI_NET_PARAM_IPV4_SUBNET: len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask); break; case ISCSI_NET_PARAM_IPV4_GW: len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway); break; case ISCSI_NET_PARAM_IFACE_ENABLE: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) len = sprintf(buf, "%s\n", (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) ? "enabled" : "disabled"); else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) len = sprintf(buf, "%s\n", (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) ? "enabled" : "disabled"); break; case ISCSI_NET_PARAM_IPV4_BOOTPROTO: len = sprintf(buf, "%s\n", (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ? "dhcp" : "static"); break; case ISCSI_NET_PARAM_IPV6_ADDR: if (iface->iface_num == 0) len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0); if (iface->iface_num == 1) len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1); break; case ISCSI_NET_PARAM_IPV6_LINKLOCAL: len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_link_local_addr); break; case ISCSI_NET_PARAM_IPV6_ROUTER: len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_default_router_addr); break; case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: len = sprintf(buf, "%s\n", (ha->ip_config.ipv6_addl_options & IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ? "nd" : "static"); break; case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: len = sprintf(buf, "%s\n", (ha->ip_config.ipv6_addl_options & IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ? 
"auto" : "static"); break; case ISCSI_NET_PARAM_VLAN_ID: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) len = sprintf(buf, "%d\n", (ha->ip_config.ipv4_vlan_tag & ISCSI_MAX_VLAN_ID)); else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) len = sprintf(buf, "%d\n", (ha->ip_config.ipv6_vlan_tag & ISCSI_MAX_VLAN_ID)); break; case ISCSI_NET_PARAM_VLAN_PRIORITY: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) len = sprintf(buf, "%d\n", ((ha->ip_config.ipv4_vlan_tag >> 13) & ISCSI_MAX_VLAN_PRIORITY)); else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) len = sprintf(buf, "%d\n", ((ha->ip_config.ipv6_vlan_tag >> 13) & ISCSI_MAX_VLAN_PRIORITY)); break; case ISCSI_NET_PARAM_VLAN_ENABLED: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) len = sprintf(buf, "%s\n", (ha->ip_config.ipv4_options & IPOPT_VLAN_TAGGING_ENABLE) ? "enabled" : "disabled"); else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) len = sprintf(buf, "%s\n", (ha->ip_config.ipv6_options & IPV6_OPT_VLAN_TAGGING_ENABLE) ? "enabled" : "disabled"); break; case ISCSI_NET_PARAM_MTU: len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); break; case ISCSI_NET_PARAM_PORT: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port); else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port); break; default: len = -ENOSYS; } return len; } static struct iscsi_endpoint * qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) { int ret; struct iscsi_endpoint *ep; struct qla_endpoint *qla_ep; struct scsi_qla_host *ha; struct sockaddr_in *addr; struct sockaddr_in6 *addr6; DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); if (!shost) { ret = -ENXIO; printk(KERN_ERR "%s: shost is NULL\n", __func__); return ERR_PTR(ret); } ha = iscsi_host_priv(shost); ep = iscsi_create_endpoint(sizeof(struct qla_endpoint)); if (!ep) { ret = -ENOMEM; return ERR_PTR(ret); } qla_ep = ep->dd_data; memset(qla_ep, 0, sizeof(struct 
qla_endpoint)); if (dst_addr->sa_family == AF_INET) { memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in)); addr = (struct sockaddr_in *)&qla_ep->dst_addr; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__, (char *)&addr->sin_addr)); } else if (dst_addr->sa_family == AF_INET6) { memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in6)); addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__, (char *)&addr6->sin6_addr)); } qla_ep->host = shost; return ep; } static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { struct qla_endpoint *qla_ep; struct scsi_qla_host *ha; int ret = 0; DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); qla_ep = ep->dd_data; ha = to_qla_host(qla_ep->host); if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags)) ret = 1; return ret; } static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep) { DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); iscsi_destroy_endpoint(ep); } static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf) { struct qla_endpoint *qla_ep = ep->dd_data; struct sockaddr *dst_addr; DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); switch (param) { case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_CONN_ADDRESS: if (!qla_ep) return -ENOTCONN; dst_addr = (struct sockaddr *)&qla_ep->dst_addr; if (!dst_addr) return -ENOTCONN; return iscsi_conn_get_addr_param((struct sockaddr_storage *) &qla_ep->dst_addr, param, buf); default: return -ENOSYS; } } static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { struct iscsi_session *sess; struct iscsi_cls_session *cls_sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; struct ql_iscsi_stats *ql_iscsi_stats; int stats_size; int ret; dma_addr_t iscsi_stats_dma; DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); cls_sess = iscsi_conn_to_session(cls_conn); sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = 
ddb_entry->ha; stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); /* Allocate memory */ ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, &iscsi_stats_dma, GFP_KERNEL); if (!ql_iscsi_stats) { ql4_printk(KERN_ERR, ha, "Unable to allocate memory for iscsi stats\n"); goto exit_get_stats; } ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size, iscsi_stats_dma); if (ret != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "Unable to retreive iscsi stats\n"); goto free_stats; } /* octets */ stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets); stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets); /* xmit pdus */ stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus); stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus); stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus); stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus); stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus); stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus); stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus); stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus); /* recv pdus */ stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus); stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus); stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus); stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus); stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus); stats->logoutrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus); stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus); stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus); stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus); free_stats: dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats, iscsi_stats_dma); exit_get_stats: return; } static enum blk_eh_timer_return 
qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *session;
	struct iscsi_session *sess;
	unsigned long flags;
	enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;

	session = starget_to_session(scsi_target(sc->device));
	sess = session->dd_data;

	/* While the session is in the FAILED state, keep restarting the
	 * command timer instead of letting the SCSI error handler fire. */
	spin_lock_irqsave(&session->lock, flags);
	if (session->state == ISCSI_SESSION_FAILED)
		ret = BLK_EH_RESET_TIMER;
	spin_unlock_irqrestore(&session->lock, flags);

	return ret;
}

/*
 * qla4xxx_set_port_speed - cache the current link speed in the iscsi host.
 * @shost: Scsi_Host whose iscsi_cls_host port_speed field is updated
 *
 * Refreshes the firmware state, then decodes bits 8-11 (mask 0x0F00) of
 * the additional firmware state into an ISCSI_PORT_SPEED_* value.
 */
static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_cls_host *ihost = shost->shost_data;
	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;

	qla4xxx_get_firmware_state(ha);

	switch (ha->addl_fw_state & 0x0F00) {
	case FW_ADDSTATE_LINK_SPEED_10MBPS:
		speed = ISCSI_PORT_SPEED_10MBPS;
		break;
	case FW_ADDSTATE_LINK_SPEED_100MBPS:
		speed = ISCSI_PORT_SPEED_100MBPS;
		break;
	case FW_ADDSTATE_LINK_SPEED_1GBPS:
		speed = ISCSI_PORT_SPEED_1GBPS;
		break;
	case FW_ADDSTATE_LINK_SPEED_10GBPS:
		speed = ISCSI_PORT_SPEED_10GBPS;
		break;
	}
	ihost->port_speed = speed;
}

/*
 * qla4xxx_set_port_state - cache the link up/down state in the iscsi host.
 * @shost: Scsi_Host whose iscsi_cls_host port_state field is updated
 *
 * Maps the AF_LINK_UP adapter flag to ISCSI_PORT_STATE_UP/DOWN.
 */
static void qla4xxx_set_port_state(struct Scsi_Host *shost)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_cls_host *ihost = shost->shost_data;
	uint32_t state = ISCSI_PORT_STATE_DOWN;

	if (test_bit(AF_LINK_UP, &ha->flags))
		state = ISCSI_PORT_STATE_UP;

	ihost->port_state = state;
}

/*
 * qla4xxx_host_get_param - format a host-level iSCSI parameter into @buf.
 * @shost: Scsi_Host being queried
 * @param: which ISCSI_HOST_PARAM_* attribute to report
 * @buf: output buffer (sysfs page)
 *
 * Returns the number of bytes written, or -ENOSYS for unknown params.
 */
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
				  enum iscsi_host_param param, char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	int len;

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
		break;
	case ISCSI_HOST_PARAM_INITIATOR_NAME:
		len = sprintf(buf, "%s\n", ha->name_string);
		break;
	case ISCSI_HOST_PARAM_PORT_STATE:
		/* Refresh the cached state before reporting it. */
		qla4xxx_set_port_state(shost);
		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
		break;
	case ISCSI_HOST_PARAM_PORT_SPEED:
		qla4xxx_set_port_speed(shost);
		len =
sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); break; default: return -ENOSYS; } return len; } static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha) { if (ha->iface_ipv4) return; /* IPv4 */ ha->iface_ipv4 = iscsi_create_iface(ha->host, &qla4xxx_iscsi_transport, ISCSI_IFACE_TYPE_IPV4, 0, 0); if (!ha->iface_ipv4) ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI " "iface0.\n"); } static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha) { if (!ha->iface_ipv6_0) /* IPv6 iface-0 */ ha->iface_ipv6_0 = iscsi_create_iface(ha->host, &qla4xxx_iscsi_transport, ISCSI_IFACE_TYPE_IPV6, 0, 0); if (!ha->iface_ipv6_0) ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " "iface0.\n"); if (!ha->iface_ipv6_1) /* IPv6 iface-1 */ ha->iface_ipv6_1 = iscsi_create_iface(ha->host, &qla4xxx_iscsi_transport, ISCSI_IFACE_TYPE_IPV6, 1, 0); if (!ha->iface_ipv6_1) ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " "iface1.\n"); } static void qla4xxx_create_ifaces(struct scsi_qla_host *ha) { if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) qla4xxx_create_ipv4_iface(ha); if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) qla4xxx_create_ipv6_iface(ha); } static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha) { if (ha->iface_ipv4) { iscsi_destroy_iface(ha->iface_ipv4); ha->iface_ipv4 = NULL; } } static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha) { if (ha->iface_ipv6_0) { iscsi_destroy_iface(ha->iface_ipv6_0); ha->iface_ipv6_0 = NULL; } if (ha->iface_ipv6_1) { iscsi_destroy_iface(ha->iface_ipv6_1); ha->iface_ipv6_1 = NULL; } } static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha) { qla4xxx_destroy_ipv4_iface(ha); qla4xxx_destroy_ipv6_iface(ha); } static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, struct iscsi_iface_param_info *iface_param, struct addr_ctrl_blk *init_fw_cb) { /* * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg. * iface_num 1 is valid only for IPv6 Addr. 
*/ switch (iface_param->param) { case ISCSI_NET_PARAM_IPV6_ADDR: if (iface_param->iface_num & 0x1) /* IPv6 Addr 1 */ memcpy(init_fw_cb->ipv6_addr1, iface_param->value, sizeof(init_fw_cb->ipv6_addr1)); else /* IPv6 Addr 0 */ memcpy(init_fw_cb->ipv6_addr0, iface_param->value, sizeof(init_fw_cb->ipv6_addr0)); break; case ISCSI_NET_PARAM_IPV6_LINKLOCAL: if (iface_param->iface_num & 0x1) break; memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8], sizeof(init_fw_cb->ipv6_if_id)); break; case ISCSI_NET_PARAM_IPV6_ROUTER: if (iface_param->iface_num & 0x1) break; memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value, sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); break; case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: /* Autocfg applies to even interface */ if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE) init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE) init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); else ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for " "IPv6 addr\n"); break; case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: /* Autocfg applies to even interface */ if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE) init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); else if (iface_param->value[0] == ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE) init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); else ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for " "IPv6 linklocal addr\n"); break; case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG: /* Autocfg applies to even interface */ if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE) memset(init_fw_cb->ipv6_dflt_rtr_addr, 0, sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); break; case 
ISCSI_NET_PARAM_IFACE_ENABLE: if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { init_fw_cb->ipv6_opts |= cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE); qla4xxx_create_ipv6_iface(ha); } else { init_fw_cb->ipv6_opts &= cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE & 0xFFFF); qla4xxx_destroy_ipv6_iface(ha); } break; case ISCSI_NET_PARAM_VLAN_TAG: if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag)) break; init_fw_cb->ipv6_vlan_tag = cpu_to_be16(*(uint16_t *)iface_param->value); break; case ISCSI_NET_PARAM_VLAN_ENABLED: if (iface_param->value[0] == ISCSI_VLAN_ENABLE) init_fw_cb->ipv6_opts |= cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE); else init_fw_cb->ipv6_opts &= cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE); break; case ISCSI_NET_PARAM_MTU: init_fw_cb->eth_mtu_size = cpu_to_le16(*(uint16_t *)iface_param->value); break; case ISCSI_NET_PARAM_PORT: /* Autocfg applies to even interface */ if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv6_port = cpu_to_le16(*(uint16_t *)iface_param->value); break; default: ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n", iface_param->param); break; } } static void qla4xxx_set_ipv4(struct scsi_qla_host *ha, struct iscsi_iface_param_info *iface_param, struct addr_ctrl_blk *init_fw_cb) { switch (iface_param->param) { case ISCSI_NET_PARAM_IPV4_ADDR: memcpy(init_fw_cb->ipv4_addr, iface_param->value, sizeof(init_fw_cb->ipv4_addr)); break; case ISCSI_NET_PARAM_IPV4_SUBNET: memcpy(init_fw_cb->ipv4_subnet, iface_param->value, sizeof(init_fw_cb->ipv4_subnet)); break; case ISCSI_NET_PARAM_IPV4_GW: memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value, sizeof(init_fw_cb->ipv4_gw_addr)); break; case ISCSI_NET_PARAM_IPV4_BOOTPROTO: if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP) init_fw_cb->ipv4_tcp_opts |= cpu_to_le16(TCPOPT_DHCP_ENABLE); else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC) init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_DHCP_ENABLE); else ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n"); break; case 
ISCSI_NET_PARAM_IFACE_ENABLE: if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE); qla4xxx_create_ipv4_iface(ha); } else { init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE & 0xFFFF); qla4xxx_destroy_ipv4_iface(ha); } break; case ISCSI_NET_PARAM_VLAN_TAG: if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag)) break; init_fw_cb->ipv4_vlan_tag = cpu_to_be16(*(uint16_t *)iface_param->value); break; case ISCSI_NET_PARAM_VLAN_ENABLED: if (iface_param->value[0] == ISCSI_VLAN_ENABLE) init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE); else init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE); break; case ISCSI_NET_PARAM_MTU: init_fw_cb->eth_mtu_size = cpu_to_le16(*(uint16_t *)iface_param->value); break; case ISCSI_NET_PARAM_PORT: init_fw_cb->ipv4_port = cpu_to_le16(*(uint16_t *)iface_param->value); break; default: ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n", iface_param->param); break; } } static void qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb) { struct addr_ctrl_blk_def *acb; acb = (struct addr_ctrl_blk_def *)init_fw_cb; memset(acb->reserved1, 0, sizeof(acb->reserved1)); memset(acb->reserved2, 0, sizeof(acb->reserved2)); memset(acb->reserved3, 0, sizeof(acb->reserved3)); memset(acb->reserved4, 0, sizeof(acb->reserved4)); memset(acb->reserved5, 0, sizeof(acb->reserved5)); memset(acb->reserved6, 0, sizeof(acb->reserved6)); memset(acb->reserved7, 0, sizeof(acb->reserved7)); memset(acb->reserved8, 0, sizeof(acb->reserved8)); memset(acb->reserved9, 0, sizeof(acb->reserved9)); memset(acb->reserved10, 0, sizeof(acb->reserved10)); memset(acb->reserved11, 0, sizeof(acb->reserved11)); memset(acb->reserved12, 0, sizeof(acb->reserved12)); memset(acb->reserved13, 0, sizeof(acb->reserved13)); memset(acb->reserved14, 0, sizeof(acb->reserved14)); memset(acb->reserved15, 0, sizeof(acb->reserved15)); } static int qla4xxx_iface_set_param(struct 
Scsi_Host *shost, void *data, uint32_t len) { struct scsi_qla_host *ha = to_qla_host(shost); int rval = 0; struct iscsi_iface_param_info *iface_param = NULL; struct addr_ctrl_blk *init_fw_cb = NULL; dma_addr_t init_fw_cb_dma; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; uint32_t rem = len; struct nlattr *attr; init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), &init_fw_cb_dma, GFP_KERNEL); if (!init_fw_cb) { ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", __func__); return -ENOMEM; } memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) { ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__); rval = -EIO; goto exit_init_fw_cb; } nla_for_each_attr(attr, data, len, rem) { iface_param = nla_data(attr); if (iface_param->param_type != ISCSI_NET_PARAM) continue; switch (iface_param->iface_type) { case ISCSI_IFACE_TYPE_IPV4: switch (iface_param->iface_num) { case 0: qla4xxx_set_ipv4(ha, iface_param, init_fw_cb); break; default: /* Cannot have more than one IPv4 interface */ ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface " "number = %d\n", iface_param->iface_num); break; } break; case ISCSI_IFACE_TYPE_IPV6: switch (iface_param->iface_num) { case 0: case 1: qla4xxx_set_ipv6(ha, iface_param, init_fw_cb); break; default: /* Cannot have more than two IPv6 interface */ ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface " "number = %d\n", iface_param->iface_num); break; } break; default: ql4_printk(KERN_ERR, ha, "Invalid iface type\n"); break; } } init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A); rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB, sizeof(struct addr_ctrl_blk), FLASH_OPT_RMW_COMMIT); if (rval != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n", __func__); rval = -EIO; goto exit_init_fw_cb; } rval = qla4xxx_disable_acb(ha); if 
(rval != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n", __func__); rval = -EIO; goto exit_init_fw_cb; } wait_for_completion_timeout(&ha->disable_acb_comp, DISABLE_ACB_TOV * HZ); qla4xxx_initcb_to_acb(init_fw_cb); rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma); if (rval != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n", __func__); rval = -EIO; goto exit_init_fw_cb; } memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb, init_fw_cb_dma); exit_init_fw_cb: dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), init_fw_cb, init_fw_cb_dma); return rval; } static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, enum iscsi_param param, char *buf) { struct iscsi_session *sess = cls_sess->dd_data; struct ddb_entry *ddb_entry = sess->dd_data; struct scsi_qla_host *ha = ddb_entry->ha; int rval, len; uint16_t idx; switch (param) { case ISCSI_PARAM_CHAP_IN_IDX: rval = qla4xxx_get_chap_index(ha, sess->username_in, sess->password_in, BIDI_CHAP, &idx); if (rval) return -EINVAL; len = sprintf(buf, "%hu\n", idx); break; case ISCSI_PARAM_CHAP_OUT_IDX: rval = qla4xxx_get_chap_index(ha, sess->username, sess->password, LOCAL_CHAP, &idx); if (rval) return -EINVAL; len = sprintf(buf, "%hu\n", idx); break; default: return iscsi_session_get_param(cls_sess, param, buf); } return len; } static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { struct iscsi_conn *conn; struct qla_conn *qla_conn; struct sockaddr *dst_addr; int len = 0; conn = cls_conn->dd_data; qla_conn = conn->dd_data; dst_addr = &qla_conn->qla_ep->dst_addr; switch (param) { case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_CONN_ADDRESS: return iscsi_conn_get_addr_param((struct sockaddr_storage *) dst_addr, param, buf); default: return iscsi_conn_get_param(cls_conn, param, buf); } return len; } int qla4xxx_get_ddb_index(struct 
	scsi_qla_host *ha, uint16_t *ddb_index)
{
	uint32_t mbx_sts = 0;
	uint16_t tmp_ddb_index;
	int ret;

get_ddb_index:
	tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);

	if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Free DDB index not available\n"));
		ret = QLA_ERROR;
		goto exit_get_ddb_index;
	}

	/* Lost the race for this bit to another claimant — retry the scan. */
	if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
		goto get_ddb_index;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Found a free DDB index at %d\n", tmp_ddb_index));
	ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
	if (ret == QLA_ERROR) {
		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
			/* Firmware rejected this particular index; try the
			 * next free one.
			 * NOTE(review): the bit claimed above is not cleared
			 * on this path or on final failure — possible index
			 * leak; TODO confirm against later upstream fixes. */
			ql4_printk(KERN_INFO, ha,
				   "DDB index = %d not available trying next\n",
				   tmp_ddb_index);
			goto get_ddb_index;
		}
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Free FW DDB not available\n"));
	}

	*ddb_index = tmp_ddb_index;

exit_get_ddb_index:
	return ret;
}

/*
 * qla4xxx_match_ipaddress - compare a user-supplied IP string against an
 * existing DDB's formatted address.
 *
 * The user string is parsed (IPv4 or IPv6 depending on the DDB's options)
 * and re-formatted with %pI4/%pI6 so both sides use identical formatting
 * before strcmp.  Returns QLA_SUCCESS on match, QLA_ERROR on mismatch or
 * unparseable input.
 */
static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
				   struct ddb_entry *ddb_entry,
				   char *existing_ipaddr,
				   char *user_ipaddr)
{
	uint8_t dst_ipaddr[IPv6_ADDR_LEN];
	char formatted_ipaddr[DDB_IPADDR_LEN];
	int status = QLA_SUCCESS, ret = 0;

	if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
		ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
			       '\0', NULL);
		if (ret == 0) {
			status = QLA_ERROR;
			goto out_match;
		}
		ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
	} else {
		ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
			       '\0', NULL);
		if (ret == 0) {
			status = QLA_ERROR;
			goto out_match;
		}
		ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
	}

	if (strcmp(existing_ipaddr, formatted_ipaddr))
		status = QLA_ERROR;

out_match:
	return status;
}

/*
 * qla4xxx_match_fwdb_session - look for an existing flash-DDB session that
 * matches this connection's target name, address, and port.
 * (Body continues on the next chunk line.)
 */
static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
				      struct iscsi_cls_conn *cls_conn)
{
	int idx = 0, max_ddbs, rval;
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess, *existing_sess;
	struct iscsi_conn *conn, *existing_conn;
	struct ddb_entry *ddb_entry;

	sess = cls_sess->dd_data;
	conn = cls_conn->dd_data;

	if
	    (sess->targetname == NULL ||
	    conn->persistent_address == NULL ||
	    conn->persistent_port == 0)
		return QLA_ERROR;

	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
				    MAX_DEV_DB_ENTRIES;

	/* Scan every flash DDB for an entry matching this connection's
	 * target name, IP address, and port. */
	for (idx = 0; idx < max_ddbs; idx++) {
		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
		if (ddb_entry == NULL)
			continue;

		if (ddb_entry->ddb_type != FLASH_DDB)
			continue;

		existing_sess = ddb_entry->sess->dd_data;
		existing_conn = ddb_entry->conn->dd_data;

		if (existing_sess->targetname == NULL ||
		    existing_conn->persistent_address == NULL ||
		    existing_conn->persistent_port == 0)
			continue;

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "IQN = %s User IQN = %s\n",
				  existing_sess->targetname,
				  sess->targetname));

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "IP = %s User IP = %s\n",
				  existing_conn->persistent_address,
				  conn->persistent_address));

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Port = %d User Port = %d\n",
				  existing_conn->persistent_port,
				  conn->persistent_port));

		if (strcmp(existing_sess->targetname, sess->targetname))
			continue;

		rval = qla4xxx_match_ipaddress(ha, ddb_entry,
					existing_conn->persistent_address,
					conn->persistent_address);
		if (rval == QLA_ERROR)
			continue;

		if (existing_conn->persistent_port != conn->persistent_port)
			continue;

		/* Full match: name + address + port. */
		break;
	}

	if (idx == max_ddbs)
		return QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Match found in fwdb sessions\n"));
	return QLA_SUCCESS;
}

/*
 * qla4xxx_session_create - iscsi transport hook: create a session bound to
 * an endpoint.  Reserves a firmware DDB index and sets up the class
 * session.  (Body continues on the next chunk line.)
 */
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep,
		       uint16_t cmds_max, uint16_t qdepth,
		       uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_sess;
	struct scsi_qla_host *ha;
	struct qla_endpoint *qla_ep;
	struct ddb_entry *ddb_entry;
	uint16_t ddb_index;
	struct iscsi_session *sess;
	struct sockaddr *dst_addr;
	int ret;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	if (!ep) {
		printk(KERN_ERR "qla4xxx: missing ep.\n");
		return NULL;
	}

	qla_ep = ep->dd_data;
	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
	ha = to_qla_host(qla_ep->host);

	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
	if (ret == QLA_ERROR)
		return NULL;
	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
				       cmds_max, sizeof(struct ddb_entry),
				       sizeof(struct ql4_task_data),
				       initial_cmdsn, ddb_index);
	if (!cls_sess)
		return NULL;

	/* Wire the driver-private ddb_entry to the new class session and
	 * record it in the adapter's fw_ddb_index map. */
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ddb_entry->fw_ddb_index = ddb_index;
	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
	ddb_entry->ha = ha;
	ddb_entry->sess = cls_sess;
	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
	ddb_entry->ddb_change = qla4xxx_ddb_change;
	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
	ha->tot_ddbs++;

	return cls_sess;
}

/*
 * qla4xxx_session_destroy - iscsi transport hook: tear down a session.
 * Clears the firmware DDB entry, frees the driver DDB under the hardware
 * lock, then tears down the class session.
 */
static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	unsigned long flags;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_free_ddb(ha, ddb_entry);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	iscsi_session_teardown(cls_sess);
}

/*
 * qla4xxx_conn_create - iscsi transport hook: create a class connection
 * and attach it to the session's ddb_entry.
 */
static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
				    conn_idx);
	if (!cls_conn)
		return NULL;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ddb_entry->conn = cls_conn;

	return cls_conn;
}

/*
 * qla4xxx_conn_bind - iscsi transport hook: bind a connection to its
 * endpoint.  (Body continues on the next chunk line.)
 */
static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
			     struct iscsi_cls_conn *cls_conn,
			     uint64_t transport_fd, int is_leading)
{
	struct iscsi_conn *conn;
	struct qla_conn *qla_conn;
	struct iscsi_endpoint *ep;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));

	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
		return -EINVAL;

	/* NOTE(review): iscsi_lookup_endpoint() can return NULL, and ep is
	 * dereferenced without a check just below (next chunk line); later
	 * upstream kernels add "if (!ep) return -EINVAL;" here — TODO
	 * confirm and backport. */
	ep = iscsi_lookup_endpoint(transport_fd);
	conn =
	    cls_conn->dd_data;
	qla_conn = conn->dd_data;
	qla_conn->qla_ep = ep->dd_data;
	return 0;
}

/*
 * qla4xxx_conn_start - iscsi transport hook: log in to the target.
 *
 * Refuses with -EEXIST if the firmware already has a matching flash-DDB
 * session (logging in again could force the target to log out the prior
 * connection), otherwise programs the DDB parameters and opens the
 * connection.
 */
static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t mbx_sts = 0;
	int ret = 0;
	int status = QLA_SUCCESS;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	/* Check if we have matching FW DDB, if yes then do not
	 * login to this target. This could cause target to logout previous
	 * connection */
	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
	if (ret == QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha,
			   "Session already exist in FW.\n");
		ret = -EEXIST;
		goto exit_conn_start;
	}

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
					  sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		ret = -ENOMEM;
		goto exit_conn_start;
	}

	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
	if (ret) {
		/* If iscsid is stopped and started then no need to do
		 * set param again since ddb state will be already
		 * active and FW does not allow set ddb to an
		 * active session.
		 */
		if (mbx_sts)
			if (ddb_entry->fw_ddb_device_state ==
			    DDB_DS_SESSION_ACTIVE) {
				ddb_entry->unblock_sess(ddb_entry->sess);
				goto exit_set_param;
			}

		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
			   __func__, ddb_entry->fw_ddb_index);
		goto exit_conn_start;
	}

	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
	if (status == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
			   sess->targetname);
		ret = -EINVAL;
		goto exit_conn_start;
	}

	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;

	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
		      ddb_entry->fw_ddb_device_state));

exit_set_param:
	ret = 0;

exit_conn_start:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
	return ret;
}

/*
 * qla4xxx_conn_destroy - iscsi transport hook: log out and close the
 * firmware session for this connection.
 */
static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess;
	struct scsi_qla_host *ha;
	struct ddb_entry *ddb_entry;
	int options;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	options = LOGOUT_OPTION_CLOSE_SESSION;
	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
}

/*
 * qla4xxx_task_work - workqueue handler completing a passthru iSCSI task.
 * (Body continues on the next chunk line.)
 */
static void qla4xxx_task_work(struct work_struct *wdata)
{
	struct ql4_task_data *task_data;
	struct scsi_qla_host *ha;
	struct passthru_status *sts;
	struct iscsi_task *task;
	struct iscsi_hdr *hdr;
	uint8_t *data;
	uint32_t data_len;
	struct iscsi_conn *conn;
	int hdr_len;
	itt_t itt;

	task_data = container_of(wdata, struct ql4_task_data, task_work);
	ha = task_data->ha;
	task = task_data->task;
	sts = &task_data->sts;
	hdr_len = sizeof(struct iscsi_hdr);

	DEBUG3(printk(KERN_INFO "Status returned\n"));
	DEBUG3(qla4xxx_dump_buffer(sts, 64));
	DEBUG3(printk(KERN_INFO "Response buffer"));
	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));

	conn = task->conn;

	switch (sts->completionStatus) {
	case PASSTHRU_STATUS_COMPLETE:
		/* Hand the response PDU (header + payload) back to the
		 * iscsi midlayer. */
		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
		itt = sts->handle;
		hdr->itt = itt;
		data = task_data->resp_buffer + hdr_len;
		data_len = task_data->resp_len - hdr_len;
		iscsi_complete_pdu(conn, hdr, data, data_len);
		break;

	default:
		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
			   sts->completionStatus);
		break;
	}
	return;
}

/*
 * qla4xxx_alloc_pdu - iscsi transport hook: allocate DMA request/response
 * buffers for a (non-SCSI) passthru task.
 *
 * Returns 0 on success, -EINVAL for SCSI commands (not implemented on this
 * path), -ENOMEM when either coherent buffer cannot be allocated (partial
 * allocations are freed).
 */
static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct ql4_task_data *task_data;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	int hdr_len;

	sess = task->conn->session;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	task_data = task->dd_data;
	memset(task_data, 0, sizeof(struct ql4_task_data));

	if (task->sc) {
		ql4_printk(KERN_INFO, ha,
			   "%s: SCSI Commands not implemented\n", __func__);
		return -EINVAL;
	}

	hdr_len = sizeof(struct iscsi_hdr);
	task_data->ha = ha;
	task_data->task = task;

	if (task->data_count) {
		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
						     task->data_count,
						     PCI_DMA_TODEVICE);
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
		      __func__, task->conn->max_recv_dlength, hdr_len));

	task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
						    task_data->resp_len,
						    &task_data->resp_dma,
						    GFP_ATOMIC);
	if (!task_data->resp_buffer)
		goto exit_alloc_pdu;

	task_data->req_len = task->data_count + hdr_len;
	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
						   task_data->req_len,
						   &task_data->req_dma,
						   GFP_ATOMIC);
	if (!task_data->req_buffer)
		goto exit_alloc_pdu;

	task->hdr = task_data->req_buffer;

	INIT_WORK(&task_data->task_work, qla4xxx_task_work);

	return 0;

exit_alloc_pdu:
	/* Unwind whichever buffers were successfully allocated. */
	if (task_data->resp_buffer)
		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
				  task_data->resp_buffer,
				  task_data->resp_dma);

	if (task_data->req_buffer)
		dma_free_coherent(&ha->pdev->dev, task_data->req_len,
				  task_data->req_buffer, task_data->req_dma);
	return -ENOMEM;
}

/*
 * qla4xxx_task_cleanup - iscsi transport hook: release the DMA resources
 * allocated by qla4xxx_alloc_pdu.
 */
static void qla4xxx_task_cleanup(struct iscsi_task *task)
{
	struct ql4_task_data *task_data;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	int hdr_len;

	hdr_len = sizeof(struct iscsi_hdr);
	sess = task->conn->session;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	task_data = task->dd_data;

	if (task->data_count) {
		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
				 task->data_count, PCI_DMA_TODEVICE);
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
		      __func__, task->conn->max_recv_dlength, hdr_len));

	dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
			  task_data->resp_buffer, task_data->resp_dma);
	dma_free_coherent(&ha->pdev->dev, task_data->req_len,
			  task_data->req_buffer, task_data->req_dma);
	return;
}

/*
 * qla4xxx_task_xmit - iscsi transport hook: transmit a task.  Only
 * passthru (non-SCSI) tasks are supported on this path.
 */
static int qla4xxx_task_xmit(struct iscsi_task *task)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_session *sess = task->conn->session;
	struct ddb_entry *ddb_entry = sess->dd_data;
	struct scsi_qla_host *ha = ddb_entry->ha;

	if (!sc)
		return qla4xxx_send_passthru0(task);

	ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
		   __func__);
	return -ENOSYS;
}

/*
 * qla4xxx_copy_fwddb_param - publish firmware DDB parameters into the
 * iscsi session/connection objects.  (Body continues on the next chunk
 * line.)
 */
static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
				     struct dev_db_entry *fw_ddb_entry,
				     struct iscsi_cls_session *cls_sess,
				     struct iscsi_cls_conn *cls_conn)
{
	int buflen = 0;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct iscsi_conn *conn;
	char ip_addr[DDB_IPADDR_LEN];
	uint16_t options = 0;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	conn = cls_conn->dd_data;

	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);

	/* Firmware stores segment lengths in BYTE_UNITS granules. */
	conn->max_recv_dlength = BYTE_UNITS *
			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
	conn->max_xmit_dlength = BYTE_UNITS *
			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
	sess->initial_r2t_en = (BIT_10 &
				le16_to_cpu(fw_ddb_entry->iscsi_options));
	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
	sess->imm_data_en = (BIT_11 &
			     le16_to_cpu(fw_ddb_entry->iscsi_options));
	sess->first_burst = BYTE_UNITS *
			    le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
	sess->max_burst = BYTE_UNITS *
			  le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
	conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);

	options = le16_to_cpu(fw_ddb_entry->options);
	if (options & DDB_OPT_IPV6_DEVICE)
		sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
	else
		sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);

	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
			(char *)fw_ddb_entry->iscsi_name, buflen);
	iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
			(char *)ha->name_string, buflen);
	iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
			(char *)ip_addr, buflen);
	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
			(char *)fw_ddb_entry->iscsi_alias, buflen);
}

/*
 * qla4xxx_update_session_conn_fwddb_param - re-read the firmware DDB for
 * this entry and push its parameters into the session/connection via
 * qla4xxx_copy_fwddb_param().  Failures are logged and silently ignored.
 */
void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
					     struct ddb_entry *ddb_entry)
{
	struct iscsi_cls_session *cls_sess;
	struct iscsi_cls_conn *cls_conn;
	uint32_t ddb_state;
	dma_addr_t fw_ddb_entry_dma;
	struct dev_db_entry *fw_ddb_entry;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
					  sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		goto exit_session_conn_fwddb_param;
	}

	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
				    NULL, NULL, NULL) == QLA_ERROR) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
				  "get_ddb_entry for fw_ddb_index %d\n",
				  ha->host_no, __func__,
				  ddb_entry->fw_ddb_index));
		goto exit_session_conn_fwddb_param;
	}

	cls_sess = ddb_entry->sess;
	cls_conn = ddb_entry->conn;

	/* Update params */
	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);

exit_session_conn_fwddb_param:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
}

/*
 * qla4xxx_update_session_conn_param - refresh session/connection timers
 * and negotiated parameters from the firmware DDB after login.  (Body
 * continues on the next chunk line.)
 */
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
				       struct ddb_entry *ddb_entry)
{
	struct iscsi_cls_session *cls_sess;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_session *sess;
	struct iscsi_conn *conn;
	uint32_t ddb_state;
	dma_addr_t fw_ddb_entry_dma;
	struct dev_db_entry *fw_ddb_entry;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
					  sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		goto exit_session_conn_param;
	}

	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
				    NULL, NULL, NULL) == QLA_ERROR) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
				  "get_ddb_entry for fw_ddb_index %d\n",
				  ha->host_no, __func__,
				  ddb_entry->fw_ddb_index));
		goto exit_session_conn_param;
	}

	cls_sess = ddb_entry->sess;
	sess = cls_sess->dd_data;

	cls_conn = ddb_entry->conn;
	conn = cls_conn->dd_data;

	/* Update timers after login */
	/* Clamp the firmware default timeout into (LOGIN_TOV,
	 * LOGIN_TOV * 10); fall back to LOGIN_TOV otherwise. */
	ddb_entry->default_relogin_timeout =
		(le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
		(le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
		le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
	ddb_entry->default_time2wait =
				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);

	/* Update params */
	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
	conn->max_recv_dlength = BYTE_UNITS *
			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
	conn->max_xmit_dlength = BYTE_UNITS *
			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
	sess->initial_r2t_en = (BIT_10 &
				le16_to_cpu(fw_ddb_entry->iscsi_options));
	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
	sess->imm_data_en = (BIT_11 &
			     le16_to_cpu(fw_ddb_entry->iscsi_options));
	sess->first_burst = BYTE_UNITS *
			    le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
	sess->max_burst = BYTE_UNITS *
			  le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
	memcpy(sess->initiatorname, ha->name_string,
	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));

	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
			(char *)fw_ddb_entry->iscsi_alias, 0);

exit_session_conn_param:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
}

/*
 * Timer routines
 */

/*
 * qla4xxx_start_timer - arm the per-adapter 1Hz timer with the given
 * callback and interval (in seconds).
 */
static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
				unsigned long interval)
{
	DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
		     __func__, ha->host->host_no));
	init_timer(&ha->timer);
	ha->timer.expires = jiffies + interval * HZ;
	ha->timer.data = (unsigned long)ha;
	ha->timer.function = (void (*)(unsigned long))func;
	add_timer(&ha->timer);
	ha->timer_active = 1;
}

/* qla4xxx_stop_timer - synchronously cancel the adapter timer. */
static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
{
	del_timer_sync(&ha->timer);
	ha->timer_active = 0;
}

/**
 * qla4xxx_mark_device_missing - blocks the session
 * @cls_session: Pointer to the session to be blocked
 *
 * This routine marks a device missing and close connection.
 **/
void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
{
	iscsi_block_session(cls_session);
}

/**
 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
 * @ha: Pointer to host adapter structure.
 *
 * This routine marks a device missing and resets the relogin retry count.
 **/
void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
{
	iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
}

/*
 * qla4xxx_get_new_srb - allocate and initialize an srb from the adapter's
 * mempool (GFP_ATOMIC: callable from the I/O path) and attach it to @cmd.
 * Returns NULL when the pool is exhausted.
 */
static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
				       struct ddb_entry *ddb_entry,
				       struct scsi_cmnd *cmd)
{
	struct srb *srb;

	srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
	if (!srb)
		return srb;

	kref_init(&srb->srb_ref);
	srb->ha = ha;
	srb->ddb = ddb_entry;
	srb->cmd = cmd;
	srb->flags = 0;
	CMD_SP(cmd) = (void *)srb;

	return srb;
}

/*
 * qla4xxx_srb_free_dma - unmap the command's scatter/gather DMA (if mapped)
 * and detach the srb from the scsi_cmnd.
 */
static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;

	if (srb->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		srb->flags &= ~SRB_DMA_VALID;
	}
	CMD_SP(cmd) = NULL;
}

/*
 * qla4xxx_srb_compl - kref release callback: free DMA, return the srb to
 * the mempool, and complete the SCSI command.
 */
void qla4xxx_srb_compl(struct kref *ref)
{
	struct srb *srb = container_of(ref, struct srb, srb_ref);
	struct scsi_cmnd *cmd = srb->cmd;
	struct scsi_qla_host *ha = srb->ha;

	qla4xxx_srb_free_dma(ha, srb);

	mempool_free(srb, ha->srb_mempool);

	cmd->scsi_done(cmd);
}

/**
 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
 * @host: scsi host
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * Remarks:
 *	This routine is invoked by Linux to send a SCSI command to the driver.
 *	The mid-level driver tries to ensure that queuecommand never gets
 *	invoked concurrently with itself or the interrupt handler (although
 *	the interrupt handler may call this routine as part of request-
 *	completion handling).   Unfortunely, it sometimes calls the scheduler
 *	in interrupt context which is a big NO! NO!.
 **/
static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(host);
	struct ddb_entry *ddb_entry = cmd->device->hostdata;
	struct iscsi_cls_session *sess = ddb_entry->sess;
	struct srb *srb;
	int rval;

	/* Fail fast during AER/EEH recovery. */
	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
		if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
			cmd->result = DID_NO_CONNECT << 16;
		else
			cmd->result = DID_REQUEUE << 16;
		goto qc_fail_command;
	}

	if (!sess) {
		cmd->result = DID_IMM_RETRY << 16;
		goto qc_fail_command;
	}

	rval = iscsi_session_chkready(sess);
	if (rval) {
		cmd->result = rval;
		goto qc_fail_command;
	}

	/* Any pending reset/quiescent/offline state means the adapter cannot
	 * accept new I/O right now — push back to the midlayer. */
	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
	    !test_bit(AF_ONLINE, &ha->flags) ||
	    !test_bit(AF_LINK_UP, &ha->flags) ||
	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
		goto qc_host_busy;

	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
	if (!srb)
		goto qc_host_busy;

	rval = qla4xxx_send_command_to_isp(ha, srb);
	if (rval != QLA_SUCCESS)
		goto qc_host_busy_free_sp;

	return 0;

qc_host_busy_free_sp:
	qla4xxx_srb_free_dma(ha, srb);
	mempool_free(srb, ha->srb_mempool);

qc_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

qc_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}

/**
 * qla4xxx_mem_free - frees memory allocated to adapter
 * @ha: Pointer to host adapter structure.
 *
 * Frees memory previously allocated by qla4xxx_mem_alloc
 **/
static void qla4xxx_mem_free(struct scsi_qla_host *ha)
{
	if (ha->queues)
		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
				  ha->queues_dma);

	ha->queues_len = 0;
	ha->queues = NULL;
	ha->queues_dma = 0;
	ha->request_ring = NULL;
	ha->request_dma = 0;
	ha->response_ring = NULL;
	ha->response_dma = 0;
	ha->shadow_regs = NULL;
	ha->shadow_regs_dma = 0;

	/* Free srb pool. */
	if (ha->srb_mempool)
		mempool_destroy(ha->srb_mempool);

	ha->srb_mempool = NULL;

	if (ha->chap_dma_pool)
		dma_pool_destroy(ha->chap_dma_pool);

	if (ha->chap_list)
		vfree(ha->chap_list);
	ha->chap_list = NULL;

	if (ha->fw_ddb_dma_pool)
		dma_pool_destroy(ha->fw_ddb_dma_pool);

	/* release io space registers  */
	if (is_qla8022(ha)) {
		if (ha->nx_pcibase)
			iounmap(
			    (struct device_reg_82xx __iomem *)ha->nx_pcibase);
	} else if (ha->reg)
		iounmap(ha->reg);
	pci_release_regions(ha->pdev);
}

/**
 * qla4xxx_mem_alloc - allocates memory for use by adapter.
 * @ha: Pointer to host adapter structure
 *
 * Allocates DMA memory for request and response queues. Also allocates memory
 * for srbs.
 **/
static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
{
	unsigned long align;

	/* Allocate contiguous block of DMA memory for queues. */
	ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
			  (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
			  sizeof(struct shadow_regs) +
			  MEM_ALIGN_VALUE +
			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
					&ha->queues_dma, GFP_KERNEL);
	if (ha->queues == NULL) {
		ql4_printk(KERN_WARNING, ha,
		    "Memory Allocation failed - queues.\n");
		goto mem_alloc_error_exit;
	}
	memset(ha->queues, 0, ha->queues_len);

	/*
	 * As per RISC alignment requirements -- the bus-address must be a
	 * multiple of the request-ring size (in bytes).
	 */
	align = 0;
	if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
		align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
					   (MEM_ALIGN_VALUE - 1));

	/* Update request and response queue pointers.
	 */
	ha->request_dma = ha->queues_dma + align;
	ha->request_ring = (struct queue_entry *) (ha->queues + align);
	ha->response_dma = ha->queues_dma + align +
		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
	ha->response_ring = (struct queue_entry *) (ha->queues + align +
						    (REQUEST_QUEUE_DEPTH *
						     QUEUE_SIZE));

	ha->shadow_regs_dma = ha->queues_dma + align +
		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
		(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
	ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
						  (REQUEST_QUEUE_DEPTH *
						   QUEUE_SIZE) +
						  (RESPONSE_QUEUE_DEPTH *
						   QUEUE_SIZE));

	/* Allocate memory for srb pool. */
	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
					 mempool_free_slab, srb_cachep);
	if (ha->srb_mempool == NULL) {
		ql4_printk(KERN_WARNING, ha,
		    "Memory Allocation failed - SRB Pool.\n");
		goto mem_alloc_error_exit;
	}

	ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
					    CHAP_DMA_BLOCK_SIZE, 8, 0);
	if (ha->chap_dma_pool == NULL) {
		ql4_printk(KERN_WARNING, ha,
		    "%s: chap_dma_pool allocation failed..\n", __func__);
		goto mem_alloc_error_exit;
	}

	ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
					      DDB_DMA_BLOCK_SIZE, 8, 0);
	if (ha->fw_ddb_dma_pool == NULL) {
		ql4_printk(KERN_WARNING, ha,
			   "%s: fw_ddb_dma_pool allocation failed..\n",
			   __func__);
		goto mem_alloc_error_exit;
	}

	return QLA_SUCCESS;

mem_alloc_error_exit:
	/* qla4xxx_mem_free() tolerates partially-initialized state. */
	qla4xxx_mem_free(ha);
	return QLA_ERROR;
}

/**
 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
 * @ha: adapter block pointer.
 *
 * Note: The caller should not hold the idc lock.
 **/
static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
{
	uint32_t temp, temp_state, temp_val;
	int status = QLA_SUCCESS;

	temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
	temp_state = qla82xx_get_temp_state(temp);
	temp_val = qla82xx_get_temp_val(temp);

	if (temp_state == QLA82XX_TEMP_PANIC) {
		ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
			   " exceeds maximum allowed. Hardware has been shut"
			   " down.\n", temp_val);
		status = QLA_ERROR;
	} else if (temp_state == QLA82XX_TEMP_WARN) {
		/* Warn only on the transition out of the normal range. */
		if (ha->temperature == QLA82XX_TEMP_NORMAL)
			ql4_printk(KERN_WARNING, ha, "Device temperature %d"
				   " degrees C exceeds operating range."
				   " Immediate action needed.\n", temp_val);
	} else {
		if (ha->temperature == QLA82XX_TEMP_WARN)
			ql4_printk(KERN_INFO, ha, "Device temperature is"
				   " now %d degrees C in normal range.\n",
				   temp_val);
	}
	ha->temperature = temp_state;
	return status;
}

/**
 * qla4_8xxx_check_fw_alive  - Check firmware health
 * @ha: Pointer to host adapter structure.
 *
 * Context: Interrupt
 **/
static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
{
	uint32_t fw_heartbeat_counter;
	int status = QLA_SUCCESS;

	fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
	if (fw_heartbeat_counter == 0xffffffff) {
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
			      "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
			      ha->host_no, __func__));
		return status;
	}

	if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
		ha->seconds_since_last_heartbeat++;
		/* FW not alive after 2 seconds */
		if (ha->seconds_since_last_heartbeat == 2) {
			ha->seconds_since_last_heartbeat = 0;
			/* Dump peg halt status and program counters to aid
			 * firmware-hang post-mortem. */
			ql4_printk(KERN_INFO, ha,
				   "scsi(%ld): %s, Dumping hw/fw registers:\n "
				   " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
				   " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
				   " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
				   " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
				   ha->host_no, __func__,
				   qla4_8xxx_rd_32(ha,
						   QLA82XX_PEG_HALT_STATUS1),
				   qla4_8xxx_rd_32(ha,
						   QLA82XX_PEG_HALT_STATUS2),
				   qla4_8xxx_rd_32(ha,
						   QLA82XX_CRB_PEG_NET_0 + 0x3c),
				   qla4_8xxx_rd_32(ha,
						   QLA82XX_CRB_PEG_NET_1 + 0x3c),
				   qla4_8xxx_rd_32(ha,
						   QLA82XX_CRB_PEG_NET_2 + 0x3c),
				   qla4_8xxx_rd_32(ha,
						   QLA82XX_CRB_PEG_NET_3 + 0x3c),
				   qla4_8xxx_rd_32(ha,
						   QLA82XX_CRB_PEG_NET_4 + 0x3c));
			status = QLA_ERROR;
		}
	} else
		ha->seconds_since_last_heartbeat = 0;

	ha->fw_heartbeat_counter =
		fw_heartbeat_counter;
	return status;
}

/**
 * qla4_8xxx_watchdog - Poll dev state
 * @ha: Pointer to host adapter structure.
 *
 * Context: Interrupt
 **/
void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
{
	uint32_t dev_state, halt_status;

	/* don't poll if reset is going on */
	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);

		if (qla4_8xxx_check_temp(ha)) {
			/* Over-temperature panic: stop pause frames and flag
			 * the adapter unrecoverable. */
			ql4_printk(KERN_INFO, ha, "disabling pause"
				   " transmit on port 0 & 1.\n");
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
					CRB_NIU_XG_PAUSE_CTL_P0 |
					CRB_NIU_XG_PAUSE_CTL_P1);
			set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
			qla4xxx_wake_dpc(ha);
		} else if (dev_state == QLA82XX_DEV_NEED_RESET &&
		    !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
			if (!ql4xdontresethba) {
				ql4_printk(KERN_INFO, ha, "%s: HW State: "
					   "NEED RESET!\n", __func__);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
				qla4xxx_wake_dpc(ha);
			}
		} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
				   __func__);
			set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
			qla4xxx_wake_dpc(ha);
		} else  {
			/* Check firmware health */
			if (qla4_8xxx_check_fw_alive(ha)) {
				ql4_printk(KERN_INFO, ha, "disabling pause"
					   " transmit on port 0 & 1.\n");
				qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
						CRB_NIU_XG_PAUSE_CTL_P0 |
						CRB_NIU_XG_PAUSE_CTL_P1);
				halt_status = qla4_8xxx_rd_32(ha,
						QLA82XX_PEG_HALT_STATUS1);

				if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
					ql4_printk(KERN_ERR, ha, "%s:"
						   " Firmware aborted with"
						   " error code 0x00006700."
						   " Device is being reset\n",
						   __func__);

				/* Since we cannot change dev_state in interrupt
				 * context, set appropriate DPC flag then wakeup
				 * DPC */
				if (halt_status & HALT_STATUS_UNRECOVERABLE)
					set_bit(DPC_HA_UNRECOVERABLE,
						&ha->dpc_flags);
				else {
					ql4_printk(KERN_INFO, ha, "%s: detect "
						   "abort needed!\n", __func__);
					set_bit(DPC_RESET_HA, &ha->dpc_flags);
				}
				qla4xxx_mailbox_premature_completion(ha);
				qla4xxx_wake_dpc(ha);
			}
		}
	}
}

/*
 * qla4xxx_check_relogin_flash_ddb - per-session timer callback that drives
 * relogin retries for flash-DDB sessions which are offline.
 */
static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	if (!(ddb_entry->ddb_type == FLASH_DDB))
		return;

	if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
	    !iscsi_is_session_online(cls_sess)) {
		if (atomic_read(&ddb_entry->retry_relogin_timer) !=
		    INVALID_ENTRY) {
			if (atomic_read(&ddb_entry->retry_relogin_timer) ==
					0) {
				/* Countdown expired: request a relogin from
				 * the DPC thread. */
				atomic_set(&ddb_entry->retry_relogin_timer,
					   INVALID_ENTRY);
				set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
				set_bit(DF_RELOGIN, &ddb_entry->flags);
				DEBUG2(ql4_printk(KERN_INFO, ha,
				       "%s: index [%d] login device\n",
					__func__, ddb_entry->fw_ddb_index));
			} else
				atomic_dec(&ddb_entry->retry_relogin_timer);
		}
	}

	/* Wait for relogin to timeout */
	if (atomic_read(&ddb_entry->relogin_timer) &&
	    (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
		/*
		 * If the relogin times out and the device is
		 * still NOT ONLINE then try and relogin again.
*/
		if (!iscsi_is_session_online(cls_sess)) {
			/* Reset retry relogin timer */
			atomic_inc(&ddb_entry->relogin_retry_count);
			DEBUG2(ql4_printk(KERN_INFO, ha,
				"%s: index[%d] relogin timed out-retrying"
				" relogin (%d), retry (%d)\n", __func__,
				ddb_entry->fw_ddb_index,
				atomic_read(&ddb_entry->relogin_retry_count),
				ddb_entry->default_time2wait + 4));
			set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
			atomic_set(&ddb_entry->retry_relogin_timer,
				   ddb_entry->default_time2wait + 4);
		}
	}
}

/**
 * qla4xxx_timer - checks every second for work to do.
 * @ha: Pointer to host adapter structure.
 *
 * Runs from the per-adapter timer: kicks flash-DDB relogin bookkeeping,
 * triggers an EEH probe read, runs the 82xx watchdog / 4xxx heartbeat
 * check, and wakes the DPC thread when any dpc_flags work is pending.
 * Always re-arms itself one second out (unless AER/EEH is in progress,
 * in which case it only re-arms).
 **/
static void qla4xxx_timer(struct scsi_qla_host *ha)
{
	int start_dpc = 0;
	uint16_t w;

	iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);

	/* If we are in the middle of AER/EEH processing
	 * skip any processing and reschedule the timer */
	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
		mod_timer(&ha->timer, jiffies + HZ);
		return;
	}

	/* Hardware read to trigger an EEH error during mailbox waits. */
	if (!pci_channel_offline(ha->pdev))
		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);

	if (is_qla8022(ha)) {
		qla4_8xxx_watchdog(ha);
	}

	if (!is_qla8022(ha)) {
		/* Check for heartbeat interval. */
		if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
		    ha->heartbeat_interval != 0) {
			ha->seconds_since_last_heartbeat++;
			/* +2 gives the firmware a small grace window before
			 * we declare the heartbeat lost and reset. */
			if (ha->seconds_since_last_heartbeat >
			    ha->heartbeat_interval + 2)
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
		}
	}

	/* Process any deferred work. */
	if (!list_empty(&ha->work_list))
		start_dpc++;

	/* Wakeup the dpc routine for this adapter, if needed.
	 */
	if (start_dpc ||
	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
	    test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
	    test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
	    test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
	    test_bit(DPC_AEN, &ha->dpc_flags)) {
		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
			      " - dpc flags = 0x%lx\n",
			      ha->host_no, __func__, ha->dpc_flags));
		qla4xxx_wake_dpc(ha);
	}

	/* Reschedule timer thread to call us back in one second */
	mod_timer(&ha->timer, jiffies + HZ);

	DEBUG2(ha->seconds_since_last_intr++);
}

/**
 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
 * @ha: Pointer to host adapter structure.
 *
 * This routine stalls the driver until all outstanding commands are returned.
 * Caller must release the Hardware Lock prior to calling this routine.
 *
 * Returns QLA_SUCCESS when no commands remain pending, QLA_ERROR if
 * commands are still outstanding after WAIT_CMD_TOV seconds.
 **/
static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
{
	uint32_t index = 0;
	unsigned long flags;
	struct scsi_cmnd *cmd;
	unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);

	DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
			  "complete\n", WAIT_CMD_TOV));

	while (!time_after_eq(jiffies, wtime)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		/* Find a command that hasn't completed. */
		for (index = 0; index < ha->host->can_queue; index++) {
			cmd = scsi_host_find_tag(ha->host, index);
			/*
			 * We cannot just check if the index is valid,
			 * because if we are run from the scsi eh, then
			 * the scsi/block layer is going to prevent
			 * the tag from being released.
			 */
			if (cmd != NULL && CMD_SP(cmd))
				break;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		/* If No Commands are pending, wait is complete */
		if (index == ha->host->can_queue)
			return QLA_SUCCESS;

		msleep(1000);
	}
	/* If we timed out on waiting for commands to come back
	 * return ERROR. */
	return QLA_ERROR;
}

/**
 * qla4xxx_hw_reset - issues a soft reset to the adapter hardware
 * @ha: Pointer to host adapter structure.
 *
 * Acquires the driver lock, clears a pending SCSI Reset Interrupt (the
 * soft reset will not take otherwise), then writes CSR_SOFT_RESET and
 * flushes it with a read-back.  Returns QLA_ERROR only if the driver
 * lock cannot be obtained.
 **/
int qla4xxx_hw_reset(struct scsi_qla_host *ha)
{
	uint32_t ctrl_status;
	unsigned long flags = 0;

	DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));

	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
		return QLA_ERROR;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/*
	 * If the SCSI Reset Interrupt bit is set, clear it.
	 * Otherwise, the Soft Reset won't work.
	 */
	ctrl_status = readw(&ha->reg->ctrl_status);
	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);

	/* Issue Soft Reset */
	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
	readl(&ha->reg->ctrl_status);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;
}

/**
 * qla4xxx_soft_reset - performs soft reset.
 * @ha: Pointer to host adapter structure.
**/
int qla4xxx_soft_reset(struct scsi_qla_host *ha)
{
	uint32_t max_wait_time;
	unsigned long flags = 0;
	int status;
	uint32_t ctrl_status;

	status = qla4xxx_hw_reset(ha);
	if (status != QLA_SUCCESS)
		return status;

	status = QLA_ERROR;
	/* Wait until the Network Reset Intr bit is cleared */
	max_wait_time = RESET_INTR_TOV;
	do {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		ctrl_status = readw(&ha->reg->ctrl_status);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
			break;

		msleep(1000);
	} while ((--max_wait_time));

	if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
		DEBUG2(printk(KERN_WARNING
			      "scsi%ld: Network Reset Intr not cleared by "
			      "Network function, clearing it now!\n",
			      ha->host_no));
		spin_lock_irqsave(&ha->hardware_lock, flags);
		writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	/* Wait until the firmware tells us the Soft Reset is done */
	max_wait_time = SOFT_RESET_TOV;
	do {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		ctrl_status = readw(&ha->reg->ctrl_status);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if ((ctrl_status & CSR_SOFT_RESET) == 0) {
			status = QLA_SUCCESS;
			break;
		}

		msleep(1000);
	} while ((--max_wait_time));

	/*
	 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
	 * after the soft reset has taken place.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ctrl_status = readw(&ha->reg->ctrl_status);
	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* If soft reset fails then most probably the bios on other
	 * function is also enabled.
	 * Since the initialization is sequential the other fn
	 * won't be able to acknowledge the soft reset.
	 * Issue a force soft reset to workaround this scenario.
	 */
	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		/* Wait until the firmware tells us the Soft Reset is done */
		max_wait_time = SOFT_RESET_TOV;
		do {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ctrl_status = readw(&ha->reg->ctrl_status);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
				status = QLA_SUCCESS;
				break;
			}

			msleep(1000);
		} while ((--max_wait_time));
	}

	return status;
}

/**
 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
 * @ha: Pointer to host adapter structure.
 * @res: returned scsi status
 *
 * This routine is called just prior to a HARD RESET to return all
 * outstanding commands back to the Operating System.
 * Caller should make sure that the following locks are released
 * before this calling routine: Hardware lock, and io_request_lock.
**/
static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
{
	struct srb *srb;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 0; i < ha->host->can_queue; i++) {
		srb = qla4xxx_del_from_active_array(ha, i);
		if (srb != NULL) {
			/* Complete the command back to the midlayer with the
			 * caller-supplied result (e.g. DID_RESET << 16). */
			srb->cmd->result = res;
			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/**
 * qla4xxx_dead_adapter_cleanup - takes a dead adapter fully offline
 * @ha: Pointer to host adapter structure.
 *
 * Clears the online/init-done flags, fails all outstanding commands with
 * DID_NO_CONNECT and marks every device missing.  Called when recovery
 * retries are exhausted or the hardware reports a failed state.
 **/
void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
{
	clear_bit(AF_ONLINE, &ha->flags);

	/* Disable the board */
	ql4_printk(KERN_INFO, ha, "Disabling the board\n");

	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
	qla4xxx_mark_all_devices_missing(ha);
	clear_bit(AF_INIT_DONE, &ha->flags);
}

/**
 * qla4xxx_fail_session - marks one iSCSI session failed
 * @cls_session: iSCSI class session to fail
 *
 * Per-session callback (used with iscsi_host_for_each_session): flags the
 * session DDB_DS_SESSION_FAILED, then blocks flash-DDB sessions or reports
 * a connection failure for user-space managed sessions.
 **/
static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;

	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;
	ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;

	if (ddb_entry->ddb_type == FLASH_DDB)
		iscsi_block_session(ddb_entry->sess);
	else
		iscsi_session_failure(cls_session->dd_data,
				      ISCSI_ERR_CONN_FAILED);
}

/**
 * qla4xxx_recover_adapter - recovers adapter after a fatal error
 * @ha: Pointer to host adapter structure.
**/
static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
{
	int status = QLA_ERROR;
	uint8_t reset_chip = 0;
	uint32_t dev_state;
	unsigned long wait;

	/* Stall incoming I/O until we are done */
	scsi_block_requests(ha->host);
	clear_bit(AF_ONLINE, &ha->flags);
	clear_bit(AF_LINK_UP, &ha->flags);

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));

	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);

	iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
		reset_chip = 1;

	/* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
	 * do not reset adapter, jump to initialize_adapter */
	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
		status = QLA_SUCCESS;
		goto recover_ha_init_adapter;
	}

	/* For the ISP-82xx adapter, issue a stop_firmware if invoked
	 * from eh_host_reset or ioctl module */
	if (is_qla8022(ha) && !reset_chip &&
	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {

		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "scsi%ld: %s - Performing stop_firmware...\n",
		    ha->host_no, __func__));
		status = ha->isp_ops->reset_firmware(ha);
		if (status == QLA_SUCCESS) {
			if (!test_bit(AF_FW_RECOVERY, &ha->flags))
				qla4xxx_cmd_wait(ha);
			ha->isp_ops->disable_intrs(ha);
			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
		} else {
			/* If the stop_firmware fails then
			 * reset the entire chip */
			reset_chip = 1;
			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
		}
	}

	/* Issue full chip reset if recovering from a catastrophic error,
	 * or if stop_firmware fails for ISP-82xx.
	 * This is the default case for ISP-4xxx */
	if (!is_qla8022(ha) || reset_chip) {
		if (!is_qla8022(ha))
			goto chip_reset;

		/* Check if 82XX firmware is alive or not
		 * We may have arrived here from NEED_RESET
		 * detection only */
		if (test_bit(AF_FW_RECOVERY, &ha->flags))
			goto chip_reset;

		wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
		while (time_before(jiffies, wait)) {
			if (qla4_8xxx_check_fw_alive(ha)) {
				qla4xxx_mailbox_premature_completion(ha);
				break;
			}

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ);
		}

		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
			qla4xxx_cmd_wait(ha);
chip_reset:
		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "scsi%ld: %s - Performing chip reset..\n",
		    ha->host_no, __func__));
		status = ha->isp_ops->reset_chip(ha);
	}

	/* Flush any pending ddb changed AENs */
	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);

recover_ha_init_adapter:
	/* Upon successful firmware/chip reset, re-initialize the adapter */
	if (status == QLA_SUCCESS) {
		/* For ISP-4xxx, force function 1 to always initialize
		 * before function 3 to prevent both functions from
		 * stepping on top of the other */
		if (!is_qla8022(ha) && (ha->mac_index == 3))
			ssleep(6);

		/* NOTE: AF_ONLINE flag set upon successful completion of
		 * qla4xxx_initialize_adapter */
		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
	}

	/* Retry failed adapter initialization, if necessary
	 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
	 * case to prevent ping-pong resets between functions */
	if (!test_bit(AF_ONLINE, &ha->flags) &&
	    !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
		/* Adapter initialization failed, see if we can retry
		 * resetting the ha.
		 * Since we don't want to block the DPC for too long
		 * with multiple resets in the same thread,
		 * utilize DPC to retry */
		if (is_qla8022(ha)) {
			qla4_8xxx_idc_lock(ha);
			dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
			qla4_8xxx_idc_unlock(ha);
			if (dev_state == QLA82XX_DEV_FAILED) {
				ql4_printk(KERN_INFO, ha, "%s: don't retry "
					   "recover adapter. H/W is in Failed "
					   "state\n", __func__);
				qla4xxx_dead_adapter_cleanup(ha);
				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
				clear_bit(DPC_RESET_HA_FW_CONTEXT,
					  &ha->dpc_flags);
				status = QLA_ERROR;

				goto exit_recover;
			}
		}

		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
			DEBUG2(printk("scsi%ld: recover adapter - retrying "
				      "(%d) more times\n", ha->host_no,
				      ha->retry_reset_ha_cnt));
			set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
			status = QLA_ERROR;
		} else {
			if (ha->retry_reset_ha_cnt > 0) {
				/* Schedule another Reset HA--DPC will retry */
				ha->retry_reset_ha_cnt--;
				DEBUG2(printk("scsi%ld: recover adapter - "
					      "retry remaining %d\n",
					      ha->host_no,
					      ha->retry_reset_ha_cnt));
				status = QLA_ERROR;
			}

			if (ha->retry_reset_ha_cnt == 0) {
				/* Recover adapter retries have been exhausted.
				 * Adapter DEAD */
				DEBUG2(printk("scsi%ld: recover adapter "
					      "failed - board disabled\n",
					      ha->host_no));
				qla4xxx_dead_adapter_cleanup(ha);
				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
				clear_bit(DPC_RESET_HA_FW_CONTEXT,
					  &ha->dpc_flags);
				status = QLA_ERROR;
			}
		}
	} else {
		clear_bit(DPC_RESET_HA, &ha->dpc_flags);
		clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
	}

exit_recover:
	ha->adapter_error_count++;

	if (test_bit(AF_ONLINE, &ha->flags))
		ha->isp_ops->enable_intrs(ha);

	scsi_unblock_requests(ha->host);

	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
	DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));

	return status;
}

/**
 * qla4xxx_relogin_devices - per-session relogin/unblock after recovery
 * @cls_session: iSCSI class session to examine
 *
 * If the session is offline but firmware already reports it active,
 * unblock it; otherwise arm the flash-DDB relogin timer (or report a
 * connection failure for user-space sessions).
 **/
static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;

	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	if (!iscsi_is_session_online(cls_session)) {
		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
				   " unblock session\n", ha->host_no, __func__,
				   ddb_entry->fw_ddb_index);
			iscsi_unblock_session(ddb_entry->sess);
		} else {
			/* Trigger relogin */
			if (ddb_entry->ddb_type == FLASH_DDB) {
				if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
					qla4xxx_arm_relogin_timer(ddb_entry);
			} else
				iscsi_session_failure(cls_session->dd_data,
						      ISCSI_ERR_CONN_FAILED);
		}
	}
}

/**
 * qla4xxx_unblock_flash_ddb - unblocks a flash DDB session and rescans it
 * @cls_session: iSCSI class session to unblock
 **/
int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;

	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
		   " unblock session\n", ha->host_no, __func__,
		   ddb_entry->fw_ddb_index);

	iscsi_unblock_session(ddb_entry->sess);

	/* Start scan target */
	if (test_bit(AF_ONLINE, &ha->flags)) {
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
			   " start scan\n", ha->host_no, __func__,
			   ddb_entry->fw_ddb_index);
		scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
	}
	return QLA_SUCCESS;
}

/**
 * qla4xxx_unblock_ddb - unblocks a user-space managed session
 * @cls_session: iSCSI class session to unblock
 **/
int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;

	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
		   " unblock user space session\n", ha->host_no, __func__,
		   ddb_entry->fw_ddb_index);
	iscsi_conn_start(ddb_entry->conn);
	iscsi_conn_login_event(ddb_entry->conn,
			       ISCSI_CONN_STATE_LOGGED_IN);

	return QLA_SUCCESS;
}

/* Applies qla4xxx_relogin_devices to every session on this host. */
static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
{
iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
}

/**
 * qla4xxx_relogin_flash_ddb - issues a relogin for one flash DDB session
 * @cls_sess: iSCSI class session to relogin
 *
 * Arms the relogin timer (at least RELOGIN_TOV seconds) and kicks off a
 * flash-DDB login.
 **/
static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
{
	uint16_t relogin_timer;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	relogin_timer = max(ddb_entry->default_relogin_timeout,
			    (uint16_t)RELOGIN_TOV);
	atomic_set(&ddb_entry->relogin_timer, relogin_timer);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
			  ddb_entry->fw_ddb_index, relogin_timer));

	qla4xxx_login_flash_ddb(cls_sess);
}

/* DPC-side per-session callback: relogin flash DDBs flagged DF_RELOGIN. */
static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	if (!(ddb_entry->ddb_type == FLASH_DDB))
		return;

	if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
	    !iscsi_is_session_online(cls_sess)) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "relogin issued\n"));
		qla4xxx_relogin_flash_ddb(cls_sess);
	}
}

/* Schedules the adapter's DPC work item, if the workqueue exists. */
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
	if (ha->dpc_thread)
		queue_work(ha->dpc_thread, &ha->dpc_work);
}

/*
 * Allocates (GFP_ATOMIC) a work event with data_size bytes of trailing
 * payload space.  Returns NULL on allocation failure.
 */
static struct qla4_work_evt *
qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
		   enum qla4_work_type type)
{
	struct qla4_work_evt *e;
	uint32_t size = sizeof(struct qla4_work_evt) + data_size;

	e = kzalloc(size, GFP_ATOMIC);
	if (!e)
		return NULL;

	INIT_LIST_HEAD(&e->list);
	e->type = type;
	return e;
}

/* Queues an event on ha->work_list (under work_lock) and wakes the DPC. */
static void qla4xxx_post_work(struct scsi_qla_host *ha,
			      struct qla4_work_evt *e)
{
	unsigned long flags;

	spin_lock_irqsave(&ha->work_lock, flags);
	list_add_tail(&e->list, &ha->work_list);
	spin_unlock_irqrestore(&ha->work_lock, flags);
	qla4xxx_wake_dpc(ha);
}

/* Posts an AEN event (code + payload copy) for later delivery by the DPC. */
int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
			  enum iscsi_host_event_code aen_code,
			  uint32_t data_size, uint8_t *data)
{
	struct qla4_work_evt *e;

	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
	if (!e)
		return QLA_ERROR;

	e->u.aen.code = aen_code;
	e->u.aen.data_size = data_size;
	memcpy(e->u.aen.data, data, data_size);

	qla4xxx_post_work(ha, e);

	return QLA_SUCCESS;
}

/* Posts a ping-completion event for later delivery by the DPC. */
int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
			       uint32_t status, uint32_t pid,
			       uint32_t data_size, uint8_t *data)
{
	struct qla4_work_evt *e;

	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
	if (!e)
		return QLA_ERROR;

	e->u.ping.status = status;
	e->u.ping.pid = pid;
	e->u.ping.data_size = data_size;
	memcpy(e->u.ping.data, data, data_size);

	qla4xxx_post_work(ha, e);

	return QLA_SUCCESS;
}

/*
 * Drains ha->work_list: splices it onto a private list under work_lock,
 * then delivers each event to the iSCSI transport and frees it.
 */
static void qla4xxx_do_work(struct scsi_qla_host *ha)
{
	struct qla4_work_evt *e, *tmp;
	unsigned long flags;
	LIST_HEAD(work);

	spin_lock_irqsave(&ha->work_lock, flags);
	list_splice_init(&ha->work_list, &work);
	spin_unlock_irqrestore(&ha->work_lock, flags);

	list_for_each_entry_safe(e, tmp, &work, list) {
		list_del_init(&e->list);

		switch (e->type) {
		case QLA4_EVENT_AEN:
			iscsi_post_host_event(ha->host_no,
					      &qla4xxx_iscsi_transport,
					      e->u.aen.code,
					      e->u.aen.data_size,
					      e->u.aen.data);
			break;
		case QLA4_EVENT_PING_STATUS:
			iscsi_ping_comp_event(ha->host_no,
					      &qla4xxx_iscsi_transport,
					      e->u.ping.status,
					      e->u.ping.pid,
					      e->u.ping.data_size,
					      e->u.ping.data);
			break;
		default:
			ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
				   "supported", e->type);
		}
		kfree(e);
	}
}

/**
 * qla4xxx_do_dpc - dpc routine
 * @data: in our case pointer to adapter structure
 *
 * This routine is a task that is schedule by the interrupt handler
 * to perform the background processing for interrupts. We put it
 * on a task queue that is consumed whenever the scheduler runs; that's
 * so you can do anything (i.e. put the process to sleep etc). In fact,
 * the mid-level tries to sleep when it reaches the driver threshold
 * "host->can_queue". This can cause a panic if we were in our interrupt code.
**/
static void qla4xxx_do_dpc(struct work_struct *work)
{
	struct scsi_qla_host *ha =
		container_of(work, struct scsi_qla_host, dpc_work);
	int status = QLA_ERROR;

	/* NOTE(review): no ';' after this DEBUG2() — it relies on the
	 * trailing ';' inside the DEBUG2() macro definition; confirm in
	 * the driver's debug header before touching this line. */
	DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
		      "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
		      ha->host_no, __func__, ha->flags, ha->dpc_flags))

	/* Initialization not yet finished. Don't do anything yet. */
	if (!test_bit(AF_INIT_DONE, &ha->flags))
		return;

	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
		DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
			      ha->host_no, __func__, ha->flags));
		return;
	}

	/* post events to application */
	qla4xxx_do_work(ha);

	if (is_qla8022(ha)) {
		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
			qla4_8xxx_idc_lock(ha);
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
					QLA82XX_DEV_FAILED);
			qla4_8xxx_idc_unlock(ha);
			ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
			qla4_8xxx_device_state_handler(ha);
		}
		if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT,
				       &ha->dpc_flags)) {
			qla4_8xxx_need_qsnt_handler(ha);
		}
	}

	if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
	    (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
	     test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
	     test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
		if (ql4xdontresethba) {
			DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
				      ha->host_no, __func__));
			clear_bit(DPC_RESET_HA, &ha->dpc_flags);
			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
			goto dpc_post_reset_ha;
		}
		if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
		    test_bit(DPC_RESET_HA, &ha->dpc_flags))
			qla4xxx_recover_adapter(ha);

		/* ISP-4xxx only: wait for a pending soft reset to clear
		 * before recovering. */
		if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
			uint8_t wait_time = RESET_INTR_TOV;

			while ((readw(&ha->reg->ctrl_status) &
				(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
				if (--wait_time == 0)
					break;
				msleep(1000);
			}
			if (wait_time == 0)
				DEBUG2(printk("scsi%ld: %s: SR|FSR "
					      "bit not cleared-- resetting\n",
					      ha->host_no, __func__));
			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
			if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
				qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
				status = qla4xxx_recover_adapter(ha);
			}
			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
			if (status == QLA_SUCCESS)
				ha->isp_ops->enable_intrs(ha);
		}
	}

dpc_post_reset_ha:
	/* ---- process AEN? --- */
	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);

	/* ---- Get DHCP IP Address? --- */
	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
		qla4xxx_get_dhcp_ip_address(ha);

	/* ---- relogin device? --- */
	if (adapter_up(ha) &&
	    test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
		iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
	}

	/* ---- link change? --- */
	if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
		if (!test_bit(AF_LINK_UP, &ha->flags)) {
			/* ---- link down? --- */
			qla4xxx_mark_all_devices_missing(ha);
		} else {
			/* ---- link up? --- *
			 * F/W will auto login to all devices ONLY ONCE after
			 * link up during driver initialization and runtime
			 * fatal error recovery. Therefore, the driver must
			 * manually relogin to devices when recovering from
			 * connection failures, logouts, expired KATO, etc. */
			if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
				qla4xxx_build_ddb_list(ha, ha->is_reset);
				iscsi_host_for_each_session(ha->host,
						qla4xxx_login_flash_ddb);
			} else
				qla4xxx_relogin_all_devices(ha);
		}
	}
}

/**
 * qla4xxx_free_adapter - release the adapter
 * @ha: pointer to adapter structure
 **/
static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
{
	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);

	if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
		/* Turn-off interrupts on the card.
*/
		ha->isp_ops->disable_intrs(ha);
	}

	/* Remove timer thread, if present */
	if (ha->timer_active)
		qla4xxx_stop_timer(ha);

	/* Kill the kernel thread for this host */
	if (ha->dpc_thread)
		destroy_workqueue(ha->dpc_thread);

	/* Kill the kernel thread for this host */
	if (ha->task_wq)
		destroy_workqueue(ha->task_wq);

	/* Put firmware in known state */
	ha->isp_ops->reset_firmware(ha);

	if (is_qla8022(ha)) {
		qla4_8xxx_idc_lock(ha);
		qla4_8xxx_clear_drv_active(ha);
		qla4_8xxx_idc_unlock(ha);
	}

	/* Detach interrupts */
	if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
		qla4xxx_free_irqs(ha);

	/* free extra memory */
	qla4xxx_mem_free(ha);
}

/*
 * Maps the ISP-82xx register space: reserves the PCI regions, ioremaps
 * BAR 0 and derives the per-function register window and doorbell write
 * pointer.  Returns 0 on success, -ENOMEM on failure.
 */
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
{
	int status = 0;
	unsigned long mem_base, mem_len, db_base, db_len;
	struct pci_dev *pdev = ha->pdev;

	status = pci_request_regions(pdev, DRIVER_NAME);
	if (status) {
		printk(KERN_WARNING
		       "scsi(%ld) Failed to reserve PIO regions (%s) "
		       "status=%d\n", ha->host_no, pci_name(pdev), status);
		goto iospace_error_exit;
	}

	DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
	    __func__, pdev->revision));
	ha->revision_id = pdev->revision;

	/* remap phys address */
	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
	mem_len = pci_resource_len(pdev, 0);
	DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
	    __func__, mem_base, mem_len));

	/* mapping of pcibase pointer */
	ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
	if (!ha->nx_pcibase) {
		printk(KERN_ERR
		    "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
		pci_release_regions(ha->pdev);
		goto iospace_error_exit;
	}

	/* Mapping of IO base pointer, door bell read and write pointer */

	/* mapping of IO base pointer */
	ha->qla4_8xxx_reg =
	    (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
	    0xbc000 + (ha->pdev->devfn << 11));

	/* NOTE(review): db_base/db_len are read but never used below. */
	db_base = pci_resource_start(pdev, 4);	/* doorbell is on bar 4 */
	db_len = pci_resource_len(pdev, 4);

	ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
	    QLA82XX_CAM_RAM_DB2);

	return 0;
iospace_error_exit:
	return -ENOMEM;
}

/***
 * qla4xxx_iospace_config - maps registers
 * @ha: pointer to adapter structure
 *
 * This routines maps HBA's registers from the pci address space
 * into the kernel virtual address space for memory mapped i/o.
 **/
int qla4xxx_iospace_config(struct scsi_qla_host *ha)
{
	unsigned long pio, pio_len, pio_flags;
	unsigned long mmio, mmio_len, mmio_flags;

	pio = pci_resource_start(ha->pdev, 0);
	pio_len = pci_resource_len(ha->pdev, 0);
	pio_flags = pci_resource_flags(ha->pdev, 0);
	if (pio_flags & IORESOURCE_IO) {
		if (pio_len < MIN_IOBASE_LEN) {
			ql4_printk(KERN_WARNING, ha,
				"Invalid PCI I/O region size\n");
			pio = 0;
		}
	} else {
		ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
		pio = 0;
	}

	/* Use MMIO operations for all accesses. */
	mmio = pci_resource_start(ha->pdev, 1);
	mmio_len = pci_resource_len(ha->pdev, 1);
	mmio_flags = pci_resource_flags(ha->pdev, 1);

	if (!(mmio_flags & IORESOURCE_MEM)) {
		ql4_printk(KERN_ERR, ha,
		    "region #0 not an MMIO resource, aborting\n");

		goto iospace_error_exit;
	}

	if (mmio_len < MIN_IOBASE_LEN) {
		ql4_printk(KERN_ERR, ha,
		    "Invalid PCI mem region size, aborting\n");
		goto iospace_error_exit;
	}

	if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
		ql4_printk(KERN_WARNING, ha,
		    "Failed to reserve PIO/MMIO regions\n");

		goto iospace_error_exit;
	}

	ha->pio_address = pio;
	ha->pio_length = pio_len;
	ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
	if (!ha->reg) {
		ql4_printk(KERN_ERR, ha,
		    "cannot remap MMIO, aborting\n");

		goto iospace_error_exit;
	}

	return 0;

iospace_error_exit:
	return -ENOMEM;
}

/* Per-chip operation tables: ISP-4xxx flat register model. */
static struct isp_operations qla4xxx_isp_ops = {
	.iospace_config		= qla4xxx_iospace_config,
	.pci_config		= qla4xxx_pci_config,
	.disable_intrs		= qla4xxx_disable_intrs,
	.enable_intrs		= qla4xxx_enable_intrs,
	.start_firmware		= qla4xxx_start_firmware,
	.intr_handler		= qla4xxx_intr_handler,
	.interrupt_service_routine = qla4xxx_interrupt_service_routine,
	.reset_chip		= qla4xxx_soft_reset,
.reset_firmware		= qla4xxx_hw_reset,
	.queue_iocb		= qla4xxx_queue_iocb,
	.complete_iocb		= qla4xxx_complete_iocb,
	.rd_shdw_req_q_out	= qla4xxx_rd_shdw_req_q_out,
	.rd_shdw_rsp_q_in	= qla4xxx_rd_shdw_rsp_q_in,
	.get_sys_info		= qla4xxx_get_sys_info,
};

/* Per-chip operation tables: ISP-82xx (NetXen-based) register model. */
static struct isp_operations qla4_8xxx_isp_ops = {
	.iospace_config		= qla4_8xxx_iospace_config,
	.pci_config		= qla4_8xxx_pci_config,
	.disable_intrs		= qla4_8xxx_disable_intrs,
	.enable_intrs		= qla4_8xxx_enable_intrs,
	.start_firmware		= qla4_8xxx_load_risc,
	.intr_handler		= qla4_8xxx_intr_handler,
	.interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
	.reset_chip		= qla4_8xxx_isp_reset,
	.reset_firmware		= qla4_8xxx_stop_firmware,
	.queue_iocb		= qla4_8xxx_queue_iocb,
	.complete_iocb		= qla4_8xxx_complete_iocb,
	.rd_shdw_req_q_out	= qla4_8xxx_rd_shdw_req_q_out,
	.rd_shdw_rsp_q_in	= qla4_8xxx_rd_shdw_rsp_q_in,
	.get_sys_info		= qla4_8xxx_get_sys_info,
};

/* ISP-4xxx: request-queue out pointer from the DMA shadow registers. */
uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
{
	return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
}

/* ISP-82xx: request-queue out pointer read via MMIO. */
uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
{
	return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
}

/* ISP-4xxx: response-queue in pointer from the DMA shadow registers. */
uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
{
	return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
}

/* ISP-82xx: response-queue in pointer read via MMIO. */
uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
{
	return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
}

/* sysfs iscsi_boot show callback: ethernet attributes of the boot NIC. */
static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
{
	struct scsi_qla_host *ha = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
		break;
	case ISCSI_BOOT_ETH_INDEX:
		rc = sprintf(str, "0\n");
		break;
	case ISCSI_BOOT_ETH_MAC:
		rc = sysfs_format_mac(str, ha->my_mac,
				      MAC_ADDR_LEN);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

/* sysfs visibility: which ethernet boot attributes are readable. */
static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
{
	int rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
	case ISCSI_BOOT_ETH_MAC:
	case ISCSI_BOOT_ETH_INDEX:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

/* sysfs iscsi_boot show callback: initiator attributes. */
static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
{
	struct scsi_qla_host *ha = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = sprintf(str, "%s\n", ha->name_string);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

/* sysfs visibility: which initiator boot attributes are readable. */
static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
{
	int rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

/* Formats one boot-target attribute from a cached boot session entry. */
static ssize_t
qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
			   char *buf)
{
	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
		rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
		break;
	case ISCSI_BOOT_TGT_IP_ADDR:
		/* ip_type 0x1 marks IPv4; anything else prints as IPv6 */
		if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
			rc = sprintf(buf, "%pI4\n",
				     &boot_conn->dest_ipaddr.ip_address);
		else
			rc = sprintf(str, "%pI6\n",
				     &boot_conn->dest_ipaddr.ip_address);
		break;
	case ISCSI_BOOT_TGT_PORT:
		rc = sprintf(str, "%d\n", boot_conn->dest_port);
		break;
	case ISCSI_BOOT_TGT_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->chap.target_chap_name_length,
			     (char *)&boot_conn->chap.target_chap_name);
		break;
	case ISCSI_BOOT_TGT_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->chap.target_secret_length,
			     (char *)&boot_conn->chap.target_secret);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->chap.intr_chap_name_length,
			     (char *)&boot_conn->chap.intr_chap_name);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->chap.intr_secret_length,
			     (char *)&boot_conn->chap.intr_secret);
		break;
	case ISCSI_BOOT_TGT_FLAGS:
		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
		break;
	case ISCSI_BOOT_TGT_NIC_ASSOC:
		rc = sprintf(str, "0\n");
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t
qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf) { struct scsi_qla_host *ha = data; struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess); return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); } static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf) { struct scsi_qla_host *ha = data; struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess); return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); } static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type) { int rc; switch (type) { case ISCSI_BOOT_TGT_NAME: case ISCSI_BOOT_TGT_IP_ADDR: case ISCSI_BOOT_TGT_PORT: case ISCSI_BOOT_TGT_CHAP_NAME: case ISCSI_BOOT_TGT_CHAP_SECRET: case ISCSI_BOOT_TGT_REV_CHAP_NAME: case ISCSI_BOOT_TGT_REV_CHAP_SECRET: case ISCSI_BOOT_TGT_NIC_ASSOC: case ISCSI_BOOT_TGT_FLAGS: rc = S_IRUGO; break; default: rc = 0; break; } return rc; } static void qla4xxx_boot_release(void *data) { struct scsi_qla_host *ha = data; scsi_host_put(ha->host); } static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) { dma_addr_t buf_dma; uint32_t addr, pri_addr, sec_addr; uint32_t offset; uint16_t func_num; uint8_t val; uint8_t *buf = NULL; size_t size = 13 * sizeof(uint8_t); int ret = QLA_SUCCESS; func_num = PCI_FUNC(ha->pdev->devfn); ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n", __func__, ha->pdev->device, func_num); if (is_qla40XX(ha)) { if (func_num == 1) { addr = NVRAM_PORT0_BOOT_MODE; pri_addr = NVRAM_PORT0_BOOT_PRI_TGT; sec_addr = NVRAM_PORT0_BOOT_SEC_TGT; } else if (func_num == 3) { addr = NVRAM_PORT1_BOOT_MODE; pri_addr = NVRAM_PORT1_BOOT_PRI_TGT; sec_addr = NVRAM_PORT1_BOOT_SEC_TGT; } else { ret = QLA_ERROR; goto exit_boot_info; } /* Check Boot Mode */ val = rd_nvram_byte(ha, addr); if (!(val & 0x07)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot " "options : 0x%x\n", __func__, val)); ret = QLA_ERROR; goto exit_boot_info; } /* get primary valid target index */ val = 
rd_nvram_byte(ha, pri_addr); if (val & BIT_7) ddb_index[0] = (val & 0x7f); /* get secondary valid target index */ val = rd_nvram_byte(ha, sec_addr); if (val & BIT_7) ddb_index[1] = (val & 0x7f); } else if (is_qla8022(ha)) { buf = dma_alloc_coherent(&ha->pdev->dev, size, &buf_dma, GFP_KERNEL); if (!buf) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__)); ret = QLA_ERROR; goto exit_boot_info; } if (ha->port_num == 0) offset = BOOT_PARAM_OFFSET_PORT0; else if (ha->port_num == 1) offset = BOOT_PARAM_OFFSET_PORT1; else { ret = QLA_ERROR; goto exit_boot_info_free; } addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) + offset; if (qla4xxx_get_flash(ha, buf_dma, addr, 13 * sizeof(uint8_t)) != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" " failed\n", ha->host_no, __func__)); ret = QLA_ERROR; goto exit_boot_info_free; } /* Check Boot Mode */ if (!(buf[1] & 0x07)) { DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options" " : 0x%x\n", buf[1])); ret = QLA_ERROR; goto exit_boot_info_free; } /* get primary valid target index */ if (buf[2] & BIT_7) ddb_index[0] = buf[2] & 0x7f; /* get secondary valid target index */ if (buf[11] & BIT_7) ddb_index[1] = buf[11] & 0x7f; } else { ret = QLA_ERROR; goto exit_boot_info; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary" " target ID %d\n", __func__, ddb_index[0], ddb_index[1])); exit_boot_info_free: dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); exit_boot_info: ha->pri_ddb_idx = ddb_index[0]; ha->sec_ddb_idx = ddb_index[1]; return ret; } /** * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password * @ha: pointer to adapter structure * @username: CHAP username to be returned * @password: CHAP password to be returned * * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/. 
* So from the CHAP cache find the first BIDI CHAP entry and set it * to the boot record in sysfs. **/ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, char *password) { int i, ret = -EINVAL; int max_chap_entries = 0; struct ql4_chap_table *chap_table; if (is_qla8022(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else max_chap_entries = MAX_CHAP_ENTRIES_40XX; if (!ha->chap_list) { ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); return ret; } mutex_lock(&ha->chap_sem); for (i = 0; i < max_chap_entries; i++) { chap_table = (struct ql4_chap_table *)ha->chap_list + i; if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { continue; } if (chap_table->flags & BIT_7) /* local */ continue; if (!(chap_table->flags & BIT_6)) /* Not BIDI */ continue; strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); ret = 0; break; } mutex_unlock(&ha->chap_sem); return ret; } static int qla4xxx_get_boot_target(struct scsi_qla_host *ha, struct ql4_boot_session_info *boot_sess, uint16_t ddb_index) { struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; struct dev_db_entry *fw_ddb_entry; dma_addr_t fw_ddb_entry_dma; uint16_t idx; uint16_t options; int ret = QLA_SUCCESS; fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer.\n", __func__)); ret = QLA_ERROR; return ret; } if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, ddb_index)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at " "index [%d]\n", __func__, ddb_index)); ret = QLA_ERROR; goto exit_boot_target; } /* Update target name and IP from DDB */ memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name, min(sizeof(boot_sess->target_name), sizeof(fw_ddb_entry->iscsi_name))); options = 
le16_to_cpu(fw_ddb_entry->options); if (options & DDB_OPT_IPV6_DEVICE) { memcpy(&boot_conn->dest_ipaddr.ip_address, &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN); } else { boot_conn->dest_ipaddr.ip_type = 0x1; memcpy(&boot_conn->dest_ipaddr.ip_address, &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN); } boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port); /* update chap information */ idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx); if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n")); ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap. target_chap_name, (char *)&boot_conn->chap.target_secret, idx); if (ret) { ql4_printk(KERN_ERR, ha, "Failed to set chap\n"); ret = QLA_ERROR; goto exit_boot_target; } boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN; boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN; } if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n")); ret = qla4xxx_get_bidi_chap(ha, (char *)&boot_conn->chap.intr_chap_name, (char *)&boot_conn->chap.intr_secret); if (ret) { ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n"); ret = QLA_ERROR; goto exit_boot_target; } boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN; boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN; } exit_boot_target: dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); return ret; } static int qla4xxx_get_boot_info(struct scsi_qla_host *ha) { uint16_t ddb_index[2]; int ret = QLA_ERROR; int rval; memset(ddb_index, 0, sizeof(ddb_index)); ddb_index[0] = 0xffff; ddb_index[1] = 0xffff; ret = get_fw_boot_info(ha, ddb_index); if (ret != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No boot target configured.\n", __func__)); return ret; } if (ql4xdisablesysfsboot) return QLA_SUCCESS; if (ddb_index[0] == 0xffff) goto sec_target; rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), 
ddb_index[0]); if (rval != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not " "configured\n", __func__)); } else ret = QLA_SUCCESS; sec_target: if (ddb_index[1] == 0xffff) goto exit_get_boot_info; rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess), ddb_index[1]); if (rval != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not" " configured\n", __func__)); } else ret = QLA_SUCCESS; exit_get_boot_info: return ret; } static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha) { struct iscsi_boot_kobj *boot_kobj; if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) return QLA_ERROR; if (ql4xdisablesysfsboot) { ql4_printk(KERN_INFO, ha, "%s: syfsboot disabled - driver will trigger login " "and publish session for discovery .\n", __func__); return QLA_SUCCESS; } ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); if (!ha->boot_kset) goto kset_free; if (!scsi_host_get(ha->host)) goto kset_free; boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha, qla4xxx_show_boot_tgt_pri_info, qla4xxx_tgt_get_attr_visibility, qla4xxx_boot_release); if (!boot_kobj) goto put_host; if (!scsi_host_get(ha->host)) goto kset_free; boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha, qla4xxx_show_boot_tgt_sec_info, qla4xxx_tgt_get_attr_visibility, qla4xxx_boot_release); if (!boot_kobj) goto put_host; if (!scsi_host_get(ha->host)) goto kset_free; boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha, qla4xxx_show_boot_ini_info, qla4xxx_ini_get_attr_visibility, qla4xxx_boot_release); if (!boot_kobj) goto put_host; if (!scsi_host_get(ha->host)) goto kset_free; boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha, qla4xxx_show_boot_eth_info, qla4xxx_eth_get_attr_visibility, qla4xxx_boot_release); if (!boot_kobj) goto put_host; return QLA_SUCCESS; put_host: scsi_host_put(ha->host); kset_free: iscsi_boot_destroy_kset(ha->boot_kset); return -ENOMEM; } /** * qla4xxx_create chap_list - Create CHAP list 
 * from FLASH
 * @ha: pointer to adapter structure
 *
 * Read flash and make a list of CHAP entries, during login when a CHAP entry
 * is received, it will be checked in this list. If entry exist then the CHAP
 * entry index is set in the DDB. If CHAP entry does not exist in this list
 * then a new entry is added in FLASH in CHAP table and the index obtained is
 * used in the DDB.
 **/
static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
{
	int rval = 0;
	uint8_t *chap_flash_data = NULL;
	uint32_t offset;
	dma_addr_t chap_dma;
	uint32_t chap_size = 0;

	/* 40xx parts keep CHAP entries in NVRAM with a fixed entry count;
	 * 8022 parts keep them in a flash region sized by the flash layout
	 * table. */
	if (is_qla40XX(ha))
		chap_size = MAX_CHAP_ENTRIES_40XX *
			    sizeof(struct ql4_chap_table);
	else
		/* Single region contains CHAP info for both
		 * ports which is divided into half for each port.
		 */
		chap_size = ha->hw.flt_chap_size / 2;

	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
					     &chap_dma, GFP_KERNEL);
	if (!chap_flash_data) {
		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
		return;
	}

	/* flt_region_chap is in 4-byte words; convert to a byte offset and
	 * step past port 0's half for the second port. */
	if (is_qla40XX(ha))
		offset = FLASH_CHAP_OFFSET;
	else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		if (ha->port_num == 1)
			offset += chap_size;
	}

	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (rval != QLA_SUCCESS)
		goto exit_chap_list;

	/* Cache lives in vmalloc'd memory; allocate once and refresh the
	 * contents on subsequent calls. */
	if (ha->chap_list == NULL)
		ha->chap_list = vmalloc(chap_size);

	if (ha->chap_list == NULL) {
		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
		goto exit_chap_list;
	}

	memcpy(ha->chap_list, chap_flash_data, chap_size);

exit_chap_list:
	/* DMA bounce buffer is only needed for the flash read itself. */
	dma_free_coherent(&ha->pdev->dev, chap_size,
			  chap_flash_data, chap_dma);
}

/*
 * qla4xxx_get_param_ddb - fill a ql4_tuple_ddb from a live session
 *
 * Copies the connection tuple (tpgt, port, target name, IP address) of an
 * active ddb_entry's iSCSI session into @tddb so it can be compared against
 * firmware DDBs (continues past this line).
 */
static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
				  struct ql4_tuple_ddb *tddb)
{
	struct scsi_qla_host *ha;
	struct iscsi_cls_session *cls_sess;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_session *sess;
	struct iscsi_conn *conn;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	ha = ddb_entry->ha;
	cls_sess = ddb_entry->sess;
	sess = cls_sess->dd_data;
	cls_conn = ddb_entry->conn;
	conn = cls_conn->dd_data;

	tddb->tpgt = sess->tpgt;
tddb->port = conn->persistent_port; strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); } static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, struct ql4_tuple_ddb *tddb) { uint16_t options = 0; tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); options = le16_to_cpu(fw_ddb_entry->options); if (options & DDB_OPT_IPV6_DEVICE) sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr); else sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); tddb->port = le16_to_cpu(fw_ddb_entry->port); memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], sizeof(tddb->isid)); } static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, struct ql4_tuple_ddb *old_tddb, struct ql4_tuple_ddb *new_tddb, uint8_t is_isid_compare) { if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) return QLA_ERROR; if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr)) return QLA_ERROR; if (old_tddb->port != new_tddb->port) return QLA_ERROR; /* For multi sessions, driver generates the ISID, so do not compare * ISID in reset path since it would be a comparision between the * driver generated ISID and firmware generated ISID. This could * lead to adding duplicated DDBs in the list as driver generated * ISID would not match firmware generated ISID. 
*/ if (is_isid_compare) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x" "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n", __func__, old_tddb->isid[5], old_tddb->isid[4], old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1], old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4], new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1], new_tddb->isid[0])); if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], sizeof(old_tddb->isid))) return QLA_ERROR; } DEBUG2(ql4_printk(KERN_INFO, ha, "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]", old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr, old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt, new_tddb->ip_addr, new_tddb->iscsi_name)); return QLA_SUCCESS; } static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry) { struct ddb_entry *ddb_entry; struct ql4_tuple_ddb *fw_tddb = NULL; struct ql4_tuple_ddb *tmp_tddb = NULL; int idx; int ret = QLA_ERROR; fw_tddb = vzalloc(sizeof(*fw_tddb)); if (!fw_tddb) { DEBUG2(ql4_printk(KERN_WARNING, ha, "Memory Allocation failed.\n")); ret = QLA_SUCCESS; goto exit_check; } tmp_tddb = vzalloc(sizeof(*tmp_tddb)); if (!tmp_tddb) { DEBUG2(ql4_printk(KERN_WARNING, ha, "Memory Allocation failed.\n")); ret = QLA_SUCCESS; goto exit_check; } qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb); for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); if (ddb_entry == NULL) continue; qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { ret = QLA_SUCCESS; /* found */ goto exit_check; } } exit_check: if (fw_tddb) vfree(fw_tddb); if (tmp_tddb) vfree(tmp_tddb); return ret; } static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, struct list_head *list_nt, struct dev_db_entry *fw_ddb_entry) { struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; struct ql4_tuple_ddb *fw_tddb = NULL; struct ql4_tuple_ddb *tmp_tddb = NULL; int ret = QLA_ERROR; 
fw_tddb = vzalloc(sizeof(*fw_tddb)); if (!fw_tddb) { DEBUG2(ql4_printk(KERN_WARNING, ha, "Memory Allocation failed.\n")); ret = QLA_SUCCESS; goto exit_check; } tmp_tddb = vzalloc(sizeof(*tmp_tddb)); if (!tmp_tddb) { DEBUG2(ql4_printk(KERN_WARNING, ha, "Memory Allocation failed.\n")); ret = QLA_SUCCESS; goto exit_check; } qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb); list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb); if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true)) { ret = QLA_SUCCESS; /* found */ goto exit_check; } } exit_check: if (fw_tddb) vfree(fw_tddb); if (tmp_tddb) vfree(tmp_tddb); return ret; } static void qla4xxx_free_ddb_list(struct list_head *list_ddb) { struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { list_del_init(&ddb_idx->list); vfree(ddb_idx); } } static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry) { struct iscsi_endpoint *ep; struct sockaddr_in *addr; struct sockaddr_in6 *addr6; struct sockaddr *dst_addr; char *ip; /* TODO: need to destroy on unload iscsi_endpoint*/ dst_addr = vmalloc(sizeof(*dst_addr)); if (!dst_addr) return NULL; if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) { dst_addr->sa_family = AF_INET6; addr6 = (struct sockaddr_in6 *)dst_addr; ip = (char *)&addr6->sin6_addr; memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port)); } else { dst_addr->sa_family = AF_INET; addr = (struct sockaddr_in *)dst_addr; ip = (char *)&addr->sin_addr; memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN); addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port)); } ep = qla4xxx_ep_connect(ha->host, dst_addr, 0); vfree(dst_addr); return ep; } static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx) { if (ql4xdisablesysfsboot) return QLA_SUCCESS; if (idx == ha->pri_ddb_idx || idx == 
	    ha->sec_ddb_idx)
		return QLA_ERROR;

	return QLA_SUCCESS;
}

/*
 * qla4xxx_setup_flash_ddb_entry - initialize a flash-sourced ddb_entry
 *
 * Marks the entry as FLASH_DDB (no firmware index assigned yet), wires up
 * the flash-DDB callbacks, resets the relogin bookkeeping, and derives the
 * relogin timeout from the firmware DDB's def_timeout, clamped to
 * [LOGIN_TOV, LOGIN_TOV * 10) with LOGIN_TOV as the fallback.
 */
static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
					  struct ddb_entry *ddb_entry)
{
	uint16_t def_timeout;

	ddb_entry->ddb_type = FLASH_DDB;
	ddb_entry->fw_ddb_index = INVALID_ENTRY;
	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
	ddb_entry->ha = ha;
	ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
	ddb_entry->ddb_change = qla4xxx_flash_ddb_change;

	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
	atomic_set(&ddb_entry->relogin_timer, 0);
	atomic_set(&ddb_entry->relogin_retry_count, 0);
	def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
	/* Use the firmware's default timeout only when it is sane. */
	ddb_entry->default_relogin_timeout =
		(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
		def_timeout : LOGIN_TOV;
	ddb_entry->default_time2wait =
		le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
}

/*
 * qla4xxx_wait_for_ip_configuration - wait for all IP interfaces to settle
 *
 * Polls the firmware IP state of each of the 4 IP interfaces once per
 * second (up to IP_CONFIG_TOV seconds) and returns once every interface
 * has reached a terminal state.  An interface is dropped from further
 * polling (slot set to -1) once its state is resolved or the state query
 * fails.  Called before conn-open mailbox commands, which fail if IPs are
 * not yet configured.
 */
static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
{
	uint32_t idx = 0;
	uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
	uint32_t sts[MBOX_REG_COUNT];
	uint32_t ip_state;
	unsigned long wtime;
	int ret;

	wtime = jiffies + (HZ * IP_CONFIG_TOV);
	do {
		for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
			if (ip_idx[idx] == -1)
				continue;

			ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
			if (ret == QLA_ERROR) {
				/* Can't query this interface; stop waiting
				 * on it. */
				ip_idx[idx] = -1;
				continue;
			}

			ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;

			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "Waiting for IP state for idx = %d, state = 0x%x\n",
					  ip_idx[idx], ip_state));

			/* Terminal states: no further change expected. */
			if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
			    ip_state == IP_ADDRSTATE_INVALID ||
			    ip_state == IP_ADDRSTATE_PREFERRED ||
			    ip_state == IP_ADDRSTATE_DEPRICATED ||
			    ip_state == IP_ADDRSTATE_DISABLING)
				ip_idx[idx] = -1;
		}

		/* Break if all IP states checked */
		if ((ip_idx[0] == -1) && (ip_idx[1] == -1) &&
		    (ip_idx[2] == -1) && (ip_idx[3] == -1))
			break;
		schedule_timeout_uninterruptible(HZ);
	} while (time_after(wtime, jiffies));
}

/*
 * qla4xxx_build_st_list - collect send-target (ST) DDBs into @list_st
 * (definition continues on the following line).
 */
static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
				  struct list_head
*list_st) { struct qla_ddb_index *st_ddb_idx; int max_ddbs; int fw_idx_size; struct dev_db_entry *fw_ddb_entry; dma_addr_t fw_ddb_dma; int ret; uint32_t idx = 0, next_idx = 0; uint32_t state = 0, conn_err = 0; uint16_t conn_id = 0; fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, &fw_ddb_dma); if (fw_ddb_entry == NULL) { DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); goto exit_st_list; } max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : MAX_DEV_DB_ENTRIES; fw_idx_size = sizeof(struct qla_ddb_index); for (idx = 0; idx < max_ddbs; idx = next_idx) { ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, NULL, &next_idx, &state, &conn_err, NULL, &conn_id); if (ret == QLA_ERROR) break; /* Ignore DDB if invalid state (unassigned) */ if (state == DDB_DS_UNASSIGNED) goto continue_next_st; /* Check if ST, add to the list_st */ if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) goto continue_next_st; st_ddb_idx = vzalloc(fw_idx_size); if (!st_ddb_idx) break; st_ddb_idx->fw_ddb_idx = idx; list_add_tail(&st_ddb_idx->list, list_st); continue_next_st: if (next_idx == 0) break; } exit_st_list: if (fw_ddb_entry) dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); } /** * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list * @ha: pointer to adapter structure * @list_ddb: List from which failed ddb to be removed * * Iterate over the list of DDBs and find and remove DDBs that are either in * no connection active state or failed state **/ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, struct list_head *list_ddb) { struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; uint32_t next_idx = 0; uint32_t state = 0, conn_err = 0; int ret; list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx, NULL, 0, NULL, &next_idx, &state, &conn_err, NULL, NULL); if (ret == QLA_ERROR) continue; if (state == DDB_DS_NO_CONNECTION_ACTIVE || state == DDB_DS_SESSION_FAILED) { 
list_del_init(&ddb_idx->list); vfree(ddb_idx); } } } static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, int is_reset) { struct iscsi_cls_session *cls_sess; struct iscsi_session *sess; struct iscsi_cls_conn *cls_conn; struct iscsi_endpoint *ep; uint16_t cmds_max = 32; uint16_t conn_id = 0; uint32_t initial_cmdsn = 0; int ret = QLA_SUCCESS; struct ddb_entry *ddb_entry = NULL; /* Create session object, with INVALID_ENTRY, * the targer_id would get set when we issue the login */ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, cmds_max, sizeof(struct ddb_entry), sizeof(struct ql4_task_data), initial_cmdsn, INVALID_ENTRY); if (!cls_sess) { ret = QLA_ERROR; goto exit_setup; } /* * so calling module_put function to decrement the * reference count. **/ module_put(qla4xxx_iscsi_transport.owner); sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ddb_entry->sess = cls_sess; cls_sess->recovery_tmo = ql4xsess_recovery_tmo; memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, sizeof(struct dev_db_entry)); qla4xxx_setup_flash_ddb_entry(ha, ddb_entry); cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); if (!cls_conn) { ret = QLA_ERROR; goto exit_setup; } ddb_entry->conn = cls_conn; /* Setup ep, for displaying attributes in sysfs */ ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); if (ep) { ep->conn = cls_conn; cls_conn->ep = ep; } else { DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n")); ret = QLA_ERROR; goto exit_setup; } /* Update sess/conn params */ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); if (is_reset == RESET_ADAPTER) { iscsi_block_session(cls_sess); /* Use the relogin path to discover new devices * by short-circuting the logic of setting * timer to relogin - instead set the flags * to initiate login right away. 
*/ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); set_bit(DF_RELOGIN, &ddb_entry->flags); } exit_setup: return ret; } static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, struct list_head *list_nt, int is_reset) { struct dev_db_entry *fw_ddb_entry; dma_addr_t fw_ddb_dma; int max_ddbs; int fw_idx_size; int ret; uint32_t idx = 0, next_idx = 0; uint32_t state = 0, conn_err = 0; uint16_t conn_id = 0; struct qla_ddb_index *nt_ddb_idx; fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, &fw_ddb_dma); if (fw_ddb_entry == NULL) { DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); goto exit_nt_list; } max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : MAX_DEV_DB_ENTRIES; fw_idx_size = sizeof(struct qla_ddb_index); for (idx = 0; idx < max_ddbs; idx = next_idx) { ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, NULL, &next_idx, &state, &conn_err, NULL, &conn_id); if (ret == QLA_ERROR) break; if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS) goto continue_next_nt; /* Check if NT, then add to list it */ if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) goto continue_next_nt; if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || state == DDB_DS_SESSION_FAILED)) goto continue_next_nt; DEBUG2(ql4_printk(KERN_INFO, ha, "Adding DDB to session = 0x%x\n", idx)); if (is_reset == INIT_ADAPTER) { nt_ddb_idx = vmalloc(fw_idx_size); if (!nt_ddb_idx) break; nt_ddb_idx->fw_ddb_idx = idx; memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, sizeof(struct dev_db_entry)); if (qla4xxx_is_flash_ddb_exists(ha, list_nt, fw_ddb_entry) == QLA_SUCCESS) { vfree(nt_ddb_idx); goto continue_next_nt; } list_add_tail(&nt_ddb_idx->list, list_nt); } else if (is_reset == RESET_ADAPTER) { if (qla4xxx_is_session_exists(ha, fw_ddb_entry) == QLA_SUCCESS) goto continue_next_nt; } ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset); if (ret == QLA_ERROR) goto exit_nt_list; continue_next_nt: if (next_idx == 0) break; } exit_nt_list: if (fw_ddb_entry) dma_pool_free(ha->fw_ddb_dma_pool, 
fw_ddb_entry, fw_ddb_dma); } /** * qla4xxx_build_ddb_list - Build ddb list and setup sessions * @ha: pointer to adapter structure * @is_reset: Is this init path or reset path * * Create a list of sendtargets (st) from firmware DDBs, issue send targets * using connection open, then create the list of normal targets (nt) * from firmware DDBs. Based on the list of nt setup session and connection * objects. **/ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) { uint16_t tmo = 0; struct list_head list_st, list_nt; struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; unsigned long wtime; if (!test_bit(AF_LINK_UP, &ha->flags)) { set_bit(AF_BUILD_DDB_LIST, &ha->flags); ha->is_reset = is_reset; return; } INIT_LIST_HEAD(&list_st); INIT_LIST_HEAD(&list_nt); qla4xxx_build_st_list(ha, &list_st); /* Before issuing conn open mbox, ensure all IPs states are configured * Note, conn open fails if IPs are not configured */ qla4xxx_wait_for_ip_configuration(ha); /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); } /* Wait to ensure all sendtargets are done for min 12 sec wait */ tmo = ((ha->def_timeout > LOGIN_TOV) && (ha->def_timeout < LOGIN_TOV * 10) ? ha->def_timeout : LOGIN_TOV); DEBUG2(ql4_printk(KERN_INFO, ha, "Default time to wait for build ddb %d\n", tmo)); wtime = jiffies + (HZ * tmo); do { if (list_empty(&list_st)) break; qla4xxx_remove_failed_ddb(ha, &list_st); schedule_timeout_uninterruptible(HZ / 10); } while (time_after(wtime, jiffies)); /* Free up the sendtargets list */ qla4xxx_free_ddb_list(&list_st); qla4xxx_build_nt_list(ha, &list_nt, is_reset); qla4xxx_free_ddb_list(&list_nt); qla4xxx_free_ddb_index(ha); } /** * qla4xxx_probe_adapter - callback function to probe HBA * @pdev: pointer to pci_dev structure * @pci_device_id: pointer to pci_device entry * * This routine will probe for Qlogic 4xxx iSCSI host adapters. 
* It returns zero if successful. It also initializes all data necessary for * the driver. **/ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret = -ENODEV, status; struct Scsi_Host *host; struct scsi_qla_host *ha; uint8_t init_retry_count = 0; char buf[34]; struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; uint32_t dev_state; if (pci_enable_device(pdev)) return -1; host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0); if (host == NULL) { printk(KERN_WARNING "qla4xxx: Couldn't allocate host from scsi layer!\n"); goto probe_disable_device; } /* Clear our data area */ ha = to_qla_host(host); memset(ha, 0, sizeof(*ha)); /* Save the information from PCI BIOS. */ ha->pdev = pdev; ha->host = host; ha->host_no = host->host_no; pci_enable_pcie_error_reporting(pdev); /* Setup Runtime configurable options */ if (is_qla8022(ha)) { ha->isp_ops = &qla4_8xxx_isp_ops; rwlock_init(&ha->hw_lock); ha->qdr_sn_window = -1; ha->ddr_mn_window = -1; ha->curr_window = 255; ha->func_num = PCI_FUNC(ha->pdev->devfn); nx_legacy_intr = &legacy_intr[ha->func_num]; ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg; ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; } else { ha->isp_ops = &qla4xxx_isp_ops; } /* Set EEH reset type to fundamental if required by hba */ if (is_qla8022(ha)) pdev->needs_freset = 1; /* Configure PCI I/O space. */ ret = ha->isp_ops->iospace_config(ha); if (ret) goto probe_failed_ioconfig; ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, ha->reg); qla4xxx_config_dma_addressing(ha); /* Initialize lists and spinlocks. 
*/ INIT_LIST_HEAD(&ha->free_srb_q); mutex_init(&ha->mbox_sem); mutex_init(&ha->chap_sem); init_completion(&ha->mbx_intr_comp); init_completion(&ha->disable_acb_comp); spin_lock_init(&ha->hardware_lock); /* Initialize work list */ INIT_LIST_HEAD(&ha->work_list); /* Allocate dma buffers */ if (qla4xxx_mem_alloc(ha)) { ql4_printk(KERN_WARNING, ha, "[ERROR] Failed to allocate memory for adapter\n"); ret = -ENOMEM; goto probe_failed; } host->cmd_per_lun = 3; host->max_channel = 0; host->max_lun = MAX_LUNS - 1; host->max_id = MAX_TARGETS; host->max_cmd_len = IOCB_MAX_CDB_LEN; host->can_queue = MAX_SRBS ; host->transportt = qla4xxx_scsi_transport; ret = scsi_init_shared_tag_map(host, MAX_SRBS); if (ret) { ql4_printk(KERN_WARNING, ha, "%s: scsi_init_shared_tag_map failed\n", __func__); goto probe_failed; } pci_set_drvdata(pdev, ha); ret = scsi_add_host(host, &pdev->dev); if (ret) goto probe_failed; if (is_qla8022(ha)) (void) qla4_8xxx_get_flash_info(ha); /* * Initialize the Host adapter request/response queues and * firmware * NOTE: interrupts enabled upon successful completion */ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); while ((!test_bit(AF_ONLINE, &ha->flags)) && init_retry_count++ < MAX_INIT_RETRIES) { if (is_qla8022(ha)) { qla4_8xxx_idc_lock(ha); dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); qla4_8xxx_idc_unlock(ha); if (dev_state == QLA82XX_DEV_FAILED) { ql4_printk(KERN_WARNING, ha, "%s: don't retry " "initialize adapter. H/W is in failed state\n", __func__); break; } } DEBUG2(printk("scsi: %s: retrying adapter initialization " "(%d)\n", __func__, init_retry_count)); if (ha->isp_ops->reset_chip(ha) == QLA_ERROR) continue; status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); } if (!test_bit(AF_ONLINE, &ha->flags)) { ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); if (is_qla8022(ha) && ql4xdontresethba) { /* Put the device in failed state. 
*/ DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); qla4_8xxx_idc_lock(ha); qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); qla4_8xxx_idc_unlock(ha); } ret = -ENODEV; goto remove_host; } /* Startup the kernel thread for this host adapter. */ DEBUG2(printk("scsi: %s: Starting kernel thread for " "qla4xxx_dpc\n", __func__)); sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no); ha->dpc_thread = create_singlethread_workqueue(buf); if (!ha->dpc_thread) { ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); ret = -ENODEV; goto remove_host; } INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); sprintf(buf, "qla4xxx_%lu_task", ha->host_no); ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1); if (!ha->task_wq) { ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); ret = -ENODEV; goto remove_host; } /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc * (which is called indirectly by qla4xxx_initialize_adapter), * so that irqs will be registered after crbinit but before * mbx_intr_enable. */ if (!is_qla8022(ha)) { ret = qla4xxx_request_irqs(ha); if (ret) { ql4_printk(KERN_WARNING, ha, "Failed to reserve " "interrupt %d already in use.\n", pdev->irq); goto remove_host; } } pci_save_state(ha->pdev); ha->isp_ops->enable_intrs(ha); /* Start timer thread. 
*/ qla4xxx_start_timer(ha, qla4xxx_timer, 1); set_bit(AF_INIT_DONE, &ha->flags); printk(KERN_INFO " QLogic iSCSI HBA Driver version: %s\n" " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), ha->host_no, ha->firmware_version[0], ha->firmware_version[1], ha->patch_number, ha->build_number); if (qla4xxx_setup_boot_info(ha)) ql4_printk(KERN_ERR, ha, "%s: No iSCSI boot target configured\n", __func__); /* Perform the build ddb list and login to each */ qla4xxx_build_ddb_list(ha, INIT_ADAPTER); iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); qla4xxx_create_chap_list(ha); qla4xxx_create_ifaces(ha); return 0; remove_host: scsi_remove_host(ha->host); probe_failed: qla4xxx_free_adapter(ha); probe_failed_ioconfig: pci_disable_pcie_error_reporting(pdev); scsi_host_put(ha->host); probe_disable_device: pci_disable_device(pdev); return ret; } /** * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize * @ha: pointer to adapter structure * * Mark the other ISP-4xxx port to indicate that the driver is being removed, * so that the other port will not re-initialize while in the process of * removing the ha due to driver unload or hba hotplug. 
 **/
static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
{
	struct scsi_qla_host *other_ha = NULL;
	struct pci_dev *other_pdev = NULL;
	int fn = ISP4XXX_PCI_FN_2;

	/* iscsi function numbers for ISP4xxx is 1 and 3; pick whichever
	 * function is NOT ours. */
	if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
		fn = ISP4XXX_PCI_FN_1;

	other_pdev =
		pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
		ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
		fn));

	/* Get other_ha if other_pdev is valid and state is enable */
	if (other_pdev) {
		if (atomic_read(&other_pdev->enable_cnt)) {
			other_ha = pci_get_drvdata(other_pdev);
			if (other_ha) {
				set_bit(AF_HA_REMOVAL, &other_ha->flags);
				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
				       "Prevent %s reinit\n", __func__,
				       dev_name(&other_ha->pdev->dev)));
			}
		}
		/* Drop the reference taken by pci_get_domain_bus_and_slot. */
		pci_dev_put(other_pdev);
	}
}

/*
 * qla4xxx_destroy_fw_ddb_session - tear down all flash-DDB sessions
 *
 * For every FLASH_DDB entry: log the target out (close-session), clear the
 * firmware DDB, then free the driver-side ddb/endpoint/session objects.
 * Called on adapter removal.
 */
static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
{
	struct ddb_entry *ddb_entry;
	int options;
	int idx;

	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
		if ((ddb_entry != NULL) &&
		    (ddb_entry->ddb_type == FLASH_DDB)) {

			options = LOGOUT_OPTION_CLOSE_SESSION;
			if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
			    == QLA_ERROR)
				ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
					   __func__);

			qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
			/*
			 * we have decremented the reference count of the
			 * driver when we setup the session to have the driver
			 * unload to be seamless without actually destroying
			 * the session; re-take it before teardown.
			 **/
			try_module_get(qla4xxx_iscsi_transport.owner);
			iscsi_destroy_endpoint(ddb_entry->conn->ep);
			qla4xxx_free_ddb(ha, ddb_entry);
			iscsi_session_teardown(ddb_entry->sess);
		}
	}
}

/**
 * qla4xxx_remove_adapter - callback function to remove adapter.
 * @pdev: PCI device pointer
 **/
static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
{
	struct scsi_qla_host *ha;

	ha = pci_get_drvdata(pdev);

	/* On ISP-4xxx, warn the sibling PCI function not to re-init while
	 * we tear down. */
	if (!is_qla8022(ha))
		qla4xxx_prevent_other_port_reinit(ha);

	/* destroy iface from sysfs */
	qla4xxx_destroy_ifaces(ha);

	if ((!ql4xdisablesysfsboot) && ha->boot_kset)
		iscsi_boot_destroy_kset(ha->boot_kset);

	qla4xxx_destroy_fw_ddb_session(ha);

	scsi_remove_host(ha->host);

	qla4xxx_free_adapter(ha);

	scsi_host_put(ha->host);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

/**
 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * At exit, the @ha's flags.enable_64bit_addressing set to indicated
 * supported addressing method.
 */
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
{
	int retval;

	/* Update our PCI device dma_mask for full 64 bit mask */
	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
			/* 64-bit streaming DMA with 32-bit coherent DMA. */
			dev_dbg(&ha->pdev->dev,
				  "Failed to set 64 bit PCI consistent mask; "
				   "using 32 bit.\n");
			retval = pci_set_consistent_dma_mask(ha->pdev,
							     DMA_BIT_MASK(32));
		}
	} else
		retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
}

/*
 * qla4xxx_slave_alloc - scsi_host_template slave_alloc callback
 *
 * Stashes the session's ddb_entry in sdev->hostdata and activates tagged
 * queueing with the module-configured depth (ql4xmaxqdepth) when it is in
 * range, else QL4_DEF_QDEPTH.
 */
static int qla4xxx_slave_alloc(struct scsi_device *sdev)
{
	struct iscsi_cls_session *cls_sess;
	struct iscsi_session *sess;
	struct ddb_entry *ddb;
	int queue_depth = QL4_DEF_QDEPTH;

	cls_sess = starget_to_session(sdev->sdev_target);
	sess = cls_sess->dd_data;
	ddb = sess->dd_data;

	sdev->hostdata = ddb;
	sdev->tagged_supported = 1;

	if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
		queue_depth = ql4xmaxqdepth;

	scsi_activate_tcq(sdev, queue_depth);
	return 0;
}

/* slave_configure callback: tagged queueing is always supported. */
static int qla4xxx_slave_configure(struct scsi_device *sdev)
{
	sdev->tagged_supported = 1;
	return 0;
}

/* slave_destroy callback: drop back to untagged with depth 1. */
static void qla4xxx_slave_destroy(struct scsi_device *sdev)
{
	scsi_deactivate_tcq(sdev, 1);
}

/**
 * qla4xxx_del_from_active_array - returns an active srb
 * @ha: Pointer to host adapter
 *	structure.
 * @index: index into the active_array
 *
 * This routine removes and returns the srb at the specified index
 **/
struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
    uint32_t index)
{
	struct srb *srb = NULL;
	struct scsi_cmnd *cmd = NULL;

	/* The block tag doubles as the index into the active array */
	cmd = scsi_host_find_tag(ha->host, index);
	if (!cmd)
		return srb;

	srb = (struct srb *)CMD_SP(cmd);
	if (!srb)
		return srb;

	/* update counters */
	if (srb->flags & SRB_DMA_VALID) {
		/* give the IOCB slots consumed by this srb back to the ring */
		ha->req_q_count += srb->iocb_cnt;
		ha->iocb_cnt -= srb->iocb_cnt;
		if (srb->cmd)
			/* MAX_SRBS in host_scribble marks the cmd as reclaimed */
			srb->cmd->host_scribble =
				(unsigned char *)(unsigned long) MAX_SRBS;
	}
	return srb;
}

/**
 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
 * @ha: Pointer to host adapter structure.
 * @cmd: Scsi Command to wait on.
 *
 * This routine waits for the command to be returned by the Firmware
 * for some max time.  Returns non-zero once the command has been
 * returned to the OS, zero on timeout.  NOTE(review): the EEH early
 * exit returns SUCCESS, which is also non-zero, so callers that only
 * test for zero/non-zero (as eh_abort does) treat it as "done".
 **/
static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
				      struct scsi_cmnd *cmd)
{
	int done = 0;
	struct srb *rp;
	uint32_t max_wait_time = EH_WAIT_CMD_TOV;
	int ret = SUCCESS;

	/* Dont wait on command if PCI error is being handled
	 * by PCI AER driver
	 */
	if (unlikely(pci_channel_offline(ha->pdev)) ||
	    (test_bit(AF_EEH_BUSY, &ha->flags))) {
		ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
		    ha->host_no, __func__);
		return ret;
	}

	do {
		/* Checking to see if its returned to OS */
		rp = (struct srb *) CMD_SP(cmd);
		if (rp == NULL) {
			done++;
			break;
		}

		msleep(2000);
	} while (max_wait_time--);

	return done;
}

/**
 * qla4xxx_wait_for_hba_online - waits for HBA to come online
 * @ha: Pointer to host adapter structure
 *
 * Polls adapter_up() until HBA_ONLINE_TOV seconds have elapsed.
 * Returns QLA_SUCCESS when the adapter comes up, QLA_ERROR on timeout.
 **/
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
{
	unsigned long wait_online;

	wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
	while (time_before(jiffies, wait_online)) {

		if (adapter_up(ha))
			return QLA_SUCCESS;

		msleep(2000);
	}

	return QLA_ERROR;
}

/**
 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
 * @ha: pointer to HBA
 * @stgt: target whose outstanding commands to wait for
 * @sdev: optional device within @stgt; when NULL every device on the
 *	target is considered
 *
 * This function waits for all outstanding commands to a lun to complete. It
 * returns 0 if all pending commands are returned and 1 otherwise.
 **/
static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
					struct scsi_target *stgt,
					struct scsi_device *sdev)
{
	int cnt;
	int status = 0;
	struct scsi_cmnd *cmd;

	/*
	 * Waiting for all commands for the designated target or dev
	 * in the active array
	 */
	for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
		cmd = scsi_host_find_tag(ha->host, cnt);
		if (cmd && stgt == scsi_target(cmd->device) &&
		    (!sdev || sdev == cmd->device)) {
			if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
				/* first timed-out command is enough to fail */
				status++;
				break;
			}
		}
	}
	return status;
}

/**
 * qla4xxx_eh_abort - callback for abort task.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to abort the specified
 * command.
 **/
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
	unsigned int id = cmd->device->id;
	unsigned int lun = cmd->device->lun;
	unsigned long flags;
	struct srb *srb = NULL;
	int ret = SUCCESS;
	int wait = 0;

	ql4_printk(KERN_INFO, ha,
	    "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
	    ha->host_no, id, lun, cmd);

	/* Take a reference under the hardware lock so the srb cannot be
	 * completed and freed while the abort mailbox command runs. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	srb = (struct srb *) CMD_SP(cmd);
	if (!srb) {
		/* command already completed - nothing to abort */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return SUCCESS;
	}
	kref_get(&srb->srb_ref);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
		    ha->host_no, id, lun));
		ret = FAILED;
	} else {
		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
		    ha->host_no, id, lun));
		wait = 1;
	}

	kref_put(&srb->srb_ref, qla4xxx_srb_compl);

	/* Wait for command to complete */
	if (wait) {
		if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
			DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
			    ha->host_no, id, lun));
			ret = FAILED;
		}
	}

	ql4_printk(KERN_INFO, ha,
	    "scsi%ld:%d:%d: Abort command - %s\n",
	    ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");

	return ret;
}

/**
 * qla4xxx_eh_device_reset - callback for target reset.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to reset all luns on the
 * specified target.
 **/
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
	struct ddb_entry *ddb_entry = cmd->device->hostdata;
	int ret = FAILED, stat;

	if (!ddb_entry)
		return ret;

	/* let the iSCSI transport veto/queue the reset first */
	ret = iscsi_block_scsi_eh(cmd);
	if (ret)
		return ret;
	ret = FAILED;

	ql4_printk(KERN_INFO, ha,
		   "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
		   cmd->device->channel, cmd->device->id, cmd->device->lun);

	DEBUG2(printk(KERN_INFO
		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
		      cmd, jiffies, cmd->request->timeout / HZ,
		      ha->dpc_flags, cmd->result, cmd->allowed));

	/* FIXME: wait for hba to go online */
	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
	if (stat != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
		goto eh_dev_reset_done;
	}

	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
					 cmd->device)) {
		ql4_printk(KERN_INFO, ha,
			   "DEVICE RESET FAILED - waiting for "
			   "commands.\n");
		goto eh_dev_reset_done;
	}

	/* Send marker. */
	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
		MM_LUN_RESET) != QLA_SUCCESS)
		goto eh_dev_reset_done;

	ql4_printk(KERN_INFO, ha,
		   "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
		   ha->host_no, cmd->device->channel, cmd->device->id,
		   cmd->device->lun);

	ret = SUCCESS;

eh_dev_reset_done:

	return ret;
}

/**
 * qla4xxx_eh_target_reset - callback for target reset.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to reset the target.
 **/
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
	struct ddb_entry *ddb_entry = cmd->device->hostdata;
	int stat, ret;

	if (!ddb_entry)
		return FAILED;

	/* let the iSCSI transport veto/queue the reset first */
	ret = iscsi_block_scsi_eh(cmd);
	if (ret)
		return ret;

	starget_printk(KERN_INFO, scsi_target(cmd->device),
		       "WARM TARGET RESET ISSUED.\n");

	DEBUG2(printk(KERN_INFO
		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
		      ha->dpc_flags, cmd->result, cmd->allowed));

	stat = qla4xxx_reset_target(ha, ddb_entry);
	if (stat != QLA_SUCCESS) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET RESET FAILED.\n");
		return FAILED;
	}

	/* sdev == NULL: wait for every device on this target */
	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
					 NULL)) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET DEVICE RESET FAILED - "
			       "waiting for commands.\n");
		return FAILED;
	}

	/* Send marker. */
	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
		MM_TGT_WARM_RESET) != QLA_SUCCESS) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET DEVICE RESET FAILED - "
			       "marker iocb failed.\n");
		return FAILED;
	}

	starget_printk(KERN_INFO, scsi_target(cmd->device),
		       "WARM TARGET RESET SUCCEEDED.\n");
	return SUCCESS;
}

/**
 * qla4xxx_is_eh_active - check if error handler is running
 * @shost: Pointer to SCSI Host struct
 *
 * This routine finds that if reset host is called in EH
 * scenario or from some application like sg_reset
 **/
static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
{
	if (shost->shost_state == SHOST_RECOVERY)
		return 1;
	return 0;
}

/**
 * qla4xxx_eh_host_reset - kernel callback
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is invoked by the Linux kernel to perform fatal error
 * recovery on the specified adapter.
 **/
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	int return_status = FAILED;
	struct scsi_qla_host *ha;

	ha = to_qla_host(cmd->device->host);

	if (ql4xdontresethba) {
		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
		     ha->host_no, __func__));

		/* Clear outstanding srb in queues */
		if (qla4xxx_is_eh_active(cmd->device->host))
			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);

		return FAILED;
	}

	ql4_printk(KERN_INFO, ha,
		   "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
		   cmd->device->channel, cmd->device->id, cmd->device->lun);

	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
			      "DEAD.\n", ha->host_no, cmd->device->channel,
			      __func__));

		return FAILED;
	}

	/* ISP82xx firmware-context reset, full chip reset otherwise */
	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		if (is_qla8022(ha))
			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
		else
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
	}

	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
		return_status = SUCCESS;

	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
		   return_status == FAILED ? "FAILED" : "SUCCEEDED");

	return return_status;
}

/*
 * qla4xxx_context_reset - reset the firmware context without a chip reset
 * @ha: Pointer to host adapter structure
 *
 * Saves the primary ACB, disables it, then restores it.  Returns
 * QLA_SUCCESS or a negative errno on failure (callers of
 * qla4xxx_host_reset() receive errno-style values).
 */
static int qla4xxx_context_reset(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	struct addr_ctrl_blk_def *acb = NULL;
	uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
	int rval = QLA_SUCCESS;
	dma_addr_t acb_dma;

	acb = dma_alloc_coherent(&ha->pdev->dev,
				 sizeof(struct addr_ctrl_blk_def),
				 &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
			   __func__);
		rval = -ENOMEM;
		goto exit_port_reset;
	}

	memset(acb, 0, acb_len);

	rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
	if (rval != QLA_SUCCESS) {
		rval = -EIO;
		goto exit_free_acb;
	}

	rval = qla4xxx_disable_acb(ha);
	if (rval != QLA_SUCCESS) {
		rval = -EIO;
		goto exit_free_acb;
	}

	/* give the firmware time to complete the disable */
	wait_for_completion_timeout(&ha->disable_acb_comp,
				    DISABLE_ACB_TOV * HZ);

	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
	if (rval != QLA_SUCCESS) {
		rval = -EIO;
		goto exit_free_acb;
	}

exit_free_acb:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
			  acb, acb_dma);
exit_port_reset:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
	return rval;
}

/*
 * qla4xxx_host_reset - sysfs/transport-initiated host reset
 * @shost: SCSI host to reset
 * @reset_type: SCSI_ADAPTER_RESET or SCSI_FIRMWARE_RESET
 *
 * Returns 0 on success or a negative errno.
 */
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	int rval = QLA_SUCCESS;

	if (ql4xdontresethba) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
				  __func__));
		rval = -EPERM;
		goto exit_host_reset;
	}

	rval = qla4xxx_wait_for_hba_online(ha);
	if (rval != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
				  "adapter\n", __func__));
		rval = -EIO;
		goto exit_host_reset;
	}

	/* a full reset already pending supersedes anything requested here */
	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
		goto recover_adapter;

	switch (reset_type) {
	case SCSI_ADAPTER_RESET:
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		break;
	case SCSI_FIRMWARE_RESET:
		if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
			if (is_qla8022(ha))
				/* set firmware context reset */
				set_bit(DPC_RESET_HA_FW_CONTEXT,
					&ha->dpc_flags);
			else {
				rval = qla4xxx_context_reset(ha);
				goto exit_host_reset;
			}
		}
		break;
	}

recover_adapter:
	rval = qla4xxx_recover_adapter(ha);
	if (rval != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
				  __func__));
		rval = -EIO;
	}

exit_host_reset:
	return rval;
}

/* PCI AER driver recovers from all correctable errors w/o
 * driver intervention. For uncorrectable errors PCI AER
 * driver calls the following device driver's callbacks
 *
 * - Fatal Errors - link_reset
 * - Non-Fatal Errors - driver's pci_error_detected() which
 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
 *
 * PCI AER driver calls
 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
 * returns RECOVERED or NEED_RESET if fw_hung
 * NEED_RESET - driver's slot_reset()
 * DISCONNECT - device is dead & cannot recover
 * RECOVERED - driver's pci_resume()
 */
static pci_ers_result_t
qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
	    ha->host_no, __func__, state);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	switch (state) {
	case pci_channel_io_normal:
		/* spurious notification - resume normal operation */
		clear_bit(AF_EEH_BUSY, &ha->flags);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* I/O is fenced; fail pending mailbox cmds and quiesce */
		set_bit(AF_EEH_BUSY, &ha->flags);
		qla4xxx_mailbox_premature_completion(ha);
		qla4xxx_free_irqs(ha);
		pci_disable_device(pdev);
		/* Return back all IOs */
		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* device is gone for good - fail everything outstanding */
		set_bit(AF_EEH_BUSY, &ha->flags);
		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	/* unknown channel state - request a slot reset to be safe */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * qla4xxx_pci_mmio_enabled() gets called if
 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
 * and read/write to the device still works.
 **/
static pci_ers_result_t
qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	return PCI_ERS_RESULT_RECOVERED;
}

/*
 * qla4_8xxx_error_recovery - recover an ISP82xx after an AER slot reset
 * @ha: Pointer to host adapter structure
 *
 * Determines whether this PCI function is the "reset owner" (the
 * lowest-numbered function with no other enabled function below it);
 * the owner re-initializes the firmware while the other functions only
 * rebuild their firmware context once the device reports READY.
 * Returns QLA_SUCCESS or QLA_ERROR.
 */
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
	uint32_t rval = QLA_ERROR;
	uint32_t ret = 0;
	int fn;
	struct pci_dev *other_pdev = NULL;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);

	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);

	if (test_bit(AF_ONLINE, &ha->flags)) {
		clear_bit(AF_ONLINE, &ha->flags);
		clear_bit(AF_LINK_UP, &ha->flags);
		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
	}

	/* scan downward for an enabled lower-numbered function; if none is
	 * found (fn reaches 0) this function owns the reset */
	fn = PCI_FUNC(ha->pdev->devfn);
	while (fn > 0) {
		fn--;
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
		    "func %x\n", ha->host_no, __func__, fn);
		/* Get the pci device given the domain, bus,
		 * slot/function number */
		other_pdev =
		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
		    fn));

		if (!other_pdev)
			continue;

		if (atomic_read(&other_pdev->enable_cnt)) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
			    "func in enabled state%x\n", ha->host_no,
			    __func__, fn);
			pci_dev_put(other_pdev);
			break;
		}
		pci_dev_put(other_pdev);
	}

	/* The first function on the card, the reset owner will
	 * start & initialize the firmware. The other functions
	 * on the card will reset the firmware context
	 */
	if (!fn) {
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
		    "0x%x is the owner\n", ha->host_no, __func__,
		    ha->pdev->devfn);

		qla4_8xxx_idc_lock(ha);
		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
		    QLA82XX_DEV_COLD);

		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
		    QLA82XX_IDC_VERSION);

		qla4_8xxx_idc_unlock(ha);
		clear_bit(AF_FW_RECOVERY, &ha->flags);
		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
		qla4_8xxx_idc_lock(ha);

		if (rval != QLA_SUCCESS) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
			    "FAILED\n", ha->host_no, __func__);
			qla4_8xxx_clear_drv_active(ha);
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA82XX_DEV_FAILED);
		} else {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
			    "READY\n", ha->host_no, __func__);
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA82XX_DEV_READY);
			/* Clear driver state register */
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
			qla4_8xxx_set_drv_active(ha);
			ret = qla4xxx_request_irqs(ha);
			if (ret) {
				ql4_printk(KERN_WARNING, ha, "Failed to "
				    "reserve interrupt %d already in use.\n",
				    ha->pdev->irq);
				rval = QLA_ERROR;
			} else {
				ha->isp_ops->enable_intrs(ha);
				rval = QLA_SUCCESS;
			}
		}
		qla4_8xxx_idc_unlock(ha);
	} else {
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
		    "the reset owner\n", ha->host_no, __func__,
		    ha->pdev->devfn);
		if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
		    QLA82XX_DEV_READY)) {
			clear_bit(AF_FW_RECOVERY, &ha->flags);
			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
			if (rval == QLA_SUCCESS) {
				ret = qla4xxx_request_irqs(ha);
				if (ret) {
					ql4_printk(KERN_WARNING, ha, "Failed to"
					    " reserve interrupt %d already in"
					    " use.\n", ha->pdev->irq);
					rval = QLA_ERROR;
				} else {
					ha->isp_ops->enable_intrs(ha);
					rval = QLA_SUCCESS;
				}
			}
			qla4_8xxx_idc_lock(ha);
			qla4_8xxx_set_drv_active(ha);
			qla4_8xxx_idc_unlock(ha);
		}
	}
	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
	return rval;
}

/*
 * qla4xxx_pci_slot_reset - AER callback after the PCI slot was reset
 * @pdev: PCI device
 *
 * Restores PCI state, re-enables the device and, for ISP82xx, runs
 * the multi-function error-recovery protocol above.
 */
static pci_ers_result_t
qla4xxx_pci_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
	int rc;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
	    ha->host_no, __func__);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	/* Restore the saved state of PCIe device -
	 * BAR registers, PCI Config space, PCIX, MSI,
	 * IOV states
	 */
	pci_restore_state(pdev);

	/* pci_restore_state() clears the saved_state flag of the device
	 * save restored state which resets saved_state flag
	 */
	pci_save_state(pdev);

	/* Initialize device or resume if in suspended state */
	rc = pci_enable_device(pdev);
	if (rc) {
		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
		    "device after reset\n", ha->host_no, __func__);
		goto exit_slot_reset;
	}

	ha->isp_ops->disable_intrs(ha);

	if (is_qla8022(ha)) {
		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
			ret = PCI_ERS_RESULT_RECOVERED;
			goto exit_slot_reset;
		} else
			goto exit_slot_reset;
	}

exit_slot_reset:
	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n"
	    "device after reset\n", ha->host_no, __func__, ret);
	return ret;
}

/*
 * qla4xxx_pci_resume - AER callback once normal I/O can resume
 * @pdev: PCI device
 */
static void
qla4xxx_pci_resume(struct pci_dev *pdev)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
	int ret;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
	    ha->host_no, __func__);

	ret = qla4xxx_wait_for_hba_online(ha);
	if (ret != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
		    "resume I/O from slot/link_reset\n", ha->host_no,
		     __func__);
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);
	clear_bit(AF_EEH_BUSY, &ha->flags);
}

static struct pci_error_handlers qla4xxx_err_handler = {
	.error_detected = qla4xxx_pci_error_detected,
	.mmio_enabled = qla4xxx_pci_mmio_enabled,
	.slot_reset = qla4xxx_pci_slot_reset,
	.resume = qla4xxx_pci_resume,
};

/* PCI IDs this driver binds to: ISP4010/4022/4032 and ISP8022 */
static struct pci_device_id qla4xxx_pci_tbl[] = {
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP8022,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{0, 0},
};
MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);

static struct pci_driver qla4xxx_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= qla4xxx_pci_tbl,
	.probe		= qla4xxx_probe_adapter,
	.remove		= qla4xxx_remove_adapter,
	.err_handler = &qla4xxx_err_handler,
};

/*
 * qla4xxx_module_init - module entry point
 *
 * Creates the SRB slab cache, registers the iSCSI transport and the
 * PCI driver; unwinds in reverse order on any failure.
 */
static int __init qla4xxx_module_init(void)
{
	int ret;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		printk(KERN_ERR
		       "%s: Unable to allocate SRB cache..."
		       "Failing load!\n", DRIVER_NAME);
		ret = -ENOMEM;
		goto no_srp_cache;
	}

	/* Derive version string. */
	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
	if (ql4xextended_error_logging)
		strcat(qla4xxx_version_str, "-debug");

	qla4xxx_scsi_transport =
		iscsi_register_transport(&qla4xxx_iscsi_transport);
	if (!qla4xxx_scsi_transport){
		ret = -ENODEV;
		goto release_srb_cache;
	}

	ret = pci_register_driver(&qla4xxx_pci_driver);
	if (ret)
		goto unregister_transport;

	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
	return 0;

unregister_transport:
	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
release_srb_cache:
	kmem_cache_destroy(srb_cachep);
no_srp_cache:
	return ret;
}

static void __exit qla4xxx_module_exit(void)
{
	pci_unregister_driver(&qla4xxx_pci_driver);
	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
	kmem_cache_destroy(srb_cachep);
}

module_init(qla4xxx_module_init);
module_exit(qla4xxx_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
gpl-2.0
mifl/android_kernel_pantech_ef50l
drivers/scsi/pm8001/pm8001_init.c
4803
26040
/* * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
 *
 */

#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_chips.h"

static struct scsi_transport_template *pm8001_stt;

/* per-chip dispatch table: { number of phys, hw-ops vtable } */
static const struct pm8001_chip_info pm8001_chips[] = {
	[chip_8001] = { 8, &pm8001_8001_dispatch,},
};
static int pm8001_id;

LIST_HEAD(hba_list);

struct workqueue_struct *pm8001_wq;

/**
 * The main structure which LLDD must register for scsi core.
 */
static struct scsi_host_template pm8001_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= sas_slave_configure,
	.scan_finished		= pm8001_scan_finished,
	.scan_start		= pm8001_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
	.shost_attrs		= pm8001_host_attrs,
};

/**
 * Sas layer call this function to execute specific task.
 */
static struct sas_domain_function_template pm8001_transport_ops = {
	.lldd_dev_found		= pm8001_dev_found,
	.lldd_dev_gone		= pm8001_dev_gone,
	.lldd_execute_task	= pm8001_queue_command,
	.lldd_control_phy	= pm8001_phy_control,

	.lldd_abort_task	= pm8001_abort_task,
	.lldd_abort_task_set	= pm8001_abort_task_set,
	.lldd_clear_aca		= pm8001_clear_aca,
	.lldd_clear_task_set	= pm8001_clear_task_set,
	.lldd_I_T_nexus_reset	= pm8001_I_T_nexus_reset,
	.lldd_lu_reset		= pm8001_lu_reset,
	.lldd_query_task	= pm8001_query_task,
};

/**
 *pm8001_phy_init - initiate our adapter phys
 *@pm8001_ha: our hba structure.
 *@phy_id: phy id.
 */
static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha,
	int phy_id)
{
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	phy->phy_state = 0;
	phy->pm8001_ha = pm8001_ha;
	/* phys beyond the chip's phy count stay disabled */
	sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_id;
	sas_phy->sas_addr = &pm8001_ha->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
	sas_phy->lldd_phy = phy;
}

/**
 *pm8001_free - free hba
 *@pm8001_ha: our hba structure.
 *
 * Releases every DMA region in the memory map, unmaps the chip BARs,
 * drops the shost reference, and frees the tag bitmap and the hba
 * structure itself.  Safe to call with a NULL pointer.
 */
static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
{
	int i;

	if (!pm8001_ha)
		return;

	for (i = 0; i < USI_MAX_MEMCNT; i++) {
		if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
			pci_free_consistent(pm8001_ha->pdev,
				pm8001_ha->memoryMap.region[i].element_size,
				pm8001_ha->memoryMap.region[i].virt_ptr,
				pm8001_ha->memoryMap.region[i].phys_addr);
			}
	}
	PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
	if (pm8001_ha->shost)
		scsi_host_put(pm8001_ha->shost);
	flush_workqueue(pm8001_wq);
	kfree(pm8001_ha->tags);
	kfree(pm8001_ha);
}

#ifdef PM8001_USE_TASKLET
/* bottom half: runs the chip ISR outside hard-irq context */
static void pm8001_tasklet(unsigned long opaque)
{
	struct pm8001_hba_info *pm8001_ha;
	pm8001_ha = (struct pm8001_hba_info *)opaque;
	if (unlikely(!pm8001_ha))
		BUG_ON(1);
	PM8001_CHIP_DISP->isr(pm8001_ha);
}
#endif

/**
 * pm8001_interrupt - when HBA originate a interrupt,we should invoke this
 * dispatcher to handle each case.
 * @irq: irq number.
 * @opaque: the passed general host adapter struct
 */
static irqreturn_t pm8001_interrupt(int irq, void *opaque)
{
	struct pm8001_hba_info *pm8001_ha;
	irqreturn_t ret = IRQ_HANDLED;
	struct sas_ha_struct *sha = opaque;
	pm8001_ha = sha->lldd_ha;
	if (unlikely(!pm8001_ha))
		return IRQ_NONE;
	if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
		return IRQ_NONE;
#ifdef PM8001_USE_TASKLET
	/* defer the heavy lifting to the tasklet bottom half */
	tasklet_schedule(&pm8001_ha->tasklet);
#else
	ret = PM8001_CHIP_DISP->isr(pm8001_ha);
#endif
	return ret;
}

/**
 * pm8001_alloc - initiate our hba structure and 6 DMAs area.
 * @pm8001_ha:our hba structure.
 *
 * Initializes phys/ports, allocates the tag bitmap, describes every
 * MPI DMA region (sizes/alignments) and allocates them in one pass,
 * then wires up the device and CCB tables.  Returns 0 on success,
 * 1 on any allocation failure (pm8001_free() does the cleanup).
 */
static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	spin_lock_init(&pm8001_ha->lock);
	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
		pm8001_phy_init(pm8001_ha, i);
		pm8001_ha->port[i].wide_port_phymap = 0;
		pm8001_ha->port[i].port_attached = 0;
		pm8001_ha->port[i].port_state = 0;
		INIT_LIST_HEAD(&pm8001_ha->port[i].list);
	}

	pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
	if (!pm8001_ha->tags)
		goto err_out;
	/* MPI Memory region 1 for AAP Event Log for fw */
	pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
	pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
	pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE;
	pm8001_ha->memoryMap.region[AAP1].alignment = 32;

	/* MPI Memory region 2 for IOP Event Log for fw */
	pm8001_ha->memoryMap.region[IOP].num_elements = 1;
	pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE;
	pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
	pm8001_ha->memoryMap.region[IOP].alignment = 32;

	/* MPI Memory region 3 for consumer Index of inbound queues */
	pm8001_ha->memoryMap.region[CI].num_elements = 1;
	pm8001_ha->memoryMap.region[CI].element_size = 4;
	pm8001_ha->memoryMap.region[CI].total_len = 4;
	pm8001_ha->memoryMap.region[CI].alignment = 4;

	/* MPI Memory region 4 for producer Index of outbound queues */
	pm8001_ha->memoryMap.region[PI].num_elements = 1;
	pm8001_ha->memoryMap.region[PI].element_size = 4;
	pm8001_ha->memoryMap.region[PI].total_len = 4;
	pm8001_ha->memoryMap.region[PI].alignment = 4;

	/* MPI Memory region 5 inbound queues */
	pm8001_ha->memoryMap.region[IB].num_elements = 256;
	pm8001_ha->memoryMap.region[IB].element_size = 64;
	pm8001_ha->memoryMap.region[IB].total_len = 256 * 64;
	pm8001_ha->memoryMap.region[IB].alignment = 64;

	/* MPI Memory region 6 inbound queues */
	pm8001_ha->memoryMap.region[OB].num_elements = 256;
	pm8001_ha->memoryMap.region[OB].element_size = 64;
	pm8001_ha->memoryMap.region[OB].total_len = 256 * 64;
	pm8001_ha->memoryMap.region[OB].alignment = 64;

	/* Memory region write DMA*/
	pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
	pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
	pm8001_ha->memoryMap.region[NVMD].total_len = 4096;

	/* Memory region for devices*/
	pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1;
	pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES *
		sizeof(struct pm8001_device);
	pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES *
		sizeof(struct pm8001_device);

	/* Memory region for ccb_info*/
	pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1;
	pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB *
		sizeof(struct pm8001_ccb_info);
	pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
		sizeof(struct pm8001_ccb_info);

	for (i = 0; i < USI_MAX_MEMCNT; i++) {
		if (pm8001_mem_alloc(pm8001_ha->pdev,
			&pm8001_ha->memoryMap.region[i].virt_ptr,
			&pm8001_ha->memoryMap.region[i].phys_addr,
			&pm8001_ha->memoryMap.region[i].phys_addr_hi,
			&pm8001_ha->memoryMap.region[i].phys_addr_lo,
			pm8001_ha->memoryMap.region[i].total_len,
			pm8001_ha->memoryMap.region[i].alignment) != 0) {
				PM8001_FAIL_DBG(pm8001_ha,
					pm8001_printk("Mem%d alloc failed\n",
					i));
				goto err_out;
		}
	}

	pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
	for (i = 0; i < PM8001_MAX_DEVICES; i++) {
		pm8001_ha->devices[i].dev_type = NO_DEVICE;
		pm8001_ha->devices[i].id = i;
		pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
		pm8001_ha->devices[i].running_req = 0;
	}
	pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr;
	for (i = 0; i < PM8001_MAX_CCB; i++) {
		pm8001_ha->ccb_info[i].ccb_dma_handle =
			pm8001_ha->memoryMap.region[CCB_MEM].phys_addr +
			i * sizeof(struct pm8001_ccb_info);
		pm8001_ha->ccb_info[i].task = NULL;
		pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
		pm8001_ha->ccb_info[i].device = NULL;
		++pm8001_ha->tags_num;
	}
	pm8001_ha->flags = PM8001F_INIT_TIME;
	/* Initialize tags */
	pm8001_tag_init(pm8001_ha);
	return 0;
err_out:
	return 1;
}

/**
 * pm8001_ioremap - remap the pci high physical address to kernal virtual
 * address so that we can access them.
 * @pm8001_ha:our hba structure.
 */
static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
{
	u32 bar;
	u32 logicalBar = 0;
	struct pci_dev *pdev;

	pdev = pm8001_ha->pdev;
	/* map pci mem (PMC pci base 0-3)*/
	for (bar = 0; bar < 6; bar++) {
		/*
		** logical BARs for SPC:
		** bar 0 and 1 - logical BAR0
		** bar 2 and 3 - logical BAR1
		** bar4 - logical BAR2
		** bar5 - logical BAR3
		** Skip the appropriate assignments:
		*/
		if ((bar == 1) || (bar == 3))
			continue;
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			pm8001_ha->io_mem[logicalBar].membase =
				pci_resource_start(pdev, bar);
			pm8001_ha->io_mem[logicalBar].membase &=
				(u32)PCI_BASE_ADDRESS_MEM_MASK;
			pm8001_ha->io_mem[logicalBar].memsize =
				pci_resource_len(pdev, bar);
			pm8001_ha->io_mem[logicalBar].memvirtaddr =
				ioremap(pm8001_ha->io_mem[logicalBar].membase,
				pm8001_ha->io_mem[logicalBar].memsize);
			PM8001_INIT_DBG(pm8001_ha,
				pm8001_printk("PCI: bar %d, logicalBar %d "
				"virt_addr=%lx,len=%d\n", bar, logicalBar,
				(unsigned long)
				pm8001_ha->io_mem[logicalBar].memvirtaddr,
				pm8001_ha->io_mem[logicalBar].memsize));
		} else {
			pm8001_ha->io_mem[logicalBar].membase	= 0;
			pm8001_ha->io_mem[logicalBar].memsize	= 0;
			pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
		}
		logicalBar++;
	}
	return 0;
}

/**
 * pm8001_pci_alloc -
 *	initialize our ha card structure
 * @pdev: pci device.
 * @ent: ent
 * @shost: scsi host struct which has been initialized before.
 */
static struct pm8001_hba_info *__devinit
pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost)
{
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	/* lldd_ha was allocated by pm8001_prep_sas_ha_init() */
	pm8001_ha = sha->lldd_ha;
	if (!pm8001_ha)
		return NULL;

	pm8001_ha->pdev = pdev;
	pm8001_ha->dev = &pdev->dev;
	pm8001_ha->chip_id = chip_id;
	pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
	pm8001_ha->irq = pdev->irq;
	pm8001_ha->sas = sha;
	pm8001_ha->shost = shost;
	pm8001_ha->id = pm8001_id++;
	pm8001_ha->logging_level = 0x01;
	sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
#ifdef PM8001_USE_TASKLET
	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
			(unsigned long)pm8001_ha);
#endif
	pm8001_ioremap(pm8001_ha);
	if (!pm8001_alloc(pm8001_ha))
		return pm8001_ha;
	/* pm8001_alloc() failed - tear everything down */
	pm8001_free(pm8001_ha);
	return NULL;
}

/**
 * pci_go_44 - pm8001 specified, its DMA is 44 bit rather than 64 bit
 * @pdev: pci device.
 *
 * Prefers a 44-bit streaming mask with a 44- or 32-bit consistent
 * mask; otherwise falls back to 32 bits for both.  Returns 0 on
 * success or the last failing errno.
 */
static int pci_go_44(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev,
				DMA_BIT_MASK(32));
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					"44-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				"32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return rc;
}

/**
 * pm8001_prep_sas_ha_init - allocate memory in general hba struct && init them.
 * @shost: scsi host which has been allocated outside.
 * @chip_info: our ha struct.
*/ static int __devinit pm8001_prep_sas_ha_init(struct Scsi_Host * shost, const struct pm8001_chip_info *chip_info) { int phy_nr, port_nr; struct asd_sas_phy **arr_phy; struct asd_sas_port **arr_port; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); phy_nr = chip_info->n_phy; port_nr = phy_nr; memset(sha, 0x00, sizeof(*sha)); arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL); if (!arr_phy) goto exit; arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL); if (!arr_port) goto exit_free2; sha->sas_phy = arr_phy; sha->sas_port = arr_port; sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL); if (!sha->lldd_ha) goto exit_free1; shost->transportt = pm8001_stt; shost->max_id = PM8001_MAX_DEVICES; shost->max_lun = 8; shost->max_channel = 0; shost->unique_id = pm8001_id; shost->max_cmd_len = 16; shost->can_queue = PM8001_CAN_QUEUE; shost->cmd_per_lun = 32; return 0; exit_free1: kfree(arr_port); exit_free2: kfree(arr_phy); exit: return -1; } /** * pm8001_post_sas_ha_init - initialize general hba struct defined in libsas * @shost: scsi host which has been allocated outside * @chip_info: our ha struct. */ static void __devinit pm8001_post_sas_ha_init(struct Scsi_Host *shost, const struct pm8001_chip_info *chip_info) { int i = 0; struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); pm8001_ha = sha->lldd_ha; for (i = 0; i < chip_info->n_phy; i++) { sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy; sha->sas_port[i] = &pm8001_ha->port[i].sas_port; } sha->sas_ha_name = DRV_NAME; sha->dev = pm8001_ha->dev; sha->lldd_module = THIS_MODULE; sha->sas_addr = &pm8001_ha->sas_addr[0]; sha->num_phys = chip_info->n_phy; sha->lldd_max_execute_num = 1; sha->lldd_queue_size = PM8001_CAN_QUEUE; sha->core.shost = shost; } /** * pm8001_init_sas_add - initialize sas address * @chip_info: our ha struct. 
* * Currently we just set the fixed SAS address to our HBA,for manufacture, * it should read from the EEPROM */ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) { u8 i; #ifdef PM8001_READ_VPD DECLARE_COMPLETION_ONSTACK(completion); struct pm8001_ioctl_payload payload; pm8001_ha->nvmd_completion = &completion; payload.minor_function = 0; payload.length = 128; payload.func_specific = kzalloc(128, GFP_KERNEL); PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); wait_for_completion(&completion); for (i = 0; i < pm8001_ha->chip->n_phy; i++) { memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, SAS_ADDR_SIZE); PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy %d sas_addr = %016llx \n", i, pm8001_ha->phy[i].dev_sas_addr)); } #else for (i = 0; i < pm8001_ha->chip->n_phy; i++) { pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL; pm8001_ha->phy[i].dev_sas_addr = cpu_to_be64((u64) (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr)); } memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr, SAS_ADDR_SIZE); #endif } #ifdef PM8001_USE_MSIX /** * pm8001_setup_msix - enable MSI-X interrupt * @chip_info: our ha struct. 
* @irq_handler: irq_handler */ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, irq_handler_t irq_handler) { u32 i = 0, j = 0; u32 number_of_intr = 1; int flag = 0; u32 max_entry; int rc; max_entry = sizeof(pm8001_ha->msix_entries) / sizeof(pm8001_ha->msix_entries[0]); flag |= IRQF_DISABLED; for (i = 0; i < max_entry ; i++) pm8001_ha->msix_entries[i].entry = i; rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries, number_of_intr); pm8001_ha->number_of_intr = number_of_intr; if (!rc) { for (i = 0; i < number_of_intr; i++) { if (request_irq(pm8001_ha->msix_entries[i].vector, irq_handler, flag, DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost))) { for (j = 0; j < i; j++) free_irq( pm8001_ha->msix_entries[j].vector, SHOST_TO_SAS_HA(pm8001_ha->shost)); pci_disable_msix(pm8001_ha->pdev); break; } } } return rc; } #endif /** * pm8001_request_irq - register interrupt * @chip_info: our ha struct. */ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) { struct pci_dev *pdev; irq_handler_t irq_handler = pm8001_interrupt; int rc; pdev = pm8001_ha->pdev; #ifdef PM8001_USE_MSIX if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) return pm8001_setup_msix(pm8001_ha, irq_handler); else goto intx; #endif intx: /* initialize the INT-X interrupt */ rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost)); return rc; } /** * pm8001_pci_probe - probe supported device * @pdev: pci device which kernel has been prepared for. 
* @ent: pci device id * * This function is the main initialization function, when register a new * pci driver it is invoked, all struct an hardware initilization should be done * here, also, register interrupt */ static int __devinit pm8001_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned int rc; u32 pci_reg; struct pm8001_hba_info *pm8001_ha; struct Scsi_Host *shost = NULL; const struct pm8001_chip_info *chip; dev_printk(KERN_INFO, &pdev->dev, "pm8001: driver version %s\n", DRV_VERSION); rc = pci_enable_device(pdev); if (rc) goto err_out_enable; pci_set_master(pdev); /* * Enable pci slot busmaster by setting pci command register. * This is required by FW for Cyclone card. */ pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg); pci_reg |= 0x157; pci_write_config_dword(pdev, PCI_COMMAND, pci_reg); rc = pci_request_regions(pdev, DRV_NAME); if (rc) goto err_out_disable; rc = pci_go_44(pdev); if (rc) goto err_out_regions; shost = scsi_host_alloc(&pm8001_sht, sizeof(void *)); if (!shost) { rc = -ENOMEM; goto err_out_regions; } chip = &pm8001_chips[ent->driver_data]; SHOST_TO_SAS_HA(shost) = kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL); if (!SHOST_TO_SAS_HA(shost)) { rc = -ENOMEM; goto err_out_free_host; } rc = pm8001_prep_sas_ha_init(shost, chip); if (rc) { rc = -ENOMEM; goto err_out_free; } pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost); if (!pm8001_ha) { rc = -ENOMEM; goto err_out_free; } list_add_tail(&pm8001_ha->list, &hba_list); PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); if (rc) goto err_out_ha_free; rc = scsi_add_host(shost, &pdev->dev); if (rc) goto err_out_ha_free; rc = pm8001_request_irq(pm8001_ha); if (rc) goto err_out_shost; PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); pm8001_init_sas_add(pm8001_ha); pm8001_post_sas_ha_init(shost, chip); rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); if (rc) goto err_out_shost; 
scsi_scan_host(pm8001_ha->shost); return 0; err_out_shost: scsi_remove_host(pm8001_ha->shost); err_out_ha_free: pm8001_free(pm8001_ha); err_out_free: kfree(SHOST_TO_SAS_HA(shost)); err_out_free_host: kfree(shost); err_out_regions: pci_release_regions(pdev); err_out_disable: pci_disable_device(pdev); err_out_enable: return rc; } static void __devexit pm8001_pci_remove(struct pci_dev *pdev) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; int i; pm8001_ha = sha->lldd_ha; pci_set_drvdata(pdev, NULL); sas_unregister_ha(sha); sas_remove_host(pm8001_ha->shost); list_del(&pm8001_ha->list); scsi_remove_host(pm8001_ha->shost); PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) free_irq(pm8001_ha->msix_entries[i].vector, sha); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); #endif #ifdef PM8001_USE_TASKLET tasklet_kill(&pm8001_ha->tasklet); #endif pm8001_free(pm8001_ha); kfree(sha->sas_phy); kfree(sha->sas_port); kfree(sha); pci_release_regions(pdev); pci_disable_device(pdev); } /** * pm8001_pci_suspend - power management suspend main entry point * @pdev: PCI device struct * @state: PM state change to (usually PCI_D3) * * Returns 0 success, anything else error. 
*/ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; int i , pos; u32 device_state; pm8001_ha = sha->lldd_ha; flush_workqueue(pm8001_wq); scsi_block_requests(pm8001_ha->shost); pos = pci_find_capability(pdev, PCI_CAP_ID_PM); if (pos == 0) { printk(KERN_ERR " PCI PM not supported\n"); return -ENODEV; } PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) free_irq(pm8001_ha->msix_entries[i].vector, sha); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); #endif #ifdef PM8001_USE_TASKLET tasklet_kill(&pm8001_ha->tasklet); #endif device_state = pci_choose_state(pdev, state); pm8001_printk("pdev=0x%p, slot=%s, entering " "operating state [D%d]\n", pdev, pm8001_ha->name, device_state); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, device_state); return 0; } /** * pm8001_pci_resume - power management resume main entry point * @pdev: PCI device struct * * Returns 0 success, anything else error. 
*/ static int pm8001_pci_resume(struct pci_dev *pdev) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; int rc; u32 device_state; pm8001_ha = sha->lldd_ha; device_state = pdev->current_state; pm8001_printk("pdev=0x%p, slot=%s, resuming from previous " "operating state [D%d]\n", pdev, pm8001_ha->name, device_state); pci_set_power_state(pdev, PCI_D0); pci_enable_wake(pdev, PCI_D0, 0); pci_restore_state(pdev); rc = pci_enable_device(pdev); if (rc) { pm8001_printk("slot=%s Enable device failed during resume\n", pm8001_ha->name); goto err_out_enable; } pci_set_master(pdev); rc = pci_go_44(pdev); if (rc) goto err_out_disable; PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); if (rc) goto err_out_disable; PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); rc = pm8001_request_irq(pm8001_ha); if (rc) goto err_out_disable; #ifdef PM8001_USE_TASKLET tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, (unsigned long)pm8001_ha); #endif PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); scsi_unblock_requests(pm8001_ha->shost); return 0; err_out_disable: scsi_remove_host(pm8001_ha->shost); pci_disable_device(pdev); err_out_enable: return rc; } static struct pci_device_id __devinitdata pm8001_pci_table[] = { { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 }, { PCI_DEVICE(0x117c, 0x0042), .driver_data = chip_8001 }, {} /* terminate list */ }; static struct pci_driver pm8001_pci_driver = { .name = DRV_NAME, .id_table = pm8001_pci_table, .probe = pm8001_pci_probe, .remove = __devexit_p(pm8001_pci_remove), .suspend = pm8001_pci_suspend, .resume = pm8001_pci_resume, }; /** * pm8001_init - initialize scsi transport template */ static int __init pm8001_init(void) { int rc = -ENOMEM; pm8001_wq = alloc_workqueue("pm8001", 0, 0); if (!pm8001_wq) goto err; pm8001_id = 0; pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops); if (!pm8001_stt) goto err_wq; rc = pci_register_driver(&pm8001_pci_driver); if 
(rc) goto err_tp; return 0; err_tp: sas_release_transport(pm8001_stt); err_wq: destroy_workqueue(pm8001_wq); err: return rc; } static void __exit pm8001_exit(void) { pci_unregister_driver(&pm8001_pci_driver); sas_release_transport(pm8001_stt); destroy_workqueue(pm8001_wq); } module_init(pm8001_init); module_exit(pm8001_exit); MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
gpl-2.0
yank555-lu/Hammerhead-3.4-kitkat-mr2
fs/btrfs/ordered-data.c
4803
26643
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/writeback.h> #include <linux/pagevec.h> #include "ctree.h" #include "transaction.h" #include "btrfs_inode.h" #include "extent_io.h" static u64 entry_end(struct btrfs_ordered_extent *entry) { if (entry->file_offset + entry->len < entry->file_offset) return (u64)-1; return entry->file_offset + entry->len; } /* returns NULL if the insertion worked, or it returns the node it did find * in the tree */ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, struct rb_node *node) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct btrfs_ordered_extent *entry; while (*p) { parent = *p; entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); if (file_offset < entry->file_offset) p = &(*p)->rb_left; else if (file_offset >= entry_end(entry)) p = &(*p)->rb_right; else return parent; } rb_link_node(node, parent, p); rb_insert_color(node, root); return NULL; } static void ordered_data_tree_panic(struct inode *inode, int errno, u64 offset) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset " "%llu\n", (unsigned long long)offset); } /* * look for a given offset in the tree, and if it can't be found return the * 
first lesser offset */ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, struct rb_node **prev_ret) { struct rb_node *n = root->rb_node; struct rb_node *prev = NULL; struct rb_node *test; struct btrfs_ordered_extent *entry; struct btrfs_ordered_extent *prev_entry = NULL; while (n) { entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); prev = n; prev_entry = entry; if (file_offset < entry->file_offset) n = n->rb_left; else if (file_offset >= entry_end(entry)) n = n->rb_right; else return n; } if (!prev_ret) return NULL; while (prev && file_offset >= entry_end(prev_entry)) { test = rb_next(prev); if (!test) break; prev_entry = rb_entry(test, struct btrfs_ordered_extent, rb_node); if (file_offset < entry_end(prev_entry)) break; prev = test; } if (prev) prev_entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node); while (prev && file_offset < entry_end(prev_entry)) { test = rb_prev(prev); if (!test) break; prev_entry = rb_entry(test, struct btrfs_ordered_extent, rb_node); prev = test; } *prev_ret = prev; return NULL; } /* * helper to check if a given offset is inside a given entry */ static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) { if (file_offset < entry->file_offset || entry->file_offset + entry->len <= file_offset) return 0; return 1; } static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset, u64 len) { if (file_offset + len <= entry->file_offset || entry->file_offset + entry->len <= file_offset) return 0; return 1; } /* * look find the first ordered struct that has this offset, otherwise * the first one less than this offset */ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, u64 file_offset) { struct rb_root *root = &tree->tree; struct rb_node *prev = NULL; struct rb_node *ret; struct btrfs_ordered_extent *entry; if (tree->last) { entry = rb_entry(tree->last, struct btrfs_ordered_extent, rb_node); if (offset_in_entry(entry, file_offset)) 
return tree->last; } ret = __tree_search(root, file_offset, &prev); if (!ret) ret = prev; if (ret) tree->last = ret; return ret; } /* allocate and add a new ordered_extent into the per-inode tree. * file_offset is the logical offset in the file * * start is the disk block number of an extent already reserved in the * extent allocation tree * * len is the length of the extent * * The tree is given a single reference on the ordered extent that was * inserted. */ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type, int dio, int compress_type) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry; tree = &BTRFS_I(inode)->ordered_tree; entry = kzalloc(sizeof(*entry), GFP_NOFS); if (!entry) return -ENOMEM; entry->file_offset = file_offset; entry->start = start; entry->len = len; entry->disk_len = disk_len; entry->bytes_left = len; entry->inode = inode; entry->compress_type = compress_type; if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) set_bit(type, &entry->flags); if (dio) set_bit(BTRFS_ORDERED_DIRECT, &entry->flags); /* one ref for the tree */ atomic_set(&entry->refs, 1); init_waitqueue_head(&entry->wait); INIT_LIST_HEAD(&entry->list); INIT_LIST_HEAD(&entry->root_extent_list); trace_btrfs_ordered_extent_add(inode, entry); spin_lock(&tree->lock); node = tree_insert(&tree->tree, file_offset, &entry->rb_node); if (node) ordered_data_tree_panic(inode, -EEXIST, file_offset); spin_unlock(&tree->lock); spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); list_add_tail(&entry->root_extent_list, &BTRFS_I(inode)->root->fs_info->ordered_extents); spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); return 0; } int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type) { return __btrfs_add_ordered_extent(inode, file_offset, start, len, disk_len, type, 0, BTRFS_COMPRESS_NONE); 
} int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type) { return __btrfs_add_ordered_extent(inode, file_offset, start, len, disk_len, type, 1, BTRFS_COMPRESS_NONE); } int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type, int compress_type) { return __btrfs_add_ordered_extent(inode, file_offset, start, len, disk_len, type, 0, compress_type); } /* * Add a struct btrfs_ordered_sum into the list of checksums to be inserted * when an ordered extent is finished. If the list covers more than one * ordered extent, it is split across multiples. */ void btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum) { struct btrfs_ordered_inode_tree *tree; tree = &BTRFS_I(inode)->ordered_tree; spin_lock(&tree->lock); list_add_tail(&sum->list, &entry->list); spin_unlock(&tree->lock); } /* * this is used to account for finished IO across a given range * of the file. The IO may span ordered extents. If * a given ordered_extent is completely done, 1 is returned, otherwise * 0. * * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used * to make sure this function only returns 1 once for a given ordered extent. * * file_offset is updated to one byte past the range that is recorded as * complete. This allows you to walk forward in the file. 
*/ int btrfs_dec_test_first_ordered_pending(struct inode *inode, struct btrfs_ordered_extent **cached, u64 *file_offset, u64 io_size) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; int ret; u64 dec_end; u64 dec_start; u64 to_dec; tree = &BTRFS_I(inode)->ordered_tree; spin_lock(&tree->lock); node = tree_search(tree, *file_offset); if (!node) { ret = 1; goto out; } entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); if (!offset_in_entry(entry, *file_offset)) { ret = 1; goto out; } dec_start = max(*file_offset, entry->file_offset); dec_end = min(*file_offset + io_size, entry->file_offset + entry->len); *file_offset = dec_end; if (dec_start > dec_end) { printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n", (unsigned long long)dec_start, (unsigned long long)dec_end); } to_dec = dec_end - dec_start; if (to_dec > entry->bytes_left) { printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n", (unsigned long long)entry->bytes_left, (unsigned long long)to_dec); } entry->bytes_left -= to_dec; if (entry->bytes_left == 0) ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); else ret = 1; out: if (!ret && cached && entry) { *cached = entry; atomic_inc(&entry->refs); } spin_unlock(&tree->lock); return ret == 0; } /* * this is used to account for finished IO across a given range * of the file. The IO should not span ordered extents. If * a given ordered_extent is completely done, 1 is returned, otherwise * 0. * * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used * to make sure this function only returns 1 once for a given ordered extent. 
*/ int btrfs_dec_test_ordered_pending(struct inode *inode, struct btrfs_ordered_extent **cached, u64 file_offset, u64 io_size) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; int ret; tree = &BTRFS_I(inode)->ordered_tree; spin_lock(&tree->lock); node = tree_search(tree, file_offset); if (!node) { ret = 1; goto out; } entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); if (!offset_in_entry(entry, file_offset)) { ret = 1; goto out; } if (io_size > entry->bytes_left) { printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n", (unsigned long long)entry->bytes_left, (unsigned long long)io_size); } entry->bytes_left -= io_size; if (entry->bytes_left == 0) ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); else ret = 1; out: if (!ret && cached && entry) { *cached = entry; atomic_inc(&entry->refs); } spin_unlock(&tree->lock); return ret == 0; } /* * used to drop a reference on an ordered extent. This will free * the extent if the last reference is dropped */ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) { struct list_head *cur; struct btrfs_ordered_sum *sum; trace_btrfs_ordered_extent_put(entry->inode, entry); if (atomic_dec_and_test(&entry->refs)) { while (!list_empty(&entry->list)) { cur = entry->list.next; sum = list_entry(cur, struct btrfs_ordered_sum, list); list_del(&sum->list); kfree(sum); } kfree(entry); } } /* * remove an ordered extent from the tree. No references are dropped * and you must wake_up entry->wait. You must hold the tree lock * while you call this function. 
*/ static void __btrfs_remove_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry) { struct btrfs_ordered_inode_tree *tree; struct btrfs_root *root = BTRFS_I(inode)->root; struct rb_node *node; tree = &BTRFS_I(inode)->ordered_tree; node = &entry->rb_node; rb_erase(node, &tree->tree); tree->last = NULL; set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); spin_lock(&root->fs_info->ordered_extent_lock); list_del_init(&entry->root_extent_list); trace_btrfs_ordered_extent_remove(inode, entry); /* * we have no more ordered extents for this inode and * no dirty pages. We can safely remove it from the * list of ordered extents */ if (RB_EMPTY_ROOT(&tree->tree) && !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) { list_del_init(&BTRFS_I(inode)->ordered_operations); } spin_unlock(&root->fs_info->ordered_extent_lock); } /* * remove an ordered extent from the tree. No references are dropped * but any waiters are woken. */ void btrfs_remove_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry) { struct btrfs_ordered_inode_tree *tree; tree = &BTRFS_I(inode)->ordered_tree; spin_lock(&tree->lock); __btrfs_remove_ordered_extent(inode, entry); spin_unlock(&tree->lock); wake_up(&entry->wait); } /* * wait for all the ordered extents in a root. This is done when balancing * space between drives. 
*/ void btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only, int delay_iput) { struct list_head splice; struct list_head *cur; struct btrfs_ordered_extent *ordered; struct inode *inode; INIT_LIST_HEAD(&splice); spin_lock(&root->fs_info->ordered_extent_lock); list_splice_init(&root->fs_info->ordered_extents, &splice); while (!list_empty(&splice)) { cur = splice.next; ordered = list_entry(cur, struct btrfs_ordered_extent, root_extent_list); if (nocow_only && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) && !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) { list_move(&ordered->root_extent_list, &root->fs_info->ordered_extents); cond_resched_lock(&root->fs_info->ordered_extent_lock); continue; } list_del_init(&ordered->root_extent_list); atomic_inc(&ordered->refs); /* * the inode may be getting freed (in sys_unlink path). */ inode = igrab(ordered->inode); spin_unlock(&root->fs_info->ordered_extent_lock); if (inode) { btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); if (delay_iput) btrfs_add_delayed_iput(inode); else iput(inode); } else { btrfs_put_ordered_extent(ordered); } spin_lock(&root->fs_info->ordered_extent_lock); } spin_unlock(&root->fs_info->ordered_extent_lock); } /* * this is used during transaction commit to write all the inodes * added to the ordered operation list. These files must be fully on * disk before the transaction commits. * * we have two modes here, one is to just start the IO via filemap_flush * and the other is to wait for all the io. 
When we wait, we have an * extra check to make sure the ordered operation list really is empty * before we return */ void btrfs_run_ordered_operations(struct btrfs_root *root, int wait) { struct btrfs_inode *btrfs_inode; struct inode *inode; struct list_head splice; INIT_LIST_HEAD(&splice); mutex_lock(&root->fs_info->ordered_operations_mutex); spin_lock(&root->fs_info->ordered_extent_lock); again: list_splice_init(&root->fs_info->ordered_operations, &splice); while (!list_empty(&splice)) { btrfs_inode = list_entry(splice.next, struct btrfs_inode, ordered_operations); inode = &btrfs_inode->vfs_inode; list_del_init(&btrfs_inode->ordered_operations); /* * the inode may be getting freed (in sys_unlink path). */ inode = igrab(inode); if (!wait && inode) { list_add_tail(&BTRFS_I(inode)->ordered_operations, &root->fs_info->ordered_operations); } spin_unlock(&root->fs_info->ordered_extent_lock); if (inode) { if (wait) btrfs_wait_ordered_range(inode, 0, (u64)-1); else filemap_flush(inode->i_mapping); btrfs_add_delayed_iput(inode); } cond_resched(); spin_lock(&root->fs_info->ordered_extent_lock); } if (wait && !list_empty(&root->fs_info->ordered_operations)) goto again; spin_unlock(&root->fs_info->ordered_extent_lock); mutex_unlock(&root->fs_info->ordered_operations_mutex); } /* * Used to start IO or wait for a given ordered extent to finish. * * If wait is one, this effectively waits on page writeback for all the pages * in the extent, and it waits on the io completion code to insert * metadata into the btree corresponding to the extent */ void btrfs_start_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry, int wait) { u64 start = entry->file_offset; u64 end = start + entry->len - 1; trace_btrfs_ordered_extent_start(inode, entry); /* * pages in the range can be dirty, clean or writeback. 
We * start IO on any dirty ones so the wait doesn't stall waiting * for pdflush to find them */ if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags)) filemap_fdatawrite_range(inode->i_mapping, start, end); if (wait) { wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); } } /* * Used to wait on ordered extents across a large range of bytes. */ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) { u64 end; u64 orig_end; struct btrfs_ordered_extent *ordered; int found; if (start + len < start) { orig_end = INT_LIMIT(loff_t); } else { orig_end = start + len - 1; if (orig_end > INT_LIMIT(loff_t)) orig_end = INT_LIMIT(loff_t); } again: /* start IO across the range first to instantiate any delalloc * extents */ filemap_fdatawrite_range(inode->i_mapping, start, orig_end); /* The compression code will leave pages locked but return from * writepage without setting the page writeback. Starting again * with WB_SYNC_ALL will end up waiting for the IO to actually start. */ filemap_fdatawrite_range(inode->i_mapping, start, orig_end); filemap_fdatawait_range(inode->i_mapping, start, orig_end); end = orig_end; found = 0; while (1) { ordered = btrfs_lookup_first_ordered_extent(inode, end); if (!ordered) break; if (ordered->file_offset > orig_end) { btrfs_put_ordered_extent(ordered); break; } if (ordered->file_offset + ordered->len < start) { btrfs_put_ordered_extent(ordered); break; } found++; btrfs_start_ordered_extent(inode, ordered, 1); end = ordered->file_offset; btrfs_put_ordered_extent(ordered); if (end == 0 || end == start) break; end--; } if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end, EXTENT_DELALLOC, 0, NULL)) { schedule_timeout(1); goto again; } } /* * find an ordered extent corresponding to file_offset. 
return NULL if * nothing is found, otherwise take a reference on the extent and return it */ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, u64 file_offset) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; tree = &BTRFS_I(inode)->ordered_tree; spin_lock(&tree->lock); node = tree_search(tree, file_offset); if (!node) goto out; entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); if (!offset_in_entry(entry, file_offset)) entry = NULL; if (entry) atomic_inc(&entry->refs); out: spin_unlock(&tree->lock); return entry; } /* Since the DIO code tries to lock a wide area we need to look for any ordered * extents that exist in the range, rather than just the start of the range. */ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, u64 file_offset, u64 len) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; tree = &BTRFS_I(inode)->ordered_tree; spin_lock(&tree->lock); node = tree_search(tree, file_offset); if (!node) { node = tree_search(tree, file_offset + len); if (!node) goto out; } while (1) { entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); if (range_overlaps(entry, file_offset, len)) break; if (entry->file_offset >= file_offset + len) { entry = NULL; break; } entry = NULL; node = rb_next(node); if (!node) break; } out: if (entry) atomic_inc(&entry->refs); spin_unlock(&tree->lock); return entry; } /* * lookup and return any extent before 'file_offset'. 
NULL is returned * if none is found */ struct btrfs_ordered_extent * btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; tree = &BTRFS_I(inode)->ordered_tree; spin_lock(&tree->lock); node = tree_search(tree, file_offset); if (!node) goto out; entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); atomic_inc(&entry->refs); out: spin_unlock(&tree->lock); return entry; } /* * After an extent is done, call this to conditionally update the on disk * i_size. i_size is updated to cover any fully written part of the file. */ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, struct btrfs_ordered_extent *ordered) { struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; u64 disk_i_size; u64 new_i_size; u64 i_size_test; u64 i_size = i_size_read(inode); struct rb_node *node; struct rb_node *prev = NULL; struct btrfs_ordered_extent *test; int ret = 1; if (ordered) offset = entry_end(ordered); else offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize); spin_lock(&tree->lock); disk_i_size = BTRFS_I(inode)->disk_i_size; /* truncate file */ if (disk_i_size > i_size) { BTRFS_I(inode)->disk_i_size = i_size; ret = 0; goto out; } /* * if the disk i_size is already at the inode->i_size, or * this ordered extent is inside the disk i_size, we're done */ if (disk_i_size == i_size || offset <= disk_i_size) { goto out; } /* * we can't update the disk_isize if there are delalloc bytes * between disk_i_size and this ordered extent */ if (test_range_bit(io_tree, disk_i_size, offset - 1, EXTENT_DELALLOC, 0, NULL)) { goto out; } /* * walk backward from this ordered extent to disk_i_size. 
* if we find an ordered extent then we can't update disk i_size * yet */ if (ordered) { node = rb_prev(&ordered->rb_node); } else { prev = tree_search(tree, offset); /* * we insert file extents without involving ordered struct, * so there should be no ordered struct cover this offset */ if (prev) { test = rb_entry(prev, struct btrfs_ordered_extent, rb_node); BUG_ON(offset_in_entry(test, offset)); } node = prev; } while (node) { test = rb_entry(node, struct btrfs_ordered_extent, rb_node); if (test->file_offset + test->len <= disk_i_size) break; if (test->file_offset >= i_size) break; if (test->file_offset >= disk_i_size) goto out; node = rb_prev(node); } new_i_size = min_t(u64, offset, i_size); /* * at this point, we know we can safely update i_size to at least * the offset from this ordered extent. But, we need to * walk forward and see if ios from higher up in the file have * finished. */ if (ordered) { node = rb_next(&ordered->rb_node); } else { if (prev) node = rb_next(prev); else node = rb_first(&tree->tree); } i_size_test = 0; if (node) { /* * do we have an area where IO might have finished * between our ordered extent and the next one. */ test = rb_entry(node, struct btrfs_ordered_extent, rb_node); if (test->file_offset > offset) i_size_test = test->file_offset; } else { i_size_test = i_size; } /* * i_size_test is the end of a region after this ordered * extent where there are no ordered extents. As long as there * are no delalloc bytes in this area, it is safe to update * disk_i_size to the end of the region. 
*/ if (i_size_test > offset && !test_range_bit(io_tree, offset, i_size_test - 1, EXTENT_DELALLOC, 0, NULL)) { new_i_size = min_t(u64, i_size_test, i_size); } BTRFS_I(inode)->disk_i_size = new_i_size; ret = 0; out: /* * we need to remove the ordered extent with the tree lock held * so that other people calling this function don't find our fully * processed ordered entry and skip updating the i_size */ if (ordered) __btrfs_remove_ordered_extent(inode, ordered); spin_unlock(&tree->lock); if (ordered) wake_up(&ordered->wait); return ret; } /* * search the ordered extents for one corresponding to 'offset' and * try to find a checksum. This is used because we allow pages to * be reclaimed before their checksum is actually put into the btree */ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum) { struct btrfs_ordered_sum *ordered_sum; struct btrfs_sector_sum *sector_sums; struct btrfs_ordered_extent *ordered; struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; unsigned long num_sectors; unsigned long i; u32 sectorsize = BTRFS_I(inode)->root->sectorsize; int ret = 1; ordered = btrfs_lookup_ordered_extent(inode, offset); if (!ordered) return 1; spin_lock(&tree->lock); list_for_each_entry_reverse(ordered_sum, &ordered->list, list) { if (disk_bytenr >= ordered_sum->bytenr) { num_sectors = ordered_sum->len / sectorsize; sector_sums = ordered_sum->sums; for (i = 0; i < num_sectors; i++) { if (sector_sums[i].bytenr == disk_bytenr) { *sum = sector_sums[i].sum; ret = 0; goto out; } } } } out: spin_unlock(&tree->lock); btrfs_put_ordered_extent(ordered); return ret; } /* * add a given inode to the list of inodes that must be fully on * disk before a transaction commit finishes. * * This basically gives us the ext3 style data=ordered mode, and it is mostly * used to make sure renamed files are fully on disk. * * It is a noop if the inode is already fully on disk. 
* * If trans is not null, we'll do a friendly check for a transaction that * is already flushing things and force the IO down ourselves. */ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode) { u64 last_mod; last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans); /* * if this file hasn't been changed since the last transaction * commit, we can safely return without doing anything */ if (last_mod < root->fs_info->last_trans_committed) return; /* * the transaction is already committing. Just start the IO and * don't bother with all of this list nonsense */ if (trans && root->fs_info->running_transaction->blocked) { btrfs_wait_ordered_range(inode, 0, (u64)-1); return; } spin_lock(&root->fs_info->ordered_extent_lock); if (list_empty(&BTRFS_I(inode)->ordered_operations)) { list_add_tail(&BTRFS_I(inode)->ordered_operations, &root->fs_info->ordered_operations); } spin_unlock(&root->fs_info->ordered_extent_lock); }
gpl-2.0
xdajog/kernel_fx3q_aosp
drivers/gpio/gpio-mxc.c
5059
12598
/* * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de> * Copyright 2008 Juergen Beisert, kernel@pengutronix.de * * Based on code from Freescale, * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/basic_mmio_gpio.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/module.h> #include <asm-generic/bug.h> #include <asm/mach/irq.h> #define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START) enum mxc_gpio_hwtype { IMX1_GPIO, /* runs on i.mx1 */ IMX21_GPIO, /* runs on i.mx21 and i.mx27 */ IMX31_GPIO, /* runs on all other i.mx */ }; /* device type dependent stuff */ struct mxc_gpio_hwdata { unsigned dr_reg; unsigned gdir_reg; unsigned psr_reg; unsigned icr1_reg; unsigned icr2_reg; unsigned imr_reg; unsigned isr_reg; unsigned low_level; unsigned high_level; unsigned rise_edge; unsigned fall_edge; }; struct mxc_gpio_port { struct list_head node; void __iomem *base; int irq; int irq_high; int virtual_irq_start; struct bgpio_chip bgc; u32 both_edges; }; static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = { .dr_reg = 0x1c, .gdir_reg = 0x00, 
.psr_reg = 0x24, .icr1_reg = 0x28, .icr2_reg = 0x2c, .imr_reg = 0x30, .isr_reg = 0x34, .low_level = 0x03, .high_level = 0x02, .rise_edge = 0x00, .fall_edge = 0x01, }; static struct mxc_gpio_hwdata imx31_gpio_hwdata = { .dr_reg = 0x00, .gdir_reg = 0x04, .psr_reg = 0x08, .icr1_reg = 0x0c, .icr2_reg = 0x10, .imr_reg = 0x14, .isr_reg = 0x18, .low_level = 0x00, .high_level = 0x01, .rise_edge = 0x02, .fall_edge = 0x03, }; static enum mxc_gpio_hwtype mxc_gpio_hwtype; static struct mxc_gpio_hwdata *mxc_gpio_hwdata; #define GPIO_DR (mxc_gpio_hwdata->dr_reg) #define GPIO_GDIR (mxc_gpio_hwdata->gdir_reg) #define GPIO_PSR (mxc_gpio_hwdata->psr_reg) #define GPIO_ICR1 (mxc_gpio_hwdata->icr1_reg) #define GPIO_ICR2 (mxc_gpio_hwdata->icr2_reg) #define GPIO_IMR (mxc_gpio_hwdata->imr_reg) #define GPIO_ISR (mxc_gpio_hwdata->isr_reg) #define GPIO_INT_LOW_LEV (mxc_gpio_hwdata->low_level) #define GPIO_INT_HIGH_LEV (mxc_gpio_hwdata->high_level) #define GPIO_INT_RISE_EDGE (mxc_gpio_hwdata->rise_edge) #define GPIO_INT_FALL_EDGE (mxc_gpio_hwdata->fall_edge) #define GPIO_INT_NONE 0x4 static struct platform_device_id mxc_gpio_devtype[] = { { .name = "imx1-gpio", .driver_data = IMX1_GPIO, }, { .name = "imx21-gpio", .driver_data = IMX21_GPIO, }, { .name = "imx31-gpio", .driver_data = IMX31_GPIO, }, { /* sentinel */ } }; static const struct of_device_id mxc_gpio_dt_ids[] = { { .compatible = "fsl,imx1-gpio", .data = &mxc_gpio_devtype[IMX1_GPIO], }, { .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], }, { .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], }, { /* sentinel */ } }; /* * MX2 has one interrupt *for all* gpio ports. The list is used * to save the references to all ports, so that mx2_gpio_irq_handler * can walk through all interrupt status registers. 
*/ static LIST_HEAD(mxc_gpio_ports); /* Note: This driver assumes 32 GPIOs are handled in one register */ static int gpio_set_irq_type(struct irq_data *d, u32 type) { u32 gpio = irq_to_gpio(d->irq); struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mxc_gpio_port *port = gc->private; u32 bit, val; int edge; void __iomem *reg = port->base; port->both_edges &= ~(1 << (gpio & 31)); switch (type) { case IRQ_TYPE_EDGE_RISING: edge = GPIO_INT_RISE_EDGE; break; case IRQ_TYPE_EDGE_FALLING: edge = GPIO_INT_FALL_EDGE; break; case IRQ_TYPE_EDGE_BOTH: val = gpio_get_value(gpio); if (val) { edge = GPIO_INT_LOW_LEV; pr_debug("mxc: set GPIO %d to low trigger\n", gpio); } else { edge = GPIO_INT_HIGH_LEV; pr_debug("mxc: set GPIO %d to high trigger\n", gpio); } port->both_edges |= 1 << (gpio & 31); break; case IRQ_TYPE_LEVEL_LOW: edge = GPIO_INT_LOW_LEV; break; case IRQ_TYPE_LEVEL_HIGH: edge = GPIO_INT_HIGH_LEV; break; default: return -EINVAL; } reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */ bit = gpio & 0xf; val = readl(reg) & ~(0x3 << (bit << 1)); writel(val | (edge << (bit << 1)), reg); writel(1 << (gpio & 0x1f), port->base + GPIO_ISR); return 0; } static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio) { void __iomem *reg = port->base; u32 bit, val; int edge; reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */ bit = gpio & 0xf; val = readl(reg); edge = (val >> (bit << 1)) & 3; val &= ~(0x3 << (bit << 1)); if (edge == GPIO_INT_HIGH_LEV) { edge = GPIO_INT_LOW_LEV; pr_debug("mxc: switch GPIO %d to low trigger\n", gpio); } else if (edge == GPIO_INT_LOW_LEV) { edge = GPIO_INT_HIGH_LEV; pr_debug("mxc: switch GPIO %d to high trigger\n", gpio); } else { pr_err("mxc: invalid configuration for GPIO %d: %x\n", gpio, edge); return; } writel(val | (edge << (bit << 1)), reg); } /* handle 32 interrupts in one status register */ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat) { u32 gpio_irq_no_base = 
port->virtual_irq_start; while (irq_stat != 0) { int irqoffset = fls(irq_stat) - 1; if (port->both_edges & (1 << irqoffset)) mxc_flip_edge(port, irqoffset); generic_handle_irq(gpio_irq_no_base + irqoffset); irq_stat &= ~(1 << irqoffset); } } /* MX1 and MX3 has one interrupt *per* gpio port */ static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) { u32 irq_stat; struct mxc_gpio_port *port = irq_get_handler_data(irq); struct irq_chip *chip = irq_get_chip(irq); chained_irq_enter(chip, desc); irq_stat = readl(port->base + GPIO_ISR) & readl(port->base + GPIO_IMR); mxc_gpio_irq_handler(port, irq_stat); chained_irq_exit(chip, desc); } /* MX2 has one interrupt *for all* gpio ports */ static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc) { u32 irq_msk, irq_stat; struct mxc_gpio_port *port; /* walk through all interrupt status registers */ list_for_each_entry(port, &mxc_gpio_ports, node) { irq_msk = readl(port->base + GPIO_IMR); if (!irq_msk) continue; irq_stat = readl(port->base + GPIO_ISR) & irq_msk; if (irq_stat) mxc_gpio_irq_handler(port, irq_stat); } } /* * Set interrupt number "irq" in the GPIO as a wake-up source. * While system is running, all registered GPIO interrupts need to have * wake-up enabled. When system is suspended, only selected GPIO interrupts * need to have wake-up enabled. * @param irq interrupt source number * @param enable enable as wake-up if equal to non-zero * @return This function returns 0 on success. 
*/ static int gpio_set_wake_irq(struct irq_data *d, u32 enable) { u32 gpio = irq_to_gpio(d->irq); u32 gpio_idx = gpio & 0x1F; struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mxc_gpio_port *port = gc->private; if (enable) { if (port->irq_high && (gpio_idx >= 16)) enable_irq_wake(port->irq_high); else enable_irq_wake(port->irq); } else { if (port->irq_high && (gpio_idx >= 16)) disable_irq_wake(port->irq_high); else disable_irq_wake(port->irq); } return 0; } static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port) { struct irq_chip_generic *gc; struct irq_chip_type *ct; gc = irq_alloc_generic_chip("gpio-mxc", 1, port->virtual_irq_start, port->base, handle_level_irq); gc->private = port; ct = gc->chip_types; ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_set_type = gpio_set_irq_type; ct->chip.irq_set_wake = gpio_set_wake_irq; ct->regs.ack = GPIO_ISR; ct->regs.mask = GPIO_IMR; irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0); } static void __devinit mxc_gpio_get_hw(struct platform_device *pdev) { const struct of_device_id *of_id = of_match_device(mxc_gpio_dt_ids, &pdev->dev); enum mxc_gpio_hwtype hwtype; if (of_id) pdev->id_entry = of_id->data; hwtype = pdev->id_entry->driver_data; if (mxc_gpio_hwtype) { /* * The driver works with a reasonable presupposition, * that is all gpio ports must be the same type when * running on one soc. 
*/ BUG_ON(mxc_gpio_hwtype != hwtype); return; } if (hwtype == IMX31_GPIO) mxc_gpio_hwdata = &imx31_gpio_hwdata; else mxc_gpio_hwdata = &imx1_imx21_gpio_hwdata; mxc_gpio_hwtype = hwtype; } static int mxc_gpio_to_irq(struct gpio_chip *gc, unsigned offset) { struct bgpio_chip *bgc = to_bgpio_chip(gc); struct mxc_gpio_port *port = container_of(bgc, struct mxc_gpio_port, bgc); return port->virtual_irq_start + offset; } static int __devinit mxc_gpio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mxc_gpio_port *port; struct resource *iores; int err; mxc_gpio_get_hw(pdev); port = kzalloc(sizeof(struct mxc_gpio_port), GFP_KERNEL); if (!port) return -ENOMEM; iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iores) { err = -ENODEV; goto out_kfree; } if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) { err = -EBUSY; goto out_kfree; } port->base = ioremap(iores->start, resource_size(iores)); if (!port->base) { err = -ENOMEM; goto out_release_mem; } port->irq_high = platform_get_irq(pdev, 1); port->irq = platform_get_irq(pdev, 0); if (port->irq < 0) { err = -EINVAL; goto out_iounmap; } /* disable the interrupt and clear the status */ writel(0, port->base + GPIO_IMR); writel(~0, port->base + GPIO_ISR); if (mxc_gpio_hwtype == IMX21_GPIO) { /* setup one handler for all GPIO interrupts */ if (pdev->id == 0) irq_set_chained_handler(port->irq, mx2_gpio_irq_handler); } else { /* setup one handler for each entry */ irq_set_chained_handler(port->irq, mx3_gpio_irq_handler); irq_set_handler_data(port->irq, port); if (port->irq_high > 0) { /* setup handler for GPIO 16 to 31 */ irq_set_chained_handler(port->irq_high, mx3_gpio_irq_handler); irq_set_handler_data(port->irq_high, port); } } err = bgpio_init(&port->bgc, &pdev->dev, 4, port->base + GPIO_PSR, port->base + GPIO_DR, NULL, port->base + GPIO_GDIR, NULL, false); if (err) goto out_iounmap; port->bgc.gc.to_irq = mxc_gpio_to_irq; port->bgc.gc.base = pdev->id * 32; 
port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir); port->bgc.data = port->bgc.read_reg(port->bgc.reg_set); err = gpiochip_add(&port->bgc.gc); if (err) goto out_bgpio_remove; /* * In dt case, we use gpio number range dynamically * allocated by gpio core. */ port->virtual_irq_start = MXC_GPIO_IRQ_START + (np ? port->bgc.gc.base : pdev->id * 32); /* gpio-mxc can be a generic irq chip */ mxc_gpio_init_gc(port); list_add_tail(&port->node, &mxc_gpio_ports); return 0; out_bgpio_remove: bgpio_remove(&port->bgc); out_iounmap: iounmap(port->base); out_release_mem: release_mem_region(iores->start, resource_size(iores)); out_kfree: kfree(port); dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err); return err; } static struct platform_driver mxc_gpio_driver = { .driver = { .name = "gpio-mxc", .owner = THIS_MODULE, .of_match_table = mxc_gpio_dt_ids, }, .probe = mxc_gpio_probe, .id_table = mxc_gpio_devtype, }; static int __init gpio_mxc_init(void) { return platform_driver_register(&mxc_gpio_driver); } postcore_initcall(gpio_mxc_init); MODULE_AUTHOR("Freescale Semiconductor, " "Daniel Mack <danielncaiaq.de>, " "Juergen Beisert <kernel@pengutronix.de>"); MODULE_DESCRIPTION("Freescale MXC GPIO"); MODULE_LICENSE("GPL");
gpl-2.0
CandyDevices/kernel_htc_msm8974
drivers/video/omap2/omapfb/omapfb-sysfs.c
5059
12152
/* * linux/drivers/video/omap2/omapfb-sysfs.c * * Copyright (C) 2008 Nokia Corporation * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> * * Some code and ideas taken from drivers/video/omap/ driver * by Imre Deak. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/fb.h> #include <linux/sysfs.h> #include <linux/device.h> #include <linux/uaccess.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/omapfb.h> #include <video/omapdss.h> #include <plat/vrfb.h> #include "omapfb.h" static ssize_t show_rotate_type(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->rotation_type); } static ssize_t store_rotate_type(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_mem_region *rg; int rot_type; int r; r = kstrtoint(buf, 0, &rot_type); if (r) return r; if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB) return -EINVAL; if (!lock_fb_info(fbi)) return -ENODEV; r = 0; if (rot_type == ofbi->rotation_type) goto out; rg = omapfb_get_mem_region(ofbi->region); if (rg->size) { r = -EBUSY; goto put_region; } ofbi->rotation_type = rot_type; /* * Since the VRAM for this FB is not allocated at the moment we don't * need to do 
any further parameter checking at this point. */ put_region: omapfb_put_mem_region(rg); out: unlock_fb_info(fbi); return r ? r : count; } static ssize_t show_mirror(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->mirror); } static ssize_t store_mirror(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); bool mirror; int r; struct fb_var_screeninfo new_var; r = strtobool(buf, &mirror); if (r) return r; if (!lock_fb_info(fbi)) return -ENODEV; ofbi->mirror = mirror; omapfb_get_mem_region(ofbi->region); memcpy(&new_var, &fbi->var, sizeof(new_var)); r = check_fb_var(fbi, &new_var); if (r) goto out; memcpy(&fbi->var, &new_var, sizeof(fbi->var)); set_fb_fix(fbi); r = omapfb_apply_changes(fbi, 0); if (r) goto out; r = count; out: omapfb_put_mem_region(ofbi->region); unlock_fb_info(fbi); return r; } static ssize_t show_overlays(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; ssize_t l = 0; int t; if (!lock_fb_info(fbi)) return -ENODEV; omapfb_lock(fbdev); for (t = 0; t < ofbi->num_overlays; t++) { struct omap_overlay *ovl = ofbi->overlays[t]; int ovlnum; for (ovlnum = 0; ovlnum < fbdev->num_overlays; ++ovlnum) if (ovl == fbdev->overlays[ovlnum]) break; l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", t == 0 ? 
"" : ",", ovlnum); } l += snprintf(buf + l, PAGE_SIZE - l, "\n"); omapfb_unlock(fbdev); unlock_fb_info(fbi); return l; } static struct omapfb_info *get_overlay_fb(struct omapfb2_device *fbdev, struct omap_overlay *ovl) { int i, t; for (i = 0; i < fbdev->num_fbs; i++) { struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]); for (t = 0; t < ofbi->num_overlays; t++) { if (ofbi->overlays[t] == ovl) return ofbi; } } return NULL; } static ssize_t store_overlays(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omap_overlay *ovls[OMAPFB_MAX_OVL_PER_FB]; struct omap_overlay *ovl; int num_ovls, r, i; int len; bool added = false; num_ovls = 0; len = strlen(buf); if (buf[len - 1] == '\n') len = len - 1; if (!lock_fb_info(fbi)) return -ENODEV; omapfb_lock(fbdev); if (len > 0) { char *p = (char *)buf; int ovlnum; while (p < buf + len) { int found; if (num_ovls == OMAPFB_MAX_OVL_PER_FB) { r = -EINVAL; goto out; } ovlnum = simple_strtoul(p, &p, 0); if (ovlnum > fbdev->num_overlays) { r = -EINVAL; goto out; } found = 0; for (i = 0; i < num_ovls; ++i) { if (ovls[i] == fbdev->overlays[ovlnum]) { found = 1; break; } } if (!found) ovls[num_ovls++] = fbdev->overlays[ovlnum]; p++; } } for (i = 0; i < num_ovls; ++i) { struct omapfb_info *ofbi2 = get_overlay_fb(fbdev, ovls[i]); if (ofbi2 && ofbi2 != ofbi) { dev_err(fbdev->dev, "overlay already in use\n"); r = -EINVAL; goto out; } } /* detach unused overlays */ for (i = 0; i < ofbi->num_overlays; ++i) { int t, found; ovl = ofbi->overlays[i]; found = 0; for (t = 0; t < num_ovls; ++t) { if (ovl == ovls[t]) { found = 1; break; } } if (found) continue; DBG("detaching %d\n", ofbi->overlays[i]->id); omapfb_get_mem_region(ofbi->region); omapfb_overlay_enable(ovl, 0); if (ovl->manager) ovl->manager->apply(ovl->manager); omapfb_put_mem_region(ofbi->region); for (t = i + 1; t < 
ofbi->num_overlays; t++) { ofbi->rotation[t-1] = ofbi->rotation[t]; ofbi->overlays[t-1] = ofbi->overlays[t]; } ofbi->num_overlays--; i--; } for (i = 0; i < num_ovls; ++i) { int t, found; ovl = ovls[i]; found = 0; for (t = 0; t < ofbi->num_overlays; ++t) { if (ovl == ofbi->overlays[t]) { found = 1; break; } } if (found) continue; ofbi->rotation[ofbi->num_overlays] = 0; ofbi->overlays[ofbi->num_overlays++] = ovl; added = true; } if (added) { omapfb_get_mem_region(ofbi->region); r = omapfb_apply_changes(fbi, 0); omapfb_put_mem_region(ofbi->region); if (r) goto out; } r = count; out: omapfb_unlock(fbdev); unlock_fb_info(fbi); return r; } static ssize_t show_overlays_rotate(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); ssize_t l = 0; int t; if (!lock_fb_info(fbi)) return -ENODEV; for (t = 0; t < ofbi->num_overlays; t++) { l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", t == 0 ? "" : ",", ofbi->rotation[t]); } l += snprintf(buf + l, PAGE_SIZE - l, "\n"); unlock_fb_info(fbi); return l; } static ssize_t store_overlays_rotate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); int num_ovls = 0, r, i; int len; bool changed = false; u8 rotation[OMAPFB_MAX_OVL_PER_FB]; len = strlen(buf); if (buf[len - 1] == '\n') len = len - 1; if (!lock_fb_info(fbi)) return -ENODEV; if (len > 0) { char *p = (char *)buf; while (p < buf + len) { int rot; if (num_ovls == ofbi->num_overlays) { r = -EINVAL; goto out; } rot = simple_strtoul(p, &p, 0); if (rot < 0 || rot > 3) { r = -EINVAL; goto out; } if (ofbi->rotation[num_ovls] != rot) changed = true; rotation[num_ovls++] = rot; p++; } } if (num_ovls != ofbi->num_overlays) { r = -EINVAL; goto out; } if (changed) { for (i = 0; i < num_ovls; ++i) ofbi->rotation[i] = rotation[i]; omapfb_get_mem_region(ofbi->region); r = 
omapfb_apply_changes(fbi, 0); omapfb_put_mem_region(ofbi->region); if (r) goto out; /* FIXME error handling? */ } r = count; out: unlock_fb_info(fbi); return r; } static ssize_t show_size(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region->size); } static ssize_t store_size(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omapfb2_mem_region *rg; unsigned long size; int r; int i; r = kstrtoul(buf, 0, &size); if (r) return r; size = PAGE_ALIGN(size); if (!lock_fb_info(fbi)) return -ENODEV; rg = ofbi->region; down_write_nested(&rg->lock, rg->id); atomic_inc(&rg->lock_count); if (atomic_read(&rg->map_count)) { r = -EBUSY; goto out; } for (i = 0; i < fbdev->num_fbs; i++) { struct omapfb_info *ofbi2 = FB2OFB(fbdev->fbs[i]); int j; if (ofbi2->region != rg) continue; for (j = 0; j < ofbi2->num_overlays; j++) { struct omap_overlay *ovl; ovl = ofbi2->overlays[j]; if (ovl->is_enabled(ovl)) { r = -EBUSY; goto out; } } } if (size != ofbi->region->size) { r = omapfb_realloc_fbmem(fbi, size, ofbi->region->type); if (r) { dev_err(dev, "realloc fbmem failed\n"); goto out; } } r = count; out: atomic_dec(&rg->lock_count); up_write(&rg->lock); unlock_fb_info(fbi); return r; } static ssize_t show_phys(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%0x\n", ofbi->region->paddr); } static ssize_t show_virt(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr); } static ssize_t 
show_upd_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); enum omapfb_update_mode mode; int r; r = omapfb_get_update_mode(fbi, &mode); if (r) return r; return snprintf(buf, PAGE_SIZE, "%u\n", (unsigned)mode); } static ssize_t store_upd_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); unsigned mode; int r; r = kstrtouint(buf, 0, &mode); if (r) return r; r = omapfb_set_update_mode(fbi, mode); if (r) return r; return count; } static struct device_attribute omapfb_attrs[] = { __ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type, store_rotate_type), __ATTR(mirror, S_IRUGO | S_IWUSR, show_mirror, store_mirror), __ATTR(size, S_IRUGO | S_IWUSR, show_size, store_size), __ATTR(overlays, S_IRUGO | S_IWUSR, show_overlays, store_overlays), __ATTR(overlays_rotate, S_IRUGO | S_IWUSR, show_overlays_rotate, store_overlays_rotate), __ATTR(phys_addr, S_IRUGO, show_phys, NULL), __ATTR(virt_addr, S_IRUGO, show_virt, NULL), __ATTR(update_mode, S_IRUGO | S_IWUSR, show_upd_mode, store_upd_mode), }; int omapfb_create_sysfs(struct omapfb2_device *fbdev) { int i; int r; DBG("create sysfs for fbs\n"); for (i = 0; i < fbdev->num_fbs; i++) { int t; for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) { r = device_create_file(fbdev->fbs[i]->dev, &omapfb_attrs[t]); if (r) { dev_err(fbdev->dev, "failed to create sysfs " "file\n"); return r; } } } return 0; } void omapfb_remove_sysfs(struct omapfb2_device *fbdev) { int i, t; DBG("remove sysfs for fbs\n"); for (i = 0; i < fbdev->num_fbs; i++) { for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) device_remove_file(fbdev->fbs[i]->dev, &omapfb_attrs[t]); } }
gpl-2.0
synel/synergy2416-linux-kernel
arch/arm/mach-pxa/tosa-bt.c
8131
2935
/* * Bluetooth built-in chip control * * Copyright (c) 2008 Dmitry Baryshkov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/rfkill.h> #include <mach/tosa_bt.h> static void tosa_bt_on(struct tosa_bt_data *data) { gpio_set_value(data->gpio_reset, 0); gpio_set_value(data->gpio_pwr, 1); gpio_set_value(data->gpio_reset, 1); mdelay(20); gpio_set_value(data->gpio_reset, 0); } static void tosa_bt_off(struct tosa_bt_data *data) { gpio_set_value(data->gpio_reset, 1); mdelay(10); gpio_set_value(data->gpio_pwr, 0); gpio_set_value(data->gpio_reset, 0); } static int tosa_bt_set_block(void *data, bool blocked) { pr_info("BT_RADIO going: %s\n", blocked ? "off" : "on"); if (!blocked) { pr_info("TOSA_BT: going ON\n"); tosa_bt_on(data); } else { pr_info("TOSA_BT: going OFF\n"); tosa_bt_off(data); } return 0; } static const struct rfkill_ops tosa_bt_rfkill_ops = { .set_block = tosa_bt_set_block, }; static int tosa_bt_probe(struct platform_device *dev) { int rc; struct rfkill *rfk; struct tosa_bt_data *data = dev->dev.platform_data; rc = gpio_request(data->gpio_reset, "Bluetooth reset"); if (rc) goto err_reset; rc = gpio_direction_output(data->gpio_reset, 0); if (rc) goto err_reset_dir; rc = gpio_request(data->gpio_pwr, "Bluetooth power"); if (rc) goto err_pwr; rc = gpio_direction_output(data->gpio_pwr, 0); if (rc) goto err_pwr_dir; rfk = rfkill_alloc("tosa-bt", &dev->dev, RFKILL_TYPE_BLUETOOTH, &tosa_bt_rfkill_ops, data); if (!rfk) { rc = -ENOMEM; goto err_rfk_alloc; } rc = rfkill_register(rfk); if (rc) goto err_rfkill; platform_set_drvdata(dev, rfk); return 0; err_rfkill: rfkill_destroy(rfk); err_rfk_alloc: tosa_bt_off(data); err_pwr_dir: gpio_free(data->gpio_pwr); err_pwr: err_reset_dir: 
gpio_free(data->gpio_reset); err_reset: return rc; } static int __devexit tosa_bt_remove(struct platform_device *dev) { struct tosa_bt_data *data = dev->dev.platform_data; struct rfkill *rfk = platform_get_drvdata(dev); platform_set_drvdata(dev, NULL); if (rfk) { rfkill_unregister(rfk); rfkill_destroy(rfk); } rfk = NULL; tosa_bt_off(data); gpio_free(data->gpio_pwr); gpio_free(data->gpio_reset); return 0; } static struct platform_driver tosa_bt_driver = { .probe = tosa_bt_probe, .remove = __devexit_p(tosa_bt_remove), .driver = { .name = "tosa-bt", .owner = THIS_MODULE, }, }; static int __init tosa_bt_init(void) { return platform_driver_register(&tosa_bt_driver); } static void __exit tosa_bt_exit(void) { platform_driver_unregister(&tosa_bt_driver); } module_init(tosa_bt_init); module_exit(tosa_bt_exit);
gpl-2.0
SamueleCiprietti/nova_kernel
drivers/spi/omap_spi_100k.c
8387
15967
/* * OMAP7xx SPI 100k controller driver * Author: Fabrice Crohas <fcrohas@gmail.com> * from original omap1_mcspi driver * * Copyright (C) 2005, 2006 Nokia Corporation * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and * Juha Yrj�l� <juha.yrjola@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <plat/clock.h> #define OMAP1_SPI100K_MAX_FREQ 48000000 #define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12) #define SPI_SETUP1 0x00 #define SPI_SETUP2 0x02 #define SPI_CTRL 0x04 #define SPI_STATUS 0x06 #define SPI_TX_LSB 0x08 #define SPI_TX_MSB 0x0a #define SPI_RX_LSB 0x0c #define SPI_RX_MSB 0x0e #define SPI_SETUP1_INT_READ_ENABLE (1UL << 5) #define SPI_SETUP1_INT_WRITE_ENABLE (1UL << 4) #define SPI_SETUP1_CLOCK_DIVISOR(x) ((x) << 1) #define SPI_SETUP1_CLOCK_ENABLE (1UL << 0) #define SPI_SETUP2_ACTIVE_EDGE_FALLING (0UL << 0) #define SPI_SETUP2_ACTIVE_EDGE_RISING (1UL << 0) #define SPI_SETUP2_NEGATIVE_LEVEL (0UL << 5) #define SPI_SETUP2_POSITIVE_LEVEL (1UL << 5) #define SPI_SETUP2_LEVEL_TRIGGER 
(0UL << 10) #define SPI_SETUP2_EDGE_TRIGGER (1UL << 10) #define SPI_CTRL_SEN(x) ((x) << 7) #define SPI_CTRL_WORD_SIZE(x) (((x) - 1) << 2) #define SPI_CTRL_WR (1UL << 1) #define SPI_CTRL_RD (1UL << 0) #define SPI_STATUS_WE (1UL << 1) #define SPI_STATUS_RD (1UL << 0) #define WRITE 0 #define READ 1 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and * cache operations; better heuristics consider wordsize and bitrate. */ #define DMA_MIN_BYTES 8 #define SPI_RUNNING 0 #define SPI_SHUTDOWN 1 struct omap1_spi100k { struct work_struct work; /* lock protects queue and registers */ spinlock_t lock; struct list_head msg_queue; struct spi_master *master; struct clk *ick; struct clk *fck; /* Virtual base address of the controller */ void __iomem *base; /* State of the SPI */ unsigned int state; }; struct omap1_spi100k_cs { void __iomem *base; int word_len; }; static struct workqueue_struct *omap1_spi100k_wq; #define MOD_REG_BIT(val, mask, set) do { \ if (set) \ val |= mask; \ else \ val &= ~mask; \ } while (0) static void spi100k_enable_clock(struct spi_master *master) { unsigned int val; struct omap1_spi100k *spi100k = spi_master_get_devdata(master); /* enable SPI */ val = readw(spi100k->base + SPI_SETUP1); val |= SPI_SETUP1_CLOCK_ENABLE; writew(val, spi100k->base + SPI_SETUP1); } static void spi100k_disable_clock(struct spi_master *master) { unsigned int val; struct omap1_spi100k *spi100k = spi_master_get_devdata(master); /* disable SPI */ val = readw(spi100k->base + SPI_SETUP1); val &= ~SPI_SETUP1_CLOCK_ENABLE; writew(val, spi100k->base + SPI_SETUP1); } static void spi100k_write_data(struct spi_master *master, int len, int data) { struct omap1_spi100k *spi100k = spi_master_get_devdata(master); /* write 16-bit word, shifting 8-bit data if necessary */ if (len <= 8) { data <<= 8; len = 16; } spi100k_enable_clock(master); writew( data , spi100k->base + SPI_TX_MSB); writew(SPI_CTRL_SEN(0) | SPI_CTRL_WORD_SIZE(len) | SPI_CTRL_WR, spi100k->base + SPI_CTRL); /* 
Wait for bit ack send change */ while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE); udelay(1000); spi100k_disable_clock(master); } static int spi100k_read_data(struct spi_master *master, int len) { int dataH,dataL; struct omap1_spi100k *spi100k = spi_master_get_devdata(master); /* Always do at least 16 bits */ if (len <= 8) len = 16; spi100k_enable_clock(master); writew(SPI_CTRL_SEN(0) | SPI_CTRL_WORD_SIZE(len) | SPI_CTRL_RD, spi100k->base + SPI_CTRL); while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD); udelay(1000); dataL = readw(spi100k->base + SPI_RX_LSB); dataH = readw(spi100k->base + SPI_RX_MSB); spi100k_disable_clock(master); return dataL; } static void spi100k_open(struct spi_master *master) { /* get control of SPI */ struct omap1_spi100k *spi100k = spi_master_get_devdata(master); writew(SPI_SETUP1_INT_READ_ENABLE | SPI_SETUP1_INT_WRITE_ENABLE | SPI_SETUP1_CLOCK_DIVISOR(0), spi100k->base + SPI_SETUP1); /* configure clock and interrupts */ writew(SPI_SETUP2_ACTIVE_EDGE_FALLING | SPI_SETUP2_NEGATIVE_LEVEL | SPI_SETUP2_LEVEL_TRIGGER, spi100k->base + SPI_SETUP2); } static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable) { if (enable) writew(0x05fc, spi100k->base + SPI_CTRL); else writew(0x05fd, spi100k->base + SPI_CTRL); } static unsigned omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) { struct omap1_spi100k *spi100k; struct omap1_spi100k_cs *cs = spi->controller_state; unsigned int count, c; int word_len; spi100k = spi_master_get_devdata(spi->master); count = xfer->len; c = count; word_len = cs->word_len; if (word_len <= 8) { u8 *rx; const u8 *tx; rx = xfer->rx_buf; tx = xfer->tx_buf; do { c-=1; if (xfer->tx_buf != NULL) spi100k_write_data(spi->master, word_len, *tx++); if (xfer->rx_buf != NULL) *rx++ = spi100k_read_data(spi->master, word_len); } while(c); } else if (word_len <= 16) { u16 *rx; const u16 *tx; rx = xfer->rx_buf; tx = xfer->tx_buf; do { c-=2; if 
(xfer->tx_buf != NULL) spi100k_write_data(spi->master,word_len, *tx++); if (xfer->rx_buf != NULL) *rx++ = spi100k_read_data(spi->master,word_len); } while(c); } else if (word_len <= 32) { u32 *rx; const u32 *tx; rx = xfer->rx_buf; tx = xfer->tx_buf; do { c-=4; if (xfer->tx_buf != NULL) spi100k_write_data(spi->master,word_len, *tx); if (xfer->rx_buf != NULL) *rx = spi100k_read_data(spi->master,word_len); } while(c); } return count - c; } /* called only when no transfer is active to this device */ static int omap1_spi100k_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master); struct omap1_spi100k_cs *cs = spi->controller_state; u8 word_len = spi->bits_per_word; if (t != NULL && t->bits_per_word) word_len = t->bits_per_word; if (!word_len) word_len = 8; if (spi->bits_per_word > 32) return -EINVAL; cs->word_len = word_len; /* SPI init before transfer */ writew(0x3e , spi100k->base + SPI_SETUP1); writew(0x00 , spi100k->base + SPI_STATUS); writew(0x3e , spi100k->base + SPI_CTRL); return 0; } /* the spi->mode bits understood by this driver: */ #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) static int omap1_spi100k_setup(struct spi_device *spi) { int ret; struct omap1_spi100k *spi100k; struct omap1_spi100k_cs *cs = spi->controller_state; if (spi->bits_per_word < 4 || spi->bits_per_word > 32) { dev_dbg(&spi->dev, "setup: unsupported %d bit words\n", spi->bits_per_word); return -EINVAL; } spi100k = spi_master_get_devdata(spi->master); if (!cs) { cs = kzalloc(sizeof *cs, GFP_KERNEL); if (!cs) return -ENOMEM; cs->base = spi100k->base + spi->chip_select * 0x14; spi->controller_state = cs; } spi100k_open(spi->master); clk_enable(spi100k->ick); clk_enable(spi100k->fck); ret = omap1_spi100k_setup_transfer(spi, NULL); clk_disable(spi100k->ick); clk_disable(spi100k->fck); return ret; } static void omap1_spi100k_work(struct work_struct *work) { struct omap1_spi100k *spi100k; int status = 0; spi100k = 
container_of(work, struct omap1_spi100k, work); spin_lock_irq(&spi100k->lock); clk_enable(spi100k->ick); clk_enable(spi100k->fck); /* We only enable one channel at a time -- the one whose message is * at the head of the queue -- although this controller would gladly * arbitrate among multiple channels. This corresponds to "single * channel" master mode. As a side effect, we need to manage the * chipselect with the FORCE bit ... CS != channel enable. */ while (!list_empty(&spi100k->msg_queue)) { struct spi_message *m; struct spi_device *spi; struct spi_transfer *t = NULL; int cs_active = 0; struct omap1_spi100k_cs *cs; int par_override = 0; m = container_of(spi100k->msg_queue.next, struct spi_message, queue); list_del_init(&m->queue); spin_unlock_irq(&spi100k->lock); spi = m->spi; cs = spi->controller_state; list_for_each_entry(t, &m->transfers, transfer_list) { if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) { status = -EINVAL; break; } if (par_override || t->speed_hz || t->bits_per_word) { par_override = 1; status = omap1_spi100k_setup_transfer(spi, t); if (status < 0) break; if (!t->speed_hz && !t->bits_per_word) par_override = 0; } if (!cs_active) { omap1_spi100k_force_cs(spi100k, 1); cs_active = 1; } if (t->len) { unsigned count; count = omap1_spi100k_txrx_pio(spi, t); m->actual_length += count; if (count != t->len) { status = -EIO; break; } } if (t->delay_usecs) udelay(t->delay_usecs); /* ignore the "leave it on after last xfer" hint */ if (t->cs_change) { omap1_spi100k_force_cs(spi100k, 0); cs_active = 0; } } /* Restore defaults if they were overriden */ if (par_override) { par_override = 0; status = omap1_spi100k_setup_transfer(spi, NULL); } if (cs_active) omap1_spi100k_force_cs(spi100k, 0); m->status = status; m->complete(m->context); spin_lock_irq(&spi100k->lock); } clk_disable(spi100k->ick); clk_disable(spi100k->fck); spin_unlock_irq(&spi100k->lock); if (status < 0) printk(KERN_WARNING "spi transfer failed with %d\n", status); } static int 
omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m) { struct omap1_spi100k *spi100k; unsigned long flags; struct spi_transfer *t; m->actual_length = 0; m->status = -EINPROGRESS; spi100k = spi_master_get_devdata(spi->master); /* Don't accept new work if we're shutting down */ if (spi100k->state == SPI_SHUTDOWN) return -ESHUTDOWN; /* reject invalid messages and transfers */ if (list_empty(&m->transfers) || !m->complete) return -EINVAL; list_for_each_entry(t, &m->transfers, transfer_list) { const void *tx_buf = t->tx_buf; void *rx_buf = t->rx_buf; unsigned len = t->len; if (t->speed_hz > OMAP1_SPI100K_MAX_FREQ || (len && !(rx_buf || tx_buf)) || (t->bits_per_word && ( t->bits_per_word < 4 || t->bits_per_word > 32))) { dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n", t->speed_hz, len, tx_buf ? "tx" : "", rx_buf ? "rx" : "", t->bits_per_word); return -EINVAL; } if (t->speed_hz && t->speed_hz < OMAP1_SPI100K_MAX_FREQ/(1<<16)) { dev_dbg(&spi->dev, "%d Hz max exceeds %d\n", t->speed_hz, OMAP1_SPI100K_MAX_FREQ/(1<<16)); return -EINVAL; } } spin_lock_irqsave(&spi100k->lock, flags); list_add_tail(&m->queue, &spi100k->msg_queue); queue_work(omap1_spi100k_wq, &spi100k->work); spin_unlock_irqrestore(&spi100k->lock, flags); return 0; } static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k) { return 0; } static int __devinit omap1_spi100k_probe(struct platform_device *pdev) { struct spi_master *master; struct omap1_spi100k *spi100k; int status = 0; if (!pdev->id) return -EINVAL; master = spi_alloc_master(&pdev->dev, sizeof *spi100k); if (master == NULL) { dev_dbg(&pdev->dev, "master allocation failed\n"); return -ENOMEM; } if (pdev->id != -1) master->bus_num = pdev->id; master->setup = omap1_spi100k_setup; master->transfer = omap1_spi100k_transfer; master->cleanup = NULL; master->num_chipselect = 2; master->mode_bits = MODEBITS; dev_set_drvdata(&pdev->dev, master); spi100k = spi_master_get_devdata(master); spi100k->master = master; /* * The 
memory region base address is taken as the platform_data. * You should allocate this with ioremap() before initializing * the SPI. */ spi100k->base = (void __iomem *) pdev->dev.platform_data; INIT_WORK(&spi100k->work, omap1_spi100k_work); spin_lock_init(&spi100k->lock); INIT_LIST_HEAD(&spi100k->msg_queue); spi100k->ick = clk_get(&pdev->dev, "ick"); if (IS_ERR(spi100k->ick)) { dev_dbg(&pdev->dev, "can't get spi100k_ick\n"); status = PTR_ERR(spi100k->ick); goto err1; } spi100k->fck = clk_get(&pdev->dev, "fck"); if (IS_ERR(spi100k->fck)) { dev_dbg(&pdev->dev, "can't get spi100k_fck\n"); status = PTR_ERR(spi100k->fck); goto err2; } if (omap1_spi100k_reset(spi100k) < 0) goto err3; status = spi_register_master(master); if (status < 0) goto err3; spi100k->state = SPI_RUNNING; return status; err3: clk_put(spi100k->fck); err2: clk_put(spi100k->ick); err1: spi_master_put(master); return status; } static int __exit omap1_spi100k_remove(struct platform_device *pdev) { struct spi_master *master; struct omap1_spi100k *spi100k; struct resource *r; unsigned limit = 500; unsigned long flags; int status = 0; master = dev_get_drvdata(&pdev->dev); spi100k = spi_master_get_devdata(master); spin_lock_irqsave(&spi100k->lock, flags); spi100k->state = SPI_SHUTDOWN; while (!list_empty(&spi100k->msg_queue) && limit--) { spin_unlock_irqrestore(&spi100k->lock, flags); msleep(10); spin_lock_irqsave(&spi100k->lock, flags); } if (!list_empty(&spi100k->msg_queue)) status = -EBUSY; spin_unlock_irqrestore(&spi100k->lock, flags); if (status != 0) return status; clk_put(spi100k->fck); clk_put(spi100k->ick); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); spi_unregister_master(master); return 0; } static struct platform_driver omap1_spi100k_driver = { .driver = { .name = "omap1_spi100k", .owner = THIS_MODULE, }, .remove = __exit_p(omap1_spi100k_remove), }; static int __init omap1_spi100k_init(void) { omap1_spi100k_wq = create_singlethread_workqueue( omap1_spi100k_driver.driver.name); if 
(omap1_spi100k_wq == NULL) return -1; return platform_driver_probe(&omap1_spi100k_driver, omap1_spi100k_probe); } static void __exit omap1_spi100k_exit(void) { platform_driver_unregister(&omap1_spi100k_driver); destroy_workqueue(omap1_spi100k_wq); } module_init(omap1_spi100k_init); module_exit(omap1_spi100k_exit); MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver"); MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>"); MODULE_LICENSE("GPL");
gpl-2.0
Silentlys/android_kernel_lenovo_msm8916
Documentation/ptp/testptp.c
10435
8946
/* * PTP 1588 clock support - User space test program * * Copyright (C) 2010 OMICRON electronics GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <errno.h> #include <fcntl.h> #include <math.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/ioctl.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/time.h> #include <sys/timex.h> #include <sys/types.h> #include <time.h> #include <unistd.h> #include <linux/ptp_clock.h> #define DEVICE "/dev/ptp0" #ifndef ADJ_SETOFFSET #define ADJ_SETOFFSET 0x0100 #endif #ifndef CLOCK_INVALID #define CLOCK_INVALID -1 #endif /* When glibc offers the syscall, this will go away. */ #include <sys/syscall.h> static int clock_adjtime(clockid_t id, struct timex *tx) { return syscall(__NR_clock_adjtime, id, tx); } static clockid_t get_clockid(int fd) { #define CLOCKFD 3 #define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD) return FD_TO_CLOCKID(fd); } static void handle_alarm(int s) { printf("received signal %d\n", s); } static int install_handler(int signum, void (*handler)(int)) { struct sigaction action; sigset_t mask; /* Unblock the signal. */ sigemptyset(&mask); sigaddset(&mask, signum); sigprocmask(SIG_UNBLOCK, &mask, NULL); /* Install the signal handler. 
*/ action.sa_handler = handler; action.sa_flags = 0; sigemptyset(&action.sa_mask); sigaction(signum, &action, NULL); return 0; } static long ppb_to_scaled_ppm(int ppb) { /* * The 'freq' field in the 'struct timex' is in parts per * million, but with a 16 bit binary fractional field. * Instead of calculating either one of * * scaled_ppm = (ppb / 1000) << 16 [1] * scaled_ppm = (ppb << 16) / 1000 [2] * * we simply use double precision math, in order to avoid the * truncation in [1] and the possible overflow in [2]. */ return (long) (ppb * 65.536); } static void usage(char *progname) { fprintf(stderr, "usage: %s [options]\n" " -a val request a one-shot alarm after 'val' seconds\n" " -A val request a periodic alarm every 'val' seconds\n" " -c query the ptp clock's capabilities\n" " -d name device to open\n" " -e val read 'val' external time stamp events\n" " -f val adjust the ptp clock frequency by 'val' ppb\n" " -g get the ptp clock time\n" " -h prints this message\n" " -p val enable output with a period of 'val' nanoseconds\n" " -P val enable or disable (val=1|0) the system clock PPS\n" " -s set the ptp clock time from the system time\n" " -S set the system time from the ptp clock time\n" " -t val shift the ptp clock time by 'val' seconds\n", progname); } int main(int argc, char *argv[]) { struct ptp_clock_caps caps; struct ptp_extts_event event; struct ptp_extts_request extts_request; struct ptp_perout_request perout_request; struct timespec ts; struct timex tx; static timer_t timerid; struct itimerspec timeout; struct sigevent sigevent; char *progname; int c, cnt, fd; char *device = DEVICE; clockid_t clkid; int adjfreq = 0x7fffffff; int adjtime = 0; int capabilities = 0; int extts = 0; int gettime = 0; int oneshot = 0; int periodic = 0; int perout = -1; int pps = -1; int settime = 0; progname = strrchr(argv[0], '/'); progname = progname ? 
1+progname : argv[0]; while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghp:P:sSt:v"))) { switch (c) { case 'a': oneshot = atoi(optarg); break; case 'A': periodic = atoi(optarg); break; case 'c': capabilities = 1; break; case 'd': device = optarg; break; case 'e': extts = atoi(optarg); break; case 'f': adjfreq = atoi(optarg); break; case 'g': gettime = 1; break; case 'p': perout = atoi(optarg); break; case 'P': pps = atoi(optarg); break; case 's': settime = 1; break; case 'S': settime = 2; break; case 't': adjtime = atoi(optarg); break; case 'h': usage(progname); return 0; case '?': default: usage(progname); return -1; } } fd = open(device, O_RDWR); if (fd < 0) { fprintf(stderr, "opening %s: %s\n", device, strerror(errno)); return -1; } clkid = get_clockid(fd); if (CLOCK_INVALID == clkid) { fprintf(stderr, "failed to read clock id\n"); return -1; } if (capabilities) { if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) { perror("PTP_CLOCK_GETCAPS"); } else { printf("capabilities:\n" " %d maximum frequency adjustment (ppb)\n" " %d programmable alarms\n" " %d external time stamp channels\n" " %d programmable periodic signals\n" " %d pulse per second\n", caps.max_adj, caps.n_alarm, caps.n_ext_ts, caps.n_per_out, caps.pps); } } if (0x7fffffff != adjfreq) { memset(&tx, 0, sizeof(tx)); tx.modes = ADJ_FREQUENCY; tx.freq = ppb_to_scaled_ppm(adjfreq); if (clock_adjtime(clkid, &tx)) { perror("clock_adjtime"); } else { puts("frequency adjustment okay"); } } if (adjtime) { memset(&tx, 0, sizeof(tx)); tx.modes = ADJ_SETOFFSET; tx.time.tv_sec = adjtime; tx.time.tv_usec = 0; if (clock_adjtime(clkid, &tx) < 0) { perror("clock_adjtime"); } else { puts("time shift okay"); } } if (gettime) { if (clock_gettime(clkid, &ts)) { perror("clock_gettime"); } else { printf("clock time: %ld.%09ld or %s", ts.tv_sec, ts.tv_nsec, ctime(&ts.tv_sec)); } } if (settime == 1) { clock_gettime(CLOCK_REALTIME, &ts); if (clock_settime(clkid, &ts)) { perror("clock_settime"); } else { puts("set time okay"); } } if 
(settime == 2) { clock_gettime(clkid, &ts); if (clock_settime(CLOCK_REALTIME, &ts)) { perror("clock_settime"); } else { puts("set time okay"); } } if (extts) { memset(&extts_request, 0, sizeof(extts_request)); extts_request.index = 0; extts_request.flags = PTP_ENABLE_FEATURE; if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) { perror("PTP_EXTTS_REQUEST"); extts = 0; } else { puts("external time stamp request okay"); } for (; extts; extts--) { cnt = read(fd, &event, sizeof(event)); if (cnt != sizeof(event)) { perror("read"); break; } printf("event index %u at %lld.%09u\n", event.index, event.t.sec, event.t.nsec); fflush(stdout); } /* Disable the feature again. */ extts_request.flags = 0; if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) { perror("PTP_EXTTS_REQUEST"); } } if (oneshot) { install_handler(SIGALRM, handle_alarm); /* Create a timer. */ sigevent.sigev_notify = SIGEV_SIGNAL; sigevent.sigev_signo = SIGALRM; if (timer_create(clkid, &sigevent, &timerid)) { perror("timer_create"); return -1; } /* Start the timer. */ memset(&timeout, 0, sizeof(timeout)); timeout.it_value.tv_sec = oneshot; if (timer_settime(timerid, 0, &timeout, NULL)) { perror("timer_settime"); return -1; } pause(); timer_delete(timerid); } if (periodic) { install_handler(SIGALRM, handle_alarm); /* Create a timer. */ sigevent.sigev_notify = SIGEV_SIGNAL; sigevent.sigev_signo = SIGALRM; if (timer_create(clkid, &sigevent, &timerid)) { perror("timer_create"); return -1; } /* Start the timer. 
*/ memset(&timeout, 0, sizeof(timeout)); timeout.it_interval.tv_sec = periodic; timeout.it_value.tv_sec = periodic; if (timer_settime(timerid, 0, &timeout, NULL)) { perror("timer_settime"); return -1; } while (1) { pause(); } timer_delete(timerid); } if (perout >= 0) { if (clock_gettime(clkid, &ts)) { perror("clock_gettime"); return -1; } memset(&perout_request, 0, sizeof(perout_request)); perout_request.index = 0; perout_request.start.sec = ts.tv_sec + 2; perout_request.start.nsec = 0; perout_request.period.sec = 0; perout_request.period.nsec = perout; if (ioctl(fd, PTP_PEROUT_REQUEST, &perout_request)) { perror("PTP_PEROUT_REQUEST"); } else { puts("periodic output request okay"); } } if (pps != -1) { int enable = pps ? 1 : 0; if (ioctl(fd, PTP_ENABLE_PPS, enable)) { perror("PTP_ENABLE_PPS"); } else { puts("pps for system time request okay"); } } close(fd); return 0; }
gpl-2.0
Team-SennyC2/android_kernel_htc_villec2
drivers/block/paride/fit3.c
15555
4484
/* 
	fit3.c	(c) 1998  Grant R. Guenther <grant@torque.net>
		          Under the terms of the GNU General Public License.

	fit3.c is a low-level protocol driver for newer models 
	of the Fidelity International Technology parallel port adapter.  

	This adapter is used in their TransDisk 3000 portable 
	hard-drives, as well as CD-ROM, PD-CD and other devices.

	The TD-2000 and certain older devices use a different protocol.
	Try the fit2 protocol module with them.

	NB:  The FIT adapters do not appear to support the control 
	registers.  So, we map ALT_STATUS to STATUS and NO-OP writes 
	to the device control register - this means that IDE reset 
	will not work on these devices.

*/

#define FIT3_VERSION      "1.0"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>

#include "paride.h"

/* Recombine two nibble reads (status-line encoded) into one byte. */
#define j44(a,b)	(((a>>3)&0x0f)|((b<<1)&0xf0))

#define w7(byte)            {out_p(7,byte);}
#define r7()                (in_p(7) & 0xff)

/*  cont = 0 - access the IDE register file 
    cont = 1 - access the IDE command set 
*/

/* Write an IDE register.  Writes to the command set (cont == 1) are
 * no-ops — the FIT adapter has no control registers (see header note).
 * Modes 0/1 clock the value out via data/control strobes; mode 2 (EPP)
 * pushes it through port 4. */
static void fit3_write_regr( PIA *pi, int cont, int regr, int val)

{	if (cont == 1) return;
	switch (pi->mode) {
	case 0:
	case 1: w2(0xc); w0(regr); w2(0x8); w2(0xc);
		w0(val); w2(0xd); w0(0); w2(0xc);
		break;
	case 2: w2(0xc); w0(regr); w2(0x8); w2(0xc); 
		w4(val); w4(0);
		w2(0xc);
		break;
	}
}

/* Read an IDE register.  For command-set access (cont != 0) only the
 * status register (regr 6, remapped to 7) is readable; everything else
 * reads as 0xff.  Mode 0 assembles a byte from two nibbles via j44();
 * modes 1/2 read a whole byte (mode 2 drains an extra EPP byte). */
static int fit3_read_regr( PIA *pi, int cont, int regr )

{	int  a, b;

	if (cont) {
	  if (regr != 6) return 0xff;
	  regr = 7;
	} 
	switch (pi->mode) {
	case 0: w2(0xc); w0(regr + 0x10); w2(0x8); w2(0xc);
		w2(0xd); a = r1();
		w2(0xf); b = r1(); 
		w2(0xc);
		return j44(a,b);
	case 1: w2(0xc); w0(regr + 0x90); w2(0x8); w2(0xc); 
		w2(0xec); w2(0xee); w2(0xef); a = r0(); 
		w2(0xc);
		return a;
	case 2: w2(0xc); w0(regr + 0x90); w2(0x8); w2(0xc); 
		w2(0xec); 
		a = r4(); b = r4(); 
		w2(0xc);
		return a;
	}
	return -1; 
}

/* Bulk-read 'count' bytes into buf, using the mode-specific handshake.
 * Modes 0/1 transfer two bytes per loop iteration; mode 2 streams
 * bytes through the EPP data port. */
static void fit3_read_block( PIA *pi, char * buf, int count )

{	int  k, a, b, c, d;

	switch (pi->mode) {
	case 0: w2(0xc); w0(0x10); w2(0x8); w2(0xc);
		for (k=0;k<count/2;k++) {
		    w2(0xd); a = r1();
		    w2(0xf); b = r1();
		    w2(0xc); c = r1();
		    w2(0xe); d = r1();
		    buf[2*k  ] = j44(a,b);
		    buf[2*k+1] = j44(c,d);
		}
		w2(0xc);
		break;
	case 1: w2(0xc); w0(0x90); w2(0x8); w2(0xc); 
		w2(0xec); w2(0xee);
		for (k=0;k<count/2;k++) {
		    w2(0xef); a = r0();
		    w2(0xee); b = r0();
		    buf[2*k  ] = a;
		    buf[2*k+1] = b;
		}
		w2(0xec); 
		w2(0xc);
		break;
	case 2: w2(0xc); w0(0x90); w2(0x8); w2(0xc); 
		w2(0xec);
		for (k=0;k<count;k++) buf[k] = r4();
		w2(0xc);
		break;
	}
}

/* Bulk-write 'count' bytes from buf; two bytes per strobe pair in
 * modes 0/1, straight EPP writes in mode 2. */
static void fit3_write_block( PIA *pi, char * buf, int count )

{	int k;

	switch (pi->mode) {
	case 0:
	case 1: w2(0xc); w0(0); w2(0x8); w2(0xc); 
		for (k=0;k<count/2;k++) {
		    w0(buf[2*k  ]); w2(0xd);
		    w0(buf[2*k+1]); w2(0xc);
		}
		break;
	case 2: w2(0xc); w0(0); w2(0x8); w2(0xc); 
		for (k=0;k<count;k++) w4(buf[k]);
		w2(0xc);
		break;
	}
}

/* Claim the adapter: save the caller's port state, then issue the
 * connect sequence (plus an extra EPP-enable step in mode 2). */
static void fit3_connect ( PIA *pi  )

{	pi->saved_r0 = r0();
	pi->saved_r2 = r2();
	w2(0xc); w0(0); w2(0xa);
	if (pi->mode == 2) { 
		w2(0xc); w0(0x9);
		w2(0x8); w2(0xc); 
	}
}

/* Release the adapter and restore the saved port state. */
static void fit3_disconnect ( PIA *pi )

{	w2(0xc); w0(0xa); w2(0x8); w2(0xc);
	w0(pi->saved_r0);
	w2(pi->saved_r2);
} 

/* Identify the adapter in the kernel log.  mode_string must stay in
 * sync with max_mode (3 modes: 0..2). */
static void fit3_log_adapter( PIA *pi, char * scratch, int verbose )

{       char    *mode_string[3] = {"4-bit","8-bit","EPP"};

	printk("%s: fit3 %s, FIT 3000 adapter at 0x%x, "
	       "mode %d (%s), delay %d\n",
		pi->device,FIT3_VERSION,pi->port,
		pi->mode,mode_string[pi->mode],pi->delay);

}

static struct pi_protocol fit3 = {
	.owner		= THIS_MODULE,
	.name		= "fit3",
	.max_mode	= 3,
	.epp_first	= 2,
	.default_delay	= 1,
	.max_units	= 1,
	.write_regr	= fit3_write_regr,
	.read_regr	= fit3_read_regr,
	.write_block	= fit3_write_block,
	.read_block	= fit3_read_block,
	.connect	= fit3_connect,
	.disconnect	= fit3_disconnect,
	.log_adapter	= fit3_log_adapter,
};

static int __init fit3_init(void)
{
	return paride_register(&fit3);
}

static void __exit fit3_exit(void)
{
	paride_unregister(&fit3);
}

MODULE_LICENSE("GPL");
module_init(fit3_init)
module_exit(fit3_exit)
gpl-2.0
pierrewillenbrock/dolphin
Externals/liblzma/check/check.c
196
2910
/////////////////////////////////////////////////////////////////////////////// // /// \file check.c /// \brief Single API to access different integrity checks // // Author: Lasse Collin // // This file has been put into the public domain. // You can do whatever you want with this file. // /////////////////////////////////////////////////////////////////////////////// #include "check.h" extern LZMA_API(lzma_bool) lzma_check_is_supported(lzma_check type) { if ((unsigned int)(type) > LZMA_CHECK_ID_MAX) return false; static const lzma_bool available_checks[LZMA_CHECK_ID_MAX + 1] = { true, // LZMA_CHECK_NONE #ifdef HAVE_CHECK_CRC32 true, #else false, #endif false, // Reserved false, // Reserved #ifdef HAVE_CHECK_CRC64 true, #else false, #endif false, // Reserved false, // Reserved false, // Reserved false, // Reserved false, // Reserved #ifdef HAVE_CHECK_SHA256 true, #else false, #endif false, // Reserved false, // Reserved false, // Reserved false, // Reserved false, // Reserved }; return available_checks[(unsigned int)(type)]; } extern LZMA_API(uint32_t) lzma_check_size(lzma_check type) { if ((unsigned int)(type) > LZMA_CHECK_ID_MAX) return UINT32_MAX; // See file-format.txt section 2.1.1.2. 
static const uint8_t check_sizes[LZMA_CHECK_ID_MAX + 1] = { 0, 4, 4, 4, 8, 8, 8, 16, 16, 16, 32, 32, 32, 64, 64, 64 }; return check_sizes[(unsigned int)(type)]; } extern void lzma_check_init(lzma_check_state *check, lzma_check type) { switch (type) { case LZMA_CHECK_NONE: break; #ifdef HAVE_CHECK_CRC32 case LZMA_CHECK_CRC32: check->state.crc32 = 0; break; #endif #ifdef HAVE_CHECK_CRC64 case LZMA_CHECK_CRC64: check->state.crc64 = 0; break; #endif #ifdef HAVE_CHECK_SHA256 case LZMA_CHECK_SHA256: lzma_sha256_init(check); break; #endif default: break; } return; } extern void lzma_check_update(lzma_check_state *check, lzma_check type, const uint8_t *buf, size_t size) { switch (type) { #ifdef HAVE_CHECK_CRC32 case LZMA_CHECK_CRC32: check->state.crc32 = lzma_crc32(buf, size, check->state.crc32); break; #endif #ifdef HAVE_CHECK_CRC64 case LZMA_CHECK_CRC64: check->state.crc64 = lzma_crc64(buf, size, check->state.crc64); break; #endif #ifdef HAVE_CHECK_SHA256 case LZMA_CHECK_SHA256: lzma_sha256_update(buf, size, check); break; #endif default: break; } return; } extern void lzma_check_finish(lzma_check_state *check, lzma_check type) { switch (type) { #ifdef HAVE_CHECK_CRC32 case LZMA_CHECK_CRC32: check->buffer.u32[0] = conv32le(check->state.crc32); break; #endif #ifdef HAVE_CHECK_CRC64 case LZMA_CHECK_CRC64: check->buffer.u64[0] = conv64le(check->state.crc64); break; #endif #ifdef HAVE_CHECK_SHA256 case LZMA_CHECK_SHA256: lzma_sha256_finish(check); break; #endif default: break; } return; }
gpl-2.0
tomasbw/linux-yocto-4.4
drivers/infiniband/hw/nes/nes_nic.c
196
63295
/* * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> #include <linux/ethtool.h> #include <linux/slab.h> #include <net/tcp.h> #include <net/inet_common.h> #include <linux/inet.h> #include "nes.h" static struct nic_qp_map nic_qp_mapping_0[] = { {16,0,0,1},{24,4,0,0},{28,8,0,0},{32,12,0,0}, {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0}, {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0}, {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0} }; static struct nic_qp_map nic_qp_mapping_1[] = { {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0}, {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0} }; static struct nic_qp_map nic_qp_mapping_2[] = { {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0} }; static struct nic_qp_map nic_qp_mapping_3[] = { {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0} }; static struct nic_qp_map nic_qp_mapping_4[] = { {28,8,0,0},{32,12,0,0} }; static struct nic_qp_map nic_qp_mapping_5[] = { {29,9,1,0},{33,13,1,0} }; static struct nic_qp_map nic_qp_mapping_6[] = { {30,10,2,0},{34,14,2,0} }; static struct nic_qp_map nic_qp_mapping_7[] = { {31,11,3,0},{35,15,3,0} }; static struct nic_qp_map *nic_qp_mapping_per_function[] = { nic_qp_mapping_0, nic_qp_mapping_1, nic_qp_mapping_2, nic_qp_mapping_3, nic_qp_mapping_4, nic_qp_mapping_5, nic_qp_mapping_6, nic_qp_mapping_7 }; static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; static int debug = -1; static int nics_per_function = 1; /** * nes_netdev_poll */ static int nes_netdev_poll(struct napi_struct *napi, int budget) { struct nes_vnic *nesvnic = container_of(napi, struct nes_vnic, napi); struct nes_device *nesdev = nesvnic->nesdev; struct nes_hw_nic_cq *nescq = &nesvnic->nic_cq; nesvnic->budget = budget; nescq->cqes_pending = 0; nescq->rx_cqes_completed = 0; nescq->cqe_allocs_pending = 0; 
nescq->rx_pkts_indicated = 0; nes_nic_ce_handler(nesdev, nescq); if (nescq->cqes_pending == 0) { napi_complete(napi); /* clear out completed cqes and arm */ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | nescq->cq_number | (nescq->cqe_allocs_pending << 16)); nes_read32(nesdev->regs+NES_CQE_ALLOC); } else { /* clear out completed cqes but don't arm */ nes_write32(nesdev->regs+NES_CQE_ALLOC, nescq->cq_number | (nescq->cqe_allocs_pending << 16)); nes_debug(NES_DBG_NETDEV, "%s: exiting with work pending\n", nesvnic->netdev->name); } return nescq->rx_pkts_indicated; } /** * nes_netdev_open - Activate the network interface; ifconfig * ethx up. */ static int nes_netdev_open(struct net_device *netdev) { u32 macaddr_low; u16 macaddr_high; struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; int ret; int i; struct nes_vnic *first_nesvnic = NULL; u32 nic_active_bit; u32 nic_active; struct list_head *list_pos, *list_temp; unsigned long flags; assert(nesdev != NULL); if (nesvnic->netdev_open == 1) return 0; if (netif_msg_ifup(nesvnic)) printk(KERN_INFO PFX "%s: enabling interface\n", netdev->name); ret = nes_init_nic_qp(nesdev, netdev); if (ret) { return ret; } netif_carrier_off(netdev); netif_stop_queue(netdev); if ((!nesvnic->of_device_registered) && (nesvnic->rdma_enabled)) { nesvnic->nesibdev = nes_init_ofa_device(netdev); if (nesvnic->nesibdev == NULL) { printk(KERN_ERR PFX "%s: nesvnic->nesibdev alloc failed", netdev->name); } else { nesvnic->nesibdev->nesvnic = nesvnic; ret = nes_register_ofa_device(nesvnic->nesibdev); if (ret) { printk(KERN_ERR PFX "%s: Unable to register RDMA device, ret = %d\n", netdev->name, ret); } } } /* Set packet filters */ nic_active_bit = 1 << nesvnic->nic_index; nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE); nic_active |= nic_active_bit; nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active); nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE); 
	/* (continuation of nes_netdev_open) enable multicast and broadcast
	 * reception for this vnic's filter bit. */
	nic_active |= nic_active_bit;
	nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
	nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
	nic_active |= nic_active_bit;
	nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);

	/* Split the station MAC into the 16-bit-high / 32-bit-low register
	 * halves expected by the perfect-filter registers. */
	macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
	macaddr_high += (u16)netdev->dev_addr[1];
	macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
	macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
	macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
	macaddr_low += (u32)netdev->dev_addr[5];

	/* Program the various MAC regs */
	for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
		/* 0xf marks the end of this vnic's QP list. */
		if (nesvnic->qp_nic_index[i] == 0xf) {
			break;
		}
		nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW"
				" (Addr:%08X) = %08X, HIGH = %08X.\n",
				i, nesvnic->qp_nic_index[i],
				NES_IDX_PERFECT_FILTER_LOW+
					(nesvnic->qp_nic_index[i] * 8),
				macaddr_low,
				(u32)macaddr_high | NES_MAC_ADDR_VALID |
				((((u32)nesvnic->nic_index) << 16)));
		nes_write_indexed(nesdev,
				NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
				macaddr_low);
		nes_write_indexed(nesdev,
				NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
				(u32)macaddr_high | NES_MAC_ADDR_VALID |
				((((u32)nesvnic->nic_index) << 16)));
	}

	/* Arm the CQ doorbell; the read back flushes the posted write. */
	nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
			nesvnic->nic_cq.cq_number);
	nes_read32(nesdev->regs+NES_CQE_ALLOC);

	/* Look for another vnic on the same MAC that is already open. */
	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
		if (first_nesvnic->netdev_open == 1)
			break;
	}
	if (first_nesvnic->netdev_open == 0) {
		/* We are the first opener on this MAC: unmask its interrupts. */
		nes_debug(NES_DBG_INIT, "Setting up MAC interrupt mask.\n");
		nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK + (0x200 * nesdev->mac_index),
				~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
				NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
		first_nesvnic = nesvnic;
	}

	if (first_nesvnic->linkup) {
		/* Enable network packets */
		nesvnic->linkup = 1;
		netif_start_queue(netdev);
		netif_carrier_on(netdev);
	}

	spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
	if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
		/* SFP+ PHY: schedule a deferred link-state re-check. */
		nesdev->link_recheck = 1;
		mod_delayed_work(system_wq, &nesdev->work,
				 NES_LINK_RECHECK_DELAY);
	}
	spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);

	/* Propagate current link state to the registered RDMA device. */
	spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
	if (nesvnic->of_device_registered) {
		nesdev->nesadapter->send_term_ok = 1;
		if (nesvnic->linkup == 1) {
			if (nesdev->iw_status == 0) {
				nesdev->iw_status = 1;
				nes_port_ibevent(nesvnic);
			}
		} else {
			nesdev->iw_status = 0;
		}
	}
	spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);

	napi_enable(&nesvnic->napi);
	nesvnic->netdev_open = 1;

	return 0;
}


/**
 * nes_netdev_stop - Deactivate the interface (ifconfig ethX down).
 *
 * Quiesces NAPI and the tx queue, masks or hands over the MAC interrupt,
 * removes this vnic from every packet-filter enable register, notifies the
 * RDMA side of port-down, and destroys the NIC QP.
 */
static int nes_netdev_stop(struct net_device *netdev)
{
	struct nes_vnic *nesvnic = netdev_priv(netdev);
	struct nes_device *nesdev = nesvnic->nesdev;
	u32 nic_active_mask;
	u32 nic_active;
	struct nes_vnic *first_nesvnic = NULL;
	struct list_head *list_pos, *list_temp;
	unsigned long flags;

	nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
			nesvnic, nesdev, netdev, netdev->name);
	if (nesvnic->netdev_open == 0)
		return 0;

	if (netif_msg_ifdown(nesvnic))
		printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name);
	netif_carrier_off(netdev);

	/* Disable network packets */
	napi_disable(&nesvnic->napi);
	netif_stop_queue(netdev);

	/* Find another open vnic (not ourselves) still using this MAC. */
	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
		if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic))
			break;
	}

	if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic) &&
		(PCI_FUNC(first_nesvnic->nesdev->pcidev->devfn) !=
		PCI_FUNC(nesvnic->nesdev->pcidev->devfn))) {
		/* Another PCI function still owns this MAC: mask our copy of
		 * the MAC interrupts and unmask them on the other function. */
		nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+
				(0x200*nesdev->mac_index), 0xffffffff);
		nes_write_indexed(first_nesvnic->nesdev,
				NES_IDX_MAC_INT_MASK+
				(0x200*first_nesvnic->nesdev->mac_index),
				~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
				NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
	} else {
		/* Last opener on this MAC: mask all MAC interrupts. */
		nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff);
	}

	/* Remove this vnic's bit from every packet-filter enable register
	 * and invalidate its perfect-filter entry. */
	nic_active_mask = ~((u32)(1 << nesvnic->nic_index));
	nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_HIGH+
			(nesvnic->perfect_filter_index*8), 0);
	nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
	nic_active &= nic_active_mask;
	nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
	nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
	nic_active &= nic_active_mask;
	nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
	nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
	nic_active &= nic_active_mask;
	nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
	nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
	nic_active &= nic_active_mask;
	nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
	nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
	nic_active &= nic_active_mask;
	nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);

	/* Tell the RDMA side the port is going down. */
	spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
	if (nesvnic->of_device_registered) {
		nesdev->nesadapter->send_term_ok = 0;
		nesdev->iw_status = 0;
		if (nesvnic->linkup == 1)
			nes_port_ibevent(nesvnic);
	}
	del_timer_sync(&nesvnic->event_timer);
	nesvnic->event_timer.function = NULL;
	spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);

	nes_destroy_nic_qp(nesvnic);

	nesvnic->netdev_open = 0;

	return 0;
}


/**
 * nes_nic_send - Build and post one NIC SQ WQE for @skb.
 *
 * Copies up to NES_FIRST_FRAG_SIZE bytes of the linear header into the
 * preallocated first-fragment copy buffer; any remaining linear data and
 * page fragments are DMA-mapped into the remaining WQE fragment slots
 * (4 usable beyond the copy buffer).  Sets up VLAN tagging, LSO and
 * checksum-offload bits in wqe_misc.  Returns NETDEV_TX_OK on success.
 *
 * NOTE(review): the too-many-fragments path frees the skb and then
 * returns NETDEV_TX_LOCKED, which conventionally means "requeue and
 * retry" — verify the caller treats this as a drop, otherwise this is a
 * use-after-free hazard.
 */
static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
{
	struct nes_vnic *nesvnic = netdev_priv(netdev);
	struct nes_device *nesdev = nesvnic->nesdev;
	struct nes_hw_nic *nesnic = &nesvnic->nic;
	struct nes_hw_nic_sq_wqe *nic_sqe;
	struct tcphdr *tcph;
	__le16 *wqe_fragment_length;
	u32 wqe_misc;
	u16 wqe_fragment_index = 1;	/* first fragment (0) is used by copy buffer */
	u16 skb_fragment_index;
	dma_addr_t bus_address;

	nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
	wqe_fragment_length =
			(__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];

	/* setup the VLAN tag if present */
	if (skb_vlan_tag_present(skb)) {
		nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
				netdev->name, skb_vlan_tag_get(skb));
		wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
		wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
	} else
		wqe_misc = 0;

	/* bump past the vlan tag */
	wqe_fragment_length++;
	/* wqe_fragment_address = (u64 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX]; */
	wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb_is_gso(skb)) {
			tcph = tcp_hdr(skb);
			/* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... is_gso = %u seg size = %u\n",
					netdev->name, skb_is_gso(skb), skb_shinfo(skb)->gso_size); */
			/* LSO info word carries the TCP doff plus the header
			 * offset within the packet. */
			wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE | (u16)skb_shinfo(skb)->gso_size;
			set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
					((u32)tcph->doff) |
					(((u32)(((unsigned char *)tcph) - skb->data)) << 4));
		}
	} else {	/* CHECKSUM_HW */
		wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM;
	}

	set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
				skb->len);
	/* First-fragment copy buffer takes the start of the linear data. */
	memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
			skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE),
			skb_headlen(skb)));
	wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
			skb_headlen(skb)));
	wqe_fragment_length[1] = 0;
	if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
		if ((skb_shinfo(skb)->nr_frags + 1) > 4) {
			nes_debug(NES_DBG_NIC_TX, "%s: Packet with %u fragments not sent, skb_headlen=%u\n",
					netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb));
			kfree_skb(skb);
			nesvnic->tx_sw_dropped++;
			return NETDEV_TX_LOCKED;
		}
		/* Mark that fragment slot 1 holds the linear overflow so the
		 * completion path knows to unmap it. */
		set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
		bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE,
				skb_headlen(skb) - NES_FIRST_FRAG_SIZE, PCI_DMA_TODEVICE);
		/* Linear data past the copy buffer rides in fragment slot 1. */
		wqe_fragment_length[wqe_fragment_index++] =
				cpu_to_le16(skb_headlen(skb) - NES_FIRST_FRAG_SIZE);
		wqe_fragment_length[wqe_fragment_index] = 0;
		set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
				((u64)(bus_address)));
		nesnic->tx_skb[nesnic->sq_head] = skb;
	}

	if (skb_headlen(skb) == skb->len) {
		if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
			/* Entire packet fit in the copy buffer. */
			nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
			nesnic->tx_skb[nesnic->sq_head] = skb;
		}
	} else {
		/* Deal with Fragments */
		nesnic->tx_skb[nesnic->sq_head] = skb;
		for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags;
				skb_fragment_index++) {
			skb_frag_t *frag =
				&skb_shinfo(skb)->frags[skb_fragment_index];
			bus_address = skb_frag_dma_map(&nesdev->pcidev->dev,
						       frag, 0, skb_frag_size(frag),
						       DMA_TO_DEVICE);
			wqe_fragment_length[wqe_fragment_index] =
					cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[skb_fragment_index]));
			set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
				bus_address);
			wqe_fragment_index++;
			if (wqe_fragment_index < 5)
				wqe_fragment_length[wqe_fragment_index] = 0;
		}
	}

	set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
	/* sq_size is a power of two, so the mask wraps the head index. */
	nesnic->sq_head++;
	nesnic->sq_head &= nesnic->sq_size - 1;

	return NETDEV_TX_OK;
}


/**
 * nes_netdev_start_xmit - ndo_start_xmit entry point.
 *
 * Fast path (<= 4 fragments) hands the skb straight to nes_nic_send().
 * Over-fragmented GSO skbs are software-segmented across several WQEs
 * here; over-fragmented non-GSO skbs are linearized and resent.  The WQE
 * doorbell is rung once at the end with the total WQE count.
 */
static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nes_vnic *nesvnic = netdev_priv(netdev);
	struct nes_device *nesdev = nesvnic->nesdev;
	struct nes_hw_nic *nesnic = &nesvnic->nic;
	struct nes_hw_nic_sq_wqe *nic_sqe;
	struct tcphdr *tcph;
	/* struct udphdr *udph; */
#define NES_MAX_TSO_FRAGS MAX_SKB_FRAGS
	/* 64K segment plus overflow on each side */
	dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS];
	dma_addr_t bus_address;
	u32 tso_frag_index;
	u32 tso_frag_count;
	u32 tso_wqe_length;
	u32 curr_tcp_seq;
	u32 wqe_count=1;
	u32 send_rc;
	struct iphdr *iph;
	__le16 *wqe_fragment_length;
	u32 nr_frags;
	u32 original_first_length;
	/* u64 *wqe_fragment_address; */
	/* first fragment (0) is used by copy buffer */
	u16 wqe_fragment_index=1;
	u16 hoffset;
	u16 nhoffset;
	u16 wqes_needed;
	u16 wqes_available;
	u32 wqe_misc;

	/*
	 * nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
	 *		" (%u frags), tso_size=%u\n",
	 *		netdev->name, skb->len, skb_headlen(skb),
	 *		skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
	 */

	if (!netif_carrier_ok(netdev))
		return NETDEV_TX_OK;

	if (netif_queue_stopped(netdev))
		return NETDEV_TX_BUSY;

	/* Check if SQ is full */
	if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) {
		if (!netif_queue_stopped(netdev)) {
			netif_stop_queue(netdev);
			barrier();
			/* Re-check after stopping; the completion path may
			 * have advanced sq_tail in the meantime. */
			if ((((((volatile u16)nesnic->sq_tail)+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) != 1) {
				netif_start_queue(netdev);
				goto sq_no_longer_full;
			}
		}
		nesvnic->sq_full++;
		return NETDEV_TX_BUSY;
	}

sq_no_longer_full:
	nr_frags = skb_shinfo(skb)->nr_frags;
	/* Linear overflow beyond the copy buffer consumes a fragment slot. */
	if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
		nr_frags++;
	}
	/* Check if too many fragments */
	if (unlikely((nr_frags > 4))) {
		if (skb_is_gso(skb)) {
			nesvnic->segmented_tso_requests++;
			nesvnic->tso_requests++;
			/* Basically 4 fragments available per WQE with extended fragments */
			wqes_needed = nr_frags >> 2;
			wqes_needed += (nr_frags&3)?1:0;
			wqes_available = (((nesnic->sq_tail+nesnic->sq_size)-nesnic->sq_head) - 1) &
					(nesnic->sq_size - 1);

			if (unlikely(wqes_needed > wqes_available)) {
				if (!netif_queue_stopped(netdev)) {
					netif_stop_queue(netdev);
					barrier();
					wqes_available = (((((volatile u16)nesnic->sq_tail)+nesnic->sq_size)-nesnic->sq_head) - 1) &
						(nesnic->sq_size - 1);
					if (wqes_needed <= wqes_available) {
						netif_start_queue(netdev);
						goto tso_sq_no_longer_full;
					}
				}
				nesvnic->sq_full++;
				nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n",
						netdev->name);
				return NETDEV_TX_BUSY;
			}
tso_sq_no_longer_full:
			/* Map all the buffers */
			for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags;
					tso_frag_count++) {
				skb_frag_t *frag =
					&skb_shinfo(skb)->frags[tso_frag_count];
				tso_bus_address[tso_frag_count] =
					skb_frag_dma_map(&nesdev->pcidev->dev,
							 frag, 0, skb_frag_size(frag),
							 DMA_TO_DEVICE);
			}

			tso_frag_index = 0;
			curr_tcp_seq = ntohl(tcp_hdr(skb)->seq);
			hoffset = skb_transport_header(skb) - skb->data;
			nhoffset = skb_network_header(skb) - skb->data;
			/* Header length = transport offset + TCP header size. */
			original_first_length = hoffset + ((((struct tcphdr *)skb_transport_header(skb))->doff)<<2);

			for (wqe_count=0; wqe_count<((u32)wqes_needed); wqe_count++) {
				tso_wqe_length = 0;
				nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
				wqe_fragment_length =
						(__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
				/* setup the VLAN tag if present */
				if (skb_vlan_tag_present(skb)) {
					nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
							netdev->name, skb_vlan_tag_get(skb));
					wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
					wqe_fragment_length[0] =
							(__force __le16) skb_vlan_tag_get(skb);
				} else
					wqe_misc = 0;

				/* bump past the vlan tag */
				wqe_fragment_length++;

				/* Assumes header totally fits in allocated buffer and is in first fragment */
				if (original_first_length > NES_FIRST_FRAG_SIZE) {
					nes_debug(NES_DBG_NIC_TX, "ERROR: SKB header too big, headlen=%u, FIRST_FRAG_SIZE=%u\n",
							original_first_length, NES_FIRST_FRAG_SIZE);
					nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
							" (%u frags), is_gso = %u tso_size=%u\n",
							netdev->name,
							skb->len, skb_headlen(skb),
							skb_shinfo(skb)->nr_frags, skb_is_gso(skb), skb_shinfo(skb)->gso_size);
				}
				/* Replicate the protocol headers into this
				 * WQE's copy buffer and patch them per-segment. */
				memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
						skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE),
						original_first_length));
				iph = (struct iphdr *)
				(&nesnic->first_frag_vbase[nesnic->sq_head].buffer[nhoffset]);
				tcph = (struct tcphdr *)
				(&nesnic->first_frag_vbase[nesnic->sq_head].buffer[hoffset]);
				/* FIN/PSH/RST/URG only on the last segment;
				 * SYN only on the first. */
				if ((wqe_count+1)!=(u32)wqes_needed) {
					tcph->fin = 0;
					tcph->psh = 0;
					tcph->rst = 0;
					tcph->urg = 0;
				}
				if (wqe_count) {
					tcph->syn = 0;
				}
				tcph->seq = htonl(curr_tcp_seq);
				wqe_fragment_length[0] =
						cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
						original_first_length));

				wqe_fragment_index = 1;
				if ((wqe_count==0) && (skb_headlen(skb) >
						original_first_length)) {
					/* First segment carries the linear
					 * payload beyond the headers. */
					set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
					bus_address = pci_map_single(nesdev->pcidev,
							skb->data + original_first_length,
							skb_headlen(skb) - original_first_length,
							PCI_DMA_TODEVICE);
					wqe_fragment_length[wqe_fragment_index++] =
						cpu_to_le16(skb_headlen(skb) - original_first_length);
					wqe_fragment_length[wqe_fragment_index] = 0;
					set_wqe_64bit_value(nic_sqe->wqe_words,
							NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
							bus_address);
					tso_wqe_length += skb_headlen(skb) -
							original_first_length;
				}
				/* Fill remaining fragment slots from the
				 * pre-mapped page fragments. */
				while (wqe_fragment_index < 5) {
					wqe_fragment_length[wqe_fragment_index] =
							cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index]));
					set_wqe_64bit_value(nic_sqe->wqe_words,
						NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
						(u64)tso_bus_address[tso_frag_index]);
					wqe_fragment_index++;
					tso_wqe_length += skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index++]);
					if (wqe_fragment_index < 5)
						wqe_fragment_length[wqe_fragment_index] = 0;
					if (tso_frag_index == tso_frag_count)
						break;
				}
				/* Only the final WQE owns the skb for freeing
				 * at completion time. */
				if ((wqe_count+1) == (u32)wqes_needed) {
					nesnic->tx_skb[nesnic->sq_head] = skb;
				} else {
					nesnic->tx_skb[nesnic->sq_head] = NULL;
				}
				wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_shinfo(skb)->gso_size;
				if ((tso_wqe_length + original_first_length) > skb_shinfo(skb)->gso_size) {
					wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE;
				} else {
					/* Short trailing segment: fix up the IP
					 * total length instead of using LSO. */
					iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset);
				}

				set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX,
						 wqe_misc);
				set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
						((u32)tcph->doff) | (((u32)hoffset) << 4));

				set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
						tso_wqe_length + original_first_length);
				curr_tcp_seq += tso_wqe_length;
				nesnic->sq_head++;
				nesnic->sq_head &= nesnic->sq_size-1;
			}
		} else {
			/* Too many fragments but not GSO: flatten the skb. */
			nesvnic->linearized_skbs++;
			hoffset =
					skb_transport_header(skb) - skb->data;
			nhoffset = skb_network_header(skb) - skb->data;
			/* Linearize then restore header offsets before
			 * resubmitting through the single-WQE path. */
			skb_linearize(skb);
			skb_set_transport_header(skb, hoffset);
			skb_set_network_header(skb, nhoffset);
			send_rc = nes_nic_send(skb, netdev);
			if (send_rc != NETDEV_TX_OK)
				return NETDEV_TX_OK;
		}
	} else {
		send_rc = nes_nic_send(skb, netdev);
		if (send_rc != NETDEV_TX_OK)
			return NETDEV_TX_OK;
	}

	barrier();

	/* Ring the WQE doorbell once for all WQEs posted above. */
	if (wqe_count)
		nes_write32(nesdev->regs+NES_WQE_ALLOC,
				(wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);

	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}


/**
 * nes_netdev_get_stats - ndo_get_stats hook.
 *
 * Reads the per-NIC endnode and per-MAC hardware counters and folds them
 * into the software accumulators and netstats.
 * NOTE(review): the read-then-add pattern suggests these hardware
 * counters are clear-on-read — confirm against the register spec.
 */
static struct net_device_stats *nes_netdev_get_stats(struct net_device *netdev)
{
	struct nes_vnic *nesvnic = netdev_priv(netdev);
	struct nes_device *nesdev = nesvnic->nesdev;
	u64 u64temp;
	u32 u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_ENDNODE0_NSTAT_RX_DISCARD + (nesvnic->nic_index*0x200));
	nesvnic->netstats.rx_dropped += u32temp;
	nesvnic->endnode_nstat_rx_discard += u32temp;

	/* 64-bit counters are split across LO/HI register pairs. */
	u64temp = (u64)nes_read_indexed(nesdev,
			NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO + (nesvnic->nic_index*0x200));
	u64temp += ((u64)nes_read_indexed(nesdev,
			NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;

	nesvnic->endnode_nstat_rx_octets += u64temp;
	nesvnic->netstats.rx_bytes += u64temp;

	u64temp = (u64)nes_read_indexed(nesdev,
			NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO + (nesvnic->nic_index*0x200));
	u64temp += ((u64)nes_read_indexed(nesdev,
			NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;

	nesvnic->endnode_nstat_rx_frames += u64temp;
	nesvnic->netstats.rx_packets += u64temp;

	u64temp = (u64)nes_read_indexed(nesdev,
			NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO + (nesvnic->nic_index*0x200));
	u64temp += ((u64)nes_read_indexed(nesdev,
			NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;

	nesvnic->endnode_nstat_tx_octets += u64temp;
	nesvnic->netstats.tx_bytes += u64temp;

	u64temp = (u64)nes_read_indexed(nesdev,
			NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO + (nesvnic->nic_index*0x200));
	u64temp += ((u64)nes_read_indexed(nesdev,
			NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;

	nesvnic->endnode_nstat_tx_frames += u64temp;
	nesvnic->netstats.tx_packets += u64temp;

	/* Per-MAC error counters; each also feeds the mac_rx_errors total. */
	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->netstats.rx_dropped += u32temp;
	nesvnic->nesdev->mac_rx_errors += u32temp;
	nesvnic->nesdev->mac_rx_short_frames += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->netstats.rx_dropped += u32temp;
	nesvnic->nesdev->mac_rx_errors += u32temp;
	nesvnic->nesdev->mac_rx_oversized_frames += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->netstats.rx_dropped += u32temp;
	nesvnic->nesdev->mac_rx_errors += u32temp;
	nesvnic->nesdev->mac_rx_jabber_frames += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->netstats.rx_dropped += u32temp;
	nesvnic->nesdev->mac_rx_errors += u32temp;
	nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->netstats.rx_length_errors += u32temp;
	nesvnic->nesdev->mac_rx_errors += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->nesdev->mac_rx_errors += u32temp;
	nesvnic->nesdev->mac_rx_crc_errors += u32temp;
	nesvnic->netstats.rx_crc_errors += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->nesdev->mac_tx_errors += u32temp;
	nesvnic->netstats.tx_errors += u32temp;

	return &nesvnic->netstats;
}


/**
 * nes_netdev_tx_timeout - ndo_tx_timeout hook; log-only, no recovery here.
 */
static void nes_netdev_tx_timeout(struct net_device *netdev)
{
	struct nes_vnic *nesvnic = netdev_priv(netdev);

	if (netif_msg_timer(nesvnic))
		nes_debug(NES_DBG_NIC_TX, "%s: tx timeout\n", netdev->name);
}


/**
 * nes_netdev_set_mac_address - ndo_set_mac_address hook.
 *
 * Validates the new address, stores it in dev_addr and reprograms the
 * perfect-filter register pair of every NIC QP owned by this vnic.
 * Returns 0 or -EADDRNOTAVAIL.
 */
static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
{
	struct nes_vnic *nesvnic = netdev_priv(netdev);
	struct nes_device *nesdev = nesvnic->nesdev;
	struct sockaddr *mac_addr = p;
	int i;
	u32 macaddr_low;
	u16 macaddr_high;

	if (!is_valid_ether_addr(mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
	printk(PFX "%s: Address length = %d, Address = %pM\n",
	       __func__, netdev->addr_len, mac_addr->sa_data);
	/* Split into the 16-bit-high / 32-bit-low register halves. */
	macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
	macaddr_high += (u16)netdev->dev_addr[1];
	macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
	macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
	macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
	macaddr_low += (u32)netdev->dev_addr[5];

	for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
		/* 0xf marks the end of this vnic's QP list. */
		if (nesvnic->qp_nic_index[i] == 0xf) {
			break;
		}
		nes_write_indexed(nesdev,
				NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
				macaddr_low);
		nes_write_indexed(nesdev,
				NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
				(u32)macaddr_high | NES_MAC_ADDR_VALID |
				((((u32)nesvnic->nic_index) << 16)));
	}
	return 0;
}


/* Enable "receive all multicast" and disable "receive all unicast" for
 * the vnic identified by @nic_active_bit. */
static void set_allmulti(struct nes_device *nesdev, u32 nic_active_bit)
{
	u32 nic_active;

	nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
	nic_active |= nic_active_bit;
	nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
	nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
	nic_active &= ~nic_active_bit;
	nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}

/* Address of the index'th ETH_ALEN-byte entry in a packed address list. */
#define get_addr(addrs, index) ((addrs) + (index) * ETH_ALEN)

/**
 * nes_netdev_set_multicast_list - ndo_set_rx_mode hook.
 *
 * Programs promiscuous/allmulti filter bits, or loads the device's
 * multicast list into the adapter-shared perfect-filter table (falling
 * back to allmulti on allocation failure or when the table is full).
 * Runs under nesadapter->resource_lock because the PFT is shared across
 * vnics.
 */
static void nes_netdev_set_multicast_list(struct net_device *netdev)
{
	struct nes_vnic *nesvnic = netdev_priv(netdev);
	struct nes_device *nesdev = nesvnic->nesdev;
	struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
	u32 nic_active_bit;
	u32 nic_active;
	u32 perfect_filter_register_address;
	u32 macaddr_low;
	u16 macaddr_high;
	u8 mc_all_on = 0;
	u8 mc_index;
	int mc_nic_index = -1;
	u8 pft_entries_preallocated = max(nesadapter->adapter_fcn_count *
					nics_per_function, 4);
	u8 max_pft_entries_avaiable = NES_PFT_SIZE -
		pft_entries_preallocated;
	unsigned long flags;
	int mc_count = netdev_mc_count(netdev);

	spin_lock_irqsave(&nesadapter->resource_lock, flags);
	nic_active_bit = 1 << nesvnic->nic_index;

	if (netdev->flags & IFF_PROMISC) {
		/* Promiscuous: accept everything, unicast and multicast. */
		nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
		nic_active |= nic_active_bit;
		nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
		nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
		nic_active |= nic_active_bit;
		nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
		mc_all_on = 1;
	} else if ((netdev->flags & IFF_ALLMULTI) ||
			   (nesvnic->nic_index > 3)) {
		/* nic_index > 3 has no PFT slots; force allmulti. */
		set_allmulti(nesdev, nic_active_bit);
		mc_all_on = 1;
	} else {
		nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
		nic_active &= ~nic_active_bit;
		nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
		nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
		nic_active &= ~nic_active_bit;
		nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
	}

	nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscuous = %d, All Multicast = %d.\n",
		  mc_count, !!(netdev->flags & IFF_PROMISC),
		  !!(netdev->flags & IFF_ALLMULTI));

	if (!mc_all_on) {
		char *addrs;
		int i;
		struct netdev_hw_addr *ha;

		/* Snapshot the mc list into a flat buffer (GFP_ATOMIC:
		 * we hold a spinlock). */
		addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
		if (!addrs) {
			set_allmulti(nesdev, nic_active_bit);
			goto unlock;
		}
		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(get_addr(addrs, i++), ha->addr, ETH_ALEN);

		perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
						pft_entries_preallocated * 0x8;
		for (i = 0, mc_index = 0; mc_index < max_pft_entries_avaiable;
		     mc_index++) {
			/* Let an optional mcrq filter veto addresses. */
			while (i < mc_count && nesvnic->mcrq_mcast_filter &&
			((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic,
					get_addr(addrs, i++))) == 0));
			if (mc_nic_index < 0)
				mc_nic_index = nesvnic->nic_index;
			/* Skip PFT slots already claimed by another vnic. */
			while (nesadapter->pft_mcast_map[mc_index] < 16 &&
				nesadapter->pft_mcast_map[mc_index] !=
					nesvnic->nic_index &&
					mc_index < max_pft_entries_avaiable) {
				nes_debug(NES_DBG_NIC_RX,
					"mc_index=%d skipping nic_index=%d, "
					"used for=%d \n", mc_index,
					nesvnic->nic_index,
					nesadapter->pft_mcast_map[mc_index]);
				mc_index++;
			}
			if (mc_index >= max_pft_entries_avaiable)
				break;
			if (i < mc_count) {
				char *addr = get_addr(addrs, i++);

				nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %pM to register 0x%04X nic_idx=%d\n",
					  addr,
					  perfect_filter_register_address+(mc_index * 8),
					  mc_nic_index);
				macaddr_high = ((u8) addr[0]) << 8;
				macaddr_high += (u8) addr[1];
				macaddr_low = ((u8) addr[2]) << 24;
				macaddr_low += ((u8) addr[3]) << 16;
				macaddr_low += ((u8) addr[4]) << 8;
				macaddr_low += (u8) addr[5];
				nes_write_indexed(nesdev,
						perfect_filter_register_address+(mc_index * 8),
						macaddr_low);
				nes_write_indexed(nesdev,
						perfect_filter_register_address+4+(mc_index * 8),
						(u32)macaddr_high | NES_MAC_ADDR_VALID |
						((((u32)(1<<mc_nic_index)) << 16)));
				nesadapter->pft_mcast_map[mc_index] =
							nesvnic->nic_index;
			} else {
				/* No more addresses: invalidate this slot. */
				nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n",
						  perfect_filter_register_address+(mc_index * 8));
				nes_write_indexed(nesdev,
						perfect_filter_register_address+4+(mc_index * 8),
						0);
				nesadapter->pft_mcast_map[mc_index] = 255;
			}
		}
		kfree(addrs);
		/* PFT is not large enough */
		if (i < mc_count)
			set_allmulti(nesdev, nic_active_bit);
	}

unlock:
	spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
}


/**
 * nes_netdev_change_mtu - ndo_change_mtu hook.
 *
 * Validates the new MTU, retunes the interrupt-moderation timer for
 * jumbo frames and, if the interface is running, bounces it while
 * preserving the MULTICAST_ALL/UNICAST_ALL filter bits (which
 * nes_netdev_stop clears).  Returns 0 or -EINVAL.
 */
static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nes_vnic	*nesvnic = netdev_priv(netdev);
	struct nes_device *nesdev = nesvnic->nesdev;
	int ret = 0;
	u8 jumbomode = 0;
	u32 nic_active;
	u32 nic_active_bit;
	u32 uc_all_active;
	u32 mc_all_active;

	if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
		return -EINVAL;

	netdev->mtu = new_mtu;
	nesvnic->max_frame_size	= new_mtu + VLAN_ETH_HLEN;

	if (netdev->mtu	> 1500)	{
		jumbomode = 1;
	}
	nes_nic_init_timer_defaults(nesdev, jumbomode);

	if (netif_running(netdev)) {
		nic_active_bit = 1 << nesvnic->nic_index;
		mc_all_active = nes_read_indexed(nesdev,
				NES_IDX_NIC_MULTICAST_ALL) & nic_active_bit;
		uc_all_active = nes_read_indexed(nesdev,
				NES_IDX_NIC_UNICAST_ALL) & nic_active_bit;

		nes_netdev_stop(netdev);
		nes_netdev_open(netdev);

		/* Restore the *_ALL bits cleared by nes_netdev_stop(). */
		nic_active = nes_read_indexed(nesdev,
					NES_IDX_NIC_MULTICAST_ALL);
		nic_active |= mc_all_active;
		nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL,
							nic_active);

		nic_active = nes_read_indexed(nesdev,
					NES_IDX_NIC_UNICAST_ALL);
		nic_active |= uc_all_active;
		nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
	}

	return ret;
}


/* Names for the ethtool statistics; the order must match the order the
 * values are written in nes_netdev_get_ethtool_stats(). */
static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
	"Link Change Interrupts",
	"Linearized SKBs",
	"T/GSO Requests",
	"Pause Frames Sent",
	"Pause Frames Received",
	"Internal Routing Errors",
	"SQ SW Dropped SKBs",
	"SQ Full",
	"Segmented TSO Requests",
	"Rx Symbol Errors",
	"Rx Jabber Errors",
	"Rx Oversized Frames",
	"Rx Short Frames",
	"Rx Length Errors",
	"Rx CRC Errors",
	"Rx Port Discard",
	"Endnode Rx Discards",
	"Endnode Rx Octets",
	"Endnode Rx Frames",
	"Endnode Tx Octets",
	"Endnode Tx Frames",
	"Tx Errors",
	"mh detected",
	"mh pauses",
	"Retransmission Count",
	"CM Connects",
	"CM Accepts",
	"Disconnects",
	"Connected Events",
	"Connect Requests",
	"CM Rejects",
	"ModifyQP Timeouts",
	"CreateQPs",
	"SW DestroyQPs",
	"DestroyQPs",
	"CM Closes",
	"CM Packets Sent",
	"CM Packets Bounced",
	"CM Packets Created",
	"CM Packets Rcvd",
	"CM Packets Dropped",
	"CM Packets Retrans",
	"CM Listens Created",
	"CM Listens Destroyed",
	"CM Backlog Drops",
	"CM Loopbacks",
	"CM Nodes Created",
	"CM Nodes Destroyed",
	"CM Accel Drops",
	"CM Resets Received",
	"Free 4Kpbls",
	"Free 256pbls",
	"Timer Inits",
	"LRO aggregated",
	"LRO flushed",
	"LRO no_desc",
	"PAU CreateQPs",
	"PAU DestroyQPs",
};
#define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)


/**
 * nes_netdev_get_sset_count - report how many ethtool stats exist.
 */
static int nes_netdev_get_sset_count(struct net_device *netdev, int stringset)
{
	if (stringset == ETH_SS_STATS)
		return NES_ETHTOOL_STAT_COUNT;
	else
		return -EINVAL;
}


/**
 * nes_netdev_get_strings - copy the stat name table out for ethtool.
 */
static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
		u8 *ethtool_strings)
{
	if (stringset == ETH_SS_STATS)
		memcpy(ethtool_strings,
				&nes_ethtool_stringset,
				sizeof(nes_ethtool_stringset));
}


/**
 * nes_netdev_get_ethtool_stats - fill target_stat_values[] in the exact
 * order of nes_ethtool_stringset[].  Hardware counters are folded into
 * the software accumulators as they are read, then the accumulated
 * values are exported.
 */
static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
		struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
{
	u64 u64temp;
	struct nes_vnic *nesvnic = netdev_priv(netdev);
	struct nes_device *nesdev = nesvnic->nesdev;
	struct nes_adapter *nesadapter = nesdev->nesadapter;
	u32 nic_count;
	u32 u32temp;
	u32 index = 0;

	target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
	target_stat_values[index] = nesvnic->nesdev->link_status_interrupts;
	target_stat_values[++index] = nesvnic->linearized_skbs;
	target_stat_values[++index] = nesvnic->tso_requests;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->nesdev->mac_pause_frames_sent += u32temp;
	target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_sent;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->nesdev->mac_pause_frames_received += u32temp;

	/* Port discard counters use a 0x40 stride, unlike the 0x200 MAC
	 * counter stride. */
	u32temp = nes_read_indexed(nesdev,
			NES_IDX_PORT_RX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
	nesvnic->nesdev->port_rx_discards += u32temp;
	nesvnic->netstats.rx_dropped += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_PORT_TX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
	nesvnic->nesdev->port_tx_discards += u32temp;
	nesvnic->netstats.tx_dropped += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->netstats.rx_dropped += u32temp;
	nesvnic->nesdev->mac_rx_errors += u32temp;
	nesvnic->nesdev->mac_rx_short_frames += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->netstats.rx_dropped += u32temp;
	nesvnic->nesdev->mac_rx_errors += u32temp;
	nesvnic->nesdev->mac_rx_oversized_frames += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->netstats.rx_dropped += u32temp;
	nesvnic->nesdev->mac_rx_errors += u32temp;
	nesvnic->nesdev->mac_rx_jabber_frames += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->netstats.rx_dropped += u32temp;
	nesvnic->nesdev->mac_rx_errors += u32temp;
	nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->netstats.rx_length_errors += u32temp;
	nesvnic->nesdev->mac_rx_errors += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->nesdev->mac_rx_errors += u32temp;
	nesvnic->nesdev->mac_rx_crc_errors += u32temp;
	nesvnic->netstats.rx_crc_errors += u32temp;

	u32temp = nes_read_indexed(nesdev,
			NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
	nesvnic->nesdev->mac_tx_errors += u32temp;
	nesvnic->netstats.tx_errors += u32temp;

	/* Accumulate the endnode counters of every NIC QP on this vnic. */
	for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
		if (nesvnic->qp_nic_index[nic_count] == 0xf)
			break;

		u32temp = nes_read_indexed(nesdev,
				NES_IDX_ENDNODE0_NSTAT_RX_DISCARD +
				(nesvnic->qp_nic_index[nic_count]*0x200));
		nesvnic->netstats.rx_dropped += u32temp;
		nesvnic->endnode_nstat_rx_discard += u32temp;

		u64temp = (u64)nes_read_indexed(nesdev,
				NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO +
				(nesvnic->qp_nic_index[nic_count]*0x200));
		u64temp += ((u64)nes_read_indexed(nesdev,
				NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI +
				(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;

		nesvnic->endnode_nstat_rx_octets +=
				u64temp;
		nesvnic->netstats.rx_bytes += u64temp;

		u64temp = (u64)nes_read_indexed(nesdev,
				NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO +
				(nesvnic->qp_nic_index[nic_count]*0x200));
		u64temp += ((u64)nes_read_indexed(nesdev,
				NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI +
				(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;

		nesvnic->endnode_nstat_rx_frames += u64temp;
		nesvnic->netstats.rx_packets += u64temp;

		u64temp = (u64)nes_read_indexed(nesdev,
				NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO +
				(nesvnic->qp_nic_index[nic_count]*0x200));
		u64temp += ((u64)nes_read_indexed(nesdev,
				NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI +
				(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;

		nesvnic->endnode_nstat_tx_octets += u64temp;
		nesvnic->netstats.tx_bytes += u64temp;

		u64temp = (u64)nes_read_indexed(nesdev,
				NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO +
				(nesvnic->qp_nic_index[nic_count]*0x200));
		u64temp += ((u64)nes_read_indexed(nesdev,
				NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI +
				(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;

		nesvnic->endnode_nstat_tx_frames += u64temp;
		nesvnic->netstats.tx_packets += u64temp;

		u32temp = nes_read_indexed(nesdev,
				NES_IDX_IPV4_TCP_REXMITS +
				(nesvnic->qp_nic_index[nic_count]*0x200));
		nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
	}

	/* Export order below must track nes_ethtool_stringset[]. */
	target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_received;
	target_stat_values[++index] = nesdev->nesadapter->nic_rx_eth_route_err;
	target_stat_values[++index] = nesvnic->tx_sw_dropped;
	target_stat_values[++index] = nesvnic->sq_full;
	target_stat_values[++index] = nesvnic->segmented_tso_requests;
	target_stat_values[++index] = nesvnic->nesdev->mac_rx_symbol_err_frames;
	target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
	target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
	target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
	target_stat_values[++index] = nesvnic->netstats.rx_length_errors;
	target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors;
	target_stat_values[++index] = nesvnic->nesdev->port_rx_discards;
	target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
	target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
	target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
	target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
	target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
	target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors;
	target_stat_values[++index] = mh_detected;
	target_stat_values[++index] = mh_pauses_sent;
	target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
	target_stat_values[++index] = atomic_read(&cm_connects);
	target_stat_values[++index] = atomic_read(&cm_accepts);
	target_stat_values[++index] = atomic_read(&cm_disconnects);
	target_stat_values[++index] = atomic_read(&cm_connecteds);
	target_stat_values[++index] = atomic_read(&cm_connect_reqs);
	target_stat_values[++index] = atomic_read(&cm_rejects);
	target_stat_values[++index] = atomic_read(&mod_qp_timouts);
	target_stat_values[++index] = atomic_read(&qps_created);
	target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
	target_stat_values[++index] = atomic_read(&qps_destroyed);
	target_stat_values[++index] = atomic_read(&cm_closes);
	target_stat_values[++index] = cm_packets_sent;
	target_stat_values[++index] = cm_packets_bounced;
	target_stat_values[++index] = cm_packets_created;
	target_stat_values[++index] = cm_packets_received;
	target_stat_values[++index] = cm_packets_dropped;
	target_stat_values[++index] = cm_packets_retrans;
	target_stat_values[++index] = atomic_read(&cm_listens_created);
	target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
	target_stat_values[++index] = cm_backlog_drops;
	target_stat_values[++index] = atomic_read(&cm_loopbacks);
	target_stat_values[++index] = atomic_read(&cm_nodes_created);
	target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
	target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
	target_stat_values[++index] =
atomic_read(&cm_resets_recvd); target_stat_values[++index] = nesadapter->free_4kpbl; target_stat_values[++index] = nesadapter->free_256pbl; target_stat_values[++index] = int_mod_timer_init; target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; target_stat_values[++index] = atomic_read(&pau_qps_created); target_stat_values[++index] = atomic_read(&pau_qps_destroyed); } /** * nes_netdev_get_drvinfo */ static void nes_netdev_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter; strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev), sizeof(drvinfo->bus_info)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%u.%u", nesadapter->firmware_version >> 16, nesadapter->firmware_version & 0x000000ff); strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); } /** * nes_netdev_set_coalesce */ static int nes_netdev_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *et_coalesce) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; unsigned long flags; spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); if (et_coalesce->rx_max_coalesced_frames_low) { shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low; } if (et_coalesce->rx_max_coalesced_frames_irq) { shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq; } if (et_coalesce->rx_max_coalesced_frames_high) { shared_timer->threshold_high = et_coalesce->rx_max_coalesced_frames_high; } if (et_coalesce->rx_coalesce_usecs_low) { shared_timer->timer_in_use_min = 
et_coalesce->rx_coalesce_usecs_low; } if (et_coalesce->rx_coalesce_usecs_high) { shared_timer->timer_in_use_max = et_coalesce->rx_coalesce_usecs_high; } spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); /* using this to drive total interrupt moderation */ nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq; if (et_coalesce->use_adaptive_rx_coalesce) { nesadapter->et_use_adaptive_rx_coalesce = 1; nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC; nesadapter->et_rx_coalesce_usecs_irq = 0; if (et_coalesce->pkt_rate_low) { nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low; } } else { nesadapter->et_use_adaptive_rx_coalesce = 0; nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT; if (nesadapter->et_rx_coalesce_usecs_irq) { nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8))); } } return 0; } /** * nes_netdev_get_coalesce */ static int nes_netdev_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *et_coalesce) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; struct ethtool_coalesce temp_et_coalesce; struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; unsigned long flags; memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce)); temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq; temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce; temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval; temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low; spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low; temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target; temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high; 
temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min; temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max; if (nesadapter->et_use_adaptive_rx_coalesce) { temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use; } spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce)); return 0; } /** * nes_netdev_get_pauseparam */ static void nes_netdev_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *et_pauseparam) { struct nes_vnic *nesvnic = netdev_priv(netdev); et_pauseparam->autoneg = 0; et_pauseparam->rx_pause = (nesvnic->nesdev->disable_rx_flow_control == 0) ? 1:0; et_pauseparam->tx_pause = (nesvnic->nesdev->disable_tx_flow_control == 0) ? 1:0; } /** * nes_netdev_set_pauseparam */ static int nes_netdev_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *et_pauseparam) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; u32 u32temp; if (et_pauseparam->autoneg) { /* TODO: should return unsupported */ return 0; } if ((et_pauseparam->tx_pause == 1) && (nesdev->disable_tx_flow_control == 1)) { u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp); nesdev->disable_tx_flow_control = 0; } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) { u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp); nesdev->disable_tx_flow_control = 1; } if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) { u32temp = nes_read_indexed(nesdev, NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40)); u32temp &= 
~NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE; nes_write_indexed(nesdev, NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp); nesdev->disable_rx_flow_control = 0; } else if ((et_pauseparam->rx_pause == 0) && (nesdev->disable_rx_flow_control == 0)) { u32temp = nes_read_indexed(nesdev, NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40)); u32temp |= NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE; nes_write_indexed(nesdev, NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp); nesdev->disable_rx_flow_control = 1; } return 0; } /** * nes_netdev_get_settings */ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; u32 mac_index = nesdev->mac_index; u8 phy_type = nesadapter->phy_type[mac_index]; u8 phy_index = nesadapter->phy_index[mac_index]; u16 phy_data; et_cmd->duplex = DUPLEX_FULL; et_cmd->port = PORT_MII; et_cmd->maxtxpkt = 511; et_cmd->maxrxpkt = 511; if (nesadapter->OneG_Mode) { ethtool_cmd_speed_set(et_cmd, SPEED_1000); if (phy_type == NES_PHY_TYPE_PUMA_1G) { et_cmd->supported = SUPPORTED_1000baseT_Full; et_cmd->advertising = ADVERTISED_1000baseT_Full; et_cmd->autoneg = AUTONEG_DISABLE; et_cmd->transceiver = XCVR_INTERNAL; et_cmd->phy_address = mac_index; } else { unsigned long flags; et_cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg; et_cmd->advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg; spin_lock_irqsave(&nesadapter->phy_lock, flags); nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); spin_unlock_irqrestore(&nesadapter->phy_lock, flags); if (phy_data & 0x1000) et_cmd->autoneg = AUTONEG_ENABLE; else et_cmd->autoneg = AUTONEG_DISABLE; et_cmd->transceiver = XCVR_EXTERNAL; et_cmd->phy_address = phy_index; } return 0; } if ((phy_type == NES_PHY_TYPE_ARGUS) || (phy_type == NES_PHY_TYPE_SFP_D) || (phy_type == NES_PHY_TYPE_KR)) { et_cmd->transceiver = XCVR_EXTERNAL; 
et_cmd->port = PORT_FIBRE; et_cmd->supported = SUPPORTED_FIBRE; et_cmd->advertising = ADVERTISED_FIBRE; et_cmd->phy_address = phy_index; } else { et_cmd->transceiver = XCVR_INTERNAL; et_cmd->supported = SUPPORTED_10000baseT_Full; et_cmd->advertising = ADVERTISED_10000baseT_Full; et_cmd->phy_address = mac_index; } ethtool_cmd_speed_set(et_cmd, SPEED_10000); et_cmd->autoneg = AUTONEG_DISABLE; return 0; } /** * nes_netdev_set_settings */ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; if ((nesadapter->OneG_Mode) && (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) { unsigned long flags; u16 phy_data; u8 phy_index = nesadapter->phy_index[nesdev->mac_index]; spin_lock_irqsave(&nesadapter->phy_lock, flags); nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); if (et_cmd->autoneg) { /* Turn on Full duplex, Autoneg, and restart autonegotiation */ phy_data |= 0x1300; } else { /* Turn off autoneg */ phy_data &= ~0x1000; } nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data); spin_unlock_irqrestore(&nesadapter->phy_lock, flags); } return 0; } static const struct ethtool_ops nes_ethtool_ops = { .get_link = ethtool_op_get_link, .get_settings = nes_netdev_get_settings, .set_settings = nes_netdev_set_settings, .get_strings = nes_netdev_get_strings, .get_sset_count = nes_netdev_get_sset_count, .get_ethtool_stats = nes_netdev_get_ethtool_stats, .get_drvinfo = nes_netdev_get_drvinfo, .get_coalesce = nes_netdev_get_coalesce, .set_coalesce = nes_netdev_set_coalesce, .get_pauseparam = nes_netdev_get_pauseparam, .set_pauseparam = nes_netdev_set_pauseparam, }; static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features) { struct nes_adapter *nesadapter = nesdev->nesadapter; u32 u32temp; unsigned long flags; 
spin_lock_irqsave(&nesadapter->phy_lock, flags); nes_debug(NES_DBG_NETDEV, "%s: %s\n", __func__, netdev->name); /* Enable/Disable VLAN Stripping */ u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG); if (features & NETIF_F_HW_VLAN_CTAG_RX) u32temp &= 0xfdffffff; else u32temp |= 0x02000000; nes_write_indexed(nesdev, NES_IDX_PCIX_DIAG, u32temp); spin_unlock_irqrestore(&nesadapter->phy_lock, flags); } static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ if (features & NETIF_F_HW_VLAN_CTAG_RX) features |= NETIF_F_HW_VLAN_CTAG_TX; else features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } static int nes_set_features(struct net_device *netdev, netdev_features_t features) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; u32 changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_CTAG_RX) nes_vlan_mode(netdev, nesdev, features); return 0; } static const struct net_device_ops nes_netdev_ops = { .ndo_open = nes_netdev_open, .ndo_stop = nes_netdev_stop, .ndo_start_xmit = nes_netdev_start_xmit, .ndo_get_stats = nes_netdev_get_stats, .ndo_tx_timeout = nes_netdev_tx_timeout, .ndo_set_mac_address = nes_netdev_set_mac_address, .ndo_set_rx_mode = nes_netdev_set_multicast_list, .ndo_change_mtu = nes_netdev_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = nes_fix_features, .ndo_set_features = nes_set_features, }; /** * nes_netdev_init - initialize network device */ struct net_device *nes_netdev_init(struct nes_device *nesdev, void __iomem *mmio_addr) { u64 u64temp; struct nes_vnic *nesvnic; struct net_device *netdev; struct nic_qp_map *curr_qp_map; u8 phy_type = nesdev->nesadapter->phy_type[nesdev->mac_index]; netdev = alloc_etherdev(sizeof(struct nes_vnic)); if (!netdev) { printk(KERN_ERR PFX "nesvnic etherdev alloc 
failed"); return NULL; } nesvnic = netdev_priv(netdev); nes_debug(NES_DBG_INIT, "netdev = %p, %s\n", netdev, netdev->name); SET_NETDEV_DEV(netdev, &nesdev->pcidev->dev); netdev->watchdog_timeo = NES_TX_TIMEOUT; netdev->irq = nesdev->pcidev->irq; netdev->mtu = ETH_DATA_LEN; netdev->hard_header_len = ETH_HLEN; netdev->addr_len = ETH_ALEN; netdev->type = ARPHRD_ETHER; netdev->netdev_ops = &nes_netdev_ops; netdev->ethtool_ops = &nes_ethtool_ops; netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128); nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n"); /* Fill in the port structure */ nesvnic->netdev = netdev; nesvnic->nesdev = nesdev; nesvnic->msg_enable = netif_msg_init(debug, default_msg); nesvnic->netdev_index = nesdev->netdev_count; nesvnic->perfect_filter_index = nesdev->nesadapter->netdev_count; nesvnic->max_frame_size = netdev->mtu + netdev->hard_header_len + VLAN_HLEN; curr_qp_map = nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)]; nesvnic->nic.qp_id = curr_qp_map[nesdev->netdev_count].qpid; nesvnic->nic_index = curr_qp_map[nesdev->netdev_count].nic_index; nesvnic->logical_port = curr_qp_map[nesdev->netdev_count].logical_port; /* Setup the burned in MAC address */ u64temp = (u64)nesdev->nesadapter->mac_addr_low; u64temp += ((u64)nesdev->nesadapter->mac_addr_high) << 32; u64temp += nesvnic->nic_index; netdev->dev_addr[0] = (u8)(u64temp>>40); netdev->dev_addr[1] = (u8)(u64temp>>32); netdev->dev_addr[2] = (u8)(u64temp>>24); netdev->dev_addr[3] = (u8)(u64temp>>16); netdev->dev_addr[4] = (u8)(u64temp>>8); netdev->dev_addr[5] = (u8)u64temp; netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) netdev->hw_features |= NETIF_F_TSO; netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_features |= NETIF_F_LRO; nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d," " 
nic_index = %d, logical_port = %d, mac_index = %d.\n", nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id, nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index); if (nesvnic->nesdev->nesadapter->port_count == 1 && nesvnic->nesdev->nesadapter->adapter_fcn_count == 1) { nesvnic->qp_nic_index[0] = nesvnic->nic_index; nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1; if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) { nesvnic->qp_nic_index[2] = 0xf; nesvnic->qp_nic_index[3] = 0xf; } else { nesvnic->qp_nic_index[2] = nesvnic->nic_index + 2; nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3; } } else { if (nesvnic->nesdev->nesadapter->port_count == 2 || (nesvnic->nesdev->nesadapter->port_count == 1 && nesvnic->nesdev->nesadapter->adapter_fcn_count == 2)) { nesvnic->qp_nic_index[0] = nesvnic->nic_index; nesvnic->qp_nic_index[1] = nesvnic->nic_index + 2; nesvnic->qp_nic_index[2] = 0xf; nesvnic->qp_nic_index[3] = 0xf; } else { nesvnic->qp_nic_index[0] = nesvnic->nic_index; nesvnic->qp_nic_index[1] = 0xf; nesvnic->qp_nic_index[2] = 0xf; nesvnic->qp_nic_index[3] = 0xf; } } nesvnic->next_qp_nic_index = 0; if (nesdev->netdev_count == 0) { nesvnic->rdma_enabled = 1; } else { nesvnic->rdma_enabled = 0; } nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id; init_timer(&nesvnic->event_timer); nesvnic->event_timer.function = NULL; spin_lock_init(&nesvnic->tx_lock); spin_lock_init(&nesvnic->port_ibevent_lock); nesdev->netdev[nesdev->netdev_count] = netdev; nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n", nesvnic, nesdev->mac_index); list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]); if ((nesdev->netdev_count == 0) && ((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) || ((phy_type == NES_PHY_TYPE_PUMA_1G) && (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) || ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) { u32 u32temp; u32 link_mask = 0; u32 
link_val = 0; u16 temp_phy_data; u16 phy_data = 0; unsigned long flags; u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesdev->mac_index & 1))); if (phy_type != NES_PHY_TYPE_PUMA_1G) { u32temp |= 0x00200000; nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesdev->mac_index & 1)), u32temp); } /* Check and set linkup here. This is for back to back */ /* configuration where second port won't get link interrupt */ switch (phy_type) { case NES_PHY_TYPE_PUMA_1G: if (nesdev->mac_index < 2) { link_mask = 0x01010000; link_val = 0x01010000; } else { link_mask = 0x02020000; link_val = 0x02020000; } break; case NES_PHY_TYPE_SFP_D: spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags); nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 0x9003); temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 3, 0x0021); nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 3, 0x0021); phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 
0x4 : 0x0; break; default: link_mask = 0x0f1f0000; link_val = 0x0f0f0000; break; } u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesdev->mac_index & 1))); if (phy_type == NES_PHY_TYPE_SFP_D) { if (phy_data & 0x0004) nesvnic->linkup = 1; } else { if ((u32temp & link_mask) == link_val) nesvnic->linkup = 1; } /* clear the MAC interrupt status, assumes direct logical to physical mapping */ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index)); nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp); nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp); nes_init_phy(nesdev); } nes_vlan_mode(netdev, nesdev, netdev->features); return netdev; } /** * nes_netdev_destroy - destroy network device structure */ void nes_netdev_destroy(struct net_device *netdev) { struct nes_vnic *nesvnic = netdev_priv(netdev); /* make sure 'stop' method is called by Linux stack */ /* nes_netdev_stop(netdev); */ list_del(&nesvnic->list); if (nesvnic->of_device_registered) { nes_destroy_ofa_device(nesvnic->nesibdev); } free_netdev(netdev); } /** * nes_nic_cm_xmit -- CM calls this to send out pkts */ int nes_nic_cm_xmit(struct sk_buff *skb, struct net_device *netdev) { int ret; skb->dev = netdev; ret = dev_queue_xmit(skb); if (ret) { nes_debug(NES_DBG_CM, "Bad return code from dev_queue_xmit %d\n", ret); } return ret; }
gpl-2.0
nikhil18/lightning-kernel-bacon
arch/arm/mach-msm/ocmem.c
196
21692
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/rbtree.h> #include <linux/genalloc.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <mach/ocmem_priv.h> #define OCMEM_REGION_CTL_BASE 0xFDD0003C #define OCMEM_REGION_CTL_SIZE 0xFD0 #define GRAPHICS_REGION_CTL (0x17F000) struct ocmem_partition { const char *name; int id; unsigned long p_start; unsigned long p_size; unsigned long p_min; unsigned int p_tail; }; struct ocmem_zone zones[OCMEM_CLIENT_MAX]; struct ocmem_zone *get_zone(unsigned id) { if (id < OCMEM_GRAPHICS || id >= OCMEM_CLIENT_MAX) return NULL; else return &zones[id]; } static struct ocmem_plat_data *ocmem_pdata; static bool probe_done; #define CLIENT_NAME_MAX 10 /* Must be in sync with enum ocmem_client */ static const char *client_names[OCMEM_CLIENT_MAX] = { "graphics", "video", "camera", "hp_audio", "voice", "lp_audio", "sensors", "other_os", }; /* Must be in sync with enum ocmem_zstat_item */ static const char *zstat_names[NR_OCMEM_ZSTAT_ITEMS] = { "Allocation requests", "Synchronous allocations", "Ranged allocations", "Asynchronous allocations", "Allocation failures", "Allocations grown", "Allocations freed", "Allocations shrunk", "OCMEM maps", "Map failures", "OCMEM unmaps", "Unmap failures", "Transfers to OCMEM", "Transfers to DDR", "Transfer 
failures", "Evictions", "Restorations", "Dump requests", "Dump completed", }; struct ocmem_quota_table { const char *name; int id; unsigned long start; unsigned long size; unsigned long min; unsigned int tail; }; /* This static table will go away with device tree support */ static struct ocmem_quota_table qt[OCMEM_CLIENT_MAX] = { /* name, id, start, size, min, tail */ { "graphics", OCMEM_GRAPHICS, 0x0, 0x100000, 0x80000, 0}, { "video", OCMEM_VIDEO, 0x100000, 0x80000, 0x55000, 1}, { "camera", OCMEM_CAMERA, 0x0, 0x0, 0x0, 0}, { "voice", OCMEM_VOICE, 0x0, 0x0, 0x0, 0 }, { "hp_audio", OCMEM_HP_AUDIO, 0x0, 0x0, 0x0, 0}, { "lp_audio", OCMEM_LP_AUDIO, 0x80000, 0xA0000, 0xA0000, 0}, { "other_os", OCMEM_OTHER_OS, 0x120000, 0x20000, 0x20000, 0}, { "sensors", OCMEM_SENSORS, 0x140000, 0x40000, 0x40000, 0}, }; static inline int get_id(const char *name) { int i = 0; for (i = 0 ; i < OCMEM_CLIENT_MAX; i++) { if (strncmp(name, client_names[i], CLIENT_NAME_MAX) == 0) return i; } return -EINVAL; } bool is_probe_done(void) { return probe_done; } int check_id(int id) { return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS); } const char *get_name(int id) { if (!check_id(id)) return "Unknown"; return client_names[id]; } inline unsigned long phys_to_offset(unsigned long addr) { if (!ocmem_pdata) return 0; if (addr < ocmem_pdata->base || addr > (ocmem_pdata->base + ocmem_pdata->size)) return 0; return addr - ocmem_pdata->base; } inline unsigned long offset_to_phys(unsigned long offset) { if (!ocmem_pdata) return 0; if (offset > ocmem_pdata->size) return 0; return offset + ocmem_pdata->base; } inline int zone_active(int id) { struct ocmem_zone *z = get_zone(id); if (z) return z->active == true ? 
1 : 0; else return 0; } inline void inc_ocmem_stat(struct ocmem_zone *z, enum ocmem_zstat_item item) { if (!z) return; atomic_long_inc(&z->z_stat[item]); } inline unsigned long get_ocmem_stat(struct ocmem_zone *z, enum ocmem_zstat_item item) { if (!z) return 0; else return atomic_long_read(&z->z_stat[item]); } static struct ocmem_plat_data *parse_static_config(struct platform_device *pdev) { struct ocmem_plat_data *pdata = NULL; struct ocmem_partition *parts = NULL; struct device *dev = &pdev->dev; unsigned nr_parts = 0; int i; int j; pdata = devm_kzalloc(dev, sizeof(struct ocmem_plat_data), GFP_KERNEL); if (!pdata) { dev_err(dev, "Unable to allocate memory for" " platform data\n"); return NULL; } for (i = 0 ; i < ARRAY_SIZE(qt); i++) if (qt[i].size != 0x0) nr_parts++; if (nr_parts == 0x0) { dev_err(dev, "No valid ocmem partitions\n"); return NULL; } else dev_info(dev, "Total partitions = %d\n", nr_parts); parts = devm_kzalloc(dev, sizeof(struct ocmem_partition) * nr_parts, GFP_KERNEL); if (!parts) { dev_err(dev, "Unable to allocate memory for" " partition data\n"); return NULL; } for (i = 0, j = 0; i < ARRAY_SIZE(qt); i++) { if (qt[i].size == 0x0) { dev_dbg(dev, "Skipping creation of pool for %s\n", qt[i].name); continue; } parts[j].id = qt[i].id; parts[j].p_size = qt[i].size; parts[j].p_start = qt[i].start; parts[j].p_min = qt[i].min; parts[j].p_tail = qt[i].tail; j++; } BUG_ON(j != nr_parts); pdata->nr_parts = nr_parts; pdata->parts = parts; pdata->base = OCMEM_PHYS_BASE; pdata->size = OCMEM_PHYS_SIZE; return pdata; } int __devinit of_ocmem_parse_regions(struct device *dev, struct ocmem_partition **part) { const char *name; struct device_node *child = NULL; int nr_parts = 0; int i = 0; int rc = 0; int id = -1; /*Compute total partitions */ for_each_child_of_node(dev->of_node, child) nr_parts++; if (nr_parts == 0) return 0; *part = devm_kzalloc(dev, nr_parts * sizeof(**part), GFP_KERNEL); if (!*part) return -ENOMEM; for_each_child_of_node(dev->of_node, child) { 
const u32 *addr; u32 min; u64 size; u64 p_start; addr = of_get_address(child, 0, &size, NULL); if (!addr) { dev_err(dev, "Invalid addr for partition %d, ignored\n", i); continue; } rc = of_property_read_u32(child, "qcom,ocmem-part-min", &min); if (rc) { dev_err(dev, "No min for partition %d, ignored\n", i); continue; } rc = of_property_read_string(child, "qcom,ocmem-part-name", &name); if (rc) { dev_err(dev, "No name for partition %d, ignored\n", i); continue; } id = get_id(name); if (id < 0) { dev_err(dev, "Ignoring invalid partition %s\n", name); continue; } p_start = of_translate_address(child, addr); if (p_start == OF_BAD_ADDR) { dev_err(dev, "Invalid offset for partition %d\n", i); continue; } (*part)[i].p_start = p_start; (*part)[i].p_size = size; (*part)[i].id = id; (*part)[i].name = name; (*part)[i].p_min = min; (*part)[i].p_tail = of_property_read_bool(child, "tail"); i++; } return i; } #if defined(CONFIG_MSM_OCMEM_LOCAL_POWER_CTRL) static int parse_power_ctrl_config(struct ocmem_plat_data *pdata, struct device_node *node) { pdata->rpm_pwr_ctrl = false; pdata->rpm_rsc_type = ~0x0; return 0; } #else static int parse_power_ctrl_config(struct ocmem_plat_data *pdata, struct device_node *node) { unsigned rsc_type = ~0x0; pdata->rpm_pwr_ctrl = false; if (of_property_read_u32(node, "qcom,resource-type", &rsc_type)) return -EINVAL; pdata->rpm_pwr_ctrl = true; pdata->rpm_rsc_type = rsc_type; return 0; } #endif /* CONFIG_MSM_OCMEM_LOCAL_POWER_CTRL */ /* Core Clock Operations */ int ocmem_enable_core_clock(void) { int ret; ret = clk_prepare_enable(ocmem_pdata->core_clk); if (ret) { pr_err("ocmem: Failed to enable core clock\n"); return ret; } pr_debug("ocmem: Enabled core clock\n"); return 0; } void ocmem_disable_core_clock(void) { clk_disable_unprepare(ocmem_pdata->core_clk); pr_debug("ocmem: Disabled core clock\n"); } /* Branch Clock Operations */ int ocmem_enable_iface_clock(void) { int ret; if (!ocmem_pdata->iface_clk) return 0; ret = 
clk_prepare_enable(ocmem_pdata->iface_clk); if (ret) { pr_err("ocmem: Failed to disable iface clock\n"); return ret; } pr_debug("ocmem: Enabled iface clock\n"); return 0; } void ocmem_disable_iface_clock(void) { if (!ocmem_pdata->iface_clk) return; clk_disable_unprepare(ocmem_pdata->iface_clk); pr_debug("ocmem: Disabled iface clock\n"); } static struct ocmem_plat_data * __devinit parse_dt_config (struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = pdev->dev.of_node; struct ocmem_plat_data *pdata = NULL; struct ocmem_partition *parts = NULL; struct resource *ocmem_irq; struct resource *dm_irq; struct resource *ocmem_mem; struct resource *reg_base; struct resource *br_base; struct resource *dm_base; struct resource *ocmem_mem_io; unsigned nr_parts = 0; unsigned nr_regions = 0; unsigned nr_macros = 0; pdata = devm_kzalloc(dev, sizeof(struct ocmem_plat_data), GFP_KERNEL); if (!pdata) { dev_err(dev, "Unable to allocate memory for platform data\n"); return NULL; } ocmem_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ocmem_physical"); if (!ocmem_mem) { dev_err(dev, "No OCMEM memory resource\n"); return NULL; } ocmem_mem_io = request_mem_region(ocmem_mem->start, resource_size(ocmem_mem), pdev->name); if (!ocmem_mem_io) { dev_err(dev, "Could not claim OCMEM memory\n"); return NULL; } pdata->base = ocmem_mem->start; pdata->size = resource_size(ocmem_mem); pdata->vbase = devm_ioremap_nocache(dev, ocmem_mem->start, resource_size(ocmem_mem)); if (!pdata->vbase) { dev_err(dev, "Could not ioremap ocmem memory\n"); return NULL; } reg_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ocmem_ctrl_physical"); if (!reg_base) { dev_err(dev, "No OCMEM register resource\n"); return NULL; } pdata->reg_base = devm_ioremap_nocache(dev, reg_base->start, resource_size(reg_base)); if (!pdata->reg_base) { dev_err(dev, "Could not ioremap register map\n"); return NULL; } br_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, 
"br_ctrl_physical"); if (!br_base) { dev_err(dev, "No OCMEM BR resource\n"); return NULL; } pdata->br_base = devm_ioremap_nocache(dev, br_base->start, resource_size(br_base)); if (!pdata->br_base) { dev_err(dev, "Could not ioremap BR resource\n"); return NULL; } dm_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dm_ctrl_physical"); if (!dm_base) { dev_err(dev, "No OCMEM DM resource\n"); return NULL; } pdata->dm_base = devm_ioremap_nocache(dev, dm_base->start, resource_size(dm_base)); if (!pdata->dm_base) { dev_err(dev, "Could not ioremap DM resource\n"); return NULL; } ocmem_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ocmem_irq"); if (!ocmem_irq) { dev_err(dev, "No OCMEM IRQ resource\n"); return NULL; } dm_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "dm_irq"); if (!dm_irq) { dev_err(dev, "No DM IRQ resource\n"); return NULL; } if (of_property_read_u32(node, "qcom,ocmem-num-regions", &nr_regions)) { dev_err(dev, "No OCMEM memory regions specified\n"); } if (nr_regions == 0) { dev_err(dev, "No hardware memory regions found\n"); return NULL; } if (of_property_read_u32(node, "qcom,ocmem-num-macros", &nr_macros)) { dev_err(dev, "No OCMEM macros specified\n"); } if (nr_macros == 0) { dev_err(dev, "No hardware macros found\n"); return NULL; } /* Figure out the number of partititons */ nr_parts = of_ocmem_parse_regions(dev, &parts); if (nr_parts <= 0) { dev_err(dev, "No valid OCMEM partitions found\n"); goto pdata_error; } else dev_dbg(dev, "Found %d ocmem partitions\n", nr_parts); if (parse_power_ctrl_config(pdata, node)) { dev_err(dev, "No OCMEM RPM Resource specified\n"); return NULL; } pdata->nr_parts = nr_parts; pdata->parts = parts; pdata->nr_regions = nr_regions; pdata->nr_macros = nr_macros; pdata->ocmem_irq = ocmem_irq->start; pdata->dm_irq = dm_irq->start; return pdata; pdata_error: return NULL; } static int ocmem_zones_show(struct seq_file *f, void *dummy) { unsigned i = 0; for (i = OCMEM_GRAPHICS; i < OCMEM_CLIENT_MAX; i++) 
{ struct ocmem_zone *z = get_zone(i); if (z && z->active == true) seq_printf(f, "zone %s\t:0x%08lx - 0x%08lx (%4ld KB)\n", get_name(z->owner), z->z_start, z->z_end - 1, (z->z_end - z->z_start)/SZ_1K); } return 0; } static int ocmem_zones_open(struct inode *inode, struct file *file) { return single_open(file, ocmem_zones_show, inode->i_private); } static const struct file_operations zones_show_fops = { .open = ocmem_zones_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int ocmem_stats_show(struct seq_file *f, void *dummy) { unsigned i = 0; unsigned j = 0; for (i = OCMEM_GRAPHICS; i < OCMEM_CLIENT_MAX; i++) { struct ocmem_zone *z = get_zone(i); if (z && z->active == true) { seq_printf(f, "zone %s:\n", get_name(z->owner)); for (j = 0 ; j < ARRAY_SIZE(zstat_names); j++) { seq_printf(f, "\t %s: %lu\n", zstat_names[j], get_ocmem_stat(z, j)); } } } return 0; } static int ocmem_stats_open(struct inode *inode, struct file *file) { return single_open(file, ocmem_stats_show, inode->i_private); } static const struct file_operations stats_show_fops = { .open = ocmem_stats_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int ocmem_timing_show(struct seq_file *f, void *dummy) { unsigned i = 0; for (i = OCMEM_GRAPHICS; i < OCMEM_CLIENT_MAX; i++) { struct ocmem_zone *z = get_zone(i); if (z && z->active == true) seq_printf(f, "zone %s\t: alloc_delay:[max:%d, min:%d, total:%llu,cnt:%lu] free_delay:[max:%d, min:%d, total:%llu, cnt:%lu]\n", get_name(z->owner), z->max_alloc_time, z->min_alloc_time, z->total_alloc_time, get_ocmem_stat(z, 1), z->max_free_time, z->min_free_time, z->total_free_time, get_ocmem_stat(z, 6)); } return 0; } static int ocmem_timing_open(struct inode *inode, struct file *file) { return single_open(file, ocmem_timing_show, inode->i_private); } static const struct file_operations timing_show_fops = { .open = ocmem_timing_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int 
ocmem_zone_init(struct platform_device *pdev) { int ret = -1; int i = 0; unsigned active_zones = 0; struct ocmem_zone *zone = NULL; struct ocmem_zone_ops *z_ops = NULL; struct device *dev = &pdev->dev; unsigned long start; struct ocmem_plat_data *pdata = NULL; pdata = platform_get_drvdata(pdev); for (i = 0; i < pdata->nr_parts; i++) { struct ocmem_partition *part = &pdata->parts[i]; zone = get_zone(part->id); zone->active = false; dev_dbg(dev, "Partition %d, start %lx, size %lx for %s\n", i, part->p_start, part->p_size, client_names[part->id]); if (part->p_size > pdata->size) { dev_alert(dev, "Quota > ocmem_size for id:%d\n", part->id); continue; } zone->z_pool = gen_pool_create(PAGE_SHIFT, -1); if (!zone->z_pool) { dev_alert(dev, "Creating pool failed for id:%d\n", part->id); return -EBUSY; } start = part->p_start; ret = gen_pool_add(zone->z_pool, start, part->p_size, -1); if (ret < 0) { gen_pool_destroy(zone->z_pool); dev_alert(dev, "Unable to back pool %d with " "buffer:%lx\n", part->id, part->p_size); return -EBUSY; } /* Initialize zone allocators */ z_ops = devm_kzalloc(dev, sizeof(struct ocmem_zone_ops), GFP_KERNEL); if (!z_ops) { pr_alert("ocmem: Unable to allocate memory for" "zone ops:%d\n", i); return -EBUSY; } /* Initialize zone parameters */ zone->z_start = start; zone->z_head = zone->z_start; zone->z_end = start + part->p_size; zone->z_tail = zone->z_end; zone->z_free = part->p_size; zone->owner = part->id; zone->active_regions = 0; zone->max_regions = 0; INIT_LIST_HEAD(&zone->req_list); zone->z_ops = z_ops; zone->max_alloc_time = 0; zone->min_alloc_time = 0xFFFFFFFF; zone->total_alloc_time = 0; zone->max_free_time = 0; zone->min_free_time = 0xFFFFFFFF; zone->total_free_time = 0; if (part->p_tail) { z_ops->allocate = allocate_tail; z_ops->free = free_tail; } else { z_ops->allocate = allocate_head; z_ops->free = free_head; } /* zap the counters */ memset(zone->z_stat, 0 , sizeof(zone->z_stat)); zone->active = true; active_zones++; if (active_zones == 1) 
pr_info("Physical OCMEM zone layout:\n"); pr_info(" zone %s\t: 0x%08lx - 0x%08lx (%4ld KB)\n", client_names[part->id], zone->z_start, zone->z_end - 1, part->p_size/SZ_1K); } if (!debugfs_create_file("zones", S_IRUGO, pdata->debug_node, NULL, &zones_show_fops)) { dev_err(dev, "Unable to create debugfs node for zones\n"); return -EBUSY; } if (!debugfs_create_file("stats", S_IRUGO, pdata->debug_node, NULL, &stats_show_fops)) { dev_err(dev, "Unable to create debugfs node for stats\n"); return -EBUSY; } if (!debugfs_create_file("timing", S_IRUGO, pdata->debug_node, NULL, &timing_show_fops)) { dev_err(dev, "Unable to create debugfs node for timing\n"); return -EBUSY; } dev_dbg(dev, "Total active zones = %d\n", active_zones); return 0; } /* Enable the ocmem graphics mpU as a workaround */ #ifdef CONFIG_MSM_OCMEM_NONSECURE static int ocmem_init_gfx_mpu(struct platform_device *pdev) { int rc; struct device *dev = &pdev->dev; void __iomem *ocmem_region_vbase = NULL; ocmem_region_vbase = devm_ioremap_nocache(dev, OCMEM_REGION_CTL_BASE, OCMEM_REGION_CTL_SIZE); if (!ocmem_region_vbase) return -EBUSY; rc = ocmem_enable_core_clock(); if (rc < 0) return rc; writel_relaxed(GRAPHICS_REGION_CTL, ocmem_region_vbase + 0xFCC); ocmem_disable_core_clock(); return 0; } #else static int ocmem_init_gfx_mpu(struct platform_device *pdev) { return 0; } #endif /* CONFIG_MSM_OCMEM_NONSECURE */ static int __devinit ocmem_debugfs_init(struct platform_device *pdev) { struct dentry *debug_dir = NULL; struct ocmem_plat_data *pdata = platform_get_drvdata(pdev); debug_dir = debugfs_create_dir("ocmem", NULL); if (!debug_dir || IS_ERR(debug_dir)) { pr_err("ocmem: Unable to create debugfs root\n"); return PTR_ERR(debug_dir); } pdata->debug_node = debug_dir; return 0; } static void __devexit ocmem_debugfs_exit(struct platform_device *pdev) { struct ocmem_plat_data *pdata = platform_get_drvdata(pdev); debugfs_remove_recursive(pdata->debug_node); } static int __devinit msm_ocmem_probe(struct platform_device 
*pdev) { struct device *dev = &pdev->dev; struct clk *ocmem_core_clk = NULL; struct clk *ocmem_iface_clk = NULL; int rc; ocmem_core_clk = devm_clk_get(dev, "core_clk"); if (IS_ERR(ocmem_core_clk)) { dev_err(dev, "Unable to get the core clock\n"); return PTR_ERR(ocmem_core_clk); } /* The core clock is synchronous with graphics */ if (clk_set_rate(ocmem_core_clk, 1000) < 0) { dev_err(dev, "Set rate failed on the core clock\n"); return -EBUSY; } ocmem_iface_clk = devm_clk_get(dev, "iface_clk"); if (IS_ERR_OR_NULL(ocmem_iface_clk)) ocmem_iface_clk = NULL; if (!pdev->dev.of_node) { dev_info(dev, "Missing Configuration in Device Tree\n"); ocmem_pdata = parse_static_config(pdev); } else { ocmem_pdata = parse_dt_config(pdev); } /* Check if we have some configuration data to start */ if (!ocmem_pdata) return -ENODEV; ocmem_pdata->core_clk = ocmem_core_clk; ocmem_pdata->iface_clk = ocmem_iface_clk; /* Sanity Checks */ BUG_ON(!IS_ALIGNED(ocmem_pdata->size, PAGE_SIZE)); BUG_ON(!IS_ALIGNED(ocmem_pdata->base, PAGE_SIZE)); dev_info(dev, "OCMEM Virtual addr %p\n", ocmem_pdata->vbase); platform_set_drvdata(pdev, ocmem_pdata); rc = ocmem_enable_core_clock(); if (rc < 0) goto core_clk_fail; rc = ocmem_enable_iface_clock(); if (rc < 0) goto iface_clk_fail; /* Parameter to be updated based on TZ */ /* Allow the OCMEM CSR to be programmed */ if (ocmem_restore_sec_program(OCMEM_SECURE_DEV_ID)) { ocmem_disable_iface_clock(); ocmem_disable_core_clock(); return -EBUSY; } ocmem_disable_iface_clock(); ocmem_disable_core_clock(); if (ocmem_debugfs_init(pdev)) dev_err(dev, "ocmem: No debugfs node available\n"); if (ocmem_core_init(pdev)) return -EBUSY; if (ocmem_zone_init(pdev)) return -EBUSY; if (ocmem_notifier_init()) return -EBUSY; if (ocmem_sched_init(pdev)) return -EBUSY; if (ocmem_rdm_init(pdev)) return -EBUSY; if (ocmem_init_gfx_mpu(pdev)) { dev_err(dev, "Unable to initialize Graphics mPU\n"); return -EBUSY; } probe_done = true; dev_dbg(dev, "initialized successfully\n"); return 0; 
iface_clk_fail: ocmem_disable_core_clock(); core_clk_fail: return rc; } static int __devexit msm_ocmem_remove(struct platform_device *pdev) { ocmem_debugfs_exit(pdev); return 0; } static struct of_device_id msm_ocmem_dt_match[] = { { .compatible = "qcom,msm-ocmem", }, {} }; static struct platform_driver msm_ocmem_driver = { .probe = msm_ocmem_probe, .remove = __devexit_p(msm_ocmem_remove), .driver = { .name = "msm_ocmem", .owner = THIS_MODULE, .of_match_table = msm_ocmem_dt_match, }, }; static int __init ocmem_init(void) { return platform_driver_register(&msm_ocmem_driver); } subsys_initcall(ocmem_init); static void __exit ocmem_exit(void) { platform_driver_unregister(&msm_ocmem_driver); } module_exit(ocmem_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Support for On-Chip Memory on MSM");
gpl-2.0
chasmodo/android_kernel_oneplus_msm8974
drivers/gpu/drm/radeon/radeon_ttm.c
708
25041
/* * Copyright 2009 Jerome Glisse. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * */ /* * Authors: * Jerome Glisse <glisse@freedesktop.org> * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> * Dave Airlie */ #include <ttm/ttm_bo_api.h> #include <ttm/ttm_bo_driver.h> #include <ttm/ttm_placement.h> #include <ttm/ttm_module.h> #include <ttm/ttm_page_alloc.h> #include <drm/drmP.h> #include <drm/radeon_drm.h> #include <linux/seq_file.h> #include <linux/slab.h> #include "radeon_reg.h" #include "radeon.h" #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) static int radeon_ttm_debugfs_init(struct radeon_device *rdev); static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) { struct radeon_mman *mman; struct radeon_device *rdev; mman = container_of(bdev, struct radeon_mman, bdev); rdev = container_of(mman, struct radeon_device, mman); return rdev; } /* * Global memory. 
*/ static int radeon_ttm_mem_global_init(struct drm_global_reference *ref) { return ttm_mem_global_init(ref->object); } static void radeon_ttm_mem_global_release(struct drm_global_reference *ref) { ttm_mem_global_release(ref->object); } static int radeon_ttm_global_init(struct radeon_device *rdev) { struct drm_global_reference *global_ref; int r; rdev->mman.mem_global_referenced = false; global_ref = &rdev->mman.mem_global_ref; global_ref->global_type = DRM_GLOBAL_TTM_MEM; global_ref->size = sizeof(struct ttm_mem_global); global_ref->init = &radeon_ttm_mem_global_init; global_ref->release = &radeon_ttm_mem_global_release; r = drm_global_item_ref(global_ref); if (r != 0) { DRM_ERROR("Failed setting up TTM memory accounting " "subsystem.\n"); return r; } rdev->mman.bo_global_ref.mem_glob = rdev->mman.mem_global_ref.object; global_ref = &rdev->mman.bo_global_ref.ref; global_ref->global_type = DRM_GLOBAL_TTM_BO; global_ref->size = sizeof(struct ttm_bo_global); global_ref->init = &ttm_bo_global_init; global_ref->release = &ttm_bo_global_release; r = drm_global_item_ref(global_ref); if (r != 0) { DRM_ERROR("Failed setting up TTM BO subsystem.\n"); drm_global_item_unref(&rdev->mman.mem_global_ref); return r; } rdev->mman.mem_global_referenced = true; return 0; } static void radeon_ttm_global_fini(struct radeon_device *rdev) { if (rdev->mman.mem_global_referenced) { drm_global_item_unref(&rdev->mman.bo_global_ref.ref); drm_global_item_unref(&rdev->mman.mem_global_ref); rdev->mman.mem_global_referenced = false; } } static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) { return 0; } static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, struct ttm_mem_type_manager *man) { struct radeon_device *rdev; rdev = radeon_get_rdev(bdev); switch (type) { case TTM_PL_SYSTEM: /* System memory */ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; break; case 
TTM_PL_TT: man->func = &ttm_bo_manager_func; man->gpu_offset = rdev->mc.gtt_start; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) { DRM_ERROR("AGP is not enabled for memory type %u\n", (unsigned)type); return -EINVAL; } if (!rdev->ddev->agp->cant_use_aperture) man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; } #endif break; case TTM_PL_VRAM: /* "On-card" video ram */ man->func = &ttm_bo_manager_func; man->gpu_offset = rdev->mc.vram_start; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; break; default: DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); return -EINVAL; } return 0; } static void radeon_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct radeon_bo *rbo; static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!radeon_ttm_bo_is_radeon_bo(bo)) { placement->fpfn = 0; placement->lpfn = 0; placement->placement = &placements; placement->busy_placement = &placements; placement->num_placement = 1; placement->num_busy_placement = 1; return; } rbo = container_of(bo, struct radeon_bo, tbo); switch (bo->mem.mem_type) { case TTM_PL_VRAM: if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false) radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); else radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); break; case TTM_PL_TT: default: radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); } *placement = rbo->placement; } static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) { return 0; } static void radeon_move_null(struct 
ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_mem_reg *old_mem = &bo->mem; BUG_ON(old_mem->mm_node != NULL); *old_mem = *new_mem; new_mem->mm_node = NULL; } static int radeon_move_blit(struct ttm_buffer_object *bo, bool evict, int no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem, struct ttm_mem_reg *old_mem) { struct radeon_device *rdev; uint64_t old_start, new_start; struct radeon_fence *fence; int r, i; rdev = radeon_get_rdev(bo->bdev); r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev)); if (unlikely(r)) { return r; } old_start = old_mem->start << PAGE_SHIFT; new_start = new_mem->start << PAGE_SHIFT; switch (old_mem->mem_type) { case TTM_PL_VRAM: old_start += rdev->mc.vram_start; break; case TTM_PL_TT: old_start += rdev->mc.gtt_start; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); return -EINVAL; } switch (new_mem->mem_type) { case TTM_PL_VRAM: new_start += rdev->mc.vram_start; break; case TTM_PL_TT: new_start += rdev->mc.gtt_start; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); return -EINVAL; } if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; } BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); /* sync other rings */ if (rdev->family >= CHIP_R600) { for (i = 0; i < RADEON_NUM_RINGS; ++i) { /* no need to sync to our own or unused rings */ if (i == radeon_copy_ring_index(rdev) || !rdev->ring[i].ready) continue; if (!fence->semaphore) { r = radeon_semaphore_create(rdev, &fence->semaphore); /* FIXME: handle semaphore error */ if (r) continue; } r = radeon_ring_lock(rdev, &rdev->ring[i], 3); /* FIXME: handle ring lock error */ if (r) continue; radeon_semaphore_emit_signal(rdev, i, fence->semaphore); radeon_ring_unlock_commit(rdev, &rdev->ring[i]); r = radeon_ring_lock(rdev, &rdev->ring[radeon_copy_ring_index(rdev)], 3); /* FIXME: handle ring lock error */ if (r) continue; 
radeon_semaphore_emit_wait(rdev, radeon_copy_ring_index(rdev), fence->semaphore); radeon_ring_unlock_commit(rdev, &rdev->ring[radeon_copy_ring_index(rdev)]); } } r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ fence); /* FIXME: handle copy error */ r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, evict, no_wait_reserve, no_wait_gpu, new_mem); radeon_fence_unref(&fence); return r; } static int radeon_move_vram_ram(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; u32 placements; struct ttm_placement placement; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; placement.fpfn = 0; placement.lpfn = 0; placement.num_placement = 1; placement.placement = &placements; placement.num_busy_placement = 1; placement.busy_placement = &placements; placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); if (unlikely(r)) { return r; } r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); if (unlikely(r)) { goto out_cleanup; } r = ttm_tt_bind(bo->ttm, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; } static int radeon_move_ram_vram(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; struct ttm_placement placement; u32 placements; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; 
tmp_mem.mm_node = NULL; placement.fpfn = 0; placement.lpfn = 0; placement.num_placement = 1; placement.placement = &placements; placement.num_busy_placement = 1; placement.busy_placement = &placements; placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); if (unlikely(r)) { return r; } r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; } static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; int r; rdev = radeon_get_rdev(bo->bdev); if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { radeon_move_null(bo, new_mem); return 0; } if ((old_mem->mem_type == TTM_PL_TT && new_mem->mem_type == TTM_PL_SYSTEM) || (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_TT)) { /* bind is enough */ radeon_move_null(bo, new_mem); return 0; } if (!rdev->ring[radeon_copy_ring_index(rdev)].ready || rdev->asic->copy.copy == NULL) { /* use memcpy */ goto memcpy; } if (old_mem->mem_type == TTM_PL_VRAM && new_mem->mem_type == TTM_PL_SYSTEM) { r = radeon_move_vram_ram(bo, evict, interruptible, no_wait_reserve, no_wait_gpu, new_mem); } else if (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) { r = radeon_move_ram_vram(bo, evict, interruptible, no_wait_reserve, no_wait_gpu, new_mem); } else { r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem); } if (r) { memcpy: r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); } return r; } static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg 
*mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct radeon_device *rdev = radeon_get_rdev(bdev); mem->bus.addr = NULL; mem->bus.offset = 0; mem->bus.size = mem->num_pages << PAGE_SHIFT; mem->bus.base = 0; mem->bus.is_iomem = false; if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) return -EINVAL; switch (mem->mem_type) { case TTM_PL_SYSTEM: /* system memory */ return 0; case TTM_PL_TT: #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { /* RADEON_IS_AGP is set only if AGP is active */ mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = rdev->mc.agp_base; mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; } #endif break; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) return -EINVAL; mem->bus.base = rdev->mc.aper_base; mem->bus.is_iomem = true; #ifdef __alpha__ /* * Alpha: use bus.addr to hold the ioremap() return, * so we can modify bus.base below. */ if (mem->placement & TTM_PL_FLAG_WC) mem->bus.addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size); else mem->bus.addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); /* * Alpha: Use just the bus offset plus * the hose/domain memory base for bus.base. * It then can be used to build PTEs for VRAM * access, as done in ttm_bo_vm_fault(). 
*/ mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + rdev->ddev->hose->dense_mem_base; #endif break; default: return -EINVAL; } return 0; } static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { } static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, bool lazy, bool interruptible) { return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); } static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg) { return 0; } static void radeon_sync_obj_unref(void **sync_obj) { radeon_fence_unref((struct radeon_fence **)sync_obj); } static void *radeon_sync_obj_ref(void *sync_obj) { return radeon_fence_ref((struct radeon_fence *)sync_obj); } static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg) { return radeon_fence_signaled((struct radeon_fence *)sync_obj); } /* * TTM backend functions. */ struct radeon_ttm_tt { struct ttm_dma_tt ttm; struct radeon_device *rdev; u64 offset; }; static int radeon_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct radeon_ttm_tt *gtt = (void*)ttm; int r; gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); if (!ttm->num_pages) { WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); } r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages, ttm->pages, gtt->ttm.dma_address); if (r) { DRM_ERROR("failed to bind %lu pages at 0x%08X\n", ttm->num_pages, (unsigned)gtt->offset); return r; } return 0; } static int radeon_ttm_backend_unbind(struct ttm_tt *ttm) { struct radeon_ttm_tt *gtt = (void *)ttm; radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); return 0; } static void radeon_ttm_backend_destroy(struct ttm_tt *ttm) { struct radeon_ttm_tt *gtt = (void *)ttm; ttm_dma_tt_fini(&gtt->ttm); kfree(gtt); } static struct ttm_backend_func radeon_backend_func = { .bind = &radeon_ttm_backend_bind, .unbind = &radeon_ttm_backend_unbind, .destroy = &radeon_ttm_backend_destroy, }; struct ttm_tt 
*radeon_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, uint32_t page_flags, struct page *dummy_read_page) { struct radeon_device *rdev; struct radeon_ttm_tt *gtt; rdev = radeon_get_rdev(bdev); #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge, size, page_flags, dummy_read_page); } #endif gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL); if (gtt == NULL) { return NULL; } gtt->ttm.ttm.func = &radeon_backend_func; gtt->rdev = rdev; if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) { kfree(gtt); return NULL; } return &gtt->ttm.ttm; } static int radeon_ttm_tt_populate(struct ttm_tt *ttm) { struct radeon_device *rdev; struct radeon_ttm_tt *gtt = (void *)ttm; unsigned i; int r; if (ttm->state != tt_unpopulated) return 0; rdev = radeon_get_rdev(ttm->bdev); #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { return ttm_agp_tt_populate(ttm); } #endif #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { return ttm_dma_populate(&gtt->ttm, rdev->dev); } #endif r = ttm_pool_populate(ttm); if (r) { return r; } for (i = 0; i < ttm->num_pages; i++) { gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i], 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { while (--i) { pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); gtt->ttm.dma_address[i] = 0; } ttm_pool_unpopulate(ttm); return -EFAULT; } } return 0; } static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) { struct radeon_device *rdev; struct radeon_ttm_tt *gtt = (void *)ttm; unsigned i; rdev = radeon_get_rdev(ttm->bdev); #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { ttm_agp_tt_unpopulate(ttm); return; } #endif #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { ttm_dma_unpopulate(&gtt->ttm, rdev->dev); return; } #endif for (i = 0; i < ttm->num_pages; i++) { if (gtt->ttm.dma_address[i]) { pci_unmap_page(rdev->pdev, 
gtt->ttm.dma_address[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); } } ttm_pool_unpopulate(ttm); } static struct ttm_bo_driver radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, .ttm_tt_populate = &radeon_ttm_tt_populate, .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, .invalidate_caches = &radeon_invalidate_caches, .init_mem_type = &radeon_init_mem_type, .evict_flags = &radeon_evict_flags, .move = &radeon_bo_move, .verify_access = &radeon_verify_access, .sync_obj_signaled = &radeon_sync_obj_signaled, .sync_obj_wait = &radeon_sync_obj_wait, .sync_obj_flush = &radeon_sync_obj_flush, .sync_obj_unref = &radeon_sync_obj_unref, .sync_obj_ref = &radeon_sync_obj_ref, .move_notify = &radeon_bo_move_notify, .fault_reserve_notify = &radeon_bo_fault_reserve_notify, .io_mem_reserve = &radeon_ttm_io_mem_reserve, .io_mem_free = &radeon_ttm_io_mem_free, }; int radeon_ttm_init(struct radeon_device *rdev) { int r; r = radeon_ttm_global_init(rdev); if (r) { return r; } /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&rdev->mman.bdev, rdev->mman.bo_global_ref.ref.object, &radeon_bo_driver, DRM_FILE_PAGE_OFFSET, rdev->need_dma32); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; } rdev->mman.initialized = true; r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, rdev->mc.real_vram_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &rdev->stollen_vga_memory); if (r) { return r; } r = radeon_bo_reserve(rdev->stollen_vga_memory, false); if (r) return r; r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); radeon_bo_unreserve(rdev->stollen_vga_memory); if (r) { radeon_bo_unref(&rdev->stollen_vga_memory); return r; } DRM_INFO("radeon: %uM of VRAM memory ready\n", (unsigned) (rdev->mc.real_vram_size / (1024 * 1024))); r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, rdev->mc.gtt_size >> 
PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing GTT heap.\n"); return r; } DRM_INFO("radeon: %uM of GTT memory ready.\n", (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; } r = radeon_ttm_debugfs_init(rdev); if (r) { DRM_ERROR("Failed to init debugfs\n"); return r; } return 0; } void radeon_ttm_fini(struct radeon_device *rdev) { int r; if (!rdev->mman.initialized) return; if (rdev->stollen_vga_memory) { r = radeon_bo_reserve(rdev->stollen_vga_memory, false); if (r == 0) { radeon_bo_unpin(rdev->stollen_vga_memory); radeon_bo_unreserve(rdev->stollen_vga_memory); } radeon_bo_unref(&rdev->stollen_vga_memory); } ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); ttm_bo_device_release(&rdev->mman.bdev); radeon_gart_fini(rdev); radeon_ttm_global_fini(rdev); rdev->mman.initialized = false; DRM_INFO("radeon: ttm finalized\n"); } /* this should only be called at bootup or when userspace * isn't running */ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) { struct ttm_mem_type_manager *man; if (!rdev->mman.initialized) return; man = &rdev->mman.bdev.man[TTM_PL_VRAM]; /* this just adjusts TTM size idea, which sets lpfn to the correct value */ man->size = size >> PAGE_SHIFT; } static struct vm_operations_struct radeon_ttm_vm_ops; static const struct vm_operations_struct *ttm_vm_ops = NULL; static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct ttm_buffer_object *bo; struct radeon_device *rdev; int r; bo = (struct ttm_buffer_object *)vma->vm_private_data; if (bo == NULL) { return VM_FAULT_NOPAGE; } rdev = radeon_get_rdev(bo->bdev); mutex_lock(&rdev->vram_mutex); r = ttm_vm_ops->fault(vma, vmf); mutex_unlock(&rdev->vram_mutex); return r; } int radeon_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *file_priv; struct radeon_device *rdev; int r; if 
(unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { return drm_mmap(filp, vma); } file_priv = filp->private_data; rdev = file_priv->minor->dev->dev_private; if (rdev == NULL) { return -EINVAL; } r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev); if (unlikely(r != 0)) { return r; } if (unlikely(ttm_vm_ops == NULL)) { ttm_vm_ops = vma->vm_ops; radeon_ttm_vm_ops = *ttm_vm_ops; radeon_ttm_vm_ops.fault = &radeon_ttm_fault; } vma->vm_ops = &radeon_ttm_vm_ops; return 0; } #define RADEON_DEBUGFS_MEM_TYPES 2 #if defined(CONFIG_DEBUG_FS) static int radeon_mm_dump_table(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_mm *mm = (struct drm_mm *)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; int ret; struct ttm_bo_global *glob = rdev->mman.bdev.glob; spin_lock(&glob->lru_lock); ret = drm_mm_dump_table(m, mm); spin_unlock(&glob->lru_lock); return ret; } #endif static int radeon_ttm_debugfs_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2]; static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32]; unsigned i; for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { if (i == 0) sprintf(radeon_mem_types_names[i], "radeon_vram_mm"); else sprintf(radeon_mem_types_names[i], "radeon_gtt_mm"); radeon_mem_types_list[i].name = radeon_mem_types_names[i]; radeon_mem_types_list[i].show = &radeon_mm_dump_table; radeon_mem_types_list[i].driver_features = 0; if (i == 0) radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv; else radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv; } /* Add ttm page pool to debugfs */ sprintf(radeon_mem_types_names[i], "ttm_page_pool"); radeon_mem_types_list[i].name = radeon_mem_types_names[i]; radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs; radeon_mem_types_list[i].driver_features = 0; 
radeon_mem_types_list[i++].data = NULL; #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool"); radeon_mem_types_list[i].name = radeon_mem_types_names[i]; radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs; radeon_mem_types_list[i].driver_features = 0; radeon_mem_types_list[i++].data = NULL; } #endif return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i); #endif return 0; }
gpl-2.0
maz-1/firefly-rk3288-kernel
arch/mips/jz4740/board-qi_lb60.c
1988
12347
/* * linux/arch/mips/jz4740/board-qi_lb60.c * * QI_LB60 board support * * Copyright (c) 2009 Qi Hardware inc., * Author: Xiangfu Liu <xiangfu@qi-hardware.com> * Copyright 2010, Lars-Peter Clausen <lars@metafoo.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 or later * as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/gpio.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/input/matrix_keypad.h> #include <linux/spi/spi.h> #include <linux/spi/spi_gpio.h> #include <linux/power_supply.h> #include <linux/power/jz4740-battery.h> #include <linux/power/gpio-charger.h> #include <asm/mach-jz4740/jz4740_fb.h> #include <asm/mach-jz4740/jz4740_mmc.h> #include <asm/mach-jz4740/jz4740_nand.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/leds_pwm.h> #include <asm/mach-jz4740/platform.h> #include "clock.h" static bool is_avt2; /* GPIOs */ #define QI_LB60_GPIO_SD_CD JZ_GPIO_PORTD(0) #define QI_LB60_GPIO_SD_VCC_EN_N JZ_GPIO_PORTD(2) #define QI_LB60_GPIO_KEYOUT(x) (JZ_GPIO_PORTC(10) + (x)) #define QI_LB60_GPIO_KEYIN(x) (JZ_GPIO_PORTD(18) + (x)) #define QI_LB60_GPIO_KEYIN8 JZ_GPIO_PORTD(26) /* NAND */ static struct nand_ecclayout qi_lb60_ecclayout_1gb = { .eccbytes = 36, .eccpos = { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41 }, .oobfree = { { .offset = 2, .length = 4 }, { .offset = 42, .length = 22 } }, }; /* Early prototypes of the QI LB60 had only 1GB of NAND. 
* In order to support these devices as well the partition and ecc layout is * initialized depending on the NAND size */ static struct mtd_partition qi_lb60_partitions_1gb[] = { { .name = "NAND BOOT partition", .offset = 0 * 0x100000, .size = 4 * 0x100000, }, { .name = "NAND KERNEL partition", .offset = 4 * 0x100000, .size = 4 * 0x100000, }, { .name = "NAND ROOTFS partition", .offset = 8 * 0x100000, .size = (504 + 512) * 0x100000, }, }; static struct nand_ecclayout qi_lb60_ecclayout_2gb = { .eccbytes = 72, .eccpos = { 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83 }, .oobfree = { { .offset = 2, .length = 10 }, { .offset = 84, .length = 44 }, }, }; static struct mtd_partition qi_lb60_partitions_2gb[] = { { .name = "NAND BOOT partition", .offset = 0 * 0x100000, .size = 4 * 0x100000, }, { .name = "NAND KERNEL partition", .offset = 4 * 0x100000, .size = 4 * 0x100000, }, { .name = "NAND ROOTFS partition", .offset = 8 * 0x100000, .size = (504 + 512 + 1024) * 0x100000, }, }; static void qi_lb60_nand_ident(struct platform_device *pdev, struct nand_chip *chip, struct mtd_partition **partitions, int *num_partitions) { if (chip->page_shift == 12) { chip->ecc.layout = &qi_lb60_ecclayout_2gb; *partitions = qi_lb60_partitions_2gb; *num_partitions = ARRAY_SIZE(qi_lb60_partitions_2gb); } else { chip->ecc.layout = &qi_lb60_ecclayout_1gb; *partitions = qi_lb60_partitions_1gb; *num_partitions = ARRAY_SIZE(qi_lb60_partitions_1gb); } } static struct jz_nand_platform_data qi_lb60_nand_pdata = { .ident_callback = qi_lb60_nand_ident, .busy_gpio = 94, .banks = { 1 }, }; /* Keyboard*/ #define KEY_QI_QI KEY_F13 #define KEY_QI_UPRED KEY_RIGHTALT #define KEY_QI_VOLUP KEY_VOLUMEUP #define KEY_QI_VOLDOWN KEY_VOLUMEDOWN #define KEY_QI_FN KEY_LEFTCTRL static 
const uint32_t qi_lb60_keymap[] = { KEY(0, 0, KEY_F1), /* S2 */ KEY(0, 1, KEY_F2), /* S3 */ KEY(0, 2, KEY_F3), /* S4 */ KEY(0, 3, KEY_F4), /* S5 */ KEY(0, 4, KEY_F5), /* S6 */ KEY(0, 5, KEY_F6), /* S7 */ KEY(0, 6, KEY_F7), /* S8 */ KEY(1, 0, KEY_Q), /* S10 */ KEY(1, 1, KEY_W), /* S11 */ KEY(1, 2, KEY_E), /* S12 */ KEY(1, 3, KEY_R), /* S13 */ KEY(1, 4, KEY_T), /* S14 */ KEY(1, 5, KEY_Y), /* S15 */ KEY(1, 6, KEY_U), /* S16 */ KEY(1, 7, KEY_I), /* S17 */ KEY(2, 0, KEY_A), /* S18 */ KEY(2, 1, KEY_S), /* S19 */ KEY(2, 2, KEY_D), /* S20 */ KEY(2, 3, KEY_F), /* S21 */ KEY(2, 4, KEY_G), /* S22 */ KEY(2, 5, KEY_H), /* S23 */ KEY(2, 6, KEY_J), /* S24 */ KEY(2, 7, KEY_K), /* S25 */ KEY(3, 0, KEY_ESC), /* S26 */ KEY(3, 1, KEY_Z), /* S27 */ KEY(3, 2, KEY_X), /* S28 */ KEY(3, 3, KEY_C), /* S29 */ KEY(3, 4, KEY_V), /* S30 */ KEY(3, 5, KEY_B), /* S31 */ KEY(3, 6, KEY_N), /* S32 */ KEY(3, 7, KEY_M), /* S33 */ KEY(4, 0, KEY_TAB), /* S34 */ KEY(4, 1, KEY_CAPSLOCK), /* S35 */ KEY(4, 2, KEY_BACKSLASH), /* S36 */ KEY(4, 3, KEY_APOSTROPHE), /* S37 */ KEY(4, 4, KEY_COMMA), /* S38 */ KEY(4, 5, KEY_DOT), /* S39 */ KEY(4, 6, KEY_SLASH), /* S40 */ KEY(4, 7, KEY_UP), /* S41 */ KEY(5, 0, KEY_O), /* S42 */ KEY(5, 1, KEY_L), /* S43 */ KEY(5, 2, KEY_EQUAL), /* S44 */ KEY(5, 3, KEY_QI_UPRED), /* S45 */ KEY(5, 4, KEY_SPACE), /* S46 */ KEY(5, 5, KEY_QI_QI), /* S47 */ KEY(5, 6, KEY_RIGHTCTRL), /* S48 */ KEY(5, 7, KEY_LEFT), /* S49 */ KEY(6, 0, KEY_F8), /* S50 */ KEY(6, 1, KEY_P), /* S51 */ KEY(6, 2, KEY_BACKSPACE),/* S52 */ KEY(6, 3, KEY_ENTER), /* S53 */ KEY(6, 4, KEY_QI_VOLUP), /* S54 */ KEY(6, 5, KEY_QI_VOLDOWN), /* S55 */ KEY(6, 6, KEY_DOWN), /* S56 */ KEY(6, 7, KEY_RIGHT), /* S57 */ KEY(7, 0, KEY_LEFTSHIFT), /* S58 */ KEY(7, 1, KEY_LEFTALT), /* S59 */ KEY(7, 2, KEY_QI_FN), /* S60 */ }; static const struct matrix_keymap_data qi_lb60_keymap_data = { .keymap = qi_lb60_keymap, .keymap_size = ARRAY_SIZE(qi_lb60_keymap), }; static const unsigned int qi_lb60_keypad_cols[] = { QI_LB60_GPIO_KEYOUT(0), 
QI_LB60_GPIO_KEYOUT(1), QI_LB60_GPIO_KEYOUT(2), QI_LB60_GPIO_KEYOUT(3), QI_LB60_GPIO_KEYOUT(4), QI_LB60_GPIO_KEYOUT(5), QI_LB60_GPIO_KEYOUT(6), QI_LB60_GPIO_KEYOUT(7), }; static const unsigned int qi_lb60_keypad_rows[] = { QI_LB60_GPIO_KEYIN(0), QI_LB60_GPIO_KEYIN(1), QI_LB60_GPIO_KEYIN(2), QI_LB60_GPIO_KEYIN(3), QI_LB60_GPIO_KEYIN(4), QI_LB60_GPIO_KEYIN(5), QI_LB60_GPIO_KEYIN(6), QI_LB60_GPIO_KEYIN8, }; static struct matrix_keypad_platform_data qi_lb60_pdata = { .keymap_data = &qi_lb60_keymap_data, .col_gpios = qi_lb60_keypad_cols, .row_gpios = qi_lb60_keypad_rows, .num_col_gpios = ARRAY_SIZE(qi_lb60_keypad_cols), .num_row_gpios = ARRAY_SIZE(qi_lb60_keypad_rows), .col_scan_delay_us = 10, .debounce_ms = 10, .wakeup = 1, .active_low = 1, }; static struct platform_device qi_lb60_keypad = { .name = "matrix-keypad", .id = -1, .dev = { .platform_data = &qi_lb60_pdata, }, }; /* Display */ static struct fb_videomode qi_lb60_video_modes[] = { { .name = "320x240", .xres = 320, .yres = 240, .refresh = 30, .left_margin = 140, .right_margin = 273, .upper_margin = 20, .lower_margin = 2, .hsync_len = 1, .vsync_len = 1, .sync = 0, .vmode = FB_VMODE_NONINTERLACED, }, }; static struct jz4740_fb_platform_data qi_lb60_fb_pdata = { .width = 60, .height = 45, .num_modes = ARRAY_SIZE(qi_lb60_video_modes), .modes = qi_lb60_video_modes, .bpp = 24, .lcd_type = JZ_LCD_TYPE_8BIT_SERIAL, .pixclk_falling_edge = 1, }; struct spi_gpio_platform_data spigpio_platform_data = { .sck = JZ_GPIO_PORTC(23), .mosi = JZ_GPIO_PORTC(22), .miso = -1, .num_chipselect = 1, }; static struct platform_device spigpio_device = { .name = "spi_gpio", .id = 1, .dev = { .platform_data = &spigpio_platform_data, }, }; static struct spi_board_info qi_lb60_spi_board_info[] = { { .modalias = "ili8960", .controller_data = (void *)JZ_GPIO_PORTC(21), .chip_select = 0, .bus_num = 1, .max_speed_hz = 30 * 1000, .mode = SPI_3WIRE, }, }; /* Battery */ static struct jz_battery_platform_data qi_lb60_battery_pdata = { .gpio_charge = 
JZ_GPIO_PORTC(27), .gpio_charge_active_low = 1, .info = { .name = "battery", .technology = POWER_SUPPLY_TECHNOLOGY_LIPO, .voltage_max_design = 4200000, .voltage_min_design = 3600000, }, }; /* GPIO Key: power */ static struct gpio_keys_button qi_lb60_gpio_keys_buttons[] = { [0] = { .code = KEY_POWER, .gpio = JZ_GPIO_PORTD(29), .active_low = 1, .desc = "Power", .wakeup = 1, }, }; static struct gpio_keys_platform_data qi_lb60_gpio_keys_data = { .nbuttons = ARRAY_SIZE(qi_lb60_gpio_keys_buttons), .buttons = qi_lb60_gpio_keys_buttons, }; static struct platform_device qi_lb60_gpio_keys = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &qi_lb60_gpio_keys_data, } }; static struct jz4740_mmc_platform_data qi_lb60_mmc_pdata = { .gpio_card_detect = QI_LB60_GPIO_SD_CD, .gpio_read_only = -1, .gpio_power = QI_LB60_GPIO_SD_VCC_EN_N, .power_active_low = 1, }; /* OHCI */ static struct regulator_consumer_supply avt2_usb_regulator_consumer = REGULATOR_SUPPLY("vbus", "jz4740-ohci"); static struct regulator_init_data avt2_usb_regulator_init_data = { .num_consumer_supplies = 1, .consumer_supplies = &avt2_usb_regulator_consumer, .constraints = { .name = "USB power", .min_uV = 5000000, .max_uV = 5000000, .valid_modes_mask = REGULATOR_MODE_NORMAL, .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, }; static struct fixed_voltage_config avt2_usb_regulator_data = { .supply_name = "USB power", .microvolts = 5000000, .gpio = JZ_GPIO_PORTB(17), .init_data = &avt2_usb_regulator_init_data, }; static struct platform_device avt2_usb_regulator_device = { .name = "reg-fixed-voltage", .id = -1, .dev = { .platform_data = &avt2_usb_regulator_data, } }; /* beeper */ static struct platform_device qi_lb60_pwm_beeper = { .name = "pwm-beeper", .id = -1, .dev = { .platform_data = (void *)4, }, }; /* charger */ static char *qi_lb60_batteries[] = { "battery", }; static struct gpio_charger_platform_data qi_lb60_charger_pdata = { .name = "usb", .type = POWER_SUPPLY_TYPE_USB, .gpio = JZ_GPIO_PORTD(28), 
.gpio_active_low = 1, .supplied_to = qi_lb60_batteries, .num_supplicants = ARRAY_SIZE(qi_lb60_batteries), }; static struct platform_device qi_lb60_charger_device = { .name = "gpio-charger", .dev = { .platform_data = &qi_lb60_charger_pdata, }, }; /* audio */ static struct platform_device qi_lb60_audio_device = { .name = "qi-lb60-audio", .id = -1, }; static struct platform_device *jz_platform_devices[] __initdata = { &jz4740_udc_device, &jz4740_mmc_device, &jz4740_nand_device, &qi_lb60_keypad, &spigpio_device, &jz4740_framebuffer_device, &jz4740_pcm_device, &jz4740_i2s_device, &jz4740_codec_device, &jz4740_rtc_device, &jz4740_adc_device, &jz4740_pwm_device, &qi_lb60_gpio_keys, &qi_lb60_pwm_beeper, &qi_lb60_charger_device, &qi_lb60_audio_device, }; static void __init board_gpio_setup(void) { /* We only need to enable/disable pullup here for pins used in generic * drivers. Everything else is done by the drivers themselves. */ jz_gpio_disable_pullup(QI_LB60_GPIO_SD_VCC_EN_N); jz_gpio_disable_pullup(QI_LB60_GPIO_SD_CD); } static int __init qi_lb60_init_platform_devices(void) { jz4740_framebuffer_device.dev.platform_data = &qi_lb60_fb_pdata; jz4740_nand_device.dev.platform_data = &qi_lb60_nand_pdata; jz4740_adc_device.dev.platform_data = &qi_lb60_battery_pdata; jz4740_mmc_device.dev.platform_data = &qi_lb60_mmc_pdata; jz4740_serial_device_register(); spi_register_board_info(qi_lb60_spi_board_info, ARRAY_SIZE(qi_lb60_spi_board_info)); if (is_avt2) { platform_device_register(&avt2_usb_regulator_device); platform_device_register(&jz4740_usb_ohci_device); } return platform_add_devices(jz_platform_devices, ARRAY_SIZE(jz_platform_devices)); } struct jz4740_clock_board_data jz4740_clock_bdata = { .ext_rate = 12000000, .rtc_rate = 32768, }; static __init int board_avt2(char *str) { qi_lb60_mmc_pdata.card_detect_active_low = 1; is_avt2 = true; return 1; } __setup("avt2", board_avt2); static int __init qi_lb60_board_setup(void) { printk(KERN_INFO "Qi Hardware JZ4740 QI %s setup\n", 
is_avt2 ? "AVT2" : "LB60"); board_gpio_setup(); if (qi_lb60_init_platform_devices()) panic("Failed to initialize platform devices"); return 0; } arch_initcall(qi_lb60_board_setup);
gpl-2.0
mikalv/android_kernel_samsung_degas3g
drivers/usb/gadget/fsl_qe_udc.c
1988
64171
/* * driver/usb/gadget/fsl_qe_udc.c * * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. All rights reserved. * * Xie Xiaobo <X.Xie@freescale.com> * Li Yang <leoli@freescale.com> * Based on bareboard code from Shlomi Gridish. * * Description: * Freescle QE/CPM USB Pheripheral Controller Driver * The controller can be found on MPC8360, MPC8272, and etc. * MPC8360 Rev 1.1 may need QE mircocode update * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #undef USB_TRACE #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/moduleparam.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/dma-mapping.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/usb/otg.h> #include <asm/qe.h> #include <asm/cpm.h> #include <asm/dma.h> #include <asm/reg.h> #include "fsl_qe_udc.h" #define DRIVER_DESC "Freescale QE/CPM USB Device Controller driver" #define DRIVER_AUTHOR "Xie XiaoBo" #define DRIVER_VERSION "1.0" #define DMA_ADDR_INVALID (~(dma_addr_t)0) static const char driver_name[] = "fsl_qe_udc"; static const char driver_desc[] = DRIVER_DESC; /*ep name is important in gadget, it should obey the convention of ep_match()*/ static const char *const ep_name[] = { "ep0-control", /* everyone has ep0 */ /* 3 configurable endpoints */ "ep1", "ep2", "ep3", }; static struct usb_endpoint_descriptor qe_ep0_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = 0, .bmAttributes = USB_ENDPOINT_XFER_CONTROL, .wMaxPacketSize = USB_MAX_CTRL_PAYLOAD, }; 
/******************************************************************** * Internal Used Function Start ********************************************************************/ /*----------------------------------------------------------------- * done() - retire a request; caller blocked irqs *--------------------------------------------------------------*/ static void done(struct qe_ep *ep, struct qe_req *req, int status) { struct qe_udc *udc = ep->udc; unsigned char stopped = ep->stopped; /* the req->queue pointer is used by ep_queue() func, in which * the request will be added into a udc_ep->queue 'd tail * so here the req will be dropped from the ep->queue */ list_del_init(&req->queue); /* req.status should be set as -EINPROGRESS in ep_queue() */ if (req->req.status == -EINPROGRESS) req->req.status = status; else status = req->req.status; if (req->mapped) { dma_unmap_single(udc->gadget.dev.parent, req->req.dma, req->req.length, ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->req.dma = DMA_ADDR_INVALID; req->mapped = 0; } else dma_sync_single_for_cpu(udc->gadget.dev.parent, req->req.dma, req->req.length, ep_is_in(ep) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); if (status && (status != -ESHUTDOWN)) dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n", ep->ep.name, &req->req, status, req->req.actual, req->req.length); /* don't modify queue heads during completion callback */ ep->stopped = 1; spin_unlock(&udc->lock); /* this complete() should a func implemented by gadget layer, * eg fsg->bulk_in_complete() */ if (req->req.complete) req->req.complete(&ep->ep, &req->req); spin_lock(&udc->lock); ep->stopped = stopped; } /*----------------------------------------------------------------- * nuke(): delete all requests related to this ep *--------------------------------------------------------------*/ static void nuke(struct qe_ep *ep, int status) { /* Whether this eq has request linked */ while (!list_empty(&ep->queue)) { struct qe_req *req = NULL; req = list_entry(ep->queue.next, struct qe_req, queue); done(ep, req, status); } } /*---------------------------------------------------------------------------* * USB and Endpoint manipulate process, include parameter and register * *---------------------------------------------------------------------------*/ /* @value: 1--set stall 0--clean stall */ static int qe_eprx_stall_change(struct qe_ep *ep, int value) { u16 tem_usep; u8 epnum = ep->epnum; struct qe_udc *udc = ep->udc; tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]); tem_usep = tem_usep & ~USB_RHS_MASK; if (value == 1) tem_usep |= USB_RHS_STALL; else if (ep->dir == USB_DIR_IN) tem_usep |= USB_RHS_IGNORE_OUT; out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep); return 0; } static int qe_eptx_stall_change(struct qe_ep *ep, int value) { u16 tem_usep; u8 epnum = ep->epnum; struct qe_udc *udc = ep->udc; tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]); tem_usep = tem_usep & ~USB_THS_MASK; if (value == 1) tem_usep |= USB_THS_STALL; else if (ep->dir == USB_DIR_OUT) tem_usep |= USB_THS_IGNORE_IN; out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep); return 0; } static int 
qe_ep0_stall(struct qe_udc *udc) { qe_eptx_stall_change(&udc->eps[0], 1); qe_eprx_stall_change(&udc->eps[0], 1); udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = 0; return 0; } static int qe_eprx_nack(struct qe_ep *ep) { u8 epnum = ep->epnum; struct qe_udc *udc = ep->udc; if (ep->state == EP_STATE_IDLE) { /* Set the ep's nack */ clrsetbits_be16(&udc->usb_regs->usb_usep[epnum], USB_RHS_MASK, USB_RHS_NACK); /* Mask Rx and Busy interrupts */ clrbits16(&udc->usb_regs->usb_usbmr, (USB_E_RXB_MASK | USB_E_BSY_MASK)); ep->state = EP_STATE_NACK; } return 0; } static int qe_eprx_normal(struct qe_ep *ep) { struct qe_udc *udc = ep->udc; if (ep->state == EP_STATE_NACK) { clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum], USB_RTHS_MASK, USB_THS_IGNORE_IN); /* Unmask RX interrupts */ out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK | USB_E_RXB_MASK); setbits16(&udc->usb_regs->usb_usbmr, (USB_E_RXB_MASK | USB_E_BSY_MASK)); ep->state = EP_STATE_IDLE; ep->has_data = 0; } return 0; } static int qe_ep_cmd_stoptx(struct qe_ep *ep) { if (ep->udc->soc_type == PORT_CPM) cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT), CPM_USB_STOP_TX_OPCODE); else qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB, ep->epnum, 0); return 0; } static int qe_ep_cmd_restarttx(struct qe_ep *ep) { if (ep->udc->soc_type == PORT_CPM) cpm_command(CPM_USB_RESTART_TX | (ep->epnum << CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE); else qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB, ep->epnum, 0); return 0; } static int qe_ep_flushtxfifo(struct qe_ep *ep) { struct qe_udc *udc = ep->udc; int i; i = (int)ep->epnum; qe_ep_cmd_stoptx(ep); out_8(&udc->usb_regs->usb_uscom, USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum))); out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase)); out_be32(&udc->ep_param[i]->tstate, 0); out_be16(&udc->ep_param[i]->tbcnt, 0); ep->c_txbd = ep->txbase; ep->n_txbd = ep->txbase; qe_ep_cmd_restarttx(ep); return 0; } static int qe_ep_filltxfifo(struct qe_ep *ep) { 
struct qe_udc *udc = ep->udc; out_8(&udc->usb_regs->usb_uscom, USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum))); return 0; } static int qe_epbds_reset(struct qe_udc *udc, int pipe_num) { struct qe_ep *ep; u32 bdring_len; struct qe_bd __iomem *bd; int i; ep = &udc->eps[pipe_num]; if (ep->dir == USB_DIR_OUT) bdring_len = USB_BDRING_LEN_RX; else bdring_len = USB_BDRING_LEN; bd = ep->rxbase; for (i = 0; i < (bdring_len - 1); i++) { out_be32((u32 __iomem *)bd, R_E | R_I); bd++; } out_be32((u32 __iomem *)bd, R_E | R_I | R_W); bd = ep->txbase; for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) { out_be32(&bd->buf, 0); out_be32((u32 __iomem *)bd, 0); bd++; } out_be32((u32 __iomem *)bd, T_W); return 0; } static int qe_ep_reset(struct qe_udc *udc, int pipe_num) { struct qe_ep *ep; u16 tmpusep; ep = &udc->eps[pipe_num]; tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]); tmpusep &= ~USB_RTHS_MASK; switch (ep->dir) { case USB_DIR_BOTH: qe_ep_flushtxfifo(ep); break; case USB_DIR_OUT: tmpusep |= USB_THS_IGNORE_IN; break; case USB_DIR_IN: qe_ep_flushtxfifo(ep); tmpusep |= USB_RHS_IGNORE_OUT; break; default: break; } out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep); qe_epbds_reset(udc, pipe_num); return 0; } static int qe_ep_toggledata01(struct qe_ep *ep) { ep->data01 ^= 0x1; return 0; } static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num) { struct qe_ep *ep = &udc->eps[pipe_num]; unsigned long tmp_addr = 0; struct usb_ep_para __iomem *epparam; int i; struct qe_bd __iomem *bd; int bdring_len; if (ep->dir == USB_DIR_OUT) bdring_len = USB_BDRING_LEN_RX; else bdring_len = USB_BDRING_LEN; epparam = udc->ep_param[pipe_num]; /* alloc multi-ram for BD rings and set the ep parameters */ tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len + USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD); if (IS_ERR_VALUE(tmp_addr)) return -ENOMEM; out_be16(&epparam->rbase, (u16)tmp_addr); out_be16(&epparam->tbase, (u16)(tmp_addr + (sizeof(struct qe_bd) * bdring_len))); 
out_be16(&epparam->rbptr, in_be16(&epparam->rbase)); out_be16(&epparam->tbptr, in_be16(&epparam->tbase)); ep->rxbase = cpm_muram_addr(tmp_addr); ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd) * bdring_len)); ep->n_rxbd = ep->rxbase; ep->e_rxbd = ep->rxbase; ep->n_txbd = ep->txbase; ep->c_txbd = ep->txbase; ep->data01 = 0; /* data0 */ /* Init TX and RX bds */ bd = ep->rxbase; for (i = 0; i < bdring_len - 1; i++) { out_be32(&bd->buf, 0); out_be32((u32 __iomem *)bd, 0); bd++; } out_be32(&bd->buf, 0); out_be32((u32 __iomem *)bd, R_W); bd = ep->txbase; for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) { out_be32(&bd->buf, 0); out_be32((u32 __iomem *)bd, 0); bd++; } out_be32(&bd->buf, 0); out_be32((u32 __iomem *)bd, T_W); return 0; } static int qe_ep_rxbd_update(struct qe_ep *ep) { unsigned int size; int i; unsigned int tmp; struct qe_bd __iomem *bd; unsigned int bdring_len; if (ep->rxbase == NULL) return -EINVAL; bd = ep->rxbase; ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC); if (ep->rxframe == NULL) { dev_err(ep->udc->dev, "malloc rxframe failed\n"); return -ENOMEM; } qe_frame_init(ep->rxframe); if (ep->dir == USB_DIR_OUT) bdring_len = USB_BDRING_LEN_RX; else bdring_len = USB_BDRING_LEN; size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1); ep->rxbuffer = kzalloc(size, GFP_ATOMIC); if (ep->rxbuffer == NULL) { dev_err(ep->udc->dev, "malloc rxbuffer failed,size=%d\n", size); kfree(ep->rxframe); return -ENOMEM; } ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer); if (ep->rxbuf_d == DMA_ADDR_INVALID) { ep->rxbuf_d = dma_map_single(ep->udc->gadget.dev.parent, ep->rxbuffer, size, DMA_FROM_DEVICE); ep->rxbufmap = 1; } else { dma_sync_single_for_device(ep->udc->gadget.dev.parent, ep->rxbuf_d, size, DMA_FROM_DEVICE); ep->rxbufmap = 0; } size = ep->ep.maxpacket + USB_CRC_SIZE + 2; tmp = ep->rxbuf_d; tmp = (u32)(((tmp >> 2) << 2) + 4); for (i = 0; i < bdring_len - 1; i++) { out_be32(&bd->buf, tmp); out_be32((u32 __iomem *)bd, (R_E | R_I)); tmp = tmp + 
size; bd++; } out_be32(&bd->buf, tmp); out_be32((u32 __iomem *)bd, (R_E | R_I | R_W)); return 0; } static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num) { struct qe_ep *ep = &udc->eps[pipe_num]; struct usb_ep_para __iomem *epparam; u16 usep, logepnum; u16 tmp; u8 rtfcr = 0; epparam = udc->ep_param[pipe_num]; usep = 0; logepnum = (ep->ep.desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); usep |= (logepnum << USB_EPNUM_SHIFT); switch (ep->ep.desc->bmAttributes & 0x03) { case USB_ENDPOINT_XFER_BULK: usep |= USB_TRANS_BULK; break; case USB_ENDPOINT_XFER_ISOC: usep |= USB_TRANS_ISO; break; case USB_ENDPOINT_XFER_INT: usep |= USB_TRANS_INT; break; default: usep |= USB_TRANS_CTR; break; } switch (ep->dir) { case USB_DIR_OUT: usep |= USB_THS_IGNORE_IN; break; case USB_DIR_IN: usep |= USB_RHS_IGNORE_OUT; break; default: break; } out_be16(&udc->usb_regs->usb_usep[pipe_num], usep); rtfcr = 0x30; out_8(&epparam->rbmr, rtfcr); out_8(&epparam->tbmr, rtfcr); tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE); /* MRBLR must be divisble by 4 */ tmp = (u16)(((tmp >> 2) << 2) + 4); out_be16(&epparam->mrblr, tmp); return 0; } static int qe_ep_init(struct qe_udc *udc, unsigned char pipe_num, const struct usb_endpoint_descriptor *desc) { struct qe_ep *ep = &udc->eps[pipe_num]; unsigned long flags; int reval = 0; u16 max = 0; max = usb_endpoint_maxp(desc); /* check the max package size validate for this endpoint */ /* Refer to USB2.0 spec table 9-13, */ if (pipe_num != 0) { switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { case USB_ENDPOINT_XFER_BULK: if (strstr(ep->ep.name, "-iso") || strstr(ep->ep.name, "-int")) goto en_done; switch (udc->gadget.speed) { case USB_SPEED_HIGH: if ((max == 128) || (max == 256) || (max == 512)) break; default: switch (max) { case 4: case 8: case 16: case 32: case 64: break; default: case USB_SPEED_LOW: goto en_done; } } break; case USB_ENDPOINT_XFER_INT: if (strstr(ep->ep.name, "-iso")) /* bulk is ok */ goto en_done; switch 
(udc->gadget.speed) { case USB_SPEED_HIGH: if (max <= 1024) break; case USB_SPEED_FULL: if (max <= 64) break; default: if (max <= 8) break; goto en_done; } break; case USB_ENDPOINT_XFER_ISOC: if (strstr(ep->ep.name, "-bulk") || strstr(ep->ep.name, "-int")) goto en_done; switch (udc->gadget.speed) { case USB_SPEED_HIGH: if (max <= 1024) break; case USB_SPEED_FULL: if (max <= 1023) break; default: goto en_done; } break; case USB_ENDPOINT_XFER_CONTROL: if (strstr(ep->ep.name, "-iso") || strstr(ep->ep.name, "-int")) goto en_done; switch (udc->gadget.speed) { case USB_SPEED_HIGH: case USB_SPEED_FULL: switch (max) { case 1: case 2: case 4: case 8: case 16: case 32: case 64: break; default: goto en_done; } case USB_SPEED_LOW: switch (max) { case 1: case 2: case 4: case 8: break; default: goto en_done; } default: goto en_done; } break; default: goto en_done; } } /* if ep0*/ spin_lock_irqsave(&udc->lock, flags); /* initialize ep structure */ ep->ep.maxpacket = max; ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK); ep->ep.desc = desc; ep->stopped = 0; ep->init = 1; if (pipe_num == 0) { ep->dir = USB_DIR_BOTH; udc->ep0_dir = USB_DIR_OUT; udc->ep0_state = WAIT_FOR_SETUP; } else { switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) { case USB_DIR_OUT: ep->dir = USB_DIR_OUT; break; case USB_DIR_IN: ep->dir = USB_DIR_IN; default: break; } } /* hardware special operation */ qe_ep_bd_init(udc, pipe_num); if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) { reval = qe_ep_rxbd_update(ep); if (reval) goto en_done1; } if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) { ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC); if (ep->txframe == NULL) { dev_err(udc->dev, "malloc txframe failed\n"); goto en_done2; } qe_frame_init(ep->txframe); } qe_ep_register_init(udc, pipe_num); /* Now HW will be NAKing transfers to that EP, * until a buffer is queued to it. 
*/ spin_unlock_irqrestore(&udc->lock, flags); return 0; en_done2: kfree(ep->rxbuffer); kfree(ep->rxframe); en_done1: spin_unlock_irqrestore(&udc->lock, flags); en_done: dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name); return -ENODEV; } static inline void qe_usb_enable(struct qe_udc *udc) { setbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN); } static inline void qe_usb_disable(struct qe_udc *udc) { clrbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN); } /*----------------------------------------------------------------------------* * USB and EP basic manipulate function end * *----------------------------------------------------------------------------*/ /****************************************************************************** UDC transmit and receive process ******************************************************************************/ static void recycle_one_rxbd(struct qe_ep *ep) { u32 bdstatus; bdstatus = in_be32((u32 __iomem *)ep->e_rxbd); bdstatus = R_I | R_E | (bdstatus & R_W); out_be32((u32 __iomem *)ep->e_rxbd, bdstatus); if (bdstatus & R_W) ep->e_rxbd = ep->rxbase; else ep->e_rxbd++; } static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext) { u32 bdstatus; struct qe_bd __iomem *bd, *nextbd; unsigned char stop = 0; nextbd = ep->n_rxbd; bd = ep->e_rxbd; bdstatus = in_be32((u32 __iomem *)bd); while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) { bdstatus = R_E | R_I | (bdstatus & R_W); out_be32((u32 __iomem *)bd, bdstatus); if (bdstatus & R_W) bd = ep->rxbase; else bd++; bdstatus = in_be32((u32 __iomem *)bd); if (stopatnext && (bd == nextbd)) stop = 1; } ep->e_rxbd = bd; } static void ep_recycle_rxbds(struct qe_ep *ep) { struct qe_bd __iomem *bd = ep->n_rxbd; u32 bdstatus; u8 epnum = ep->epnum; struct qe_udc *udc = ep->udc; bdstatus = in_be32((u32 __iomem *)bd); if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) { bd = ep->rxbase + ((in_be16(&udc->ep_param[epnum]->rbptr) - in_be16(&udc->ep_param[epnum]->rbase)) >> 
3); bdstatus = in_be32((u32 __iomem *)bd); if (bdstatus & R_W) bd = ep->rxbase; else bd++; ep->e_rxbd = bd; recycle_rxbds(ep, 0); ep->e_rxbd = ep->n_rxbd; } else recycle_rxbds(ep, 1); if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK) out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK); if (ep->has_data <= 0 && (!list_empty(&ep->queue))) qe_eprx_normal(ep); ep->localnack = 0; } static void setup_received_handle(struct qe_udc *udc, struct usb_ctrlrequest *setup); static int qe_ep_rxframe_handle(struct qe_ep *ep); static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req); /* when BD PID is setup, handle the packet */ static int ep0_setup_handle(struct qe_udc *udc) { struct qe_ep *ep = &udc->eps[0]; struct qe_frame *pframe; unsigned int fsize; u8 *cp; pframe = ep->rxframe; if ((frame_get_info(pframe) & PID_SETUP) && (udc->ep0_state == WAIT_FOR_SETUP)) { fsize = frame_get_length(pframe); if (unlikely(fsize != 8)) return -EINVAL; cp = (u8 *)&udc->local_setup_buff; memcpy(cp, pframe->data, fsize); ep->data01 = 1; /* handle the usb command base on the usb_ctrlrequest */ setup_received_handle(udc, &udc->local_setup_buff); return 0; } return -EINVAL; } static int qe_ep0_rx(struct qe_udc *udc) { struct qe_ep *ep = &udc->eps[0]; struct qe_frame *pframe; struct qe_bd __iomem *bd; u32 bdstatus, length; u32 vaddr; pframe = ep->rxframe; if (ep->dir == USB_DIR_IN) { dev_err(udc->dev, "ep0 not a control endpoint\n"); return -EINVAL; } bd = ep->n_rxbd; bdstatus = in_be32((u32 __iomem *)bd); length = bdstatus & BD_LENGTH_MASK; while (!(bdstatus & R_E) && length) { if ((bdstatus & R_F) && (bdstatus & R_L) && !(bdstatus & R_ERROR)) { if (length == USB_CRC_SIZE) { udc->ep0_state = WAIT_FOR_SETUP; dev_vdbg(udc->dev, "receive a ZLP in status phase\n"); } else { qe_frame_clean(pframe); vaddr = (u32)phys_to_virt(in_be32(&bd->buf)); frame_set_data(pframe, (u8 *)vaddr); frame_set_length(pframe, (length - USB_CRC_SIZE)); frame_set_status(pframe, FRAME_OK); switch (bdstatus & 
R_PID) { case R_PID_SETUP: frame_set_info(pframe, PID_SETUP); break; case R_PID_DATA1: frame_set_info(pframe, PID_DATA1); break; default: frame_set_info(pframe, PID_DATA0); break; } if ((bdstatus & R_PID) == R_PID_SETUP) ep0_setup_handle(udc); else qe_ep_rxframe_handle(ep); } } else { dev_err(udc->dev, "The receive frame with error!\n"); } /* note: don't clear the rxbd's buffer address */ recycle_one_rxbd(ep); /* Get next BD */ if (bdstatus & R_W) bd = ep->rxbase; else bd++; bdstatus = in_be32((u32 __iomem *)bd); length = bdstatus & BD_LENGTH_MASK; } ep->n_rxbd = bd; return 0; } static int qe_ep_rxframe_handle(struct qe_ep *ep) { struct qe_frame *pframe; u8 framepid = 0; unsigned int fsize; u8 *cp; struct qe_req *req; pframe = ep->rxframe; if (frame_get_info(pframe) & PID_DATA1) framepid = 0x1; if (framepid != ep->data01) { dev_err(ep->udc->dev, "the data01 error!\n"); return -EIO; } fsize = frame_get_length(pframe); if (list_empty(&ep->queue)) { dev_err(ep->udc->dev, "the %s have no requeue!\n", ep->name); } else { req = list_entry(ep->queue.next, struct qe_req, queue); cp = (u8 *)(req->req.buf) + req->req.actual; if (cp) { memcpy(cp, pframe->data, fsize); req->req.actual += fsize; if ((fsize < ep->ep.maxpacket) || (req->req.actual >= req->req.length)) { if (ep->epnum == 0) ep0_req_complete(ep->udc, req); else done(ep, req, 0); if (list_empty(&ep->queue) && ep->epnum != 0) qe_eprx_nack(ep); } } } qe_ep_toggledata01(ep); return 0; } static void ep_rx_tasklet(unsigned long data) { struct qe_udc *udc = (struct qe_udc *)data; struct qe_ep *ep; struct qe_frame *pframe; struct qe_bd __iomem *bd; unsigned long flags; u32 bdstatus, length; u32 vaddr, i; spin_lock_irqsave(&udc->lock, flags); for (i = 1; i < USB_MAX_ENDPOINTS; i++) { ep = &udc->eps[i]; if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) { dev_dbg(udc->dev, "This is a transmit ep or disable tasklet!\n"); continue; } pframe = ep->rxframe; bd = ep->n_rxbd; bdstatus = in_be32((u32 __iomem *)bd); length = 
bdstatus & BD_LENGTH_MASK; while (!(bdstatus & R_E) && length) { if (list_empty(&ep->queue)) { qe_eprx_nack(ep); dev_dbg(udc->dev, "The rxep have noreq %d\n", ep->has_data); break; } if ((bdstatus & R_F) && (bdstatus & R_L) && !(bdstatus & R_ERROR)) { qe_frame_clean(pframe); vaddr = (u32)phys_to_virt(in_be32(&bd->buf)); frame_set_data(pframe, (u8 *)vaddr); frame_set_length(pframe, (length - USB_CRC_SIZE)); frame_set_status(pframe, FRAME_OK); switch (bdstatus & R_PID) { case R_PID_DATA1: frame_set_info(pframe, PID_DATA1); break; case R_PID_SETUP: frame_set_info(pframe, PID_SETUP); break; default: frame_set_info(pframe, PID_DATA0); break; } /* handle the rx frame */ qe_ep_rxframe_handle(ep); } else { dev_err(udc->dev, "error in received frame\n"); } /* note: don't clear the rxbd's buffer address */ /*clear the length */ out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK); ep->has_data--; if (!(ep->localnack)) recycle_one_rxbd(ep); /* Get next BD */ if (bdstatus & R_W) bd = ep->rxbase; else bd++; bdstatus = in_be32((u32 __iomem *)bd); length = bdstatus & BD_LENGTH_MASK; } ep->n_rxbd = bd; if (ep->localnack) ep_recycle_rxbds(ep); ep->enable_tasklet = 0; } /* for i=1 */ spin_unlock_irqrestore(&udc->lock, flags); } static int qe_ep_rx(struct qe_ep *ep) { struct qe_udc *udc; struct qe_frame *pframe; struct qe_bd __iomem *bd; u16 swoffs, ucoffs, emptybds; udc = ep->udc; pframe = ep->rxframe; if (ep->dir == USB_DIR_IN) { dev_err(udc->dev, "transmit ep in rx function\n"); return -EINVAL; } bd = ep->n_rxbd; swoffs = (u16)(bd - ep->rxbase); ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) - in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3); if (swoffs < ucoffs) emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs; else emptybds = swoffs - ucoffs; if (emptybds < MIN_EMPTY_BDS) { qe_eprx_nack(ep); ep->localnack = 1; dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds); } ep->has_data = USB_BDRING_LEN_RX - emptybds; if (list_empty(&ep->queue)) { qe_eprx_nack(ep); 
dev_vdbg(udc->dev, "The rxep have no req queued with %d BDs\n", ep->has_data); return 0; } tasklet_schedule(&udc->rx_tasklet); ep->enable_tasklet = 1; return 0; } /* send data from a frame, no matter what tx_req */ static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame) { struct qe_udc *udc = ep->udc; struct qe_bd __iomem *bd; u16 saveusbmr; u32 bdstatus, pidmask; u32 paddr; if (ep->dir == USB_DIR_OUT) { dev_err(udc->dev, "receive ep passed to tx function\n"); return -EINVAL; } /* Disable the Tx interrupt */ saveusbmr = in_be16(&udc->usb_regs->usb_usbmr); out_be16(&udc->usb_regs->usb_usbmr, saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK)); bd = ep->n_txbd; bdstatus = in_be32((u32 __iomem *)bd); if (!(bdstatus & (T_R | BD_LENGTH_MASK))) { if (frame_get_length(frame) == 0) { frame_set_data(frame, udc->nullbuf); frame_set_length(frame, 2); frame->info |= (ZLP | NO_CRC); dev_vdbg(udc->dev, "the frame size = 0\n"); } paddr = virt_to_phys((void *)frame->data); out_be32(&bd->buf, paddr); bdstatus = (bdstatus&T_W); if (!(frame_get_info(frame) & NO_CRC)) bdstatus |= T_R | T_I | T_L | T_TC | frame_get_length(frame); else bdstatus |= T_R | T_I | T_L | frame_get_length(frame); /* if the packet is a ZLP in status phase */ if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP)) ep->data01 = 0x1; if (ep->data01) { pidmask = T_PID_DATA1; frame->info |= PID_DATA1; } else { pidmask = T_PID_DATA0; frame->info |= PID_DATA0; } bdstatus |= T_CNF; bdstatus |= pidmask; out_be32((u32 __iomem *)bd, bdstatus); qe_ep_filltxfifo(ep); /* enable the TX interrupt */ out_be16(&udc->usb_regs->usb_usbmr, saveusbmr); qe_ep_toggledata01(ep); if (bdstatus & T_W) ep->n_txbd = ep->txbase; else ep->n_txbd++; return 0; } else { out_be16(&udc->usb_regs->usb_usbmr, saveusbmr); dev_vdbg(udc->dev, "The tx bd is not ready!\n"); return -EBUSY; } } /* when a bd was transmitted, the function can * handle the tx_req, not include ep0 */ static int txcomplete(struct qe_ep *ep, unsigned char restart) { 
if (ep->tx_req != NULL) { struct qe_req *req = ep->tx_req; unsigned zlp = 0, last_len = 0; last_len = min_t(unsigned, req->req.length - ep->sent, ep->ep.maxpacket); if (!restart) { int asent = ep->last; ep->sent += asent; ep->last -= asent; } else { ep->last = 0; } /* zlp needed when req->re.zero is set */ if (req->req.zero) { if (last_len == 0 || (req->req.length % ep->ep.maxpacket) != 0) zlp = 0; else zlp = 1; } else zlp = 0; /* a request already were transmitted completely */ if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) { done(ep, ep->tx_req, 0); ep->tx_req = NULL; ep->last = 0; ep->sent = 0; } } /* we should gain a new tx_req fot this endpoint */ if (ep->tx_req == NULL) { if (!list_empty(&ep->queue)) { ep->tx_req = list_entry(ep->queue.next, struct qe_req, queue); ep->last = 0; ep->sent = 0; } } return 0; } /* give a frame and a tx_req, send some data */ static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame) { unsigned int size; u8 *buf; qe_frame_clean(frame); size = min_t(u32, (ep->tx_req->req.length - ep->sent), ep->ep.maxpacket); buf = (u8 *)ep->tx_req->req.buf + ep->sent; if (buf && size) { ep->last = size; ep->tx_req->req.actual += size; frame_set_data(frame, buf); frame_set_length(frame, size); frame_set_status(frame, FRAME_OK); frame_set_info(frame, 0); return qe_ep_tx(ep, frame); } return -EIO; } /* give a frame struct,send a ZLP */ static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor) { struct qe_udc *udc = ep->udc; if (frame == NULL) return -ENODEV; qe_frame_clean(frame); frame_set_data(frame, (u8 *)udc->nullbuf); frame_set_length(frame, 2); frame_set_status(frame, FRAME_OK); frame_set_info(frame, (ZLP | NO_CRC | infor)); return qe_ep_tx(ep, frame); } static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame) { struct qe_req *req = ep->tx_req; int reval; if (req == NULL) return -ENODEV; if ((req->req.length - ep->sent) > 0) reval = qe_usb_senddata(ep, frame); else reval = sendnulldata(ep, 
frame, 0); return reval; } /* if direction is DIR_IN, the status is Device->Host * if direction is DIR_OUT, the status transaction is Device<-Host * in status phase, udc create a request and gain status */ static int ep0_prime_status(struct qe_udc *udc, int direction) { struct qe_ep *ep = &udc->eps[0]; if (direction == USB_DIR_IN) { udc->ep0_state = DATA_STATE_NEED_ZLP; udc->ep0_dir = USB_DIR_IN; sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ); } else { udc->ep0_dir = USB_DIR_OUT; udc->ep0_state = WAIT_FOR_OUT_STATUS; } return 0; } /* a request complete in ep0, whether gadget request or udc request */ static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req) { struct qe_ep *ep = &udc->eps[0]; /* because usb and ep's status already been set in ch9setaddress() */ switch (udc->ep0_state) { case DATA_STATE_XMIT: done(ep, req, 0); /* receive status phase */ if (ep0_prime_status(udc, USB_DIR_OUT)) qe_ep0_stall(udc); break; case DATA_STATE_NEED_ZLP: done(ep, req, 0); udc->ep0_state = WAIT_FOR_SETUP; break; case DATA_STATE_RECV: done(ep, req, 0); /* send status phase */ if (ep0_prime_status(udc, USB_DIR_IN)) qe_ep0_stall(udc); break; case WAIT_FOR_OUT_STATUS: done(ep, req, 0); udc->ep0_state = WAIT_FOR_SETUP; break; case WAIT_FOR_SETUP: dev_vdbg(udc->dev, "Unexpected interrupt\n"); break; default: qe_ep0_stall(udc); break; } } static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart) { struct qe_req *tx_req = NULL; struct qe_frame *frame = ep->txframe; if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) { if (!restart) ep->udc->ep0_state = WAIT_FOR_SETUP; else sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ); return 0; } tx_req = ep->tx_req; if (tx_req != NULL) { if (!restart) { int asent = ep->last; ep->sent += asent; ep->last -= asent; } else { ep->last = 0; } /* a request already were transmitted completely */ if ((ep->tx_req->req.length - ep->sent) <= 0) { ep->tx_req->req.actual = (unsigned int)ep->sent; 
ep0_req_complete(ep->udc, ep->tx_req); ep->tx_req = NULL; ep->last = 0; ep->sent = 0; } } else { dev_vdbg(ep->udc->dev, "the ep0_controller have no req\n"); } return 0; } static int ep0_txframe_handle(struct qe_ep *ep) { /* if have error, transmit again */ if (frame_get_status(ep->txframe) & FRAME_ERROR) { qe_ep_flushtxfifo(ep); dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n"); if (frame_get_info(ep->txframe) & PID_DATA0) ep->data01 = 0; else ep->data01 = 1; ep0_txcomplete(ep, 1); } else ep0_txcomplete(ep, 0); frame_create_tx(ep, ep->txframe); return 0; } static int qe_ep0_txconf(struct qe_ep *ep) { struct qe_bd __iomem *bd; struct qe_frame *pframe; u32 bdstatus; bd = ep->c_txbd; bdstatus = in_be32((u32 __iomem *)bd); while (!(bdstatus & T_R) && (bdstatus & ~T_W)) { pframe = ep->txframe; /* clear and recycle the BD */ out_be32((u32 __iomem *)bd, bdstatus & T_W); out_be32(&bd->buf, 0); if (bdstatus & T_W) ep->c_txbd = ep->txbase; else ep->c_txbd++; if (ep->c_txbd == ep->n_txbd) { if (bdstatus & DEVICE_T_ERROR) { frame_set_status(pframe, FRAME_ERROR); if (bdstatus & T_TO) pframe->status |= TX_ER_TIMEOUT; if (bdstatus & T_UN) pframe->status |= TX_ER_UNDERUN; } ep0_txframe_handle(ep); } bd = ep->c_txbd; bdstatus = in_be32((u32 __iomem *)bd); } return 0; } static int ep_txframe_handle(struct qe_ep *ep) { if (frame_get_status(ep->txframe) & FRAME_ERROR) { qe_ep_flushtxfifo(ep); dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n"); if (frame_get_info(ep->txframe) & PID_DATA0) ep->data01 = 0; else ep->data01 = 1; txcomplete(ep, 1); } else txcomplete(ep, 0); frame_create_tx(ep, ep->txframe); /* send the data */ return 0; } /* confirm the already trainsmited bd */ static int qe_ep_txconf(struct qe_ep *ep) { struct qe_bd __iomem *bd; struct qe_frame *pframe = NULL; u32 bdstatus; unsigned char breakonrxinterrupt = 0; bd = ep->c_txbd; bdstatus = in_be32((u32 __iomem *)bd); while (!(bdstatus & T_R) && (bdstatus & ~T_W)) { pframe = ep->txframe; if (bdstatus 
& DEVICE_T_ERROR) { frame_set_status(pframe, FRAME_ERROR); if (bdstatus & T_TO) pframe->status |= TX_ER_TIMEOUT; if (bdstatus & T_UN) pframe->status |= TX_ER_UNDERUN; } /* clear and recycle the BD */ out_be32((u32 __iomem *)bd, bdstatus & T_W); out_be32(&bd->buf, 0); if (bdstatus & T_W) ep->c_txbd = ep->txbase; else ep->c_txbd++; /* handle the tx frame */ ep_txframe_handle(ep); bd = ep->c_txbd; bdstatus = in_be32((u32 __iomem *)bd); } if (breakonrxinterrupt) return -EIO; else return 0; } /* Add a request in queue, and try to transmit a packet */ static int ep_req_send(struct qe_ep *ep, struct qe_req *req) { int reval = 0; if (ep->tx_req == NULL) { ep->sent = 0; ep->last = 0; txcomplete(ep, 0); /* can gain a new tx_req */ reval = frame_create_tx(ep, ep->txframe); } return reval; } /* Maybe this is a good ideal */ static int ep_req_rx(struct qe_ep *ep, struct qe_req *req) { struct qe_udc *udc = ep->udc; struct qe_frame *pframe = NULL; struct qe_bd __iomem *bd; u32 bdstatus, length; u32 vaddr, fsize; u8 *cp; u8 finish_req = 0; u8 framepid; if (list_empty(&ep->queue)) { dev_vdbg(udc->dev, "the req already finish!\n"); return 0; } pframe = ep->rxframe; bd = ep->n_rxbd; bdstatus = in_be32((u32 __iomem *)bd); length = bdstatus & BD_LENGTH_MASK; while (!(bdstatus & R_E) && length) { if (finish_req) break; if ((bdstatus & R_F) && (bdstatus & R_L) && !(bdstatus & R_ERROR)) { qe_frame_clean(pframe); vaddr = (u32)phys_to_virt(in_be32(&bd->buf)); frame_set_data(pframe, (u8 *)vaddr); frame_set_length(pframe, (length - USB_CRC_SIZE)); frame_set_status(pframe, FRAME_OK); switch (bdstatus & R_PID) { case R_PID_DATA1: frame_set_info(pframe, PID_DATA1); break; default: frame_set_info(pframe, PID_DATA0); break; } /* handle the rx frame */ if (frame_get_info(pframe) & PID_DATA1) framepid = 0x1; else framepid = 0; if (framepid != ep->data01) { dev_vdbg(udc->dev, "the data01 error!\n"); } else { fsize = frame_get_length(pframe); cp = (u8 *)(req->req.buf) + req->req.actual; if (cp) { 
memcpy(cp, pframe->data, fsize); req->req.actual += fsize; if ((fsize < ep->ep.maxpacket) || (req->req.actual >= req->req.length)) { finish_req = 1; done(ep, req, 0); if (list_empty(&ep->queue)) qe_eprx_nack(ep); } } qe_ep_toggledata01(ep); } } else { dev_err(udc->dev, "The receive frame with error!\n"); } /* note: don't clear the rxbd's buffer address * * only Clear the length */ out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK)); ep->has_data--; /* Get next BD */ if (bdstatus & R_W) bd = ep->rxbase; else bd++; bdstatus = in_be32((u32 __iomem *)bd); length = bdstatus & BD_LENGTH_MASK; } ep->n_rxbd = bd; ep_recycle_rxbds(ep); return 0; } /* only add the request in queue */ static int ep_req_receive(struct qe_ep *ep, struct qe_req *req) { if (ep->state == EP_STATE_NACK) { if (ep->has_data <= 0) { /* Enable rx and unmask rx interrupt */ qe_eprx_normal(ep); } else { /* Copy the exist BD data */ ep_req_rx(ep, req); } } return 0; } /******************************************************************** Internal Used Function End ********************************************************************/ /*----------------------------------------------------------------------- Endpoint Management Functions For Gadget -----------------------------------------------------------------------*/ static int qe_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct qe_udc *udc; struct qe_ep *ep; int retval = 0; unsigned char epnum; ep = container_of(_ep, struct qe_ep, ep); /* catch various bogus parameters */ if (!_ep || !desc || _ep->name == ep_name[0] || (desc->bDescriptorType != USB_DT_ENDPOINT)) return -EINVAL; udc = ep->udc; if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN)) return -ESHUTDOWN; epnum = (u8)desc->bEndpointAddress & 0xF; retval = qe_ep_init(udc, epnum, desc); if (retval != 0) { cpm_muram_free(cpm_muram_offset(ep->rxbase)); dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum); return -EINVAL; } dev_dbg(udc->dev, "enable ep%d 
successful\n", ep->epnum); return 0; } static int qe_ep_disable(struct usb_ep *_ep) { struct qe_udc *udc; struct qe_ep *ep; unsigned long flags; unsigned int size; ep = container_of(_ep, struct qe_ep, ep); udc = ep->udc; if (!_ep || !ep->ep.desc) { dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL); return -EINVAL; } spin_lock_irqsave(&udc->lock, flags); /* Nuke all pending requests (does flush) */ nuke(ep, -ESHUTDOWN); ep->ep.desc = NULL; ep->stopped = 1; ep->tx_req = NULL; qe_ep_reset(udc, ep->epnum); spin_unlock_irqrestore(&udc->lock, flags); cpm_muram_free(cpm_muram_offset(ep->rxbase)); if (ep->dir == USB_DIR_OUT) size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN_RX + 1); else size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1); if (ep->dir != USB_DIR_IN) { kfree(ep->rxframe); if (ep->rxbufmap) { dma_unmap_single(udc->gadget.dev.parent, ep->rxbuf_d, size, DMA_FROM_DEVICE); ep->rxbuf_d = DMA_ADDR_INVALID; } else { dma_sync_single_for_cpu( udc->gadget.dev.parent, ep->rxbuf_d, size, DMA_FROM_DEVICE); } kfree(ep->rxbuffer); } if (ep->dir != USB_DIR_OUT) kfree(ep->txframe); dev_dbg(udc->dev, "disabled %s OK\n", _ep->name); return 0; } static struct usb_request *qe_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { struct qe_req *req; req = kzalloc(sizeof(*req), gfp_flags); if (!req) return NULL; req->req.dma = DMA_ADDR_INVALID; INIT_LIST_HEAD(&req->queue); return &req->req; } static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req) { struct qe_req *req; req = container_of(_req, struct qe_req, req); if (_req) kfree(req); } static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req) { struct qe_ep *ep = container_of(_ep, struct qe_ep, ep); struct qe_req *req = container_of(_req, struct qe_req, req); struct qe_udc *udc; int reval; udc = ep->udc; /* catch various bogus parameters */ if (!_req || !req->req.complete || !req->req.buf || !list_empty(&req->queue)) { dev_dbg(udc->dev, "bad params\n"); 
return -EINVAL; } if (!_ep || (!ep->ep.desc && ep_index(ep))) { dev_dbg(udc->dev, "bad ep\n"); return -EINVAL; } if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; req->ep = ep; /* map virtual address to hardware */ if (req->req.dma == DMA_ADDR_INVALID) { req->req.dma = dma_map_single(ep->udc->gadget.dev.parent, req->req.buf, req->req.length, ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->mapped = 1; } else { dma_sync_single_for_device(ep->udc->gadget.dev.parent, req->req.dma, req->req.length, ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->mapped = 0; } req->req.status = -EINPROGRESS; req->req.actual = 0; list_add_tail(&req->queue, &ep->queue); dev_vdbg(udc->dev, "gadget have request in %s! %d\n", ep->name, req->req.length); /* push the request to device */ if (ep_is_in(ep)) reval = ep_req_send(ep, req); /* EP0 */ if (ep_index(ep) == 0 && req->req.length > 0) { if (ep_is_in(ep)) udc->ep0_state = DATA_STATE_XMIT; else udc->ep0_state = DATA_STATE_RECV; } if (ep->dir == USB_DIR_OUT) reval = ep_req_receive(ep, req); return 0; } /* queues (submits) an I/O request to an endpoint */ static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct qe_ep *ep = container_of(_ep, struct qe_ep, ep); struct qe_udc *udc = ep->udc; unsigned long flags; int ret; spin_lock_irqsave(&udc->lock, flags); ret = __qe_ep_queue(_ep, _req); spin_unlock_irqrestore(&udc->lock, flags); return ret; } /* dequeues (cancels, unlinks) an I/O request from an endpoint */ static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct qe_ep *ep = container_of(_ep, struct qe_ep, ep); struct qe_req *req; unsigned long flags; if (!_ep || !_req) return -EINVAL; spin_lock_irqsave(&ep->udc->lock, flags); /* make sure it's actually queued on this endpoint */ list_for_each_entry(req, &ep->queue, queue) { if (&req->req == _req) break; } if (&req->req != _req) { spin_unlock_irqrestore(&ep->udc->lock, flags); return -EINVAL; 
} done(ep, req, -ECONNRESET); spin_unlock_irqrestore(&ep->udc->lock, flags); return 0; } /*----------------------------------------------------------------- * modify the endpoint halt feature * @ep: the non-isochronous endpoint being stalled * @value: 1--set halt 0--clear halt * Returns zero, or a negative error code. *----------------------------------------------------------------*/ static int qe_ep_set_halt(struct usb_ep *_ep, int value) { struct qe_ep *ep; unsigned long flags; int status = -EOPNOTSUPP; struct qe_udc *udc; ep = container_of(_ep, struct qe_ep, ep); if (!_ep || !ep->ep.desc) { status = -EINVAL; goto out; } udc = ep->udc; /* Attempt to halt IN ep will fail if any transfer requests * are still queue */ if (value && ep_is_in(ep) && !list_empty(&ep->queue)) { status = -EAGAIN; goto out; } status = 0; spin_lock_irqsave(&ep->udc->lock, flags); qe_eptx_stall_change(ep, value); qe_eprx_stall_change(ep, value); spin_unlock_irqrestore(&ep->udc->lock, flags); if (ep->epnum == 0) { udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = 0; } /* set data toggle to DATA0 on clear halt */ if (value == 0) ep->data01 = 0; out: dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name, value ? 
"set" : "clear", status); return status; } static struct usb_ep_ops qe_ep_ops = { .enable = qe_ep_enable, .disable = qe_ep_disable, .alloc_request = qe_alloc_request, .free_request = qe_free_request, .queue = qe_ep_queue, .dequeue = qe_ep_dequeue, .set_halt = qe_ep_set_halt, }; /*------------------------------------------------------------------------ Gadget Driver Layer Operations ------------------------------------------------------------------------*/ /* Get the current frame number */ static int qe_get_frame(struct usb_gadget *gadget) { struct qe_udc *udc = container_of(gadget, struct qe_udc, gadget); u16 tmp; tmp = in_be16(&udc->usb_param->frame_n); if (tmp & 0x8000) tmp = tmp & 0x07ff; else tmp = -EINVAL; return (int)tmp; } static int fsl_qe_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver); static int fsl_qe_stop(struct usb_gadget *gadget, struct usb_gadget_driver *driver); /* defined in usb_gadget.h */ static const struct usb_gadget_ops qe_gadget_ops = { .get_frame = qe_get_frame, .udc_start = fsl_qe_start, .udc_stop = fsl_qe_stop, }; /*------------------------------------------------------------------------- USB ep0 Setup process in BUS Enumeration -------------------------------------------------------------------------*/ static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe) { struct qe_ep *ep = &udc->eps[pipe]; nuke(ep, -ECONNRESET); ep->tx_req = NULL; return 0; } static int reset_queues(struct qe_udc *udc) { u8 pipe; for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++) udc_reset_ep_queue(udc, pipe); /* report disconnect; the driver is already quiesced */ spin_unlock(&udc->lock); udc->driver->disconnect(&udc->gadget); spin_lock(&udc->lock); return 0; } static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index, u16 length) { /* Save the new address to device struct */ udc->device_address = (u8) value; /* Update usb state */ udc->usb_state = USB_STATE_ADDRESS; /* Status phase , send a ZLP */ if (ep0_prime_status(udc, 
USB_DIR_IN)) qe_ep0_stall(udc); } static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req) { struct qe_req *req = container_of(_req, struct qe_req, req); req->req.buf = NULL; kfree(req); } static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value, u16 index, u16 length) { u16 usb_status = 0; struct qe_req *req; struct qe_ep *ep; int status = 0; ep = &udc->eps[0]; if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) { /* Get device status */ usb_status = 1 << USB_DEVICE_SELF_POWERED; } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) { /* Get interface status */ /* We don't have interface information in udc driver */ usb_status = 0; } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) { /* Get endpoint status */ int pipe = index & USB_ENDPOINT_NUMBER_MASK; struct qe_ep *target_ep = &udc->eps[pipe]; u16 usep; /* stall if endpoint doesn't exist */ if (!target_ep->ep.desc) goto stall; usep = in_be16(&udc->usb_regs->usb_usep[pipe]); if (index & USB_DIR_IN) { if (target_ep->dir != USB_DIR_IN) goto stall; if ((usep & USB_THS_MASK) == USB_THS_STALL) usb_status = 1 << USB_ENDPOINT_HALT; } else { if (target_ep->dir != USB_DIR_OUT) goto stall; if ((usep & USB_RHS_MASK) == USB_RHS_STALL) usb_status = 1 << USB_ENDPOINT_HALT; } } req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL), struct qe_req, req); req->req.length = 2; req->req.buf = udc->statusbuf; *(u16 *)req->req.buf = cpu_to_le16(usb_status); req->req.status = -EINPROGRESS; req->req.actual = 0; req->req.complete = ownercomplete; udc->ep0_dir = USB_DIR_IN; /* data phase */ status = __qe_ep_queue(&ep->ep, &req->req); if (status == 0) return; stall: dev_err(udc->dev, "Can't respond to getstatus request \n"); qe_ep0_stall(udc); } /* only handle the setup request, suppose the device in normal status */ static void setup_received_handle(struct qe_udc *udc, struct usb_ctrlrequest *setup) { /* Fix Endian (udc->local_setup_buff is cpu Endian now)*/ u16 wValue = 
le16_to_cpu(setup->wValue); u16 wIndex = le16_to_cpu(setup->wIndex); u16 wLength = le16_to_cpu(setup->wLength); /* clear the previous request in the ep0 */ udc_reset_ep_queue(udc, 0); if (setup->bRequestType & USB_DIR_IN) udc->ep0_dir = USB_DIR_IN; else udc->ep0_dir = USB_DIR_OUT; switch (setup->bRequest) { case USB_REQ_GET_STATUS: /* Data+Status phase form udc */ if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) != (USB_DIR_IN | USB_TYPE_STANDARD)) break; ch9getstatus(udc, setup->bRequestType, wValue, wIndex, wLength); return; case USB_REQ_SET_ADDRESS: /* Status phase from udc */ if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE)) break; ch9setaddress(udc, wValue, wIndex, wLength); return; case USB_REQ_CLEAR_FEATURE: case USB_REQ_SET_FEATURE: /* Requests with no data phase, status phase from udc */ if ((setup->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) break; if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) { int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK; struct qe_ep *ep; if (wValue != 0 || wLength != 0 || pipe > USB_MAX_ENDPOINTS) break; ep = &udc->eps[pipe]; spin_unlock(&udc->lock); qe_ep_set_halt(&ep->ep, (setup->bRequest == USB_REQ_SET_FEATURE) ? 
1 : 0); spin_lock(&udc->lock); } ep0_prime_status(udc, USB_DIR_IN); return; default: break; } if (wLength) { /* Data phase from gadget, status phase from udc */ if (setup->bRequestType & USB_DIR_IN) { udc->ep0_state = DATA_STATE_XMIT; udc->ep0_dir = USB_DIR_IN; } else { udc->ep0_state = DATA_STATE_RECV; udc->ep0_dir = USB_DIR_OUT; } spin_unlock(&udc->lock); if (udc->driver->setup(&udc->gadget, &udc->local_setup_buff) < 0) qe_ep0_stall(udc); spin_lock(&udc->lock); } else { /* No data phase, IN status from gadget */ udc->ep0_dir = USB_DIR_IN; spin_unlock(&udc->lock); if (udc->driver->setup(&udc->gadget, &udc->local_setup_buff) < 0) qe_ep0_stall(udc); spin_lock(&udc->lock); udc->ep0_state = DATA_STATE_NEED_ZLP; } } /*------------------------------------------------------------------------- USB Interrupt handlers -------------------------------------------------------------------------*/ static void suspend_irq(struct qe_udc *udc) { udc->resume_state = udc->usb_state; udc->usb_state = USB_STATE_SUSPENDED; /* report suspend to the driver ,serial.c not support this*/ if (udc->driver->suspend) udc->driver->suspend(&udc->gadget); } static void resume_irq(struct qe_udc *udc) { udc->usb_state = udc->resume_state; udc->resume_state = 0; /* report resume to the driver , serial.c not support this*/ if (udc->driver->resume) udc->driver->resume(&udc->gadget); } static void idle_irq(struct qe_udc *udc) { u8 usbs; usbs = in_8(&udc->usb_regs->usb_usbs); if (usbs & USB_IDLE_STATUS_MASK) { if ((udc->usb_state) != USB_STATE_SUSPENDED) suspend_irq(udc); } else { if (udc->usb_state == USB_STATE_SUSPENDED) resume_irq(udc); } } static int reset_irq(struct qe_udc *udc) { unsigned char i; if (udc->usb_state == USB_STATE_DEFAULT) return 0; qe_usb_disable(udc); out_8(&udc->usb_regs->usb_usadr, 0); for (i = 0; i < USB_MAX_ENDPOINTS; i++) { if (udc->eps[i].init) qe_ep_reset(udc, i); } reset_queues(udc); udc->usb_state = USB_STATE_DEFAULT; udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = 
USB_DIR_OUT; qe_usb_enable(udc); return 0; } static int bsy_irq(struct qe_udc *udc) { return 0; } static int txe_irq(struct qe_udc *udc) { return 0; } /* ep0 tx interrupt also in here */ static int tx_irq(struct qe_udc *udc) { struct qe_ep *ep; struct qe_bd __iomem *bd; int i, res = 0; if ((udc->usb_state == USB_STATE_ADDRESS) && (in_8(&udc->usb_regs->usb_usadr) == 0)) out_8(&udc->usb_regs->usb_usadr, udc->device_address); for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) { ep = &udc->eps[i]; if (ep && ep->init && (ep->dir != USB_DIR_OUT)) { bd = ep->c_txbd; if (!(in_be32((u32 __iomem *)bd) & T_R) && (in_be32(&bd->buf))) { /* confirm the transmitted bd */ if (ep->epnum == 0) res = qe_ep0_txconf(ep); else res = qe_ep_txconf(ep); } } } return res; } /* setup packect's rx is handle in the function too */ static void rx_irq(struct qe_udc *udc) { struct qe_ep *ep; struct qe_bd __iomem *bd; int i; for (i = 0; i < USB_MAX_ENDPOINTS; i++) { ep = &udc->eps[i]; if (ep && ep->init && (ep->dir != USB_DIR_IN)) { bd = ep->n_rxbd; if (!(in_be32((u32 __iomem *)bd) & R_E) && (in_be32(&bd->buf))) { if (ep->epnum == 0) { qe_ep0_rx(udc); } else { /*non-setup package receive*/ qe_ep_rx(ep); } } } } } static irqreturn_t qe_udc_irq(int irq, void *_udc) { struct qe_udc *udc = (struct qe_udc *)_udc; u16 irq_src; irqreturn_t status = IRQ_NONE; unsigned long flags; spin_lock_irqsave(&udc->lock, flags); irq_src = in_be16(&udc->usb_regs->usb_usber) & in_be16(&udc->usb_regs->usb_usbmr); /* Clear notification bits */ out_be16(&udc->usb_regs->usb_usber, irq_src); /* USB Interrupt */ if (irq_src & USB_E_IDLE_MASK) { idle_irq(udc); irq_src &= ~USB_E_IDLE_MASK; status = IRQ_HANDLED; } if (irq_src & USB_E_TXB_MASK) { tx_irq(udc); irq_src &= ~USB_E_TXB_MASK; status = IRQ_HANDLED; } if (irq_src & USB_E_RXB_MASK) { rx_irq(udc); irq_src &= ~USB_E_RXB_MASK; status = IRQ_HANDLED; } if (irq_src & USB_E_RESET_MASK) { reset_irq(udc); irq_src &= ~USB_E_RESET_MASK; status = IRQ_HANDLED; } if 
(irq_src & USB_E_BSY_MASK) { bsy_irq(udc); irq_src &= ~USB_E_BSY_MASK; status = IRQ_HANDLED; } if (irq_src & USB_E_TXE_MASK) { txe_irq(udc); irq_src &= ~USB_E_TXE_MASK; status = IRQ_HANDLED; } spin_unlock_irqrestore(&udc->lock, flags); return status; } /*------------------------------------------------------------------------- Gadget driver probe and unregister. --------------------------------------------------------------------------*/ static int fsl_qe_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct qe_udc *udc; unsigned long flags; udc = container_of(gadget, struct qe_udc, gadget); /* lock is needed but whether should use this lock or another */ spin_lock_irqsave(&udc->lock, flags); driver->driver.bus = NULL; /* hook up the driver */ udc->driver = driver; udc->gadget.speed = driver->max_speed; /* Enable IRQ reg and Set usbcmd reg EN bit */ qe_usb_enable(udc); out_be16(&udc->usb_regs->usb_usber, 0xffff); out_be16(&udc->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE); udc->usb_state = USB_STATE_ATTACHED; udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = USB_DIR_OUT; spin_unlock_irqrestore(&udc->lock, flags); dev_info(udc->dev, "%s bind to driver %s\n", udc->gadget.name, driver->driver.name); return 0; } static int fsl_qe_stop(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct qe_udc *udc; struct qe_ep *loop_ep; unsigned long flags; udc = container_of(gadget, struct qe_udc, gadget); /* stop usb controller, disable intr */ qe_usb_disable(udc); /* in fact, no needed */ udc->usb_state = USB_STATE_ATTACHED; udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = 0; /* stand operation */ spin_lock_irqsave(&udc->lock, flags); udc->gadget.speed = USB_SPEED_UNKNOWN; nuke(&udc->eps[0], -ESHUTDOWN); list_for_each_entry(loop_ep, &udc->gadget.ep_list, ep.ep_list) nuke(loop_ep, -ESHUTDOWN); spin_unlock_irqrestore(&udc->lock, flags); udc->driver = NULL; dev_info(udc->dev, "unregistered gadget driver '%s'\r\n", driver->driver.name); return 0; } /* 
udc structure's alloc and setup, include ep-param alloc */ static struct qe_udc *qe_udc_config(struct platform_device *ofdev) { struct qe_udc *udc; struct device_node *np = ofdev->dev.of_node; unsigned int tmp_addr = 0; struct usb_device_para __iomem *usbpram; unsigned int i; u64 size; u32 offset; udc = kzalloc(sizeof(*udc), GFP_KERNEL); if (udc == NULL) { dev_err(&ofdev->dev, "malloc udc failed\n"); goto cleanup; } udc->dev = &ofdev->dev; /* get default address of usb parameter in MURAM from device tree */ offset = *of_get_address(np, 1, &size, NULL); udc->usb_param = cpm_muram_addr(offset); memset_io(udc->usb_param, 0, size); usbpram = udc->usb_param; out_be16(&usbpram->frame_n, 0); out_be32(&usbpram->rstate, 0); tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para)), USB_EP_PARA_ALIGNMENT); if (IS_ERR_VALUE(tmp_addr)) goto cleanup; for (i = 0; i < USB_MAX_ENDPOINTS; i++) { out_be16(&usbpram->epptr[i], (u16)tmp_addr); udc->ep_param[i] = cpm_muram_addr(tmp_addr); tmp_addr += 32; } memset_io(udc->ep_param[0], 0, USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para)); udc->resume_state = USB_STATE_NOTATTACHED; udc->usb_state = USB_STATE_POWERED; udc->ep0_dir = 0; spin_lock_init(&udc->lock); return udc; cleanup: kfree(udc); return NULL; } /* USB Controller register init */ static int qe_udc_reg_init(struct qe_udc *udc) { struct usb_ctlr __iomem *qe_usbregs; qe_usbregs = udc->usb_regs; /* Spec says that we must enable the USB controller to change mode. */ out_8(&qe_usbregs->usb_usmod, 0x01); /* Mode changed, now disable it, since muram isn't initialized yet. */ out_8(&qe_usbregs->usb_usmod, 0x00); /* Initialize the rest. 
*/ out_be16(&qe_usbregs->usb_usbmr, 0); out_8(&qe_usbregs->usb_uscom, 0); out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR); return 0; } static int qe_ep_config(struct qe_udc *udc, unsigned char pipe_num) { struct qe_ep *ep = &udc->eps[pipe_num]; ep->udc = udc; strcpy(ep->name, ep_name[pipe_num]); ep->ep.name = ep_name[pipe_num]; ep->ep.ops = &qe_ep_ops; ep->stopped = 1; ep->ep.maxpacket = (unsigned short) ~0; ep->ep.desc = NULL; ep->dir = 0xff; ep->epnum = (u8)pipe_num; ep->sent = 0; ep->last = 0; ep->init = 0; ep->rxframe = NULL; ep->txframe = NULL; ep->tx_req = NULL; ep->state = EP_STATE_IDLE; ep->has_data = 0; /* the queue lists any req for this ep */ INIT_LIST_HEAD(&ep->queue); /* gagdet.ep_list used for ep_autoconfig so no ep0*/ if (pipe_num != 0) list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); ep->gadget = &udc->gadget; return 0; } /*----------------------------------------------------------------------- * UDC device Driver operation functions * *----------------------------------------------------------------------*/ static void qe_udc_release(struct device *dev) { struct qe_udc *udc = container_of(dev, struct qe_udc, gadget.dev); int i; complete(udc->done); cpm_muram_free(cpm_muram_offset(udc->ep_param[0])); for (i = 0; i < USB_MAX_ENDPOINTS; i++) udc->ep_param[i] = NULL; kfree(udc); } /* Driver probe functions */ static const struct of_device_id qe_udc_match[]; static int qe_udc_probe(struct platform_device *ofdev) { struct qe_udc *udc; const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct qe_ep *ep; unsigned int ret = 0; unsigned int i; const void *prop; match = of_match_device(qe_udc_match, &ofdev->dev); if (!match) return -EINVAL; prop = of_get_property(np, "mode", NULL); if (!prop || strcmp(prop, "peripheral")) return -ENODEV; /* Initialize the udc structure including QH member and other member */ udc = qe_udc_config(ofdev); if (!udc) { dev_err(&ofdev->dev, "failed to initialize\n"); return -ENOMEM; } 
udc->soc_type = (unsigned long)match->data; udc->usb_regs = of_iomap(np, 0); if (!udc->usb_regs) { ret = -ENOMEM; goto err1; } /* initialize usb hw reg except for regs for EP, * leave usbintr reg untouched*/ qe_udc_reg_init(udc); /* here comes the stand operations for probe * set the qe_udc->gadget.xxx */ udc->gadget.ops = &qe_gadget_ops; /* gadget.ep0 is a pointer */ udc->gadget.ep0 = &udc->eps[0].ep; INIT_LIST_HEAD(&udc->gadget.ep_list); /* modify in register gadget process */ udc->gadget.speed = USB_SPEED_UNKNOWN; /* name: Identifies the controller hardware type. */ udc->gadget.name = driver_name; udc->gadget.dev.parent = &ofdev->dev; /* initialize qe_ep struct */ for (i = 0; i < USB_MAX_ENDPOINTS ; i++) { /* because the ep type isn't decide here so * qe_ep_init() should be called in ep_enable() */ /* setup the qe_ep struct and link ep.ep.list * into gadget.ep_list */ qe_ep_config(udc, (unsigned char)i); } /* ep0 initialization in here */ ret = qe_ep_init(udc, 0, &qe_ep0_desc); if (ret) goto err2; /* create a buf for ZLP send, need to remain zeroed */ udc->nullbuf = kzalloc(256, GFP_KERNEL); if (udc->nullbuf == NULL) { dev_err(udc->dev, "cannot alloc nullbuf\n"); ret = -ENOMEM; goto err3; } /* buffer for data of get_status request */ udc->statusbuf = kzalloc(2, GFP_KERNEL); if (udc->statusbuf == NULL) { ret = -ENOMEM; goto err4; } udc->nullp = virt_to_phys((void *)udc->nullbuf); if (udc->nullp == DMA_ADDR_INVALID) { udc->nullp = dma_map_single( udc->gadget.dev.parent, udc->nullbuf, 256, DMA_TO_DEVICE); udc->nullmap = 1; } else { dma_sync_single_for_device(udc->gadget.dev.parent, udc->nullp, 256, DMA_TO_DEVICE); } tasklet_init(&udc->rx_tasklet, ep_rx_tasklet, (unsigned long)udc); /* request irq and disable DR */ udc->usb_irq = irq_of_parse_and_map(np, 0); if (!udc->usb_irq) { ret = -EINVAL; goto err_noirq; } ret = request_irq(udc->usb_irq, qe_udc_irq, 0, driver_name, udc); if (ret) { dev_err(udc->dev, "cannot request irq %d err %d\n", udc->usb_irq, ret); goto 
err5; } ret = usb_add_gadget_udc_release(&ofdev->dev, &udc->gadget, qe_udc_release); if (ret) goto err6; dev_set_drvdata(&ofdev->dev, udc); dev_info(udc->dev, "%s USB controller initialized as device\n", (udc->soc_type == PORT_QE) ? "QE" : "CPM"); return 0; err6: free_irq(udc->usb_irq, udc); err5: irq_dispose_mapping(udc->usb_irq); err_noirq: if (udc->nullmap) { dma_unmap_single(udc->gadget.dev.parent, udc->nullp, 256, DMA_TO_DEVICE); udc->nullp = DMA_ADDR_INVALID; } else { dma_sync_single_for_cpu(udc->gadget.dev.parent, udc->nullp, 256, DMA_TO_DEVICE); } kfree(udc->statusbuf); err4: kfree(udc->nullbuf); err3: ep = &udc->eps[0]; cpm_muram_free(cpm_muram_offset(ep->rxbase)); kfree(ep->rxframe); kfree(ep->rxbuffer); kfree(ep->txframe); err2: iounmap(udc->usb_regs); err1: kfree(udc); return ret; } #ifdef CONFIG_PM static int qe_udc_suspend(struct platform_device *dev, pm_message_t state) { return -ENOTSUPP; } static int qe_udc_resume(struct platform_device *dev) { return -ENOTSUPP; } #endif static int qe_udc_remove(struct platform_device *ofdev) { struct qe_udc *udc = dev_get_drvdata(&ofdev->dev); struct qe_ep *ep; unsigned int size; DECLARE_COMPLETION(done); usb_del_gadget_udc(&udc->gadget); udc->done = &done; tasklet_disable(&udc->rx_tasklet); if (udc->nullmap) { dma_unmap_single(udc->gadget.dev.parent, udc->nullp, 256, DMA_TO_DEVICE); udc->nullp = DMA_ADDR_INVALID; } else { dma_sync_single_for_cpu(udc->gadget.dev.parent, udc->nullp, 256, DMA_TO_DEVICE); } kfree(udc->statusbuf); kfree(udc->nullbuf); ep = &udc->eps[0]; cpm_muram_free(cpm_muram_offset(ep->rxbase)); size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1); kfree(ep->rxframe); if (ep->rxbufmap) { dma_unmap_single(udc->gadget.dev.parent, ep->rxbuf_d, size, DMA_FROM_DEVICE); ep->rxbuf_d = DMA_ADDR_INVALID; } else { dma_sync_single_for_cpu(udc->gadget.dev.parent, ep->rxbuf_d, size, DMA_FROM_DEVICE); } kfree(ep->rxbuffer); kfree(ep->txframe); free_irq(udc->usb_irq, udc); 
irq_dispose_mapping(udc->usb_irq); tasklet_kill(&udc->rx_tasklet); iounmap(udc->usb_regs); /* wait for release() of gadget.dev to free udc */ wait_for_completion(&done); return 0; } /*-------------------------------------------------------------------------*/ static const struct of_device_id qe_udc_match[] = { { .compatible = "fsl,mpc8323-qe-usb", .data = (void *)PORT_QE, }, { .compatible = "fsl,mpc8360-qe-usb", .data = (void *)PORT_QE, }, { .compatible = "fsl,mpc8272-cpm-usb", .data = (void *)PORT_CPM, }, {}, }; MODULE_DEVICE_TABLE(of, qe_udc_match); static struct platform_driver udc_driver = { .driver = { .name = (char *)driver_name, .owner = THIS_MODULE, .of_match_table = qe_udc_match, }, .probe = qe_udc_probe, .remove = qe_udc_remove, #ifdef CONFIG_PM .suspend = qe_udc_suspend, .resume = qe_udc_resume, #endif }; module_platform_driver(udc_driver); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_LICENSE("GPL");
gpl-2.0
idryomov/btrfs-unstable
arch/mips/fw/sni/sniprom.c
2500
3845
/* * Big Endian PROM code for SNI RM machines * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005-2006 Florian Lohoff (flo@rfc822.org) * Copyright (C) 2005-2006 Thomas Bogendoerfer (tsbogend@alpha.franken.de) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <linux/console.h> #include <asm/addrspace.h> #include <asm/sni.h> #include <asm/mipsprom.h> #include <asm/mipsregs.h> #include <asm/bootinfo.h> /* special SNI prom calls */ /* * This does not exist in all proms - SINIX compares * the prom env variable "version" against "2.0008" * or greater. If lesser it tries to probe interesting * registers */ #define PROM_GET_MEMCONF 58 #define PROM_GET_HWCONF 61 #define PROM_VEC (u64 *)CKSEG1ADDR(0x1fc00000) #define PROM_ENTRY(x) (PROM_VEC + (x)) #define ___prom_putchar ((int *(*)(int))PROM_ENTRY(PROM_PUTCHAR)) #define ___prom_getenv ((char *(*)(char *))PROM_ENTRY(PROM_GETENV)) #define ___prom_get_memconf ((void (*)(void *))PROM_ENTRY(PROM_GET_MEMCONF)) #define ___prom_get_hwconf ((u32 (*)(void))PROM_ENTRY(PROM_GET_HWCONF)) #ifdef CONFIG_64BIT static u8 o32_stk[16384]; #define O32_STK &o32_stk[sizeof(o32_stk)] #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \ __asm__(#fun " = call_o32") int __PROM_O32(__prom_putchar, (int *(*)(int), void *, int)); char *__PROM_O32(__prom_getenv, (char *(*)(char *), void *, char *)); void __PROM_O32(__prom_get_memconf, (void (*)(void *), void *, void *)); u32 __PROM_O32(__prom_get_hwconf, (u32 (*)(void), void *)); #define _prom_putchar(x) __prom_putchar(___prom_putchar, O32_STK, x) #define _prom_getenv(x) __prom_getenv(___prom_getenv, O32_STK, x) #define _prom_get_memconf(x) __prom_get_memconf(___prom_get_memconf, O32_STK, x) #define _prom_get_hwconf() __prom_get_hwconf(___prom_get_hwconf, O32_STK) #else #define _prom_putchar(x) ___prom_putchar(x) #define 
_prom_getenv(x) ___prom_getenv(x) #define _prom_get_memconf(x) ___prom_get_memconf(x) #define _prom_get_hwconf(x) ___prom_get_hwconf(x) #endif void prom_putchar(char c) { _prom_putchar(c); } char *prom_getenv(char *s) { return _prom_getenv(s); } void *prom_get_hwconf(void) { u32 hwconf = _prom_get_hwconf(); if (hwconf == 0xffffffff) return NULL; return (void *)CKSEG1ADDR(hwconf); } void __init prom_free_prom_memory(void) { } /* * /proc/cpuinfo system type * */ char *system_type = "Unknown"; const char *get_system_type(void) { return system_type; } static void __init sni_mem_init(void) { int i, memsize; struct membank { u32 size; u32 base; u32 size2; u32 pad1; u32 pad2; } memconf[8]; int brd_type = *(unsigned char *)SNI_IDPROM_BRDTYPE; /* MemSIZE from prom in 16MByte chunks */ memsize = *((unsigned char *) SNI_IDPROM_MEMSIZE) * 16; pr_debug("IDProm memsize: %u MByte\n", memsize); /* get memory bank layout from prom */ _prom_get_memconf(&memconf); pr_debug("prom_get_mem_conf memory configuration:\n"); for (i = 0; i < 8 && memconf[i].size; i++) { if (brd_type == SNI_BRD_PCI_TOWER || brd_type == SNI_BRD_PCI_TOWER_CPLUS) { if (memconf[i].base >= 0x20000000 && memconf[i].base < 0x30000000) memconf[i].base -= 0x20000000; } pr_debug("Bank%d: %08x @ %08x\n", i, memconf[i].size, memconf[i].base); add_memory_region(memconf[i].base, memconf[i].size, BOOT_MEM_RAM); } } void __init prom_init(void) { int argc = fw_arg0; u32 *argv = (u32 *)CKSEG0ADDR(fw_arg1); int i; sni_mem_init(); /* copy prom cmdline parameters to kernel cmdline */ for (i = 1; i < argc; i++) { strcat(arcs_cmdline, (char *)CKSEG0ADDR(argv[i])); if (i < (argc - 1)) strcat(arcs_cmdline, " "); } }
gpl-2.0
sebirdman/m7_kernel
drivers/char/xilinx_hwicap/xilinx_hwicap.c
4804
22186
/***************************************************************************** * * Author: Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE, * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group * (c) Copyright 2007-2008 Xilinx Inc. * All rights reserved. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * *****************************************************************************/ /* * This is the code behind /dev/icap* -- it allows a user-space * application to use the Xilinx ICAP subsystem. * * The following operations are possible: * * open open the port and initialize for access. * release release port * write Write a bitstream to the configuration processor. * read Read a data stream from the configuration processor. 
* * After being opened, the port is initialized and accessed to avoid a * corrupted first read which may occur with some hardware. The port * is left in a desynched state, requiring that a synch sequence be * transmitted before any valid configuration data. A user will have * exclusive access to the device while it remains open, and the state * of the ICAP cannot be guaranteed after the device is closed. Note * that a complete reset of the core and the state of the ICAP cannot * be performed on many versions of the cores, hence users of this * device should avoid making inconsistent accesses to the device. In * particular, accessing the read interface, without first generating * a write containing a readback packet can leave the ICAP in an * inaccessible state. * * Note that in order to use the read interface, it is first necessary * to write a request packet to the write interface. i.e., it is not * possible to simply readback the bitstream (or any configuration * bits) from a device without specifically requesting them first. * The code to craft such packets is intended to be part of the * user-space application code that uses this device. The simplest * way to use this interface is simply: * * cp foo.bit /dev/icap0 * * Note that unless foo.bit is an appropriately constructed partial * bitstream, this has a high likelihood of overwriting the design * currently programmed in the FPGA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/mutex.h> #include <linux/sysctl.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/uaccess.h> #ifdef CONFIG_OF /* For open firmware. 
*/ #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> #endif #include "xilinx_hwicap.h" #include "buffer_icap.h" #include "fifo_icap.h" #define DRIVER_NAME "icap" #define HWICAP_REGS (0x10000) #define XHWICAP_MAJOR 259 #define XHWICAP_MINOR 0 #define HWICAP_DEVICES 1 /* An array, which is set to true when the device is registered. */ static DEFINE_MUTEX(hwicap_mutex); static bool probed_devices[HWICAP_DEVICES]; static struct mutex icap_sem; static struct class *icap_class; #define UNIMPLEMENTED 0xFFFF static const struct config_registers v2_config_registers = { .CRC = 0, .FAR = 1, .FDRI = 2, .FDRO = 3, .CMD = 4, .CTL = 5, .MASK = 6, .STAT = 7, .LOUT = 8, .COR = 9, .MFWR = 10, .FLR = 11, .KEY = 12, .CBC = 13, .IDCODE = 14, .AXSS = UNIMPLEMENTED, .C0R_1 = UNIMPLEMENTED, .CSOB = UNIMPLEMENTED, .WBSTAR = UNIMPLEMENTED, .TIMER = UNIMPLEMENTED, .BOOTSTS = UNIMPLEMENTED, .CTL_1 = UNIMPLEMENTED, }; static const struct config_registers v4_config_registers = { .CRC = 0, .FAR = 1, .FDRI = 2, .FDRO = 3, .CMD = 4, .CTL = 5, .MASK = 6, .STAT = 7, .LOUT = 8, .COR = 9, .MFWR = 10, .FLR = UNIMPLEMENTED, .KEY = UNIMPLEMENTED, .CBC = 11, .IDCODE = 12, .AXSS = 13, .C0R_1 = UNIMPLEMENTED, .CSOB = UNIMPLEMENTED, .WBSTAR = UNIMPLEMENTED, .TIMER = UNIMPLEMENTED, .BOOTSTS = UNIMPLEMENTED, .CTL_1 = UNIMPLEMENTED, }; static const struct config_registers v5_config_registers = { .CRC = 0, .FAR = 1, .FDRI = 2, .FDRO = 3, .CMD = 4, .CTL = 5, .MASK = 6, .STAT = 7, .LOUT = 8, .COR = 9, .MFWR = 10, .FLR = UNIMPLEMENTED, .KEY = UNIMPLEMENTED, .CBC = 11, .IDCODE = 12, .AXSS = 13, .C0R_1 = 14, .CSOB = 15, .WBSTAR = 16, .TIMER = 17, .BOOTSTS = 18, .CTL_1 = 19, }; /** * hwicap_command_desync - Send a DESYNC command to the ICAP port. * @drvdata: a pointer to the drvdata. * * This command desynchronizes the ICAP After this command, a * bitstream containing a NULL packet, followed by a SYNCH packet is * required before the ICAP will recognize commands. 
*/ static int hwicap_command_desync(struct hwicap_drvdata *drvdata) { u32 buffer[4]; u32 index = 0; /* * Create the data to be written to the ICAP. */ buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1; buffer[index++] = XHI_CMD_DESYNCH; buffer[index++] = XHI_NOOP_PACKET; buffer[index++] = XHI_NOOP_PACKET; /* * Write the data to the FIFO and intiate the transfer of data present * in the FIFO to the ICAP device. */ return drvdata->config->set_configuration(drvdata, &buffer[0], index); } /** * hwicap_get_configuration_register - Query a configuration register. * @drvdata: a pointer to the drvdata. * @reg: a constant which represents the configuration * register value to be returned. * Examples: XHI_IDCODE, XHI_FLR. * @reg_data: returns the value of the register. * * Sends a query packet to the ICAP and then receives the response. * The icap is left in Synched state. */ static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, u32 reg, u32 *reg_data) { int status; u32 buffer[6]; u32 index = 0; /* * Create the data to be written to the ICAP. */ buffer[index++] = XHI_DUMMY_PACKET; buffer[index++] = XHI_NOOP_PACKET; buffer[index++] = XHI_SYNC_PACKET; buffer[index++] = XHI_NOOP_PACKET; buffer[index++] = XHI_NOOP_PACKET; /* * Write the data to the FIFO and initiate the transfer of data present * in the FIFO to the ICAP device. */ status = drvdata->config->set_configuration(drvdata, &buffer[0], index); if (status) return status; /* If the syncword was not found, then we need to start over. */ status = drvdata->config->get_status(drvdata); if ((status & XHI_SR_DALIGN_MASK) != XHI_SR_DALIGN_MASK) return -EIO; index = 0; buffer[index++] = hwicap_type_1_read(reg) | 1; buffer[index++] = XHI_NOOP_PACKET; buffer[index++] = XHI_NOOP_PACKET; /* * Write the data to the FIFO and intiate the transfer of data present * in the FIFO to the ICAP device. 
*/ status = drvdata->config->set_configuration(drvdata, &buffer[0], index); if (status) return status; /* * Read the configuration register */ status = drvdata->config->get_configuration(drvdata, reg_data, 1); if (status) return status; return 0; } static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) { int status; u32 idcode; dev_dbg(drvdata->dev, "initializing\n"); /* Abort any current transaction, to make sure we have the * ICAP in a good state. */ dev_dbg(drvdata->dev, "Reset...\n"); drvdata->config->reset(drvdata); dev_dbg(drvdata->dev, "Desync...\n"); status = hwicap_command_desync(drvdata); if (status) return status; /* Attempt to read the IDCODE from ICAP. This * may not be returned correctly, due to the design of the * hardware. */ dev_dbg(drvdata->dev, "Reading IDCODE...\n"); status = hwicap_get_configuration_register( drvdata, drvdata->config_regs->IDCODE, &idcode); dev_dbg(drvdata->dev, "IDCODE = %x\n", idcode); if (status) return status; dev_dbg(drvdata->dev, "Desync...\n"); status = hwicap_command_desync(drvdata); if (status) return status; return 0; } static ssize_t hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct hwicap_drvdata *drvdata = file->private_data; ssize_t bytes_to_read = 0; u32 *kbuf; u32 words; u32 bytes_remaining; int status; status = mutex_lock_interruptible(&drvdata->sem); if (status) return status; if (drvdata->read_buffer_in_use) { /* If there are leftover bytes in the buffer, just */ /* return them and don't try to read more from the */ /* ICAP device. */ bytes_to_read = (count < drvdata->read_buffer_in_use) ? count : drvdata->read_buffer_in_use; /* Return the data currently in the read buffer. 
*/ if (copy_to_user(buf, drvdata->read_buffer, bytes_to_read)) { status = -EFAULT; goto error; } drvdata->read_buffer_in_use -= bytes_to_read; memmove(drvdata->read_buffer, drvdata->read_buffer + bytes_to_read, 4 - bytes_to_read); } else { /* Get new data from the ICAP, and return was was requested. */ kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); if (!kbuf) { status = -ENOMEM; goto error; } /* The ICAP device is only able to read complete */ /* words. If a number of bytes that do not correspond */ /* to complete words is requested, then we read enough */ /* words to get the required number of bytes, and then */ /* save the remaining bytes for the next read. */ /* Determine the number of words to read, rounding up */ /* if necessary. */ words = ((count + 3) >> 2); bytes_to_read = words << 2; if (bytes_to_read > PAGE_SIZE) bytes_to_read = PAGE_SIZE; /* Ensure we only read a complete number of words. */ bytes_remaining = bytes_to_read & 3; bytes_to_read &= ~3; words = bytes_to_read >> 2; status = drvdata->config->get_configuration(drvdata, kbuf, words); /* If we didn't read correctly, then bail out. */ if (status) { free_page((unsigned long)kbuf); goto error; } /* If we fail to return the data to the user, then bail out. */ if (copy_to_user(buf, kbuf, bytes_to_read)) { free_page((unsigned long)kbuf); status = -EFAULT; goto error; } memcpy(drvdata->read_buffer, kbuf, bytes_remaining); drvdata->read_buffer_in_use = bytes_remaining; free_page((unsigned long)kbuf); } status = bytes_to_read; error: mutex_unlock(&drvdata->sem); return status; } static ssize_t hwicap_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct hwicap_drvdata *drvdata = file->private_data; ssize_t written = 0; ssize_t left = count; u32 *kbuf; ssize_t len; ssize_t status; status = mutex_lock_interruptible(&drvdata->sem); if (status) return status; left += drvdata->write_buffer_in_use; /* Only write multiples of 4 bytes. 
*/ if (left < 4) { status = 0; goto error; } kbuf = (u32 *) __get_free_page(GFP_KERNEL); if (!kbuf) { status = -ENOMEM; goto error; } while (left > 3) { /* only write multiples of 4 bytes, so there might */ /* be as many as 3 bytes left (at the end). */ len = left; if (len > PAGE_SIZE) len = PAGE_SIZE; len &= ~3; if (drvdata->write_buffer_in_use) { memcpy(kbuf, drvdata->write_buffer, drvdata->write_buffer_in_use); if (copy_from_user( (((char *)kbuf) + drvdata->write_buffer_in_use), buf + written, len - (drvdata->write_buffer_in_use))) { free_page((unsigned long)kbuf); status = -EFAULT; goto error; } } else { if (copy_from_user(kbuf, buf + written, len)) { free_page((unsigned long)kbuf); status = -EFAULT; goto error; } } status = drvdata->config->set_configuration(drvdata, kbuf, len >> 2); if (status) { free_page((unsigned long)kbuf); status = -EFAULT; goto error; } if (drvdata->write_buffer_in_use) { len -= drvdata->write_buffer_in_use; left -= drvdata->write_buffer_in_use; drvdata->write_buffer_in_use = 0; } written += len; left -= len; } if ((left > 0) && (left < 4)) { if (!copy_from_user(drvdata->write_buffer, buf + written, left)) { drvdata->write_buffer_in_use = left; written += left; left = 0; } } free_page((unsigned long)kbuf); status = written; error: mutex_unlock(&drvdata->sem); return status; } static int hwicap_open(struct inode *inode, struct file *file) { struct hwicap_drvdata *drvdata; int status; mutex_lock(&hwicap_mutex); drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); status = mutex_lock_interruptible(&drvdata->sem); if (status) goto out; if (drvdata->is_open) { status = -EBUSY; goto error; } status = hwicap_initialize_hwicap(drvdata); if (status) { dev_err(drvdata->dev, "Failed to open file"); goto error; } file->private_data = drvdata; drvdata->write_buffer_in_use = 0; drvdata->read_buffer_in_use = 0; drvdata->is_open = 1; error: mutex_unlock(&drvdata->sem); out: mutex_unlock(&hwicap_mutex); return status; } static int 
hwicap_release(struct inode *inode, struct file *file) { struct hwicap_drvdata *drvdata = file->private_data; int i; int status = 0; mutex_lock(&drvdata->sem); if (drvdata->write_buffer_in_use) { /* Flush write buffer. */ for (i = drvdata->write_buffer_in_use; i < 4; i++) drvdata->write_buffer[i] = 0; status = drvdata->config->set_configuration(drvdata, (u32 *) drvdata->write_buffer, 1); if (status) goto error; } status = hwicap_command_desync(drvdata); if (status) goto error; error: drvdata->is_open = 0; mutex_unlock(&drvdata->sem); return status; } static const struct file_operations hwicap_fops = { .owner = THIS_MODULE, .write = hwicap_write, .read = hwicap_read, .open = hwicap_open, .release = hwicap_release, .llseek = noop_llseek, }; static int __devinit hwicap_setup(struct device *dev, int id, const struct resource *regs_res, const struct hwicap_driver_config *config, const struct config_registers *config_regs) { dev_t devt; struct hwicap_drvdata *drvdata = NULL; int retval = 0; dev_info(dev, "Xilinx icap port driver\n"); mutex_lock(&icap_sem); if (id < 0) { for (id = 0; id < HWICAP_DEVICES; id++) if (!probed_devices[id]) break; } if (id < 0 || id >= HWICAP_DEVICES) { mutex_unlock(&icap_sem); dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); return -EINVAL; } if (probed_devices[id]) { mutex_unlock(&icap_sem); dev_err(dev, "cannot assign to %s%i; it is already in use\n", DRIVER_NAME, id); return -EBUSY; } probed_devices[id] = 1; mutex_unlock(&icap_sem); devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id); drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); if (!drvdata) { dev_err(dev, "Couldn't allocate device private record\n"); retval = -ENOMEM; goto failed0; } dev_set_drvdata(dev, (void *)drvdata); if (!regs_res) { dev_err(dev, "Couldn't get registers resource\n"); retval = -EFAULT; goto failed1; } drvdata->mem_start = regs_res->start; drvdata->mem_end = regs_res->end; drvdata->mem_size = resource_size(regs_res); if 
(!request_mem_region(drvdata->mem_start, drvdata->mem_size, DRIVER_NAME)) { dev_err(dev, "Couldn't lock memory region at %Lx\n", (unsigned long long) regs_res->start); retval = -EBUSY; goto failed1; } drvdata->devt = devt; drvdata->dev = dev; drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size); if (!drvdata->base_address) { dev_err(dev, "ioremap() failed\n"); goto failed2; } drvdata->config = config; drvdata->config_regs = config_regs; mutex_init(&drvdata->sem); drvdata->is_open = 0; dev_info(dev, "ioremap %llx to %p with size %llx\n", (unsigned long long) drvdata->mem_start, drvdata->base_address, (unsigned long long) drvdata->mem_size); cdev_init(&drvdata->cdev, &hwicap_fops); drvdata->cdev.owner = THIS_MODULE; retval = cdev_add(&drvdata->cdev, devt, 1); if (retval) { dev_err(dev, "cdev_add() failed\n"); goto failed3; } device_create(icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id); return 0; /* success */ failed3: iounmap(drvdata->base_address); failed2: release_mem_region(regs_res->start, drvdata->mem_size); failed1: kfree(drvdata); failed0: mutex_lock(&icap_sem); probed_devices[id] = 0; mutex_unlock(&icap_sem); return retval; } static struct hwicap_driver_config buffer_icap_config = { .get_configuration = buffer_icap_get_configuration, .set_configuration = buffer_icap_set_configuration, .get_status = buffer_icap_get_status, .reset = buffer_icap_reset, }; static struct hwicap_driver_config fifo_icap_config = { .get_configuration = fifo_icap_get_configuration, .set_configuration = fifo_icap_set_configuration, .get_status = fifo_icap_get_status, .reset = fifo_icap_reset, }; static int __devexit hwicap_remove(struct device *dev) { struct hwicap_drvdata *drvdata; drvdata = (struct hwicap_drvdata *)dev_get_drvdata(dev); if (!drvdata) return 0; device_destroy(icap_class, drvdata->devt); cdev_del(&drvdata->cdev); iounmap(drvdata->base_address); release_mem_region(drvdata->mem_start, drvdata->mem_size); kfree(drvdata); dev_set_drvdata(dev, 
NULL); mutex_lock(&icap_sem); probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0; mutex_unlock(&icap_sem); return 0; /* success */ } #ifdef CONFIG_OF static int __devinit hwicap_of_probe(struct platform_device *op, const struct hwicap_driver_config *config) { struct resource res; const unsigned int *id; const char *family; int rc; const struct config_registers *regs; rc = of_address_to_resource(op->dev.of_node, 0, &res); if (rc) { dev_err(&op->dev, "invalid address\n"); return rc; } id = of_get_property(op->dev.of_node, "port-number", NULL); /* It's most likely that we're using V4, if the family is not specified */ regs = &v4_config_registers; family = of_get_property(op->dev.of_node, "xlnx,family", NULL); if (family) { if (!strcmp(family, "virtex2p")) { regs = &v2_config_registers; } else if (!strcmp(family, "virtex4")) { regs = &v4_config_registers; } else if (!strcmp(family, "virtex5")) { regs = &v5_config_registers; } } return hwicap_setup(&op->dev, id ? *id : -1, &res, config, regs); } #else static inline int hwicap_of_probe(struct platform_device *op, const struct hwicap_driver_config *config) { return -EINVAL; } #endif /* CONFIG_OF */ static const struct of_device_id __devinitconst hwicap_of_match[]; static int __devinit hwicap_drv_probe(struct platform_device *pdev) { const struct of_device_id *match; struct resource *res; const struct config_registers *regs; const char *family; match = of_match_device(hwicap_of_match, &pdev->dev); if (match) return hwicap_of_probe(pdev, match->data); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; /* It's most likely that we're using V4, if the family is not specified */ regs = &v4_config_registers; family = pdev->dev.platform_data; if (family) { if (!strcmp(family, "virtex2p")) { regs = &v2_config_registers; } else if (!strcmp(family, "virtex4")) { regs = &v4_config_registers; } else if (!strcmp(family, "virtex5")) { regs = &v5_config_registers; } } return hwicap_setup(&pdev->dev, pdev->id, 
res, &buffer_icap_config, regs); } static int __devexit hwicap_drv_remove(struct platform_device *pdev) { return hwicap_remove(&pdev->dev); } #ifdef CONFIG_OF /* Match table for device tree binding */ static const struct of_device_id __devinitconst hwicap_of_match[] = { { .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config}, { .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config}, {}, }; MODULE_DEVICE_TABLE(of, hwicap_of_match); #else #define hwicap_of_match NULL #endif static struct platform_driver hwicap_platform_driver = { .probe = hwicap_drv_probe, .remove = hwicap_drv_remove, .driver = { .owner = THIS_MODULE, .name = DRIVER_NAME, .of_match_table = hwicap_of_match, }, }; static int __init hwicap_module_init(void) { dev_t devt; int retval; icap_class = class_create(THIS_MODULE, "xilinx_config"); mutex_init(&icap_sem); devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR); retval = register_chrdev_region(devt, HWICAP_DEVICES, DRIVER_NAME); if (retval < 0) return retval; retval = platform_driver_register(&hwicap_platform_driver); if (retval) goto failed; return retval; failed: unregister_chrdev_region(devt, HWICAP_DEVICES); return retval; } static void __exit hwicap_module_cleanup(void) { dev_t devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR); class_destroy(icap_class); platform_driver_unregister(&hwicap_platform_driver); unregister_chrdev_region(devt, HWICAP_DEVICES); } module_init(hwicap_module_init); module_exit(hwicap_module_cleanup); MODULE_AUTHOR("Xilinx, Inc; Xilinx Research Labs Group"); MODULE_DESCRIPTION("Xilinx ICAP Port Driver"); MODULE_LICENSE("GPL");
gpl-2.0
asce1062/android_kernel_lge_msm7x27a-common
drivers/watchdog/shwdt.c
4804
12135
/* * drivers/watchdog/shwdt.c * * Watchdog driver for integrated watchdog in the SuperH processors. * * Copyright (C) 2001 - 2010 Paul Mundt <lethal@linux-sh.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com> * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT * * 19-Apr-2002 Rob Radez <rob@osinvestor.com> * Added expect close support, made emulated timeout runtime changeable * general cleanups, add some ioctls */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/reboot.h> #include <linux/notifier.h> #include <linux/ioport.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/uaccess.h> #include <asm/watchdog.h> #define DRV_NAME "sh-wdt" /* * Default clock division ratio is 5.25 msecs. For an additional table of * values, consult the asm-sh/watchdog.h. Overload this at module load * time. * * In order for this to work reliably we need to have HZ set to 1000 or * something quite higher than 100 (or we need a proper high-res timer * implementation that will deal with this properly), otherwise the 10ms * resolution of a jiffy is enough to trigger the overflow. For things like * the SH-4 and SH-5, this isn't necessarily that big of a problem, though * for the SH-2 and SH-3, this isn't recommended unless the WDT is absolutely * necssary. * * As a result of this timing problem, the only modes that are particularly * feasible are the 4096 and the 2048 divisors, which yield 5.25 and 2.62ms * overflow periods respectively. 
* * Also, since we can't really expect userspace to be responsive enough * before the overflow happens, we maintain two separate timers .. One in * the kernel for clearing out WOVF every 2ms or so (again, this depends on * HZ == 1000), and another for monitoring userspace writes to the WDT device. * * As such, we currently use a configurable heartbeat interval which defaults * to 30s. In this case, the userspace daemon is only responsible for periodic * writes to the device before the next heartbeat is scheduled. If the daemon * misses its deadline, the kernel timer will allow the WDT to overflow. */ static int clock_division_ratio = WTCSR_CKS_4096; #define next_ping_period(cks) (jiffies + msecs_to_jiffies(cks - 4)) static const struct watchdog_info sh_wdt_info; static struct platform_device *sh_wdt_dev; static DEFINE_SPINLOCK(shwdt_lock); #define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */ static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */ static bool nowayout = WATCHDOG_NOWAYOUT; static unsigned long next_heartbeat; struct sh_wdt { void __iomem *base; struct device *dev; struct timer_list timer; unsigned long enabled; char expect_close; }; static void sh_wdt_start(struct sh_wdt *wdt) { unsigned long flags; u8 csr; spin_lock_irqsave(&shwdt_lock, flags); next_heartbeat = jiffies + (heartbeat * HZ); mod_timer(&wdt->timer, next_ping_period(clock_division_ratio)); csr = sh_wdt_read_csr(); csr |= WTCSR_WT | clock_division_ratio; sh_wdt_write_csr(csr); sh_wdt_write_cnt(0); /* * These processors have a bit of an inconsistent initialization * process.. starting with SH-3, RSTS was moved to WTCSR, and the * RSTCSR register was removed. * * On the SH-2 however, in addition with bits being in different * locations, we must deal with RSTCSR outright.. 
*/ csr = sh_wdt_read_csr(); csr |= WTCSR_TME; csr &= ~WTCSR_RSTS; sh_wdt_write_csr(csr); #ifdef CONFIG_CPU_SH2 csr = sh_wdt_read_rstcsr(); csr &= ~RSTCSR_RSTS; sh_wdt_write_rstcsr(csr); #endif spin_unlock_irqrestore(&shwdt_lock, flags); } static void sh_wdt_stop(struct sh_wdt *wdt) { unsigned long flags; u8 csr; spin_lock_irqsave(&shwdt_lock, flags); del_timer(&wdt->timer); csr = sh_wdt_read_csr(); csr &= ~WTCSR_TME; sh_wdt_write_csr(csr); spin_unlock_irqrestore(&shwdt_lock, flags); } static inline void sh_wdt_keepalive(struct sh_wdt *wdt) { unsigned long flags; spin_lock_irqsave(&shwdt_lock, flags); next_heartbeat = jiffies + (heartbeat * HZ); spin_unlock_irqrestore(&shwdt_lock, flags); } static int sh_wdt_set_heartbeat(int t) { unsigned long flags; if (unlikely(t < 1 || t > 3600)) /* arbitrary upper limit */ return -EINVAL; spin_lock_irqsave(&shwdt_lock, flags); heartbeat = t; spin_unlock_irqrestore(&shwdt_lock, flags); return 0; } static void sh_wdt_ping(unsigned long data) { struct sh_wdt *wdt = (struct sh_wdt *)data; unsigned long flags; spin_lock_irqsave(&shwdt_lock, flags); if (time_before(jiffies, next_heartbeat)) { u8 csr; csr = sh_wdt_read_csr(); csr &= ~WTCSR_IOVF; sh_wdt_write_csr(csr); sh_wdt_write_cnt(0); mod_timer(&wdt->timer, next_ping_period(clock_division_ratio)); } else dev_warn(wdt->dev, "Heartbeat lost! 
Will not ping " "the watchdog\n"); spin_unlock_irqrestore(&shwdt_lock, flags); } static int sh_wdt_open(struct inode *inode, struct file *file) { struct sh_wdt *wdt = platform_get_drvdata(sh_wdt_dev); if (test_and_set_bit(0, &wdt->enabled)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); file->private_data = wdt; sh_wdt_start(wdt); return nonseekable_open(inode, file); } static int sh_wdt_close(struct inode *inode, struct file *file) { struct sh_wdt *wdt = file->private_data; if (wdt->expect_close == 42) { sh_wdt_stop(wdt); } else { dev_crit(wdt->dev, "Unexpected close, not " "stopping watchdog!\n"); sh_wdt_keepalive(wdt); } clear_bit(0, &wdt->enabled); wdt->expect_close = 0; return 0; } static ssize_t sh_wdt_write(struct file *file, const char *buf, size_t count, loff_t *ppos) { struct sh_wdt *wdt = file->private_data; if (count) { if (!nowayout) { size_t i; wdt->expect_close = 0; for (i = 0; i != count; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; if (c == 'V') wdt->expect_close = 42; } } sh_wdt_keepalive(wdt); } return count; } static long sh_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct sh_wdt *wdt = file->private_data; int new_heartbeat; int options, retval = -EINVAL; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user((struct watchdog_info *)arg, &sh_wdt_info, sizeof(sh_wdt_info)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, (int *)arg); case WDIOC_SETOPTIONS: if (get_user(options, (int *)arg)) return -EFAULT; if (options & WDIOS_DISABLECARD) { sh_wdt_stop(wdt); retval = 0; } if (options & WDIOS_ENABLECARD) { sh_wdt_start(wdt); retval = 0; } return retval; case WDIOC_KEEPALIVE: sh_wdt_keepalive(wdt); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_heartbeat, (int *)arg)) return -EFAULT; if (sh_wdt_set_heartbeat(new_heartbeat)) return -EINVAL; sh_wdt_keepalive(wdt); /* Fall */ case WDIOC_GETTIMEOUT: return put_user(heartbeat, (int *)arg); default: return -ENOTTY; } return 0; } static int sh_wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { struct sh_wdt *wdt = platform_get_drvdata(sh_wdt_dev); if (code == SYS_DOWN || code == SYS_HALT) sh_wdt_stop(wdt); return NOTIFY_DONE; } static const struct file_operations sh_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = sh_wdt_write, .unlocked_ioctl = sh_wdt_ioctl, .open = sh_wdt_open, .release = sh_wdt_close, }; static const struct watchdog_info sh_wdt_info = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = "SH WDT", }; static struct notifier_block sh_wdt_notifier = { .notifier_call = sh_wdt_notify_sys, }; static struct miscdevice sh_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &sh_wdt_fops, }; static int __devinit sh_wdt_probe(struct platform_device *pdev) { struct sh_wdt *wdt; struct resource *res; int rc; /* * As this driver only covers the global watchdog case, reject * any attempts to register per-CPU watchdogs. 
*/ if (pdev->id != -1) return -EINVAL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (unlikely(!res)) return -EINVAL; if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), DRV_NAME)) return -EBUSY; wdt = devm_kzalloc(&pdev->dev, sizeof(struct sh_wdt), GFP_KERNEL); if (unlikely(!wdt)) { rc = -ENOMEM; goto out_release; } wdt->dev = &pdev->dev; wdt->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (unlikely(!wdt->base)) { rc = -ENXIO; goto out_err; } rc = register_reboot_notifier(&sh_wdt_notifier); if (unlikely(rc)) { dev_err(&pdev->dev, "Can't register reboot notifier (err=%d)\n", rc); goto out_unmap; } sh_wdt_miscdev.parent = wdt->dev; rc = misc_register(&sh_wdt_miscdev); if (unlikely(rc)) { dev_err(&pdev->dev, "Can't register miscdev on minor=%d (err=%d)\n", sh_wdt_miscdev.minor, rc); goto out_unreg; } init_timer(&wdt->timer); wdt->timer.function = sh_wdt_ping; wdt->timer.data = (unsigned long)wdt; wdt->timer.expires = next_ping_period(clock_division_ratio); platform_set_drvdata(pdev, wdt); sh_wdt_dev = pdev; dev_info(&pdev->dev, "initialized.\n"); return 0; out_unreg: unregister_reboot_notifier(&sh_wdt_notifier); out_unmap: devm_iounmap(&pdev->dev, wdt->base); out_err: devm_kfree(&pdev->dev, wdt); out_release: devm_release_mem_region(&pdev->dev, res->start, resource_size(res)); return rc; } static int __devexit sh_wdt_remove(struct platform_device *pdev) { struct sh_wdt *wdt = platform_get_drvdata(pdev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); platform_set_drvdata(pdev, NULL); misc_deregister(&sh_wdt_miscdev); sh_wdt_dev = NULL; unregister_reboot_notifier(&sh_wdt_notifier); devm_release_mem_region(&pdev->dev, res->start, resource_size(res)); devm_iounmap(&pdev->dev, wdt->base); devm_kfree(&pdev->dev, wdt); return 0; } static struct platform_driver sh_wdt_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = sh_wdt_probe, .remove = __devexit_p(sh_wdt_remove), }; 
static int __init sh_wdt_init(void) { int rc; if (unlikely(clock_division_ratio < 0x5 || clock_division_ratio > 0x7)) { clock_division_ratio = WTCSR_CKS_4096; pr_info("divisor must be 0x5<=x<=0x7, using %d\n", clock_division_ratio); } rc = sh_wdt_set_heartbeat(heartbeat); if (unlikely(rc)) { heartbeat = WATCHDOG_HEARTBEAT; pr_info("heartbeat value must be 1<=x<=3600, using %d\n", heartbeat); } pr_info("configured with heartbeat=%d sec (nowayout=%d)\n", heartbeat, nowayout); return platform_driver_register(&sh_wdt_driver); } static void __exit sh_wdt_exit(void) { platform_driver_unregister(&sh_wdt_driver); } module_init(sh_wdt_init); module_exit(sh_wdt_exit); MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); MODULE_DESCRIPTION("SuperH watchdog driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(clock_division_ratio, int, 0); MODULE_PARM_DESC(clock_division_ratio, "Clock division ratio. Valid ranges are from 0x5 (1.31ms) " "to 0x7 (5.25ms). (default=" __MODULE_STRING(WTCSR_CKS_4096) ")"); module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (1 <= heartbeat <= 3600, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")"); module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
gpl-2.0
suky/android_kernel_pantech_ef65l
tools/perf/util/scripting-engines/trace-event-python.c
4804
15059
/* * trace-event-python. Feed trace events to an embedded Python interpreter. * * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <Python.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include "../../perf.h" #include "../util.h" #include "../event.h" #include "../thread.h" #include "../trace-event.h" PyMODINIT_FUNC initperf_trace_context(void); #define FTRACE_MAX_EVENT \ ((1 << (sizeof(unsigned short) * 8)) - 1) struct event *events[FTRACE_MAX_EVENT]; #define MAX_FIELDS 64 #define N_COMMON_FIELDS 7 extern struct scripting_context *scripting_context; static char *cur_field_name; static int zero_flag_atom; static PyObject *main_module, *main_dict; static void handler_call_die(const char *handler_name) { PyErr_Print(); Py_FatalError("problem in Python trace event handler"); } static void define_value(enum print_arg_type field_type, const char *ev_name, const char *field_name, const char *field_value, const char *field_str) { const char *handler_name = "define_flag_value"; PyObject *handler, *t, *retval; unsigned long long value; unsigned n = 0; if (field_type == PRINT_SYMBOL) handler_name = "define_symbolic_value"; t = PyTuple_New(4); if (!t) Py_FatalError("couldn't create Python tuple"); value = eval_flag(field_value); 
PyTuple_SetItem(t, n++, PyString_FromString(ev_name)); PyTuple_SetItem(t, n++, PyString_FromString(field_name)); PyTuple_SetItem(t, n++, PyInt_FromLong(value)); PyTuple_SetItem(t, n++, PyString_FromString(field_str)); handler = PyDict_GetItemString(main_dict, handler_name); if (handler && PyCallable_Check(handler)) { retval = PyObject_CallObject(handler, t); if (retval == NULL) handler_call_die(handler_name); } Py_DECREF(t); } static void define_values(enum print_arg_type field_type, struct print_flag_sym *field, const char *ev_name, const char *field_name) { define_value(field_type, ev_name, field_name, field->value, field->str); if (field->next) define_values(field_type, field->next, ev_name, field_name); } static void define_field(enum print_arg_type field_type, const char *ev_name, const char *field_name, const char *delim) { const char *handler_name = "define_flag_field"; PyObject *handler, *t, *retval; unsigned n = 0; if (field_type == PRINT_SYMBOL) handler_name = "define_symbolic_field"; if (field_type == PRINT_FLAGS) t = PyTuple_New(3); else t = PyTuple_New(2); if (!t) Py_FatalError("couldn't create Python tuple"); PyTuple_SetItem(t, n++, PyString_FromString(ev_name)); PyTuple_SetItem(t, n++, PyString_FromString(field_name)); if (field_type == PRINT_FLAGS) PyTuple_SetItem(t, n++, PyString_FromString(delim)); handler = PyDict_GetItemString(main_dict, handler_name); if (handler && PyCallable_Check(handler)) { retval = PyObject_CallObject(handler, t); if (retval == NULL) handler_call_die(handler_name); } Py_DECREF(t); } static void define_event_symbols(struct event *event, const char *ev_name, struct print_arg *args) { switch (args->type) { case PRINT_NULL: break; case PRINT_ATOM: define_value(PRINT_FLAGS, ev_name, cur_field_name, "0", args->atom.atom); zero_flag_atom = 0; break; case PRINT_FIELD: if (cur_field_name) free(cur_field_name); cur_field_name = strdup(args->field.name); break; case PRINT_FLAGS: define_event_symbols(event, ev_name, 
args->flags.field); define_field(PRINT_FLAGS, ev_name, cur_field_name, args->flags.delim); define_values(PRINT_FLAGS, args->flags.flags, ev_name, cur_field_name); break; case PRINT_SYMBOL: define_event_symbols(event, ev_name, args->symbol.field); define_field(PRINT_SYMBOL, ev_name, cur_field_name, NULL); define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name, cur_field_name); break; case PRINT_STRING: break; case PRINT_TYPE: define_event_symbols(event, ev_name, args->typecast.item); break; case PRINT_OP: if (strcmp(args->op.op, ":") == 0) zero_flag_atom = 1; define_event_symbols(event, ev_name, args->op.left); define_event_symbols(event, ev_name, args->op.right); break; default: /* we should warn... */ return; } if (args->next) define_event_symbols(event, ev_name, args->next); } static inline struct event *find_cache_event(int type) { static char ev_name[256]; struct event *event; if (events[type]) return events[type]; events[type] = event = trace_find_event(type); if (!event) return NULL; sprintf(ev_name, "%s__%s", event->system, event->name); define_event_symbols(event, ev_name, event->print_fmt.args); return event; } static void python_process_event(union perf_event *pevent __unused, struct perf_sample *sample, struct perf_evsel *evsel __unused, struct machine *machine __unused, struct thread *thread) { PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; static char handler_name[256]; struct format_field *field; unsigned long long val; unsigned long s, ns; struct event *event; unsigned n = 0; int type; int pid; int cpu = sample->cpu; void *data = sample->raw_data; unsigned long long nsecs = sample->time; char *comm = thread->comm; t = PyTuple_New(MAX_FIELDS); if (!t) Py_FatalError("couldn't create Python tuple"); type = trace_parse_common_type(data); event = find_cache_event(type); if (!event) die("ug! 
no event found for type %d", type); pid = trace_parse_common_pid(data); sprintf(handler_name, "%s__%s", event->system, event->name); handler = PyDict_GetItemString(main_dict, handler_name); if (handler && !PyCallable_Check(handler)) handler = NULL; if (!handler) { dict = PyDict_New(); if (!dict) Py_FatalError("couldn't create Python dict"); } s = nsecs / NSECS_PER_SEC; ns = nsecs - s * NSECS_PER_SEC; scripting_context->event_data = data; context = PyCObject_FromVoidPtr(scripting_context, NULL); PyTuple_SetItem(t, n++, PyString_FromString(handler_name)); PyTuple_SetItem(t, n++, context); if (handler) { PyTuple_SetItem(t, n++, PyInt_FromLong(cpu)); PyTuple_SetItem(t, n++, PyInt_FromLong(s)); PyTuple_SetItem(t, n++, PyInt_FromLong(ns)); PyTuple_SetItem(t, n++, PyInt_FromLong(pid)); PyTuple_SetItem(t, n++, PyString_FromString(comm)); } else { PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu)); PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s)); PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns)); PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid)); PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm)); } for (field = event->format.fields; field; field = field->next) { if (field->flags & FIELD_IS_STRING) { int offset; if (field->flags & FIELD_IS_DYNAMIC) { offset = *(int *)(data + field->offset); offset &= 0xffff; } else offset = field->offset; obj = PyString_FromString((char *)data + offset); } else { /* FIELD_IS_NUMERIC */ val = read_size(data + field->offset, field->size); if (field->flags & FIELD_IS_SIGNED) { if ((long long)val >= LONG_MIN && (long long)val <= LONG_MAX) obj = PyInt_FromLong(val); else obj = PyLong_FromLongLong(val); } else { if (val <= LONG_MAX) obj = PyInt_FromLong(val); else obj = PyLong_FromUnsignedLongLong(val); } } if (handler) PyTuple_SetItem(t, n++, obj); else PyDict_SetItemString(dict, field->name, obj); } if (!handler) PyTuple_SetItem(t, n++, dict); if (_PyTuple_Resize(&t, n) == -1) 
Py_FatalError("error resizing Python tuple"); if (handler) { retval = PyObject_CallObject(handler, t); if (retval == NULL) handler_call_die(handler_name); } else { handler = PyDict_GetItemString(main_dict, "trace_unhandled"); if (handler && PyCallable_Check(handler)) { retval = PyObject_CallObject(handler, t); if (retval == NULL) handler_call_die("trace_unhandled"); } Py_DECREF(dict); } Py_DECREF(t); } static int run_start_sub(void) { PyObject *handler, *retval; int err = 0; main_module = PyImport_AddModule("__main__"); if (main_module == NULL) return -1; Py_INCREF(main_module); main_dict = PyModule_GetDict(main_module); if (main_dict == NULL) { err = -1; goto error; } Py_INCREF(main_dict); handler = PyDict_GetItemString(main_dict, "trace_begin"); if (handler == NULL || !PyCallable_Check(handler)) goto out; retval = PyObject_CallObject(handler, NULL); if (retval == NULL) handler_call_die("trace_begin"); Py_DECREF(retval); return err; error: Py_XDECREF(main_dict); Py_XDECREF(main_module); out: return err; } /* * Start trace script */ static int python_start_script(const char *script, int argc, const char **argv) { const char **command_line; char buf[PATH_MAX]; int i, err = 0; FILE *fp; command_line = malloc((argc + 1) * sizeof(const char *)); command_line[0] = script; for (i = 1; i < argc + 1; i++) command_line[i] = argv[i - 1]; Py_Initialize(); initperf_trace_context(); PySys_SetArgv(argc + 1, (char **)command_line); fp = fopen(script, "r"); if (!fp) { sprintf(buf, "Can't open python script \"%s\"", script); perror(buf); err = -1; goto error; } err = PyRun_SimpleFile(fp, script); if (err) { fprintf(stderr, "Error running python script %s\n", script); goto error; } err = run_start_sub(); if (err) { fprintf(stderr, "Error starting python script %s\n", script); goto error; } free(command_line); return err; error: Py_Finalize(); free(command_line); return err; } /* * Stop trace script */ static int python_stop_script(void) { PyObject *handler, *retval; int err = 0; 
handler = PyDict_GetItemString(main_dict, "trace_end"); if (handler == NULL || !PyCallable_Check(handler)) goto out; retval = PyObject_CallObject(handler, NULL); if (retval == NULL) handler_call_die("trace_end"); else Py_DECREF(retval); out: Py_XDECREF(main_dict); Py_XDECREF(main_module); Py_Finalize(); return err; } static int python_generate_script(const char *outfile) { struct event *event = NULL; struct format_field *f; char fname[PATH_MAX]; int not_first, count; FILE *ofp; sprintf(fname, "%s.py", outfile); ofp = fopen(fname, "w"); if (ofp == NULL) { fprintf(stderr, "couldn't open %s\n", fname); return -1; } fprintf(ofp, "# perf script event handlers, " "generated by perf script -g python\n"); fprintf(ofp, "# Licensed under the terms of the GNU GPL" " License version 2\n\n"); fprintf(ofp, "# The common_* event handler fields are the most useful " "fields common to\n"); fprintf(ofp, "# all events. They don't necessarily correspond to " "the 'common_*' fields\n"); fprintf(ofp, "# in the format files. 
Those fields not available as " "handler params can\n"); fprintf(ofp, "# be retrieved using Python functions of the form " "common_*(context).\n"); fprintf(ofp, "# See the perf-trace-python Documentation for the list " "of available functions.\n\n"); fprintf(ofp, "import os\n"); fprintf(ofp, "import sys\n\n"); fprintf(ofp, "sys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n"); fprintf(ofp, "\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n"); fprintf(ofp, "\nfrom perf_trace_context import *\n"); fprintf(ofp, "from Core import *\n\n\n"); fprintf(ofp, "def trace_begin():\n"); fprintf(ofp, "\tprint \"in trace_begin\"\n\n"); fprintf(ofp, "def trace_end():\n"); fprintf(ofp, "\tprint \"in trace_end\"\n\n"); while ((event = trace_find_next_event(event))) { fprintf(ofp, "def %s__%s(", event->system, event->name); fprintf(ofp, "event_name, "); fprintf(ofp, "context, "); fprintf(ofp, "common_cpu,\n"); fprintf(ofp, "\tcommon_secs, "); fprintf(ofp, "common_nsecs, "); fprintf(ofp, "common_pid, "); fprintf(ofp, "common_comm,\n\t"); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (++count % 5 == 0) fprintf(ofp, "\n\t"); fprintf(ofp, "%s", f->name); } fprintf(ofp, "):\n"); fprintf(ofp, "\t\tprint_header(event_name, common_cpu, " "common_secs, common_nsecs,\n\t\t\t" "common_pid, common_comm)\n\n"); fprintf(ofp, "\t\tprint \""); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (count && count % 3 == 0) { fprintf(ofp, "\" \\\n\t\t\""); } count++; fprintf(ofp, "%s=", f->name); if (f->flags & FIELD_IS_STRING || f->flags & FIELD_IS_FLAG || f->flags & FIELD_IS_SYMBOLIC) fprintf(ofp, "%%s"); else if (f->flags & FIELD_IS_SIGNED) fprintf(ofp, "%%d"); else fprintf(ofp, "%%u"); } fprintf(ofp, "\\n\" %% \\\n\t\t("); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (++count % 5 == 0) 
fprintf(ofp, "\n\t\t"); if (f->flags & FIELD_IS_FLAG) { if ((count - 1) % 5 != 0) { fprintf(ofp, "\n\t\t"); count = 4; } fprintf(ofp, "flag_str(\""); fprintf(ofp, "%s__%s\", ", event->system, event->name); fprintf(ofp, "\"%s\", %s)", f->name, f->name); } else if (f->flags & FIELD_IS_SYMBOLIC) { if ((count - 1) % 5 != 0) { fprintf(ofp, "\n\t\t"); count = 4; } fprintf(ofp, "symbol_str(\""); fprintf(ofp, "%s__%s\", ", event->system, event->name); fprintf(ofp, "\"%s\", %s)", f->name, f->name); } else fprintf(ofp, "%s", f->name); } fprintf(ofp, "),\n\n"); } fprintf(ofp, "def trace_unhandled(event_name, context, " "event_fields_dict):\n"); fprintf(ofp, "\t\tprint ' '.join(['%%s=%%s'%%(k,str(v))" "for k,v in sorted(event_fields_dict.items())])\n\n"); fprintf(ofp, "def print_header(" "event_name, cpu, secs, nsecs, pid, comm):\n" "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t" "(event_name, cpu, secs, nsecs, pid, comm),\n"); fclose(ofp); fprintf(stderr, "generated Python script: %s\n", fname); return 0; } struct scripting_ops python_scripting_ops = { .name = "Python", .start_script = python_start_script, .stop_script = python_stop_script, .process_event = python_process_event, .generate_script = python_generate_script, };
gpl-2.0
Hybrid-Rom/kernel_lge_ls970
drivers/watchdog/pnx4008_wdt.c
4804
6060
/*
 * drivers/char/watchdog/pnx4008_wdt.c
 *
 * Watchdog driver for PNX4008 board
 *
 * Authors: Dmitry Chigirev <source@mvista.com>,
 *          Vitaly Wool <vitalywool@gmail.com>
 * Based on sa1100 driver,
 * Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
 *
 * 2005-2006 (c) MontaVista Software, Inc.
 *
 * (C) 2012 Wolfram Sang, Pengutronix
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <mach/hardware.h>

/* WatchDog Timer - Chapter 23 Page 207 */

#define DEFAULT_HEARTBEAT 19    /* default timeout in seconds */
#define MAX_HEARTBEAT     60    /* upper bound accepted for the module param */

/* Watchdog timer register set definition (offsets from wdt_base) */
#define WDTIM_INT(p)     ((p) + 0x0)
#define WDTIM_CTRL(p)    ((p) + 0x4)
#define WDTIM_COUNTER(p) ((p) + 0x8)
#define WDTIM_MCTRL(p)   ((p) + 0xC)
#define WDTIM_MATCH0(p)  ((p) + 0x10)
#define WDTIM_EMR(p)     ((p) + 0x14)
#define WDTIM_PULSE(p)   ((p) + 0x18)
#define WDTIM_RES(p)     ((p) + 0x1C)

/* WDTIM_INT bit definitions */
#define MATCH_INT      1

/* WDTIM_CTRL bit definitions */
#define COUNT_ENAB     1
#define RESET_COUNT    (1 << 1)
#define DEBUG_EN       (1 << 2)

/* WDTIM_MCTRL bit definitions */
#define MR0_INT        1
#undef  RESET_COUNT0
#define RESET_COUNT0   (1 << 2)
#define STOP_COUNT0    (1 << 2)
#define M_RES1         (1 << 3)
#define M_RES2         (1 << 4)
#define RESFRC1        (1 << 5)
#define RESFRC2        (1 << 6)

/* WDTIM_EMR bit definitions */
#define EXT_MATCH0          1
#define MATCH_OUTPUT_HIGH   (2 << 4)        /* a MATCH_CTRL setting */

/* WDTIM_RES bit definitions */
#define WDOG_RESET          1       /* read only */

#define WDOG_COUNTER_RATE 13000000      /* the counter clock is 13 MHz fixed */

static bool nowayout = WATCHDOG_NOWAYOUT;
static unsigned int heartbeat = DEFAULT_HEARTBEAT;

/* io_lock serializes the multi-register MMIO sequences in start/stop */
static DEFINE_SPINLOCK(io_lock);
static void __iomem *wdt_base;
struct clk *wdt_clk;

/*
 * (Re)arm the watchdog: reset the counter, program the match register for
 * wdd->timeout seconds and start counting.  Also used as the "ping" path
 * by the watchdog core (no separate .ping op is provided).
 */
static int pnx4008_wdt_start(struct watchdog_device *wdd)
{
        spin_lock(&io_lock);

        /* stop counter, initiate counter reset */
        writel(RESET_COUNT, WDTIM_CTRL(wdt_base));
        /* wait for reset to complete. 100% guarantee event */
        while (readl(WDTIM_COUNTER(wdt_base)))
                cpu_relax();
        /* internal and external reset, stop after that */
        writel(M_RES2 | STOP_COUNT0 | RESET_COUNT0, WDTIM_MCTRL(wdt_base));
        /* configure match output */
        writel(MATCH_OUTPUT_HIGH, WDTIM_EMR(wdt_base));
        /* clear interrupt, just in case */
        writel(MATCH_INT, WDTIM_INT(wdt_base));
        /* the longest pulse period 65541/(13*10^6) seconds ~ 5 ms. */
        writel(0xFFFF, WDTIM_PULSE(wdt_base));
        writel(wdd->timeout * WDOG_COUNTER_RATE, WDTIM_MATCH0(wdt_base));
        /* enable counter, stop when debugger active */
        writel(COUNT_ENAB | DEBUG_EN, WDTIM_CTRL(wdt_base));

        spin_unlock(&io_lock);
        return 0;
}

/* Stop the watchdog by disabling the counter. */
static int pnx4008_wdt_stop(struct watchdog_device *wdd)
{
        spin_lock(&io_lock);

        writel(0, WDTIM_CTRL(wdt_base));        /* stop counter */

        spin_unlock(&io_lock);
        return 0;
}

/*
 * Record a new timeout.  The hardware match register is only reprogrammed
 * on the next start/ping, not here.
 */
static int pnx4008_wdt_set_timeout(struct watchdog_device *wdd,
                                    unsigned int new_timeout)
{
        wdd->timeout = new_timeout;
        return 0;
}

static const struct watchdog_info pnx4008_wdt_ident = {
        .options = WDIOF_CARDRESET | WDIOF_MAGICCLOSE |
            WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
        .identity = "PNX4008 Watchdog",
};

static const struct watchdog_ops pnx4008_wdt_ops = {
        .owner = THIS_MODULE,
        .start = pnx4008_wdt_start,
        .stop = pnx4008_wdt_stop,
        .set_timeout = pnx4008_wdt_set_timeout,
};

static struct watchdog_device pnx4008_wdd = {
        .info = &pnx4008_wdt_ident,
        .ops = &pnx4008_wdt_ops,
        .min_timeout = 1,
        .max_timeout = MAX_HEARTBEAT,
};

/*
 * Probe: map the register block, enable the clock, derive bootstatus from
 * the WDTIM_RES reset-cause bit and register with the watchdog core.
 * The watchdog itself is left stopped until userspace opens the device.
 */
static int __devinit pnx4008_wdt_probe(struct platform_device *pdev)
{
        struct resource *r;
        int ret = 0;

        /* clamp out-of-range module parameter back to the default */
        if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
                heartbeat = DEFAULT_HEARTBEAT;

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        wdt_base = devm_request_and_ioremap(&pdev->dev, r);
        if (!wdt_base)
                return -EADDRINUSE;

        wdt_clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(wdt_clk))
                return PTR_ERR(wdt_clk);

        ret = clk_enable(wdt_clk);
        if (ret)
                goto out;

        pnx4008_wdd.timeout = heartbeat;
        pnx4008_wdd.bootstatus = (readl(WDTIM_RES(wdt_base)) & WDOG_RESET) ?
                        WDIOF_CARDRESET : 0;
        watchdog_set_nowayout(&pnx4008_wdd, nowayout);

        pnx4008_wdt_stop(&pnx4008_wdd); /* disable for now */

        ret = watchdog_register_device(&pnx4008_wdd);
        if (ret < 0) {
                dev_err(&pdev->dev, "cannot register watchdog device\n");
                goto disable_clk;
        }

        dev_info(&pdev->dev, "PNX4008 Watchdog Timer: heartbeat %d sec\n",
                 heartbeat);

        return 0;

disable_clk:
        clk_disable(wdt_clk);
out:
        clk_put(wdt_clk);
        return ret;
}

/* Remove: unregister and release the clock (mirror of probe). */
static int __devexit pnx4008_wdt_remove(struct platform_device *pdev)
{
        watchdog_unregister_device(&pnx4008_wdd);

        clk_disable(wdt_clk);
        clk_put(wdt_clk);

        return 0;
}

static struct platform_driver platform_wdt_driver = {
        .driver = {
                .name = "pnx4008-watchdog",
                .owner  = THIS_MODULE,
        },
        .probe = pnx4008_wdt_probe,
        .remove = __devexit_p(pnx4008_wdt_remove),
};

module_platform_driver(platform_wdt_driver);

MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
MODULE_DESCRIPTION("PNX4008 Watchdog Driver");

module_param(heartbeat, uint, 0);
MODULE_PARM_DESC(heartbeat,
                 "Watchdog heartbeat period in seconds from 1 to "
                 __MODULE_STRING(MAX_HEARTBEAT) ", default "
                 __MODULE_STRING(DEFAULT_HEARTBEAT));

module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
                 "Set to 1 to keep watchdog running after device release");

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:pnx4008-watchdog");
gpl-2.0
klabit87/kltevzw_pb1
drivers/gpu/drm/drm_ioctl.c
4804
8833
/**
 * \file drm_ioctl.c
 * IOCTL processing for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Fri Jan  8 09:01:26 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm_core.h"

#include "linux/pci.h"
#include "linux/export.h"

/**
 * Get the bus id.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_unique structure.
 * \return zero on success or a negative number on failure.
 *
 * Copies the bus id from drm_device::unique into user space.
 */
int drm_getunique(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct drm_unique *u = data;
        struct drm_master *master = file_priv->master;

        /* Only copy when the caller's buffer is large enough; the actual
         * length is always reported back so userspace can retry. */
        if (u->unique_len >= master->unique_len) {
                if (copy_to_user(u->unique, master->unique, master->unique_len))
                        return -EFAULT;
        }
        u->unique_len = master->unique_len;

        return 0;
}

/* Drop the cached device name and the master's unique bus id string. */
static void
drm_unset_busid(struct drm_device *dev,
                struct drm_master *master)
{
        kfree(dev->devname);
        dev->devname = NULL;

        kfree(master->unique);
        master->unique = NULL;
        master->unique_len = 0;
        master->unique_size = 0;
}

/**
 * Set the bus id.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_unique structure.
 * \return zero on success or a negative number on failure.
 *
 * Copies the bus id from userspace into drm_device::unique, and verifies that
 * it matches the device this DRM is attached to (EINVAL otherwise).  Deprecated
 * in interface version 1.1 and will return EBUSY when setversion has requested
 * version 1.1 or greater.
 */
int drm_setunique(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct drm_unique *u = data;
        struct drm_master *master = file_priv->master;
        int ret;

        /* a unique id may only be set once per master */
        if (master->unique_len || master->unique)
                return -EBUSY;

        if (!u->unique_len || u->unique_len > 1024)
                return -EINVAL;

        if (!dev->driver->bus->set_unique)
                return -EINVAL;

        ret = dev->driver->bus->set_unique(dev, master, u);
        if (ret)
                goto err;

        return 0;

err:
        drm_unset_busid(dev, master);
        return ret;
}

/*
 * Ask the bus layer to (re)generate the bus id for this master; any
 * previously set id is discarded first.  On failure the partially set
 * state is cleaned up again.
 */
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_master *master = file_priv->master;
        int ret;

        if (master->unique != NULL)
                drm_unset_busid(dev, master);

        ret = dev->driver->bus->set_busid(dev, master);
        if (ret)
                goto err;
        return 0;
err:
        drm_unset_busid(dev, master);
        return ret;
}

/**
 * Get a mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its information
 * into userspace
 */
int drm_getmap(struct drm_device *dev, void *data,
               struct drm_file *file_priv)
{
        struct drm_map *map = data;
        struct drm_map_list *r_list = NULL;
        struct list_head *list;
        int idx;
        int i;

        /* NOTE: map->offset is (ab)used as a list index here, not a real
         * offset -- this matches the historical ioctl ABI. */
        idx = map->offset;
        if (idx < 0)
                return -EINVAL;

        i = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each(list, &dev->maplist) {
                if (i == idx) {
                        r_list = list_entry(list, struct drm_map_list, head);
                        break;
                }
                i++;
        }
        if (!r_list || !r_list->map) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        map->offset = r_list->map->offset;
        map->size = r_list->map->size;
        map->type = r_list->map->type;
        map->flags = r_list->map->flags;
        map->handle = (void *)(unsigned long) r_list->user_token;
        map->mtrr = r_list->map->mtrr;
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

/**
 * Get client information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_client structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the client with the specified index and copies its information
 * into userspace
 */
int drm_getclient(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct drm_client *client = data;
        struct drm_file *pt;
        int idx;
        int i;

        idx = client->idx;
        i = 0;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(pt, &dev->filelist, lhead) {
                if (i++ >= idx) {
                        client->auth = pt->authenticated;
                        client->pid = pt->pid;
                        client->uid = pt->uid;
                        client->magic = pt->magic;
                        client->iocs = pt->ioctl_count;
                        mutex_unlock(&dev->struct_mutex);

                        return 0;
                }
        }
        mutex_unlock(&dev->struct_mutex);

        return -EINVAL;
}

/**
 * Get statistics information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_stats structure.
 *
 * \return zero on success or a negative number on failure.
 */
int drm_getstats(struct drm_device *dev, void *data,
                 struct drm_file *file_priv)
{
        struct drm_stats *stats = data;
        int i;

        memset(stats, 0, sizeof(*stats));

        for (i = 0; i < dev->counters; i++) {
                /* the lock counter reports the raw hardware lock word */
                if (dev->types[i] == _DRM_STAT_LOCK)
                        stats->data[i].value =
                            (file_priv->master->lock.hw_lock ?
                             file_priv->master->lock.hw_lock->lock : 0);
                else
                        stats->data[i].value = atomic_read(&dev->counts[i]);
                stats->data[i].type = dev->types[i];
        }

        stats->count = dev->counters;

        return 0;
}

/**
 * Get device/driver capabilities
 */
int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_get_cap *req = data;

        req->value = 0;
        switch (req->capability) {
        case DRM_CAP_DUMB_BUFFER:
                if (dev->driver->dumb_create)
                        req->value = 1;
                break;
        case DRM_CAP_VBLANK_HIGH_CRTC:
                req->value = 1;
                break;
        case DRM_CAP_DUMB_PREFERRED_DEPTH:
                req->value = dev->mode_config.preferred_depth;
                break;
        case DRM_CAP_DUMB_PREFER_SHADOW:
                req->value = dev->mode_config.prefer_shadow;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

/**
 * Setversion ioctl.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_lock structure.
 * \return zero on success or negative number on failure.
 *
 * Sets the requested interface version
 */
int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_set_version *sv = data;
        int if_version, retcode = 0;

        if (sv->drm_di_major != -1) {
                if (sv->drm_di_major != DRM_IF_MAJOR ||
                    sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
                        retcode = -EINVAL;
                        goto done;
                }
                if_version = DRM_IF_VERSION(sv->drm_di_major,
                                            sv->drm_di_minor);
                dev->if_version = max(if_version, dev->if_version);
                if (sv->drm_di_minor >= 1) {
                        /*
                         * Version 1.1 includes tying of DRM to specific device
                         * Version 1.4 has proper PCI domain support
                         */
                        retcode = drm_set_busid(dev, file_priv);
                        if (retcode)
                                goto done;
                }
        }

        if (sv->drm_dd_major != -1) {
                if (sv->drm_dd_major != dev->driver->major ||
                    sv->drm_dd_minor < 0 || sv->drm_dd_minor >
                    dev->driver->minor) {
                        retcode = -EINVAL;
                        goto done;
                }

                if (dev->driver->set_version)
                        dev->driver->set_version(dev, sv);
        }

done:
        /* the currently supported versions are always reported back */
        sv->drm_di_major = DRM_IF_MAJOR;
        sv->drm_di_minor = DRM_IF_MINOR;
        sv->drm_dd_major = dev->driver->major;
        sv->drm_dd_minor = dev->driver->minor;

        return retcode;
}

/** No-op ioctl. */
int drm_noop(struct drm_device *dev, void *data,
             struct drm_file *file_priv)
{
        DRM_DEBUG("\n");
        return 0;
}
EXPORT_SYMBOL(drm_noop);
gpl-2.0
falaze/nexus5n
arch/arm/mach-s3c24xx/dma-s3c2410.c
5060
4590
/* linux/arch/arm/mach-s3c2410/dma.c
 *
 * Copyright (c) 2006 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * S3C2410 DMA selection
 *
 * http://armlinux.simtec.co.uk/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/serial_core.h>

#include <mach/map.h>
#include <mach/dma.h>

#include <plat/cpu.h>
#include <plat/dma-s3c24xx.h>

#include <plat/regs-serial.h>
#include <mach/regs-gpio.h>
#include <plat/regs-ac97.h>
#include <plat/regs-dma.h>
#include <mach/regs-mem.h>
#include <mach/regs-lcd.h>
#include <mach/regs-sdi.h>
#include <plat/regs-iis.h>
#include <plat/regs-spi.h>

/*
 * Per-peripheral DMA request mapping.  Each entry lists which of the four
 * hardware DMA channels can service the peripheral; DMA_CH_VALID marks a
 * usable channel and the remaining bits select the DCON request source.
 */
static struct s3c24xx_dma_map __initdata s3c2410_dma_mappings[] = {
	[DMACH_XD0] = {
		.name		= "xdreq0",
		.channels[0]	= S3C2410_DCON_CH0_XDREQ0 | DMA_CH_VALID,
	},
	[DMACH_XD1] = {
		.name		= "xdreq1",
		.channels[1]	= S3C2410_DCON_CH1_XDREQ1 | DMA_CH_VALID,
	},
	[DMACH_SDI] = {
		.name		= "sdi",
		.channels[0]	= S3C2410_DCON_CH0_SDI | DMA_CH_VALID,
		.channels[2]	= S3C2410_DCON_CH2_SDI | DMA_CH_VALID,
		.channels[3]	= S3C2410_DCON_CH3_SDI | DMA_CH_VALID,
	},
	[DMACH_SPI0] = {
		.name		= "spi0",
		.channels[1]	= S3C2410_DCON_CH1_SPI | DMA_CH_VALID,
	},
	[DMACH_SPI1] = {
		.name		= "spi1",
		.channels[3]	= S3C2410_DCON_CH3_SPI | DMA_CH_VALID,
	},
	[DMACH_UART0] = {
		.name		= "uart0",
		.channels[0]	= S3C2410_DCON_CH0_UART0 | DMA_CH_VALID,
	},
	[DMACH_UART1] = {
		.name		= "uart1",
		.channels[1]	= S3C2410_DCON_CH1_UART1 | DMA_CH_VALID,
	},
	[DMACH_UART2] = {
		.name		= "uart2",
		.channels[3]	= S3C2410_DCON_CH3_UART2 | DMA_CH_VALID,
	},
	[DMACH_TIMER] = {
		.name		= "timer",
		.channels[0]	= S3C2410_DCON_CH0_TIMER | DMA_CH_VALID,
		.channels[2]	= S3C2410_DCON_CH2_TIMER | DMA_CH_VALID,
		.channels[3]	= S3C2410_DCON_CH3_TIMER | DMA_CH_VALID,
	},
	[DMACH_I2S_IN] = {
		.name		= "i2s-sdi",
		.channels[1]	= S3C2410_DCON_CH1_I2SSDI | DMA_CH_VALID,
		.channels[2]	= S3C2410_DCON_CH2_I2SSDI | DMA_CH_VALID,
	},
	[DMACH_I2S_OUT] = {
		.name		= "i2s-sdo",
		.channels[2]	= S3C2410_DCON_CH2_I2SSDO | DMA_CH_VALID,
	},
	[DMACH_USB_EP1] = {
		.name		= "usb-ep1",
		.channels[0]	= S3C2410_DCON_CH0_USBEP1 | DMA_CH_VALID,
	},
	[DMACH_USB_EP2] = {
		.name		= "usb-ep2",
		.channels[1]	= S3C2410_DCON_CH1_USBEP2 | DMA_CH_VALID,
	},
	[DMACH_USB_EP3] = {
		.name		= "usb-ep3",
		.channels[2]	= S3C2410_DCON_CH2_USBEP3 | DMA_CH_VALID,
	},
	[DMACH_USB_EP4] = {
		.name		= "usb-ep4",
		.channels[3]	= S3C2410_DCON_CH3_USBEP4 | DMA_CH_VALID,
	},
};

/*
 * Program the channel's DCON request-source field; the DMA_CH_VALID
 * marker bit must be stripped before writing to hardware.
 */
static void s3c2410_dma_select(struct s3c2410_dma_chan *chan,
			       struct s3c24xx_dma_map *map)
{
	chan->dcon = map->channels[chan->number] & ~DMA_CH_VALID;
}

static struct s3c24xx_dma_selection __initdata s3c2410_dma_sel = {
	.select		= s3c2410_dma_select,
	.dcon_mask	= 7 << 24,
	.map		= s3c2410_dma_mappings,
	.map_size	= ARRAY_SIZE(s3c2410_dma_mappings),
};

/* Preferred channel allocation order for peripherals with multiple options. */
static struct s3c24xx_dma_order __initdata s3c2410_dma_order = {
	.channels	= {
		[DMACH_SDI]	= {
			.list	= {
				[0]	= 3 | DMA_CH_VALID,
				[1]	= 2 | DMA_CH_VALID,
				[2]	= 0 | DMA_CH_VALID,
			},
		},
		[DMACH_I2S_IN]	= {
			.list	= {
				[0]	= 1 | DMA_CH_VALID,
				[1]	= 2 | DMA_CH_VALID,
			},
		},
	},
};

/* Register the S3C2410 map/order tables with the s3c24xx DMA core. */
static int __init s3c2410_dma_add(struct device *dev,
				  struct subsys_interface *sif)
{
	s3c2410_dma_init();
	s3c24xx_dma_order_set(&s3c2410_dma_order);
	return s3c24xx_dma_init_map(&s3c2410_dma_sel);
}

#if defined(CONFIG_CPU_S3C2410)
static struct subsys_interface s3c2410_dma_interface = {
	.name		= "s3c2410_dma",
	.subsys		= &s3c2410_subsys,
	.add_dev	= s3c2410_dma_add,
};

static int __init s3c2410_dma_drvinit(void)
{
	return subsys_interface_register(&s3c2410_dma_interface);
}

arch_initcall(s3c2410_dma_drvinit);

static struct subsys_interface s3c2410a_dma_interface = {
	.name		= "s3c2410a_dma",
	.subsys		= &s3c2410a_subsys,
	.add_dev	= s3c2410_dma_add,
};

static int __init s3c2410a_dma_drvinit(void)
{
	return subsys_interface_register(&s3c2410a_dma_interface);
}

arch_initcall(s3c2410a_dma_drvinit);
#endif

#if defined(CONFIG_CPU_S3C2442)
/* S3C2442 DMA contains the same selection table as the S3C2410 */
static struct subsys_interface s3c2442_dma_interface = {
	.name		= "s3c2442_dma",
	.subsys		= &s3c2442_subsys,
	.add_dev	= s3c2410_dma_add,
};

static int __init s3c2442_dma_drvinit(void)
{
	return subsys_interface_register(&s3c2442_dma_interface);
}

arch_initcall(s3c2442_dma_drvinit);
#endif
gpl-2.0
kernel-hut/android_kernel_xiaomi_dior
arch/powerpc/platforms/wsp/opb_pic.c
6852
7330
/*
 * IBM Onboard Peripheral Bus Interrupt Controller
 *
 * Copyright 2010 Jack Miller, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/time.h>

#include <asm/reg_a2.h>
#include <asm/irq.h>

#define OPB_NR_IRQS 32

/* MLS register offsets within the mapped register window */
#define OPB_MLSASIER	0x04    /* MLS Accumulated Status IER */
#define OPB_MLSIR	0x50	/* MLS Interrupt Register */
#define OPB_MLSIER	0x54	/* MLS Interrupt Enable Register */
#define OPB_MLSIPR	0x58	/* MLS Interrupt Polarity Register */
#define OPB_MLSIIR	0x5c	/* MLS Interrupt Inputs Register */

static int opb_index = 0;

/* One instance per OPB controller found in the device tree. */
struct opb_pic {
	struct irq_domain *host;
	void *regs;
	int index;
	spinlock_t lock;	/* protects read-modify-write of MLS regs */
};

static u32 opb_in(struct opb_pic *opb, int offset)
{
	return in_be32(opb->regs + offset);
}

static void opb_out(struct opb_pic *opb, int offset, u32 val)
{
	out_be32(opb->regs + offset, val);
}

/* Enable (unmask) the interrupt by setting its bit in MLSIER. */
static void opb_unmask_irq(struct irq_data *d)
{
	struct opb_pic *opb;
	unsigned long flags;
	u32 ier, bitset;

	opb = d->chip_data;
	/* bit 0 of the register corresponds to hwirq 31 (big-endian bit order) */
	bitset = (1 << (31 - irqd_to_hwirq(d)));

	spin_lock_irqsave(&opb->lock, flags);

	ier = opb_in(opb, OPB_MLSIER);
	opb_out(opb, OPB_MLSIER, ier | bitset);
	ier = opb_in(opb, OPB_MLSIER);

	spin_unlock_irqrestore(&opb->lock, flags);
}

/* Disable (mask) the interrupt by clearing its bit in MLSIER. */
static void opb_mask_irq(struct irq_data *d)
{
	struct opb_pic *opb;
	unsigned long flags;
	u32 ier, mask;

	opb = d->chip_data;
	mask = ~(1 << (31 - irqd_to_hwirq(d)));

	spin_lock_irqsave(&opb->lock, flags);

	ier = opb_in(opb, OPB_MLSIER);
	opb_out(opb, OPB_MLSIER, ier & mask);
	ier = opb_in(opb, OPB_MLSIER); // Flush posted writes

	spin_unlock_irqrestore(&opb->lock, flags);
}

/* Acknowledge the interrupt by writing its bit to MLSIR (write-1-to-clear). */
static void opb_ack_irq(struct irq_data *d)
{
	struct opb_pic *opb;
	unsigned long flags;
	u32 bitset;

	opb = d->chip_data;
	bitset = (1 << (31 - irqd_to_hwirq(d)));

	spin_lock_irqsave(&opb->lock, flags);

	opb_out(opb, OPB_MLSIR, bitset);
	opb_in(opb, OPB_MLSIR); // Flush posted writes

	spin_unlock_irqrestore(&opb->lock, flags);
}

/* Combined mask + ack under a single lock acquisition. */
static void opb_mask_ack_irq(struct irq_data *d)
{
	struct opb_pic *opb;
	unsigned long flags;
	u32 bitset;
	u32 ier, ir;

	opb = d->chip_data;
	bitset = (1 << (31 - irqd_to_hwirq(d)));

	spin_lock_irqsave(&opb->lock, flags);

	ier = opb_in(opb, OPB_MLSIER);
	opb_out(opb, OPB_MLSIER, ier & ~bitset);
	ier = opb_in(opb, OPB_MLSIER); // Flush posted writes

	opb_out(opb, OPB_MLSIR, bitset);
	ir = opb_in(opb, OPB_MLSIR); // Flush posted writes

	spin_unlock_irqrestore(&opb->lock, flags);
}

static int opb_set_irq_type(struct irq_data *d, unsigned int flow)
{
	struct opb_pic *opb;
	unsigned long flags;
	int invert, ipr, mask, bit;

	opb = d->chip_data;

	/* The only information we're interested in in the type is whether it's
	 * a high or low trigger. For high triggered interrupts, the polarity
	 * set for it in the MLS Interrupt Polarity Register is 0, for low
	 * interrupts it's 1 so that the proper input in the MLS Interrupt Input
	 * Register is interrupted as asserting the interrupt. */

	switch (flow) {
		case IRQ_TYPE_NONE:
			opb_mask_irq(d);
			return 0;

		case IRQ_TYPE_LEVEL_HIGH:
			invert = 0;
			break;

		case IRQ_TYPE_LEVEL_LOW:
			invert = 1;
			break;

		default:
			return -EINVAL;
	}

	bit = (1 << (31 - irqd_to_hwirq(d)));
	mask = ~bit;

	spin_lock_irqsave(&opb->lock, flags);

	ipr = opb_in(opb, OPB_MLSIPR);
	ipr = (ipr & mask) | (invert ? bit : 0);
	opb_out(opb, OPB_MLSIPR, ipr);
	ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes

	spin_unlock_irqrestore(&opb->lock, flags);

	/* Record the type in the interrupt descriptor */
	irqd_set_trigger_type(d, flow);

	return 0;
}

static struct irq_chip opb_irq_chip = {
	.name		= "OPB",
	.irq_mask	= opb_mask_irq,
	.irq_unmask	= opb_unmask_irq,
	.irq_mask_ack	= opb_mask_ack_irq,
	.irq_ack	= opb_ack_irq,
	.irq_set_type	= opb_set_irq_type
};

static int opb_host_map(struct irq_domain *host, unsigned int virq,
		irq_hw_number_t hwirq)
{
	struct opb_pic *opb;

	opb = host->host_data;

	/* Most of the important stuff is handled by the generic host code, like
	 * the lookup, so just attach some info to the virtual irq */

	irq_set_chip_data(virq, opb);
	irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq);
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static const struct irq_domain_ops opb_host_ops = {
	.map = opb_host_map,
	.xlate = irq_domain_xlate_twocell,
};

/*
 * Cascade handler: demultiplex all pending OPB interrupt sources from
 * MLSIR and dispatch them to their Linux virtual IRQs, re-reading MLSIR
 * until no source remains asserted.
 */
irqreturn_t opb_irq_handler(int irq, void *private)
{
	struct opb_pic *opb;
	u32 ir, src, subvirq;

	opb = (struct opb_pic *) private;

	/* Read the OPB MLS Interrupt Register for
	 * asserted interrupts */
	ir = opb_in(opb, OPB_MLSIR);
	if (!ir)
		return IRQ_NONE;

	do {
		/* Get 1 - 32 source, *NOT* bit */
		src = 32 - ffs(ir);

		/* Translate from the OPB's conception of interrupt number to
		 * Linux's virtual IRQ */

		subvirq = irq_linear_revmap(opb->host, src);

		generic_handle_irq(subvirq);
	} while ((ir = opb_in(opb, OPB_MLSIR)));

	return IRQ_HANDLED;
}

/*
 * Set up one OPB controller from its device-tree node: map registers,
 * create the irq domain and quiesce the hardware.  Returns NULL on any
 * failure (resources allocated so far are released).
 */
struct opb_pic *opb_pic_init_one(struct device_node *dn)
{
	struct opb_pic *opb;
	struct resource res;

	if (of_address_to_resource(dn, 0, &res)) {
		printk(KERN_ERR "opb: Couldn't translate resource\n");
		return  NULL;
	}

	opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL);
	if (!opb) {
		printk(KERN_ERR "opb: Failed to allocate opb struct!\n");
		return NULL;
	}

	/* Get access to the OPB MMIO registers */
	opb->regs = ioremap(res.start + 0x10000, 0x1000);
	if (!opb->regs) {
		printk(KERN_ERR "opb: Failed to allocate register space!\n");
		goto free_opb;
	}

	/* Allocate an irq domain so that Linux knows that despite only
	 * having one interrupt to issue, we're the controller for multiple
	 * hardware IRQs, so later we can lookup their virtual IRQs. */

	opb->host = irq_domain_add_linear(dn, OPB_NR_IRQS, &opb_host_ops, opb);
	if (!opb->host) {
		printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
		goto free_regs;
	}

	opb->index = opb_index++;
	spin_lock_init(&opb->lock);

	/* Disable all interrupts by default */
	opb_out(opb, OPB_MLSASIER, 0);
	opb_out(opb, OPB_MLSIER, 0);

	/* ACK any interrupts left by FW */
	opb_out(opb, OPB_MLSIR, 0xFFFFFFFF);

	return opb;

free_regs:
	iounmap(opb->regs);
free_opb:
	kfree(opb);
	return NULL;
}

/*
 * Find every "ibm,opb" node, initialize a controller for it and hook the
 * cascade handler onto the controller's upstream interrupt.  Failures on
 * individual nodes are logged and skipped.
 */
void __init opb_pic_init(void)
{
	struct device_node *dn;
	struct opb_pic *opb;
	int virq;
	int rc;

	/* Call init_one for each OPB device */
	for_each_compatible_node(dn, NULL, "ibm,opb") {

		/* Fill in an OPB struct */
		opb = opb_pic_init_one(dn);
		if (!opb) {
			printk(KERN_WARNING "opb: Failed to init node, skipped!\n");
			continue;
		}

		/* Map / get opb's hardware virtual irq */
		virq = irq_of_parse_and_map(dn, 0);
		if (virq <= 0) {
			printk("opb: irq_op_parse_and_map failed!\n");
			continue;
		}

		/* Attach opb interrupt handler to new virtual IRQ */
		rc = request_irq(virq, opb_irq_handler, IRQF_NO_THREAD,
				 "OPB LS Cascade", opb);
		if (rc) {
			printk("opb: request_irq failed: %d\n", rc);
			continue;
		}

		printk("OPB%d init with %d IRQs at %p\n", opb->index,
				OPB_NR_IRQS, opb->regs);
	}
}
gpl-2.0
StarkDroid/android_kernel_motorola_msm8610
drivers/tty/serial/netx-serial.c
8132
17115
/* * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if defined(CONFIG_SERIAL_NETX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/device.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/platform_device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <asm/io.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/netx-regs.h> /* We've been assigned a range on the "Low-density serial ports" major */ #define SERIAL_NX_MAJOR 204 #define MINOR_START 170 enum uart_regs { UART_DR = 0x00, UART_SR = 0x04, UART_LINE_CR = 0x08, UART_BAUDDIV_MSB = 0x0c, UART_BAUDDIV_LSB = 0x10, UART_CR = 0x14, UART_FR = 0x18, UART_IIR = 0x1c, UART_ILPR = 0x20, UART_RTS_CR = 0x24, UART_RTS_LEAD = 0x28, UART_RTS_TRAIL = 0x2c, UART_DRV_ENABLE = 0x30, UART_BRM_CR = 0x34, UART_RXFIFO_IRQLEVEL = 0x38, UART_TXFIFO_IRQLEVEL = 0x3c, }; #define SR_FE (1<<0) #define SR_PE (1<<1) #define SR_BE (1<<2) #define SR_OE (1<<3) #define LINE_CR_BRK (1<<0) #define LINE_CR_PEN (1<<1) #define LINE_CR_EPS (1<<2) #define LINE_CR_STP2 (1<<3) #define LINE_CR_FEN (1<<4) #define LINE_CR_5BIT (0<<5) #define LINE_CR_6BIT (1<<5) #define LINE_CR_7BIT (2<<5) 
#define LINE_CR_8BIT (3<<5) #define LINE_CR_BITS_MASK (3<<5) #define CR_UART_EN (1<<0) #define CR_SIREN (1<<1) #define CR_SIRLP (1<<2) #define CR_MSIE (1<<3) #define CR_RIE (1<<4) #define CR_TIE (1<<5) #define CR_RTIE (1<<6) #define CR_LBE (1<<7) #define FR_CTS (1<<0) #define FR_DSR (1<<1) #define FR_DCD (1<<2) #define FR_BUSY (1<<3) #define FR_RXFE (1<<4) #define FR_TXFF (1<<5) #define FR_RXFF (1<<6) #define FR_TXFE (1<<7) #define IIR_MIS (1<<0) #define IIR_RIS (1<<1) #define IIR_TIS (1<<2) #define IIR_RTIS (1<<3) #define IIR_MASK 0xf #define RTS_CR_AUTO (1<<0) #define RTS_CR_RTS (1<<1) #define RTS_CR_COUNT (1<<2) #define RTS_CR_MOD2 (1<<3) #define RTS_CR_RTS_POL (1<<4) #define RTS_CR_CTS_CTR (1<<5) #define RTS_CR_CTS_POL (1<<6) #define RTS_CR_STICK (1<<7) #define UART_PORT_SIZE 0x40 #define DRIVER_NAME "netx-uart" struct netx_port { struct uart_port port; }; static void netx_stop_tx(struct uart_port *port) { unsigned int val; val = readl(port->membase + UART_CR); writel(val & ~CR_TIE, port->membase + UART_CR); } static void netx_stop_rx(struct uart_port *port) { unsigned int val; val = readl(port->membase + UART_CR); writel(val & ~CR_RIE, port->membase + UART_CR); } static void netx_enable_ms(struct uart_port *port) { unsigned int val; val = readl(port->membase + UART_CR); writel(val | CR_MSIE, port->membase + UART_CR); } static inline void netx_transmit_buffer(struct uart_port *port) { struct circ_buf *xmit = &port->state->xmit; if (port->x_char) { writel(port->x_char, port->membase + UART_DR); port->icount.tx++; port->x_char = 0; return; } if (uart_tx_stopped(port) || uart_circ_empty(xmit)) { netx_stop_tx(port); return; } do { /* send xmit->buf[xmit->tail] * out the port here */ writel(xmit->buf[xmit->tail], port->membase + UART_DR); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; if (uart_circ_empty(xmit)) break; } while (!(readl(port->membase + UART_FR) & FR_TXFF)); if (uart_circ_empty(xmit)) netx_stop_tx(port); } static void 
netx_start_tx(struct uart_port *port) { writel( readl(port->membase + UART_CR) | CR_TIE, port->membase + UART_CR); if (!(readl(port->membase + UART_FR) & FR_TXFF)) netx_transmit_buffer(port); } static unsigned int netx_tx_empty(struct uart_port *port) { return readl(port->membase + UART_FR) & FR_BUSY ? 0 : TIOCSER_TEMT; } static void netx_txint(struct uart_port *port) { struct circ_buf *xmit = &port->state->xmit; if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { netx_stop_tx(port); return; } netx_transmit_buffer(port); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); } static void netx_rxint(struct uart_port *port) { unsigned char rx, flg, status; struct tty_struct *tty = port->state->port.tty; while (!(readl(port->membase + UART_FR) & FR_RXFE)) { rx = readl(port->membase + UART_DR); flg = TTY_NORMAL; port->icount.rx++; status = readl(port->membase + UART_SR); if (status & SR_BE) { writel(0, port->membase + UART_SR); if (uart_handle_break(port)) continue; } if (unlikely(status & (SR_FE | SR_PE | SR_OE))) { if (status & SR_PE) port->icount.parity++; else if (status & SR_FE) port->icount.frame++; if (status & SR_OE) port->icount.overrun++; status &= port->read_status_mask; if (status & SR_BE) flg = TTY_BREAK; else if (status & SR_PE) flg = TTY_PARITY; else if (status & SR_FE) flg = TTY_FRAME; } if (uart_handle_sysrq_char(port, rx)) continue; uart_insert_char(port, status, SR_OE, rx, flg); } tty_flip_buffer_push(tty); return; } static irqreturn_t netx_int(int irq, void *dev_id) { struct uart_port *port = dev_id; unsigned long flags; unsigned char status; spin_lock_irqsave(&port->lock,flags); status = readl(port->membase + UART_IIR) & IIR_MASK; while (status) { if (status & IIR_RIS) netx_rxint(port); if (status & IIR_TIS) netx_txint(port); if (status & IIR_MIS) { if (readl(port->membase + UART_FR) & FR_CTS) uart_handle_cts_change(port, 1); else uart_handle_cts_change(port, 0); } writel(0, port->membase + UART_IIR); status = 
readl(port->membase + UART_IIR) & IIR_MASK; } spin_unlock_irqrestore(&port->lock,flags); return IRQ_HANDLED; } static unsigned int netx_get_mctrl(struct uart_port *port) { unsigned int ret = TIOCM_DSR | TIOCM_CAR; if (readl(port->membase + UART_FR) & FR_CTS) ret |= TIOCM_CTS; return ret; } static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl) { unsigned int val; /* FIXME: Locking needed ? */ if (mctrl & TIOCM_RTS) { val = readl(port->membase + UART_RTS_CR); writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR); } } static void netx_break_ctl(struct uart_port *port, int break_state) { unsigned int line_cr; spin_lock_irq(&port->lock); line_cr = readl(port->membase + UART_LINE_CR); if (break_state != 0) line_cr |= LINE_CR_BRK; else line_cr &= ~LINE_CR_BRK; writel(line_cr, port->membase + UART_LINE_CR); spin_unlock_irq(&port->lock); } static int netx_startup(struct uart_port *port) { int ret; ret = request_irq(port->irq, netx_int, 0, DRIVER_NAME, port); if (ret) { dev_err(port->dev, "unable to grab irq%d\n",port->irq); goto exit; } writel(readl(port->membase + UART_LINE_CR) | LINE_CR_FEN, port->membase + UART_LINE_CR); writel(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE | CR_UART_EN, port->membase + UART_CR); exit: return ret; } static void netx_shutdown(struct uart_port *port) { writel(0, port->membase + UART_CR) ; free_irq(port->irq, port); } static void netx_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { unsigned int baud, quot; unsigned char old_cr; unsigned char line_cr = LINE_CR_FEN; unsigned char rts_cr = 0; switch (termios->c_cflag & CSIZE) { case CS5: line_cr |= LINE_CR_5BIT; break; case CS6: line_cr |= LINE_CR_6BIT; break; case CS7: line_cr |= LINE_CR_7BIT; break; case CS8: line_cr |= LINE_CR_8BIT; break; } if (termios->c_cflag & CSTOPB) line_cr |= LINE_CR_STP2; if (termios->c_cflag & PARENB) { line_cr |= LINE_CR_PEN; if (!(termios->c_cflag & PARODD)) line_cr |= LINE_CR_EPS; } if (termios->c_cflag & CRTSCTS) 
rts_cr = RTS_CR_AUTO | RTS_CR_CTS_CTR | RTS_CR_RTS_POL; baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); quot = baud * 4096; quot /= 1000; quot *= 256; quot /= 100000; spin_lock_irq(&port->lock); uart_update_timeout(port, termios->c_cflag, baud); old_cr = readl(port->membase + UART_CR); /* disable interrupts */ writel(old_cr & ~(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE), port->membase + UART_CR); /* drain transmitter */ while (readl(port->membase + UART_FR) & FR_BUSY); /* disable UART */ writel(old_cr & ~CR_UART_EN, port->membase + UART_CR); /* modem status interrupts */ old_cr &= ~CR_MSIE; if (UART_ENABLE_MS(port, termios->c_cflag)) old_cr |= CR_MSIE; writel((quot>>8) & 0xff, port->membase + UART_BAUDDIV_MSB); writel(quot & 0xff, port->membase + UART_BAUDDIV_LSB); writel(line_cr, port->membase + UART_LINE_CR); writel(rts_cr, port->membase + UART_RTS_CR); /* * Characters to ignore */ port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= SR_PE; if (termios->c_iflag & IGNBRK) { port->ignore_status_mask |= SR_BE; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= SR_PE; } port->read_status_mask = 0; if (termios->c_iflag & (BRKINT | PARMRK)) port->read_status_mask |= SR_BE; if (termios->c_iflag & INPCK) port->read_status_mask |= SR_PE | SR_FE; writel(old_cr, port->membase + UART_CR); spin_unlock_irq(&port->lock); } static const char *netx_type(struct uart_port *port) { return port->type == PORT_NETX ? "NETX" : NULL; } static void netx_release_port(struct uart_port *port) { release_mem_region(port->mapbase, UART_PORT_SIZE); } static int netx_request_port(struct uart_port *port) { return request_mem_region(port->mapbase, UART_PORT_SIZE, DRIVER_NAME) != NULL ? 
0 : -EBUSY; } static void netx_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE && netx_request_port(port) == 0) port->type = PORT_NETX; } static int netx_verify_port(struct uart_port *port, struct serial_struct *ser) { int ret = 0; if (ser->type != PORT_UNKNOWN && ser->type != PORT_NETX) ret = -EINVAL; return ret; } static struct uart_ops netx_pops = { .tx_empty = netx_tx_empty, .set_mctrl = netx_set_mctrl, .get_mctrl = netx_get_mctrl, .stop_tx = netx_stop_tx, .start_tx = netx_start_tx, .stop_rx = netx_stop_rx, .enable_ms = netx_enable_ms, .break_ctl = netx_break_ctl, .startup = netx_startup, .shutdown = netx_shutdown, .set_termios = netx_set_termios, .type = netx_type, .release_port = netx_release_port, .request_port = netx_request_port, .config_port = netx_config_port, .verify_port = netx_verify_port, }; static struct netx_port netx_ports[] = { { .port = { .type = PORT_NETX, .iotype = UPIO_MEM, .membase = (char __iomem *)io_p2v(NETX_PA_UART0), .mapbase = NETX_PA_UART0, .irq = NETX_IRQ_UART0, .uartclk = 100000000, .fifosize = 16, .flags = UPF_BOOT_AUTOCONF, .ops = &netx_pops, .line = 0, }, }, { .port = { .type = PORT_NETX, .iotype = UPIO_MEM, .membase = (char __iomem *)io_p2v(NETX_PA_UART1), .mapbase = NETX_PA_UART1, .irq = NETX_IRQ_UART1, .uartclk = 100000000, .fifosize = 16, .flags = UPF_BOOT_AUTOCONF, .ops = &netx_pops, .line = 1, }, }, { .port = { .type = PORT_NETX, .iotype = UPIO_MEM, .membase = (char __iomem *)io_p2v(NETX_PA_UART2), .mapbase = NETX_PA_UART2, .irq = NETX_IRQ_UART2, .uartclk = 100000000, .fifosize = 16, .flags = UPF_BOOT_AUTOCONF, .ops = &netx_pops, .line = 2, }, } }; #ifdef CONFIG_SERIAL_NETX_CONSOLE static void netx_console_putchar(struct uart_port *port, int ch) { while (readl(port->membase + UART_FR) & FR_BUSY); writel(ch, port->membase + UART_DR); } static void netx_console_write(struct console *co, const char *s, unsigned int count) { struct uart_port *port = &netx_ports[co->index].port; unsigned char 
cr_save; cr_save = readl(port->membase + UART_CR); writel(cr_save | CR_UART_EN, port->membase + UART_CR); uart_console_write(port, s, count, netx_console_putchar); while (readl(port->membase + UART_FR) & FR_BUSY); writel(cr_save, port->membase + UART_CR); } static void __init netx_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits, int *flow) { unsigned char line_cr; *baud = (readl(port->membase + UART_BAUDDIV_MSB) << 8) | readl(port->membase + UART_BAUDDIV_LSB); *baud *= 1000; *baud /= 4096; *baud *= 1000; *baud /= 256; *baud *= 100; line_cr = readl(port->membase + UART_LINE_CR); *parity = 'n'; if (line_cr & LINE_CR_PEN) { if (line_cr & LINE_CR_EPS) *parity = 'e'; else *parity = 'o'; } switch (line_cr & LINE_CR_BITS_MASK) { case LINE_CR_8BIT: *bits = 8; break; case LINE_CR_7BIT: *bits = 7; break; case LINE_CR_6BIT: *bits = 6; break; case LINE_CR_5BIT: *bits = 5; break; } if (readl(port->membase + UART_RTS_CR) & RTS_CR_AUTO) *flow = 'r'; } static int __init netx_console_setup(struct console *co, char *options) { struct netx_port *sport; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; /* * Check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support. 
*/ if (co->index == -1 || co->index >= ARRAY_SIZE(netx_ports)) co->index = 0; sport = &netx_ports[co->index]; if (options) { uart_parse_options(options, &baud, &parity, &bits, &flow); } else { /* if the UART is enabled, assume it has been correctly setup * by the bootloader and get the options */ if (readl(sport->port.membase + UART_CR) & CR_UART_EN) { netx_console_get_options(&sport->port, &baud, &parity, &bits, &flow); } } return uart_set_options(&sport->port, co, baud, parity, bits, flow); } static struct uart_driver netx_reg; static struct console netx_console = { .name = "ttyNX", .write = netx_console_write, .device = uart_console_device, .setup = netx_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &netx_reg, }; static int __init netx_console_init(void) { register_console(&netx_console); return 0; } console_initcall(netx_console_init); #define NETX_CONSOLE &netx_console #else #define NETX_CONSOLE NULL #endif static struct uart_driver netx_reg = { .owner = THIS_MODULE, .driver_name = DRIVER_NAME, .dev_name = "ttyNX", .major = SERIAL_NX_MAJOR, .minor = MINOR_START, .nr = ARRAY_SIZE(netx_ports), .cons = NETX_CONSOLE, }; static int serial_netx_suspend(struct platform_device *pdev, pm_message_t state) { struct netx_port *sport = platform_get_drvdata(pdev); if (sport) uart_suspend_port(&netx_reg, &sport->port); return 0; } static int serial_netx_resume(struct platform_device *pdev) { struct netx_port *sport = platform_get_drvdata(pdev); if (sport) uart_resume_port(&netx_reg, &sport->port); return 0; } static int serial_netx_probe(struct platform_device *pdev) { struct uart_port *port = &netx_ports[pdev->id].port; dev_info(&pdev->dev, "initialising\n"); port->dev = &pdev->dev; writel(1, port->membase + UART_RXFIFO_IRQLEVEL); uart_add_one_port(&netx_reg, &netx_ports[pdev->id].port); platform_set_drvdata(pdev, &netx_ports[pdev->id]); return 0; } static int serial_netx_remove(struct platform_device *pdev) { struct netx_port *sport = 
platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); if (sport) uart_remove_one_port(&netx_reg, &sport->port); return 0; } static struct platform_driver serial_netx_driver = { .probe = serial_netx_probe, .remove = serial_netx_remove, .suspend = serial_netx_suspend, .resume = serial_netx_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; static int __init netx_serial_init(void) { int ret; printk(KERN_INFO "Serial: NetX driver\n"); ret = uart_register_driver(&netx_reg); if (ret) return ret; ret = platform_driver_register(&serial_netx_driver); if (ret != 0) uart_unregister_driver(&netx_reg); return 0; } static void __exit netx_serial_exit(void) { platform_driver_unregister(&serial_netx_driver); uart_unregister_driver(&netx_reg); } module_init(netx_serial_init); module_exit(netx_serial_exit); MODULE_AUTHOR("Sascha Hauer"); MODULE_DESCRIPTION("NetX serial port driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
msva/android_kernel_asus_A80
fs/ntfs/compress.c
10948
29762
/** * compress.c - NTFS kernel compressed attributes handling. * Part of the Linux-NTFS project. * * Copyright (c) 2001-2004 Anton Altaparmakov * Copyright (c) 2002 Richard Russon * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/blkdev.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include "attrib.h" #include "inode.h" #include "debug.h" #include "ntfs.h" /** * ntfs_compression_constants - enum of constants used in the compression code */ typedef enum { /* Token types and access mask. */ NTFS_SYMBOL_TOKEN = 0, NTFS_PHRASE_TOKEN = 1, NTFS_TOKEN_MASK = 1, /* Compression sub-block constants. */ NTFS_SB_SIZE_MASK = 0x0fff, NTFS_SB_SIZE = 0x1000, NTFS_SB_IS_COMPRESSED = 0x8000, /* * The maximum compression block size is by definition 16 * the cluster * size, with the maximum supported cluster size being 4kiB. Thus the * maximum compression buffer size is 64kiB, so we use this when * initializing the compression buffer. 
*/ NTFS_MAX_CB_SIZE = 64 * 1024, } ntfs_compression_constants; /** * ntfs_compression_buffer - one buffer for the decompression engine */ static u8 *ntfs_compression_buffer = NULL; /** * ntfs_cb_lock - spinlock which protects ntfs_compression_buffer */ static DEFINE_SPINLOCK(ntfs_cb_lock); /** * allocate_compression_buffers - allocate the decompression buffers * * Caller has to hold the ntfs_lock mutex. * * Return 0 on success or -ENOMEM if the allocations failed. */ int allocate_compression_buffers(void) { BUG_ON(ntfs_compression_buffer); ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE); if (!ntfs_compression_buffer) return -ENOMEM; return 0; } /** * free_compression_buffers - free the decompression buffers * * Caller has to hold the ntfs_lock mutex. */ void free_compression_buffers(void) { BUG_ON(!ntfs_compression_buffer); vfree(ntfs_compression_buffer); ntfs_compression_buffer = NULL; } /** * zero_partial_compressed_page - zero out of bounds compressed page region */ static void zero_partial_compressed_page(struct page *page, const s64 initialized_size) { u8 *kp = page_address(page); unsigned int kp_ofs; ntfs_debug("Zeroing page region outside initialized size."); if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) { /* * FIXME: Using clear_page() will become wrong when we get * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem. 
*/ clear_page(kp); return; } kp_ofs = initialized_size & ~PAGE_CACHE_MASK; memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs); return; } /** * handle_bounds_compressed_page - test for&handle out of bounds compressed page */ static inline void handle_bounds_compressed_page(struct page *page, const loff_t i_size, const s64 initialized_size) { if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) && (initialized_size < i_size)) zero_partial_compressed_page(page, initialized_size); return; } /** * ntfs_decompress - decompress a compression block into an array of pages * @dest_pages: destination array of pages * @dest_index: current index into @dest_pages (IN/OUT) * @dest_ofs: current offset within @dest_pages[@dest_index] (IN/OUT) * @dest_max_index: maximum index into @dest_pages (IN) * @dest_max_ofs: maximum offset within @dest_pages[@dest_max_index] (IN) * @xpage: the target page (-1 if none) (IN) * @xpage_done: set to 1 if xpage was completed successfully (IN/OUT) * @cb_start: compression block to decompress (IN) * @cb_size: size of compression block @cb_start in bytes (IN) * @i_size: file size when we started the read (IN) * @initialized_size: initialized file size when we started the read (IN) * * The caller must have disabled preemption. ntfs_decompress() reenables it when * the critical section is finished. * * This decompresses the compression block @cb_start into the array of * destination pages @dest_pages starting at index @dest_index into @dest_pages * and at offset @dest_pos into the page @dest_pages[@dest_index]. * * When the page @dest_pages[@xpage] is completed, @xpage_done is set to 1. * If xpage is -1 or @xpage has not been completed, @xpage_done is not modified. * * @cb_start is a pointer to the compression block which needs decompressing * and @cb_size is the size of @cb_start in bytes (8-64kiB). * * Return 0 if success or -EOVERFLOW on error in the compressed stream. 
* @xpage_done indicates whether the target page (@dest_pages[@xpage]) was * completed during the decompression of the compression block (@cb_start). * * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up * unpredicatbly! You have been warned! * * Note to hackers: This function may not sleep until it has finished accessing * the compression block @cb_start as it is a per-CPU buffer. */ static int ntfs_decompress(struct page *dest_pages[], int *dest_index, int *dest_ofs, const int dest_max_index, const int dest_max_ofs, const int xpage, char *xpage_done, u8 *const cb_start, const u32 cb_size, const loff_t i_size, const s64 initialized_size) { /* * Pointers into the compressed data, i.e. the compression block (cb), * and the therein contained sub-blocks (sb). */ u8 *cb_end = cb_start + cb_size; /* End of cb. */ u8 *cb = cb_start; /* Current position in cb. */ u8 *cb_sb_start = cb; /* Beginning of the current sb in the cb. */ u8 *cb_sb_end; /* End of current sb / beginning of next sb. */ /* Variables for uncompressed data / destination. */ struct page *dp; /* Current destination page being worked on. */ u8 *dp_addr; /* Current pointer into dp. */ u8 *dp_sb_start; /* Start of current sub-block in dp. */ u8 *dp_sb_end; /* End of current sb in dp (dp_sb_start + NTFS_SB_SIZE). */ u16 do_sb_start; /* @dest_ofs when starting this sub-block. */ u16 do_sb_end; /* @dest_ofs of end of this sb (do_sb_start + NTFS_SB_SIZE). */ /* Variables for tag and token parsing. */ u8 tag; /* Current tag. */ int token; /* Loop counter for the eight tokens in tag. */ /* Need this because we can't sleep, so need two stages. */ int completed_pages[dest_max_index - *dest_index + 1]; int nr_completed_pages = 0; /* Default error code. 
*/ int err = -EOVERFLOW; ntfs_debug("Entering, cb_size = 0x%x.", cb_size); do_next_sb: ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.", cb - cb_start); /* * Have we reached the end of the compression block or the end of the * decompressed data? The latter can happen for example if the current * position in the compression block is one byte before its end so the * first two checks do not detect it. */ if (cb == cb_end || !le16_to_cpup((le16*)cb) || (*dest_index == dest_max_index && *dest_ofs == dest_max_ofs)) { int i; ntfs_debug("Completed. Returning success (0)."); err = 0; return_error: /* We can sleep from now on, so we drop lock. */ spin_unlock(&ntfs_cb_lock); /* Second stage: finalize completed pages. */ if (nr_completed_pages > 0) { for (i = 0; i < nr_completed_pages; i++) { int di = completed_pages[i]; dp = dest_pages[di]; /* * If we are outside the initialized size, zero * the out of bounds page range. */ handle_bounds_compressed_page(dp, i_size, initialized_size); flush_dcache_page(dp); kunmap(dp); SetPageUptodate(dp); unlock_page(dp); if (di == xpage) *xpage_done = 1; else page_cache_release(dp); dest_pages[di] = NULL; } } return err; } /* Setup offsets for the current sub-block destination. */ do_sb_start = *dest_ofs; do_sb_end = do_sb_start + NTFS_SB_SIZE; /* Check that we are still within allowed boundaries. */ if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs) goto return_overflow; /* Does the minimum size of a compressed sb overflow valid range? */ if (cb + 6 > cb_end) goto return_overflow; /* Setup the current sub-block source pointers and validate range. */ cb_sb_start = cb; cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK) + 3; if (cb_sb_end > cb_end) goto return_overflow; /* Get the current destination page. */ dp = dest_pages[*dest_index]; if (!dp) { /* No page present. Skip decompression of this sub-block. */ cb = cb_sb_end; /* Advance destination position to next sub-block. 
*/ *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK; if (!*dest_ofs && (++*dest_index > dest_max_index)) goto return_overflow; goto do_next_sb; } /* We have a valid destination page. Setup the destination pointers. */ dp_addr = (u8*)page_address(dp) + do_sb_start; /* Now, we are ready to process the current sub-block (sb). */ if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) { ntfs_debug("Found uncompressed sub-block."); /* This sb is not compressed, just copy it into destination. */ /* Advance source position to first data byte. */ cb += 2; /* An uncompressed sb must be full size. */ if (cb_sb_end - cb != NTFS_SB_SIZE) goto return_overflow; /* Copy the block and advance the source position. */ memcpy(dp_addr, cb, NTFS_SB_SIZE); cb += NTFS_SB_SIZE; /* Advance destination position to next sub-block. */ *dest_ofs += NTFS_SB_SIZE; if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) { finalize_page: /* * First stage: add current page index to array of * completed pages. */ completed_pages[nr_completed_pages++] = *dest_index; if (++*dest_index > dest_max_index) goto return_overflow; } goto do_next_sb; } ntfs_debug("Found compressed sub-block."); /* This sb is compressed, decompress it into destination. */ /* Setup destination pointers. */ dp_sb_start = dp_addr; dp_sb_end = dp_sb_start + NTFS_SB_SIZE; /* Forward to the first tag in the sub-block. */ cb += 2; do_next_tag: if (cb == cb_sb_end) { /* Check if the decompressed sub-block was not full-length. */ if (dp_addr < dp_sb_end) { int nr_bytes = do_sb_end - *dest_ofs; ntfs_debug("Filling incomplete sub-block with " "zeroes."); /* Zero remainder and update destination position. */ memset(dp_addr, 0, nr_bytes); *dest_ofs += nr_bytes; } /* We have finished the current sub-block. */ if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) goto finalize_page; goto do_next_sb; } /* Check we are still in range. */ if (cb > cb_sb_end || dp_addr > dp_sb_end) goto return_overflow; /* Get the next tag and advance to first token. 
*/ tag = *cb++; /* Parse the eight tokens described by the tag. */ for (token = 0; token < 8; token++, tag >>= 1) { u16 lg, pt, length, max_non_overlap; register u16 i; u8 *dp_back_addr; /* Check if we are done / still in range. */ if (cb >= cb_sb_end || dp_addr > dp_sb_end) break; /* Determine token type and parse appropriately.*/ if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) { /* * We have a symbol token, copy the symbol across, and * advance the source and destination positions. */ *dp_addr++ = *cb++; ++*dest_ofs; /* Continue with the next token. */ continue; } /* * We have a phrase token. Make sure it is not the first tag in * the sb as this is illegal and would confuse the code below. */ if (dp_addr == dp_sb_start) goto return_overflow; /* * Determine the number of bytes to go back (p) and the number * of bytes to copy (l). We use an optimized algorithm in which * we first calculate log2(current destination position in sb), * which allows determination of l and p in O(1) rather than * O(n). We just need an arch-optimized log2() function now. */ lg = 0; for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1) lg++; /* Get the phrase token into i. */ pt = le16_to_cpup((le16*)cb); /* * Calculate starting position of the byte sequence in * the destination using the fact that p = (pt >> (12 - lg)) + 1 * and make sure we don't go too far back. */ dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1; if (dp_back_addr < dp_sb_start) goto return_overflow; /* Now calculate the length of the byte sequence. */ length = (pt & (0xfff >> lg)) + 3; /* Advance destination position and verify it is in range. */ *dest_ofs += length; if (*dest_ofs > do_sb_end) goto return_overflow; /* The number of non-overlapping bytes. */ max_non_overlap = dp_addr - dp_back_addr; if (length <= max_non_overlap) { /* The byte sequence doesn't overlap, just copy it. */ memcpy(dp_addr, dp_back_addr, length); /* Advance destination pointer. 
*/ dp_addr += length; } else { /* * The byte sequence does overlap, copy non-overlapping * part and then do a slow byte by byte copy for the * overlapping part. Also, advance the destination * pointer. */ memcpy(dp_addr, dp_back_addr, max_non_overlap); dp_addr += max_non_overlap; dp_back_addr += max_non_overlap; length -= max_non_overlap; while (length--) *dp_addr++ = *dp_back_addr++; } /* Advance source position and continue with the next token. */ cb += 2; } /* No tokens left in the current tag. Continue with the next tag. */ goto do_next_tag; return_overflow: ntfs_error(NULL, "Failed. Returning -EOVERFLOW."); goto return_error; } /** * ntfs_read_compressed_block - read a compressed block into the page cache * @page: locked page in the compression block(s) we need to read * * When we are called the page has already been verified to be locked and the * attribute is known to be non-resident, not encrypted, but compressed. * * 1. Determine which compression block(s) @page is in. * 2. Get hold of all pages corresponding to this/these compression block(s). * 3. Read the (first) compression block. * 4. Decompress it into the corresponding pages. * 5. Throw the compressed data away and proceed to 3. for the next compression * block or return success if no more compression blocks left. * * Warning: We have to be careful what we do about existing pages. They might * have been written to so that we would lose data if we were to just overwrite * them with the out-of-date uncompressed data. * * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at * the end of the file I think. We need to detect this case and zero the out * of bounds remainder of the page in question and mark it as handled. At the * moment we would just return -EIO on such a page. This bug will only become * apparent if pages are above 8kiB and the NTFS volume only uses 512 byte * clusters so is probably not going to be seen by anyone. Still this should * be fixed. 
(AIA) * * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in * handling sparse and compressed cbs. (AIA) * * FIXME: At the moment we don't do any zeroing out in the case that * initialized_size is less than data_size. This should be safe because of the * nature of the compression algorithm used. Just in case we check and output * an error message in read inode if the two sizes are not equal for a * compressed file. (AIA) */ int ntfs_read_compressed_block(struct page *page) { loff_t i_size; s64 initialized_size; struct address_space *mapping = page->mapping; ntfs_inode *ni = NTFS_I(mapping->host); ntfs_volume *vol = ni->vol; struct super_block *sb = vol->sb; runlist_element *rl; unsigned long flags, block_size = sb->s_blocksize; unsigned char block_size_bits = sb->s_blocksize_bits; u8 *cb, *cb_pos, *cb_end; struct buffer_head **bhs; unsigned long offset, index = page->index; u32 cb_size = ni->itype.compressed.block_size; u64 cb_size_mask = cb_size - 1UL; VCN vcn; LCN lcn; /* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */ VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >> vol->cluster_size_bits; /* * The first vcn after the last wanted vcn (minimum alignment is again * PAGE_CACHE_SIZE. */ VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1) & ~cb_size_mask) >> vol->cluster_size_bits; /* Number of compression blocks (cbs) in the wanted vcn range. */ unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits >> ni->itype.compressed.block_size_bits; /* * Number of pages required to store the uncompressed data from all * compression blocks (cbs) overlapping @page. Due to alignment * guarantees of start_vcn and end_vcn, no need to round up here. 
*/ unsigned int nr_pages = (end_vcn - start_vcn) << vol->cluster_size_bits >> PAGE_CACHE_SHIFT; unsigned int xpage, max_page, cur_page, cur_ofs, i; unsigned int cb_clusters, cb_max_ofs; int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0; struct page **pages; unsigned char xpage_done = 0; ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = " "%i.", index, cb_size, nr_pages); /* * Bad things happen if we get here for anything that is not an * unnamed $DATA attribute. */ BUG_ON(ni->type != AT_DATA); BUG_ON(ni->name_len); pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS); /* Allocate memory to store the buffer heads we need. */ bhs_size = cb_size / block_size * sizeof(struct buffer_head *); bhs = kmalloc(bhs_size, GFP_NOFS); if (unlikely(!pages || !bhs)) { kfree(bhs); kfree(pages); unlock_page(page); ntfs_error(vol->sb, "Failed to allocate internal buffers."); return -ENOMEM; } /* * We have already been given one page, this is the one we must do. * Once again, the alignment guarantees keep it simple. */ offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT; xpage = index - offset; pages[xpage] = page; /* * The remaining pages need to be allocated and inserted into the page * cache, alignment guarantees keep all the below much simpler. (-8 */ read_lock_irqsave(&ni->size_lock, flags); i_size = i_size_read(VFS_I(ni)); initialized_size = ni->initialized_size; read_unlock_irqrestore(&ni->size_lock, flags); max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - offset; /* Is the page fully outside i_size? 
(truncate in progress) */ if (xpage >= max_page) { kfree(bhs); kfree(pages); zero_user(page, 0, PAGE_CACHE_SIZE); ntfs_debug("Compressed read outside i_size - truncated?"); SetPageUptodate(page); unlock_page(page); return 0; } if (nr_pages < max_page) max_page = nr_pages; for (i = 0; i < max_page; i++, offset++) { if (i != xpage) pages[i] = grab_cache_page_nowait(mapping, offset); page = pages[i]; if (page) { /* * We only (re)read the page if it isn't already read * in and/or dirty or we would be losing data or at * least wasting our time. */ if (!PageDirty(page) && (!PageUptodate(page) || PageError(page))) { ClearPageError(page); kmap(page); continue; } unlock_page(page); page_cache_release(page); pages[i] = NULL; } } /* * We have the runlist, and all the destination pages we need to fill. * Now read the first compression block. */ cur_page = 0; cur_ofs = 0; cb_clusters = ni->itype.compressed.block_clusters; do_next_cb: nr_cbs--; nr_bhs = 0; /* Read all cb buffer heads one cluster at a time. */ rl = NULL; for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn; vcn++) { bool is_retry = false; if (!rl) { lock_retry_remap: down_read(&ni->runlist.lock); rl = ni->runlist.rl; } if (likely(rl != NULL)) { /* Seek to element containing target vcn. */ while (rl->length && rl[1].vcn <= vcn) rl++; lcn = ntfs_rl_vcn_to_lcn(rl, vcn); } else lcn = LCN_RL_NOT_MAPPED; ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.", (unsigned long long)vcn, (unsigned long long)lcn); if (lcn < 0) { /* * When we reach the first sparse cluster we have * finished with the cb. */ if (lcn == LCN_HOLE) break; if (is_retry || lcn != LCN_RL_NOT_MAPPED) goto rl_err; is_retry = true; /* * Attempt to map runlist, dropping lock for the * duration. */ up_read(&ni->runlist.lock); if (!ntfs_map_runlist(ni, vcn)) goto lock_retry_remap; goto map_rl_err; } block = lcn << vol->cluster_size_bits >> block_size_bits; /* Read the lcn from device in chunks of block_size bytes. 
*/ max_block = block + (vol->cluster_size >> block_size_bits); do { ntfs_debug("block = 0x%x.", block); if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block)))) goto getblk_err; nr_bhs++; } while (++block < max_block); } /* Release the lock if we took it. */ if (rl) up_read(&ni->runlist.lock); /* Setup and initiate io on all buffer heads. */ for (i = 0; i < nr_bhs; i++) { struct buffer_head *tbh = bhs[i]; if (!trylock_buffer(tbh)) continue; if (unlikely(buffer_uptodate(tbh))) { unlock_buffer(tbh); continue; } get_bh(tbh); tbh->b_end_io = end_buffer_read_sync; submit_bh(READ, tbh); } /* Wait for io completion on all buffer heads. */ for (i = 0; i < nr_bhs; i++) { struct buffer_head *tbh = bhs[i]; if (buffer_uptodate(tbh)) continue; wait_on_buffer(tbh); /* * We need an optimization barrier here, otherwise we start * hitting the below fixup code when accessing a loopback * mounted ntfs partition. This indicates either there is a * race condition in the loop driver or, more likely, gcc * overoptimises the code without the barrier and it doesn't * do the Right Thing(TM). */ barrier(); if (unlikely(!buffer_uptodate(tbh))) { ntfs_warning(vol->sb, "Buffer is unlocked but not " "uptodate! Unplugging the disk queue " "and rescheduling."); get_bh(tbh); io_schedule(); put_bh(tbh); if (unlikely(!buffer_uptodate(tbh))) goto read_err; ntfs_warning(vol->sb, "Buffer is now uptodate. Good."); } } /* * Get the compression buffer. We must not sleep any more * until we are finished with it. */ spin_lock(&ntfs_cb_lock); cb = ntfs_compression_buffer; BUG_ON(!cb); cb_pos = cb; cb_end = cb + cb_size; /* Copy the buffer heads into the contiguous buffer. */ for (i = 0; i < nr_bhs; i++) { memcpy(cb_pos, bhs[i]->b_data, block_size); cb_pos += block_size; } /* Just a precaution. */ if (cb_pos + 2 <= cb + cb_size) *(u16*)cb_pos = 0; /* Reset cb_pos back to the beginning. */ cb_pos = cb; /* We now have both source (if present) and destination. 
*/ ntfs_debug("Successfully read the compression block."); /* The last page and maximum offset within it for the current cb. */ cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size; cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK; cb_max_page >>= PAGE_CACHE_SHIFT; /* Catch end of file inside a compression block. */ if (cb_max_page > max_page) cb_max_page = max_page; if (vcn == start_vcn - cb_clusters) { /* Sparse cb, zero out page range overlapping the cb. */ ntfs_debug("Found sparse compression block."); /* We can sleep from now on, so we drop lock. */ spin_unlock(&ntfs_cb_lock); if (cb_max_ofs) cb_max_page--; for (; cur_page < cb_max_page; cur_page++) { page = pages[cur_page]; if (page) { /* * FIXME: Using clear_page() will become wrong * when we get PAGE_CACHE_SIZE != PAGE_SIZE but * for now there is no problem. */ if (likely(!cur_ofs)) clear_page(page_address(page)); else memset(page_address(page) + cur_ofs, 0, PAGE_CACHE_SIZE - cur_ofs); flush_dcache_page(page); kunmap(page); SetPageUptodate(page); unlock_page(page); if (cur_page == xpage) xpage_done = 1; else page_cache_release(page); pages[cur_page] = NULL; } cb_pos += PAGE_CACHE_SIZE - cur_ofs; cur_ofs = 0; if (cb_pos >= cb_end) break; } /* If we have a partial final page, deal with it now. */ if (cb_max_ofs && cb_pos < cb_end) { page = pages[cur_page]; if (page) memset(page_address(page) + cur_ofs, 0, cb_max_ofs - cur_ofs); /* * No need to update cb_pos at this stage: * cb_pos += cb_max_ofs - cur_ofs; */ cur_ofs = cb_max_ofs; } } else if (vcn == start_vcn) { /* We can't sleep so we need two stages. */ unsigned int cur2_page = cur_page; unsigned int cur_ofs2 = cur_ofs; u8 *cb_pos2 = cb_pos; ntfs_debug("Found uncompressed compression block."); /* Uncompressed cb, copy it to the destination pages. 
*/ /* * TODO: As a big optimization, we could detect this case * before we read all the pages and use block_read_full_page() * on all full pages instead (we still have to treat partial * pages especially but at least we are getting rid of the * synchronous io for the majority of pages. * Or if we choose not to do the read-ahead/-behind stuff, we * could just return block_read_full_page(pages[xpage]) as long * as PAGE_CACHE_SIZE <= cb_size. */ if (cb_max_ofs) cb_max_page--; /* First stage: copy data into destination pages. */ for (; cur_page < cb_max_page; cur_page++) { page = pages[cur_page]; if (page) memcpy(page_address(page) + cur_ofs, cb_pos, PAGE_CACHE_SIZE - cur_ofs); cb_pos += PAGE_CACHE_SIZE - cur_ofs; cur_ofs = 0; if (cb_pos >= cb_end) break; } /* If we have a partial final page, deal with it now. */ if (cb_max_ofs && cb_pos < cb_end) { page = pages[cur_page]; if (page) memcpy(page_address(page) + cur_ofs, cb_pos, cb_max_ofs - cur_ofs); cb_pos += cb_max_ofs - cur_ofs; cur_ofs = cb_max_ofs; } /* We can sleep from now on, so drop lock. */ spin_unlock(&ntfs_cb_lock); /* Second stage: finalize pages. */ for (; cur2_page < cb_max_page; cur2_page++) { page = pages[cur2_page]; if (page) { /* * If we are outside the initialized size, zero * the out of bounds page range. */ handle_bounds_compressed_page(page, i_size, initialized_size); flush_dcache_page(page); kunmap(page); SetPageUptodate(page); unlock_page(page); if (cur2_page == xpage) xpage_done = 1; else page_cache_release(page); pages[cur2_page] = NULL; } cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2; cur_ofs2 = 0; if (cb_pos2 >= cb_end) break; } } else { /* Compressed cb, decompress it into the destination page(s). 
*/ unsigned int prev_cur_page = cur_page; ntfs_debug("Found compressed compression block."); err = ntfs_decompress(pages, &cur_page, &cur_ofs, cb_max_page, cb_max_ofs, xpage, &xpage_done, cb_pos, cb_size - (cb_pos - cb), i_size, initialized_size); /* * We can sleep from now on, lock already dropped by * ntfs_decompress(). */ if (err) { ntfs_error(vol->sb, "ntfs_decompress() failed in inode " "0x%lx with error code %i. Skipping " "this compression block.", ni->mft_no, -err); /* Release the unfinished pages. */ for (; prev_cur_page < cur_page; prev_cur_page++) { page = pages[prev_cur_page]; if (page) { flush_dcache_page(page); kunmap(page); unlock_page(page); if (prev_cur_page != xpage) page_cache_release(page); pages[prev_cur_page] = NULL; } } } } /* Release the buffer heads. */ for (i = 0; i < nr_bhs; i++) brelse(bhs[i]); /* Do we have more work to do? */ if (nr_cbs) goto do_next_cb; /* We no longer need the list of buffer heads. */ kfree(bhs); /* Clean up if we have any pages left. Should never happen. */ for (cur_page = 0; cur_page < max_page; cur_page++) { page = pages[cur_page]; if (page) { ntfs_error(vol->sb, "Still have pages left! " "Terminating them with extreme " "prejudice. Inode 0x%lx, page index " "0x%lx.", ni->mft_no, page->index); flush_dcache_page(page); kunmap(page); unlock_page(page); if (cur_page != xpage) page_cache_release(page); pages[cur_page] = NULL; } } /* We no longer need the list of pages. */ kfree(pages); /* If we have completed the requested page, we return success. */ if (likely(xpage_done)) return 0; ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ? "EOVERFLOW" : (!err ? "EIO" : "unknown error")); return err < 0 ? err : -EIO; read_err: ntfs_error(vol->sb, "IO error while reading compressed data."); /* Release the buffer heads. */ for (i = 0; i < nr_bhs; i++) brelse(bhs[i]); goto err_out; map_rl_err: ntfs_error(vol->sb, "ntfs_map_runlist() failed. 
Cannot read " "compression block."); goto err_out; rl_err: up_read(&ni->runlist.lock); ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read " "compression block."); goto err_out; getblk_err: up_read(&ni->runlist.lock); ntfs_error(vol->sb, "getblk() failed. Cannot read compression block."); err_out: kfree(bhs); for (i = cur_page; i < max_page; i++) { page = pages[i]; if (page) { flush_dcache_page(page); kunmap(page); unlock_page(page); if (i != xpage) page_cache_release(page); } } kfree(pages); return -EIO; }
gpl-2.0
russelldias98/linux_kernel_3.0.68
drivers/media/video/cx25840/cx25840-audio.c
12484
14919
/* cx25840 audio functions * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/videodev2.h> #include <linux/i2c.h> #include <media/v4l2-common.h> #include <media/cx25840.h> #include "cx25840-core.h" /* * Note: The PLL and SRC parameters are based on a reference frequency that * would ideally be: * * NTSC Color subcarrier freq * 8 = 4.5 MHz/286 * 455/2 * 8 = 28.63636363... MHz * * However, it's not the exact reference frequency that matters, only that the * firmware and modules that comprise the driver for a particular board all * use the same value (close to the ideal value). * * Comments below will note which reference frequency is assumed for various * parameters. 
They will usually be one of * * ref_freq = 28.636360 MHz * or * ref_freq = 28.636363 MHz */ static int cx25840_set_audclk_freq(struct i2c_client *client, u32 freq) { struct cx25840_state *state = to_state(i2c_get_clientdata(client)); if (state->aud_input != CX25840_AUDIO_SERIAL) { switch (freq) { case 32000: /* * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04 * AUX_PLL Integer = 0x06, AUX PLL Post Divider = 0x10 */ cx25840_write4(client, 0x108, 0x1006040f); /* * VID_PLL Fraction (register 0x10c) = 0x2be2fe * 28636360 * 0xf.15f17f0/4 = 108 MHz * 432 MHz pre-postdivide */ /* * AUX_PLL Fraction = 0x1bb39ee * 28636363 * 0x6.dd9cf70/0x10 = 32000 * 384 * 196.6 MHz pre-postdivide * FIXME < 200 MHz is out of specified valid range * FIXME 28636363 ref_freq doesn't match VID PLL ref */ cx25840_write4(client, 0x110, 0x01bb39ee); /* * SA_MCLK_SEL = 1 * SA_MCLK_DIV = 0x10 = 384/384 * AUX_PLL post dvivider */ cx25840_write(client, 0x127, 0x50); if (is_cx2583x(state)) break; /* src3/4/6_ctl */ /* 0x1.f77f = (4 * 28636360/8 * 2/455) / 32000 */ cx25840_write4(client, 0x900, 0x0801f77f); cx25840_write4(client, 0x904, 0x0801f77f); cx25840_write4(client, 0x90c, 0x0801f77f); break; case 44100: /* * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04 * AUX_PLL Integer = 0x09, AUX PLL Post Divider = 0x10 */ cx25840_write4(client, 0x108, 0x1009040f); /* * VID_PLL Fraction (register 0x10c) = 0x2be2fe * 28636360 * 0xf.15f17f0/4 = 108 MHz * 432 MHz pre-postdivide */ /* * AUX_PLL Fraction = 0x0ec6bd6 * 28636363 * 0x9.7635eb0/0x10 = 44100 * 384 * 271 MHz pre-postdivide * FIXME 28636363 ref_freq doesn't match VID PLL ref */ cx25840_write4(client, 0x110, 0x00ec6bd6); /* * SA_MCLK_SEL = 1 * SA_MCLK_DIV = 0x10 = 384/384 * AUX_PLL post dvivider */ cx25840_write(client, 0x127, 0x50); if (is_cx2583x(state)) break; /* src3/4/6_ctl */ /* 0x1.6d59 = (4 * 28636360/8 * 2/455) / 44100 */ cx25840_write4(client, 0x900, 0x08016d59); cx25840_write4(client, 0x904, 0x08016d59); cx25840_write4(client, 0x90c, 
0x08016d59); break; case 48000: /* * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04 * AUX_PLL Integer = 0x0a, AUX PLL Post Divider = 0x10 */ cx25840_write4(client, 0x108, 0x100a040f); /* * VID_PLL Fraction (register 0x10c) = 0x2be2fe * 28636360 * 0xf.15f17f0/4 = 108 MHz * 432 MHz pre-postdivide */ /* * AUX_PLL Fraction = 0x098d6e5 * 28636363 * 0xa.4c6b728/0x10 = 48000 * 384 * 295 MHz pre-postdivide * FIXME 28636363 ref_freq doesn't match VID PLL ref */ cx25840_write4(client, 0x110, 0x0098d6e5); /* * SA_MCLK_SEL = 1 * SA_MCLK_DIV = 0x10 = 384/384 * AUX_PLL post dvivider */ cx25840_write(client, 0x127, 0x50); if (is_cx2583x(state)) break; /* src3/4/6_ctl */ /* 0x1.4faa = (4 * 28636360/8 * 2/455) / 48000 */ cx25840_write4(client, 0x900, 0x08014faa); cx25840_write4(client, 0x904, 0x08014faa); cx25840_write4(client, 0x90c, 0x08014faa); break; } } else { switch (freq) { case 32000: /* * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04 * AUX_PLL Integer = 0x08, AUX PLL Post Divider = 0x1e */ cx25840_write4(client, 0x108, 0x1e08040f); /* * VID_PLL Fraction (register 0x10c) = 0x2be2fe * 28636360 * 0xf.15f17f0/4 = 108 MHz * 432 MHz pre-postdivide */ /* * AUX_PLL Fraction = 0x12a0869 * 28636363 * 0x8.9504348/0x1e = 32000 * 256 * 246 MHz pre-postdivide * FIXME 28636363 ref_freq doesn't match VID PLL ref */ cx25840_write4(client, 0x110, 0x012a0869); /* * SA_MCLK_SEL = 1 * SA_MCLK_DIV = 0x14 = 256/384 * AUX_PLL post dvivider */ cx25840_write(client, 0x127, 0x54); if (is_cx2583x(state)) break; /* src1_ctl */ /* 0x1.0000 = 32000/32000 */ cx25840_write4(client, 0x8f8, 0x08010000); /* src3/4/6_ctl */ /* 0x2.0000 = 2 * (32000/32000) */ cx25840_write4(client, 0x900, 0x08020000); cx25840_write4(client, 0x904, 0x08020000); cx25840_write4(client, 0x90c, 0x08020000); break; case 44100: /* * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04 * AUX_PLL Integer = 0x09, AUX PLL Post Divider = 0x18 */ cx25840_write4(client, 0x108, 0x1809040f); /* * VID_PLL Fraction (register 0x10c) = 
0x2be2fe * 28636360 * 0xf.15f17f0/4 = 108 MHz * 432 MHz pre-postdivide */ /* * AUX_PLL Fraction = 0x0ec6bd6 * 28636363 * 0x9.7635eb0/0x18 = 44100 * 256 * 271 MHz pre-postdivide * FIXME 28636363 ref_freq doesn't match VID PLL ref */ cx25840_write4(client, 0x110, 0x00ec6bd6); /* * SA_MCLK_SEL = 1 * SA_MCLK_DIV = 0x10 = 256/384 * AUX_PLL post dvivider */ cx25840_write(client, 0x127, 0x50); if (is_cx2583x(state)) break; /* src1_ctl */ /* 0x1.60cd = 44100/32000 */ cx25840_write4(client, 0x8f8, 0x080160cd); /* src3/4/6_ctl */ /* 0x1.7385 = 2 * (32000/44100) */ cx25840_write4(client, 0x900, 0x08017385); cx25840_write4(client, 0x904, 0x08017385); cx25840_write4(client, 0x90c, 0x08017385); break; case 48000: /* * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04 * AUX_PLL Integer = 0x0a, AUX PLL Post Divider = 0x18 */ cx25840_write4(client, 0x108, 0x180a040f); /* * VID_PLL Fraction (register 0x10c) = 0x2be2fe * 28636360 * 0xf.15f17f0/4 = 108 MHz * 432 MHz pre-postdivide */ /* * AUX_PLL Fraction = 0x098d6e5 * 28636363 * 0xa.4c6b728/0x18 = 48000 * 256 * 295 MHz pre-postdivide * FIXME 28636363 ref_freq doesn't match VID PLL ref */ cx25840_write4(client, 0x110, 0x0098d6e5); /* * SA_MCLK_SEL = 1 * SA_MCLK_DIV = 0x10 = 256/384 * AUX_PLL post dvivider */ cx25840_write(client, 0x127, 0x50); if (is_cx2583x(state)) break; /* src1_ctl */ /* 0x1.8000 = 48000/32000 */ cx25840_write4(client, 0x8f8, 0x08018000); /* src3/4/6_ctl */ /* 0x1.5555 = 2 * (32000/48000) */ cx25840_write4(client, 0x900, 0x08015555); cx25840_write4(client, 0x904, 0x08015555); cx25840_write4(client, 0x90c, 0x08015555); break; } } state->audclk_freq = freq; return 0; } static inline int cx25836_set_audclk_freq(struct i2c_client *client, u32 freq) { return cx25840_set_audclk_freq(client, freq); } static int cx23885_set_audclk_freq(struct i2c_client *client, u32 freq) { struct cx25840_state *state = to_state(i2c_get_clientdata(client)); if (state->aud_input != CX25840_AUDIO_SERIAL) { switch (freq) { case 32000: case 
44100: case 48000: /* We don't have register values * so avoid destroying registers. */ /* FIXME return -EINVAL; */ break; } } else { switch (freq) { case 32000: case 44100: /* We don't have register values * so avoid destroying registers. */ /* FIXME return -EINVAL; */ break; case 48000: /* src1_ctl */ /* 0x1.867c = 48000 / (2 * 28636360/8 * 2/455) */ cx25840_write4(client, 0x8f8, 0x0801867c); /* src3/4/6_ctl */ /* 0x1.4faa = (4 * 28636360/8 * 2/455) / 48000 */ cx25840_write4(client, 0x900, 0x08014faa); cx25840_write4(client, 0x904, 0x08014faa); cx25840_write4(client, 0x90c, 0x08014faa); break; } } state->audclk_freq = freq; return 0; } static int cx231xx_set_audclk_freq(struct i2c_client *client, u32 freq) { struct cx25840_state *state = to_state(i2c_get_clientdata(client)); if (state->aud_input != CX25840_AUDIO_SERIAL) { switch (freq) { case 32000: /* src3/4/6_ctl */ /* 0x1.f77f = (4 * 28636360/8 * 2/455) / 32000 */ cx25840_write4(client, 0x900, 0x0801f77f); cx25840_write4(client, 0x904, 0x0801f77f); cx25840_write4(client, 0x90c, 0x0801f77f); break; case 44100: /* src3/4/6_ctl */ /* 0x1.6d59 = (4 * 28636360/8 * 2/455) / 44100 */ cx25840_write4(client, 0x900, 0x08016d59); cx25840_write4(client, 0x904, 0x08016d59); cx25840_write4(client, 0x90c, 0x08016d59); break; case 48000: /* src3/4/6_ctl */ /* 0x1.4faa = (4 * 28636360/8 * 2/455) / 48000 */ cx25840_write4(client, 0x900, 0x08014faa); cx25840_write4(client, 0x904, 0x08014faa); cx25840_write4(client, 0x90c, 0x08014faa); break; } } else { switch (freq) { /* FIXME These cases make different assumptions about audclk */ case 32000: /* src1_ctl */ /* 0x1.0000 = 32000/32000 */ cx25840_write4(client, 0x8f8, 0x08010000); /* src3/4/6_ctl */ /* 0x2.0000 = 2 * (32000/32000) */ cx25840_write4(client, 0x900, 0x08020000); cx25840_write4(client, 0x904, 0x08020000); cx25840_write4(client, 0x90c, 0x08020000); break; case 44100: /* src1_ctl */ /* 0x1.60cd = 44100/32000 */ cx25840_write4(client, 0x8f8, 0x080160cd); /* src3/4/6_ctl 
*/ /* 0x1.7385 = 2 * (32000/44100) */ cx25840_write4(client, 0x900, 0x08017385); cx25840_write4(client, 0x904, 0x08017385); cx25840_write4(client, 0x90c, 0x08017385); break; case 48000: /* src1_ctl */ /* 0x1.867c = 48000 / (2 * 28636360/8 * 2/455) */ cx25840_write4(client, 0x8f8, 0x0801867c); /* src3/4/6_ctl */ /* 0x1.4faa = (4 * 28636360/8 * 2/455) / 48000 */ cx25840_write4(client, 0x900, 0x08014faa); cx25840_write4(client, 0x904, 0x08014faa); cx25840_write4(client, 0x90c, 0x08014faa); break; } } state->audclk_freq = freq; return 0; } static int set_audclk_freq(struct i2c_client *client, u32 freq) { struct cx25840_state *state = to_state(i2c_get_clientdata(client)); if (freq != 32000 && freq != 44100 && freq != 48000) return -EINVAL; if (is_cx231xx(state)) return cx231xx_set_audclk_freq(client, freq); if (is_cx2388x(state)) return cx23885_set_audclk_freq(client, freq); if (is_cx2583x(state)) return cx25836_set_audclk_freq(client, freq); return cx25840_set_audclk_freq(client, freq); } void cx25840_audio_set_path(struct i2c_client *client) { struct cx25840_state *state = to_state(i2c_get_clientdata(client)); if (!is_cx2583x(state)) { /* assert soft reset */ cx25840_and_or(client, 0x810, ~0x1, 0x01); /* stop microcontroller */ cx25840_and_or(client, 0x803, ~0x10, 0); /* Mute everything to prevent the PFFT! */ cx25840_write(client, 0x8d3, 0x1f); if (state->aud_input == CX25840_AUDIO_SERIAL) { /* Set Path1 to Serial Audio Input */ cx25840_write4(client, 0x8d0, 0x01011012); /* The microcontroller should not be started for the * non-tuner inputs: autodetection is specific for * TV audio. 
*/ } else { /* Set Path1 to Analog Demod Main Channel */ cx25840_write4(client, 0x8d0, 0x1f063870); } } set_audclk_freq(client, state->audclk_freq); if (!is_cx2583x(state)) { if (state->aud_input != CX25840_AUDIO_SERIAL) { /* When the microcontroller detects the * audio format, it will unmute the lines */ cx25840_and_or(client, 0x803, ~0x10, 0x10); } /* deassert soft reset */ cx25840_and_or(client, 0x810, ~0x1, 0x00); /* Ensure the controller is running when we exit */ if (is_cx2388x(state) || is_cx231xx(state)) cx25840_and_or(client, 0x803, ~0x10, 0x10); } } static void set_volume(struct i2c_client *client, int volume) { int vol; /* Convert the volume to msp3400 values (0-127) */ vol = volume >> 9; /* now scale it up to cx25840 values * -114dB to -96dB maps to 0 * this should be 19, but in my testing that was 4dB too loud */ if (vol <= 23) { vol = 0; } else { vol -= 23; } /* PATH1_VOLUME */ cx25840_write(client, 0x8d4, 228 - (vol * 2)); } static void set_balance(struct i2c_client *client, int balance) { int bal = balance >> 8; if (bal > 0x80) { /* PATH1_BAL_LEFT */ cx25840_and_or(client, 0x8d5, 0x7f, 0x80); /* PATH1_BAL_LEVEL */ cx25840_and_or(client, 0x8d5, ~0x7f, bal & 0x7f); } else { /* PATH1_BAL_LEFT */ cx25840_and_or(client, 0x8d5, 0x7f, 0x00); /* PATH1_BAL_LEVEL */ cx25840_and_or(client, 0x8d5, ~0x7f, 0x80 - bal); } } int cx25840_s_clock_freq(struct v4l2_subdev *sd, u32 freq) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct cx25840_state *state = to_state(sd); int retval; if (!is_cx2583x(state)) cx25840_and_or(client, 0x810, ~0x1, 1); if (state->aud_input != CX25840_AUDIO_SERIAL) { cx25840_and_or(client, 0x803, ~0x10, 0); cx25840_write(client, 0x8d3, 0x1f); } retval = set_audclk_freq(client, freq); if (state->aud_input != CX25840_AUDIO_SERIAL) cx25840_and_or(client, 0x803, ~0x10, 0x10); if (!is_cx2583x(state)) cx25840_and_or(client, 0x810, ~0x1, 0); return retval; } static int cx25840_audio_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd 
= to_sd(ctrl); struct cx25840_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); switch (ctrl->id) { case V4L2_CID_AUDIO_VOLUME: if (state->mute->val) set_volume(client, 0); else set_volume(client, state->volume->val); break; case V4L2_CID_AUDIO_BASS: /* PATH1_EQ_BASS_VOL */ cx25840_and_or(client, 0x8d9, ~0x3f, 48 - (ctrl->val * 48 / 0xffff)); break; case V4L2_CID_AUDIO_TREBLE: /* PATH1_EQ_TREBLE_VOL */ cx25840_and_or(client, 0x8db, ~0x3f, 48 - (ctrl->val * 48 / 0xffff)); break; case V4L2_CID_AUDIO_BALANCE: set_balance(client, ctrl->val); break; default: return -EINVAL; } return 0; } const struct v4l2_ctrl_ops cx25840_audio_ctrl_ops = { .s_ctrl = cx25840_audio_s_ctrl, };
gpl-2.0
revjunkie/kernel-copyleft
net/bridge/netfilter/ebt_mark_m.c
14020
2361
/* * ebt_mark_m * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * * July, 2002 * */ #include <linux/module.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_mark_m.h> static bool ebt_mark_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct ebt_mark_m_info *info = par->matchinfo; if (info->bitmask & EBT_MARK_OR) return !!(skb->mark & info->mask) ^ info->invert; return ((skb->mark & info->mask) == info->mark) ^ info->invert; } static int ebt_mark_mt_check(const struct xt_mtchk_param *par) { const struct ebt_mark_m_info *info = par->matchinfo; if (info->bitmask & ~EBT_MARK_MASK) return -EINVAL; if ((info->bitmask & EBT_MARK_OR) && (info->bitmask & EBT_MARK_AND)) return -EINVAL; if (!info->bitmask) return -EINVAL; return 0; } #ifdef CONFIG_COMPAT struct compat_ebt_mark_m_info { compat_ulong_t mark, mask; uint8_t invert, bitmask; }; static void mark_mt_compat_from_user(void *dst, const void *src) { const struct compat_ebt_mark_m_info *user = src; struct ebt_mark_m_info *kern = dst; kern->mark = user->mark; kern->mask = user->mask; kern->invert = user->invert; kern->bitmask = user->bitmask; } static int mark_mt_compat_to_user(void __user *dst, const void *src) { struct compat_ebt_mark_m_info __user *user = dst; const struct ebt_mark_m_info *kern = src; if (put_user(kern->mark, &user->mark) || put_user(kern->mask, &user->mask) || put_user(kern->invert, &user->invert) || put_user(kern->bitmask, &user->bitmask)) return -EFAULT; return 0; } #endif static struct xt_match ebt_mark_mt_reg __read_mostly = { .name = "mark_m", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_mark_mt, .checkentry = ebt_mark_mt_check, .matchsize = sizeof(struct ebt_mark_m_info), #ifdef CONFIG_COMPAT .compatsize = sizeof(struct compat_ebt_mark_m_info), .compat_from_user = mark_mt_compat_from_user, .compat_to_user = mark_mt_compat_to_user, #endif .me = THIS_MODULE, }; static int __init 
ebt_mark_m_init(void) { return xt_register_match(&ebt_mark_mt_reg); } static void __exit ebt_mark_m_fini(void) { xt_unregister_match(&ebt_mark_mt_reg); } module_init(ebt_mark_m_init); module_exit(ebt_mark_m_fini); MODULE_DESCRIPTION("Ebtables: Packet mark match"); MODULE_LICENSE("GPL");
gpl-2.0
Talustus/dreamkernel-gti9100
arch/arm/plat-s5p/dev-fimd-s5p.c
453
2371
/* linux/arch/arm/plat-s5p/dev-fimd-s5p.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * Base S5P platform device definitions * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/interrupt.h> #include <linux/platform_device.h> #include <mach/map.h> #include <plat/fb-s5p.h> #ifdef CONFIG_FB_S5P static struct resource s3cfb_resource[] = { [0] = { .start = S5P_PA_FIMD0, .end = S5P_PA_FIMD0 + SZ_32K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_FIMD0_VSYNC, .end = IRQ_FIMD0_VSYNC, .flags = IORESOURCE_IRQ, }, [2] = { .start = IRQ_FIMD0_FIFO, .end = IRQ_FIMD0_FIFO, .flags = IORESOURCE_IRQ, }, }; static u64 fb_dma_mask = 0xffffffffUL; struct platform_device s3c_device_fb = { .name = "s3cfb", #if defined(CONFIG_ARCH_EXYNOS4) .id = 0, #else .id = -1, #endif .num_resources = ARRAY_SIZE(s3cfb_resource), .resource = s3cfb_resource, .dev = { .dma_mask = &fb_dma_mask, .coherent_dma_mask = 0xffffffffUL } }; static struct s3c_platform_fb default_fb_data __initdata = { #if defined(CONFIG_ARCH_EXYNOS4) .hw_ver = 0x70, #else .hw_ver = 0x62, #endif .nr_wins = 5, #if defined(CONFIG_FB_S5P_DEFAULT_WINDOW) .default_win = CONFIG_FB_S5P_DEFAULT_WINDOW, #else .default_win = 0, #endif .swap = FB_SWAP_WORD | FB_SWAP_HWORD, }; void __init s3cfb_set_platdata(struct s3c_platform_fb *pd) { struct s3c_platform_fb *npd; int i; if (!pd) pd = &default_fb_data; npd = kmemdup(pd, sizeof(struct s3c_platform_fb), GFP_KERNEL); if (!npd) printk(KERN_ERR "%s: no memory for platform data\n", __func__); else { for (i = 0; i < npd->nr_wins; i++) npd->nr_buffers[i] = 1; #if defined(CONFIG_FB_S5P_NR_BUFFERS) npd->nr_buffers[npd->default_win] = CONFIG_FB_S5P_NR_BUFFERS; #else npd->nr_buffers[npd->default_win] = 1; #endif s3cfb_get_clk_name(npd->clk_name); npd->set_display_path = s3cfb_set_display_path; npd->cfg_gpio = 
s3cfb_cfg_gpio; npd->backlight_on = s3cfb_backlight_on; npd->backlight_off = s3cfb_backlight_off; npd->lcd_on = s3cfb_lcd_on; npd->lcd_off = s3cfb_lcd_off; npd->clk_on = s3cfb_clk_on; npd->clk_off = s3cfb_clk_off; s3c_device_fb.dev.platform_data = npd; } } #endif
gpl-2.0
smaccm/odroid-3.14.y-linaro
arch/um/drivers/ubd_kern.c
453
35354
/* * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) * Licensed under the GPL */ /* 2001-09-28...2002-04-17 * Partition stuff by James_McMechan@hotmail.com * old style ubd by setting UBD_SHIFT to 0 * 2002-09-27...2002-10-18 massive tinkering for 2.5 * partitions have changed in 2.5 * 2003-01-29 more tinkering for 2.5.59-1 * This should now address the sysfs problems and has * the symlink for devfs to allow for booting with * the common /dev/ubd/discX/... names rather than * only /dev/ubdN/discN this version also has lots of * clean ups preparing for ubd-many. * James McMechan */ #define UBD_SHIFT 4 #include <linux/module.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/ata.h> #include <linux/hdreg.h> #include <linux/cdrom.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> #include <asm/tlbflush.h> #include <kern_util.h> #include "mconsole_kern.h" #include <init.h> #include <irq_kern.h> #include "ubd.h" #include <os.h> #include "cow.h" enum ubd_req { UBD_READ, UBD_WRITE, UBD_FLUSH }; struct io_thread_req { struct request *req; enum ubd_req op; int fds[2]; unsigned long offsets[2]; unsigned long long offset; unsigned long length; char *buffer; int sectorsize; unsigned long sector_mask; unsigned long long cow_offset; unsigned long bitmap_words[2]; int error; }; static inline int ubd_test_bit(__u64 bit, unsigned char *data) { __u64 n; int bits, off; bits = sizeof(data[0]) * 8; n = bit / bits; off = bit % bits; return (data[n] & (1 << off)) != 0; } static inline void ubd_set_bit(__u64 bit, unsigned char *data) { __u64 n; int bits, off; bits = sizeof(data[0]) * 8; n = bit / bits; off = bit % bits; data[n] |= (1 << off); } /*End stuff from ubd_user.h*/ #define DRIVER_NAME "uml-blkdev" static DEFINE_MUTEX(ubd_lock); static DEFINE_MUTEX(ubd_mutex); /* replaces BKL, might not be needed */ static int 
ubd_open(struct block_device *bdev, fmode_t mode); static void ubd_release(struct gendisk *disk, fmode_t mode); static int ubd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo); #define MAX_DEV (16) static const struct block_device_operations ubd_blops = { .owner = THIS_MODULE, .open = ubd_open, .release = ubd_release, .ioctl = ubd_ioctl, .getgeo = ubd_getgeo, }; /* Protected by ubd_lock */ static int fake_major = UBD_MAJOR; static struct gendisk *ubd_gendisk[MAX_DEV]; static struct gendisk *fake_gendisk[MAX_DEV]; #ifdef CONFIG_BLK_DEV_UBD_SYNC #define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 1, .c = 0, \ .cl = 1 }) #else #define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 0, .c = 0, \ .cl = 1 }) #endif static struct openflags global_openflags = OPEN_FLAGS; struct cow { /* backing file name */ char *file; /* backing file fd */ int fd; unsigned long *bitmap; unsigned long bitmap_len; int bitmap_offset; int data_offset; }; #define MAX_SG 64 struct ubd { struct list_head restart; /* name (and fd, below) of the file opened for writing, either the * backing or the cow file. 
*/ char *file; int count; int fd; __u64 size; struct openflags boot_openflags; struct openflags openflags; unsigned shared:1; unsigned no_cow:1; struct cow cow; struct platform_device pdev; struct request_queue *queue; spinlock_t lock; struct scatterlist sg[MAX_SG]; struct request *request; int start_sg, end_sg; sector_t rq_pos; }; #define DEFAULT_COW { \ .file = NULL, \ .fd = -1, \ .bitmap = NULL, \ .bitmap_offset = 0, \ .data_offset = 0, \ } #define DEFAULT_UBD { \ .file = NULL, \ .count = 0, \ .fd = -1, \ .size = -1, \ .boot_openflags = OPEN_FLAGS, \ .openflags = OPEN_FLAGS, \ .no_cow = 0, \ .shared = 0, \ .cow = DEFAULT_COW, \ .lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \ .request = NULL, \ .start_sg = 0, \ .end_sg = 0, \ .rq_pos = 0, \ } /* Protected by ubd_lock */ static struct ubd ubd_devs[MAX_DEV] = { [0 ... MAX_DEV - 1] = DEFAULT_UBD }; /* Only changed by fake_ide_setup which is a setup */ static int fake_ide = 0; static struct proc_dir_entry *proc_ide_root = NULL; static struct proc_dir_entry *proc_ide = NULL; static void make_proc_ide(void) { proc_ide_root = proc_mkdir("ide", NULL); proc_ide = proc_mkdir("ide0", proc_ide_root); } static int fake_ide_media_proc_show(struct seq_file *m, void *v) { seq_puts(m, "disk\n"); return 0; } static int fake_ide_media_proc_open(struct inode *inode, struct file *file) { return single_open(file, fake_ide_media_proc_show, NULL); } static const struct file_operations fake_ide_media_proc_fops = { .owner = THIS_MODULE, .open = fake_ide_media_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void make_ide_entries(const char *dev_name) { struct proc_dir_entry *dir, *ent; char name[64]; if(proc_ide_root == NULL) make_proc_ide(); dir = proc_mkdir(dev_name, proc_ide); if(!dir) return; ent = proc_create("media", S_IRUGO, dir, &fake_ide_media_proc_fops); if(!ent) return; snprintf(name, sizeof(name), "ide0/%s", dev_name); proc_symlink(dev_name, proc_ide_root, name); } static int 
fake_ide_setup(char *str) { fake_ide = 1; return 1; } __setup("fake_ide", fake_ide_setup); __uml_help(fake_ide_setup, "fake_ide\n" " Create ide0 entries that map onto ubd devices.\n\n" ); static int parse_unit(char **ptr) { char *str = *ptr, *end; int n = -1; if(isdigit(*str)) { n = simple_strtoul(str, &end, 0); if(end == str) return -1; *ptr = end; } else if (('a' <= *str) && (*str <= 'z')) { n = *str - 'a'; str++; *ptr = str; } return n; } /* If *index_out == -1 at exit, the passed option was a general one; * otherwise, the str pointer is used (and owned) inside ubd_devs array, so it * should not be freed on exit. */ static int ubd_setup_common(char *str, int *index_out, char **error_out) { struct ubd *ubd_dev; struct openflags flags = global_openflags; char *backing_file; int n, err = 0, i; if(index_out) *index_out = -1; n = *str; if(n == '='){ char *end; int major; str++; if(!strcmp(str, "sync")){ global_openflags = of_sync(global_openflags); goto out1; } err = -EINVAL; major = simple_strtoul(str, &end, 0); if((*end != '\0') || (end == str)){ *error_out = "Didn't parse major number"; goto out1; } mutex_lock(&ubd_lock); if (fake_major != UBD_MAJOR) { *error_out = "Can't assign a fake major twice"; goto out1; } fake_major = major; printk(KERN_INFO "Setting extra ubd major number to %d\n", major); err = 0; out1: mutex_unlock(&ubd_lock); return err; } n = parse_unit(&str); if(n < 0){ *error_out = "Couldn't parse device number"; return -EINVAL; } if(n >= MAX_DEV){ *error_out = "Device number out of range"; return 1; } err = -EBUSY; mutex_lock(&ubd_lock); ubd_dev = &ubd_devs[n]; if(ubd_dev->file != NULL){ *error_out = "Device is already configured"; goto out; } if (index_out) *index_out = n; err = -EINVAL; for (i = 0; i < sizeof("rscd="); i++) { switch (*str) { case 'r': flags.w = 0; break; case 's': flags.s = 1; break; case 'd': ubd_dev->no_cow = 1; break; case 'c': ubd_dev->shared = 1; break; case '=': str++; goto break_loop; default: *error_out = "Expected '=' or 
flag letter " "(r, s, c, or d)"; goto out; } str++; } if (*str == '=') *error_out = "Too many flags specified"; else *error_out = "Missing '='"; goto out; break_loop: backing_file = strchr(str, ','); if (backing_file == NULL) backing_file = strchr(str, ':'); if(backing_file != NULL){ if(ubd_dev->no_cow){ *error_out = "Can't specify both 'd' and a cow file"; goto out; } else { *backing_file = '\0'; backing_file++; } } err = 0; ubd_dev->file = str; ubd_dev->cow.file = backing_file; ubd_dev->boot_openflags = flags; out: mutex_unlock(&ubd_lock); return err; } static int ubd_setup(char *str) { char *error; int err; err = ubd_setup_common(str, NULL, &error); if(err) printk(KERN_ERR "Failed to initialize device with \"%s\" : " "%s\n", str, error); return 1; } __setup("ubd", ubd_setup); __uml_help(ubd_setup, "ubd<n><flags>=<filename>[(:|,)<filename2>]\n" " This is used to associate a device with a file in the underlying\n" " filesystem. When specifying two filenames, the first one is the\n" " COW name and the second is the backing file name. As separator you can\n" " use either a ':' or a ',': the first one allows writing things like;\n" " ubd0=~/Uml/root_cow:~/Uml/root_backing_file\n" " while with a ',' the shell would not expand the 2nd '~'.\n" " When using only one filename, UML will detect whether to treat it like\n" " a COW file or a backing file. To override this detection, add the 'd'\n" " flag:\n" " ubd0d=BackingFile\n" " Usually, there is a filesystem in the file, but \n" " that's not required. Swap devices containing swap files can be\n" " specified like this. Also, a file which doesn't contain a\n" " filesystem can have its contents read in the virtual \n" " machine by running 'dd' on the device. <n> must be in the range\n" " 0 to 7. Appending an 'r' to the number will cause that device\n" " to be mounted read-only. For example ubd1r=./ext_fs. 
Appending\n" " an 's' will cause data to be written to disk on the host immediately.\n" " 'c' will cause the device to be treated as being shared between multiple\n" " UMLs and file locking will be turned off - this is appropriate for a\n" " cluster filesystem and inappropriate at almost all other times.\n\n" ); static int udb_setup(char *str) { printk("udb%s specified on command line is almost certainly a ubd -> " "udb TYPO\n", str); return 1; } __setup("udb", udb_setup); __uml_help(udb_setup, "udb\n" " This option is here solely to catch ubd -> udb typos, which can be\n" " to impossible to catch visually unless you specifically look for\n" " them. The only result of any option starting with 'udb' is an error\n" " in the boot output.\n\n" ); static void do_ubd_request(struct request_queue * q); /* Only changed by ubd_init, which is an initcall. */ static int thread_fd = -1; static LIST_HEAD(restart); /* XXX - move this inside ubd_intr. */ /* Called without dev->lock held, and only in interrupt context. */ static void ubd_handler(void) { struct io_thread_req *req; struct ubd *ubd; struct list_head *list, *next_ele; unsigned long flags; int n; while(1){ n = os_read_file(thread_fd, &req, sizeof(struct io_thread_req *)); if(n != sizeof(req)){ if(n == -EAGAIN) break; printk(KERN_ERR "spurious interrupt in ubd_handler, " "err = %d\n", -n); return; } blk_end_request(req->req, 0, req->length); kfree(req); } reactivate_fd(thread_fd, UBD_IRQ); list_for_each_safe(list, next_ele, &restart){ ubd = container_of(list, struct ubd, restart); list_del_init(&ubd->restart); spin_lock_irqsave(&ubd->lock, flags); do_ubd_request(ubd->queue); spin_unlock_irqrestore(&ubd->lock, flags); } } static irqreturn_t ubd_intr(int irq, void *dev) { ubd_handler(); return IRQ_HANDLED; } /* Only changed by ubd_init, which is an initcall. 
*/ static int io_pid = -1; static void kill_io_thread(void) { if(io_pid != -1) os_kill_process(io_pid, 1); } __uml_exitcall(kill_io_thread); static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out) { char *file; int fd; int err; __u32 version; __u32 align; char *backing_file; time_t mtime; unsigned long long size; int sector_size; int bitmap_offset; if (ubd_dev->file && ubd_dev->cow.file) { file = ubd_dev->cow.file; goto out; } fd = os_open_file(ubd_dev->file, of_read(OPENFLAGS()), 0); if (fd < 0) return fd; err = read_cow_header(file_reader, &fd, &version, &backing_file, \ &mtime, &size, &sector_size, &align, &bitmap_offset); os_close_file(fd); if(err == -EINVAL) file = ubd_dev->file; else file = backing_file; out: return os_file_size(file, size_out); } static int read_cow_bitmap(int fd, void *buf, int offset, int len) { int err; err = os_seek_file(fd, offset); if (err < 0) return err; err = os_read_file(fd, buf, len); if (err < 0) return err; return 0; } static int backing_file_mismatch(char *file, __u64 size, time_t mtime) { unsigned long modtime; unsigned long long actual; int err; err = os_file_modtime(file, &modtime); if (err < 0) { printk(KERN_ERR "Failed to get modification time of backing " "file \"%s\", err = %d\n", file, -err); return err; } err = os_file_size(file, &actual); if (err < 0) { printk(KERN_ERR "Failed to get size of backing file \"%s\", " "err = %d\n", file, -err); return err; } if (actual != size) { /*__u64 can be a long on AMD64 and with %lu GCC complains; so * the typecast.*/ printk(KERN_ERR "Size mismatch (%llu vs %llu) of COW header " "vs backing file\n", (unsigned long long) size, actual); return -EINVAL; } if (modtime != mtime) { printk(KERN_ERR "mtime mismatch (%ld vs %ld) of COW header vs " "backing file\n", mtime, modtime); return -EINVAL; } return 0; } static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow) { struct uml_stat buf1, buf2; int err; if (from_cmdline == NULL) return 0; if 
(!strcmp(from_cmdline, from_cow)) return 0; err = os_stat_file(from_cmdline, &buf1); if (err < 0) { printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cmdline, -err); return 0; } err = os_stat_file(from_cow, &buf2); if (err < 0) { printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cow, -err); return 1; } if ((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino)) return 0; printk(KERN_ERR "Backing file mismatch - \"%s\" requested, " "\"%s\" specified in COW header of \"%s\"\n", from_cmdline, from_cow, cow); return 1; } static int open_ubd_file(char *file, struct openflags *openflags, int shared, char **backing_file_out, int *bitmap_offset_out, unsigned long *bitmap_len_out, int *data_offset_out, int *create_cow_out) { time_t mtime; unsigned long long size; __u32 version, align; char *backing_file; int fd, err, sectorsize, asked_switch, mode = 0644; fd = os_open_file(file, *openflags, mode); if (fd < 0) { if ((fd == -ENOENT) && (create_cow_out != NULL)) *create_cow_out = 1; if (!openflags->w || ((fd != -EROFS) && (fd != -EACCES))) return fd; openflags->w = 0; fd = os_open_file(file, *openflags, mode); if (fd < 0) return fd; } if (shared) printk(KERN_INFO "Not locking \"%s\" on the host\n", file); else { err = os_lock_file(fd, openflags->w); if (err < 0) { printk(KERN_ERR "Failed to lock '%s', err = %d\n", file, -err); goto out_close; } } /* Successful return case! */ if (backing_file_out == NULL) return fd; err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime, &size, &sectorsize, &align, bitmap_offset_out); if (err && (*backing_file_out != NULL)) { printk(KERN_ERR "Failed to read COW header from COW file " "\"%s\", errno = %d\n", file, -err); goto out_close; } if (err) return fd; asked_switch = path_requires_switch(*backing_file_out, backing_file, file); /* Allow switching only if no mismatch. 
*/ if (asked_switch && !backing_file_mismatch(*backing_file_out, size, mtime)) { printk(KERN_ERR "Switching backing file to '%s'\n", *backing_file_out); err = write_cow_header(file, fd, *backing_file_out, sectorsize, align, &size); if (err) { printk(KERN_ERR "Switch failed, errno = %d\n", -err); goto out_close; } } else { *backing_file_out = backing_file; err = backing_file_mismatch(*backing_file_out, size, mtime); if (err) goto out_close; } cow_sizes(version, size, sectorsize, align, *bitmap_offset_out, bitmap_len_out, data_offset_out); return fd; out_close: os_close_file(fd); return err; } static int create_cow_file(char *cow_file, char *backing_file, struct openflags flags, int sectorsize, int alignment, int *bitmap_offset_out, unsigned long *bitmap_len_out, int *data_offset_out) { int err, fd; flags.c = 1; fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL); if (fd < 0) { err = fd; printk(KERN_ERR "Open of COW file '%s' failed, errno = %d\n", cow_file, -err); goto out; } err = init_cow_file(fd, cow_file, backing_file, sectorsize, alignment, bitmap_offset_out, bitmap_len_out, data_offset_out); if (!err) return fd; os_close_file(fd); out: return err; } static void ubd_close_dev(struct ubd *ubd_dev) { os_close_file(ubd_dev->fd); if(ubd_dev->cow.file == NULL) return; os_close_file(ubd_dev->cow.fd); vfree(ubd_dev->cow.bitmap); ubd_dev->cow.bitmap = NULL; } static int ubd_open_dev(struct ubd *ubd_dev) { struct openflags flags; char **back_ptr; int err, create_cow, *create_ptr; int fd; ubd_dev->openflags = ubd_dev->boot_openflags; create_cow = 0; create_ptr = (ubd_dev->cow.file != NULL) ? &create_cow : NULL; back_ptr = ubd_dev->no_cow ? 
NULL : &ubd_dev->cow.file; fd = open_ubd_file(ubd_dev->file, &ubd_dev->openflags, ubd_dev->shared, back_ptr, &ubd_dev->cow.bitmap_offset, &ubd_dev->cow.bitmap_len, &ubd_dev->cow.data_offset, create_ptr); if((fd == -ENOENT) && create_cow){ fd = create_cow_file(ubd_dev->file, ubd_dev->cow.file, ubd_dev->openflags, 1 << 9, PAGE_SIZE, &ubd_dev->cow.bitmap_offset, &ubd_dev->cow.bitmap_len, &ubd_dev->cow.data_offset); if(fd >= 0){ printk(KERN_INFO "Creating \"%s\" as COW file for " "\"%s\"\n", ubd_dev->file, ubd_dev->cow.file); } } if(fd < 0){ printk("Failed to open '%s', errno = %d\n", ubd_dev->file, -fd); return fd; } ubd_dev->fd = fd; if(ubd_dev->cow.file != NULL){ blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long)); err = -ENOMEM; ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len); if(ubd_dev->cow.bitmap == NULL){ printk(KERN_ERR "Failed to vmalloc COW bitmap\n"); goto error; } flush_tlb_kernel_vm(); err = read_cow_bitmap(ubd_dev->fd, ubd_dev->cow.bitmap, ubd_dev->cow.bitmap_offset, ubd_dev->cow.bitmap_len); if(err < 0) goto error; flags = ubd_dev->openflags; flags.w = 0; err = open_ubd_file(ubd_dev->cow.file, &flags, ubd_dev->shared, NULL, NULL, NULL, NULL, NULL); if(err < 0) goto error; ubd_dev->cow.fd = err; } return 0; error: os_close_file(ubd_dev->fd); return err; } static void ubd_device_release(struct device *dev) { struct ubd *ubd_dev = dev_get_drvdata(dev); blk_cleanup_queue(ubd_dev->queue); *ubd_dev = ((struct ubd) DEFAULT_UBD); } static int ubd_disk_register(int major, u64 size, int unit, struct gendisk **disk_out) { struct gendisk *disk; disk = alloc_disk(1 << UBD_SHIFT); if(disk == NULL) return -ENOMEM; disk->major = major; disk->first_minor = unit << UBD_SHIFT; disk->fops = &ubd_blops; set_capacity(disk, size / 512); if (major == UBD_MAJOR) sprintf(disk->disk_name, "ubd%c", 'a' + unit); else sprintf(disk->disk_name, "ubd_fake%d", unit); /* sysfs register (not for ide fake devices) */ if (major == UBD_MAJOR) { ubd_devs[unit].pdev.id = unit; 
ubd_devs[unit].pdev.name = DRIVER_NAME; ubd_devs[unit].pdev.dev.release = ubd_device_release; dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]); platform_device_register(&ubd_devs[unit].pdev); disk->driverfs_dev = &ubd_devs[unit].pdev.dev; } disk->private_data = &ubd_devs[unit]; disk->queue = ubd_devs[unit].queue; add_disk(disk); *disk_out = disk; return 0; } #define ROUND_BLOCK(n) ((n + ((1 << 9) - 1)) & (-1 << 9)) static int ubd_add(int n, char **error_out) { struct ubd *ubd_dev = &ubd_devs[n]; int err = 0; if(ubd_dev->file == NULL) goto out; err = ubd_file_size(ubd_dev, &ubd_dev->size); if(err < 0){ *error_out = "Couldn't determine size of device's file"; goto out; } ubd_dev->size = ROUND_BLOCK(ubd_dev->size); INIT_LIST_HEAD(&ubd_dev->restart); sg_init_table(ubd_dev->sg, MAX_SG); err = -ENOMEM; ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock); if (ubd_dev->queue == NULL) { *error_out = "Failed to initialize device queue"; goto out; } ubd_dev->queue->queuedata = ubd_dev; blk_queue_flush(ubd_dev->queue, REQ_FLUSH); blk_queue_max_segments(ubd_dev->queue, MAX_SG); err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]); if(err){ *error_out = "Failed to register device"; goto out_cleanup; } if (fake_major != UBD_MAJOR) ubd_disk_register(fake_major, ubd_dev->size, n, &fake_gendisk[n]); /* * Perhaps this should also be under the "if (fake_major)" above * using the fake_disk->disk_name */ if (fake_ide) make_ide_entries(ubd_gendisk[n]->disk_name); err = 0; out: return err; out_cleanup: blk_cleanup_queue(ubd_dev->queue); goto out; } static int ubd_config(char *str, char **error_out) { int n, ret; /* This string is possibly broken up and stored, so it's only * freed if ubd_setup_common fails, or if only general options * were set. 
*/ str = kstrdup(str, GFP_KERNEL); if (str == NULL) { *error_out = "Failed to allocate memory"; return -ENOMEM; } ret = ubd_setup_common(str, &n, error_out); if (ret) goto err_free; if (n == -1) { ret = 0; goto err_free; } mutex_lock(&ubd_lock); ret = ubd_add(n, error_out); if (ret) ubd_devs[n].file = NULL; mutex_unlock(&ubd_lock); out: return ret; err_free: kfree(str); goto out; } static int ubd_get_config(char *name, char *str, int size, char **error_out) { struct ubd *ubd_dev; int n, len = 0; n = parse_unit(&name); if((n >= MAX_DEV) || (n < 0)){ *error_out = "ubd_get_config : device number out of range"; return -1; } ubd_dev = &ubd_devs[n]; mutex_lock(&ubd_lock); if(ubd_dev->file == NULL){ CONFIG_CHUNK(str, size, len, "", 1); goto out; } CONFIG_CHUNK(str, size, len, ubd_dev->file, 0); if(ubd_dev->cow.file != NULL){ CONFIG_CHUNK(str, size, len, ",", 0); CONFIG_CHUNK(str, size, len, ubd_dev->cow.file, 1); } else CONFIG_CHUNK(str, size, len, "", 1); out: mutex_unlock(&ubd_lock); return len; } static int ubd_id(char **str, int *start_out, int *end_out) { int n; n = parse_unit(str); *start_out = 0; *end_out = MAX_DEV - 1; return n; } static int ubd_remove(int n, char **error_out) { struct gendisk *disk = ubd_gendisk[n]; struct ubd *ubd_dev; int err = -ENODEV; mutex_lock(&ubd_lock); ubd_dev = &ubd_devs[n]; if(ubd_dev->file == NULL) goto out; /* you cannot remove a open disk */ err = -EBUSY; if(ubd_dev->count > 0) goto out; ubd_gendisk[n] = NULL; if(disk != NULL){ del_gendisk(disk); put_disk(disk); } if(fake_gendisk[n] != NULL){ del_gendisk(fake_gendisk[n]); put_disk(fake_gendisk[n]); fake_gendisk[n] = NULL; } err = 0; platform_device_unregister(&ubd_dev->pdev); out: mutex_unlock(&ubd_lock); return err; } /* All these are called by mconsole in process context and without * ubd-specific locks. The structure itself is const except for .list. 
*/ static struct mc_device ubd_mc = { .list = LIST_HEAD_INIT(ubd_mc.list), .name = "ubd", .config = ubd_config, .get_config = ubd_get_config, .id = ubd_id, .remove = ubd_remove, }; static int __init ubd_mc_init(void) { mconsole_register_dev(&ubd_mc); return 0; } __initcall(ubd_mc_init); static int __init ubd0_init(void) { struct ubd *ubd_dev = &ubd_devs[0]; mutex_lock(&ubd_lock); if(ubd_dev->file == NULL) ubd_dev->file = "root_fs"; mutex_unlock(&ubd_lock); return 0; } __initcall(ubd0_init); /* Used in ubd_init, which is an initcall */ static struct platform_driver ubd_driver = { .driver = { .name = DRIVER_NAME, }, }; static int __init ubd_init(void) { char *error; int i, err; if (register_blkdev(UBD_MAJOR, "ubd")) return -1; if (fake_major != UBD_MAJOR) { char name[sizeof("ubd_nnn\0")]; snprintf(name, sizeof(name), "ubd_%d", fake_major); if (register_blkdev(fake_major, "ubd")) return -1; } platform_driver_register(&ubd_driver); mutex_lock(&ubd_lock); for (i = 0; i < MAX_DEV; i++){ err = ubd_add(i, &error); if(err) printk(KERN_ERR "Failed to initialize ubd device %d :" "%s\n", i, error); } mutex_unlock(&ubd_lock); return 0; } late_initcall(ubd_init); static int __init ubd_driver_init(void){ unsigned long stack; int err; /* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/ if(global_openflags.s){ printk(KERN_INFO "ubd: Synchronous mode\n"); /* Letting ubd=sync be like using ubd#s= instead of ubd#= is * enough. So use anyway the io thread. 
*/ } stack = alloc_stack(0, 0); io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *), &thread_fd); if(io_pid < 0){ printk(KERN_ERR "ubd : Failed to start I/O thread (errno = %d) - " "falling back to synchronous I/O\n", -io_pid); io_pid = -1; return 0; } err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr, 0, "ubd", ubd_devs); if(err != 0) printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err); return 0; } device_initcall(ubd_driver_init); static int ubd_open(struct block_device *bdev, fmode_t mode) { struct gendisk *disk = bdev->bd_disk; struct ubd *ubd_dev = disk->private_data; int err = 0; mutex_lock(&ubd_mutex); if(ubd_dev->count == 0){ err = ubd_open_dev(ubd_dev); if(err){ printk(KERN_ERR "%s: Can't open \"%s\": errno = %d\n", disk->disk_name, ubd_dev->file, -err); goto out; } } ubd_dev->count++; set_disk_ro(disk, !ubd_dev->openflags.w); /* This should no more be needed. And it didn't work anyway to exclude * read-write remounting of filesystems.*/ /*if((mode & FMODE_WRITE) && !ubd_dev->openflags.w){ if(--ubd_dev->count == 0) ubd_close_dev(ubd_dev); err = -EROFS; }*/ out: mutex_unlock(&ubd_mutex); return err; } static void ubd_release(struct gendisk *disk, fmode_t mode) { struct ubd *ubd_dev = disk->private_data; mutex_lock(&ubd_mutex); if(--ubd_dev->count == 0) ubd_close_dev(ubd_dev); mutex_unlock(&ubd_mutex); } static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask, __u64 *cow_offset, unsigned long *bitmap, __u64 bitmap_offset, unsigned long *bitmap_words, __u64 bitmap_len) { __u64 sector = io_offset >> 9; int i, update_bitmap = 0; for(i = 0; i < length >> 9; i++){ if(cow_mask != NULL) ubd_set_bit(i, (unsigned char *) cow_mask); if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) continue; update_bitmap = 1; ubd_set_bit(sector + i, (unsigned char *) bitmap); } if(!update_bitmap) return; *cow_offset = sector / (sizeof(unsigned long) * 8); /* This takes care of the case where we're exactly at the end of the * 
device, and *cow_offset + 1 is off the end. So, just back it up * by one word. Thanks to Lynn Kerby for the fix and James McMechan * for the original diagnosis. */ if (*cow_offset == (DIV_ROUND_UP(bitmap_len, sizeof(unsigned long)) - 1)) (*cow_offset)--; bitmap_words[0] = bitmap[*cow_offset]; bitmap_words[1] = bitmap[*cow_offset + 1]; *cow_offset *= sizeof(unsigned long); *cow_offset += bitmap_offset; } static void cowify_req(struct io_thread_req *req, unsigned long *bitmap, __u64 bitmap_offset, __u64 bitmap_len) { __u64 sector = req->offset >> 9; int i; if(req->length > (sizeof(req->sector_mask) * 8) << 9) panic("Operation too long"); if(req->op == UBD_READ) { for(i = 0; i < req->length >> 9; i++){ if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) ubd_set_bit(i, (unsigned char *) &req->sector_mask); } } else cowify_bitmap(req->offset, req->length, &req->sector_mask, &req->cow_offset, bitmap, bitmap_offset, req->bitmap_words, bitmap_len); } /* Called with dev->lock held */ static void prepare_request(struct request *req, struct io_thread_req *io_req, unsigned long long offset, int page_offset, int len, struct page *page) { struct gendisk *disk = req->rq_disk; struct ubd *ubd_dev = disk->private_data; io_req->req = req; io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd : ubd_dev->fd; io_req->fds[1] = ubd_dev->fd; io_req->cow_offset = -1; io_req->offset = offset; io_req->length = len; io_req->error = 0; io_req->sector_mask = 0; io_req->op = (rq_data_dir(req) == READ) ? 
UBD_READ : UBD_WRITE; io_req->offsets[0] = 0; io_req->offsets[1] = ubd_dev->cow.data_offset; io_req->buffer = page_address(page) + page_offset; io_req->sectorsize = 1 << 9; if(ubd_dev->cow.file != NULL) cowify_req(io_req, ubd_dev->cow.bitmap, ubd_dev->cow.bitmap_offset, ubd_dev->cow.bitmap_len); } /* Called with dev->lock held */ static void prepare_flush_request(struct request *req, struct io_thread_req *io_req) { struct gendisk *disk = req->rq_disk; struct ubd *ubd_dev = disk->private_data; io_req->req = req; io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd : ubd_dev->fd; io_req->op = UBD_FLUSH; } static bool submit_request(struct io_thread_req *io_req, struct ubd *dev) { int n = os_write_file(thread_fd, &io_req, sizeof(io_req)); if (n != sizeof(io_req)) { if (n != -EAGAIN) printk("write to io thread failed, " "errno = %d\n", -n); else if (list_empty(&dev->restart)) list_add(&dev->restart, &restart); kfree(io_req); return false; } return true; } /* Called with dev->lock held */ static void do_ubd_request(struct request_queue *q) { struct io_thread_req *io_req; struct request *req; while(1){ struct ubd *dev = q->queuedata; if(dev->end_sg == 0){ struct request *req = blk_fetch_request(q); if(req == NULL) return; dev->request = req; dev->rq_pos = blk_rq_pos(req); dev->start_sg = 0; dev->end_sg = blk_rq_map_sg(q, req, dev->sg); } req = dev->request; if (req->cmd_flags & REQ_FLUSH) { io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC); if (io_req == NULL) { if (list_empty(&dev->restart)) list_add(&dev->restart, &restart); return; } prepare_flush_request(req, io_req); submit_request(io_req, dev); } while(dev->start_sg < dev->end_sg){ struct scatterlist *sg = &dev->sg[dev->start_sg]; io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC); if(io_req == NULL){ if(list_empty(&dev->restart)) list_add(&dev->restart, &restart); return; } prepare_request(req, io_req, (unsigned long long)dev->rq_pos << 9, sg->offset, sg->length, sg_page(sg)); if 
(submit_request(io_req, dev) == false) return; dev->rq_pos += sg->length >> 9; dev->start_sg++; } dev->end_sg = 0; dev->request = NULL; } } static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct ubd *ubd_dev = bdev->bd_disk->private_data; geo->heads = 128; geo->sectors = 32; geo->cylinders = ubd_dev->size / (128 * 32 * 512); return 0; } static int ubd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct ubd *ubd_dev = bdev->bd_disk->private_data; u16 ubd_id[ATA_ID_WORDS]; switch (cmd) { struct cdrom_volctrl volume; case HDIO_GET_IDENTITY: memset(&ubd_id, 0, ATA_ID_WORDS * 2); ubd_id[ATA_ID_CYLS] = ubd_dev->size / (128 * 32 * 512); ubd_id[ATA_ID_HEADS] = 128; ubd_id[ATA_ID_SECTORS] = 32; if(copy_to_user((char __user *) arg, (char *) &ubd_id, sizeof(ubd_id))) return -EFAULT; return 0; case CDROMVOLREAD: if(copy_from_user(&volume, (char __user *) arg, sizeof(volume))) return -EFAULT; volume.channel0 = 255; volume.channel1 = 255; volume.channel2 = 255; volume.channel3 = 255; if(copy_to_user((char __user *) arg, &volume, sizeof(volume))) return -EFAULT; return 0; } return -EINVAL; } static int update_bitmap(struct io_thread_req *req) { int n; if(req->cow_offset == -1) return 0; n = os_seek_file(req->fds[1], req->cow_offset); if(n < 0){ printk("do_io - bitmap lseek failed : err = %d\n", -n); return 1; } n = os_write_file(req->fds[1], &req->bitmap_words, sizeof(req->bitmap_words)); if(n != sizeof(req->bitmap_words)){ printk("do_io - bitmap update failed, err = %d fd = %d\n", -n, req->fds[1]); return 1; } return 0; } static void do_io(struct io_thread_req *req) { char *buf; unsigned long len; int n, nsectors, start, end, bit; int err; __u64 off; if (req->op == UBD_FLUSH) { /* fds[0] is always either the rw image or our cow file */ n = os_sync_file(req->fds[0]); if (n != 0) { printk("do_io - sync failed err = %d " "fd = %d\n", -n, req->fds[0]); req->error = 1; } return; } nsectors = req->length / 
req->sectorsize; start = 0; do { bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask); end = start; while((end < nsectors) && (ubd_test_bit(end, (unsigned char *) &req->sector_mask) == bit)) end++; off = req->offset + req->offsets[bit] + start * req->sectorsize; len = (end - start) * req->sectorsize; buf = &req->buffer[start * req->sectorsize]; err = os_seek_file(req->fds[bit], off); if(err < 0){ printk("do_io - lseek failed : err = %d\n", -err); req->error = 1; return; } if(req->op == UBD_READ){ n = 0; do { buf = &buf[n]; len -= n; n = os_read_file(req->fds[bit], buf, len); if (n < 0) { printk("do_io - read failed, err = %d " "fd = %d\n", -n, req->fds[bit]); req->error = 1; return; } } while((n < len) && (n != 0)); if (n < len) memset(&buf[n], 0, len - n); } else { n = os_write_file(req->fds[bit], buf, len); if(n != len){ printk("do_io - write failed err = %d " "fd = %d\n", -n, req->fds[bit]); req->error = 1; return; } } start = end; } while(start < nsectors); req->error = update_bitmap(req); } /* Changed in start_io_thread, which is serialized by being called only * from ubd_init, which is an initcall. */ int kernel_fd = -1; /* Only changed by the io thread. XXX: currently unused. */ static int io_count = 0; int io_thread(void *arg) { struct io_thread_req *req; int n; os_fix_helper_signals(); while(1){ n = os_read_file(kernel_fd, &req, sizeof(struct io_thread_req *)); if(n != sizeof(struct io_thread_req *)){ if(n < 0) printk("io_thread - read failed, fd = %d, " "err = %d\n", kernel_fd, -n); else { printk("io_thread - short read, fd = %d, " "length = %d\n", kernel_fd, n); } continue; } io_count++; do_io(req); n = os_write_file(kernel_fd, &req, sizeof(struct io_thread_req *)); if(n != sizeof(struct io_thread_req *)) printk("io_thread - write failed, fd = %d, err = %d\n", kernel_fd, -n); } return 0; }
gpl-2.0
rmcc/gp_one_kernel
arch/s390/mm/pgtable.c
453
8572
/* * Copyright IBM Corp. 2007,2009 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/highmem.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/spinlock.h> #include <linux/module.h> #include <linux/quicklist.h> #include <asm/system.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #ifndef CONFIG_64BIT #define ALLOC_ORDER 1 #define TABLES_PER_PAGE 4 #define FRAG_MASK 15UL #define SECOND_HALVES 10UL void clear_table_pgstes(unsigned long *table) { clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4); memset(table + 256, 0, PAGE_SIZE/4); clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4); memset(table + 768, 0, PAGE_SIZE/4); } #else #define ALLOC_ORDER 2 #define TABLES_PER_PAGE 2 #define FRAG_MASK 3UL #define SECOND_HALVES 2UL void clear_table_pgstes(unsigned long *table) { clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); memset(table + 256, 0, PAGE_SIZE/2); } #endif unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE; EXPORT_SYMBOL(VMALLOC_START); static int __init parse_vmalloc(char *arg) { if (!arg) return -EINVAL; VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK; return 0; } early_param("vmalloc", parse_vmalloc); unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) { struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); if (!page) return NULL; page->index = 0; if (noexec) { struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER); if (!shadow) { __free_pages(page, ALLOC_ORDER); return NULL; } page->index = page_to_phys(shadow); } spin_lock(&mm->context.list_lock); list_add(&page->lru, &mm->context.crst_list); spin_unlock(&mm->context.list_lock); return (unsigned long *) page_to_phys(page); } void crst_table_free(struct mm_struct *mm, unsigned long *table) { 
unsigned long *shadow = get_shadow_table(table); struct page *page = virt_to_page(table); spin_lock(&mm->context.list_lock); list_del(&page->lru); spin_unlock(&mm->context.list_lock); if (shadow) free_pages((unsigned long) shadow, ALLOC_ORDER); free_pages((unsigned long) table, ALLOC_ORDER); } #ifdef CONFIG_64BIT int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) { unsigned long *table, *pgd; unsigned long entry; BUG_ON(limit > (1UL << 53)); repeat: table = crst_table_alloc(mm, mm->context.noexec); if (!table) return -ENOMEM; spin_lock(&mm->page_table_lock); if (mm->context.asce_limit < limit) { pgd = (unsigned long *) mm->pgd; if (mm->context.asce_limit <= (1UL << 31)) { entry = _REGION3_ENTRY_EMPTY; mm->context.asce_limit = 1UL << 42; mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_REGION3; } else { entry = _REGION2_ENTRY_EMPTY; mm->context.asce_limit = 1UL << 53; mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_REGION2; } crst_table_init(table, entry); pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); mm->pgd = (pgd_t *) table; mm->task_size = mm->context.asce_limit; table = NULL; } spin_unlock(&mm->page_table_lock); if (table) crst_table_free(mm, table); if (mm->context.asce_limit < limit) goto repeat; update_mm(mm, current); return 0; } void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) { pgd_t *pgd; if (mm->context.asce_limit <= limit) return; __tlb_flush_mm(mm); while (mm->context.asce_limit > limit) { pgd = mm->pgd; switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { case _REGION_ENTRY_TYPE_R2: mm->context.asce_limit = 1UL << 42; mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_REGION3; break; case _REGION_ENTRY_TYPE_R3: mm->context.asce_limit = 1UL << 31; mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; break; default: BUG(); } mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); mm->task_size = 
mm->context.asce_limit; crst_table_free(mm, (unsigned long *) pgd); } update_mm(mm, current); } #endif /* * page table entry allocation/free routines. */ unsigned long *page_table_alloc(struct mm_struct *mm) { struct page *page; unsigned long *table; unsigned long bits; bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; spin_lock(&mm->context.list_lock); page = NULL; if (!list_empty(&mm->context.pgtable_list)) { page = list_first_entry(&mm->context.pgtable_list, struct page, lru); if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) page = NULL; } if (!page) { spin_unlock(&mm->context.list_lock); page = alloc_page(GFP_KERNEL|__GFP_REPEAT); if (!page) return NULL; pgtable_page_ctor(page); page->flags &= ~FRAG_MASK; table = (unsigned long *) page_to_phys(page); if (mm->context.has_pgste) clear_table_pgstes(table); else clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); spin_lock(&mm->context.list_lock); list_add(&page->lru, &mm->context.pgtable_list); } table = (unsigned long *) page_to_phys(page); while (page->flags & bits) { table += 256; bits <<= 1; } page->flags |= bits; if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) list_move_tail(&page->lru, &mm->context.pgtable_list); spin_unlock(&mm->context.list_lock); return table; } void page_table_free(struct mm_struct *mm, unsigned long *table) { struct page *page; unsigned long bits; bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); page = pfn_to_page(__pa(table) >> PAGE_SHIFT); spin_lock(&mm->context.list_lock); page->flags ^= bits; if (page->flags & FRAG_MASK) { /* Page now has some free pgtable fragments. */ list_move(&page->lru, &mm->context.pgtable_list); page = NULL; } else /* All fragments of the 4K page have been freed. 
*/ list_del(&page->lru); spin_unlock(&mm->context.list_lock); if (page) { pgtable_page_dtor(page); __free_page(page); } } void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) { struct page *page; spin_lock(&mm->context.list_lock); /* Free shadow region and segment tables. */ list_for_each_entry(page, &mm->context.crst_list, lru) if (page->index) { free_pages((unsigned long) page->index, ALLOC_ORDER); page->index = 0; } /* "Free" second halves of page tables. */ list_for_each_entry(page, &mm->context.pgtable_list, lru) page->flags &= ~SECOND_HALVES; spin_unlock(&mm->context.list_lock); mm->context.noexec = 0; update_mm(mm, tsk); } /* * switch on pgstes for its userspace process (for kvm) */ int s390_enable_sie(void) { struct task_struct *tsk = current; struct mm_struct *mm, *old_mm; /* Do we have switched amode? If no, we cannot do sie */ if (!switch_amode) return -EINVAL; /* Do we have pgstes? if yes, we are done */ if (tsk->mm->context.has_pgste) return 0; /* lets check if we are allowed to replace the mm */ task_lock(tsk); if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || #ifdef CONFIG_AIO !hlist_empty(&tsk->mm->ioctx_list) || #endif tsk->mm != tsk->active_mm) { task_unlock(tsk); return -EINVAL; } task_unlock(tsk); /* we copy the mm and let dup_mm create the page tables with_pgstes */ tsk->mm->context.alloc_pgste = 1; mm = dup_mm(tsk); tsk->mm->context.alloc_pgste = 0; if (!mm) return -ENOMEM; /* Now lets check again if something happened */ task_lock(tsk); if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || #ifdef CONFIG_AIO !hlist_empty(&tsk->mm->ioctx_list) || #endif tsk->mm != tsk->active_mm) { mmput(mm); task_unlock(tsk); return -EINVAL; } /* ok, we are alone. No ptrace, no threads, etc. 
*/ old_mm = tsk->mm; tsk->mm = tsk->active_mm = mm; preempt_disable(); update_mm(mm, tsk); cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); preempt_enable(); task_unlock(tsk); mmput(old_mm); return 0; } EXPORT_SYMBOL_GPL(s390_enable_sie); #if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION) bool kernel_page_present(struct page *page) { unsigned long addr; int cc; addr = page_to_phys(page); asm volatile( " lra %1,0(%1)\n" " ipm %0\n" " srl %0,28" : "=d" (cc), "+a" (addr) : : "cc"); return cc == 0; } #endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
gpl-2.0
lategoodbye/linux-lcd6610
drivers/clk/clk-twl6040.c
453
2996
/* * TWL6040 clock module driver for OMAP4 McPDM functional clock * * Copyright (C) 2012 Texas Instruments Inc. * Peter Ujfalusi <peter.ujfalusi@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/mfd/twl6040.h> #include <linux/clk-provider.h> struct twl6040_clk { struct twl6040 *twl6040; struct device *dev; struct clk_hw mcpdm_fclk; struct clk *clk; int enabled; }; static int twl6040_bitclk_is_enabled(struct clk_hw *hw) { struct twl6040_clk *twl6040_clk = container_of(hw, struct twl6040_clk, mcpdm_fclk); return twl6040_clk->enabled; } static int twl6040_bitclk_prepare(struct clk_hw *hw) { struct twl6040_clk *twl6040_clk = container_of(hw, struct twl6040_clk, mcpdm_fclk); int ret; ret = twl6040_power(twl6040_clk->twl6040, 1); if (!ret) twl6040_clk->enabled = 1; return ret; } static void twl6040_bitclk_unprepare(struct clk_hw *hw) { struct twl6040_clk *twl6040_clk = container_of(hw, struct twl6040_clk, mcpdm_fclk); int ret; ret = twl6040_power(twl6040_clk->twl6040, 0); if (!ret) twl6040_clk->enabled = 0; } static const struct clk_ops twl6040_mcpdm_ops = { .is_enabled = twl6040_bitclk_is_enabled, .prepare = twl6040_bitclk_prepare, .unprepare = twl6040_bitclk_unprepare, }; static struct clk_init_data wm831x_clkout_init = { .name = "mcpdm_fclk", .ops = &twl6040_mcpdm_ops, .flags = 
CLK_IS_ROOT, }; static int twl6040_clk_probe(struct platform_device *pdev) { struct twl6040 *twl6040 = dev_get_drvdata(pdev->dev.parent); struct twl6040_clk *clkdata; clkdata = devm_kzalloc(&pdev->dev, sizeof(*clkdata), GFP_KERNEL); if (!clkdata) return -ENOMEM; clkdata->dev = &pdev->dev; clkdata->twl6040 = twl6040; clkdata->mcpdm_fclk.init = &wm831x_clkout_init; clkdata->clk = devm_clk_register(&pdev->dev, &clkdata->mcpdm_fclk); if (IS_ERR(clkdata->clk)) return PTR_ERR(clkdata->clk); platform_set_drvdata(pdev, clkdata); return 0; } static struct platform_driver twl6040_clk_driver = { .driver = { .name = "twl6040-clk", }, .probe = twl6040_clk_probe, }; module_platform_driver(twl6040_clk_driver); MODULE_DESCRIPTION("TWL6040 clock driver for McPDM functional clock"); MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>"); MODULE_ALIAS("platform:twl6040-clk"); MODULE_LICENSE("GPL");
gpl-2.0
ravendra275/sony_kernel_msm8960t
drivers/video/msm/vidc/common/vcd/vcd_power_sm.c
453
9324
/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <media/msm/vidc_type.h> #include "vcd_power_sm.h" #include "vcd_core.h" #include "vcd.h" #include "vcd_res_tracker.h" u32 vcd_power_event( struct vcd_dev_ctxt *dev_ctxt, struct vcd_clnt_ctxt *cctxt, u32 event) { u32 rc = VCD_S_SUCCESS; VCD_MSG_MED("Device power state = %d", dev_ctxt->pwr_clk_state); VCD_MSG_MED("event = 0x%x", event); switch (event) { case VCD_EVT_PWR_DEV_INIT_BEGIN: case VCD_EVT_PWR_DEV_INIT_END: case VCD_EVT_PWR_DEV_INIT_FAIL: case VCD_EVT_PWR_DEV_TERM_BEGIN: case VCD_EVT_PWR_DEV_TERM_END: case VCD_EVT_PWR_DEV_TERM_FAIL: case VCD_EVT_PWR_DEV_SLEEP_BEGIN: case VCD_EVT_PWR_DEV_SLEEP_END: case VCD_EVT_PWR_DEV_SET_PERFLVL: case VCD_EVT_PWR_DEV_HWTIMEOUT: { rc = vcd_device_power_event(dev_ctxt, event, cctxt); break; } case VCD_EVT_PWR_CLNT_CMD_BEGIN: case VCD_EVT_PWR_CLNT_CMD_END: case VCD_EVT_PWR_CLNT_CMD_FAIL: case VCD_EVT_PWR_CLNT_PAUSE: case VCD_EVT_PWR_CLNT_RESUME: case VCD_EVT_PWR_CLNT_FIRST_FRAME: case VCD_EVT_PWR_CLNT_LAST_FRAME: case VCD_EVT_PWR_CLNT_ERRFATAL: { rc = vcd_client_power_event(dev_ctxt, cctxt, event); break; } } if (VCD_FAILED(rc)) VCD_MSG_ERROR("vcd_power_event: event 0x%x failed", event); return rc; } u32 vcd_device_power_event(struct vcd_dev_ctxt *dev_ctxt, u32 event, struct vcd_clnt_ctxt *cctxt) { u32 rc = VCD_ERR_FAIL; u32 set_perf_lvl; switch (event) { case VCD_EVT_PWR_DEV_INIT_BEGIN: { if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) { if (res_trk_get_max_perf_level(&dev_ctxt-> max_perf_lvl)) { if 
(res_trk_power_up()) { dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_NOTCLOCKED; dev_ctxt->curr_perf_lvl = 0; dev_ctxt->reqd_perf_lvl = 0; dev_ctxt->active_clnts = 0; dev_ctxt-> set_perf_lvl_pending = false; rc = vcd_enable_clock(dev_ctxt, cctxt); if (VCD_FAILED(rc)) { (void)res_trk_power_down(); dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_OFF; } } } } break; } case VCD_EVT_PWR_DEV_INIT_END: case VCD_EVT_PWR_DEV_TERM_FAIL: case VCD_EVT_PWR_DEV_SLEEP_BEGIN: case VCD_EVT_PWR_DEV_HWTIMEOUT: { rc = vcd_gate_clock(dev_ctxt); break; } case VCD_EVT_PWR_DEV_INIT_FAIL: case VCD_EVT_PWR_DEV_TERM_END: { if (dev_ctxt->pwr_clk_state != VCD_PWRCLK_STATE_OFF) { (void)vcd_disable_clock(dev_ctxt); (void)res_trk_power_down(); dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_OFF; dev_ctxt->curr_perf_lvl = 0; dev_ctxt->reqd_perf_lvl = 0; dev_ctxt->active_clnts = 0; dev_ctxt->set_perf_lvl_pending = false; rc = VCD_S_SUCCESS; } break; } case VCD_EVT_PWR_DEV_TERM_BEGIN: case VCD_EVT_PWR_DEV_SLEEP_END: { rc = vcd_un_gate_clock(dev_ctxt); break; } case VCD_EVT_PWR_DEV_SET_PERFLVL: { set_perf_lvl = dev_ctxt->reqd_perf_lvl > 0 ? 
dev_ctxt-> reqd_perf_lvl : VCD_MIN_PERF_LEVEL; rc = vcd_set_perf_level(dev_ctxt, set_perf_lvl); break; } } return rc; } u32 vcd_client_power_event( struct vcd_dev_ctxt *dev_ctxt, struct vcd_clnt_ctxt *cctxt, u32 event) { u32 rc = VCD_ERR_FAIL; switch (event) { case VCD_EVT_PWR_CLNT_CMD_BEGIN: { rc = vcd_un_gate_clock(dev_ctxt); break; } case VCD_EVT_PWR_CLNT_CMD_END: { rc = vcd_gate_clock(dev_ctxt); break; } case VCD_EVT_PWR_CLNT_CMD_FAIL: { if (!vcd_core_is_busy(dev_ctxt)) rc = vcd_gate_clock(dev_ctxt); break; } case VCD_EVT_PWR_CLNT_PAUSE: case VCD_EVT_PWR_CLNT_LAST_FRAME: case VCD_EVT_PWR_CLNT_ERRFATAL: { if (cctxt) { rc = VCD_S_SUCCESS; if (cctxt->status.req_perf_lvl) { dev_ctxt->reqd_perf_lvl -= cctxt->reqd_perf_lvl; cctxt->status.req_perf_lvl = false; rc = vcd_set_perf_level(dev_ctxt, dev_ctxt->reqd_perf_lvl); } } break; } case VCD_EVT_PWR_CLNT_RESUME: case VCD_EVT_PWR_CLNT_FIRST_FRAME: { if (cctxt) { rc = VCD_S_SUCCESS; if (!cctxt->status.req_perf_lvl) { dev_ctxt->reqd_perf_lvl += cctxt->reqd_perf_lvl; cctxt->status.req_perf_lvl = true; rc = vcd_set_perf_level(dev_ctxt, dev_ctxt->reqd_perf_lvl); } } break; } } return rc; } u32 vcd_enable_clock(struct vcd_dev_ctxt *dev_ctxt, struct vcd_clnt_ctxt *cctxt) { u32 rc = VCD_S_SUCCESS; u32 set_perf_lvl; if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) { VCD_MSG_ERROR("vcd_enable_clock(): Already in state " "VCD_PWRCLK_STATE_OFF\n"); rc = VCD_ERR_FAIL; } else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) { set_perf_lvl = dev_ctxt->reqd_perf_lvl > 0 ? 
dev_ctxt-> reqd_perf_lvl : VCD_MIN_PERF_LEVEL; rc = vcd_set_perf_level(dev_ctxt, set_perf_lvl); if (!VCD_FAILED(rc)) { if (res_trk_enable_clocks()) { dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKED; } } else { rc = VCD_ERR_FAIL; } } if (!VCD_FAILED(rc)) dev_ctxt->active_clnts++; return rc; } u32 vcd_disable_clock(struct vcd_dev_ctxt *dev_ctxt) { u32 rc = VCD_S_SUCCESS; if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) { VCD_MSG_ERROR("vcd_disable_clock(): Already in state " "VCD_PWRCLK_STATE_OFF\n"); rc = VCD_ERR_FAIL; } else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKED || dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKGATED) { dev_ctxt->active_clnts--; if (!dev_ctxt->active_clnts) { if (!res_trk_disable_clocks()) rc = VCD_ERR_FAIL; dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_NOTCLOCKED; dev_ctxt->curr_perf_lvl = 0; } } return rc; } u32 vcd_set_perf_level(struct vcd_dev_ctxt *dev_ctxt, u32 perf_lvl) { u32 rc = VCD_S_SUCCESS; if (!vcd_core_is_busy(dev_ctxt)) { if (res_trk_set_perf_level(perf_lvl, &dev_ctxt->curr_perf_lvl, dev_ctxt)) { dev_ctxt->set_perf_lvl_pending = false; } else { rc = VCD_ERR_FAIL; dev_ctxt->set_perf_lvl_pending = true; } } else { dev_ctxt->set_perf_lvl_pending = true; } return rc; } u32 vcd_set_perf_turbo_level(struct vcd_clnt_ctxt *cctxt) { u32 rc = VCD_S_SUCCESS; #ifdef CONFIG_MSM_BUS_SCALING struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; pr_err("\n Setting Turbo mode !!"); if (res_trk_update_bus_perf_level(dev_ctxt, RESTRK_1080P_TURBO_PERF_LEVEL) < 0) { pr_err("\n %s(): update buf perf level failed\n", __func__); return false; } dev_ctxt->curr_perf_lvl = RESTRK_1080P_TURBO_PERF_LEVEL; vcd_update_decoder_perf_level(dev_ctxt, RESTRK_1080P_TURBO_PERF_LEVEL); #endif return rc; } u32 vcd_update_decoder_perf_level(struct vcd_dev_ctxt *dev_ctxt, u32 perf_lvl) { u32 rc = VCD_S_SUCCESS; if (res_trk_set_perf_level(perf_lvl, &dev_ctxt->curr_perf_lvl, dev_ctxt)) { dev_ctxt->set_perf_lvl_pending = false; } else { rc = 
VCD_ERR_FAIL; dev_ctxt->set_perf_lvl_pending = true; } return rc; } u32 vcd_update_clnt_perf_lvl( struct vcd_clnt_ctxt *cctxt, struct vcd_property_frame_rate *fps, u32 frm_p_units) { u32 rc = VCD_S_SUCCESS; struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; u32 new_perf_lvl; new_perf_lvl = frm_p_units *\ (fps->fps_numerator / fps->fps_denominator); if ((fps->fps_numerator * 1000) / fps->fps_denominator > VCD_MAXPERF_FPS_THRESHOLD_X_1000) { u32 max_perf_level = 0; if (res_trk_get_max_perf_level(&max_perf_level)) { new_perf_lvl = max_perf_level; VCD_MSG_HIGH("Using max perf level(%d) for >60fps\n", new_perf_lvl); } else { VCD_MSG_ERROR("Failed to get max perf level\n"); } } if (cctxt->status.req_perf_lvl) { dev_ctxt->reqd_perf_lvl = dev_ctxt->reqd_perf_lvl - cctxt->reqd_perf_lvl + new_perf_lvl; rc = vcd_set_perf_level(cctxt->dev_ctxt, dev_ctxt->reqd_perf_lvl); } cctxt->reqd_perf_lvl = new_perf_lvl; return rc; } u32 vcd_gate_clock(struct vcd_dev_ctxt *dev_ctxt) { u32 rc = VCD_S_SUCCESS; if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF || dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) { VCD_MSG_ERROR("%s(): Clk is Off or Not Clked yet\n", __func__); rc = VCD_ERR_FAIL; } else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKGATED) rc = VCD_S_SUCCESS; else if (res_trk_disable_clocks()) dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKGATED; else rc = VCD_ERR_FAIL; return rc; } u32 vcd_un_gate_clock(struct vcd_dev_ctxt *dev_ctxt) { u32 rc = VCD_S_SUCCESS; if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF || dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) { VCD_MSG_ERROR("%s(): Clk is Off or Not Clked yet\n", __func__); rc = VCD_ERR_FAIL; } else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKED) rc = VCD_S_SUCCESS; else if (res_trk_enable_clocks()) dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKED; else rc = VCD_ERR_FAIL; return rc; }
gpl-2.0
Bdaman80/BDA-ACTV
drivers/gpu/drm/drm_context.c
1477
11821
/** * \file drm_context.c * IOCTLs for generic contexts * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> */ /* * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com * * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ /* * ChangeLog: * 2001-11-16 Torsten Duwe <duwe@caldera.de> * added context constructor/destructor hooks, * needed by SiS driver's memory management. */ #include "drmP.h" /******************************************************************/ /** \name Context bitmap support */ /*@{*/ /** * Free a handle from the context bitmap. * * \param dev DRM device. * \param ctx_handle context handle. 
* * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry * in drm_device::ctx_idr, while holding the drm_device::struct_mutex * lock. */ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) { mutex_lock(&dev->struct_mutex); idr_remove(&dev->ctx_idr, ctx_handle); mutex_unlock(&dev->struct_mutex); } /** * Context bitmap allocation. * * \param dev DRM device. * \return (non-negative) context handle on success or a negative number on failure. * * Allocate a new idr from drm_device::ctx_idr while holding the * drm_device::struct_mutex lock. */ static int drm_ctxbitmap_next(struct drm_device * dev) { int new_id; int ret; again: if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) { DRM_ERROR("Out of memory expanding drawable idr\n"); return -ENOMEM; } mutex_lock(&dev->struct_mutex); ret = idr_get_new_above(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, &new_id); if (ret == -EAGAIN) { mutex_unlock(&dev->struct_mutex); goto again; } mutex_unlock(&dev->struct_mutex); return new_id; } /** * Context bitmap initialization. * * \param dev DRM device. * * Initialise the drm_device::ctx_idr */ int drm_ctxbitmap_init(struct drm_device * dev) { idr_init(&dev->ctx_idr); return 0; } /** * Context bitmap cleanup. * * \param dev DRM device. * * Free all idr members using drm_ctx_sarea_free helper function * while holding the drm_device::struct_mutex lock. */ void drm_ctxbitmap_cleanup(struct drm_device * dev) { mutex_lock(&dev->struct_mutex); idr_remove_all(&dev->ctx_idr); mutex_unlock(&dev->struct_mutex); } /*@}*/ /******************************************************************/ /** \name Per Context SAREA Support */ /*@{*/ /** * Get per-context SAREA. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx_priv_map structure. * \return zero on success or a negative number on failure. 
* * Gets the map from drm_device::ctx_idr with the handle specified and * returns its handle. */ int drm_getsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_ctx_priv_map *request = data; struct drm_local_map *map; struct drm_map_list *_entry; mutex_lock(&dev->struct_mutex); map = idr_find(&dev->ctx_idr, request->ctx_id); if (!map) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } mutex_unlock(&dev->struct_mutex); request->handle = NULL; list_for_each_entry(_entry, &dev->maplist, head) { if (_entry->map == map) { request->handle = (void *)(unsigned long)_entry->user_token; break; } } if (request->handle == NULL) return -EINVAL; return 0; } /** * Set per-context SAREA. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx_priv_map structure. * \return zero on success or a negative number on failure. * * Searches the mapping specified in \p arg and update the entry in * drm_device::ctx_idr with it. */ int drm_setsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_ctx_priv_map *request = data; struct drm_local_map *map = NULL; struct drm_map_list *r_list = NULL; mutex_lock(&dev->struct_mutex); list_for_each_entry(r_list, &dev->maplist, head) { if (r_list->map && r_list->user_token == (unsigned long) request->handle) goto found; } bad: mutex_unlock(&dev->struct_mutex); return -EINVAL; found: map = r_list->map; if (!map) goto bad; if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id))) goto bad; mutex_unlock(&dev->struct_mutex); return 0; } /*@}*/ /******************************************************************/ /** \name The actual DRM context handling routines */ /*@{*/ /** * Switch context. * * \param dev DRM device. * \param old old context handle. * \param new new context handle. * \return zero on success or a negative number on failure. * * Attempt to set drm_device::context_flag. 
*/ static int drm_context_switch(struct drm_device * dev, int old, int new) { if (test_and_set_bit(0, &dev->context_flag)) { DRM_ERROR("Reentering -- FIXME\n"); return -EBUSY; } DRM_DEBUG("Context switch from %d to %d\n", old, new); if (new == dev->last_context) { clear_bit(0, &dev->context_flag); return 0; } return 0; } /** * Complete context switch. * * \param dev DRM device. * \param new new context handle. * \return zero on success or a negative number on failure. * * Updates drm_device::last_context and drm_device::last_switch. Verifies the * hardware lock is held, clears the drm_device::context_flag and wakes up * drm_device::context_wait. */ static int drm_context_switch_complete(struct drm_device *dev, struct drm_file *file_priv, int new) { dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ dev->last_switch = jiffies; if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) { DRM_ERROR("Lock isn't held after context switch\n"); } /* If a context switch is ever initiated when the kernel holds the lock, release that lock here. */ clear_bit(0, &dev->context_flag); wake_up(&dev->context_wait); return 0; } /** * Reserve contexts. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx_res structure. * \return zero on success or a negative number on failure. */ int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_ctx_res *res = data; struct drm_ctx ctx; int i; if (res->count >= DRM_RESERVED_CONTEXTS) { memset(&ctx, 0, sizeof(ctx)); for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ctx.handle = i; if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx))) return -EFAULT; } } res->count = DRM_RESERVED_CONTEXTS; return 0; } /** * Add context. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. 
* \return zero on success or a negative number on failure. * * Get a new handle for the context and copy to userspace. */ int drm_addctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_ctx_list *ctx_entry; struct drm_ctx *ctx = data; ctx->handle = drm_ctxbitmap_next(dev); if (ctx->handle == DRM_KERNEL_CONTEXT) { /* Skip kernel's context and get a new one. */ ctx->handle = drm_ctxbitmap_next(dev); } DRM_DEBUG("%d\n", ctx->handle); if (ctx->handle == -1) { DRM_DEBUG("Not enough free contexts.\n"); /* Should this return -EBUSY instead? */ return -ENOMEM; } if (ctx->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_ctor) if (!dev->driver->context_ctor(dev, ctx->handle)) { DRM_DEBUG("Running out of ctxs or memory.\n"); return -ENOMEM; } } ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL); if (!ctx_entry) { DRM_DEBUG("out of memory\n"); return -ENOMEM; } INIT_LIST_HEAD(&ctx_entry->head); ctx_entry->handle = ctx->handle; ctx_entry->tag = file_priv; mutex_lock(&dev->ctxlist_mutex); list_add(&ctx_entry->head, &dev->ctxlist); ++dev->ctx_count; mutex_unlock(&dev->ctxlist_mutex); return 0; } int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { /* This does nothing */ return 0; } /** * Get context. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. */ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_ctx *ctx = data; /* This is 0, because we don't handle any context flags */ ctx->flags = 0; return 0; } /** * Switch context. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * Calls context_switch(). 
*/ int drm_switchctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_ctx *ctx = data; DRM_DEBUG("%d\n", ctx->handle); return drm_context_switch(dev, dev->last_context, ctx->handle); } /** * New context. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * Calls context_switch_complete(). */ int drm_newctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_ctx *ctx = data; DRM_DEBUG("%d\n", ctx->handle); drm_context_switch_complete(dev, file_priv, ctx->handle); return 0; } /** * Remove context. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * If not the special kernel context, calls ctxbitmap_free() to free the specified context. */ int drm_rmctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_ctx *ctx = data; DRM_DEBUG("%d\n", ctx->handle); if (ctx->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) dev->driver->context_dtor(dev, ctx->handle); drm_ctxbitmap_free(dev, ctx->handle); } mutex_lock(&dev->ctxlist_mutex); if (!list_empty(&dev->ctxlist)) { struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { if (pos->handle == ctx->handle) { list_del(&pos->head); kfree(pos); --dev->ctx_count; } } } mutex_unlock(&dev->ctxlist_mutex); return 0; } /*@}*/
gpl-2.0
Mazout360/hammerhead-3.10
drivers/input/keyboard/lpc32xx-keys.c
2245
10524
/* * NXP LPC32xx SoC Key Scan Interface * * Authors: * Kevin Wells <kevin.wells@nxp.com> * Roland Stigge <stigge@antcom.de> * * Copyright (C) 2010 NXP Semiconductors * Copyright (C) 2012 Roland Stigge * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * This controller supports square key matrices from 1x1 up to 8x8 */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> #include <linux/input/matrix_keypad.h> #define DRV_NAME "lpc32xx_keys" /* * Key scanner register offsets */ #define LPC32XX_KS_DEB(x) ((x) + 0x00) #define LPC32XX_KS_STATE_COND(x) ((x) + 0x04) #define LPC32XX_KS_IRQ(x) ((x) + 0x08) #define LPC32XX_KS_SCAN_CTL(x) ((x) + 0x0C) #define LPC32XX_KS_FAST_TST(x) ((x) + 0x10) #define LPC32XX_KS_MATRIX_DIM(x) ((x) + 0x14) /* 1..8 */ #define LPC32XX_KS_DATA(x, y) ((x) + 0x40 + ((y) << 2)) #define LPC32XX_KSCAN_DEB_NUM_DEB_PASS(n) ((n) & 0xFF) #define LPC32XX_KSCAN_SCOND_IN_IDLE 0x0 #define LPC32XX_KSCAN_SCOND_IN_SCANONCE 0x1 #define LPC32XX_KSCAN_SCOND_IN_IRQGEN 0x2 #define LPC32XX_KSCAN_SCOND_IN_SCAN_MATRIX 0x3 #define LPC32XX_KSCAN_IRQ_PENDING_CLR 0x1 #define LPC32XX_KSCAN_SCTRL_SCAN_DELAY(n) ((n) & 0xFF) #define LPC32XX_KSCAN_FTST_FORCESCANONCE 0x1 #define LPC32XX_KSCAN_FTST_USE32K_CLK 0x2 #define LPC32XX_KSCAN_MSEL_SELECT(n) ((n) & 0xF) struct lpc32xx_kscan_drv { struct input_dev *input; struct clk *clk; struct resource 
*iores; void __iomem *kscan_base; unsigned int irq; u32 matrix_sz; /* Size of matrix in XxY, ie. 3 = 3x3 */ u32 deb_clks; /* Debounce clocks (based on 32KHz clock) */ u32 scan_delay; /* Scan delay (based on 32KHz clock) */ unsigned short *keymap; /* Pointer to key map for the scan matrix */ unsigned int row_shift; u8 lastkeystates[8]; }; static void lpc32xx_mod_states(struct lpc32xx_kscan_drv *kscandat, int col) { struct input_dev *input = kscandat->input; unsigned row, changed, scancode, keycode; u8 key; key = readl(LPC32XX_KS_DATA(kscandat->kscan_base, col)); changed = key ^ kscandat->lastkeystates[col]; kscandat->lastkeystates[col] = key; for (row = 0; changed; row++, changed >>= 1) { if (changed & 1) { /* Key state changed, signal an event */ scancode = MATRIX_SCAN_CODE(row, col, kscandat->row_shift); keycode = kscandat->keymap[scancode]; input_event(input, EV_MSC, MSC_SCAN, scancode); input_report_key(input, keycode, key & (1 << row)); } } } static irqreturn_t lpc32xx_kscan_irq(int irq, void *dev_id) { struct lpc32xx_kscan_drv *kscandat = dev_id; int i; for (i = 0; i < kscandat->matrix_sz; i++) lpc32xx_mod_states(kscandat, i); writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); input_sync(kscandat->input); return IRQ_HANDLED; } static int lpc32xx_kscan_open(struct input_dev *dev) { struct lpc32xx_kscan_drv *kscandat = input_get_drvdata(dev); int error; error = clk_prepare_enable(kscandat->clk); if (error) return error; writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); return 0; } static void lpc32xx_kscan_close(struct input_dev *dev) { struct lpc32xx_kscan_drv *kscandat = input_get_drvdata(dev); writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); clk_disable_unprepare(kscandat->clk); } static int lpc32xx_parse_dt(struct device *dev, struct lpc32xx_kscan_drv *kscandat) { struct device_node *np = dev->of_node; u32 rows = 0, columns = 0; int err; err = matrix_keypad_parse_of_params(dev, &rows, &columns); if (err) return err; if (rows != columns) { dev_err(dev, "rows and 
columns must be equal!\n"); return -EINVAL; } kscandat->matrix_sz = rows; kscandat->row_shift = get_count_order(columns); of_property_read_u32(np, "nxp,debounce-delay-ms", &kscandat->deb_clks); of_property_read_u32(np, "nxp,scan-delay-ms", &kscandat->scan_delay); if (!kscandat->deb_clks || !kscandat->scan_delay) { dev_err(dev, "debounce or scan delay not specified\n"); return -EINVAL; } return 0; } static int lpc32xx_kscan_probe(struct platform_device *pdev) { struct lpc32xx_kscan_drv *kscandat; struct input_dev *input; struct resource *res; size_t keymap_size; int error; int irq; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get platform I/O memory\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (irq < 0 || irq >= NR_IRQS) { dev_err(&pdev->dev, "failed to get platform irq\n"); return -EINVAL; } kscandat = kzalloc(sizeof(struct lpc32xx_kscan_drv), GFP_KERNEL); if (!kscandat) { dev_err(&pdev->dev, "failed to allocate memory\n"); return -ENOMEM; } error = lpc32xx_parse_dt(&pdev->dev, kscandat); if (error) { dev_err(&pdev->dev, "failed to parse device tree\n"); goto err_free_mem; } keymap_size = sizeof(kscandat->keymap[0]) * (kscandat->matrix_sz << kscandat->row_shift); kscandat->keymap = kzalloc(keymap_size, GFP_KERNEL); if (!kscandat->keymap) { dev_err(&pdev->dev, "could not allocate memory for keymap\n"); error = -ENOMEM; goto err_free_mem; } kscandat->input = input = input_allocate_device(); if (!input) { dev_err(&pdev->dev, "failed to allocate input device\n"); error = -ENOMEM; goto err_free_keymap; } /* Setup key input */ input->name = pdev->name; input->phys = "lpc32xx/input0"; input->id.vendor = 0x0001; input->id.product = 0x0001; input->id.version = 0x0100; input->open = lpc32xx_kscan_open; input->close = lpc32xx_kscan_close; input->dev.parent = &pdev->dev; input_set_capability(input, EV_MSC, MSC_SCAN); error = matrix_keypad_build_keymap(NULL, NULL, kscandat->matrix_sz, kscandat->matrix_sz, 
kscandat->keymap, kscandat->input); if (error) { dev_err(&pdev->dev, "failed to build keymap\n"); goto err_free_input; } input_set_drvdata(kscandat->input, kscandat); kscandat->iores = request_mem_region(res->start, resource_size(res), pdev->name); if (!kscandat->iores) { dev_err(&pdev->dev, "failed to request I/O memory\n"); error = -EBUSY; goto err_free_input; } kscandat->kscan_base = ioremap(kscandat->iores->start, resource_size(kscandat->iores)); if (!kscandat->kscan_base) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); error = -EBUSY; goto err_release_memregion; } /* Get the key scanner clock */ kscandat->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(kscandat->clk)) { dev_err(&pdev->dev, "failed to get clock\n"); error = PTR_ERR(kscandat->clk); goto err_unmap; } /* Configure the key scanner */ error = clk_prepare_enable(kscandat->clk); if (error) goto err_clk_put; writel(kscandat->deb_clks, LPC32XX_KS_DEB(kscandat->kscan_base)); writel(kscandat->scan_delay, LPC32XX_KS_SCAN_CTL(kscandat->kscan_base)); writel(LPC32XX_KSCAN_FTST_USE32K_CLK, LPC32XX_KS_FAST_TST(kscandat->kscan_base)); writel(kscandat->matrix_sz, LPC32XX_KS_MATRIX_DIM(kscandat->kscan_base)); writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); clk_disable_unprepare(kscandat->clk); error = request_irq(irq, lpc32xx_kscan_irq, 0, pdev->name, kscandat); if (error) { dev_err(&pdev->dev, "failed to request irq\n"); goto err_clk_put; } error = input_register_device(kscandat->input); if (error) { dev_err(&pdev->dev, "failed to register input device\n"); goto err_free_irq; } platform_set_drvdata(pdev, kscandat); return 0; err_free_irq: free_irq(irq, kscandat); err_clk_put: clk_put(kscandat->clk); err_unmap: iounmap(kscandat->kscan_base); err_release_memregion: release_mem_region(kscandat->iores->start, resource_size(kscandat->iores)); err_free_input: input_free_device(kscandat->input); err_free_keymap: kfree(kscandat->keymap); err_free_mem: kfree(kscandat); return error; } static int 
lpc32xx_kscan_remove(struct platform_device *pdev) { struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev); free_irq(platform_get_irq(pdev, 0), kscandat); clk_put(kscandat->clk); iounmap(kscandat->kscan_base); release_mem_region(kscandat->iores->start, resource_size(kscandat->iores)); input_unregister_device(kscandat->input); kfree(kscandat->keymap); kfree(kscandat); return 0; } #ifdef CONFIG_PM_SLEEP static int lpc32xx_kscan_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev); struct input_dev *input = kscandat->input; mutex_lock(&input->mutex); if (input->users) { /* Clear IRQ and disable clock */ writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); clk_disable_unprepare(kscandat->clk); } mutex_unlock(&input->mutex); return 0; } static int lpc32xx_kscan_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev); struct input_dev *input = kscandat->input; int retval = 0; mutex_lock(&input->mutex); if (input->users) { /* Enable clock and clear IRQ */ retval = clk_prepare_enable(kscandat->clk); if (retval == 0) writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base)); } mutex_unlock(&input->mutex); return retval; } #endif static SIMPLE_DEV_PM_OPS(lpc32xx_kscan_pm_ops, lpc32xx_kscan_suspend, lpc32xx_kscan_resume); static const struct of_device_id lpc32xx_kscan_match[] = { { .compatible = "nxp,lpc3220-key" }, {}, }; MODULE_DEVICE_TABLE(of, lpc32xx_kscan_match); static struct platform_driver lpc32xx_kscan_driver = { .probe = lpc32xx_kscan_probe, .remove = lpc32xx_kscan_remove, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .pm = &lpc32xx_kscan_pm_ops, .of_match_table = of_match_ptr(lpc32xx_kscan_match), } }; module_platform_driver(lpc32xx_kscan_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>"); MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>"); 
MODULE_DESCRIPTION("Key scanner driver for LPC32XX devices");
gpl-2.0
revjunkie/nexus5-lprev-final
arch/openrisc/mm/fault.c
4549
8559
/* * OpenRISC fault.c * * Linux architectural port borrowing liberally from similar works of * others. All original copyrights apply as per the original source * declaration. * * Modifications for the OpenRISC architecture: * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/sched.h> #include <asm/uaccess.h> #include <asm/siginfo.h> #include <asm/signal.h> #define NUM_TLB_ENTRIES 64 #define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1)) unsigned long pte_misses; /* updated by do_page_fault() */ unsigned long pte_errors; /* updated by do_page_fault() */ /* __PHX__ :: - check the vmalloc_fault in do_page_fault() * - also look into include/asm-or32/mmu_context.h */ volatile pgd_t *current_pgd; extern void die(char *, struct pt_regs *, long); /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. * * If this routine detects a bad access, it returns 1, otherwise it * returns 0. */ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address, unsigned long vector, int write_acc) { struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; siginfo_t info; int fault; tsk = current; /* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. * * NOTE! We MUST NOT take any locks for this case. We may * be in an interrupt or a critical region, and should * only copy the information from the master page table, * nothing more. 
* * NOTE2: This is done so that, when updating the vmalloc * mappings we don't have to walk all processes pgdirs and * add the high mappings all at once. Instead we do it as they * are used. However vmalloc'ed page entries have the PAGE_GLOBAL * bit set so sometimes the TLB can use a lingering entry. * * This verifies that the fault happens in kernel space * and that the fault was not a protection error. */ if (address >= VMALLOC_START && (vector != 0x300 && vector != 0x400) && !user_mode(regs)) goto vmalloc_fault; /* If exceptions were enabled, we can reenable them here */ if (user_mode(regs)) { /* Exception was in userspace: reenable interrupts */ local_irq_enable(); } else { /* If exception was in a syscall, then IRQ's may have * been enabled or disabled. If they were enabled, * reenable them. */ if (regs->sr && (SPR_SR_IEE | SPR_SR_TEE)) local_irq_enable(); } mm = tsk->mm; info.si_code = SEGV_MAPERR; /* * If we're in an interrupt or have no user * context, we must not take the fault.. */ if (in_interrupt() || !mm) goto no_context; down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (user_mode(regs)) { /* * accessing the stack below usp is always a bug. * we get page-aligned addresses so we can only check * if we're within a page from usp, but that might be * enough to catch brutal errors at least. */ if (address + PAGE_SIZE < regs->sp) goto bad_area; } if (expand_stack(vma, address)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. 
*/ good_area: info.si_code = SEGV_ACCERR; /* first do some preliminary protection checks */ if (write_acc) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; } else { /* not present */ if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; } /* are we trying to execute nonexecutable area */ if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC)) goto bad_area; /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(mm, vma, address, write_acc); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } /*RGD modeled on Cris */ if (fault & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; up_read(&mm->mmap_sem); return; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ bad_area: up_read(&mm->mmap_sem); bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { info.si_signo = SIGSEGV; info.si_errno = 0; /* info.si_code has been set above */ info.si_addr = (void *)address; force_sig_info(SIGSEGV, &info, tsk); return; } no_context: /* Are we prepared to handle this kernel fault? * * (The kernel has valid exception-points in the source * when it acesses user-memory. When it fails in one * of those points, we find it in a table and do a jump * to some fixup code that loads an appropriate error * code) */ { const struct exception_table_entry *entry; __asm__ __volatile__("l.nop 42"); if ((entry = search_exception_tables(regs->pc)) != NULL) { /* Adjust the instruction pointer in the stackframe */ regs->pc = entry->fixup; return; } } /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. 
*/ if ((unsigned long)(address) < PAGE_SIZE) printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); else printk(KERN_ALERT "Unable to handle kernel access"); printk(" at virtual address 0x%08lx\n", address); die("Oops", regs, write_acc); do_exit(SIGKILL); /* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. */ out_of_memory: __asm__ __volatile__("l.nop 42"); __asm__ __volatile__("l.nop 1"); up_read(&mm->mmap_sem); printk("VM: killing process %s\n", tsk->comm); if (user_mode(regs)) do_exit(SIGKILL); goto no_context; do_sigbus: up_read(&mm->mmap_sem); /* * Send a sigbus, regardless of whether we were in kernel * or user mode. */ info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRERR; info.si_addr = (void *)address; force_sig_info(SIGBUS, &info, tsk); /* Kernel mode? Handle exceptions or die */ if (!user_mode(regs)) goto no_context; return; vmalloc_fault: { /* * Synchronize this task's top level page-table * with the 'reference' page table. * * Use current_pgd instead of tsk->active_mm->pgd * since the latter might be unavailable if this * code is executed in a misfortunately run irq * (like inside schedule() between switch_mm and * switch_to...). */ int offset = pgd_index(address); pgd_t *pgd, *pgd_k; pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; pte_t *pte_k; /* phx_warn("do_page_fault(): vmalloc_fault will not work, " "since current_pgd assign a proper value somewhere\n" "anyhow we don't need this at the moment\n"); phx_mmu("vmalloc_fault"); */ pgd = (pgd_t *)current_pgd + offset; pgd_k = init_mm.pgd + offset; /* Since we're two-level, we don't need to do both * set_pgd and set_pmd (they do the same thing). If * we go three-level at some point, do the right thing * with pgd_present and set_pgd here. * * Also, since the vmalloc area is global, we don't * need to copy individual PTE's, it is enough to * copy the pgd pointer into the pte page of the * root task. 
If that is there, we'll find our pte if * it exists. */ pud = pud_offset(pgd, address); pud_k = pud_offset(pgd_k, address); if (!pud_present(*pud_k)) goto no_context; pmd = pmd_offset(pud, address); pmd_k = pmd_offset(pud_k, address); if (!pmd_present(*pmd_k)) goto bad_area_nosemaphore; set_pmd(pmd, *pmd_k); /* Make sure the actual PTE exists as well to * catch kernel vmalloc-area accesses to non-mapped * addresses. If we don't do this, this will just * silently loop forever. */ pte_k = pte_offset_kernel(pmd_k, address); if (!pte_present(*pte_k)) goto no_context; return; } }
gpl-2.0
zakee94/stellar_msm8226
drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
4805
47476
/* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2010 QLogic Corporation * * See LICENSE.qlcnic for copyright and licensing details. */ #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/if_vlan.h> #include "qlcnic.h" struct crb_addr_pair { u32 addr; u32 data; }; #define QLCNIC_MAX_CRB_XFORM 60 static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM]; #define crb_addr_transform(name) \ (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \ QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20) #define QLCNIC_ADDR_ERROR (0xffffffff) static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, struct qlcnic_host_rds_ring *rds_ring); static int qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter); static void crb_addr_transform_setup(void) { crb_addr_transform(XDMA); crb_addr_transform(TIMR); crb_addr_transform(SRE); crb_addr_transform(SQN3); crb_addr_transform(SQN2); crb_addr_transform(SQN1); crb_addr_transform(SQN0); crb_addr_transform(SQS3); crb_addr_transform(SQS2); crb_addr_transform(SQS1); crb_addr_transform(SQS0); crb_addr_transform(RPMX7); crb_addr_transform(RPMX6); crb_addr_transform(RPMX5); crb_addr_transform(RPMX4); crb_addr_transform(RPMX3); crb_addr_transform(RPMX2); crb_addr_transform(RPMX1); crb_addr_transform(RPMX0); crb_addr_transform(ROMUSB); crb_addr_transform(SN); crb_addr_transform(QMN); crb_addr_transform(QMS); crb_addr_transform(PGNI); crb_addr_transform(PGND); crb_addr_transform(PGN3); crb_addr_transform(PGN2); crb_addr_transform(PGN1); crb_addr_transform(PGN0); crb_addr_transform(PGSI); crb_addr_transform(PGSD); crb_addr_transform(PGS3); crb_addr_transform(PGS2); crb_addr_transform(PGS1); crb_addr_transform(PGS0); crb_addr_transform(PS); crb_addr_transform(PH); crb_addr_transform(NIU); crb_addr_transform(I2Q); crb_addr_transform(EG); crb_addr_transform(MN); crb_addr_transform(MS); crb_addr_transform(CAS2); crb_addr_transform(CAS1); crb_addr_transform(CAS0); crb_addr_transform(CAM); 
crb_addr_transform(C2C1); crb_addr_transform(C2C0); crb_addr_transform(SMB); crb_addr_transform(OCM0); crb_addr_transform(I2C0); } void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter) { struct qlcnic_recv_context *recv_ctx; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_rx_buffer *rx_buf; int i, ring; recv_ctx = adapter->recv_ctx; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; for (i = 0; i < rds_ring->num_desc; ++i) { rx_buf = &(rds_ring->rx_buf_arr[i]); if (rx_buf->skb == NULL) continue; pci_unmap_single(adapter->pdev, rx_buf->dma, rds_ring->dma_size, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(rx_buf->skb); } } } void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter) { struct qlcnic_recv_context *recv_ctx; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_rx_buffer *rx_buf; int i, ring; recv_ctx = adapter->recv_ctx; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; INIT_LIST_HEAD(&rds_ring->free_list); rx_buf = rds_ring->rx_buf_arr; for (i = 0; i < rds_ring->num_desc; i++) { list_add_tail(&rx_buf->list, &rds_ring->free_list); rx_buf++; } } } void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter) { struct qlcnic_cmd_buffer *cmd_buf; struct qlcnic_skb_frag *buffrag; int i, j; struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; cmd_buf = tx_ring->cmd_buf_arr; for (i = 0; i < tx_ring->num_desc; i++) { buffrag = cmd_buf->frag_array; if (buffrag->dma) { pci_unmap_single(adapter->pdev, buffrag->dma, buffrag->length, PCI_DMA_TODEVICE); buffrag->dma = 0ULL; } for (j = 0; j < cmd_buf->frag_count; j++) { buffrag++; if (buffrag->dma) { pci_unmap_page(adapter->pdev, buffrag->dma, buffrag->length, PCI_DMA_TODEVICE); buffrag->dma = 0ULL; } } if (cmd_buf->skb) { dev_kfree_skb_any(cmd_buf->skb); cmd_buf->skb = NULL; } cmd_buf++; } } void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) { struct qlcnic_recv_context *recv_ctx; struct 
qlcnic_host_rds_ring *rds_ring; struct qlcnic_host_tx_ring *tx_ring; int ring; recv_ctx = adapter->recv_ctx; if (recv_ctx->rds_rings == NULL) goto skip_rds; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; vfree(rds_ring->rx_buf_arr); rds_ring->rx_buf_arr = NULL; } kfree(recv_ctx->rds_rings); skip_rds: if (adapter->tx_ring == NULL) return; tx_ring = adapter->tx_ring; vfree(tx_ring->cmd_buf_arr); tx_ring->cmd_buf_arr = NULL; kfree(adapter->tx_ring); adapter->tx_ring = NULL; } int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) { struct qlcnic_recv_context *recv_ctx; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_rx_buffer *rx_buf; int ring, i, size; struct qlcnic_cmd_buffer *cmd_buf_arr; struct net_device *netdev = adapter->netdev; size = sizeof(struct qlcnic_host_tx_ring); tx_ring = kzalloc(size, GFP_KERNEL); if (tx_ring == NULL) { dev_err(&netdev->dev, "failed to allocate tx ring struct\n"); return -ENOMEM; } adapter->tx_ring = tx_ring; tx_ring->num_desc = adapter->num_txd; tx_ring->txq = netdev_get_tx_queue(netdev, 0); cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); if (cmd_buf_arr == NULL) { dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n"); goto err_out; } tx_ring->cmd_buf_arr = cmd_buf_arr; recv_ctx = adapter->recv_ctx; size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring); rds_ring = kzalloc(size, GFP_KERNEL); if (rds_ring == NULL) { dev_err(&netdev->dev, "failed to allocate rds ring struct\n"); goto err_out; } recv_ctx->rds_rings = rds_ring; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; switch (ring) { case RCV_RING_NORMAL: rds_ring->num_desc = adapter->num_rxd; rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN; rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; break; case RCV_RING_JUMBO: rds_ring->num_desc = adapter->num_jumbo_rxd; 
rds_ring->dma_size = QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN; if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA; rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; break; } rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); if (rds_ring->rx_buf_arr == NULL) { dev_err(&netdev->dev, "Failed to allocate " "rx buffer ring %d\n", ring); goto err_out; } INIT_LIST_HEAD(&rds_ring->free_list); /* * Now go through all of them, set reference handles * and put them in the queues. */ rx_buf = rds_ring->rx_buf_arr; for (i = 0; i < rds_ring->num_desc; i++) { list_add_tail(&rx_buf->list, &rds_ring->free_list); rx_buf->ref_handle = i; rx_buf++; } spin_lock_init(&rds_ring->lock); } for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; sds_ring->irq = adapter->msix_entries[ring].vector; sds_ring->adapter = adapter; sds_ring->num_desc = adapter->num_rxd; for (i = 0; i < NUM_RCV_DESC_RINGS; i++) INIT_LIST_HEAD(&sds_ring->free_list[i]); } return 0; err_out: qlcnic_free_sw_resources(adapter); return -ENOMEM; } /* * Utility to translate from internal Phantom CRB address * to external PCI CRB address. 
*/ static u32 qlcnic_decode_crb_addr(u32 addr) { int i; u32 base_addr, offset, pci_base; crb_addr_transform_setup(); pci_base = QLCNIC_ADDR_ERROR; base_addr = addr & 0xfff00000; offset = addr & 0x000fffff; for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) { if (crb_addr_xform[i] == base_addr) { pci_base = i << 20; break; } } if (pci_base == QLCNIC_ADDR_ERROR) return pci_base; else return pci_base + offset; } #define QLCNIC_MAX_ROM_WAIT_USEC 100 static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter) { long timeout = 0; long done = 0; cond_resched(); while (done == 0) { done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS); done &= 2; if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) { dev_err(&adapter->pdev->dev, "Timeout reached waiting for rom done"); return -EIO; } udelay(1); } return 0; } static int do_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp) { QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr); QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3); QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb); if (qlcnic_wait_rom_done(adapter)) { dev_err(&adapter->pdev->dev, "Error waiting for rom done\n"); return -EIO; } /* reset abyte_cnt and dummy_byte_cnt */ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0); udelay(10); QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA); return 0; } static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, u8 *bytes, size_t size) { int addridx; int ret = 0; for (addridx = addr; addridx < (addr + size); addridx += 4) { int v; ret = do_rom_fast_read(adapter, addridx, &v); if (ret != 0) break; *(__le32 *)bytes = cpu_to_le32(v); bytes += 4; } return ret; } int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, u8 *bytes, size_t size) { int ret; ret = qlcnic_rom_lock(adapter); if (ret < 0) return ret; ret = do_rom_fast_read_words(adapter, addr, bytes, size); 
qlcnic_rom_unlock(adapter); return ret; } int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp) { int ret; if (qlcnic_rom_lock(adapter) != 0) return -EIO; ret = do_rom_fast_read(adapter, addr, valp); qlcnic_rom_unlock(adapter); return ret; } int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) { int addr, val; int i, n, init_delay; struct crb_addr_pair *buf; unsigned offset; u32 off; struct pci_dev *pdev = adapter->pdev; QLCWR32(adapter, CRB_CMDPEG_STATE, 0); QLCWR32(adapter, CRB_RCVPEG_STATE, 0); /* Halt all the indiviual PEGs and other blocks */ /* disable all I2Q */ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x10, 0x0); QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x14, 0x0); QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x18, 0x0); QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x1c, 0x0); QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x20, 0x0); QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x24, 0x0); /* disable all niu interrupts */ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x40, 0xff); /* disable xge rx/tx */ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x70000, 0x00); /* disable xg1 rx/tx */ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x80000, 0x00); /* disable sideband mac */ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x90000, 0x00); /* disable ap0 mac */ QLCWR32(adapter, QLCNIC_CRB_NIU + 0xa0000, 0x00); /* disable ap1 mac */ QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00); /* halt sre */ val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000); QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1))); /* halt epg */ QLCWR32(adapter, QLCNIC_CRB_EPG + 0x1300, 0x1); /* halt timers */ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x0, 0x0); QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x8, 0x0); QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x10, 0x0); QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x18, 0x0); QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x100, 0x0); QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x200, 0x0); /* halt pegs */ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, 1); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, 1); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, 1); 
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, 1); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1); msleep(20); qlcnic_rom_unlock(adapter); /* big hammer don't reset CAM block on reset */ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff); /* Init HW CRB block */ if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || qlcnic_rom_fast_read(adapter, 4, &n) != 0) { dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n); return -EIO; } offset = n & 0xffffU; n = (n >> 16) & 0xffffU; if (n >= 1024) { dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n"); return -EIO; } buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); if (buf == NULL) { dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n"); return -ENOMEM; } for (i = 0; i < n; i++) { if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { kfree(buf); return -EIO; } buf[i].addr = addr; buf[i].data = val; } for (i = 0; i < n; i++) { off = qlcnic_decode_crb_addr(buf[i].addr); if (off == QLCNIC_ADDR_ERROR) { dev_err(&pdev->dev, "CRB init value out of range %x\n", buf[i].addr); continue; } off += QLCNIC_PCI_CRBSPACE; if (off & 1) continue; /* skipping cold reboot MAGIC */ if (off == QLCNIC_CAM_RAM(0x1fc)) continue; if (off == (QLCNIC_CRB_I2C0 + 0x1c)) continue; if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */ continue; if (off == (ROMUSB_GLB + 0xa8)) continue; if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ continue; if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ continue; if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ continue; if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET) continue; /* skip the function enable register */ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION)) continue; if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2)) continue; if ((off & 0x0ff00000) == QLCNIC_CRB_SMB) continue; init_delay = 1; /* After writing this register, HW needs time for CRB */ /* to quiet down (else 
crb_window returns 0xffffffff) */ if (off == QLCNIC_ROMUSB_GLB_SW_RESET) init_delay = 1000; QLCWR32(adapter, off, buf[i].data); msleep(init_delay); } kfree(buf); /* Initialize protocol process engine */ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0); msleep(1); QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0); QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); return 0; } static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter) { u32 val; int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT; do { val = QLCRD32(adapter, CRB_CMDPEG_STATE); switch (val) { case PHAN_INITIALIZE_COMPLETE: case PHAN_INITIALIZE_ACK: return 0; case PHAN_INITIALIZE_FAILED: goto out_err; default: break; } msleep(QLCNIC_CMDPEG_CHECK_DELAY); } while (--retries); QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); out_err: dev_err(&adapter->pdev->dev, "Command Peg initialization not " "complete, state: 0x%x.\n", val); return -EIO; } static int qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter) { u32 val; int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT; do { val = QLCRD32(adapter, CRB_RCVPEG_STATE); if (val == PHAN_PEG_RCV_INITIALIZED) return 0; msleep(QLCNIC_RCVPEG_CHECK_DELAY); } while (--retries); if (!retries) { dev_err(&adapter->pdev->dev, "Receive Peg initialization not " "complete, state: 0x%x.\n", val); return -EIO; } return 0; } int qlcnic_check_fw_status(struct qlcnic_adapter *adapter) { int err; err 
= qlcnic_cmd_peg_ready(adapter); if (err) return err; err = qlcnic_receive_peg_ready(adapter); if (err) return err; QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); return err; } int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { int timeo; u32 val; val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); val = QLC_DEV_GET_DRV(val, adapter->portnum); if ((val & 0x3) != QLCNIC_TYPE_NIC) { dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n", val); return -EIO; } adapter->physical_port = (val >> 2); if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) timeo = QLCNIC_INIT_TIMEOUT_SECS; adapter->dev_init_timeo = timeo; if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo)) timeo = QLCNIC_RESET_TIMEOUT_SECS; adapter->reset_ack_timeo = timeo; return 0; } static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region, struct qlcnic_flt_entry *region_entry) { struct qlcnic_flt_header flt_hdr; struct qlcnic_flt_entry *flt_entry; int i = 0, ret; u32 entry_size; memset(region_entry, 0, sizeof(struct qlcnic_flt_entry)); ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION, (u8 *)&flt_hdr, sizeof(struct qlcnic_flt_header)); if (ret) { dev_warn(&adapter->pdev->dev, "error reading flash layout header\n"); return -EIO; } entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size); if (flt_entry == NULL) { dev_warn(&adapter->pdev->dev, "error allocating memory\n"); return -EIO; } ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + sizeof(struct qlcnic_flt_header), (u8 *)flt_entry, entry_size); if (ret) { dev_warn(&adapter->pdev->dev, "error reading flash layout entries\n"); goto err_out; } while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) { if (flt_entry[i].region == region) break; i++; } if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) { dev_warn(&adapter->pdev->dev, "region=%x not found in %d regions\n", region, i); 
ret = -EIO; goto err_out; } memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry)); err_out: vfree(flt_entry); return ret; } int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) { struct qlcnic_flt_entry fw_entry; u32 ver = -1, min_ver; int ret; if (adapter->ahw->revision_id == QLCNIC_P3P_C0) ret = qlcnic_get_flt_entry(adapter, QLCNIC_C0_FW_IMAGE_REGION, &fw_entry); else ret = qlcnic_get_flt_entry(adapter, QLCNIC_B0_FW_IMAGE_REGION, &fw_entry); if (!ret) /* 0-4:-signature, 4-8:-fw version */ qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4, (int *)&ver); else qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver); ver = QLCNIC_DECODE_VERSION(ver); min_ver = QLCNIC_MIN_FW_VERSION; if (ver < min_ver) { dev_err(&adapter->pdev->dev, "firmware version %d.%d.%d unsupported." "Min supported version %d.%d.%d\n", _major(ver), _minor(ver), _build(ver), _major(min_ver), _minor(min_ver), _build(min_ver)); return -EINVAL; } return 0; } static int qlcnic_has_mn(struct qlcnic_adapter *adapter) { u32 capability; capability = 0; capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY); if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) return 1; return 0; } static struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section) { u32 i; struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; __le32 entries = cpu_to_le32(directory->num_entries); for (i = 0; i < entries; i++) { __le32 offs = cpu_to_le32(directory->findex) + (i * cpu_to_le32(directory->entry_size)); __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); if (tab_type == section) return (struct uni_table_desc *) &unirom[offs]; } return NULL; } #define FILEHEADER_SIZE (14 * 4) static int qlcnic_validate_header(struct qlcnic_adapter *adapter) { const u8 *unirom = adapter->fw->data; struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; __le32 fw_file_size = adapter->fw->size; __le32 entries; __le32 entry_size; __le32 tab_size; if 
(fw_file_size < FILEHEADER_SIZE) return -EINVAL; entries = cpu_to_le32(directory->num_entries); entry_size = cpu_to_le32(directory->entry_size); tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); if (fw_file_size < tab_size) return -EINVAL; return 0; } static int qlcnic_validate_bootld(struct qlcnic_adapter *adapter) { struct uni_table_desc *tab_desc; struct uni_data_desc *descr; const u8 *unirom = adapter->fw->data; int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + QLCNIC_UNI_BOOTLD_IDX_OFF)); __le32 offs; __le32 tab_size; __le32 data_size; tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD); if (!tab_desc) return -EINVAL; tab_size = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); if (adapter->fw->size < tab_size) return -EINVAL; offs = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx)); descr = (struct uni_data_desc *)&unirom[offs]; data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); if (adapter->fw->size < data_size) return -EINVAL; return 0; } static int qlcnic_validate_fw(struct qlcnic_adapter *adapter) { struct uni_table_desc *tab_desc; struct uni_data_desc *descr; const u8 *unirom = adapter->fw->data; int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + QLCNIC_UNI_FIRMWARE_IDX_OFF)); __le32 offs; __le32 tab_size; __le32 data_size; tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW); if (!tab_desc) return -EINVAL; tab_size = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); if (adapter->fw->size < tab_size) return -EINVAL; offs = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx)); descr = (struct uni_data_desc *)&unirom[offs]; data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); if (adapter->fw->size < data_size) return -EINVAL; return 0; } static int qlcnic_validate_product_offs(struct qlcnic_adapter *adapter) { struct uni_table_desc *ptab_descr; 
const u8 *unirom = adapter->fw->data; int mn_present = qlcnic_has_mn(adapter); __le32 entries; __le32 entry_size; __le32 tab_size; u32 i; ptab_descr = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_PRODUCT_TBL); if (!ptab_descr) return -EINVAL; entries = cpu_to_le32(ptab_descr->num_entries); entry_size = cpu_to_le32(ptab_descr->entry_size); tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); if (adapter->fw->size < tab_size) return -EINVAL; nomn: for (i = 0; i < entries; i++) { __le32 flags, file_chiprev, offs; u8 chiprev = adapter->ahw->revision_id; u32 flagbit; offs = cpu_to_le32(ptab_descr->findex) + (i * cpu_to_le32(ptab_descr->entry_size)); flags = cpu_to_le32(*((int *)&unirom[offs] + QLCNIC_UNI_FLAGS_OFF)); file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + QLCNIC_UNI_CHIP_REV_OFF)); flagbit = mn_present ? 1 : 2; if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) { adapter->file_prd_off = offs; return 0; } } if (mn_present) { mn_present = 0; goto nomn; } return -EINVAL; } static int qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter) { if (qlcnic_validate_header(adapter)) { dev_err(&adapter->pdev->dev, "unified image: header validation failed\n"); return -EINVAL; } if (qlcnic_validate_product_offs(adapter)) { dev_err(&adapter->pdev->dev, "unified image: product validation failed\n"); return -EINVAL; } if (qlcnic_validate_bootld(adapter)) { dev_err(&adapter->pdev->dev, "unified image: bootld validation failed\n"); return -EINVAL; } if (qlcnic_validate_fw(adapter)) { dev_err(&adapter->pdev->dev, "unified image: firmware validation failed\n"); return -EINVAL; } return 0; } static struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter, u32 section, u32 idx_offset) { const u8 *unirom = adapter->fw->data; int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + idx_offset)); struct uni_table_desc *tab_desc; __le32 offs; tab_desc = qlcnic_get_table_desc(unirom, section); if (tab_desc == NULL) 
return NULL; offs = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * idx); return (struct uni_data_desc *)&unirom[offs]; } static u8 * qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter) { u32 offs = QLCNIC_BOOTLD_START; if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) offs = cpu_to_le32((qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_BOOTLD, QLCNIC_UNI_BOOTLD_IDX_OFF))->findex); return (u8 *)&adapter->fw->data[offs]; } static u8 * qlcnic_get_fw_offs(struct qlcnic_adapter *adapter) { u32 offs = QLCNIC_IMAGE_START; if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) offs = cpu_to_le32((qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex); return (u8 *)&adapter->fw->data[offs]; } static __le32 qlcnic_get_fw_size(struct qlcnic_adapter *adapter) { if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) return cpu_to_le32((qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, QLCNIC_UNI_FIRMWARE_IDX_OFF))->size); else return cpu_to_le32( *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]); } static __le32 qlcnic_get_fw_version(struct qlcnic_adapter *adapter) { struct uni_data_desc *fw_data_desc; const struct firmware *fw = adapter->fw; __le32 major, minor, sub; const u8 *ver_str; int i, ret; if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]); fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, QLCNIC_UNI_FIRMWARE_IDX_OFF); ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) + cpu_to_le32(fw_data_desc->size) - 17; for (i = 0; i < 12; i++) { if (!strncmp(&ver_str[i], "REV=", 4)) { ret = sscanf(&ver_str[i+4], "%u.%u.%u ", &major, &minor, &sub); if (ret != 3) return 0; else return major + (minor << 8) + (sub << 16); } } return 0; } static __le32 qlcnic_get_bios_version(struct qlcnic_adapter *adapter) { const struct firmware *fw = adapter->fw; __le32 bios_ver, prd_off = adapter->file_prd_off; if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) 
return cpu_to_le32( *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]); bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) + QLCNIC_UNI_BIOS_VERSION_OFF)); return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); } static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter) { if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID)) dev_info(&adapter->pdev->dev, "Resetting rom_lock\n"); qlcnic_pcie_sem_unlock(adapter, 2); } static int qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter) { u32 heartbeat, ret = -EIO; int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); do { msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS); heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); if (heartbeat != adapter->heartbeat) { ret = QLCNIC_RCODE_SUCCESS; break; } } while (--retries); return ret; } int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter) { if ((adapter->flags & QLCNIC_FW_HANG) || qlcnic_check_fw_hearbeat(adapter)) { qlcnic_rom_lock_recovery(adapter); return 1; } if (adapter->need_fw_reset) return 1; if (adapter->fw) return 1; return 0; } static const char *fw_name[] = { QLCNIC_UNIFIED_ROMIMAGE_NAME, QLCNIC_FLASH_ROMIMAGE_NAME, }; int qlcnic_load_firmware(struct qlcnic_adapter *adapter) { u64 *ptr64; u32 i, flashaddr, size; const struct firmware *fw = adapter->fw; struct pci_dev *pdev = adapter->pdev; dev_info(&pdev->dev, "loading firmware from %s\n", fw_name[adapter->fw_type]); if (fw) { __le64 data; size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter); flashaddr = QLCNIC_BOOTLD_START; for (i = 0; i < size; i++) { data = cpu_to_le64(ptr64[i]); if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) return -EIO; flashaddr += 8; } size = (__force u32)qlcnic_get_fw_size(adapter) / 8; ptr64 = (u64 *)qlcnic_get_fw_offs(adapter); flashaddr = QLCNIC_IMAGE_START; for (i = 0; i < size; i++) { data = cpu_to_le64(ptr64[i]); if 
(qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) return -EIO; flashaddr += 8; } size = (__force u32)qlcnic_get_fw_size(adapter) % 8; if (size) { data = cpu_to_le64(ptr64[i]); if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) return -EIO; } } else { u64 data; u32 hi, lo; int ret; struct qlcnic_flt_entry bootld_entry; ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION, &bootld_entry); if (!ret) { size = bootld_entry.size / 8; flashaddr = bootld_entry.start_addr; } else { size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; flashaddr = QLCNIC_BOOTLD_START; dev_info(&pdev->dev, "using legacy method to get flash fw region"); } for (i = 0; i < size; i++) { if (qlcnic_rom_fast_read(adapter, flashaddr, (int *)&lo) != 0) return -EIO; if (qlcnic_rom_fast_read(adapter, flashaddr + 4, (int *)&hi) != 0) return -EIO; data = (((u64)hi << 32) | lo); if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) return -EIO; flashaddr += 8; } } msleep(1); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020); QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e); return 0; } static int qlcnic_validate_firmware(struct qlcnic_adapter *adapter) { __le32 val; u32 ver, bios, min_size; struct pci_dev *pdev = adapter->pdev; const struct firmware *fw = adapter->fw; u8 fw_type = adapter->fw_type; if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) { if (qlcnic_validate_unified_romimage(adapter)) return -EINVAL; min_size = QLCNIC_UNI_FW_MIN_SIZE; } else { val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]); if ((__force u32)val != QLCNIC_BDINFO_MAGIC) return -EINVAL; min_size = QLCNIC_FW_MIN_SIZE; } if (fw->size < min_size) return -EINVAL; val = qlcnic_get_fw_version(adapter); ver = QLCNIC_DECODE_VERSION(val); if (ver < QLCNIC_MIN_FW_VERSION) { dev_err(&pdev->dev, "%s: firmware version %d.%d.%d unsupported\n", fw_name[fw_type], _major(ver), _minor(ver), _build(ver)); return -EINVAL; } val = qlcnic_get_bios_version(adapter); qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int 
*)&bios); if ((__force u32)val != bios) { dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", fw_name[fw_type]); return -EINVAL; } QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC); return 0; } static void qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter) { u8 fw_type; switch (adapter->fw_type) { case QLCNIC_UNKNOWN_ROMIMAGE: fw_type = QLCNIC_UNIFIED_ROMIMAGE; break; case QLCNIC_UNIFIED_ROMIMAGE: default: fw_type = QLCNIC_FLASH_ROMIMAGE; break; } adapter->fw_type = fw_type; } void qlcnic_request_firmware(struct qlcnic_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int rc; adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE; next: qlcnic_get_next_fwtype(adapter); if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) { adapter->fw = NULL; } else { rc = request_firmware(&adapter->fw, fw_name[adapter->fw_type], &pdev->dev); if (rc != 0) goto next; rc = qlcnic_validate_firmware(adapter); if (rc != 0) { release_firmware(adapter->fw); msleep(1); goto next; } } } void qlcnic_release_firmware(struct qlcnic_adapter *adapter) { if (adapter->fw) release_firmware(adapter->fw); adapter->fw = NULL; } static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, struct qlcnic_fw_msg *msg) { u32 cable_OUI; u16 cable_len; u16 link_speed; u8 link_status, module, duplex, autoneg; u8 lb_status = 0; struct net_device *netdev = adapter->netdev; adapter->has_link_events = 1; cable_OUI = msg->body[1] & 0xffffffff; cable_len = (msg->body[1] >> 32) & 0xffff; link_speed = (msg->body[1] >> 48) & 0xffff; link_status = msg->body[2] & 0xff; duplex = (msg->body[2] >> 16) & 0xff; autoneg = (msg->body[2] >> 24) & 0xff; lb_status = (msg->body[2] >> 32) & 0x3; module = (msg->body[2] >> 8) & 0xff; if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, " "length %d\n", cable_OUI, cable_len); else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) dev_info(&netdev->dev, "unsupported cable length %d\n", cable_len); if 
(!link_status && (lb_status == QLCNIC_ILB_MODE || lb_status == QLCNIC_ELB_MODE)) adapter->ahw->loopback_state |= QLCNIC_LINKEVENT; qlcnic_advert_link_change(adapter, link_status); if (duplex == LINKEVENT_FULL_DUPLEX) adapter->link_duplex = DUPLEX_FULL; else adapter->link_duplex = DUPLEX_HALF; adapter->module_type = module; adapter->link_autoneg = autoneg; if (link_status) { adapter->link_speed = link_speed; } else { adapter->link_speed = SPEED_UNKNOWN; adapter->link_duplex = DUPLEX_UNKNOWN; } } static void qlcnic_handle_fw_message(int desc_cnt, int index, struct qlcnic_host_sds_ring *sds_ring) { struct qlcnic_fw_msg msg; struct status_desc *desc; struct qlcnic_adapter *adapter; struct device *dev; int i = 0, opcode, ret; while (desc_cnt > 0 && i < 8) { desc = &sds_ring->desc_head[index]; msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); index = get_next_index(index, sds_ring->num_desc); desc_cnt--; } adapter = sds_ring->adapter; dev = &adapter->pdev->dev; opcode = qlcnic_get_nic_msg_opcode(msg.body[0]); switch (opcode) { case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: qlcnic_handle_linkevent(adapter, &msg); break; case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK: ret = (u32)(msg.body[1]); switch (ret) { case 0: adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE; break; case 1: dev_info(dev, "loopback already in progress\n"); adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS; break; case 2: dev_info(dev, "loopback cable is not connected\n"); adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN; break; default: dev_info(dev, "loopback configure request failed," " ret %x\n", ret); adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR; break; } break; default: break; } } static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, struct qlcnic_host_rds_ring *rds_ring, struct qlcnic_rx_buffer *buffer) { struct sk_buff *skb; dma_addr_t dma; struct pci_dev *pdev = adapter->pdev; skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size); 
if (!skb) { adapter->stats.skb_alloc_failure++; return -ENOMEM; } skb_reserve(skb, NET_IP_ALIGN); dma = pci_map_single(pdev, skb->data, rds_ring->dma_size, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(pdev, dma)) { adapter->stats.rx_dma_map_error++; dev_kfree_skb_any(skb); return -ENOMEM; } buffer->skb = skb; buffer->dma = dma; return 0; } static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum) { struct qlcnic_rx_buffer *buffer; struct sk_buff *skb; buffer = &rds_ring->rx_buf_arr[index]; if (unlikely(buffer->skb == NULL)) { WARN_ON(1); return NULL; } pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, PCI_DMA_FROMDEVICE); skb = buffer->skb; if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) { adapter->stats.csummed++; skb->ip_summed = CHECKSUM_UNNECESSARY; } else { skb_checksum_none_assert(skb); } skb->dev = adapter->netdev; buffer->skb = NULL; return skb; } static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb, u16 *vlan_tag) { struct ethhdr *eth_hdr; if (!__vlan_get_tag(skb, vlan_tag)) { eth_hdr = (struct ethhdr *) skb->data; memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); skb_pull(skb, VLAN_HLEN); } if (!adapter->pvid) return 0; if (*vlan_tag == adapter->pvid) { /* Outer vlan tag. 
Packet should follow non-vlan path */ *vlan_tag = 0xffff; return 0; } if (adapter->flags & QLCNIC_TAGGING_ENABLED) return 0; return -EINVAL; } static struct qlcnic_rx_buffer * qlcnic_process_rcv(struct qlcnic_adapter *adapter, struct qlcnic_host_sds_ring *sds_ring, int ring, u64 sts_data0) { struct net_device *netdev = adapter->netdev; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_rx_buffer *buffer; struct sk_buff *skb; struct qlcnic_host_rds_ring *rds_ring; int index, length, cksum, pkt_offset; u16 vid = 0xffff; if (unlikely(ring >= adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = qlcnic_get_sts_refhandle(sts_data0); if (unlikely(index >= rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; length = qlcnic_get_sts_totallength(sts_data0); cksum = qlcnic_get_sts_status(sts_data0); pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); if (!skb) return buffer; if (length > rds_ring->skb_size) skb_put(skb, rds_ring->skb_size); else skb_put(skb, length); if (pkt_offset) skb_pull(skb, pkt_offset); if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { adapter->stats.rxdropped++; dev_kfree_skb(skb); return buffer; } skb->protocol = eth_type_trans(skb, netdev); if (vid != 0xffff) __vlan_hwaccel_put_tag(skb, vid); napi_gro_receive(&sds_ring->napi, skb); adapter->stats.rx_pkts++; adapter->stats.rxbytes += length; return buffer; } #define QLC_TCP_HDR_SIZE 20 #define QLC_TCP_TS_OPTION_SIZE 12 #define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE) static struct qlcnic_rx_buffer * qlcnic_process_lro(struct qlcnic_adapter *adapter, struct qlcnic_host_sds_ring *sds_ring, int ring, u64 sts_data0, u64 sts_data1) { struct net_device *netdev = adapter->netdev; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_rx_buffer *buffer; struct sk_buff *skb; struct qlcnic_host_rds_ring *rds_ring; struct iphdr 
*iph; struct tcphdr *th; bool push, timestamp; int l2_hdr_offset, l4_hdr_offset; int index; u16 lro_length, length, data_offset; u32 seq_number; u16 vid = 0xffff; if (unlikely(ring > adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = qlcnic_get_lro_sts_refhandle(sts_data0); if (unlikely(index > rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; timestamp = qlcnic_get_lro_sts_timestamp(sts_data0); lro_length = qlcnic_get_lro_sts_length(sts_data0); l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0); l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0); push = qlcnic_get_lro_sts_push_flag(sts_data0); seq_number = qlcnic_get_lro_sts_seq_number(sts_data1); skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); if (!skb) return buffer; if (timestamp) data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE; else data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE; skb_put(skb, lro_length + data_offset); skb_pull(skb, l2_hdr_offset); if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { adapter->stats.rxdropped++; dev_kfree_skb(skb); return buffer; } skb->protocol = eth_type_trans(skb, netdev); iph = (struct iphdr *)skb->data; th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); length = (iph->ihl << 2) + (th->doff << 2) + lro_length; iph->tot_len = htons(length); iph->check = 0; iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); th->psh = push; th->seq = htonl(seq_number); length = skb->len; if (vid != 0xffff) __vlan_hwaccel_put_tag(skb, vid); netif_receive_skb(skb); adapter->stats.lro_pkts++; adapter->stats.lrobytes += length; return buffer; } int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) { struct qlcnic_adapter *adapter = sds_ring->adapter; struct list_head *cur; struct status_desc *desc; struct qlcnic_rx_buffer *rxbuf; u64 sts_data0, sts_data1; int count = 0; int opcode, ring, desc_cnt; u32 consumer = sds_ring->consumer; while (count < max) { desc = 
&sds_ring->desc_head[consumer]; sts_data0 = le64_to_cpu(desc->status_desc_data[0]); if (!(sts_data0 & STATUS_OWNER_HOST)) break; desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); opcode = qlcnic_get_sts_opcode(sts_data0); switch (opcode) { case QLCNIC_RXPKT_DESC: case QLCNIC_OLD_RXPKT_DESC: case QLCNIC_SYN_OFFLOAD: ring = qlcnic_get_sts_type(sts_data0); rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring, sts_data0); break; case QLCNIC_LRO_DESC: ring = qlcnic_get_lro_sts_type(sts_data0); sts_data1 = le64_to_cpu(desc->status_desc_data[1]); rxbuf = qlcnic_process_lro(adapter, sds_ring, ring, sts_data0, sts_data1); break; case QLCNIC_RESPONSE_DESC: qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); default: goto skip; } WARN_ON(desc_cnt > 1); if (likely(rxbuf)) list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); else adapter->stats.null_rxbuf++; skip: for (; desc_cnt > 0; desc_cnt--) { desc = &sds_ring->desc_head[consumer]; desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); consumer = get_next_index(consumer, sds_ring->num_desc); } count++; } for (ring = 0; ring < adapter->max_rds_rings; ring++) { struct qlcnic_host_rds_ring *rds_ring = &adapter->recv_ctx->rds_rings[ring]; if (!list_empty(&sds_ring->free_list[ring])) { list_for_each(cur, &sds_ring->free_list[ring]) { rxbuf = list_entry(cur, struct qlcnic_rx_buffer, list); qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf); } spin_lock(&rds_ring->lock); list_splice_tail_init(&sds_ring->free_list[ring], &rds_ring->free_list); spin_unlock(&rds_ring->lock); } qlcnic_post_rx_buffers_nodb(adapter, rds_ring); } if (count) { sds_ring->consumer = consumer; writel(consumer, sds_ring->crb_sts_consumer); } return count; } void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, struct qlcnic_host_rds_ring *rds_ring) { struct rcv_desc *pdesc; struct qlcnic_rx_buffer *buffer; int count = 0; u32 producer; struct list_head *head; producer = rds_ring->producer; head = &rds_ring->free_list; while (!list_empty(head)) { 
buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); if (!buffer->skb) { if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) break; } count++; list_del(&buffer->list); /* make a rcv descriptor */ pdesc = &rds_ring->desc_head[producer]; pdesc->addr_buffer = cpu_to_le64(buffer->dma); pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); producer = get_next_index(producer, rds_ring->num_desc); } if (count) { rds_ring->producer = producer; writel((producer-1) & (rds_ring->num_desc-1), rds_ring->crb_rcv_producer); } } static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, struct qlcnic_host_rds_ring *rds_ring) { struct rcv_desc *pdesc; struct qlcnic_rx_buffer *buffer; int count = 0; uint32_t producer; struct list_head *head; if (!spin_trylock(&rds_ring->lock)) return; producer = rds_ring->producer; head = &rds_ring->free_list; while (!list_empty(head)) { buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); if (!buffer->skb) { if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) break; } count++; list_del(&buffer->list); /* make a rcv descriptor */ pdesc = &rds_ring->desc_head[producer]; pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); pdesc->addr_buffer = cpu_to_le64(buffer->dma); producer = get_next_index(producer, rds_ring->num_desc); } if (count) { rds_ring->producer = producer; writel((producer - 1) & (rds_ring->num_desc - 1), rds_ring->crb_rcv_producer); } spin_unlock(&rds_ring->lock); } static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter) { int i; unsigned char *data = skb->data; printk(KERN_INFO "\n"); for (i = 0; i < skb->len; i++) { QLCDB(adapter, DRV, "%02x ", data[i]); if ((i & 0x0f) == 8) printk(KERN_INFO "\n"); } } void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, struct qlcnic_host_sds_ring *sds_ring, int ring, u64 sts_data0) { struct qlcnic_recv_context 
*recv_ctx = adapter->recv_ctx; struct sk_buff *skb; struct qlcnic_host_rds_ring *rds_ring; int index, length, cksum, pkt_offset; if (unlikely(ring >= adapter->max_rds_rings)) return; rds_ring = &recv_ctx->rds_rings[ring]; index = qlcnic_get_sts_refhandle(sts_data0); length = qlcnic_get_sts_totallength(sts_data0); if (unlikely(index >= rds_ring->num_desc)) return; cksum = qlcnic_get_sts_status(sts_data0); pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); if (!skb) return; if (length > rds_ring->skb_size) skb_put(skb, rds_ring->skb_size); else skb_put(skb, length); if (pkt_offset) skb_pull(skb, pkt_offset); if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr)) adapter->diag_cnt++; else dump_skb(skb, adapter); dev_kfree_skb_any(skb); adapter->stats.rx_pkts++; adapter->stats.rxbytes += length; return; } void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) { struct qlcnic_adapter *adapter = sds_ring->adapter; struct status_desc *desc; u64 sts_data0; int ring, opcode, desc_cnt; u32 consumer = sds_ring->consumer; desc = &sds_ring->desc_head[consumer]; sts_data0 = le64_to_cpu(desc->status_desc_data[0]); if (!(sts_data0 & STATUS_OWNER_HOST)) return; desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); opcode = qlcnic_get_sts_opcode(sts_data0); switch (opcode) { case QLCNIC_RESPONSE_DESC: qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); break; default: ring = qlcnic_get_sts_type(sts_data0); qlcnic_process_rcv_diag(adapter, sds_ring, ring, sts_data0); break; } for (; desc_cnt > 0; desc_cnt--) { desc = &sds_ring->desc_head[consumer]; desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); consumer = get_next_index(consumer, sds_ring->num_desc); } sds_ring->consumer = consumer; writel(consumer, sds_ring->crb_sts_consumer); } void qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2, u8 alt_mac, u8 *mac) { u32 mac_low, mac_high; int i; mac_low = off1; mac_high = 
off2; if (alt_mac) { mac_low |= (mac_low >> 16) | (mac_high << 16); mac_high >>= 16; } for (i = 0; i < 2; i++) mac[i] = (u8)(mac_high >> ((1 - i) * 8)); for (i = 2; i < 6; i++) mac[i] = (u8)(mac_low >> ((5 - i) * 8)); }
gpl-2.0
mr-tweaker/sabermod_kernel_cancro
lib/swiotlb.c
4805
26277
/* * Dynamic DMA mapping support. * * This implementation is a fallback for platforms that do not support * I/O TLBs (aka DMA address translation hardware). * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com> * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com> * Copyright (C) 2000, 2003 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API. * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid * unnecessary i-cache flushing. * 04/07/.. ak Better overflow handling. Assorted fixes. * 05/09/10 linville Add support for syncing ranges, support syncing for * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup. * 08/12/11 beckyb Add highmem support */ #include <linux/cache.h> #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/swiotlb.h> #include <linux/pfn.h> #include <linux/types.h> #include <linux/ctype.h> #include <linux/highmem.h> #include <linux/gfp.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/scatterlist.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/iommu-helper.h> #define OFFSET(val,align) ((unsigned long) \ ( (val) & ( (align) - 1))) #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) /* * Minimum IO TLB size to bother booting with. Systems with mainly * 64bit capable cards will only lightly use the swiotlb. If we can't * allocate a contiguous 1MB, we're probably in trouble anyway. */ #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) int swiotlb_force; /* * Used to do a quick range check in swiotlb_tbl_unmap_single and * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this * API. */ static char *io_tlb_start, *io_tlb_end; /* * The number of IO TLB blocks (in groups of 64) between io_tlb_start and * io_tlb_end. This is command line adjustable via setup_io_tlb_npages. 
*/ static unsigned long io_tlb_nslabs; /* * When the IOMMU overflows we return a fallback buffer. This sets the size. */ static unsigned long io_tlb_overflow = 32*1024; static void *io_tlb_overflow_buffer; /* * This is a free list describing the number of free entries available from * each index */ static unsigned int *io_tlb_list; static unsigned int io_tlb_index; /* * We need to save away the original address corresponding to a mapped entry * for the sync operations. */ static phys_addr_t *io_tlb_orig_addr; /* * Protect the above data structures in the map and unmap calls */ static DEFINE_SPINLOCK(io_tlb_lock); static int late_alloc; static int __init setup_io_tlb_npages(char *str) { if (isdigit(*str)) { io_tlb_nslabs = simple_strtoul(str, &str, 0); /* avoid tail segment of size < IO_TLB_SEGSIZE */ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); } if (*str == ',') ++str; if (!strcmp(str, "force")) swiotlb_force = 1; return 1; } __setup("swiotlb=", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? */ unsigned long swiotlb_nr_tbl(void) { return io_tlb_nslabs; } EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); /* Note that this doesn't work with highmem page */ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, volatile void *address) { return phys_to_dma(hwdev, virt_to_phys(address)); } void swiotlb_print_info(void) { unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; phys_addr_t pstart, pend; pstart = virt_to_phys(io_tlb_start); pend = virt_to_phys(io_tlb_end); printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n", bytes >> 20, io_tlb_start, io_tlb_end); printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n", (unsigned long long)pstart, (unsigned long long)pend); } void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) { unsigned long i, bytes; bytes = nslabs << IO_TLB_SHIFT; io_tlb_nslabs = nslabs; io_tlb_start = tlb; io_tlb_end = io_tlb_start + bytes; /* * Allocate and initialize the free list array. 
This array is used * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE * between io_tlb_start and io_tlb_end. */ io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); for (i = 0; i < io_tlb_nslabs; i++) io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); io_tlb_index = 0; io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); /* * Get the overflow emergency buffer */ io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow)); if (!io_tlb_overflow_buffer) panic("Cannot allocate SWIOTLB overflow buffer!\n"); if (verbose) swiotlb_print_info(); } /* * Statically reserve bounce buffer space and initialize bounce buffer data * structures for the software IO TLB used to implement the DMA API. */ void __init swiotlb_init_with_default_size(size_t default_size, int verbose) { unsigned long bytes; if (!io_tlb_nslabs) { io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); } bytes = io_tlb_nslabs << IO_TLB_SHIFT; /* * Get IO TLB memory from the low pages */ io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes)); if (!io_tlb_start) panic("Cannot allocate SWIOTLB buffer"); swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose); } void __init swiotlb_init(int verbose) { swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */ } /* * Systems with larger DMA zones (those that don't support ISA) can * initialize the swiotlb later using the slab allocator if needed. * This should be just like above, but with some error catching. 
*/ int swiotlb_late_init_with_default_size(size_t default_size) { unsigned long i, bytes, req_nslabs = io_tlb_nslabs; unsigned int order; if (!io_tlb_nslabs) { io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); } /* * Get IO TLB memory from the low pages */ order = get_order(io_tlb_nslabs << IO_TLB_SHIFT); io_tlb_nslabs = SLABS_PER_PAGE << order; bytes = io_tlb_nslabs << IO_TLB_SHIFT; while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); if (io_tlb_start) break; order--; } if (!io_tlb_start) goto cleanup1; if (order != get_order(bytes)) { printk(KERN_WARNING "Warning: only able to allocate %ld MB " "for software IO TLB\n", (PAGE_SIZE << order) >> 20); io_tlb_nslabs = SLABS_PER_PAGE << order; bytes = io_tlb_nslabs << IO_TLB_SHIFT; } io_tlb_end = io_tlb_start + bytes; memset(io_tlb_start, 0, bytes); /* * Allocate and initialize the free list array. This array is used * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE * between io_tlb_start and io_tlb_end. 
*/ io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL, get_order(io_tlb_nslabs * sizeof(int))); if (!io_tlb_list) goto cleanup2; for (i = 0; i < io_tlb_nslabs; i++) io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); io_tlb_index = 0; io_tlb_orig_addr = (phys_addr_t *) __get_free_pages(GFP_KERNEL, get_order(io_tlb_nslabs * sizeof(phys_addr_t))); if (!io_tlb_orig_addr) goto cleanup3; memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t)); /* * Get the overflow emergency buffer */ io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA, get_order(io_tlb_overflow)); if (!io_tlb_overflow_buffer) goto cleanup4; swiotlb_print_info(); late_alloc = 1; return 0; cleanup4: free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * sizeof(phys_addr_t))); io_tlb_orig_addr = NULL; cleanup3: free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * sizeof(int))); io_tlb_list = NULL; cleanup2: io_tlb_end = NULL; free_pages((unsigned long)io_tlb_start, order); io_tlb_start = NULL; cleanup1: io_tlb_nslabs = req_nslabs; return -ENOMEM; } void __init swiotlb_free(void) { if (!io_tlb_overflow_buffer) return; if (late_alloc) { free_pages((unsigned long)io_tlb_overflow_buffer, get_order(io_tlb_overflow)); free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * sizeof(phys_addr_t))); free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * sizeof(int))); free_pages((unsigned long)io_tlb_start, get_order(io_tlb_nslabs << IO_TLB_SHIFT)); } else { free_bootmem_late(__pa(io_tlb_overflow_buffer), PAGE_ALIGN(io_tlb_overflow)); free_bootmem_late(__pa(io_tlb_orig_addr), PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); free_bootmem_late(__pa(io_tlb_list), PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); free_bootmem_late(__pa(io_tlb_start), PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); } io_tlb_nslabs = 0; } static int is_swiotlb_buffer(phys_addr_t paddr) { return paddr >= virt_to_phys(io_tlb_start) && paddr < virt_to_phys(io_tlb_end); } 
/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 *
 * Copies @size bytes between the original physical buffer @phys and the
 * bounce buffer @dma_addr, in the direction given by @dir.  Handles
 * highmem pages (no permanent kernel mapping) by kmapping one page at a
 * time with interrupts disabled.
 */
void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
		    enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(phys);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = phys & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			/* Copy at most to the end of the current page. */
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(dma_addr, buffer + offset, sz);
			else
				memcpy(buffer + offset, dma_addr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			dma_addr += sz;
			offset = 0;
		}
	} else {
		/* Lowmem page: a direct kernel mapping exists. */
		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, phys_to_virt(phys), size);
		else
			memcpy(phys_to_virt(phys), dma_addr, size);
	}
}
EXPORT_SYMBOL_GPL(swiotlb_bounce);

/*
 * Allocate a bounce buffer of @size bytes for @phys from the swiotlb pool,
 * honouring the device's DMA segment boundary (@tbl_dma_addr is the bus
 * address of the pool start).  Returns the kernel virtual address of the
 * bounce buffer, or NULL if the pool is exhausted.
 */
void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
			     phys_addr_t phys, size_t size,
			     enum dma_data_direction dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	tbl_dma_addr &= mask;

	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
*/
	spin_lock_irqsave(&io_tlb_lock, flags);
	/* Start searching from the last allocation point, aligned to the
	 * required stride, wrapping around the whole slot array once. */
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		/* Skip candidates that would cross the device's DMA
		 * segment boundary. */
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			/* Fix up free-run counts of the preceding slots in
			 * the same segment so they no longer extend past the
			 * newly allocated range. */
			for (i = index - 1;
			     (OFFSET(i, IO_TLB_SEGSIZE) !=
			      IO_TLB_SEGSIZE - 1) && io_tlb_list[i];
			     i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 * Convenience wrapper around swiotlb_tbl_map_single() using the global
 * pool's start address.
 */
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
	   enum dma_data_direction dir)
{
	dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);

	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
*/
void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr,
			      size_t size, enum dma_data_direction dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		/* Seed the count with the free run that follows, unless it
		 * lies in the next IO_TLB_SEGSIZE segment. */
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with superseding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1;
		     (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) &&
		     io_tlb_list[i];
		     i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);

/*
 * Sync a bounce buffer with its original page without unmapping it.
 * @target selects whether the CPU or the device is about to use the data.
 */
void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr,
			     size_t size, enum dma_data_direction dir,
			     enum dma_sync_target target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	/* Allow a sync at an offset inside the mapped slot. */
	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
else BUG_ON(dir != DMA_FROM_DEVICE); break; default: BUG(); } } EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single); void * swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags) { dma_addr_t dev_addr; void *ret; int order = get_order(size); u64 dma_mask = DMA_BIT_MASK(32); if (hwdev && hwdev->coherent_dma_mask) dma_mask = hwdev->coherent_dma_mask; ret = (void *)__get_free_pages(flags, order); if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) { /* * The allocated memory isn't reachable by the device. */ free_pages((unsigned long) ret, order); ret = NULL; } if (!ret) { /* * We are either out of memory or the device can't DMA to * GFP_DMA memory; fall back on map_single(), which * will grab memory from the lowest available address range. */ ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); if (!ret) return NULL; } memset(ret, 0, size); dev_addr = swiotlb_virt_to_bus(hwdev, ret); /* Confirm address can be DMA'd by device */ if (dev_addr + size - 1 > dma_mask) { printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", (unsigned long long)dma_mask, (unsigned long long)dev_addr); /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE); return NULL; } *dma_handle = dev_addr; return ret; } EXPORT_SYMBOL(swiotlb_alloc_coherent); void swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dev_addr) { phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); WARN_ON(irqs_disabled()); if (!is_swiotlb_buffer(paddr)) free_pages((unsigned long)vaddr, get_order(size)); else /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */ swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); } EXPORT_SYMBOL(swiotlb_free_coherent); static void swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, int do_panic) { /* * Ran out of IOMMU space for this operation. This is very bad. 
* Unfortunately the drivers cannot handle this operation properly.
	 * unless they check for dma_mapping_error (most don't)
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size <= io_tlb_overflow || !do_panic)
		return;

	if (dir == DMA_BIDIRECTIONAL)
		panic("DMA: Random memory could be DMA accessed\n");
	if (dir == DMA_FROM_DEVICE)
		panic("DMA: Random memory could be DMA written\n");
	if (dir == DMA_TO_DEVICE)
		panic("DMA: Random memory could be DMA read\n");
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = phys_to_dma(dev, phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(dev, phys, size, dir);
	if (!map) {
		/* Pool exhausted: report loudly, then hand back the static
		 * overflow buffer so unchecked drivers scribble somewhere
		 * harmless instead of random memory. */
		swiotlb_full(dev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
	}

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.
The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* Bounced mapping: copy back (if needed) and free the slot. */
	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size,
					 dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem page but we could
	 * call dma_mark_clean() with highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

/* dma_map_ops ->unmap_page hook; @attrs is unused here. */
void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.
At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* Bounced mapping: copy between the bounce slot and the original. */
	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size,
					dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

/* dma_map_ops ->sync_single_for_cpu hook. */
void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

/* dma_map_ops ->sync_single_for_device hook. */
void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
*/
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		     int nelems, enum dma_data_direction dir,
		     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

		/* Bounce only when forced or when the device can't reach
		 * this segment directly. */
		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, sg_phys(sg),
					       sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				/* Roll back the segments mapped so far. */
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

/* Attribute-less wrapper around swiotlb_map_sg_attrs(). */
int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       enum dma_data_direction dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
		       struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);

}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

/* Attribute-less wrapper around swiotlb_unmap_sg_attrs(). */
void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 enum dma_data_direction dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
*/
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir,
		enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg->dma_length, dir, target);
}

/* dma_map_ops ->sync_sg_for_cpu hook. */
void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

/* dma_map_ops ->sync_sg_for_device hook. */
void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

/*
 * A mapping error is signalled by returning the bus address of the static
 * overflow buffer from the map paths; detect that sentinel here.
 */
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr ==
		swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);
gpl-2.0
wpandroidios/android_kernel_htc_b2wlj_LP50_Sense7
tools/perf/util/trace-event-read.c
4805
9873
/*
 * Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#define _FILE_OFFSET_BITS 64

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

#include "../perf.h"
#include "util.h"
#include "trace-event.h"

/* fd of the trace data file currently being parsed */
static int input_fd;

/* non-zero: pages are read() into a buffer instead of mmap()ed */
static int read_page;

int file_bigendian;
int host_bigendian;
static int long_size;

static unsigned long page_size;

/* when non-zero, every byte read is counted into this accumulator */
static ssize_t calc_data_size;
/* when true, every byte read is also echoed to stdout */
static bool repipe;

/*
 * Read exactly @size bytes from @fd into @buf (looping over short reads),
 * optionally repiping the data to stdout.  Returns @size, or -1 on
 * EOF/error.
 */
static int do_read(int fd, void *buf, int size)
{
	int rsize = size;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return -1;

		if (repipe) {
			int retw = write(STDOUT_FILENO, buf, ret);

			if (retw <= 0 || retw != ret)
				die("repiping input file");
		}

		size -= ret;
		buf += ret;
	}

	return rsize;
}

/* Like do_read() on input_fd, but dies on failure and tallies the bytes. */
static int read_or_die(void *data, int size)
{
	int r;

	r = do_read(input_fd, data, size);
	if (r <= 0)
		die("reading input file (size expected=%d received=%d)",
		    size, r);

	if (calc_data_size)
		calc_data_size += r;

	return r;
}

/* If it fails, the next
read will report it */
/* Consume and discard @size bytes from the input stream. */
static void skip(int size)
{
	char buf[BUFSIZ];
	int r;

	while (size) {
		r = size > BUFSIZ ? BUFSIZ : size;
		read_or_die(buf, r);
		size -= r;
	};
}

/* Read a 4-byte value and convert it from file to host endianness. */
static unsigned int read4(void)
{
	unsigned int data;

	read_or_die(&data, 4);
	return __data2host4(data);
}

/* Read an 8-byte value and convert it from file to host endianness. */
static unsigned long long read8(void)
{
	unsigned long long data;

	read_or_die(&data, 8);
	return __data2host8(data);
}

/*
 * Read a NUL-terminated string from the input and return a malloc'd copy
 * (caller frees).  Dies on read error or premature EOF.
 * NOTE(review): the temporary buffer is a fixed BUFSIZ array with an
 * unchecked index — a string longer than BUFSIZ would overrun it; input is
 * presumably trusted trace data, but worth confirming.
 */
static char *read_string(void)
{
	char buf[BUFSIZ];
	char *str = NULL;
	int size = 0;
	off_t r;
	char c;

	for (;;) {
		r = read(input_fd, &c, 1);
		if (r < 0)
			die("reading input file");

		if (!r)
			die("no data");

		if (repipe) {
			int retw = write(STDOUT_FILENO, &c, 1);

			if (retw <= 0 || retw != r)
				die("repiping input file string");
		}

		buf[size++] = c;

		if (!c)
			break;
	}

	if (calc_data_size)
		calc_data_size += size;

	str = malloc_or_die(size);
	memcpy(str, buf, size);

	return str;
}

/* Read the recorded /proc/kallsyms blob and feed it to the parser. */
static void read_proc_kallsyms(void)
{
	unsigned int size;
	char *buf;

	size = read4();
	if (!size)
		return;

	buf = malloc_or_die(size + 1);
	read_or_die(buf, size);
	buf[size] = '\0';

	parse_proc_kallsyms(buf, size);

	free(buf);
}

/* Read the recorded ftrace printk formats and feed them to the parser. */
static void read_ftrace_printk(void)
{
	unsigned int size;
	char *buf;

	size = read4();
	if (!size)
		return;

	buf = malloc_or_die(size);
	read_or_die(buf, size);

	parse_ftrace_printk(buf, size);

	free(buf);
}

/*
 * Read (and mostly skip) the "header_page" and "header_event" sections
 * that describe the ring-buffer page layout.
 */
static void read_header_files(void)
{
	unsigned long long size;
	char *header_event;
	char buf[BUFSIZ];

	read_or_die(buf, 12);
	if (memcmp(buf, "header_page", 12) != 0)
		die("did not read header page");

	size = read8();
	skip(size);

	/*
	 * The size field in the page is of type long,
	 * use that instead, since it represents the kernel.
*/
	long_size = header_page_size_size;

	read_or_die(buf, 13);
	if (memcmp(buf, "header_event", 13) != 0)
		die("did not read header event");

	/* The header_event body is read but not needed; discard it. */
	size = read8();
	header_event = malloc_or_die(size);
	read_or_die(header_event, size);
	free(header_event);
}

/* Read one ftrace event format description and parse it. */
static void read_ftrace_file(unsigned long long size)
{
	char *buf;

	buf = malloc_or_die(size);
	read_or_die(buf, size);
	parse_ftrace_file(buf, size);
	free(buf);
}

/* Read one event format description for subsystem @sys and parse it. */
static void read_event_file(char *sys, unsigned long long size)
{
	char *buf;

	buf = malloc_or_die(size);
	read_or_die(buf, size);
	parse_event_file(buf, size, sys);
	free(buf);
}

/* Read the count-prefixed list of ftrace event format files. */
static void read_ftrace_files(void)
{
	unsigned long long size;
	int count;
	int i;

	count = read4();

	for (i = 0; i < count; i++) {
		size = read8();
		read_ftrace_file(size);
	}
}

/* Read per-subsystem event format files: systems, then events per system. */
static void read_event_files(void)
{
	unsigned long long size;
	char *sys;
	int systems;
	int count;
	int i,x;

	systems = read4();

	for (i = 0; i < systems; i++) {
		sys = read_string();

		count = read4();
		for (x=0; x < count; x++) {
			size = read8();
			read_event_file(sys, size);
		}
	}
}

/* Per-CPU cursor into that CPU's region of recorded ring-buffer pages. */
struct cpu_data {
	unsigned long long	offset;		/* file offset of current page */
	unsigned long long	size;		/* bytes remaining for this cpu */
	unsigned long long	timestamp;	/* running event timestamp */
	struct record		*next;		/* peeked-but-unconsumed record */
	char			*page;		/* current page (mmap or buffer) */
	int			cpu;
	int			index;		/* byte offset within page */
	int			page_size;	/* valid data bytes in page */
};

static struct cpu_data *cpu_data;

/* Advance the cursor to the start of the next page for @cpu. */
static void update_cpu_data_index(int cpu)
{
	cpu_data[cpu].offset += page_size;
	cpu_data[cpu].size -= page_size;
	cpu_data[cpu].index = 0;
}

/*
 * Load the next ring-buffer page for @cpu, either by read()ing into the
 * existing buffer (read_page mode) or by remapping via mmap.  Clears
 * cpu_data[cpu].page when the cpu's data is exhausted.
 */
static void get_next_page(int cpu)
{
	off_t save_seek;
	off_t ret;

	if (!cpu_data[cpu].page)
		return;

	if (read_page) {
		if (cpu_data[cpu].size <= page_size) {
			free(cpu_data[cpu].page);
			cpu_data[cpu].page = NULL;
			return;
		}

		update_cpu_data_index(cpu);

		/* other parts of the code may expect the pointer to not move */
		save_seek = lseek(input_fd, 0, SEEK_CUR);

		ret = lseek(input_fd, cpu_data[cpu].offset, SEEK_SET);
		if (ret == (off_t)-1)
			die("failed to lseek");
		ret = read(input_fd, cpu_data[cpu].page, page_size);
		if (ret < 0)
			die("failed to read page");

		/* reset the file pointer back */
		lseek(input_fd, save_seek, SEEK_SET);

		return;
	}
munmap(cpu_data[cpu].page, page_size);
	cpu_data[cpu].page = NULL;

	if (cpu_data[cpu].size <= page_size)
		return;

	update_cpu_data_index(cpu);

	cpu_data[cpu].page = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE,
				  input_fd, cpu_data[cpu].offset);
	if (cpu_data[cpu].page == MAP_FAILED)
		die("failed to mmap cpu %d at offset 0x%llx",
		    cpu, cpu_data[cpu].offset);
}

/* Extract the 5-bit type_len field, honouring the file's endianness. */
static unsigned int type_len4host(unsigned int type_len_ts)
{
	if (file_bigendian)
		return (type_len_ts >> 27) & ((1 << 5) - 1);
	else
		return type_len_ts & ((1 << 5) - 1);
}

/* Extract the 27-bit timestamp-delta field from a type_len_ts word. */
static unsigned int ts4host(unsigned int type_len_ts)
{
	if (file_bigendian)
		return type_len_ts & ((1 << 27) - 1);
	else
		return type_len_ts >> 5;
}

/* Byte offset of @ptr within the current page of @cpu. */
static int calc_index(void *ptr, int cpu)
{
	return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page;
}

/*
 * Return the next event record for @cpu without consuming it (it is
 * cached in cpu_data[cpu].next).  Decodes the ring-buffer framing:
 * per-page header, padding, time-extend and time-stamp pseudo events.
 * Returns NULL when the cpu's data is exhausted.
 */
struct record *trace_peek_data(int cpu)
{
	struct record *data;
	void *page = cpu_data[cpu].page;
	int idx = cpu_data[cpu].index;
	void *ptr = page + idx;
	unsigned long long extend;
	unsigned int type_len_ts;
	unsigned int type_len;
	unsigned int delta;
	unsigned int length = 0;

	/* Hand back the previously peeked record, if any. */
	if (cpu_data[cpu].next)
		return cpu_data[cpu].next;

	if (!page)
		return NULL;

	if (!idx) {
		/* FIXME: handle header page */
		if (header_page_ts_size != 8)
			die("expected a long long type for timestamp");
		cpu_data[cpu].timestamp = data2host8(ptr);
		ptr += 8;
		switch (header_page_size_size) {
		case 4:
			cpu_data[cpu].page_size = data2host4(ptr);
			ptr += 4;
			break;
		case 8:
			cpu_data[cpu].page_size = data2host8(ptr);
			ptr += 8;
			break;
		default:
			die("bad long size");
		}
		ptr = cpu_data[cpu].page + header_page_data_offset;
	}

read_again:
	idx = calc_index(ptr, cpu);
	if (idx >= cpu_data[cpu].page_size) {
		/* End of page: load the next one and retry. */
		get_next_page(cpu);
		return trace_peek_data(cpu);
	}

	type_len_ts = data2host4(ptr);
	ptr += 4;

	type_len = type_len4host(type_len_ts);
	delta = ts4host(type_len_ts);

	switch (type_len) {
	case RINGBUF_TYPE_PADDING:
		if (!delta)
			die("error, hit unexpected end of page");
		length = data2host4(ptr);
		ptr += 4;
		length *= 4;
		ptr += length;
		goto read_again;
	case
RINGBUF_TYPE_TIME_EXTEND:
		/* Pseudo event carrying extra high timestamp bits. */
		extend = data2host4(ptr);
		ptr += 4;
		extend <<= TS_SHIFT;
		extend += delta;
		cpu_data[cpu].timestamp += extend;
		goto read_again;

	case RINGBUF_TYPE_TIME_STAMP:
		ptr += 12;
		break;
	case 0:
		/* type_len 0: payload length stored explicitly. */
		length = data2host4(ptr);
		ptr += 4;
		die("here! length=%d", length);
		break;
	default:
		/* type_len encodes the payload length in 4-byte units. */
		length = type_len * 4;
		break;
	}

	cpu_data[cpu].timestamp += delta;

	data = malloc_or_die(sizeof(*data));
	memset(data, 0, sizeof(*data));

	data->ts = cpu_data[cpu].timestamp;
	data->size = length;
	data->data = ptr;
	ptr += length;

	cpu_data[cpu].index = calc_index(ptr, cpu);
	cpu_data[cpu].next = data;

	return data;
}

/* Consume and return the next record for @cpu (peek + clear cache). */
struct record *trace_read_data(int cpu)
{
	struct record *data;

	data = trace_peek_data(cpu);
	cpu_data[cpu].next = NULL;

	return data;
}

/*
 * Parse the header of a perf trace stream on @fd: magic, version,
 * endianness, long size, page size, then the format/symbol sections.
 * If @__repipe, everything read is echoed to stdout.  Returns the number
 * of header bytes consumed.
 */
ssize_t trace_report(int fd, bool __repipe)
{
	char buf[BUFSIZ];
	char test[] = { 23, 8, 68 };
	char *version;
	int show_version = 0;
	int show_funcs = 0;
	int show_printk = 0;
	ssize_t size;

	/* Start at 1 so "enabled" is distinguishable from zero bytes;
	 * corrected with the -1 below. */
	calc_data_size = 1;
	repipe = __repipe;

	input_fd = fd;

	read_or_die(buf, 3);
	if (memcmp(buf, test, 3) != 0)
		die("no trace data in the file");

	read_or_die(buf, 7);
	if (memcmp(buf, "tracing", 7) != 0)
		die("not a trace file (missing 'tracing' tag)");

	version = read_string();
	if (show_version)
		printf("version = %s\n", version);
	free(version);

	read_or_die(buf, 1);
	file_bigendian = buf[0];
	host_bigendian = bigendian();

	read_or_die(buf, 1);
	long_size = buf[0];

	page_size = read4();

	read_header_files();
	read_ftrace_files();
	read_event_files();
	read_proc_kallsyms();
	read_ftrace_printk();

	size = calc_data_size - 1;
	calc_data_size = 0;
	repipe = false;

	if (show_funcs) {
		print_funcs();
		return size;
	}
	if (show_printk) {
		print_printk();
		return size;
	}

	return size;
}
gpl-2.0
mseskir/android_kernel_vestel_55g
net/mac80211/rc80211_minstrel_debugfs.c
5573
4795
/* * Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Based on minstrel.c: * Copyright (C) 2005-2007 Derek Smithies <derek@indranet.co.nz> * Sponsored by Indranet Technologies Ltd * * Based on sample.c: * Copyright (c) 2005 John Bicket * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "rc80211_minstrel.h"

/*
 * debugfs open: render a one-shot text snapshot of the per-station
 * minstrel rate statistics into a kmalloc'd buffer held in
 * file->private_data; minstrel_stats_read() then serves it.
 */
int
minstrel_stats_open(struct inode *inode, struct file *file)
{
	struct minstrel_sta_info *mi = inode->i_private;
	struct minstrel_debugfs_info *ms;
	unsigned int i, tp, prob, eprob;
	char *p;

	ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL);
	if (!ms)
		return -ENOMEM;
	file->private_data = ms;
	p = ms->buf;
	p += sprintf(p, "rate throughput ewma prob this prob "
			"this succ/attempt success attempts\n");
	for (i = 0; i < mi->n_rates; i++) {
		struct minstrel_rate *mr = &mi->r[i];

		/* Markers: T = best throughput, t = 2nd best, P = most
		 * probable rate. */
		*(p++) = (i == mi->max_tp_rate) ? 'T' : ' ';
		*(p++) = (i == mi->max_tp_rate2) ? 't' : ' ';
		*(p++) = (i == mi->max_prob_rate) ? 'P' : ' ';
		/* bitrate is stored in 500kbit/s units. */
		p += sprintf(p, "%3u%s", mr->bitrate / 2,
				(mr->bitrate & 1 ? ".5" : " "));

		tp = mr->cur_tp / ((18000 << 10) / 96);
		prob = mr->cur_prob / 18;
		eprob = mr->probability / 18;

		p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u "
				"%3u(%3u) %8llu %8llu\n",
				tp / 10, tp % 10,
				eprob / 10, eprob % 10,
				prob / 10, prob % 10,
				mr->last_success,
				mr->last_attempts,
				(unsigned long long)mr->succ_hist,
				(unsigned long long)mr->att_hist);
	}
	p += sprintf(p, "\nTotal packet count:: ideal %d "
			"lookaround %d\n\n",
			mi->packet_count - mi->sample_count,
			mi->sample_count);
	ms->len = p - ms->buf;

	return 0;
}

/* debugfs read: serve the snapshot built at open time. */
ssize_t
minstrel_stats_read(struct file *file, char __user *buf, size_t len,
		    loff_t *ppos)
{
	struct minstrel_debugfs_info *ms;

	ms = file->private_data;
	return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
}

/* debugfs release: free the snapshot buffer. */
int
minstrel_stats_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations minstrel_stat_fops = {
	.owner = THIS_MODULE,
	.open = minstrel_stats_open,
	.read = minstrel_stats_read,
	.release = minstrel_stats_release,
	.llseek = default_llseek,
};

/* Create the per-station "rc_stats" debugfs file. */
void
minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
{
	struct minstrel_sta_info *mi = priv_sta;

	mi->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, mi,
			&minstrel_stat_fops);
}

/* Remove the per-station "rc_stats" debugfs file. */
void
minstrel_remove_sta_debugfs(void *priv, void *priv_sta)
{
	struct minstrel_sta_info *mi = priv_sta;

	debugfs_remove(mi->dbg_stats);
}
gpl-2.0
Sudokamikaze/XKernel-taoshan
sound/drivers/opl3/opl3_seq.c
8389
7636
/*
 * Copyright (c) by Uros Bizjak <uros@kss-loka.si>
 *
 * Midi Sequencer interface routines for OPL2/OPL3/OPL4 FM
 *
 * OPL2/3 FM instrument loader:
 *   alsa-tools/seq/sbiload/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include "opl3_voice.h"
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <sound/initval.h>

MODULE_AUTHOR("Uros Bizjak <uros@kss-loka.si>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ALSA driver for OPL3 FM synth");

/* Module parameter: when set, voices 6-8 are reserved for the chip's
 * built-in percussion mode instead of melodic use (see snd_opl3_synth_use). */
bool use_internal_drums = 0;
module_param(use_internal_drums, bool, 0444);
MODULE_PARM_DESC(use_internal_drums, "Enable internal OPL2/3 drums.");

/*
 * Take a reference on the module that owns the card so it cannot be
 * unloaded while the synth is in use.  Returns 0 on success, -EFAULT
 * when try_module_get() fails.
 */
int snd_opl3_synth_use_inc(struct snd_opl3 * opl3)
{
	if (!try_module_get(opl3->card->module))
		return -EFAULT;
	return 0;
}

/* Drop the module reference taken by snd_opl3_synth_use_inc(). */
void snd_opl3_synth_use_dec(struct snd_opl3 * opl3)
{
	module_put(opl3->card->module);
}

/*
 * Claim the OPL3 hwdep device exclusively and bring the chip to a known
 * state: reset it, mark every voice off, and (on OPL3-class hardware)
 * clear the 4-op connection register.
 *
 * Returns 0 on success, -EBUSY if the hwdep device is already in use.
 */
int snd_opl3_synth_setup(struct snd_opl3 * opl3)
{
	int idx;
	struct snd_hwdep *hwdep = opl3->hwdep;

	/* Exclusive use: only one opener of the hwdep device at a time. */
	mutex_lock(&hwdep->open_mutex);
	if (hwdep->used) {
		mutex_unlock(&hwdep->open_mutex);
		return -EBUSY;
	}
	hwdep->used++;
	mutex_unlock(&hwdep->open_mutex);

	snd_opl3_reset(opl3);

	/* Forget any previous voice state. */
	for (idx = 0; idx < MAX_OPL3_VOICES; idx++) {
		opl3->voices[idx].state = SNDRV_OPL3_ST_OFF;
		opl3->voices[idx].time = 0;
		opl3->voices[idx].keyon_reg = 0x00;
	}
	opl3->use_time = 0;
	opl3->connection_reg = 0x00;
	if (opl3->hardware >= OPL3_HW_OPL3) {
		/* Clear 4-op connections */
		opl3->command(opl3, OPL3_RIGHT | OPL3_REG_CONNECTION_SELECT,
			      opl3->connection_reg);
		opl3->max_voices = MAX_OPL3_VOICES;
	}
	return 0;
}

/*
 * Undo snd_opl3_synth_setup(): stop the pending system timer (under
 * sys_timer_lock), reset the chip, release the hwdep device and wake
 * anyone blocked waiting to open it.
 */
void snd_opl3_synth_cleanup(struct snd_opl3 * opl3)
{
	unsigned long flags;
	struct snd_hwdep *hwdep;

	/* Stop system timer */
	spin_lock_irqsave(&opl3->sys_timer_lock, flags);
	if (opl3->sys_timer_status) {
		del_timer(&opl3->tlist);
		opl3->sys_timer_status = 0;
	}
	spin_unlock_irqrestore(&opl3->sys_timer_lock, flags);

	snd_opl3_reset(opl3);
	hwdep = opl3->hwdep;
	mutex_lock(&hwdep->open_mutex);
	hwdep->used--;
	mutex_unlock(&hwdep->open_mutex);
	wake_up(&hwdep->open_wait);
}

/*
 * Sequencer "use" (subscription) callback: set up the chip and, if the
 * use_internal_drums parameter is set, switch it into percussion mode,
 * which reserves voices 6-8.  A module reference is taken unless the
 * subscriber is the system client.
 */
static int snd_opl3_synth_use(void *private_data, struct snd_seq_port_subscribe * info)
{
	struct snd_opl3 *opl3 = private_data;
	int err;

	if ((err = snd_opl3_synth_setup(opl3)) < 0)
		return err;

	if (use_internal_drums) {
		/* Percussion mode */
		opl3->voices[6].state = opl3->voices[7].state =
			opl3->voices[8].state = SNDRV_OPL3_ST_NOT_AVAIL;
		snd_opl3_load_drums(opl3);
		opl3->drum_reg = OPL3_PERCUSSION_ENABLE;
		opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION, opl3->drum_reg);
	} else {
		opl3->drum_reg = 0x00;
	}

	if (info->sender.client != SNDRV_SEQ_CLIENT_SYSTEM) {
		if ((err = snd_opl3_synth_use_inc(opl3)) < 0)
			return err;
	}
	opl3->synth_mode = SNDRV_OPL3_MODE_SEQ;
	return 0;
}

/*
 * Sequencer "unuse" (unsubscription) callback: tear the chip down and
 * drop the module reference taken in snd_opl3_synth_use().
 */
static int snd_opl3_synth_unuse(void *private_data, struct snd_seq_port_subscribe * info)
{
	struct snd_opl3 *opl3 = private_data;

	snd_opl3_synth_cleanup(opl3);

	if (info->sender.client != SNDRV_SEQ_CLIENT_SYSTEM)
		snd_opl3_synth_use_dec(opl3);
	return 0;
}

/*
 * MIDI emulation operators
 */
struct snd_midi_op opl3_ops = {
	.note_on =		snd_opl3_note_on,
	.note_off =		snd_opl3_note_off,
	.key_press =		snd_opl3_key_press,
	.note_terminate =	snd_opl3_terminate_note,
	.control =		snd_opl3_control,
	.nrpn =			snd_opl3_nrpn,
	.sysex =		snd_opl3_sysex,
};

/*
 * Sequencer event input callback: hand every incoming event to the
 * generic MIDI emulation layer, which dispatches through opl3_ops.
 */
static int snd_opl3_synth_event_input(struct snd_seq_event * ev, int direct,
				      void *private_data, int atomic, int hop)
{
	struct snd_opl3 *opl3 = private_data;

	snd_midi_process_event(&opl3_ops, ev, opl3->chset);
	return 0;
}

/* ------------------------------ */

/* Port private_free callback: release the MIDI channel set. */
static void snd_opl3_synth_free_port(void *private_data)
{
	struct snd_opl3 *opl3 = private_data;

	snd_midi_channel_free_set(opl3->chset);
}

/*
 * Allocate a 16-channel MIDI channel set and attach a writable
 * sequencer port for it.  The voice count advertised on the port
 * depends on whether the hardware is OPL2- or OPL3-class.
 *
 * Returns 0 on success, -ENOMEM if the channel set cannot be
 * allocated, or the negative error from snd_seq_event_port_attach().
 */
static int snd_opl3_synth_create_port(struct snd_opl3 * opl3)
{
	struct snd_seq_port_callback callbacks;
	char name[32];
	int voices, opl_ver;

	voices = (opl3->hardware < OPL3_HW_OPL3) ?
		MAX_OPL2_VOICES : MAX_OPL3_VOICES;
	opl3->chset = snd_midi_channel_alloc_set(16);
	if (opl3->chset == NULL)
		return -ENOMEM;
	opl3->chset->private_data = opl3;

	memset(&callbacks, 0, sizeof(callbacks));
	callbacks.owner = THIS_MODULE;
	callbacks.use = snd_opl3_synth_use;
	callbacks.unuse = snd_opl3_synth_unuse;
	callbacks.event_input = snd_opl3_synth_event_input;
	callbacks.private_free = snd_opl3_synth_free_port;
	callbacks.private_data = opl3;

	/* Port name carries the OPL generation, e.g. "OPL3 FM Port". */
	opl_ver = (opl3->hardware & OPL3_HW_MASK) >> 8;
	sprintf(name, "OPL%i FM Port", opl_ver);

	opl3->chset->client = opl3->seq_client;
	opl3->chset->port = snd_seq_event_port_attach(opl3->seq_client, &callbacks,
						      SNDRV_SEQ_PORT_CAP_WRITE |
						      SNDRV_SEQ_PORT_CAP_SUBS_WRITE,
						      SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC |
						      SNDRV_SEQ_PORT_TYPE_MIDI_GM |
						      SNDRV_SEQ_PORT_TYPE_DIRECT_SAMPLE |
						      SNDRV_SEQ_PORT_TYPE_HARDWARE |
						      SNDRV_SEQ_PORT_TYPE_SYNTHESIZER,
						      16, voices,
						      name);
	if (opl3->chset->port < 0) {
		int port;
		port = opl3->chset->port;
		snd_midi_channel_free_set(opl3->chset);
		return port;
	}
	return 0;
}

/* ------------------------------ */

/*
 * Sequencer device "new" callback: create the kernel client and its
 * port, then initialize the per-device system timer state.  On port
 * creation failure the client is deleted again so nothing leaks.
 */
static int snd_opl3_seq_new_device(struct snd_seq_device *dev)
{
	struct snd_opl3 *opl3;
	int client, err;
	char name[32];
	int opl_ver;

	opl3 = *(struct snd_opl3 **)SNDRV_SEQ_DEVICE_ARGPTR(dev);
	if (opl3 == NULL)
		return -EINVAL;

	spin_lock_init(&opl3->voice_lock);

	opl3->seq_client = -1;

	/* allocate new client */
	opl_ver = (opl3->hardware & OPL3_HW_MASK) >> 8;
	sprintf(name, "OPL%i FM synth", opl_ver);
	client = opl3->seq_client =
		snd_seq_create_kernel_client(opl3->card, opl3->seq_dev_num,
					     name);
	if (client < 0)
		return client;

	if ((err = snd_opl3_synth_create_port(opl3)) < 0) {
		snd_seq_delete_kernel_client(client);
		opl3->seq_client = -1;
		return err;
	}

	/* setup system timer */
	init_timer(&opl3->tlist);
	opl3->tlist.function = snd_opl3_timer_func;
	opl3->tlist.data = (unsigned long) opl3;
	spin_lock_init(&opl3->sys_timer_lock);
	opl3->sys_timer_status = 0;

#ifdef CONFIG_SND_SEQUENCER_OSS
	snd_opl3_init_seq_oss(opl3, name);
#endif
	return 0;
}

/*
 * Sequencer device "delete" callback: tear down the OSS emulation (if
 * configured) and the kernel client created in snd_opl3_seq_new_device().
 */
static int snd_opl3_seq_delete_device(struct snd_seq_device *dev)
{
	struct snd_opl3 *opl3;

	opl3 = *(struct snd_opl3 **)SNDRV_SEQ_DEVICE_ARGPTR(dev);
	if (opl3 == NULL)
		return -EINVAL;

#ifdef CONFIG_SND_SEQUENCER_OSS
	snd_opl3_free_seq_oss(opl3);
#endif
	if (opl3->seq_client >= 0) {
		snd_seq_delete_kernel_client(opl3->seq_client);
		opl3->seq_client = -1;
	}
	return 0;
}

/* Register this driver for OPL3 sequencer devices at module load. */
static int __init alsa_opl3_seq_init(void)
{
	static struct snd_seq_dev_ops ops =
	{
		snd_opl3_seq_new_device,
		snd_opl3_seq_delete_device
	};

	return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL3, &ops,
					      sizeof(struct snd_opl3 *));
}

static void __exit alsa_opl3_seq_exit(void)
{
	snd_seq_device_unregister_driver(SNDRV_SEQ_DEV_ID_OPL3);
}

module_init(alsa_opl3_seq_init)
module_exit(alsa_opl3_seq_exit)
gpl-2.0
OneEducation/kernel-rk310-lollipop-cx929
sound/drivers/opl3/opl3_seq.c
8389
7636
/*
 * Copyright (c) by Uros Bizjak <uros@kss-loka.si>
 *
 * Midi Sequencer interface routines for OPL2/OPL3/OPL4 FM
 *
 * OPL2/3 FM instrument loader:
 *   alsa-tools/seq/sbiload/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include "opl3_voice.h"
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <sound/initval.h>

MODULE_AUTHOR("Uros Bizjak <uros@kss-loka.si>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ALSA driver for OPL3 FM synth");

/* Module parameter: when set, voices 6-8 are reserved for the chip's
 * built-in percussion mode instead of melodic use (see snd_opl3_synth_use). */
bool use_internal_drums = 0;
module_param(use_internal_drums, bool, 0444);
MODULE_PARM_DESC(use_internal_drums, "Enable internal OPL2/3 drums.");

/*
 * Take a reference on the module that owns the card so it cannot be
 * unloaded while the synth is in use.  Returns 0 on success, -EFAULT
 * when try_module_get() fails.
 */
int snd_opl3_synth_use_inc(struct snd_opl3 * opl3)
{
	if (!try_module_get(opl3->card->module))
		return -EFAULT;
	return 0;
}

/* Drop the module reference taken by snd_opl3_synth_use_inc(). */
void snd_opl3_synth_use_dec(struct snd_opl3 * opl3)
{
	module_put(opl3->card->module);
}

/*
 * Claim the OPL3 hwdep device exclusively and bring the chip to a known
 * state: reset it, mark every voice off, and (on OPL3-class hardware)
 * clear the 4-op connection register.
 *
 * Returns 0 on success, -EBUSY if the hwdep device is already in use.
 */
int snd_opl3_synth_setup(struct snd_opl3 * opl3)
{
	int idx;
	struct snd_hwdep *hwdep = opl3->hwdep;

	/* Exclusive use: only one opener of the hwdep device at a time. */
	mutex_lock(&hwdep->open_mutex);
	if (hwdep->used) {
		mutex_unlock(&hwdep->open_mutex);
		return -EBUSY;
	}
	hwdep->used++;
	mutex_unlock(&hwdep->open_mutex);

	snd_opl3_reset(opl3);

	/* Forget any previous voice state. */
	for (idx = 0; idx < MAX_OPL3_VOICES; idx++) {
		opl3->voices[idx].state = SNDRV_OPL3_ST_OFF;
		opl3->voices[idx].time = 0;
		opl3->voices[idx].keyon_reg = 0x00;
	}
	opl3->use_time = 0;
	opl3->connection_reg = 0x00;
	if (opl3->hardware >= OPL3_HW_OPL3) {
		/* Clear 4-op connections */
		opl3->command(opl3, OPL3_RIGHT | OPL3_REG_CONNECTION_SELECT,
			      opl3->connection_reg);
		opl3->max_voices = MAX_OPL3_VOICES;
	}
	return 0;
}

/*
 * Undo snd_opl3_synth_setup(): stop the pending system timer (under
 * sys_timer_lock), reset the chip, release the hwdep device and wake
 * anyone blocked waiting to open it.
 */
void snd_opl3_synth_cleanup(struct snd_opl3 * opl3)
{
	unsigned long flags;
	struct snd_hwdep *hwdep;

	/* Stop system timer */
	spin_lock_irqsave(&opl3->sys_timer_lock, flags);
	if (opl3->sys_timer_status) {
		del_timer(&opl3->tlist);
		opl3->sys_timer_status = 0;
	}
	spin_unlock_irqrestore(&opl3->sys_timer_lock, flags);

	snd_opl3_reset(opl3);
	hwdep = opl3->hwdep;
	mutex_lock(&hwdep->open_mutex);
	hwdep->used--;
	mutex_unlock(&hwdep->open_mutex);
	wake_up(&hwdep->open_wait);
}

/*
 * Sequencer "use" (subscription) callback: set up the chip and, if the
 * use_internal_drums parameter is set, switch it into percussion mode,
 * which reserves voices 6-8.  A module reference is taken unless the
 * subscriber is the system client.
 */
static int snd_opl3_synth_use(void *private_data, struct snd_seq_port_subscribe * info)
{
	struct snd_opl3 *opl3 = private_data;
	int err;

	if ((err = snd_opl3_synth_setup(opl3)) < 0)
		return err;

	if (use_internal_drums) {
		/* Percussion mode */
		opl3->voices[6].state = opl3->voices[7].state =
			opl3->voices[8].state = SNDRV_OPL3_ST_NOT_AVAIL;
		snd_opl3_load_drums(opl3);
		opl3->drum_reg = OPL3_PERCUSSION_ENABLE;
		opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION, opl3->drum_reg);
	} else {
		opl3->drum_reg = 0x00;
	}

	if (info->sender.client != SNDRV_SEQ_CLIENT_SYSTEM) {
		if ((err = snd_opl3_synth_use_inc(opl3)) < 0)
			return err;
	}
	opl3->synth_mode = SNDRV_OPL3_MODE_SEQ;
	return 0;
}

/*
 * Sequencer "unuse" (unsubscription) callback: tear the chip down and
 * drop the module reference taken in snd_opl3_synth_use().
 */
static int snd_opl3_synth_unuse(void *private_data, struct snd_seq_port_subscribe * info)
{
	struct snd_opl3 *opl3 = private_data;

	snd_opl3_synth_cleanup(opl3);

	if (info->sender.client != SNDRV_SEQ_CLIENT_SYSTEM)
		snd_opl3_synth_use_dec(opl3);
	return 0;
}

/*
 * MIDI emulation operators
 */
struct snd_midi_op opl3_ops = {
	.note_on =		snd_opl3_note_on,
	.note_off =		snd_opl3_note_off,
	.key_press =		snd_opl3_key_press,
	.note_terminate =	snd_opl3_terminate_note,
	.control =		snd_opl3_control,
	.nrpn =			snd_opl3_nrpn,
	.sysex =		snd_opl3_sysex,
};

/*
 * Sequencer event input callback: hand every incoming event to the
 * generic MIDI emulation layer, which dispatches through opl3_ops.
 */
static int snd_opl3_synth_event_input(struct snd_seq_event * ev, int direct,
				      void *private_data, int atomic, int hop)
{
	struct snd_opl3 *opl3 = private_data;

	snd_midi_process_event(&opl3_ops, ev, opl3->chset);
	return 0;
}

/* ------------------------------ */

/* Port private_free callback: release the MIDI channel set. */
static void snd_opl3_synth_free_port(void *private_data)
{
	struct snd_opl3 *opl3 = private_data;

	snd_midi_channel_free_set(opl3->chset);
}

/*
 * Allocate a 16-channel MIDI channel set and attach a writable
 * sequencer port for it.  The voice count advertised on the port
 * depends on whether the hardware is OPL2- or OPL3-class.
 *
 * Returns 0 on success, -ENOMEM if the channel set cannot be
 * allocated, or the negative error from snd_seq_event_port_attach().
 */
static int snd_opl3_synth_create_port(struct snd_opl3 * opl3)
{
	struct snd_seq_port_callback callbacks;
	char name[32];
	int voices, opl_ver;

	voices = (opl3->hardware < OPL3_HW_OPL3) ?
		MAX_OPL2_VOICES : MAX_OPL3_VOICES;
	opl3->chset = snd_midi_channel_alloc_set(16);
	if (opl3->chset == NULL)
		return -ENOMEM;
	opl3->chset->private_data = opl3;

	memset(&callbacks, 0, sizeof(callbacks));
	callbacks.owner = THIS_MODULE;
	callbacks.use = snd_opl3_synth_use;
	callbacks.unuse = snd_opl3_synth_unuse;
	callbacks.event_input = snd_opl3_synth_event_input;
	callbacks.private_free = snd_opl3_synth_free_port;
	callbacks.private_data = opl3;

	/* Port name carries the OPL generation, e.g. "OPL3 FM Port". */
	opl_ver = (opl3->hardware & OPL3_HW_MASK) >> 8;
	sprintf(name, "OPL%i FM Port", opl_ver);

	opl3->chset->client = opl3->seq_client;
	opl3->chset->port = snd_seq_event_port_attach(opl3->seq_client, &callbacks,
						      SNDRV_SEQ_PORT_CAP_WRITE |
						      SNDRV_SEQ_PORT_CAP_SUBS_WRITE,
						      SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC |
						      SNDRV_SEQ_PORT_TYPE_MIDI_GM |
						      SNDRV_SEQ_PORT_TYPE_DIRECT_SAMPLE |
						      SNDRV_SEQ_PORT_TYPE_HARDWARE |
						      SNDRV_SEQ_PORT_TYPE_SYNTHESIZER,
						      16, voices,
						      name);
	if (opl3->chset->port < 0) {
		int port;
		port = opl3->chset->port;
		snd_midi_channel_free_set(opl3->chset);
		return port;
	}
	return 0;
}

/* ------------------------------ */

/*
 * Sequencer device "new" callback: create the kernel client and its
 * port, then initialize the per-device system timer state.  On port
 * creation failure the client is deleted again so nothing leaks.
 */
static int snd_opl3_seq_new_device(struct snd_seq_device *dev)
{
	struct snd_opl3 *opl3;
	int client, err;
	char name[32];
	int opl_ver;

	opl3 = *(struct snd_opl3 **)SNDRV_SEQ_DEVICE_ARGPTR(dev);
	if (opl3 == NULL)
		return -EINVAL;

	spin_lock_init(&opl3->voice_lock);

	opl3->seq_client = -1;

	/* allocate new client */
	opl_ver = (opl3->hardware & OPL3_HW_MASK) >> 8;
	sprintf(name, "OPL%i FM synth", opl_ver);
	client = opl3->seq_client =
		snd_seq_create_kernel_client(opl3->card, opl3->seq_dev_num,
					     name);
	if (client < 0)
		return client;

	if ((err = snd_opl3_synth_create_port(opl3)) < 0) {
		snd_seq_delete_kernel_client(client);
		opl3->seq_client = -1;
		return err;
	}

	/* setup system timer */
	init_timer(&opl3->tlist);
	opl3->tlist.function = snd_opl3_timer_func;
	opl3->tlist.data = (unsigned long) opl3;
	spin_lock_init(&opl3->sys_timer_lock);
	opl3->sys_timer_status = 0;

#ifdef CONFIG_SND_SEQUENCER_OSS
	snd_opl3_init_seq_oss(opl3, name);
#endif
	return 0;
}

/*
 * Sequencer device "delete" callback: tear down the OSS emulation (if
 * configured) and the kernel client created in snd_opl3_seq_new_device().
 */
static int snd_opl3_seq_delete_device(struct snd_seq_device *dev)
{
	struct snd_opl3 *opl3;

	opl3 = *(struct snd_opl3 **)SNDRV_SEQ_DEVICE_ARGPTR(dev);
	if (opl3 == NULL)
		return -EINVAL;

#ifdef CONFIG_SND_SEQUENCER_OSS
	snd_opl3_free_seq_oss(opl3);
#endif
	if (opl3->seq_client >= 0) {
		snd_seq_delete_kernel_client(opl3->seq_client);
		opl3->seq_client = -1;
	}
	return 0;
}

/* Register this driver for OPL3 sequencer devices at module load. */
static int __init alsa_opl3_seq_init(void)
{
	static struct snd_seq_dev_ops ops =
	{
		snd_opl3_seq_new_device,
		snd_opl3_seq_delete_device
	};

	return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL3, &ops,
					      sizeof(struct snd_opl3 *));
}

static void __exit alsa_opl3_seq_exit(void)
{
	snd_seq_device_unregister_driver(SNDRV_SEQ_DEV_ID_OPL3);
}

module_init(alsa_opl3_seq_init)
module_exit(alsa_opl3_seq_exit)
gpl-2.0
robcore/machinex
arch/x86/kernel/cpu/mtrr/centaur.c
13253
3027
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"

/* Shadow copy of the eight Centaur MCR (Memory Configuration Register)
 * MSRs; each entry mirrors the hi/lo halves of MSR_IDT_MCR0 + i. */
static struct {
	unsigned long high;
	unsigned long low;
} centaur_mcr[8];

static u8 centaur_mcr_reserved;
static u8 centaur_mcr_type;	/* 0 for winchip, 1 for winchip2 */

/**
 * centaur_get_free_region - Get a free MTRR.
 *
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: register index to reuse, or a negative value to search.
 *
 * Returns: the index of the region on success, else -ENOSPC if no
 * unreserved, unused register is available.
 */
static int
centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int i, max;

	max = num_var_ranges;
	/* Caller nominated a register to replace: use it directly. */
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;

	for (i = 0; i < max; ++i) {
		/* Skip registers claimed at boot (see centaur_mcr_reserved). */
		if (centaur_mcr_reserved & (1 << i))
			continue;
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}

	return -ENOSPC;
}

/*
 * Report boot time MCR setups
 */
void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
	centaur_mcr[mcr].low = lo;
	centaur_mcr[mcr].high = hi;
}

/* Read back an MCR from the shadow array, decoding base, size and the
 * cache type from the low bits (decoding differs between winchip and
 * winchip2 -- see centaur_mcr_type). */
static void
centaur_get_mcr(unsigned int reg, unsigned long *base,
		unsigned long *size, mtrr_type * type)
{
	*base = centaur_mcr[reg].high >> PAGE_SHIFT;
	*size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
	*type = MTRR_TYPE_WRCOMB;	/* write-combining */

	if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2))
		*type = MTRR_TYPE_UNCACHABLE;
	if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25)
		*type = MTRR_TYPE_WRBACK;
	if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31)
		*type = MTRR_TYPE_WRBACK;
}

/* Program one MCR: update the shadow array and write the MSR.
 * size == 0 disables the register entirely. */
static void
centaur_set_mcr(unsigned int reg, unsigned long base,
		unsigned long size, mtrr_type type)
{
	unsigned long low, high;

	if (size == 0) {
		/* Disable */
		high = low = 0;
	} else {
		high = base << PAGE_SHIFT;
		if (centaur_mcr_type == 0) {
			/* Only support write-combining... */
			low = -size << PAGE_SHIFT | 0x1f;
		} else {
			if (type == MTRR_TYPE_UNCACHABLE)
				low = -size << PAGE_SHIFT | 0x02; /* NC */
			else
				low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */
		}
	}
	centaur_mcr[reg].high = high;
	centaur_mcr[reg].low = low;
	wrmsr(MSR_IDT_MCR0 + reg, low, high);
}

/* Reject region types the hardware cannot express: only WRCOMB is
 * universally supported; UNCACHABLE additionally works on winchip2. */
static int
centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	/*
	 * FIXME: Winchip2 supports uncached
	 */
	if (type != MTRR_TYPE_WRCOMB &&
	    (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
		pr_warning("mtrr: only write-combining%s supported\n",
			   centaur_mcr_type ? " and uncacheable are" : " is");
		return -EINVAL;
	}
	return 0;
}

static const struct mtrr_ops centaur_mtrr_ops = {
	.vendor            = X86_VENDOR_CENTAUR,
	.set               = centaur_set_mcr,
	.get               = centaur_get_mcr,
	.get_free_region   = centaur_get_free_region,
	.validate_add_page = centaur_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};

/* Install the Centaur MTRR backend as the active mtrr_ops. */
int __init centaur_init_mtrr(void)
{
	set_mtrr_ops(&centaur_mtrr_ops);
	return 0;
}
gpl-2.0
klquicksall/Ace-GB-DHD
net/ipv4/xfrm4_mode_transport.c
14277
2135
/*
 * xfrm4_mode_transport.c - Transport mode encapsulation for IPv4.
 *
 * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/stringify.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>

/* Add encapsulation header.
 *
 * The IP header will be moved forward to make space for the encapsulation
 * header.
 */
static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl * 4;

	/* Point the network header header_len bytes earlier; the gap
	 * created is where the encapsulation header will live. */
	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + ihl;
	/* Copy the IP header into its new, earlier position.  iph still
	 * points at the old location, which memmove reads from. */
	__skb_pull(skb, ihl);
	memmove(skb_network_header(skb), iph, ihl);
	return 0;
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation header.
 *
 * On entry, skb->h shall point to where the IP header should be and skb->nh
 * shall be set to where the IP header currently is.  skb->data shall point
 * to the start of the payload.
 */
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
	/* Bytes between transport header and payload = IP header length. */
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		/* Slide the IP header down over the encap header. */
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		skb->network_header = skb->transport_header;
	}
	/* Fix up tot_len to cover payload plus the (moved) IP header. */
	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
	skb_reset_transport_header(skb);
	return 0;
}

static struct xfrm_mode xfrm4_transport_mode = {
	.input = xfrm4_transport_input,
	.output = xfrm4_transport_output,
	.owner = THIS_MODULE,
	.encap = XFRM_MODE_TRANSPORT,
};

/* Register the IPv4 transport mode with the xfrm core at load time. */
static int __init xfrm4_transport_init(void)
{
	return xfrm_register_mode(&xfrm4_transport_mode, AF_INET);
}

static void __exit xfrm4_transport_exit(void)
{
	int err;

	err = xfrm_unregister_mode(&xfrm4_transport_mode, AF_INET);
	/* Unregistering a mode we registered at init must not fail. */
	BUG_ON(err);
}

module_init(xfrm4_transport_init);
module_exit(xfrm4_transport_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_MODE(AF_INET, XFRM_MODE_TRANSPORT);
gpl-2.0
Pafcholini/Nadia-kernel-KK-N910F-EUR-KK-OpenSource
drivers/char/mwave/smapi.c
14533
19459
/* * * smapi.c -- SMAPI interface routines * * * Written By: Mike Sullivan IBM Corporation * * Copyright (C) 1999 IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. 
* * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * 10/23/2000 - Alpha Release * First release to the public */ #include <linux/kernel.h> #include <linux/mc146818rtc.h> /* CMOS defines */ #include "smapi.h" #include "mwavedd.h" static unsigned short g_usSmapiPort = 0; static int smapi_request(unsigned short inBX, unsigned short inCX, unsigned short inDI, unsigned short inSI, unsigned short *outAX, unsigned short *outBX, unsigned short *outCX, unsigned short *outDX, unsigned short *outDI, unsigned short *outSI) { unsigned short myoutAX = 2, *pmyoutAX = &myoutAX; unsigned short myoutBX = 3, *pmyoutBX = &myoutBX; unsigned short myoutCX = 4, *pmyoutCX = &myoutCX; unsigned short myoutDX = 5, *pmyoutDX = &myoutDX; unsigned short myoutDI = 6, *pmyoutDI = &myoutDI; unsigned short myoutSI = 7, *pmyoutSI = &myoutSI; unsigned short usSmapiOK = -EIO, *pusSmapiOK = &usSmapiOK; unsigned int inBXCX = (inBX << 16) | inCX; unsigned int inDISI = (inDI << 16) | inSI; int retval = 0; PRINTK_5(TRACE_SMAPI, "inBX %x inCX %x inDI %x inSI %x\n", inBX, inCX, inDI, inSI); __asm__ __volatile__("movw $0x5380,%%ax\n\t" "movl %7,%%ebx\n\t" "shrl $16, %%ebx\n\t" "movw %7,%%cx\n\t" "movl %8,%%edi\n\t" "shrl $16,%%edi\n\t" "movw %8,%%si\n\t" "movw %9,%%dx\n\t" "out %%al,%%dx\n\t" "out %%al,$0x4F\n\t" "cmpb $0x53,%%ah\n\t" 
"je 2f\n\t" "1:\n\t" "orb %%ah,%%ah\n\t" "jnz 2f\n\t" "movw %%ax,%0\n\t" "movw %%bx,%1\n\t" "movw %%cx,%2\n\t" "movw %%dx,%3\n\t" "movw %%di,%4\n\t" "movw %%si,%5\n\t" "movw $1,%6\n\t" "2:\n\t":"=m"(*(unsigned short *) pmyoutAX), "=m"(*(unsigned short *) pmyoutBX), "=m"(*(unsigned short *) pmyoutCX), "=m"(*(unsigned short *) pmyoutDX), "=m"(*(unsigned short *) pmyoutDI), "=m"(*(unsigned short *) pmyoutSI), "=m"(*(unsigned short *) pusSmapiOK) :"m"(inBXCX), "m"(inDISI), "m"(g_usSmapiPort) :"%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi"); PRINTK_8(TRACE_SMAPI, "myoutAX %x myoutBX %x myoutCX %x myoutDX %x myoutDI %x myoutSI %x usSmapiOK %x\n", myoutAX, myoutBX, myoutCX, myoutDX, myoutDI, myoutSI, usSmapiOK); *outAX = myoutAX; *outBX = myoutBX; *outCX = myoutCX; *outDX = myoutDX; *outDI = myoutDI; *outSI = myoutSI; retval = (usSmapiOK == 1) ? 0 : -EIO; PRINTK_2(TRACE_SMAPI, "smapi::smapi_request exit retval %x\n", retval); return retval; } int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings) { int bRC = -EIO; unsigned short usAX, usBX, usCX, usDX, usDI, usSI; unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 }; unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 }; unsigned short numDspBases = 8; unsigned short numUartBases = 4; PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n"); bRC = smapi_request(0x1802, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Error: Could not get DSP Settings. 
Aborting.\n"); return bRC; } PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n"); pSettings->bDSPPresent = ((usBX & 0x0100) != 0); pSettings->bDSPEnabled = ((usCX & 0x0001) != 0); pSettings->usDspIRQ = usSI & 0x00FF; pSettings->usDspDMA = (usSI & 0xFF00) >> 8; if ((usDI & 0x00FF) < numDspBases) { pSettings->usDspBaseIO = ausDspBases[usDI & 0x00FF]; } else { pSettings->usDspBaseIO = 0; } PRINTK_6(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg get DSP Settings bDSPPresent %x bDSPEnabled %x usDspIRQ %x usDspDMA %x usDspBaseIO %x\n", pSettings->bDSPPresent, pSettings->bDSPEnabled, pSettings->usDspIRQ, pSettings->usDspDMA, pSettings->usDspBaseIO); /* check for illegal values */ if ( pSettings->usDspBaseIO == 0 ) PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP base I/O address is 0\n"); if ( pSettings->usDspIRQ == 0 ) PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP IRQ line is 0\n"); bRC = smapi_request(0x1804, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) { PRINTK_ERROR("smapi::smapi_query_DSP_cfg: Error: Could not get DSP modem settings. 
Aborting.\n"); return bRC; } PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n"); pSettings->bModemEnabled = ((usCX & 0x0001) != 0); pSettings->usUartIRQ = usSI & 0x000F; if (((usSI & 0xFF00) >> 8) < numUartBases) { pSettings->usUartBaseIO = ausUartBases[(usSI & 0xFF00) >> 8]; } else { pSettings->usUartBaseIO = 0; } PRINTK_4(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg get DSP modem settings bModemEnabled %x usUartIRQ %x usUartBaseIO %x\n", pSettings->bModemEnabled, pSettings->usUartIRQ, pSettings->usUartBaseIO); /* check for illegal values */ if ( pSettings->usUartBaseIO == 0 ) PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART base I/O address is 0\n"); if ( pSettings->usUartIRQ == 0 ) PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART IRQ line is 0\n"); PRINTK_2(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg exit bRC %x\n", bRC); return bRC; } int smapi_set_DSP_cfg(void) { int bRC = -EIO; int i; unsigned short usAX, usBX, usCX, usDX, usDI, usSI; unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 }; unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 }; unsigned short ausDspIrqs[] = { 5, 7, 10, 11, 15 }; unsigned short ausUartIrqs[] = { 3, 4 }; unsigned short numDspBases = 8; unsigned short numUartBases = 4; unsigned short numDspIrqs = 5; unsigned short numUartIrqs = 2; unsigned short dspio_index = 0, uartio_index = 0; PRINTK_5(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg entry mwave_3780i_irq %x mwave_3780i_io %x mwave_uart_irq %x mwave_uart_io %x\n", mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io); if (mwave_3780i_io) { for (i = 0; i < numDspBases; i++) { if (mwave_3780i_io == ausDspBases[i]) break; } if (i == numDspBases) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. 
Aborting.\n", mwave_3780i_io); return bRC; } dspio_index = i; } if (mwave_3780i_irq) { for (i = 0; i < numDspIrqs; i++) { if (mwave_3780i_irq == ausDspIrqs[i]) break; } if (i == numDspIrqs) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq); return bRC; } } if (mwave_uart_io) { for (i = 0; i < numUartBases; i++) { if (mwave_uart_io == ausUartBases[i]) break; } if (i == numUartBases) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io); return bRC; } uartio_index = i; } if (mwave_uart_irq) { for (i = 0; i < numUartIrqs; i++) { if (mwave_uart_irq == ausUartIrqs[i]) break; } if (i == numUartIrqs) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq); return bRC; } } if (mwave_uart_irq || mwave_uart_io) { /* Check serial port A */ bRC = smapi_request(0x1402, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; /* bRC == 0 */ if (usBX & 0x0100) { /* serial port A is present */ if (usCX & 1) { /* serial port is enabled */ if ((usSI & 0xFF) == mwave_uart_irq) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting serial port\n"); bRC = smapi_request(0x1403, 0x0100, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1402, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } else { if ((usSI >> 8) == 
uartio_index) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting serial port A\n"); bRC = smapi_request (0x1403, 0x0100, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request (0x1402, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } } } } /* Check serial port B */ bRC = smapi_request(0x1404, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; /* bRC == 0 */ if (usBX & 0x0100) { /* serial port B is present */ if (usCX & 1) { /* serial port is enabled */ if ((usSI & 0xFF) == mwave_uart_irq) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n"); bRC = smapi_request(0x1405, 0x0100, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1404, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } else { if ((usSI >> 8) == uartio_index) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE 
"smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1 (TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n"); bRC = smapi_request (0x1405, 0x0100, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request (0x1404, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } } } } /* Check IR port */ bRC = smapi_request(0x1700, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1704, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; /* bRC == 0 */ if ((usCX & 0xff) != 0xff) { /* IR port not disabled */ if ((usCX & 0xff) == mwave_uart_irq) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n"); bRC = smapi_request(0x1701, 0x0100, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1700, 0, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1705, 0x01ff, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1704, 0x0000, 0, 0, 
&usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } else { if ((usSI & 0xff) == uartio_index) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n"); bRC = smapi_request(0x1701, 0x0100, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1700, 0, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1705, 0x01ff, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1704, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } } } } bRC = smapi_request(0x1802, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; if (mwave_3780i_io) { usDI = dspio_index; } if (mwave_3780i_irq) { usSI = (usSI & 0xff00) | mwave_3780i_irq; } bRC = smapi_request(0x1803, 0x0101, usDI, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1804, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; if (mwave_uart_io) { usSI = (usSI & 0x00ff) | (uartio_index << 8); } if (mwave_uart_irq) { usSI = (usSI & 0xff00) | mwave_uart_irq; } bRC = smapi_request(0x1805, 0x0101, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1802, 
0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1804, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; /* normal exit: */ PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg exit\n"); return 0; exit_conflict: /* Message has already been printed */ return -EIO; exit_smapi_request_error: PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg exit on smapi_request error bRC %x\n", bRC); return bRC; } int smapi_set_DSP_power_state(BOOLEAN bOn) { int bRC = -EIO; unsigned short usAX, usBX, usCX, usDX, usDI, usSI; unsigned short usPowerFunction; PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state entry bOn %x\n", bOn); usPowerFunction = (bOn) ? 1 : 0; bRC = smapi_request(0x4901, 0x0000, 0, usPowerFunction, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state exit bRC %x\n", bRC); return bRC; } #if 0 static int SmapiQuerySystemID(void) { int bRC = -EIO; unsigned short usAX = 0xffff, usBX = 0xffff, usCX = 0xffff, usDX = 0xffff, usDI = 0xffff, usSI = 0xffff; printk("smapi::SmapiQUerySystemID entry\n"); bRC = smapi_request(0x0000, 0, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC == 0) { printk("AX=%x, BX=%x, CX=%x, DX=%x, DI=%x, SI=%x\n", usAX, usBX, usCX, usDX, usDI, usSI); } else { printk("smapi::SmapiQuerySystemID smapi_request error\n"); } return bRC; } #endif /* 0 */ int smapi_init(void) { int retval = -EIO; unsigned short usSmapiID = 0; unsigned long flags; PRINTK_1(TRACE_SMAPI, "smapi::smapi_init entry\n"); spin_lock_irqsave(&rtc_lock, flags); usSmapiID = CMOS_READ(0x7C); usSmapiID |= (CMOS_READ(0x7D) << 8); spin_unlock_irqrestore(&rtc_lock, flags); PRINTK_2(TRACE_SMAPI, "smapi::smapi_init usSmapiID %x\n", usSmapiID); if (usSmapiID == 0x5349) { spin_lock_irqsave(&rtc_lock, flags); g_usSmapiPort = CMOS_READ(0x7E); g_usSmapiPort |= (CMOS_READ(0x7F) << 8); spin_unlock_irqrestore(&rtc_lock, flags); if 
(g_usSmapiPort == 0) { PRINTK_ERROR("smapi::smapi_init, ERROR unable to read from SMAPI port\n"); } else { PRINTK_2(TRACE_SMAPI, "smapi::smapi_init, exit TRUE g_usSmapiPort %x\n", g_usSmapiPort); retval = 0; //SmapiQuerySystemID(); } } else { PRINTK_ERROR("smapi::smapi_init, ERROR invalid usSmapiID\n"); retval = -ENXIO; } return retval; }
gpl-2.0
stefanbucur/linux-s2e
drivers/char/mwave/smapi.c
14533
19459
/* * * smapi.c -- SMAPI interface routines * * * Written By: Mike Sullivan IBM Corporation * * Copyright (C) 1999 IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. 
* * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * 10/23/2000 - Alpha Release * First release to the public */ #include <linux/kernel.h> #include <linux/mc146818rtc.h> /* CMOS defines */ #include "smapi.h" #include "mwavedd.h" static unsigned short g_usSmapiPort = 0; static int smapi_request(unsigned short inBX, unsigned short inCX, unsigned short inDI, unsigned short inSI, unsigned short *outAX, unsigned short *outBX, unsigned short *outCX, unsigned short *outDX, unsigned short *outDI, unsigned short *outSI) { unsigned short myoutAX = 2, *pmyoutAX = &myoutAX; unsigned short myoutBX = 3, *pmyoutBX = &myoutBX; unsigned short myoutCX = 4, *pmyoutCX = &myoutCX; unsigned short myoutDX = 5, *pmyoutDX = &myoutDX; unsigned short myoutDI = 6, *pmyoutDI = &myoutDI; unsigned short myoutSI = 7, *pmyoutSI = &myoutSI; unsigned short usSmapiOK = -EIO, *pusSmapiOK = &usSmapiOK; unsigned int inBXCX = (inBX << 16) | inCX; unsigned int inDISI = (inDI << 16) | inSI; int retval = 0; PRINTK_5(TRACE_SMAPI, "inBX %x inCX %x inDI %x inSI %x\n", inBX, inCX, inDI, inSI); __asm__ __volatile__("movw $0x5380,%%ax\n\t" "movl %7,%%ebx\n\t" "shrl $16, %%ebx\n\t" "movw %7,%%cx\n\t" "movl %8,%%edi\n\t" "shrl $16,%%edi\n\t" "movw %8,%%si\n\t" "movw %9,%%dx\n\t" "out %%al,%%dx\n\t" "out %%al,$0x4F\n\t" "cmpb $0x53,%%ah\n\t" 
"je 2f\n\t" "1:\n\t" "orb %%ah,%%ah\n\t" "jnz 2f\n\t" "movw %%ax,%0\n\t" "movw %%bx,%1\n\t" "movw %%cx,%2\n\t" "movw %%dx,%3\n\t" "movw %%di,%4\n\t" "movw %%si,%5\n\t" "movw $1,%6\n\t" "2:\n\t":"=m"(*(unsigned short *) pmyoutAX), "=m"(*(unsigned short *) pmyoutBX), "=m"(*(unsigned short *) pmyoutCX), "=m"(*(unsigned short *) pmyoutDX), "=m"(*(unsigned short *) pmyoutDI), "=m"(*(unsigned short *) pmyoutSI), "=m"(*(unsigned short *) pusSmapiOK) :"m"(inBXCX), "m"(inDISI), "m"(g_usSmapiPort) :"%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi"); PRINTK_8(TRACE_SMAPI, "myoutAX %x myoutBX %x myoutCX %x myoutDX %x myoutDI %x myoutSI %x usSmapiOK %x\n", myoutAX, myoutBX, myoutCX, myoutDX, myoutDI, myoutSI, usSmapiOK); *outAX = myoutAX; *outBX = myoutBX; *outCX = myoutCX; *outDX = myoutDX; *outDI = myoutDI; *outSI = myoutSI; retval = (usSmapiOK == 1) ? 0 : -EIO; PRINTK_2(TRACE_SMAPI, "smapi::smapi_request exit retval %x\n", retval); return retval; } int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings) { int bRC = -EIO; unsigned short usAX, usBX, usCX, usDX, usDI, usSI; unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 }; unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 }; unsigned short numDspBases = 8; unsigned short numUartBases = 4; PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n"); bRC = smapi_request(0x1802, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Error: Could not get DSP Settings. 
Aborting.\n"); return bRC; } PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n"); pSettings->bDSPPresent = ((usBX & 0x0100) != 0); pSettings->bDSPEnabled = ((usCX & 0x0001) != 0); pSettings->usDspIRQ = usSI & 0x00FF; pSettings->usDspDMA = (usSI & 0xFF00) >> 8; if ((usDI & 0x00FF) < numDspBases) { pSettings->usDspBaseIO = ausDspBases[usDI & 0x00FF]; } else { pSettings->usDspBaseIO = 0; } PRINTK_6(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg get DSP Settings bDSPPresent %x bDSPEnabled %x usDspIRQ %x usDspDMA %x usDspBaseIO %x\n", pSettings->bDSPPresent, pSettings->bDSPEnabled, pSettings->usDspIRQ, pSettings->usDspDMA, pSettings->usDspBaseIO); /* check for illegal values */ if ( pSettings->usDspBaseIO == 0 ) PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP base I/O address is 0\n"); if ( pSettings->usDspIRQ == 0 ) PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP IRQ line is 0\n"); bRC = smapi_request(0x1804, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) { PRINTK_ERROR("smapi::smapi_query_DSP_cfg: Error: Could not get DSP modem settings. 
Aborting.\n"); return bRC; } PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n"); pSettings->bModemEnabled = ((usCX & 0x0001) != 0); pSettings->usUartIRQ = usSI & 0x000F; if (((usSI & 0xFF00) >> 8) < numUartBases) { pSettings->usUartBaseIO = ausUartBases[(usSI & 0xFF00) >> 8]; } else { pSettings->usUartBaseIO = 0; } PRINTK_4(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg get DSP modem settings bModemEnabled %x usUartIRQ %x usUartBaseIO %x\n", pSettings->bModemEnabled, pSettings->usUartIRQ, pSettings->usUartBaseIO); /* check for illegal values */ if ( pSettings->usUartBaseIO == 0 ) PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART base I/O address is 0\n"); if ( pSettings->usUartIRQ == 0 ) PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART IRQ line is 0\n"); PRINTK_2(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg exit bRC %x\n", bRC); return bRC; } int smapi_set_DSP_cfg(void) { int bRC = -EIO; int i; unsigned short usAX, usBX, usCX, usDX, usDI, usSI; unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 }; unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 }; unsigned short ausDspIrqs[] = { 5, 7, 10, 11, 15 }; unsigned short ausUartIrqs[] = { 3, 4 }; unsigned short numDspBases = 8; unsigned short numUartBases = 4; unsigned short numDspIrqs = 5; unsigned short numUartIrqs = 2; unsigned short dspio_index = 0, uartio_index = 0; PRINTK_5(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg entry mwave_3780i_irq %x mwave_3780i_io %x mwave_uart_irq %x mwave_uart_io %x\n", mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io); if (mwave_3780i_io) { for (i = 0; i < numDspBases; i++) { if (mwave_3780i_io == ausDspBases[i]) break; } if (i == numDspBases) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. 
Aborting.\n", mwave_3780i_io); return bRC; } dspio_index = i; } if (mwave_3780i_irq) { for (i = 0; i < numDspIrqs; i++) { if (mwave_3780i_irq == ausDspIrqs[i]) break; } if (i == numDspIrqs) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq); return bRC; } } if (mwave_uart_io) { for (i = 0; i < numUartBases; i++) { if (mwave_uart_io == ausUartBases[i]) break; } if (i == numUartBases) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io); return bRC; } uartio_index = i; } if (mwave_uart_irq) { for (i = 0; i < numUartIrqs; i++) { if (mwave_uart_irq == ausUartIrqs[i]) break; } if (i == numUartIrqs) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq); return bRC; } } if (mwave_uart_irq || mwave_uart_io) { /* Check serial port A */ bRC = smapi_request(0x1402, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; /* bRC == 0 */ if (usBX & 0x0100) { /* serial port A is present */ if (usCX & 1) { /* serial port is enabled */ if ((usSI & 0xFF) == mwave_uart_irq) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting serial port\n"); bRC = smapi_request(0x1403, 0x0100, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1402, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } else { if ((usSI >> 8) == 
uartio_index) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting serial port A\n"); bRC = smapi_request (0x1403, 0x0100, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request (0x1402, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } } } } /* Check serial port B */ bRC = smapi_request(0x1404, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; /* bRC == 0 */ if (usBX & 0x0100) { /* serial port B is present */ if (usCX & 1) { /* serial port is enabled */ if ((usSI & 0xFF) == mwave_uart_irq) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n"); bRC = smapi_request(0x1405, 0x0100, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1404, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } else { if ((usSI >> 8) == uartio_index) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE 
"smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1 (TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n"); bRC = smapi_request (0x1405, 0x0100, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request (0x1404, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } } } } /* Check IR port */ bRC = smapi_request(0x1700, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1704, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; /* bRC == 0 */ if ((usCX & 0xff) != 0xff) { /* IR port not disabled */ if ((usCX & 0xff) == mwave_uart_irq) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n"); bRC = smapi_request(0x1701, 0x0100, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1700, 0, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1705, 0x01ff, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1704, 0x0000, 0, 0, 
&usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } else { if ((usSI & 0xff) == uartio_index) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n"); bRC = smapi_request(0x1701, 0x0100, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1700, 0, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1705, 0x01ff, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1704, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } } } } bRC = smapi_request(0x1802, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; if (mwave_3780i_io) { usDI = dspio_index; } if (mwave_3780i_irq) { usSI = (usSI & 0xff00) | mwave_3780i_irq; } bRC = smapi_request(0x1803, 0x0101, usDI, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1804, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; if (mwave_uart_io) { usSI = (usSI & 0x00ff) | (uartio_index << 8); } if (mwave_uart_irq) { usSI = (usSI & 0xff00) | mwave_uart_irq; } bRC = smapi_request(0x1805, 0x0101, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1802, 
0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1804, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; /* normal exit: */ PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg exit\n"); return 0; exit_conflict: /* Message has already been printed */ return -EIO; exit_smapi_request_error: PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg exit on smapi_request error bRC %x\n", bRC); return bRC; } int smapi_set_DSP_power_state(BOOLEAN bOn) { int bRC = -EIO; unsigned short usAX, usBX, usCX, usDX, usDI, usSI; unsigned short usPowerFunction; PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state entry bOn %x\n", bOn); usPowerFunction = (bOn) ? 1 : 0; bRC = smapi_request(0x4901, 0x0000, 0, usPowerFunction, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state exit bRC %x\n", bRC); return bRC; } #if 0 static int SmapiQuerySystemID(void) { int bRC = -EIO; unsigned short usAX = 0xffff, usBX = 0xffff, usCX = 0xffff, usDX = 0xffff, usDI = 0xffff, usSI = 0xffff; printk("smapi::SmapiQUerySystemID entry\n"); bRC = smapi_request(0x0000, 0, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC == 0) { printk("AX=%x, BX=%x, CX=%x, DX=%x, DI=%x, SI=%x\n", usAX, usBX, usCX, usDX, usDI, usSI); } else { printk("smapi::SmapiQuerySystemID smapi_request error\n"); } return bRC; } #endif /* 0 */ int smapi_init(void) { int retval = -EIO; unsigned short usSmapiID = 0; unsigned long flags; PRINTK_1(TRACE_SMAPI, "smapi::smapi_init entry\n"); spin_lock_irqsave(&rtc_lock, flags); usSmapiID = CMOS_READ(0x7C); usSmapiID |= (CMOS_READ(0x7D) << 8); spin_unlock_irqrestore(&rtc_lock, flags); PRINTK_2(TRACE_SMAPI, "smapi::smapi_init usSmapiID %x\n", usSmapiID); if (usSmapiID == 0x5349) { spin_lock_irqsave(&rtc_lock, flags); g_usSmapiPort = CMOS_READ(0x7E); g_usSmapiPort |= (CMOS_READ(0x7F) << 8); spin_unlock_irqrestore(&rtc_lock, flags); if 
(g_usSmapiPort == 0) { PRINTK_ERROR("smapi::smapi_init, ERROR unable to read from SMAPI port\n"); } else { PRINTK_2(TRACE_SMAPI, "smapi::smapi_init, exit TRUE g_usSmapiPort %x\n", g_usSmapiPort); retval = 0; //SmapiQuerySystemID(); } } else { PRINTK_ERROR("smapi::smapi_init, ERROR invalid usSmapiID\n"); retval = -ENXIO; } return retval; }
gpl-2.0
amalappunni/msm8916_jalebi
drivers/input/misc/stk3x1x.c
198
72413
/* * stk3x1x.c - Linux kernel modules for sensortek stk301x, stk321x and stk331x * proximity/ambient light sensor * * Copyright (c) 2013, The Linux Foundation. All Rights Reserved. * Copyright (C) 2012 Lex Hsieh / sensortek <lex_hsieh@sitronix.com.tw> or * <lex_hsieh@sensortek.com.tw> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Linux Foundation chooses to take subject only to the GPLv2 license * terms, and distributes only under these terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/kdev_t.h> #include <linux/fs.h> #include <linux/input.h> #include <linux/sensors.h> #include <linux/workqueue.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/errno.h> #include <linux/wakelock.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/regulator/consumer.h> #ifdef CONFIG_OF #include <linux/of_gpio.h> #endif #ifdef CONFIG_HAS_EARLYSUSPEND #include <linux/earlysuspend.h> #endif #include "linux/stk3x1x.h" #define DRIVER_VERSION "3.4.4ts" /* Driver Settings */ #define CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD #ifdef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD #define STK_ALS_CHANGE_THD 20 /* The threshold to trigger ALS interrupt, unit: lux */ #endif /* #ifdef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD */ #define STK_INT_PS_MODE 1 /* 1, 2, or 3 */ #define STK_POLL_PS #define STK_POLL_ALS /* ALS interrupt is valid only when STK_PS_INT_MODE = 1 or 4*/ /* Define Register Map */ #define STK_STATE_REG 0x00 #define STK_PSCTRL_REG 0x01 #define STK_ALSCTRL_REG 0x02 #define STK_LEDCTRL_REG 0x03 #define STK_INT_REG 0x04 #define STK_WAIT_REG 0x05 #define STK_THDH1_PS_REG 0x06 #define STK_THDH2_PS_REG 0x07 #define STK_THDL1_PS_REG 0x08 #define STK_THDL2_PS_REG 0x09 #define STK_THDH1_ALS_REG 0x0A #define STK_THDH2_ALS_REG 0x0B #define STK_THDL1_ALS_REG 0x0C #define STK_THDL2_ALS_REG 0x0D #define STK_FLAG_REG 0x10 #define STK_DATA1_PS_REG 0x11 #define STK_DATA2_PS_REG 0x12 #define STK_DATA1_ALS_REG 0x13 #define STK_DATA2_ALS_REG 0x14 #define STK_DATA1_OFFSET_REG 0x15 #define STK_DATA2_OFFSET_REG 0x16 #define STK_DATA1_IR_REG 0x17 #define STK_DATA2_IR_REG 0x18 #define STK_PDT_ID_REG 0x3E #define STK_RSRVD_REG 0x3F #define STK_SW_RESET_REG 0x80 /* Define state reg */ #define STK_STATE_EN_IRS_SHIFT 7 #define 
STK_STATE_EN_AK_SHIFT 6 #define STK_STATE_EN_ASO_SHIFT 5 #define STK_STATE_EN_IRO_SHIFT 4 #define STK_STATE_EN_WAIT_SHIFT 2 #define STK_STATE_EN_ALS_SHIFT 1 #define STK_STATE_EN_PS_SHIFT 0 #define STK_STATE_EN_IRS_MASK 0x80 #define STK_STATE_EN_AK_MASK 0x40 #define STK_STATE_EN_ASO_MASK 0x20 #define STK_STATE_EN_IRO_MASK 0x10 #define STK_STATE_EN_WAIT_MASK 0x04 #define STK_STATE_EN_ALS_MASK 0x02 #define STK_STATE_EN_PS_MASK 0x01 /* Define PS ctrl reg */ #define STK_PS_PRS_SHIFT 6 #define STK_PS_GAIN_SHIFT 4 #define STK_PS_IT_SHIFT 0 #define STK_PS_PRS_MASK 0xC0 #define STK_PS_GAIN_MASK 0x30 #define STK_PS_IT_MASK 0x0F /* Define ALS ctrl reg */ #define STK_ALS_PRS_SHIFT 6 #define STK_ALS_GAIN_SHIFT 4 #define STK_ALS_IT_SHIFT 0 #define STK_ALS_PRS_MASK 0xC0 #define STK_ALS_GAIN_MASK 0x30 #define STK_ALS_IT_MASK 0x0F /* Define LED ctrl reg */ #define STK_LED_IRDR_SHIFT 6 #define STK_LED_DT_SHIFT 0 #define STK_LED_IRDR_MASK 0xC0 #define STK_LED_DT_MASK 0x3F /* Define interrupt reg */ #define STK_INT_CTRL_SHIFT 7 #define STK_INT_OUI_SHIFT 4 #define STK_INT_ALS_SHIFT 3 #define STK_INT_PS_SHIFT 0 #define STK_INT_CTRL_MASK 0x80 #define STK_INT_OUI_MASK 0x10 #define STK_INT_ALS_MASK 0x08 #define STK_INT_PS_MASK 0x07 #define STK_INT_ALS 0x08 /* Define flag reg */ #define STK_FLG_ALSDR_SHIFT 7 #define STK_FLG_PSDR_SHIFT 6 #define STK_FLG_ALSINT_SHIFT 5 #define STK_FLG_PSINT_SHIFT 4 #define STK_FLG_OUI_SHIFT 2 #define STK_FLG_IR_RDY_SHIFT 1 #define STK_FLG_NF_SHIFT 0 #define STK_FLG_ALSDR_MASK 0x80 #define STK_FLG_PSDR_MASK 0x40 #define STK_FLG_ALSINT_MASK 0x20 #define STK_FLG_PSINT_MASK 0x10 #define STK_FLG_OUI_MASK 0x04 #define STK_FLG_IR_RDY_MASK 0x02 #define STK_FLG_NF_MASK 0x01 /* misc define */ #define MIN_ALS_POLL_DELAY_NS 110000000 #define DEVICE_NAME "stk_ps" #define ALS_NAME "stk3x1x-ls" #define PS_NAME "proximity" /* POWER SUPPLY VOLTAGE RANGE */ #define STK3X1X_VDD_MIN_UV 2000000 #define STK3X1X_VDD_MAX_UV 3300000 #define STK3X1X_VIO_MIN_UV 1750000 #define 
STK3X1X_VIO_MAX_UV	1950000
#define STK_FIR_LEN	16
#define MAX_FIR_LEN	32

/* sensors-class metadata published for the ALS ("light") device */
static struct sensors_classdev sensors_light_cdev = {
	.name = "stk3x1x-light",
	.vendor = "Sensortek",
	.version = 1,
	.handle = SENSORS_LIGHT_HANDLE,
	.type = SENSOR_TYPE_LIGHT,
	.max_range = "6500",
	.resolution = "0.0625",
	.sensor_power = "0.09",
	.min_delay = (MIN_ALS_POLL_DELAY_NS / 1000), /* us */
	.fifo_reserved_event_count = 0,
	.fifo_max_event_count = 0,
	.enabled = 0,
	.delay_msec = 200,
	.sensors_enable = NULL,
	.sensors_poll_delay = NULL,
};

/* sensors-class metadata published for the proximity device */
static struct sensors_classdev sensors_proximity_cdev = {
	.name = "stk3x1x-proximity",
	.vendor = "Sensortek",
	.version = 1,
	.handle = SENSORS_PROXIMITY_HANDLE,
	.type = SENSOR_TYPE_PROXIMITY,
	.max_range = "5.0",
	.resolution = "5.0",
	.sensor_power = "0.1",
	.min_delay = 0,
	.fifo_reserved_event_count = 0,
	.fifo_max_event_count = 0,
	.enabled = 0,
	.delay_msec = 200,
	.sensors_enable = NULL,
	.sensors_poll_delay = NULL,
};

/* ring buffer backing the ALS moving-average (FIR) filter */
struct data_filter {
	u16 raw[MAX_FIR_LEN];	/* last N raw samples */
	int sum;		/* running sum of the samples held in raw[] */
	int number;		/* samples accumulated so far (<= filter length) */
	int idx;		/* next write position (monotonic, used mod length) */
};

/* per-device driver state, one instance per probed chip */
struct stk3x1x_data {
	struct i2c_client *client;
	struct stk3x1x_platform_data *pdata;
	struct sensors_classdev als_cdev;
	struct sensors_classdev ps_cdev;
#if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS))
	int32_t irq;
	struct work_struct stk_work;
	struct workqueue_struct *stk_wq;
#endif
	int int_pin;
	uint8_t wait_reg;
#ifdef CONFIG_HAS_EARLYSUSPEND
	struct early_suspend stk_early_suspend;
#endif
	uint16_t ps_thd_h;	/* cached PS high threshold */
	uint16_t ps_thd_l;	/* cached PS low threshold */
	struct mutex io_lock;
	struct input_dev *ps_input_dev;
	int32_t ps_distance_last;
	bool ps_enabled;
	struct wake_lock ps_wakelock;
	struct work_struct stk_ps_work;
	struct workqueue_struct *stk_ps_wq;
#ifdef STK_POLL_PS
	struct wake_lock ps_nosuspend_wl;
#endif
	struct input_dev *als_input_dev;
	int32_t als_lux_last;
	uint32_t als_transmittance;
	bool als_enabled;
	struct hrtimer als_timer;
	struct hrtimer ps_timer;
	ktime_t als_poll_delay;
	ktime_t ps_poll_delay;
#ifdef STK_POLL_ALS
	struct work_struct stk_als_work;
	struct workqueue_struct *stk_als_wq;
#endif
	struct regulator *vdd;
	struct regulator *vio;
	bool power_enabled;
	bool use_fir;
	struct data_filter fir;
	atomic_t firlength;
};

#if( !defined(CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD))
/* lux breakpoints used to build the ALS-code threshold table */
static uint32_t lux_threshold_table[] = {
	3, 10, 40, 65, 145, 300, 550, 930, 1250, 1700,
};

#define LUX_THD_TABLE_SIZE (sizeof(lux_threshold_table)/sizeof(uint32_t)+1)
static uint16_t code_threshold_table[LUX_THD_TABLE_SIZE+1];
#endif

/* forward declarations */
static int32_t stk3x1x_enable_ps(struct stk3x1x_data *ps_data, uint8_t enable);
static int32_t stk3x1x_enable_als(struct stk3x1x_data *ps_data, uint8_t enable);
static int32_t stk3x1x_set_ps_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l);
static int32_t stk3x1x_set_ps_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h);
static int32_t stk3x1x_set_als_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l);
static int32_t stk3x1x_set_als_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h);
static int stk3x1x_device_ctl(struct stk3x1x_data *ps_data, bool enable);
//static int32_t stk3x1x_set_ps_aoffset(struct stk3x1x_data *ps_data, uint16_t offset);

/* Convert a raw ALS code to lux: code * 1100 / transmittance, integer math. */
inline uint32_t stk_alscode2lux(struct stk3x1x_data *ps_data, uint32_t alscode)
{
	alscode += ((alscode<<7)+(alscode<<3)+(alscode>>1));	/* *137.5 */
	alscode<<=3;						/* *8 => *1100 total */
	alscode/=ps_data->als_transmittance;
	return alscode;
}

/* Inverse of stk_alscode2lux: lux * transmittance / 1100, clamped to 16 bits. */
inline uint32_t stk_lux2alscode(struct stk3x1x_data *ps_data, uint32_t lux)
{
	lux*=ps_data->als_transmittance;
	lux/=1100;
	if (unlikely(lux>=(1<<16)))
		lux = (1<<16) -1;
	return lux;
}

#ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
/* Precompute the ALS-code thresholds matching lux_threshold_table[]. */
static void stk_init_code_threshold_table(struct stk3x1x_data *ps_data)
{
	uint32_t i,j;
	uint32_t alscode;

	code_threshold_table[0] = 0;
#ifdef STK_DEBUG_PRINTF
	printk(KERN_INFO "alscode[0]=%d\n",0);
#endif
	for (i=1,j=0;i<LUX_THD_TABLE_SIZE;i++,j++)
	{
		alscode = stk_lux2alscode(ps_data, lux_threshold_table[j]);
		dev_dbg(&ps_data->client->dev, "alscode[%d]=%d\n", i, alscode);
		code_threshold_table[i] = (uint16_t)(alscode);
	}
	/* sentinel so the interval search always terminates */
	code_threshold_table[i] = 0xffff;
	dev_dbg(&ps_data->client->dev,
		"alscode[%d]=%d\n", i, alscode);
}

/* Map an ALS code to its 1-based interval in code_threshold_table[]. */
static uint32_t stk_get_lux_interval_index(uint16_t alscode)
{
	uint32_t i;

	for (i=1;i<=LUX_THD_TABLE_SIZE;i++)
	{
		if ((alscode>=code_threshold_table[i-1])&&(alscode<code_threshold_table[i]))
		{
			return i;
		}
	}
	return LUX_THD_TABLE_SIZE;
}
#else
/* Re-centre the ALS interrupt window (+/- STK_ALS_CHANGE_THD lux) on alscode. */
inline void stk_als_set_new_thd(struct stk3x1x_data *ps_data, uint16_t alscode)
{
	int32_t high_thd,low_thd;

	high_thd = alscode + stk_lux2alscode(ps_data, STK_ALS_CHANGE_THD);
	low_thd = alscode - stk_lux2alscode(ps_data, STK_ALS_CHANGE_THD);
	if (high_thd >= (1<<16))
		high_thd = (1<<16) -1;
	if (low_thd <0)
		low_thd = 0;
	stk3x1x_set_als_thd_h(ps_data, (uint16_t)high_thd);
	stk3x1x_set_als_thd_l(ps_data, (uint16_t)low_thd);
}
#endif // CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD

/*
 * Program every configuration register from platform data: state, PS/ALS
 * control, LED control, wait time, PS thresholds and the interrupt mode.
 * Returns 0 on success or the negative i2c error code.
 */
static int32_t stk3x1x_init_all_reg(struct stk3x1x_data *ps_data, struct stk3x1x_platform_data *plat_data)
{
	int32_t ret;
	uint8_t w_reg;

	w_reg = plat_data->state_reg;
	ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_reg);
	if (ret < 0)
	{
		printk(KERN_ERR "%s: write i2c error\n", __func__);
		return ret;
	}
	ps_data->ps_thd_h = plat_data->ps_thd_h;
	ps_data->ps_thd_l = plat_data->ps_thd_l;

	w_reg = plat_data->psctrl_reg;
	ret = i2c_smbus_write_byte_data(ps_data->client, STK_PSCTRL_REG, w_reg);
	if (ret < 0)
	{
		printk(KERN_ERR "%s: write i2c error\n", __func__);
		return ret;
	}
	w_reg = plat_data->alsctrl_reg;
	ret = i2c_smbus_write_byte_data(ps_data->client, STK_ALSCTRL_REG, w_reg);
	if (ret < 0)
	{
		printk(KERN_ERR "%s: write i2c error\n", __func__);
		return ret;
	}
	w_reg = plat_data->ledctrl_reg;
	ret = i2c_smbus_write_byte_data(ps_data->client, STK_LEDCTRL_REG, w_reg);
	if (ret < 0)
	{
		printk(KERN_ERR "%s: write i2c error\n", __func__);
		return ret;
	}
	ps_data->wait_reg = plat_data->wait_reg;
	if(ps_data->wait_reg < 2)
	{
		printk(KERN_WARNING "%s: wait_reg should be larger than 2, force to write 2\n", __func__);
		ps_data->wait_reg = 2;
	}
	else if (ps_data->wait_reg > 0xFF)
	{
		/*
		 * NOTE(review): ps_data->wait_reg is uint8_t, so this branch is
		 * unreachable; the range check would need to run on
		 * plat_data->wait_reg before the assignment above. Confirm the
		 * intended behaviour before relying on the clamp.
		 */
		printk(KERN_WARNING "%s: wait_reg should be less than 0xFF, force to write 0xFF\n", __func__);
		ps_data->wait_reg = 0xFF;
	}
	w_reg = plat_data->wait_reg;
	ret = i2c_smbus_write_byte_data(ps_data->client, STK_WAIT_REG, w_reg);
	if (ret < 0)
	{
		printk(KERN_ERR "%s: write i2c error\n", __func__);
		return ret;
	}
	stk3x1x_set_ps_thd_h(ps_data, ps_data->ps_thd_h);
	stk3x1x_set_ps_thd_l(ps_data, ps_data->ps_thd_l);

	/* interrupt mode: real INT-pin PS mode when not polling, else 0x01 */
	w_reg = 0;
#ifndef STK_POLL_PS
	w_reg |= STK_INT_PS_MODE;
#else
	w_reg |= 0x01;
#endif
#if (!defined(STK_POLL_ALS) && (STK_INT_PS_MODE != 0x02) && (STK_INT_PS_MODE != 0x03))
	w_reg |= STK_INT_ALS;
#endif
	ret = i2c_smbus_write_byte_data(ps_data->client, STK_INT_REG, w_reg);
	if (ret < 0)
	{
		printk(KERN_ERR "%s: write i2c error\n", __func__);
		return ret;
	}
	ret = i2c_smbus_write_byte_data(ps_data->client, 0x87, 0x60);
	if (ret < 0)
	{
		dev_err(&ps_data->client->dev, "%s: write i2c error\n", __func__);
		return ret;
	}
	return 0;
}

/* Read and sanity-check the product-ID / reserved-ID registers. */
static int32_t stk3x1x_check_pid(struct stk3x1x_data *ps_data)
{
	int32_t err1, err2;

	err1 = i2c_smbus_read_byte_data(ps_data->client,STK_PDT_ID_REG);
	if (err1 < 0)
	{
		printk(KERN_ERR "%s: read i2c error, err=%d\n", __func__, err1);
		return err1;
	}
	err2 = i2c_smbus_read_byte_data(ps_data->client,STK_RSRVD_REG);
	if (err2 < 0)
	{
		printk(KERN_ERR "%s: read i2c error, err=%d\n", __func__, err2);
		return -1;
	}
	if(err2 == 0xC0)
		printk(KERN_INFO "%s: RID=0xC0!!!!!!!!!!!!!\n", __func__);
	return 0;
}

/*
 * Soft-reset the chip: first verify the bus by writing a scratch value to the
 * wait register and reading it back, then poke the software-reset register.
 */
static int32_t stk3x1x_software_reset(struct stk3x1x_data *ps_data)
{
	int32_t r;
	uint8_t w_reg;

	w_reg = 0x7F;
	r = i2c_smbus_write_byte_data(ps_data->client,STK_WAIT_REG,w_reg);
	if (r<0)
	{
		printk(KERN_ERR "%s: software reset: write i2c error, ret=%d\n", __func__, r);
		return r;
	}
	r = i2c_smbus_read_byte_data(ps_data->client,STK_WAIT_REG);
	if (w_reg != r)
	{
		printk(KERN_ERR "%s: software reset: read-back value is not the same\n", __func__);
		return -1;
	}
	/* any write to the reset register triggers the reset */
	r = i2c_smbus_write_byte_data(ps_data->client,STK_SW_RESET_REG,0);
	if (r<0)
	{
		/* NOTE(review): message says "read error" but this was a write */
		printk(KERN_ERR "%s: software reset: read error after reset\n", __func__);
		return r;
	}
	msleep(1);
	return 0;
}

static
int32_t stk3x1x_set_als_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l)
{
	uint8_t temp;
	uint8_t* pSrc = (uint8_t*)&thd_l;

	/* byte-swap: the chip wants threshold words big-endian */
	temp = *pSrc;
	*pSrc = *(pSrc+1);
	*(pSrc+1) = temp;
	return i2c_smbus_write_word_data(ps_data->client,STK_THDL1_ALS_REG,thd_l);
}

/* Write the ALS high threshold (byte-swapped, see stk3x1x_set_als_thd_l). */
static int32_t stk3x1x_set_als_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h)
{
	uint8_t temp;
	uint8_t* pSrc = (uint8_t*)&thd_h;

	temp = *pSrc;
	*pSrc = *(pSrc+1);
	*(pSrc+1) = temp;
	return i2c_smbus_write_word_data(ps_data->client,STK_THDH1_ALS_REG,thd_h);
}

/* Write the PS low threshold and cache it in ps_data. */
static int32_t stk3x1x_set_ps_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l)
{
	uint8_t temp;
	uint8_t* pSrc = (uint8_t*)&thd_l;

	temp = *pSrc;
	*pSrc = *(pSrc+1);
	*(pSrc+1) = temp;
	/*
	 * NOTE(review): thd_l has already been byte-swapped at this point, so
	 * the cached ps_thd_l holds the swapped value — confirm intended.
	 */
	ps_data->ps_thd_l = thd_l;
	return i2c_smbus_write_word_data(ps_data->client,STK_THDL1_PS_REG,thd_l);
}

/* Write the PS high threshold and cache it in ps_data. */
static int32_t stk3x1x_set_ps_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h)
{
	uint8_t temp;
	uint8_t* pSrc = (uint8_t*)&thd_h;

	temp = *pSrc;
	*pSrc = *(pSrc+1);
	*(pSrc+1) = temp;
	/* NOTE(review): caches the byte-swapped value, as in set_ps_thd_l */
	ps_data->ps_thd_h = thd_h;
	return i2c_smbus_write_word_data(ps_data->client,STK_THDH1_PS_REG,thd_h);
}

/*
static int32_t stk3x1x_set_ps_foffset(struct stk3x1x_data *ps_data, uint16_t offset)
{
	uint8_t temp;
	uint8_t* pSrc = (uint8_t*)&offset;
	temp = *pSrc;
	*pSrc = *(pSrc+1);
	*(pSrc+1) = temp;
	return i2c_smbus_write_word_data(ps_data->client,STK_DATA1_OFFSET_REG,offset);
}

static int32_t stk3x1x_set_ps_aoffset(struct stk3x1x_data *ps_data, uint16_t offset)
{
	uint8_t temp;
	uint8_t* pSrc = (uint8_t*)&offset;
	int ret;
	uint8_t w_state_reg;
	uint8_t re_en;

	ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG);
	if (ret < 0)
	{
		printk(KERN_ERR "%s: write i2c error\n", __func__);
		return ret;
	}
	re_en = (ret & STK_STATE_EN_AK_MASK) ?
1: 0; if(re_en) { w_state_reg = (uint8_t)(ret & (~STK_STATE_EN_AK_MASK)); ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } msleep(1); } temp = *pSrc; *pSrc = *(pSrc+1); *(pSrc+1) = temp; ret = i2c_smbus_write_word_data(ps_data->client,0x0E,offset); if(!re_en) return ret; w_state_reg |= STK_STATE_EN_AK_MASK; ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } return 0; } */ static inline uint32_t stk3x1x_get_ps_reading(struct stk3x1x_data *ps_data) { int32_t word_data, tmp_word_data; tmp_word_data = i2c_smbus_read_word_data(ps_data->client,STK_DATA1_PS_REG); if(tmp_word_data < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data); return tmp_word_data; } word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ; return word_data; } static int32_t stk3x1x_set_flag(struct stk3x1x_data *ps_data, uint8_t org_flag_reg, uint8_t clr) { uint8_t w_flag; w_flag = org_flag_reg | (STK_FLG_ALSINT_MASK | STK_FLG_PSINT_MASK | STK_FLG_OUI_MASK | STK_FLG_IR_RDY_MASK); w_flag &= (~clr); //printk(KERN_INFO "%s: org_flag_reg=0x%x, w_flag = 0x%x\n", __func__, org_flag_reg, w_flag); return i2c_smbus_write_byte_data(ps_data->client,STK_FLAG_REG, w_flag); } static int32_t stk3x1x_get_flag(struct stk3x1x_data *ps_data) { return i2c_smbus_read_byte_data(ps_data->client,STK_FLAG_REG); } static int32_t stk3x1x_enable_ps(struct stk3x1x_data *ps_data, uint8_t enable) { int32_t ret; uint8_t w_state_reg; uint8_t curr_ps_enable; curr_ps_enable = ps_data->ps_enabled?1:0; if(curr_ps_enable == enable) return 0; if (enable) { ret = stk3x1x_device_ctl(ps_data, enable); if (ret) return ret; } ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG); if (ret < 0) { printk(KERN_ERR "%s: write i2c error, ret=%d\n", __func__, ret); return ret; } 
w_state_reg = ret; w_state_reg &= ~(STK_STATE_EN_PS_MASK | STK_STATE_EN_WAIT_MASK | 0x60); if(enable) { w_state_reg |= STK_STATE_EN_PS_MASK; if(!(ps_data->als_enabled)) w_state_reg |= STK_STATE_EN_WAIT_MASK; } ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error, ret=%d\n", __func__, ret); return ret; } if(enable) { #ifdef STK_POLL_PS hrtimer_start(&ps_data->ps_timer, ps_data->ps_poll_delay, HRTIMER_MODE_REL); ps_data->ps_distance_last = -1; #endif ps_data->ps_enabled = true; #ifndef STK_POLL_PS #ifndef STK_POLL_ALS if(!(ps_data->als_enabled)) #endif /* #ifndef STK_POLL_ALS */ enable_irq(ps_data->irq); msleep(1); ret = stk3x1x_get_flag(ps_data); if (ret < 0) { printk(KERN_ERR "%s: read i2c error, ret=%d\n", __func__, ret); return ret; } near_far_state = ret & STK_FLG_NF_MASK; ps_data->ps_distance_last = near_far_state; input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state); input_sync(ps_data->ps_input_dev); wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ); reading = stk3x1x_get_ps_reading(ps_data); dev_dbg(&ps_data->client->dev, "%s: ps input event=%d, ps code = %d\n", __func__, near_far_state, reading); #endif /* #ifndef STK_POLL_PS */ } else { #ifdef STK_POLL_PS hrtimer_cancel(&ps_data->ps_timer); #else #ifndef STK_POLL_ALS if(!(ps_data->als_enabled)) #endif disable_irq(ps_data->irq); #endif ps_data->ps_enabled = false; } if (!enable) { ret = stk3x1x_device_ctl(ps_data, enable); if (ret) return ret; } return ret; } static int32_t stk3x1x_enable_als(struct stk3x1x_data *ps_data, uint8_t enable) { int32_t ret; uint8_t w_state_reg; uint8_t curr_als_enable = (ps_data->als_enabled)?1:0; if(curr_als_enable == enable) return 0; if (enable) { ret = stk3x1x_device_ctl(ps_data, enable); if (ret) return ret; } #ifndef STK_POLL_ALS if (enable) { stk3x1x_set_als_thd_h(ps_data, 0x0000); stk3x1x_set_als_thd_l(ps_data, 0xFFFF); } #endif ret = i2c_smbus_read_byte_data(ps_data->client, 
		STK_STATE_REG);
	if (ret < 0)
	{
		printk(KERN_ERR "%s: write i2c error\n", __func__);
		return ret;
	}
	w_state_reg = (uint8_t)(ret & (~(STK_STATE_EN_ALS_MASK | STK_STATE_EN_WAIT_MASK)));
	if(enable)
		w_state_reg |= STK_STATE_EN_ALS_MASK;
	else if (ps_data->ps_enabled)
		/* ALS off but PS still on: hand the wait timer back to PS */
		w_state_reg |= STK_STATE_EN_WAIT_MASK;
	ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg);
	if (ret < 0)
	{
		printk(KERN_ERR "%s: write i2c error\n", __func__);
		return ret;
	}
	if (enable)
	{
		ps_data->als_enabled = true;
#ifdef STK_POLL_ALS
		hrtimer_start(&ps_data->als_timer, ps_data->als_poll_delay, HRTIMER_MODE_REL);
#else
#ifndef STK_POLL_PS
		/* the IRQ is shared with PS; only enable it when PS has not */
		if(!(ps_data->ps_enabled))
#endif
			enable_irq(ps_data->irq);
#endif
	}
	else
	{
		ps_data->als_enabled = false;
#ifdef STK_POLL_ALS
		hrtimer_cancel(&ps_data->als_timer);
#else
#ifndef STK_POLL_PS
		if(!(ps_data->ps_enabled))
#endif
			disable_irq(ps_data->irq);
#endif
	}
	if (!enable)
	{
		ret = stk3x1x_device_ctl(ps_data, enable);
		if (ret)
			return ret;
	}
	return ret;
}

/*
 * Push one sample through the moving-average filter.  While the window is
 * still filling the raw sample is returned unchanged; once full, the oldest
 * sample is replaced and the windowed average is returned.
 */
static inline int32_t stk3x1x_filter_reading(struct stk3x1x_data *ps_data, int32_t word_data)
{
	int index;
	int firlen = atomic_read(&ps_data->firlength);

	if (ps_data->fir.number < firlen)
	{
		ps_data->fir.raw[ps_data->fir.number] = word_data;
		ps_data->fir.sum += word_data;
		ps_data->fir.number++;
		ps_data->fir.idx++;
	}
	else
	{
		index = ps_data->fir.idx % firlen;
		ps_data->fir.sum -= ps_data->fir.raw[index];
		ps_data->fir.raw[index] = word_data;
		ps_data->fir.sum += word_data;
		ps_data->fir.idx++;
		word_data = ps_data->fir.sum/firlen;
	}
	return word_data;
}

/* Read the 16-bit ALS data register (byte-swapped), optionally FIR-filtered. */
static inline int32_t stk3x1x_get_als_reading(struct stk3x1x_data *ps_data)
{
	int32_t word_data, tmp_word_data;

	tmp_word_data = i2c_smbus_read_word_data(ps_data->client, STK_DATA1_ALS_REG);
	if(tmp_word_data < 0)
	{
		printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data);
		return tmp_word_data;
	}
	word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ;
	if (ps_data->use_fir)
		word_data = stk3x1x_filter_reading(ps_data, word_data);
	return
word_data; } static int32_t stk3x1x_get_ir_reading(struct stk3x1x_data *ps_data) { int32_t word_data, tmp_word_data; int32_t ret; uint8_t w_reg, retry = 0; if(ps_data->ps_enabled) { stk3x1x_enable_ps(ps_data, 0); ps_data->ps_enabled = true; } ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } w_reg = (uint8_t)(ret & (~STK_STATE_EN_IRS_MASK)); w_reg |= STK_STATE_EN_IRS_MASK; ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } msleep(100); do { msleep(50); ret = stk3x1x_get_flag(ps_data); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } retry++; }while(retry < 5 && ((ret&STK_FLG_IR_RDY_MASK) == 0)); if(retry == 5) { printk(KERN_ERR "%s: ir data is not ready for 300ms\n", __func__); return -EINVAL; } ret = stk3x1x_get_flag(ps_data); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } ret = stk3x1x_set_flag(ps_data, ret, STK_FLG_IR_RDY_MASK); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } tmp_word_data = i2c_smbus_read_word_data(ps_data->client, STK_DATA1_IR_REG); if(tmp_word_data < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data); return tmp_word_data; } word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ; if(ps_data->ps_enabled) stk3x1x_enable_ps(ps_data, 1); return word_data; } static ssize_t stk_als_code_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); int32_t reading; reading = stk3x1x_get_als_reading(ps_data); return scnprintf(buf, PAGE_SIZE, "%d\n", reading); } static int stk_als_enable_set(struct sensors_classdev *sensors_cdev, unsigned int enabled) { struct stk3x1x_data *als_data = container_of(sensors_cdev, struct stk3x1x_data, als_cdev); int err; 
	mutex_lock(&als_data->io_lock);
	err = stk3x1x_enable_als(als_data, enabled);
	mutex_unlock(&als_data->io_lock);
	if (err < 0)
		return err;
	return 0;
}

/* sysfs: report the ALS enable bit read back from the STATE register. */
static ssize_t stk_als_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
	int32_t enable, ret;

	mutex_lock(&ps_data->io_lock);
	enable = (ps_data->als_enabled)?1:0;
	mutex_unlock(&ps_data->io_lock);
	ret = i2c_smbus_read_byte_data(ps_data->client,STK_STATE_REG);
	ret = (ret & STK_STATE_EN_ALS_MASK)?1:0;
	/* warn when driver state and hardware state disagree */
	if(enable != ret)
		printk(KERN_ERR "%s: driver and sensor mismatch! driver_enable=0x%x, sensor_enable=%x\n", __func__, enable, ret);
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}

/* sysfs: accept "0"/"1" to disable/enable the ALS. */
static ssize_t stk_als_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
	uint8_t en;

	if (sysfs_streq(buf, "1"))
		en = 1;
	else if (sysfs_streq(buf, "0"))
		en = 0;
	else
	{
		printk(KERN_ERR "%s, invalid value %d\n", __func__, *buf);
		return -EINVAL;
	}
	dev_dbg(dev, "%s: Enable ALS : %d\n", __func__, en);
	mutex_lock(&ps_data->io_lock);
	stk3x1x_enable_als(ps_data, en);
	mutex_unlock(&ps_data->io_lock);
	return size;
}

/* sysfs: read the sensor and report the value converted to lux. */
static ssize_t stk_als_lux_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
	int32_t als_reading;
	uint32_t als_lux;

	als_reading = stk3x1x_get_als_reading(ps_data);
	mutex_lock(&ps_data->io_lock);
	als_lux = stk_alscode2lux(ps_data, als_reading);
	mutex_unlock(&ps_data->io_lock);
	return scnprintf(buf, PAGE_SIZE, "%d lux\n", als_lux);
}

/* sysfs: inject a lux value (hex) directly into the ALS input device. */
static ssize_t stk_als_lux_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
	unsigned long value = 0;
	int ret;

	ret = kstrtoul(buf, 16, &value);
	if(ret < 0)
	{
		printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret);
		return ret;
	}
	mutex_lock(&ps_data->io_lock);
	ps_data->als_lux_last = value;
	input_report_abs(ps_data->als_input_dev, ABS_MISC, value);
	input_sync(ps_data->als_input_dev);
	mutex_unlock(&ps_data->io_lock);
	dev_dbg(dev, "%s: als input event %ld lux\n", __func__, value);
	return size;
}

/* sysfs: report the configured ALS transmittance factor. */
static ssize_t stk_als_transmittance_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
	int32_t transmittance;

	mutex_lock(&ps_data->io_lock);
	transmittance = ps_data->als_transmittance;
	mutex_unlock(&ps_data->io_lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", transmittance);
}

/* sysfs: set the ALS transmittance factor (decimal). */
static ssize_t stk_als_transmittance_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
	unsigned long value = 0;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if(ret < 0)
	{
		printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret);
		return ret;
	}
	mutex_lock(&ps_data->io_lock);
	ps_data->als_transmittance = value;
	mutex_unlock(&ps_data->io_lock);
	return size;
}

/* sysfs: report the ALS polling period in milliseconds. */
static ssize_t stk_als_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%u\n", (u32)ktime_to_ms(ps_data->als_poll_delay));
}

/* Reset the FIR filter state after a polling-rate change. */
static inline void stk_als_delay_store_fir(struct stk3x1x_data *ps_data)
{
	ps_data->fir.number = 0;
	ps_data->fir.idx = 0;
	ps_data->fir.sum = 0;
}

/* sensors-class callback: set the ALS poll delay, clamped to the minimum. */
static int stk_als_poll_delay_set(struct sensors_classdev *sensors_cdev, unsigned int delay_msec)
{
	struct stk3x1x_data *als_data = container_of(sensors_cdev, struct stk3x1x_data, als_cdev);
	uint64_t value = 0;

	value = delay_msec * 1000000;
	if (value < MIN_ALS_POLL_DELAY_NS)
		value = MIN_ALS_POLL_DELAY_NS;

	mutex_lock(&als_data->io_lock);
	if (value != ktime_to_ns(als_data->als_poll_delay))
		als_data->als_poll_delay = ns_to_ktime(value);
	if (als_data->use_fir)
		stk_als_delay_store_fir(als_data);
	mutex_unlock(&als_data->io_lock);
	return 0;
}

static ssize_t
stk_als_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
{
	uint64_t value = 0;
	int ret;
	struct stk3x1x_data *als_data = dev_get_drvdata(dev);

	ret = kstrtoull(buf, 10, &value);
	if(ret < 0)
	{
		dev_err(dev, "%s:kstrtoull failed, ret=0x%x\n", __func__, ret);
		return ret;
	}
#ifdef STK_DEBUG_PRINTF
	dev_dbg(dev, "%s: set als poll delay=%lld\n", __func__, value);
#endif
	ret = stk_als_poll_delay_set(&als_data->als_cdev, value);
	if (ret < 0)
		return ret;
	return size;
}

/* sysfs: one-shot IR reading (temporarily switches the engine to IR mode). */
static ssize_t stk_als_ir_code_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
	int32_t reading;

	reading = stk3x1x_get_ir_reading(ps_data);
	return scnprintf(buf, PAGE_SIZE, "%d\n", reading);
}

/* sysfs: report the current FIR filter length plus running statistics. */
static ssize_t stk_als_firlen_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
	int len = atomic_read(&ps_data->firlength);

	dev_dbg(dev, "%s: len = %2d, idx = %2d\n", __func__, len, ps_data->fir.idx);
	dev_dbg(dev, "%s: sum = %5d, ave = %5d\n", __func__, ps_data->fir.sum, ps_data->fir.sum/len);
	return scnprintf(buf, PAGE_SIZE, "%d\n", len);
}

/* sysfs: set the FIR filter length (clamped to 1..MAX_FIR_LEN) and reset it. */
static ssize_t stk_als_firlen_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
{
	uint64_t value = 0;
	int ret;
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);

	ret = kstrtoull(buf, 10, &value);
	if (ret < 0)
	{
		dev_err(dev, "%s:strict_strtoull failed, ret=0x%x\n", __func__, ret);
		return ret;
	}
	if (value > MAX_FIR_LEN)
	{
		/* over-long requests are rejected with just a message */
		dev_err(dev, "%s: firlen exceed maximum filter length\n", __func__);
	}
	else if (value < 1)
	{
		atomic_set(&ps_data->firlength, 1);
		memset(&ps_data->fir, 0x00, sizeof(ps_data->fir));
	}
	else
	{
		atomic_set(&ps_data->firlength, value);
		memset(&ps_data->fir, 0x00, sizeof(ps_data->fir));
	}
	return size;
}

/* sysfs: report whether FIR filtering of ALS data is enabled. */
static ssize_t stk_als_fir_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct stk3x1x_data *ps_data =
		dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", ps_data->use_fir);
}

/* sysfs: enable/disable FIR filtering; enabling resets the filter state. */
static ssize_t stk_als_fir_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
{
	uint64_t value = 0;
	int ret;
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);

	ret = kstrtoull(buf, 10, &value);
	if (ret < 0)
	{
		dev_err(dev, "%s:strict_strtoull failed, ret=0x%x\n", __func__, ret);
		return ret;
	}
	if (value)
	{
		ps_data->use_fir = true;
		memset(&ps_data->fir, 0x00, sizeof(ps_data->fir));
	}
	else
	{
		ps_data->use_fir = false;
	}
	return size;
}

/* sysfs: raw 16-bit PS reading. */
static ssize_t stk_ps_code_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
	uint32_t reading;

	reading = stk3x1x_get_ps_reading(ps_data);
	return scnprintf(buf, PAGE_SIZE, "%d\n", reading);
}

/* sensors-class callback: enable/disable the proximity sensor under io_lock. */
static int stk_ps_enable_set(struct sensors_classdev *sensors_cdev, unsigned int enabled)
{
	struct stk3x1x_data *ps_data = container_of(sensors_cdev, struct stk3x1x_data, ps_cdev);
	int err;

	mutex_lock(&ps_data->io_lock);
	err = stk3x1x_enable_ps(ps_data, enabled);
	mutex_unlock(&ps_data->io_lock);
	if (err < 0)
		return err;
	return 0;
}

/* sysfs: report the PS enable bit read back from the STATE register. */
static ssize_t stk_ps_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int32_t enable, ret;
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);

	mutex_lock(&ps_data->io_lock);
	enable = (ps_data->ps_enabled)?1:0;
	mutex_unlock(&ps_data->io_lock);
	ret = i2c_smbus_read_byte_data(ps_data->client,STK_STATE_REG);
	ret = (ret & STK_STATE_EN_PS_MASK)?1:0;
	/* warn when driver state and hardware state disagree */
	if(enable != ret)
		printk(KERN_ERR "%s: driver and sensor mismatch! driver_enable=0x%x, sensor_enable=%x\n", __func__, enable, ret);
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}

/* sysfs: accept "0"/"1" to disable/enable the proximity sensor. */
static ssize_t stk_ps_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
	uint8_t en;

	if (sysfs_streq(buf, "1"))
		en = 1;
	else if (sysfs_streq(buf, "0"))
		en = 0;
	else
	{
		printk(KERN_ERR "%s, invalid value %d\n", __func__, *buf);
		return -EINVAL;
	}
	dev_dbg(dev, "%s: Enable PS : %d\n", __func__, en);
	mutex_lock(&ps_data->io_lock);
	stk3x1x_enable_ps(ps_data, en);
	mutex_unlock(&ps_data->io_lock);
	return size;
}

/* sysfs: report the STK_STATE_EN_ASO bit of the STATE register. */
static ssize_t stk_ps_enable_aso_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int32_t ret;
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);

	ret = i2c_smbus_read_byte_data(ps_data->client,STK_STATE_REG);
	ret = (ret & STK_STATE_EN_ASO_MASK)?1:0;
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}

/* sysfs: set/clear the STK_STATE_EN_ASO bit via read-modify-write. */
static ssize_t stk_ps_enable_aso_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
	uint8_t en;
	int32_t ret;
	uint8_t w_state_reg;

	if (sysfs_streq(buf, "1"))
		en = 1;
	else if (sysfs_streq(buf, "0"))
		en = 0;
	else
	{
		printk(KERN_ERR "%s, invalid value %d\n", __func__, *buf);
		return -EINVAL;
	}
	dev_dbg(dev, "%s: Enable PS ASO : %d\n", __func__, en);

	ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG);
	if (ret < 0)
	{
		printk(KERN_ERR "%s: write i2c error\n", __func__);
		return ret;
	}
	w_state_reg = (uint8_t)(ret & (~STK_STATE_EN_ASO_MASK));
	if(en)
		w_state_reg |= STK_STATE_EN_ASO_MASK;
	ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg);
	if (ret < 0)
	{
		printk(KERN_ERR "%s: write i2c error\n", __func__);
		return ret;
	}
	return size;
}

/* sysfs: report the 16-bit PS offset register, byte-swapped to host order. */
static ssize_t stk_ps_offset_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
	int32_t word_data, tmp_word_data;

	tmp_word_data =
i2c_smbus_read_word_data(ps_data->client, STK_DATA1_OFFSET_REG); if(tmp_word_data < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data); return tmp_word_data; } word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ; return scnprintf(buf, PAGE_SIZE, "%d\n", word_data); } static ssize_t stk_ps_offset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; uint16_t offset; ret = kstrtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } if(value > 65535) { printk(KERN_ERR "%s: invalid value, offset=%ld\n", __func__, value); return -EINVAL; } offset = (uint16_t) ((value&0x00FF) << 8) | ((value&0xFF00) >>8); ret = i2c_smbus_write_word_data(ps_data->client,STK_DATA1_OFFSET_REG,offset); if(ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } return size; } static ssize_t stk_ps_distance_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); int32_t dist=1, ret; mutex_lock(&ps_data->io_lock); ret = stk3x1x_get_flag(ps_data); if(ret < 0) { printk(KERN_ERR "%s: stk3x1x_get_flag failed, ret=0x%x\n", __func__, ret); return ret; } dist = (ret & STK_FLG_NF_MASK)?1:0; ps_data->ps_distance_last = dist; input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, dist); input_sync(ps_data->ps_input_dev); mutex_unlock(&ps_data->io_lock); wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ); dev_dbg(dev, "%s: ps input event %d cm\n", __func__, dist); return scnprintf(buf, PAGE_SIZE, "%d\n", dist); } static ssize_t stk_ps_distance_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; ret = kstrtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, 
ret=0x%x\n", __func__, ret); return ret; } mutex_lock(&ps_data->io_lock); ps_data->ps_distance_last = value; input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, value); input_sync(ps_data->ps_input_dev); mutex_unlock(&ps_data->io_lock); wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ); dev_dbg(dev, "%s: ps input event %ld cm\n", __func__, value); return size; } static ssize_t stk_ps_code_thd_l_show(struct device *dev, struct device_attribute *attr, char *buf) { int32_t ps_thd_l1_reg, ps_thd_l2_reg; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); mutex_lock(&ps_data->io_lock); ps_thd_l1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL1_PS_REG); if(ps_thd_l1_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_l1_reg); return -EINVAL; } ps_thd_l2_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL2_PS_REG); if(ps_thd_l2_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_l2_reg); return -EINVAL; } mutex_unlock(&ps_data->io_lock); ps_thd_l1_reg = ps_thd_l1_reg<<8 | ps_thd_l2_reg; return scnprintf(buf, PAGE_SIZE, "%d\n", ps_thd_l1_reg); } static ssize_t stk_ps_code_thd_l_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; ret = kstrtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } mutex_lock(&ps_data->io_lock); stk3x1x_set_ps_thd_l(ps_data, value); mutex_unlock(&ps_data->io_lock); return size; } static ssize_t stk_ps_code_thd_h_show(struct device *dev, struct device_attribute *attr, char *buf) { int32_t ps_thd_h1_reg, ps_thd_h2_reg; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); mutex_lock(&ps_data->io_lock); ps_thd_h1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH1_PS_REG); if(ps_thd_h1_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_h1_reg); return -EINVAL; } ps_thd_h2_reg = 
i2c_smbus_read_byte_data(ps_data->client,STK_THDH2_PS_REG); if(ps_thd_h2_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_h2_reg); return -EINVAL; } mutex_unlock(&ps_data->io_lock); ps_thd_h1_reg = ps_thd_h1_reg<<8 | ps_thd_h2_reg; return scnprintf(buf, PAGE_SIZE, "%d\n", ps_thd_h1_reg); } static ssize_t stk_ps_code_thd_h_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; ret = kstrtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } mutex_lock(&ps_data->io_lock); stk3x1x_set_ps_thd_h(ps_data, value); mutex_unlock(&ps_data->io_lock); return size; } #if 0 static ssize_t stk_als_lux_thd_l_show(struct device *dev, struct device_attribute *attr, char *buf) { int32_t als_thd_l0_reg,als_thd_l1_reg; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); uint32_t als_lux; mutex_lock(&ps_data->io_lock); als_thd_l0_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL1_ALS_REG); als_thd_l1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL2_ALS_REG); if(als_thd_l0_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_l0_reg); return -EINVAL; } if(als_thd_l1_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_l1_reg); return -EINVAL; } als_thd_l0_reg|=(als_thd_l1_reg<<8); als_lux = stk_alscode2lux(ps_data, als_thd_l0_reg); mutex_unlock(&ps_data->io_lock); return scnprintf(buf, PAGE_SIZE, "%d\n", als_lux); } static ssize_t stk_als_lux_thd_l_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; ret = kstrtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } mutex_lock(&ps_data->io_lock); value = stk_lux2alscode(ps_data, value); stk3x1x_set_als_thd_l(ps_data, 
value); mutex_unlock(&ps_data->io_lock); return size; } static ssize_t stk_als_lux_thd_h_show(struct device *dev, struct device_attribute *attr, char *buf) { int32_t als_thd_h0_reg,als_thd_h1_reg; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); uint32_t als_lux; mutex_lock(&ps_data->io_lock); als_thd_h0_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH1_ALS_REG); als_thd_h1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH2_ALS_REG); if(als_thd_h0_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_h0_reg); return -EINVAL; } if(als_thd_h1_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_h1_reg); return -EINVAL; } als_thd_h0_reg|=(als_thd_h1_reg<<8); als_lux = stk_alscode2lux(ps_data, als_thd_h0_reg); mutex_unlock(&ps_data->io_lock); return scnprintf(buf, PAGE_SIZE, "%d\n", als_lux); } static ssize_t stk_als_lux_thd_h_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; ret = strict_strtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret); return ret; } mutex_lock(&ps_data->io_lock); value = stk_lux2alscode(ps_data, value); stk3x1x_set_als_thd_h(ps_data, value); mutex_unlock(&ps_data->io_lock); return size; } #endif static ssize_t stk_all_reg_show(struct device *dev, struct device_attribute *attr, char *buf) { int32_t ps_reg[27]; uint8_t cnt; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); mutex_lock(&ps_data->io_lock); for(cnt=0;cnt<25;cnt++) { ps_reg[cnt] = i2c_smbus_read_byte_data(ps_data->client, (cnt)); if(ps_reg[cnt] < 0) { mutex_unlock(&ps_data->io_lock); printk(KERN_ERR "stk_all_reg_show:i2c_smbus_read_byte_data fail, ret=%d", ps_reg[cnt]); return -EINVAL; } else { dev_dbg(dev, "reg[0x%2X]=0x%2X\n", cnt, ps_reg[cnt]); } } ps_reg[cnt] = i2c_smbus_read_byte_data(ps_data->client, STK_PDT_ID_REG); if(ps_reg[cnt] < 0) { 
mutex_unlock(&ps_data->io_lock); printk( KERN_ERR "all_reg_show:i2c_smbus_read_byte_data fail, ret=%d", ps_reg[cnt]); return -EINVAL; } dev_dbg(dev, "reg[0x%x]=0x%2X\n", STK_PDT_ID_REG, ps_reg[cnt]); cnt++; ps_reg[cnt] = i2c_smbus_read_byte_data(ps_data->client, STK_RSRVD_REG); if(ps_reg[cnt] < 0) { mutex_unlock(&ps_data->io_lock); printk( KERN_ERR "all_reg_show:i2c_smbus_read_byte_data fail, ret=%d", ps_reg[cnt]); return -EINVAL; } dev_dbg(dev, "reg[0x%x]=0x%2X\n", STK_RSRVD_REG, ps_reg[cnt]); mutex_unlock(&ps_data->io_lock); return scnprintf(buf, PAGE_SIZE, "%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X\n", ps_reg[0], ps_reg[1], ps_reg[2], ps_reg[3], ps_reg[4], ps_reg[5], ps_reg[6], ps_reg[7], ps_reg[8], ps_reg[9], ps_reg[10], ps_reg[11], ps_reg[12], ps_reg[13], ps_reg[14], ps_reg[15], ps_reg[16], ps_reg[17], ps_reg[18], ps_reg[19], ps_reg[20], ps_reg[21], ps_reg[22], ps_reg[23], ps_reg[24], ps_reg[25], ps_reg[26]); } static ssize_t stk_recv_show(struct device *dev, struct device_attribute *attr, char *buf) { return 0; } static ssize_t stk_recv_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { unsigned long value = 0; int ret; int32_t recv_data; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); ret = kstrtoul(buf, 16, &value); if (ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } recv_data = i2c_smbus_read_byte_data(ps_data->client,value); printk("%s: reg 0x%x=0x%x\n", __func__, (int)value, recv_data); return size; } static ssize_t stk_send_show(struct device *dev, struct device_attribute *attr, char *buf) { return 0; } static ssize_t stk_send_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int addr, cmd; u8 addr_u8, cmd_u8; int32_t ret, i; char *token[10]; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); for (i = 0; i < 2; i++) token[i] = strsep((char **)&buf, " "); ret = 
kstrtoul(token[0], 16, (unsigned long *)&(addr)); if (ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } ret = kstrtoul(token[1], 16, (unsigned long *)&(cmd)); if (ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } dev_dbg(dev, "%s: write reg 0x%x=0x%x\n", __func__, addr, cmd); addr_u8 = (u8) addr; cmd_u8 = (u8) cmd; //mutex_lock(&ps_data->io_lock); ret = i2c_smbus_write_byte_data(ps_data->client,addr_u8,cmd_u8); //mutex_unlock(&ps_data->io_lock); if (0 != ret) { printk(KERN_ERR "%s: i2c_smbus_write_byte_data fail\n", __func__); return ret; } return size; } static struct device_attribute als_enable_attribute = __ATTR(enable,0664,stk_als_enable_show,stk_als_enable_store); static struct device_attribute als_lux_attribute = __ATTR(lux,0664,stk_als_lux_show,stk_als_lux_store); static struct device_attribute als_code_attribute = __ATTR(code, 0444, stk_als_code_show, NULL); static struct device_attribute als_transmittance_attribute = __ATTR(transmittance,0664,stk_als_transmittance_show,stk_als_transmittance_store); static struct device_attribute als_poll_delay_attribute = __ATTR(poll_delay, 0664, stk_als_delay_show, stk_als_delay_store); static struct device_attribute als_ir_code_attribute = __ATTR(ircode,0444,stk_als_ir_code_show,NULL); static struct device_attribute als_firlen_attribute = __ATTR(firlen, 0664, stk_als_firlen_show, stk_als_firlen_store); static struct device_attribute als_fir_enable_attribute = __ATTR(fir_enable, 0664, stk_als_fir_enable_show, stk_als_fir_enable_store); static struct attribute *stk_als_attrs [] = { &als_enable_attribute.attr, &als_lux_attribute.attr, &als_code_attribute.attr, &als_transmittance_attribute.attr, &als_poll_delay_attribute.attr, &als_ir_code_attribute.attr, &als_firlen_attribute.attr, &als_fir_enable_attribute.attr, NULL }; static struct attribute_group stk_als_attribute_group = { .attrs = stk_als_attrs, }; static struct device_attribute 
ps_enable_attribute = __ATTR(enable,0664,stk_ps_enable_show,stk_ps_enable_store); static struct device_attribute ps_enable_aso_attribute = __ATTR(enableaso,0664,stk_ps_enable_aso_show,stk_ps_enable_aso_store); static struct device_attribute ps_distance_attribute = __ATTR(distance,0664,stk_ps_distance_show, stk_ps_distance_store); static struct device_attribute ps_offset_attribute = __ATTR(offset,0664,stk_ps_offset_show, stk_ps_offset_store); static struct device_attribute ps_code_attribute = __ATTR(code, 0444, stk_ps_code_show, NULL); static struct device_attribute ps_code_thd_l_attribute = __ATTR(codethdl,0664,stk_ps_code_thd_l_show,stk_ps_code_thd_l_store); static struct device_attribute ps_code_thd_h_attribute = __ATTR(codethdh,0664,stk_ps_code_thd_h_show,stk_ps_code_thd_h_store); static struct device_attribute recv_attribute = __ATTR(recv,0664,stk_recv_show,stk_recv_store); static struct device_attribute send_attribute = __ATTR(send,0664,stk_send_show, stk_send_store); static struct device_attribute all_reg_attribute = __ATTR(allreg, 0444, stk_all_reg_show, NULL); static struct attribute *stk_ps_attrs [] = { &ps_enable_attribute.attr, &ps_enable_aso_attribute.attr, &ps_distance_attribute.attr, &ps_offset_attribute.attr, &ps_code_attribute.attr, &ps_code_thd_l_attribute.attr, &ps_code_thd_h_attribute.attr, &recv_attribute.attr, &send_attribute.attr, &all_reg_attribute.attr, NULL }; static struct attribute_group stk_ps_attribute_group = { .attrs = stk_ps_attrs, }; #ifdef STK_POLL_ALS static enum hrtimer_restart stk_als_timer_func(struct hrtimer *timer) { struct stk3x1x_data *ps_data = container_of(timer, struct stk3x1x_data, als_timer); queue_work(ps_data->stk_als_wq, &ps_data->stk_als_work); hrtimer_forward_now(&ps_data->als_timer, ps_data->als_poll_delay); return HRTIMER_RESTART; } static void stk_als_work_func(struct work_struct *work) { struct stk3x1x_data *ps_data = container_of(work, struct stk3x1x_data, stk_als_work); int32_t reading; 
mutex_lock(&ps_data->io_lock); reading = stk3x1x_get_als_reading(ps_data); if(reading < 0) return; ps_data->als_lux_last = stk_alscode2lux(ps_data, reading); input_report_abs(ps_data->als_input_dev, ABS_MISC, ps_data->als_lux_last); input_sync(ps_data->als_input_dev); mutex_unlock(&ps_data->io_lock); } #endif static enum hrtimer_restart stk_ps_timer_func(struct hrtimer *timer) { struct stk3x1x_data *ps_data = container_of(timer, struct stk3x1x_data, ps_timer); queue_work(ps_data->stk_ps_wq, &ps_data->stk_ps_work); #ifdef STK_POLL_PS hrtimer_forward_now(&ps_data->ps_timer, ps_data->ps_poll_delay); return HRTIMER_RESTART; #else hrtimer_cancel(&ps_data->ps_timer); return HRTIMER_NORESTART; #endif } static void stk_ps_work_func(struct work_struct *work) { struct stk3x1x_data *ps_data = container_of(work, struct stk3x1x_data, stk_ps_work); uint32_t reading; int32_t near_far_state; uint8_t org_flag_reg; int32_t ret; uint8_t disable_flag = 0; mutex_lock(&ps_data->io_lock); org_flag_reg = stk3x1x_get_flag(ps_data); if(org_flag_reg < 0) { printk(KERN_ERR "%s: get_status_reg fail, ret=%d", __func__, org_flag_reg); goto err_i2c_rw; } near_far_state = (org_flag_reg & STK_FLG_NF_MASK)?1:0; reading = stk3x1x_get_ps_reading(ps_data); if(ps_data->ps_distance_last != near_far_state) { ps_data->ps_distance_last = near_far_state; input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state); input_sync(ps_data->ps_input_dev); wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ); #ifdef STK_DEBUG_PRINTF printk(KERN_INFO "%s: ps input event %d cm, ps code = %d\n",__func__, near_far_state, reading); #endif } ret = stk3x1x_set_flag(ps_data, org_flag_reg, disable_flag); if(ret < 0) { printk(KERN_ERR "%s:stk3x1x_set_flag fail, ret=%d\n", __func__, ret); goto err_i2c_rw; } mutex_unlock(&ps_data->io_lock); return; err_i2c_rw: mutex_unlock(&ps_data->io_lock); msleep(30); return; } #if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS)) static void stk_work_func(struct work_struct *work) { 
uint32_t reading; #if ((STK_INT_PS_MODE != 0x03) && (STK_INT_PS_MODE != 0x02)) int32_t ret; uint8_t disable_flag = 0; uint8_t org_flag_reg; #endif /* #if ((STK_INT_PS_MODE != 0x03) && (STK_INT_PS_MODE != 0x02)) */ #ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD uint32_t nLuxIndex; #endif struct stk3x1x_data *ps_data = container_of(work, struct stk3x1x_data, stk_work); int32_t near_far_state; mutex_lock(&ps_data->io_lock); #if (STK_INT_PS_MODE == 0x03) near_far_state = gpio_get_value(ps_data->int_pin); #elif (STK_INT_PS_MODE == 0x02) near_far_state = !(gpio_get_value(ps_data->int_pin)); #endif #if ((STK_INT_PS_MODE == 0x03) || (STK_INT_PS_MODE == 0x02)) ps_data->ps_distance_last = near_far_state; input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state); input_sync(ps_data->ps_input_dev); wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ); reading = stk3x1x_get_ps_reading(ps_data); #ifdef STK_DEBUG_PRINTF printk(KERN_INFO "%s: ps input event %d cm, ps code = %d\n",__func__, near_far_state, reading); #endif #else /* mode 0x01 or 0x04 */ org_flag_reg = stk3x1x_get_flag(ps_data); if(org_flag_reg < 0) { printk(KERN_ERR "%s: get_status_reg fail, org_flag_reg=%d", __func__, org_flag_reg); goto err_i2c_rw; } if (org_flag_reg & STK_FLG_ALSINT_MASK) { disable_flag |= STK_FLG_ALSINT_MASK; reading = stk3x1x_get_als_reading(ps_data); if(reading < 0) { printk(KERN_ERR "%s: stk3x1x_get_als_reading fail, ret=%d", __func__, reading); goto err_i2c_rw; } #ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD nLuxIndex = stk_get_lux_interval_index(reading); stk3x1x_set_als_thd_h(ps_data, code_threshold_table[nLuxIndex]); stk3x1x_set_als_thd_l(ps_data, code_threshold_table[nLuxIndex-1]); #else stk_als_set_new_thd(ps_data, reading); #endif //CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD ps_data->als_lux_last = stk_alscode2lux(ps_data, reading); input_report_abs(ps_data->als_input_dev, ABS_MISC, ps_data->als_lux_last); input_sync(ps_data->als_input_dev); #ifdef STK_DEBUG_PRINTF printk(KERN_INFO 
"%s: als input event %d lux\n",__func__, ps_data->als_lux_last); #endif } if (org_flag_reg & STK_FLG_PSINT_MASK) { disable_flag |= STK_FLG_PSINT_MASK; near_far_state = (org_flag_reg & STK_FLG_NF_MASK)?1:0; ps_data->ps_distance_last = near_far_state; input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state); input_sync(ps_data->ps_input_dev); wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ); reading = stk3x1x_get_ps_reading(ps_data); #ifdef STK_DEBUG_PRINTF printk(KERN_INFO "%s: ps input event=%d, ps code = %d\n",__func__, near_far_state, reading); #endif } ret = stk3x1x_set_flag(ps_data, org_flag_reg, disable_flag); if(ret < 0) { printk(KERN_ERR "%s:reset_int_flag fail, ret=%d\n", __func__, ret); goto err_i2c_rw; } #endif msleep(1); enable_irq(ps_data->irq); mutex_unlock(&ps_data->io_lock); return; err_i2c_rw: mutex_unlock(&ps_data->io_lock); msleep(30); enable_irq(ps_data->irq); return; } #endif #if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS)) static irqreturn_t stk_oss_irq_handler(int irq, void *data) { struct stk3x1x_data *pData = data; disable_irq_nosync(irq); queue_work(pData->stk_wq,&pData->stk_work); return IRQ_HANDLED; } #endif /* #if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS)) */ static inline void stk3x1x_init_fir(struct stk3x1x_data *ps_data) { memset(&ps_data->fir, 0x00, sizeof(ps_data->fir)); atomic_set(&ps_data->firlength, STK_FIR_LEN); } static int32_t stk3x1x_init_all_setting(struct i2c_client *client, struct stk3x1x_platform_data *plat_data) { int32_t ret; struct stk3x1x_data *ps_data = i2c_get_clientdata(client); ret = stk3x1x_software_reset(ps_data); if(ret < 0) return ret; stk3x1x_check_pid(ps_data); if(ret < 0) return ret; ret = stk3x1x_init_all_reg(ps_data, plat_data); if(ret < 0) return ret; #ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD stk_init_code_threshold_table(ps_data); #endif if (plat_data->use_fir) stk3x1x_init_fir(ps_data); return 0; } #if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS)) static int 
stk3x1x_setup_irq(struct i2c_client *client) { int irq, err = -EIO; struct stk3x1x_data *ps_data = i2c_get_clientdata(client); irq = gpio_to_irq(ps_data->int_pin); #ifdef STK_DEBUG_PRINTF printk(KERN_INFO "%s: int pin #=%d, irq=%d\n",__func__, ps_data->int_pin, irq); #endif if (irq <= 0) { printk(KERN_ERR "irq number is not specified, irq # = %d, int pin=%d\n",irq, ps_data->int_pin); return irq; } ps_data->irq = irq; err = gpio_request(ps_data->int_pin,"stk-int"); if(err < 0) { printk(KERN_ERR "%s: gpio_request, err=%d", __func__, err); return err; } err = gpio_direction_input(ps_data->int_pin); if(err < 0) { printk(KERN_ERR "%s: gpio_direction_input, err=%d", __func__, err); return err; } #if ((STK_INT_PS_MODE == 0x03) || (STK_INT_PS_MODE == 0x02)) err = request_any_context_irq(irq, stk_oss_irq_handler, IRQF_TRIGGER_FALLING|IRQF_TRIGGER_RISING, DEVICE_NAME, ps_data); #else err = request_any_context_irq(irq, stk_oss_irq_handler, IRQF_TRIGGER_LOW, DEVICE_NAME, ps_data); #endif if (err < 0) { printk(KERN_WARNING "%s: request_any_context_irq(%d) failed for (%d)\n", __func__, irq, err); goto err_request_any_context_irq; } disable_irq(irq); return 0; err_request_any_context_irq: gpio_free(ps_data->int_pin); return err; } #endif #ifdef CONFIG_HAS_EARLYSUSPEND static void stk3x1x_early_suspend(struct early_suspend *h) { struct stk3x1x_data *ps_data = container_of(h, struct stk3x1x_data, stk_early_suspend); #ifndef STK_POLL_PS int err; #endif mutex_lock(&ps_data->io_lock); if(ps_data->als_enabled) { stk3x1x_enable_als(ps_data, 0); ps_data->als_enabled = true; } if(ps_data->ps_enabled) { #ifdef STK_POLL_PS wake_lock(&ps_data->ps_nosuspend_wl); #else err = enable_irq_wake(ps_data->irq); if (err) printk(KERN_WARNING "%s: set_irq_wake(%d) failed, err=(%d)\n", __func__, ps_data->irq, err); #endif } mutex_unlock(&ps_data->io_lock); return; } static void stk3x1x_late_resume(struct early_suspend *h) { struct stk3x1x_data *ps_data = container_of(h, struct stk3x1x_data, 
stk_early_suspend); #ifndef STK_POLL_PS int err; #endif mutex_lock(&ps_data->io_lock); if(ps_data->als_enabled) stk3x1x_enable_als(ps_data, 1); if(ps_data->ps_enabled) { #ifdef STK_POLL_PS wake_lock(&ps_data->ps_nosuspend_wl); #else err = disable_irq_wake(ps_data->irq); if (err) printk(KERN_WARNING "%s: disable_irq_wake(%d) failed, err=(%d)\n", __func__, ps_data->irq, err); #endif } mutex_unlock(&ps_data->io_lock); return; } #endif //#ifdef CONFIG_HAS_EARLYSUSPEND static int stk3x1x_power_ctl(struct stk3x1x_data *data, bool on) { int ret = 0; if (!on && data->power_enabled) { ret = regulator_disable(data->vdd); if (ret) { dev_err(&data->client->dev, "Regulator vdd disable failed ret=%d\n", ret); return ret; } ret = regulator_disable(data->vio); if (ret) { dev_err(&data->client->dev, "Regulator vio disable failed ret=%d\n", ret); ret = regulator_enable(data->vdd); if (ret) { dev_err(&data->client->dev, "Regulator vdd enable failed ret=%d\n", ret); } return ret; } data->power_enabled = on; dev_dbg(&data->client->dev, "stk3x1x_power_ctl on=%d\n", on); } else if (on && !data->power_enabled) { ret = regulator_enable(data->vdd); if (ret) { dev_err(&data->client->dev, "Regulator vdd enable failed ret=%d\n", ret); return ret; } ret = regulator_enable(data->vio); if (ret) { dev_err(&data->client->dev, "Regulator vio enable failed ret=%d\n", ret); regulator_disable(data->vdd); return ret; } data->power_enabled = on; dev_dbg(&data->client->dev, "stk3x1x_power_ctl on=%d\n", on); } else { dev_warn(&data->client->dev, "Power on=%d. 
enabled=%d\n", on, data->power_enabled); } return ret; } static int stk3x1x_power_init(struct stk3x1x_data *data, bool on) { int ret; if (!on) { if (regulator_count_voltages(data->vdd) > 0) regulator_set_voltage(data->vdd, 0, STK3X1X_VDD_MAX_UV); regulator_put(data->vdd); if (regulator_count_voltages(data->vio) > 0) regulator_set_voltage(data->vio, 0, STK3X1X_VIO_MAX_UV); regulator_put(data->vio); } else { data->vdd = regulator_get(&data->client->dev, "vdd"); if (IS_ERR(data->vdd)) { ret = PTR_ERR(data->vdd); dev_err(&data->client->dev, "Regulator get failed vdd ret=%d\n", ret); return ret; } if (regulator_count_voltages(data->vdd) > 0) { ret = regulator_set_voltage(data->vdd, STK3X1X_VDD_MIN_UV, STK3X1X_VDD_MAX_UV); if (ret) { dev_err(&data->client->dev, "Regulator set failed vdd ret=%d\n", ret); goto reg_vdd_put; } } data->vio = regulator_get(&data->client->dev, "vio"); if (IS_ERR(data->vio)) { ret = PTR_ERR(data->vio); dev_err(&data->client->dev, "Regulator get failed vio ret=%d\n", ret); goto reg_vdd_set; } if (regulator_count_voltages(data->vio) > 0) { ret = regulator_set_voltage(data->vio, STK3X1X_VIO_MIN_UV, STK3X1X_VIO_MAX_UV); if (ret) { dev_err(&data->client->dev, "Regulator set failed vio ret=%d\n", ret); goto reg_vio_put; } } } return 0; reg_vio_put: regulator_put(data->vio); reg_vdd_set: if (regulator_count_voltages(data->vdd) > 0) regulator_set_voltage(data->vdd, 0, STK3X1X_VDD_MAX_UV); reg_vdd_put: regulator_put(data->vdd); return ret; } static int stk3x1x_device_ctl(struct stk3x1x_data *ps_data, bool enable) { int ret; struct device *dev = &ps_data->client->dev; if (enable && !ps_data->power_enabled) { ret = stk3x1x_power_ctl(ps_data, true); if (ret) { dev_err(dev, "Failed to enable device power\n"); goto err_exit; } ret = stk3x1x_init_all_setting(ps_data->client, ps_data->pdata); if (ret < 0) { stk3x1x_power_ctl(ps_data, false); dev_err(dev, "Failed to re-init device setting\n"); goto err_exit; } } else if (!enable && ps_data->power_enabled) { if 
(!ps_data->als_enabled && !ps_data->ps_enabled) { ret = stk3x1x_power_ctl(ps_data, false); if (ret) { dev_err(dev, "Failed to disable device power\n"); goto err_exit; } } else { dev_dbg(dev, "device control: als_enabled=%d, ps_enabled=%d\n", ps_data->als_enabled, ps_data->ps_enabled); } } else { dev_dbg(dev, "device control: enable=%d, power_enabled=%d\n", enable, ps_data->power_enabled); } return 0; err_exit: return ret; } #ifdef CONFIG_OF static int stk3x1x_parse_dt(struct device *dev, struct stk3x1x_platform_data *pdata) { int rc; struct device_node *np = dev->of_node; u32 temp_val; pdata->int_pin = of_get_named_gpio_flags(np, "stk,irq-gpio", 0, &pdata->int_flags); if (pdata->int_pin < 0) { dev_err(dev, "Unable to read irq-gpio\n"); return pdata->int_pin; } rc = of_property_read_u32(np, "stk,transmittance", &temp_val); if (!rc) pdata->transmittance = temp_val; else { dev_err(dev, "Unable to read transmittance\n"); return rc; } rc = of_property_read_u32(np, "stk,state-reg", &temp_val); if (!rc) pdata->state_reg = temp_val; else { dev_err(dev, "Unable to read state-reg\n"); return rc; } rc = of_property_read_u32(np, "stk,psctrl-reg", &temp_val); if (!rc) pdata->psctrl_reg = (u8)temp_val; else { dev_err(dev, "Unable to read psctrl-reg\n"); return rc; } rc = of_property_read_u32(np, "stk,alsctrl-reg", &temp_val); if (!rc) pdata->alsctrl_reg = (u8)temp_val; else { dev_err(dev, "Unable to read alsctrl-reg\n"); return rc; } rc = of_property_read_u32(np, "stk,ledctrl-reg", &temp_val); if (!rc) pdata->ledctrl_reg = (u8)temp_val; else { dev_err(dev, "Unable to read ledctrl-reg\n"); return rc; } rc = of_property_read_u32(np, "stk,wait-reg", &temp_val); if (!rc) pdata->wait_reg = (u8)temp_val; else { dev_err(dev, "Unable to read wait-reg\n"); return rc; } rc = of_property_read_u32(np, "stk,ps-thdh", &temp_val); if (!rc) pdata->ps_thd_h = (u16)temp_val; else { dev_err(dev, "Unable to read ps-thdh\n"); return rc; } rc = of_property_read_u32(np, "stk,ps-thdl", &temp_val); if 
(!rc) pdata->ps_thd_l = (u16)temp_val; else { dev_err(dev, "Unable to read ps-thdl\n"); return rc; } pdata->use_fir = of_property_read_bool(np, "stk,use-fir"); return 0; } #else static int stk3x1x_parse_dt(struct device *dev, struct stk3x1x_platform_data *pdata) { return -ENODEV; } #endif /* !CONFIG_OF */ static int stk3x1x_probe(struct i2c_client *client, const struct i2c_device_id *id) { int err = -ENODEV; struct stk3x1x_data *ps_data; struct stk3x1x_platform_data *plat_data; printk(KERN_INFO "%s: driver version = %s\n", __func__, DRIVER_VERSION); if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { printk(KERN_ERR "%s: No Support for I2C_FUNC_SMBUS_BYTE_DATA\n", __func__); return -ENODEV; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) { printk(KERN_ERR "%s: No Support for I2C_FUNC_SMBUS_WORD_DATA\n", __func__); return -ENODEV; } ps_data = kzalloc(sizeof(struct stk3x1x_data),GFP_KERNEL); if(!ps_data) { printk(KERN_ERR "%s: failed to allocate stk3x1x_data\n", __func__); return -ENOMEM; } ps_data->client = client; i2c_set_clientdata(client,ps_data); mutex_init(&ps_data->io_lock); wake_lock_init(&ps_data->ps_wakelock,WAKE_LOCK_SUSPEND, "stk_input_wakelock"); #ifdef STK_POLL_PS wake_lock_init(&ps_data->ps_nosuspend_wl,WAKE_LOCK_SUSPEND, "stk_nosuspend_wakelock"); #endif if (client->dev.of_node) { plat_data = devm_kzalloc(&client->dev, sizeof(struct stk3x1x_platform_data), GFP_KERNEL); if (!plat_data) { dev_err(&client->dev, "Failed to allocate memory\n"); return -ENOMEM; } err = stk3x1x_parse_dt(&client->dev, plat_data); dev_err(&client->dev, "%s: stk3x1x_parse_dt ret=%d\n", __func__, err); if (err) return err; } else plat_data = client->dev.platform_data; if (!plat_data) { dev_err(&client->dev, "%s: no stk3x1x platform data!\n", __func__); goto err_als_input_allocate; } ps_data->als_transmittance = plat_data->transmittance; ps_data->int_pin = plat_data->int_pin; ps_data->use_fir = plat_data->use_fir; ps_data->pdata = 
plat_data; if (ps_data->als_transmittance == 0) { dev_err(&client->dev, "%s: Please set als_transmittance\n", __func__); goto err_als_input_allocate; } ps_data->als_input_dev = devm_input_allocate_device(&client->dev); if (ps_data->als_input_dev==NULL) { printk(KERN_ERR "%s: could not allocate als device\n", __func__); err = -ENOMEM; goto err_als_input_allocate; } ps_data->ps_input_dev = devm_input_allocate_device(&client->dev); if (ps_data->ps_input_dev==NULL) { printk(KERN_ERR "%s: could not allocate ps device\n", __func__); err = -ENOMEM; goto err_als_input_allocate; } ps_data->als_input_dev->name = ALS_NAME; ps_data->ps_input_dev->name = PS_NAME; set_bit(EV_ABS, ps_data->als_input_dev->evbit); set_bit(EV_ABS, ps_data->ps_input_dev->evbit); input_set_abs_params(ps_data->als_input_dev, ABS_MISC, 0, stk_alscode2lux(ps_data, (1<<16)-1), 0, 0); input_set_abs_params(ps_data->ps_input_dev, ABS_DISTANCE, 0,1, 0, 0); err = input_register_device(ps_data->als_input_dev); if (err<0) { printk(KERN_ERR "%s: can not register als input device\n", __func__); goto err_als_input_allocate; } err = input_register_device(ps_data->ps_input_dev); if (err<0) { printk(KERN_ERR "%s: can not register ps input device\n", __func__); goto err_als_input_allocate; } err = sysfs_create_group(&ps_data->als_input_dev->dev.kobj, &stk_als_attribute_group); if (err < 0) { printk(KERN_ERR "%s:could not create sysfs group for als\n", __func__); goto err_als_input_allocate; } err = sysfs_create_group(&ps_data->ps_input_dev->dev.kobj, &stk_ps_attribute_group); if (err < 0) { printk(KERN_ERR "%s:could not create sysfs group for ps\n", __func__); goto err_ps_sysfs_create_group; } input_set_drvdata(ps_data->als_input_dev, ps_data); input_set_drvdata(ps_data->ps_input_dev, ps_data); #ifdef STK_POLL_ALS ps_data->stk_als_wq = create_singlethread_workqueue("stk_als_wq"); INIT_WORK(&ps_data->stk_als_work, stk_als_work_func); hrtimer_init(&ps_data->als_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 
ps_data->als_poll_delay = ns_to_ktime(110 * NSEC_PER_MSEC); ps_data->als_timer.function = stk_als_timer_func; #endif ps_data->stk_ps_wq = create_singlethread_workqueue("stk_ps_wq"); INIT_WORK(&ps_data->stk_ps_work, stk_ps_work_func); hrtimer_init(&ps_data->ps_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ps_data->ps_poll_delay = ns_to_ktime(110 * NSEC_PER_MSEC); ps_data->ps_timer.function = stk_ps_timer_func; #if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS)) ps_data->stk_wq = create_singlethread_workqueue("stk_wq"); INIT_WORK(&ps_data->stk_work, stk_work_func); err = stk3x1x_setup_irq(client); if(err < 0) goto err_stk3x1x_setup_irq; #endif err = stk3x1x_power_init(ps_data, true); if (err) goto err_power_init; err = stk3x1x_power_ctl(ps_data, true); if (err) goto err_power_on; ps_data->als_enabled = false; ps_data->ps_enabled = false; #ifdef CONFIG_HAS_EARLYSUSPEND ps_data->stk_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; ps_data->stk_early_suspend.suspend = stk3x1x_early_suspend; ps_data->stk_early_suspend.resume = stk3x1x_late_resume; register_early_suspend(&ps_data->stk_early_suspend); #endif /* make sure everything is ok before registering the class device */ ps_data->als_cdev = sensors_light_cdev; ps_data->als_cdev.sensors_enable = stk_als_enable_set; ps_data->als_cdev.sensors_poll_delay = stk_als_poll_delay_set; err = sensors_classdev_register(&ps_data->als_input_dev->dev, &ps_data->als_cdev); if (err) goto err_power_on; ps_data->ps_cdev = sensors_proximity_cdev; ps_data->ps_cdev.sensors_enable = stk_ps_enable_set; err = sensors_classdev_register(&ps_data->ps_input_dev->dev, &ps_data->ps_cdev); if (err) goto err_class_sysfs; /* enable device power only when it is enabled */ err = stk3x1x_power_ctl(ps_data, false); if (err) goto err_init_all_setting; dev_dbg(&client->dev, "%s: probe successfully", __func__); return 0; err_init_all_setting: stk3x1x_power_ctl(ps_data, false); sensors_classdev_unregister(&ps_data->ps_cdev); err_class_sysfs: 
sensors_classdev_unregister(&ps_data->als_cdev); err_power_on: stk3x1x_power_init(ps_data, false); err_power_init: #ifndef STK_POLL_PS free_irq(ps_data->irq, ps_data); gpio_free(plat_data->int_pin); #endif #if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS)) err_stk3x1x_setup_irq: #endif #ifdef STK_POLL_ALS hrtimer_try_to_cancel(&ps_data->als_timer); destroy_workqueue(ps_data->stk_als_wq); #endif destroy_workqueue(ps_data->stk_ps_wq); #if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS)) destroy_workqueue(ps_data->stk_wq); #endif sysfs_remove_group(&ps_data->ps_input_dev->dev.kobj, &stk_ps_attribute_group); err_ps_sysfs_create_group: sysfs_remove_group(&ps_data->als_input_dev->dev.kobj, &stk_als_attribute_group); err_als_input_allocate: #ifdef STK_POLL_PS wake_lock_destroy(&ps_data->ps_nosuspend_wl); #endif wake_lock_destroy(&ps_data->ps_wakelock); mutex_destroy(&ps_data->io_lock); kfree(ps_data); return err; } static int stk3x1x_remove(struct i2c_client *client) { struct stk3x1x_data *ps_data = i2c_get_clientdata(client); #ifndef STK_POLL_PS free_irq(ps_data->irq, ps_data); gpio_free(ps_data->int_pin); #endif #ifdef STK_POLL_ALS hrtimer_try_to_cancel(&ps_data->als_timer); destroy_workqueue(ps_data->stk_als_wq); #endif destroy_workqueue(ps_data->stk_ps_wq); #if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS)) destroy_workqueue(ps_data->stk_wq); #endif sysfs_remove_group(&ps_data->ps_input_dev->dev.kobj, &stk_ps_attribute_group); sysfs_remove_group(&ps_data->als_input_dev->dev.kobj, &stk_als_attribute_group); #ifdef STK_POLL_PS wake_lock_destroy(&ps_data->ps_nosuspend_wl); #endif wake_lock_destroy(&ps_data->ps_wakelock); mutex_destroy(&ps_data->io_lock); kfree(ps_data); return 0; } static const struct i2c_device_id stk_ps_id[] = { { "stk_ps", 0}, {} }; MODULE_DEVICE_TABLE(i2c, stk_ps_id); static struct of_device_id stk_match_table[] = { { .compatible = "stk,stk3x1x", }, { }, }; static struct i2c_driver stk_ps_driver = { .driver = { .name = DEVICE_NAME, .owner = 
THIS_MODULE, .of_match_table = stk_match_table, }, .probe = stk3x1x_probe, .remove = stk3x1x_remove, .id_table = stk_ps_id, }; static int __init stk3x1x_init(void) { int ret; ret = i2c_add_driver(&stk_ps_driver); if (ret) return ret; return 0; } static void __exit stk3x1x_exit(void) { i2c_del_driver(&stk_ps_driver); } module_init(stk3x1x_init); module_exit(stk3x1x_exit); MODULE_AUTHOR("Lex Hsieh <lex_hsieh@sitronix.com.tw>"); MODULE_DESCRIPTION("Sensortek stk3x1x Proximity Sensor driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION);
gpl-2.0
xobs/u-boot-novena-spl
fs/yaffs2/yaffs_mtdif2.c
198
5636
/*
 * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
 *
 * Copyright (C) 2002-2007 Aleph One Ltd.
 *   for Toby Churchill Ltd and Brightstar Engineering
 *
 * Created by Charles Manning <charles@aleph1.co.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* mtd interface for YAFFS2 */

/* XXX U-BOOT XXX */
#include <common.h>
#include "asm/errno.h"

const char *yaffs_mtdif2_c_version =
	"$Id: yaffs_mtdif2.c,v 1.17 2007/02/14 01:09:06 wookey Exp $";

#include "yportenv.h"

#include "yaffs_mtdif2.h"

#include "linux/mtd/mtd.h"
#include "linux/types.h"
#include "linux/time.h"

#include "yaffs_packedtags2.h"

/*
 * Write one chunk of data plus its packed yaffs2 tags to NAND.
 *
 * On kernels newer than 2.6.17 the data and the packed tags are written
 * in a single mtd->write_oob() call with MTD_OOB_AUTO placement; both
 * data and tags must be supplied (BUG() otherwise).  On older kernels
 * the legacy write_ecc()/write()/write_oob() entry points are used.
 *
 * Returns YAFFS_OK on success, YAFFS_FAIL on any MTD error.
 */
int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
				      const __u8 * data,
				      const yaffs_ExtendedTags * tags)
{
	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
	struct mtd_oob_ops ops;
#else
	size_t dummy;
#endif
	int retval = 0;

	/* chunk index -> absolute byte address on the device */
	loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;

	yaffs_PackedTags2 pt;

	T(YAFFS_TRACE_MTD,
	  (TSTR
	   ("nandmtd2_WriteChunkWithTagsToNAND chunk %d data %p tags %p"
	    TENDSTR), chunkInNAND, data, tags));

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
	if (tags)
		yaffs_PackTags2(&pt, tags);
	else
		BUG(); /* both tags and data should always be present */

	if (data) {
		ops.mode = MTD_OOB_AUTO;
		ops.ooblen = sizeof(pt);
		ops.len = dev->nDataBytesPerChunk;
		ops.ooboffs = 0;
		ops.datbuf = (__u8 *)data;
		ops.oobbuf = (void *)&pt;
		retval = mtd->write_oob(mtd, addr, &ops);
	} else
		BUG(); /* both tags and data should always be present */
#else
	if (tags) {
		yaffs_PackTags2(&pt, tags);
	}

	if (data && tags) {
		/*
		 * NOTE(review): both arms of this branch issue the identical
		 * write_ecc() call; useNANDECC makes no difference here.
		 */
		if (dev->useNANDECC)
			retval =
			    mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
					   &dummy, data, (__u8 *) & pt, NULL);
		else
			retval =
			    mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
					   &dummy, data, (__u8 *) & pt, NULL);
	} else {
		/* data-only or tags-only writes use the plain entry points */
		if (data)
			retval =
			    mtd->write(mtd, addr, dev->nDataBytesPerChunk,
				       &dummy, data);
		if (tags)
			retval =
			    mtd->write_oob(mtd, addr, mtd->oobsize, &dummy,
					   (__u8 *) & pt);
	}
#endif

	if (retval == 0)
		return YAFFS_OK;
	else
		return YAFFS_FAIL;
}

/*
 * Read one chunk of data and/or its packed yaffs2 tags from NAND.
 *
 * Either argument may be NULL: data-only reads skip the OOB, tag reads go
 * through dev->spareBuffer and are unpacked into *tags.  An -EBADMSG from
 * MTD (uncorrectable ECC) is propagated into tags->eccResult.
 *
 * Returns YAFFS_OK on success, YAFFS_FAIL on any MTD error.
 */
int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
				       __u8 * data, yaffs_ExtendedTags * tags)
{
	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
	struct mtd_oob_ops ops;
#endif
	size_t dummy;
	int retval = 0;

	loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;

	yaffs_PackedTags2 pt;

	T(YAFFS_TRACE_MTD,
	  (TSTR
	   ("nandmtd2_ReadChunkWithTagsFromNAND chunk %d data %p tags %p"
	    TENDSTR), chunkInNAND, data, tags));

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
	if (data && !tags)
		retval = mtd->read(mtd, addr, dev->nDataBytesPerChunk,
				   &dummy, data);
	else if (tags) {
		ops.mode = MTD_OOB_AUTO;
		ops.ooblen = sizeof(pt);
		ops.len = data ? dev->nDataBytesPerChunk : sizeof(pt);
		ops.ooboffs = 0;
		ops.datbuf = data;
		ops.oobbuf = dev->spareBuffer;
		retval = mtd->read_oob(mtd, addr, &ops);
	}
#else
	if (data && tags) {
		/*
		 * NOTE(review): both arms issue the identical read_ecc()
		 * call; useNANDECC makes no difference here.
		 */
		if (dev->useNANDECC) {
			retval =
			    mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
					  &dummy, data, dev->spareBuffer,
					  NULL);
		} else {
			retval =
			    mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
					  &dummy, data, dev->spareBuffer,
					  NULL);
		}
	} else {
		if (data)
			retval =
			    mtd->read(mtd, addr, dev->nDataBytesPerChunk,
				      &dummy, data);
		if (tags)
			retval =
			    mtd->read_oob(mtd, addr, mtd->oobsize, &dummy,
					  dev->spareBuffer);
	}
#endif

	/*
	 * NOTE(review): this copy runs even on data-only reads, when
	 * spareBuffer was not refreshed; harmless because unpack below is
	 * guarded by "if (tags)", but worth confirming.
	 */
	memcpy(&pt, dev->spareBuffer, sizeof(pt));

	if (tags)
		yaffs_UnpackTags2(tags, &pt);

	/* surface an uncorrectable-ECC read as an unfixed ECC result */
	if(tags && retval == -EBADMSG && tags->eccResult == YAFFS_ECC_RESULT_NO_ERROR)
		tags->eccResult = YAFFS_ECC_RESULT_UNFIXED;

	if (retval == 0)
		return YAFFS_OK;
	else
		return YAFFS_FAIL;
}

/*
 * Mark a whole erase block bad via the MTD bad-block interface.
 * Returns YAFFS_OK on success, YAFFS_FAIL otherwise.
 */
int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
{
	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
	int retval;

	T(YAFFS_TRACE_MTD,
	  (TSTR("nandmtd2_MarkNANDBlockBad %d" TENDSTR), blockNo));

	retval =
	    mtd->block_markbad(mtd,
			       blockNo * dev->nChunksPerBlock *
			       dev->nDataBytesPerChunk);

	if (retval == 0)
		return YAFFS_OK;
	else
		return YAFFS_FAIL;
}

/*
 * Query the state of an erase block: bad-block status from MTD, and if
 * the block is usable, read the first chunk's tags to decide whether the
 * block is empty or needs scanning (reporting its sequence number).
 *
 * Returns YAFFS_OK unless the bad-block query itself failed.
 */
int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
			    yaffs_BlockState * state, int *sequenceNumber)
{
	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
	int retval;

	T(YAFFS_TRACE_MTD,
	  (TSTR("nandmtd2_QueryNANDBlock %d" TENDSTR), blockNo));
	retval =
	    mtd->block_isbad(mtd,
			     blockNo * dev->nChunksPerBlock *
			     dev->nDataBytesPerChunk);

	if (retval) {
		T(YAFFS_TRACE_MTD, (TSTR("block is bad" TENDSTR)));

		*state = YAFFS_BLOCK_STATE_DEAD;
		*sequenceNumber = 0;
	} else {
		yaffs_ExtendedTags t;
		/* tags of chunk 0 tell us whether the block holds data */
		nandmtd2_ReadChunkWithTagsFromNAND(dev,
						   blockNo *
						   dev->nChunksPerBlock, NULL,
						   &t);

		if (t.chunkUsed) {
			*sequenceNumber = t.sequenceNumber;
			*state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
		} else {
			*sequenceNumber = 0;
			*state = YAFFS_BLOCK_STATE_EMPTY;
		}
	}
	/* NOTE(review): this trace prints "block is bad" even for good blocks */
	T(YAFFS_TRACE_MTD,
	  (TSTR("block is bad seq %d state %d" TENDSTR), *sequenceNumber,
	   *state));

	if (retval == 0)
		return YAFFS_OK;
	else
		return YAFFS_FAIL;
}
gpl-2.0
jomeister15/ICS-SGH-I727-kernel
arch/arm/mm/vcm.c
454
33427
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/vcm_mm.h> #include <linux/vcm.h> #include <linux/vcm_alloc.h> #include <linux/vcm_types.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <asm/page.h> #include <asm/sizes.h> #include <linux/iommu.h> /* alloc_vm_area */ #include <linux/pfn.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <asm/cacheflush.h> #include <asm/mach/map.h> #define ONE_TO_ONE_CHK 1 #define vcm_err(a, ...) \ pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__) static unsigned int smmu_map_sizes[4] = {SZ_16M, SZ_1M, SZ_64K, SZ_4K}; static phys_addr_t *bootmem_cont; static int cont_sz; static struct vcm *cont_vcm_id; static struct phys_chunk *cont_phys_chunk; DEFINE_SPINLOCK(vcmlock); /* Leaving this in for now to keep compatibility of the API. */ /* This will disappear. 
*/ phys_addr_t vcm_get_dev_addr(struct res *res) { if (!res) { vcm_err("NULL RES"); return -EINVAL; } return res->dev_addr; } static int vcm_no_res(struct vcm *vcm) { if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } return list_empty(&vcm->res_head); fail: return -EINVAL; } static int vcm_no_assoc(struct vcm *vcm) { if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } return list_empty(&vcm->assoc_head); fail: return -EINVAL; } static int vcm_all_activated(struct vcm *vcm) { struct avcm *avcm; if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } list_for_each_entry(avcm, &vcm->assoc_head, assoc_elm) if (!avcm->is_active) return 0; return 1; fail: return -EINVAL; } static void vcm_destroy_common(struct vcm *vcm) { if (!vcm) { vcm_err("NULL vcm\n"); return; } memset(vcm, 0, sizeof(*vcm)); kfree(vcm); } static struct vcm *vcm_create_common(void) { struct vcm *vcm = 0; vcm = kzalloc(sizeof(*vcm), GFP_KERNEL); if (!vcm) { vcm_err("kzalloc(%i, GFP_KERNEL) ret 0\n", sizeof(*vcm)); goto fail; } INIT_LIST_HEAD(&vcm->res_head); INIT_LIST_HEAD(&vcm->assoc_head); return vcm; fail: return NULL; } static int vcm_create_pool(struct vcm *vcm, unsigned long start_addr, size_t len) { int ret = 0; if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } vcm->start_addr = start_addr; vcm->len = len; vcm->pool = gen_pool_create(PAGE_SHIFT, -1); if (!vcm->pool) { vcm_err("gen_pool_create(%x, -1) ret 0\n", PAGE_SHIFT); ret = -EINVAL; goto fail; } ret = gen_pool_add(vcm->pool, start_addr, len, -1); if (ret) { vcm_err("gen_pool_add(%p, %p, %i, -1) ret %i\n", vcm->pool, (void *) start_addr, len, ret); goto fail; } vcm->domain = iommu_domain_alloc(); if (!vcm->domain) { vcm_err("Could not allocate domain\n"); ret = -ENOMEM; goto fail; } fail: if (ret && vcm->pool) gen_pool_destroy(vcm->pool); return ret; } static struct vcm *vcm_create_flagged(int flag, unsigned long start_addr, size_t len) { int ret = 0; struct vcm *vcm = 0; vcm = vcm_create_common(); if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } /* special 
one-to-one mapping case */ if ((flag & ONE_TO_ONE_CHK) && bootmem_cont && start_addr == (size_t) bootmem_cont && len == cont_sz) { vcm->type = VCM_ONE_TO_ONE; } else { ret = vcm_create_pool(vcm, start_addr, len); vcm->type = VCM_DEVICE; } if (ret) { vcm_err("vcm_create_pool(%p, %p, %i) ret %i\n", vcm, (void *) start_addr, len, ret); goto fail2; } return vcm; fail2: vcm_destroy_common(vcm); fail: return NULL; } struct vcm *vcm_create(unsigned long start_addr, size_t len) { unsigned long flags; struct vcm *vcm; spin_lock_irqsave(&vcmlock, flags); vcm = vcm_create_flagged(ONE_TO_ONE_CHK, start_addr, len); spin_unlock_irqrestore(&vcmlock, flags); return vcm; } static int ext_vcm_id_valid(size_t ext_vcm_id) { return ((ext_vcm_id == VCM_PREBUILT_KERNEL) || (ext_vcm_id == VCM_PREBUILT_USER)); } struct vcm *vcm_create_from_prebuilt(size_t ext_vcm_id) { unsigned long flags; struct vcm *vcm = 0; spin_lock_irqsave(&vcmlock, flags); if (!ext_vcm_id_valid(ext_vcm_id)) { vcm_err("ext_vcm_id_valid(%i) ret 0\n", ext_vcm_id); goto fail; } vcm = vcm_create_common(); if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } if (ext_vcm_id == VCM_PREBUILT_KERNEL) vcm->type = VCM_EXT_KERNEL; else if (ext_vcm_id == VCM_PREBUILT_USER) vcm->type = VCM_EXT_USER; else { vcm_err("UNREACHABLE ext_vcm_id is illegal\n"); goto fail_free; } /* TODO: set kernel and userspace start_addr and len, if this * makes sense */ spin_unlock_irqrestore(&vcmlock, flags); return vcm; fail_free: vcm_destroy_common(vcm); fail: spin_unlock_irqrestore(&vcmlock, flags); return NULL; } struct vcm *vcm_clone(struct vcm *vcm) { return 0; } /* No lock needed, vcm->start_addr is never updated after creation */ size_t vcm_get_start_addr(struct vcm *vcm) { if (!vcm) { vcm_err("NULL vcm\n"); return 1; } return vcm->start_addr; } /* No lock needed, vcm->len is never updated after creation */ size_t vcm_get_len(struct vcm *vcm) { if (!vcm) { vcm_err("NULL vcm\n"); return 0; } return vcm->len; } static int vcm_free_common_rule(struct 
vcm *vcm) { int ret; if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } ret = vcm_no_res(vcm); if (!ret) { vcm_err("vcm_no_res(%p) ret 0\n", vcm); goto fail_busy; } if (ret == -EINVAL) { vcm_err("vcm_no_res(%p) ret -EINVAL\n", vcm); goto fail; } ret = vcm_no_assoc(vcm); if (!ret) { vcm_err("vcm_no_assoc(%p) ret 0\n", vcm); goto fail_busy; } if (ret == -EINVAL) { vcm_err("vcm_no_assoc(%p) ret -EINVAL\n", vcm); goto fail; } return 0; fail_busy: return -EBUSY; fail: return -EINVAL; } static int vcm_free_pool_rule(struct vcm *vcm) { if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } /* A vcm always has a valid pool, don't free the vcm because what we got is probably invalid. */ if (!vcm->pool) { vcm_err("NULL vcm->pool\n"); goto fail; } return 0; fail: return -EINVAL; } static void vcm_free_common(struct vcm *vcm) { memset(vcm, 0, sizeof(*vcm)); kfree(vcm); } static int vcm_free_pool(struct vcm *vcm) { if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } gen_pool_destroy(vcm->pool); return 0; fail: return -EINVAL; } static int __vcm_free(struct vcm *vcm) { int ret; if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } ret = vcm_free_common_rule(vcm); if (ret != 0) { vcm_err("vcm_free_common_rule(%p) ret %i\n", vcm, ret); goto fail; } if (vcm->type == VCM_DEVICE) { ret = vcm_free_pool_rule(vcm); if (ret != 0) { vcm_err("vcm_free_pool_rule(%p) ret %i\n", (void *) vcm, ret); goto fail; } if (vcm->domain) iommu_domain_free(vcm->domain); vcm->domain = NULL; ret = vcm_free_pool(vcm); if (ret != 0) { vcm_err("vcm_free_pool(%p) ret %i", (void *) vcm, ret); goto fail; } } vcm_free_common(vcm); return 0; fail: return -EINVAL; } int vcm_free(struct vcm *vcm) { unsigned long flags; int ret; spin_lock_irqsave(&vcmlock, flags); ret = __vcm_free(vcm); spin_unlock_irqrestore(&vcmlock, flags); return ret; } static struct res *__vcm_reserve(struct vcm *vcm, size_t len, u32 attr) { struct res *res = NULL; int align_attr = 0, i = 0; if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } if (len == 0) { 
vcm_err("len is 0\n"); goto fail; } res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) { vcm_err("kzalloc(%i, GFP_KERNEL) ret 0", sizeof(*res)); goto fail; } align_attr = (attr >> VCM_ALIGN_SHIFT) & VCM_ALIGN_MASK; if (align_attr >= 32) { vcm_err("Invalid alignment attribute: %d\n", align_attr); goto fail2; } INIT_LIST_HEAD(&res->res_elm); res->vcm = vcm; res->len = len; res->attr = attr; res->alignment_req = smmu_map_sizes[ARRAY_SIZE(smmu_map_sizes) - 1]; if (align_attr == 0) { for (i = 0; i < ARRAY_SIZE(smmu_map_sizes); i++) if (len / smmu_map_sizes[i]) { res->alignment_req = smmu_map_sizes[i]; break; } } else res->alignment_req = 1 << align_attr; res->aligned_len = res->alignment_req + len; switch (vcm->type) { case VCM_DEVICE: /* should always be not zero */ if (!vcm->pool) { vcm_err("NULL vcm->pool\n"); goto fail2; } res->ptr = gen_pool_alloc(vcm->pool, res->aligned_len); if (!res->ptr) { vcm_err("gen_pool_alloc(%p, %i) ret 0\n", vcm->pool, res->aligned_len); goto fail2; } /* Calculate alignment... 
this will all change anyway */ res->dev_addr = res->ptr + (res->alignment_req - (res->ptr & (res->alignment_req - 1))); break; case VCM_EXT_KERNEL: res->vm_area = alloc_vm_area(res->aligned_len); res->mapped = 0; /* be explicit */ if (!res->vm_area) { vcm_err("NULL res->vm_area\n"); goto fail2; } res->dev_addr = (size_t) res->vm_area->addr + (res->alignment_req - ((size_t) res->vm_area->addr & (res->alignment_req - 1))); break; case VCM_ONE_TO_ONE: break; default: vcm_err("%i is an invalid vcm->type\n", vcm->type); goto fail2; } list_add_tail(&res->res_elm, &vcm->res_head); return res; fail2: kfree(res); fail: return 0; } struct res *vcm_reserve(struct vcm *vcm, size_t len, u32 attr) { unsigned long flags; struct res *res; spin_lock_irqsave(&vcmlock, flags); res = __vcm_reserve(vcm, len, attr); spin_unlock_irqrestore(&vcmlock, flags); return res; } struct res *vcm_reserve_at(enum memtarget_t memtarget, struct vcm *vcm, size_t len, u32 attr) { return 0; } static int __vcm_unreserve(struct res *res) { struct vcm *vcm; if (!res) { vcm_err("NULL res\n"); goto fail; } if (!res->vcm) { vcm_err("NULL res->vcm\n"); goto fail; } vcm = res->vcm; if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } switch (vcm->type) { case VCM_DEVICE: if (!res->vcm->pool) { vcm_err("NULL (res->vcm))->pool\n"); goto fail; } /* res->ptr could be zero, this isn't an error */ gen_pool_free(res->vcm->pool, res->ptr, res->aligned_len); break; case VCM_EXT_KERNEL: if (res->mapped) { vcm_err("res->mapped is true\n"); goto fail; } /* This may take a little explaining. * In the kernel vunmap will free res->vm_area * so if we've called it then we shouldn't call * free_vm_area(). If we've called it we set * res->vm_area to 0. 
*/ if (res->vm_area) { free_vm_area(res->vm_area); res->vm_area = 0; } break; case VCM_ONE_TO_ONE: break; default: vcm_err("%i is an invalid vcm->type\n", vcm->type); goto fail; } list_del(&res->res_elm); /* be extra careful by clearing the memory before freeing it */ memset(res, 0, sizeof(*res)); kfree(res); return 0; fail: return -EINVAL; } int vcm_unreserve(struct res *res) { unsigned long flags; int ret; spin_lock_irqsave(&vcmlock, flags); ret = __vcm_unreserve(res); spin_unlock_irqrestore(&vcmlock, flags); return ret; } /* No lock needed, res->len is never updated after creation */ size_t vcm_get_res_len(struct res *res) { if (!res) { vcm_err("res is 0\n"); return 0; } return res->len; } int vcm_set_res_attr(struct res *res, u32 attr) { return 0; } u32 vcm_get_res_attr(struct res *res) { return 0; } size_t vcm_get_num_res(struct vcm *vcm) { return 0; } struct res *vcm_get_next_res(struct vcm *vcm, struct res *res) { return 0; } size_t vcm_res_copy(struct res *to, size_t to_off, struct res *from, size_t from_off, size_t len) { return 0; } size_t vcm_get_min_page_size(void) { return PAGE_SIZE; } static int vcm_to_smmu_attr(u32 attr) { int smmu_attr = 0; switch (attr & VCM_CACHE_POLICY) { case VCM_NOTCACHED: smmu_attr = VCM_DEV_ATTR_NONCACHED; break; case VCM_WB_WA: smmu_attr = VCM_DEV_ATTR_CACHED_WB_WA; smmu_attr |= VCM_DEV_ATTR_SH; break; case VCM_WB_NWA: smmu_attr = VCM_DEV_ATTR_CACHED_WB_NWA; smmu_attr |= VCM_DEV_ATTR_SH; break; case VCM_WT: smmu_attr = VCM_DEV_ATTR_CACHED_WT; smmu_attr |= VCM_DEV_ATTR_SH; break; default: return -EINVAL; } return smmu_attr; } static int vcm_process_chunk(struct iommu_domain *domain, phys_addr_t pa, unsigned long va, size_t len, u32 attr, int map) { int ret, i, map_order; unsigned long map_len = smmu_map_sizes[ARRAY_SIZE(smmu_map_sizes) - 1]; for (i = 0; i < ARRAY_SIZE(smmu_map_sizes); i++) { if (IS_ALIGNED(va, smmu_map_sizes[i]) && len >= smmu_map_sizes[i]) { map_len = smmu_map_sizes[i]; break; } } #ifdef VCM_PERF_DEBUG if 
(va & (len - 1)) pr_warning("Warning! Suboptimal VCM mapping alignment " "va = %p, len = %p. Expect TLB performance " "degradation.\n", (void *) va, (void *) len); #endif map_order = get_order(map_len); while (len) { if (va & (SZ_4K - 1)) { vcm_err("Tried to map w/ align < 4k! va = %08lx\n", va); goto fail; } if (map_len > len) { vcm_err("map_len = %lu, len = %d, trying to overmap\n", map_len, len); goto fail; } if (map) ret = iommu_map(domain, va, pa, map_order, attr); else ret = iommu_unmap(domain, va, map_order); if (ret) { vcm_err("iommu_map/unmap(%p, %p, %p, 0x%x, 0x%x) ret %i" "map = %d", (void *) domain, (void *) pa, (void *) va, (int) map_len, attr, ret, map); goto fail; } va += map_len; pa += map_len; len -= map_len; } return 0; fail: return -EINVAL; } /* TBD if you vcm_back again what happens? */ int vcm_back(struct res *res, struct physmem *physmem) { unsigned long flags; struct vcm *vcm; struct phys_chunk *chunk; size_t va = 0; int ret; int attr; spin_lock_irqsave(&vcmlock, flags); if (!res) { vcm_err("NULL res\n"); goto fail; } vcm = res->vcm; if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } switch (vcm->type) { case VCM_DEVICE: case VCM_EXT_KERNEL: /* hack part 1 */ attr = vcm_to_smmu_attr(res->attr); if (attr == -1) { vcm_err("Bad SMMU attr\n"); goto fail; } break; default: attr = 0; break; } if (!physmem) { vcm_err("NULL physmem\n"); goto fail; } if (res->len == 0) { vcm_err("res->len is 0\n"); goto fail; } if (physmem->len == 0) { vcm_err("physmem->len is 0\n"); goto fail; } if (res->len != physmem->len) { vcm_err("res->len (%i) != physmem->len (%i)\n", res->len, physmem->len); goto fail; } if (physmem->is_cont) { if (physmem->res == 0) { vcm_err("cont physmem->res is 0"); goto fail; } } else { /* fail if no physmem */ if (list_empty(&physmem->alloc_head.allocated)) { vcm_err("no allocated phys memory"); goto fail; } } ret = vcm_no_assoc(res->vcm); if (ret == 1) { vcm_err("can't back un associated VCM\n"); goto fail; } if (ret == -1) { 
vcm_err("vcm_no_assoc() ret -1\n"); goto fail; } ret = vcm_all_activated(res->vcm); if (ret == 0) { vcm_err("can't back, not all associations are activated\n"); goto fail_eagain; } if (ret == -1) { vcm_err("vcm_all_activated() ret -1\n"); goto fail; } va = res->dev_addr; list_for_each_entry(chunk, &physmem->alloc_head.allocated, allocated) { struct vcm *vcm = res->vcm; size_t chunk_size = chunk->size; if (chunk_size <= 0) { vcm_err("Bad chunk size: %d\n", chunk_size); goto fail; } switch (vcm->type) { case VCM_DEVICE: { /* map all */ ret = vcm_process_chunk(vcm->domain, chunk->pa, va, chunk_size, attr, 1); if (ret != 0) { vcm_err("vcm_process_chunk(%p, %p, %p," " 0x%x, 0x%x)" " ret %i", vcm->domain, (void *) chunk->pa, (void *) va, (int) chunk_size, attr, ret); goto fail; } break; } case VCM_EXT_KERNEL: { unsigned int pages_in_chunk = chunk_size / PAGE_SIZE; unsigned long loc_va = va; unsigned long loc_pa = chunk->pa; const struct mem_type *mtype; /* TODO: get this based on MEMTYPE */ mtype = get_mem_type(MT_DEVICE); if (!mtype) { vcm_err("mtype is 0\n"); goto fail; } /* TODO: Map with the same chunk size */ while (pages_in_chunk--) { ret = ioremap_page(loc_va, loc_pa, mtype); if (ret != 0) { vcm_err("ioremap_page(%p, %p, %p) ret" " %i", (void *) loc_va, (void *) loc_pa, (void *) mtype, ret); goto fail; /* TODO handle weird inter-map case */ } /* hack part 2 */ /* we're changing the PT entry behind * linux's back */ ret = cpu_set_attr(loc_va, PAGE_SIZE, attr); if (ret != 0) { vcm_err("cpu_set_attr(%p, %lu, %x)" "ret %i\n", (void *) loc_va, PAGE_SIZE, attr, ret); goto fail; /* TODO handle weird inter-map case */ } res->mapped = 1; loc_va += PAGE_SIZE; loc_pa += PAGE_SIZE; } flush_cache_vmap(va, loc_va); break; } case VCM_ONE_TO_ONE: va = chunk->pa; break; default: /* this should never happen */ goto fail; } va += chunk_size; /* also add res to the allocated chunk list of refs */ } /* note the reservation */ res->physmem = physmem; spin_unlock_irqrestore(&vcmlock, 
flags); return 0; fail_eagain: spin_unlock_irqrestore(&vcmlock, flags); return -EAGAIN; fail: spin_unlock_irqrestore(&vcmlock, flags); return -EINVAL; } int vcm_unback(struct res *res) { unsigned long flags; struct vcm *vcm; struct physmem *physmem; int ret; spin_lock_irqsave(&vcmlock, flags); if (!res) goto fail; vcm = res->vcm; if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } if (!res->physmem) { vcm_err("can't unback a non-backed reservation\n"); goto fail; } physmem = res->physmem; if (!physmem) { vcm_err("physmem is NULL\n"); goto fail; } if (list_empty(&physmem->alloc_head.allocated)) { vcm_err("physmem allocation is empty\n"); goto fail; } ret = vcm_no_assoc(res->vcm); if (ret == 1) { vcm_err("can't unback a unassociated reservation\n"); goto fail; } if (ret == -1) { vcm_err("vcm_no_assoc(%p) ret -1\n", (void *) res->vcm); goto fail; } ret = vcm_all_activated(res->vcm); if (ret == 0) { vcm_err("can't unback, not all associations are active\n"); goto fail_eagain; } if (ret == -1) { vcm_err("vcm_all_activated(%p) ret -1\n", (void *) res->vcm); goto fail; } switch (vcm->type) { case VCM_EXT_KERNEL: if (!res->mapped) { vcm_err("can't unback an unmapped VCM_EXT_KERNEL" " VCM\n"); goto fail; } /* vunmap free's vm_area */ vunmap(res->vm_area->addr); res->vm_area = 0; res->mapped = 0; break; case VCM_DEVICE: { struct phys_chunk *chunk; size_t va = res->dev_addr; list_for_each_entry(chunk, &physmem->alloc_head.allocated, allocated) { struct vcm *vcm = res->vcm; size_t chunk_size = chunk->size; ret = vcm_process_chunk(vcm->domain, 0, va, chunk_size, 0, 0); if (ret != 0) { vcm_err("vcm_unback_chunk(%p, %p, 0x%x)" " ret %i", (void *) vcm->domain, (void *) va, (int) chunk_size, ret); goto fail; /* TODO handle weird inter-unmap state*/ } va += chunk_size; /* may to a light unback, depending on the requested * functionality */ } break; } case VCM_ONE_TO_ONE: break; default: /* this should never happen */ goto fail; } /* clear the reservation */ res->physmem = 0; 
spin_unlock_irqrestore(&vcmlock, flags); return 0; fail_eagain: spin_unlock_irqrestore(&vcmlock, flags); return -EAGAIN; fail: spin_unlock_irqrestore(&vcmlock, flags); return -EINVAL; } enum memtarget_t vcm_get_memtype_of_res(struct res *res) { return VCM_INVALID; } static int vcm_free_max_munch_cont(struct phys_chunk *head) { struct phys_chunk *chunk, *tmp; if (!head) return -EINVAL; list_for_each_entry_safe(chunk, tmp, &head->allocated, allocated) { list_del_init(&chunk->allocated); } return 0; } static int vcm_alloc_max_munch_cont(size_t start_addr, size_t len, struct phys_chunk *head) { /* this function should always succeed, since it parallels a VCM */ int i, j; if (!head) { vcm_err("head is NULL in continuous map.\n"); goto fail; } if (start_addr < (int) bootmem_cont) { vcm_err("phys start addr (%p) < base (%p)\n", (void *) start_addr, (void *) bootmem_cont); goto fail; } if ((start_addr + len) >= ((size_t) bootmem_cont + cont_sz)) { vcm_err("requested region (%p + %i) > " " available region (%p + %i)", (void *) start_addr, (int) len, (void *) bootmem_cont, cont_sz); goto fail; } i = (start_addr - (size_t) bootmem_cont)/SZ_4K; for (j = 0; j < ARRAY_SIZE(smmu_map_sizes); ++j) { while (len/smmu_map_sizes[j]) { if (!list_empty(&cont_phys_chunk[i].allocated)) { vcm_err("chunk %i ( addr %p) already mapped\n", i, (void *) (start_addr + (i*smmu_map_sizes[j]))); goto fail_free; } list_add_tail(&cont_phys_chunk[i].allocated, &head->allocated); cont_phys_chunk[i].size = smmu_map_sizes[j]; len -= smmu_map_sizes[j]; i += smmu_map_sizes[j]/SZ_4K; } } if (len % SZ_4K) { if (!list_empty(&cont_phys_chunk[i].allocated)) { vcm_err("chunk %i (addr %p) already mapped\n", i, (void *) (start_addr + (i*SZ_4K))); goto fail_free; } len -= SZ_4K; list_add_tail(&cont_phys_chunk[i].allocated, &head->allocated); i++; } return i; fail_free: { struct phys_chunk *chunk, *tmp; /* just remove from list, if we're double alloc'ing we don't want to stamp on the other guy */ 
list_for_each_entry_safe(chunk, tmp, &head->allocated, allocated) { list_del(&chunk->allocated); } } fail: return 0; } struct physmem *vcm_phys_alloc(enum memtype_t memtype, size_t len, u32 attr) { unsigned long flags; int ret; struct physmem *physmem = NULL; int blocks_allocated; spin_lock_irqsave(&vcmlock, flags); physmem = kzalloc(sizeof(*physmem), GFP_KERNEL); if (!physmem) { vcm_err("physmem is NULL\n"); goto fail; } physmem->memtype = memtype; physmem->len = len; physmem->attr = attr; INIT_LIST_HEAD(&physmem->alloc_head.allocated); if (attr & VCM_PHYS_CONT) { if (!cont_vcm_id) { vcm_err("cont_vcm_id is NULL\n"); goto fail2; } physmem->is_cont = 1; /* TODO: get attributes */ physmem->res = __vcm_reserve(cont_vcm_id, len, 0); if (physmem->res == 0) { vcm_err("contiguous space allocation failed\n"); goto fail2; } /* if we're here we know we have memory, create the shadow physmem links*/ blocks_allocated = vcm_alloc_max_munch_cont( physmem->res->dev_addr, len, &physmem->alloc_head); if (blocks_allocated == 0) { vcm_err("shadow physmem allocation failed\n"); goto fail3; } } else { blocks_allocated = vcm_alloc_max_munch(len, memtype, &physmem->alloc_head); if (blocks_allocated == 0) { vcm_err("physical allocation failed:" " vcm_alloc_max_munch(%i, %p) ret 0\n", len, &physmem->alloc_head); goto fail2; } } spin_unlock_irqrestore(&vcmlock, flags); return physmem; fail3: ret = __vcm_unreserve(physmem->res); if (ret != 0) { vcm_err("vcm_unreserve(%p) ret %i during cleanup", (void *) physmem->res, ret); spin_unlock_irqrestore(&vcmlock, flags); return 0; } fail2: kfree(physmem); fail: spin_unlock_irqrestore(&vcmlock, flags); return 0; } int vcm_phys_free(struct physmem *physmem) { unsigned long flags; int ret; spin_lock_irqsave(&vcmlock, flags); if (!physmem) { vcm_err("physmem is NULL\n"); goto fail; } if (physmem->is_cont) { if (physmem->res == 0) { vcm_err("contiguous reservation is NULL\n"); goto fail; } ret = vcm_free_max_munch_cont(&physmem->alloc_head); if (ret != 
0) { vcm_err("failed to free physical blocks:" " vcm_free_max_munch_cont(%p) ret %i\n", (void *) &physmem->alloc_head, ret); goto fail; } ret = __vcm_unreserve(physmem->res); if (ret != 0) { vcm_err("failed to free virtual blocks:" " vcm_unreserve(%p) ret %i\n", (void *) physmem->res, ret); goto fail; } } else { ret = vcm_alloc_free_blocks(physmem->memtype, &physmem->alloc_head); if (ret != 0) { vcm_err("failed to free physical blocks:" " vcm_alloc_free_blocks(%p) ret %i\n", (void *) &physmem->alloc_head, ret); goto fail; } } memset(physmem, 0, sizeof(*physmem)); kfree(physmem); spin_unlock_irqrestore(&vcmlock, flags); return 0; fail: spin_unlock_irqrestore(&vcmlock, flags); return -EINVAL; } struct avcm *vcm_assoc(struct vcm *vcm, struct device *dev, u32 attr) { unsigned long flags; struct avcm *avcm = NULL; spin_lock_irqsave(&vcmlock, flags); if (!vcm) { vcm_err("vcm is NULL\n"); goto fail; } if (!dev) { vcm_err("dev_id is NULL\n"); goto fail; } if (vcm->type == VCM_EXT_KERNEL && !list_empty(&vcm->assoc_head)) { vcm_err("only one device may be assocoated with a" " VCM_EXT_KERNEL\n"); goto fail; } avcm = kzalloc(sizeof(*avcm), GFP_KERNEL); if (!avcm) { vcm_err("kzalloc(%i, GFP_KERNEL) ret NULL\n", sizeof(*avcm)); goto fail; } avcm->dev = dev; avcm->vcm = vcm; avcm->attr = attr; avcm->is_active = 0; INIT_LIST_HEAD(&avcm->assoc_elm); list_add(&avcm->assoc_elm, &vcm->assoc_head); spin_unlock_irqrestore(&vcmlock, flags); return avcm; fail: spin_unlock_irqrestore(&vcmlock, flags); return 0; } int vcm_deassoc(struct avcm *avcm) { unsigned long flags; spin_lock_irqsave(&vcmlock, flags); if (!avcm) { vcm_err("avcm is NULL\n"); goto fail; } if (list_empty(&avcm->assoc_elm)) { vcm_err("nothing to deassociate\n"); goto fail; } if (avcm->is_active) { vcm_err("association still activated\n"); goto fail_busy; } list_del(&avcm->assoc_elm); memset(avcm, 0, sizeof(*avcm)); kfree(avcm); spin_unlock_irqrestore(&vcmlock, flags); return 0; fail_busy: spin_unlock_irqrestore(&vcmlock, 
flags); return -EBUSY; fail: spin_unlock_irqrestore(&vcmlock, flags); return -EINVAL; } int vcm_set_assoc_attr(struct avcm *avcm, u32 attr) { return 0; } u32 vcm_get_assoc_attr(struct avcm *avcm) { return 0; } int vcm_activate(struct avcm *avcm) { unsigned long flags; struct vcm *vcm; spin_lock_irqsave(&vcmlock, flags); if (!avcm) { vcm_err("avcm is NULL\n"); goto fail; } vcm = avcm->vcm; if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } if (!avcm->dev) { vcm_err("cannot activate without a device\n"); goto fail_nodev; } if (avcm->is_active) { vcm_err("double activate\n"); goto fail_busy; } if (vcm->type == VCM_DEVICE) { #ifdef CONFIG_SMMU int ret; ret = iommu_attach_device(vcm->domain, avcm->dev); if (ret != 0) { dev_err(avcm->dev, "failed to attach to domain\n"); goto fail_dev; } #else vcm_err("No SMMU support - cannot activate/deactivate\n"); goto fail_nodev; #endif } avcm->is_active = 1; spin_unlock_irqrestore(&vcmlock, flags); return 0; #ifdef CONFIG_SMMU fail_dev: spin_unlock_irqrestore(&vcmlock, flags); return -ENODEV; #endif fail_busy: spin_unlock_irqrestore(&vcmlock, flags); return -EBUSY; fail_nodev: spin_unlock_irqrestore(&vcmlock, flags); return -ENODEV; fail: spin_unlock_irqrestore(&vcmlock, flags); return -EINVAL; } int vcm_deactivate(struct avcm *avcm) { unsigned long flags; struct vcm *vcm; spin_lock_irqsave(&vcmlock, flags); if (!avcm) goto fail; vcm = avcm->vcm; if (!vcm) { vcm_err("NULL vcm\n"); goto fail; } if (!avcm->dev) { vcm_err("cannot deactivate without a device\n"); goto fail; } if (!avcm->is_active) { vcm_err("double deactivate\n"); goto fail_nobusy; } if (vcm->type == VCM_DEVICE) { #ifdef CONFIG_SMMU /* TODO, pmem check */ iommu_detach_device(vcm->domain, avcm->dev); #else vcm_err("No SMMU support - cannot activate/deactivate\n"); goto fail; #endif } avcm->is_active = 0; spin_unlock_irqrestore(&vcmlock, flags); return 0; fail_nobusy: spin_unlock_irqrestore(&vcmlock, flags); return -ENOENT; fail: spin_unlock_irqrestore(&vcmlock, flags); 
return -EINVAL; } struct bound *vcm_create_bound(struct vcm *vcm, size_t len) { return 0; } int vcm_free_bound(struct bound *bound) { return -EINVAL; } struct res *vcm_reserve_from_bound(struct bound *bound, size_t len, u32 attr) { return 0; } size_t vcm_get_bound_start_addr(struct bound *bound) { return 0; } size_t vcm_get_bound_len(struct bound *bound) { return 0; } struct physmem *vcm_map_phys_addr(phys_addr_t phys, size_t len) { return 0; } size_t vcm_get_next_phys_addr(struct physmem *physmem, phys_addr_t phys, size_t *len) { return 0; } struct res *vcm_get_res(unsigned long dev_addr, struct vcm *vcm) { return 0; } size_t vcm_translate(struct device *src_dev, struct vcm *src_vcm, struct vcm *dst_vcm) { return 0; } size_t vcm_get_phys_num_res(phys_addr_t phys) { return 0; } struct res *vcm_get_next_phys_res(phys_addr_t phys, struct res *res, size_t *len) { return 0; } phys_addr_t vcm_get_pgtbl_pa(struct vcm *vcm) { return 0; } /* No lock needed, smmu_translate has its own lock */ phys_addr_t vcm_dev_addr_to_phys_addr(struct vcm *vcm, unsigned long dev_addr) { if (!vcm) return -EINVAL; #ifdef CONFIG_SMMU return iommu_iova_to_phys(vcm->domain, dev_addr); #else vcm_err("No support for SMMU - manual translation not supported\n"); return -ENODEV; #endif } /* No lock needed, bootmem_cont never changes after */ phys_addr_t vcm_get_cont_memtype_pa(enum memtype_t memtype) { if (memtype != VCM_MEMTYPE_0) { vcm_err("memtype != VCM_MEMTYPE_0\n"); goto fail; } if (!bootmem_cont) { vcm_err("bootmem_cont 0\n"); goto fail; } return (size_t) bootmem_cont; fail: return 0; } /* No lock needed, constant */ size_t vcm_get_cont_memtype_len(enum memtype_t memtype) { if (memtype != VCM_MEMTYPE_0) { vcm_err("memtype != VCM_MEMTYPE_0\n"); return 0; } return cont_sz; } int vcm_hook(struct device *dev, vcm_handler handler, void *data) { #ifdef CONFIG_SMMU vcm_err("No interrupts in IOMMU API\n"); return -ENODEV; #else vcm_err("No support for SMMU - interrupts not supported\n"); return 
-ENODEV; #endif } size_t vcm_hw_ver(size_t dev) { return 0; } static int vcm_cont_phys_chunk_init(void) { int i; int cont_pa; if (!cont_phys_chunk) { vcm_err("cont_phys_chunk 0\n"); goto fail; } if (!bootmem_cont) { vcm_err("bootmem_cont 0\n"); goto fail; } cont_pa = (size_t) bootmem_cont; for (i = 0; i < cont_sz/PAGE_SIZE; ++i) { cont_phys_chunk[i].pa = cont_pa; cont_pa += PAGE_SIZE; cont_phys_chunk[i].size = SZ_4K; /* Not part of an allocator-managed pool */ cont_phys_chunk[i].pool_idx = -1; INIT_LIST_HEAD(&cont_phys_chunk[i].allocated); } return 0; fail: return -EINVAL; } int vcm_sys_init(struct physmem_region *mem, int n_regions, struct vcm_memtype_map *mt_map, int n_mt, void *cont_pa, unsigned int cont_len) { int ret; printk(KERN_INFO "VCM Initialization\n"); bootmem_cont = cont_pa; cont_sz = cont_len; if (!bootmem_cont) { vcm_err("bootmem_cont is 0\n"); ret = -1; goto fail; } ret = vcm_setup_tex_classes(); if (ret != 0) { printk(KERN_INFO "Could not determine TEX attribute mapping\n"); ret = -1; goto fail; } ret = vcm_alloc_init(mem, n_regions, mt_map, n_mt); if (ret != 0) { vcm_err("vcm_alloc_init() ret %i\n", ret); ret = -1; goto fail; } cont_phys_chunk = kzalloc(sizeof(*cont_phys_chunk)*(cont_sz/PAGE_SIZE), GFP_KERNEL); if (!cont_phys_chunk) { vcm_err("kzalloc(%lu, GFP_KERNEL) ret 0", sizeof(*cont_phys_chunk)*(cont_sz/PAGE_SIZE)); goto fail_free; } /* the address and size will hit our special case unless we pass an override */ cont_vcm_id = vcm_create_flagged(0, (size_t)bootmem_cont, cont_sz); if (cont_vcm_id == 0) { vcm_err("vcm_create_flagged(0, %p, %i) ret 0\n", bootmem_cont, cont_sz); ret = -1; goto fail_free2; } ret = vcm_cont_phys_chunk_init(); if (ret != 0) { vcm_err("vcm_cont_phys_chunk_init() ret %i\n", ret); goto fail_free3; } printk(KERN_INFO "VCM Initialization OK\n"); return 0; fail_free3: ret = __vcm_free(cont_vcm_id); if (ret != 0) { vcm_err("vcm_free(%p) ret %i during failure path\n", (void *) cont_vcm_id, ret); return ret; } fail_free2: 
kfree(cont_phys_chunk); cont_phys_chunk = 0; fail_free: ret = vcm_alloc_destroy(); if (ret != 0) vcm_err("vcm_alloc_destroy() ret %i during failure path\n", ret); ret = -EINVAL; fail: return ret; } int vcm_sys_destroy(void) { int ret = 0; if (!cont_phys_chunk) { vcm_err("cont_phys_chunk is 0\n"); return -ENODEV; } if (!cont_vcm_id) { vcm_err("cont_vcm_id is 0\n"); return -ENODEV; } ret = __vcm_free(cont_vcm_id); if (ret != 0) { vcm_err("vcm_free(%p) ret %i\n", (void *) cont_vcm_id, ret); return -ENODEV; } cont_vcm_id = 0; kfree(cont_phys_chunk); cont_phys_chunk = 0; ret = vcm_alloc_destroy(); if (ret != 0) { vcm_err("vcm_alloc_destroy() ret %i\n", ret); return ret; } return ret; } MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zach Pfeffer <zpfeffer@codeaurora.org>");
gpl-2.0
XCage15/linux-2
arch/arm/mach-s3c24xx/common.c
1478
17109
/* linux/arch/arm/plat-s3c24xx/cpu.c * * Copyright (c) 2004-2005 Simtec Electronics * http://www.simtec.co.uk/products/SWLINUX/ * Ben Dooks <ben@simtec.co.uk> * * Common code for S3C24XX machines * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/serial_core.h> #include <linux/serial_s3c.h> #include <clocksource/samsung_pwm.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/platform_data/dma-s3c24xx.h> #include <mach/hardware.h> #include <mach/regs-clock.h> #include <asm/irq.h> #include <asm/cacheflush.h> #include <asm/system_info.h> #include <asm/system_misc.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/regs-gpio.h> #include <mach/dma.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/cpu-freq.h> #include <plat/pwm-core.h> #include "common.h" /* table of supported CPUs */ static const char name_s3c2410[] = "S3C2410"; static const char name_s3c2412[] = "S3C2412"; static const char name_s3c2416[] = "S3C2416/S3C2450"; static const char name_s3c2440[] = "S3C2440"; static const char name_s3c2442[] = "S3C2442"; static const char name_s3c2442b[] = "S3C2442B"; static const char name_s3c2443[] = "S3C2443"; static const char 
name_s3c2410a[] = "S3C2410A"; static const char name_s3c2440a[] = "S3C2440A"; static struct cpu_table cpu_ids[] __initdata = { { .idcode = 0x32410000, .idmask = 0xffffffff, .map_io = s3c2410_map_io, .init_uarts = s3c2410_init_uarts, .init = s3c2410_init, .name = name_s3c2410 }, { .idcode = 0x32410002, .idmask = 0xffffffff, .map_io = s3c2410_map_io, .init_uarts = s3c2410_init_uarts, .init = s3c2410a_init, .name = name_s3c2410a }, { .idcode = 0x32440000, .idmask = 0xffffffff, .map_io = s3c2440_map_io, .init_uarts = s3c244x_init_uarts, .init = s3c2440_init, .name = name_s3c2440 }, { .idcode = 0x32440001, .idmask = 0xffffffff, .map_io = s3c2440_map_io, .init_uarts = s3c244x_init_uarts, .init = s3c2440_init, .name = name_s3c2440a }, { .idcode = 0x32440aaa, .idmask = 0xffffffff, .map_io = s3c2442_map_io, .init_uarts = s3c244x_init_uarts, .init = s3c2442_init, .name = name_s3c2442 }, { .idcode = 0x32440aab, .idmask = 0xffffffff, .map_io = s3c2442_map_io, .init_uarts = s3c244x_init_uarts, .init = s3c2442_init, .name = name_s3c2442b }, { .idcode = 0x32412001, .idmask = 0xffffffff, .map_io = s3c2412_map_io, .init_uarts = s3c2412_init_uarts, .init = s3c2412_init, .name = name_s3c2412, }, { /* a newer version of the s3c2412 */ .idcode = 0x32412003, .idmask = 0xffffffff, .map_io = s3c2412_map_io, .init_uarts = s3c2412_init_uarts, .init = s3c2412_init, .name = name_s3c2412, }, { /* a strange version of the s3c2416 */ .idcode = 0x32450003, .idmask = 0xffffffff, .map_io = s3c2416_map_io, .init_uarts = s3c2416_init_uarts, .init = s3c2416_init, .name = name_s3c2416, }, { .idcode = 0x32443001, .idmask = 0xffffffff, .map_io = s3c2443_map_io, .init_uarts = s3c2443_init_uarts, .init = s3c2443_init, .name = name_s3c2443, }, }; /* minimal IO mapping */ static struct map_desc s3c_iodesc[] __initdata = { IODESC_ENT(GPIO), IODESC_ENT(IRQ), IODESC_ENT(MEMCTRL), IODESC_ENT(UART) }; /* read cpu identificaiton code */ static unsigned long s3c24xx_read_idcode_v5(void) { #if 
defined(CONFIG_CPU_S3C2416) /* s3c2416 is v5, with S3C24XX_GSTATUS1 instead of S3C2412_GSTATUS1 */ u32 gs = __raw_readl(S3C24XX_GSTATUS1); /* test for s3c2416 or similar device */ if ((gs >> 16) == 0x3245) return gs; #endif #if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413) return __raw_readl(S3C2412_GSTATUS1); #else return 1UL; /* don't look like an 2400 */ #endif } static unsigned long s3c24xx_read_idcode_v4(void) { return __raw_readl(S3C2410_GSTATUS1); } static void s3c24xx_default_idle(void) { unsigned long tmp = 0; int i; /* idle the system by using the idle mode which will wait for an * interrupt to happen before restarting the system. */ /* Warning: going into idle state upsets jtag scanning */ __raw_writel(__raw_readl(S3C2410_CLKCON) | S3C2410_CLKCON_IDLE, S3C2410_CLKCON); /* the samsung port seems to do a loop and then unset idle.. */ for (i = 0; i < 50; i++) tmp += __raw_readl(S3C2410_CLKCON); /* ensure loop not optimised out */ /* this bit is not cleared on re-start... 
*/ __raw_writel(__raw_readl(S3C2410_CLKCON) & ~S3C2410_CLKCON_IDLE, S3C2410_CLKCON); } static struct samsung_pwm_variant s3c24xx_pwm_variant = { .bits = 16, .div_base = 1, .has_tint_cstat = false, .tclk_mask = (1 << 4), }; void __init s3c24xx_init_io(struct map_desc *mach_desc, int size) { arm_pm_idle = s3c24xx_default_idle; /* initialise the io descriptors we need for initialisation */ iotable_init(mach_desc, size); iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc)); if (cpu_architecture() >= CPU_ARCH_ARMv5) { samsung_cpu_id = s3c24xx_read_idcode_v5(); } else { samsung_cpu_id = s3c24xx_read_idcode_v4(); } s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids)); samsung_pwm_set_platdata(&s3c24xx_pwm_variant); } void __init samsung_set_timer_source(unsigned int event, unsigned int source) { s3c24xx_pwm_variant.output_mask = BIT(SAMSUNG_PWM_NUM) - 1; s3c24xx_pwm_variant.output_mask &= ~(BIT(event) | BIT(source)); } void __init samsung_timer_init(void) { unsigned int timer_irqs[SAMSUNG_PWM_NUM] = { IRQ_TIMER0, IRQ_TIMER1, IRQ_TIMER2, IRQ_TIMER3, IRQ_TIMER4, }; samsung_pwm_clocksource_init(S3C_VA_TIMER, timer_irqs, &s3c24xx_pwm_variant); } /* Serial port registrations */ #define S3C2410_PA_UART0 (S3C24XX_PA_UART) #define S3C2410_PA_UART1 (S3C24XX_PA_UART + 0x4000 ) #define S3C2410_PA_UART2 (S3C24XX_PA_UART + 0x8000 ) #define S3C2443_PA_UART3 (S3C24XX_PA_UART + 0xC000 ) static struct resource s3c2410_uart0_resource[] = { [0] = DEFINE_RES_MEM(S3C2410_PA_UART0, SZ_16K), [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX0, \ IRQ_S3CUART_ERR0 - IRQ_S3CUART_RX0 + 1, \ NULL, IORESOURCE_IRQ) }; static struct resource s3c2410_uart1_resource[] = { [0] = DEFINE_RES_MEM(S3C2410_PA_UART1, SZ_16K), [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX1, \ IRQ_S3CUART_ERR1 - IRQ_S3CUART_RX1 + 1, \ NULL, IORESOURCE_IRQ) }; static struct resource s3c2410_uart2_resource[] = { [0] = DEFINE_RES_MEM(S3C2410_PA_UART2, SZ_16K), [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX2, \ IRQ_S3CUART_ERR2 - IRQ_S3CUART_RX2 + 1, \ NULL, 
IORESOURCE_IRQ) }; static struct resource s3c2410_uart3_resource[] = { [0] = DEFINE_RES_MEM(S3C2443_PA_UART3, SZ_16K), [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX3, \ IRQ_S3CUART_ERR3 - IRQ_S3CUART_RX3 + 1, \ NULL, IORESOURCE_IRQ) }; struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = { [0] = { .resources = s3c2410_uart0_resource, .nr_resources = ARRAY_SIZE(s3c2410_uart0_resource), }, [1] = { .resources = s3c2410_uart1_resource, .nr_resources = ARRAY_SIZE(s3c2410_uart1_resource), }, [2] = { .resources = s3c2410_uart2_resource, .nr_resources = ARRAY_SIZE(s3c2410_uart2_resource), }, [3] = { .resources = s3c2410_uart3_resource, .nr_resources = ARRAY_SIZE(s3c2410_uart3_resource), }, }; #if defined(CONFIG_CPU_S3C2410) || defined(CONFIG_CPU_S3C2412) || \ defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2442) static struct resource s3c2410_dma_resource[] = { [0] = DEFINE_RES_MEM(S3C24XX_PA_DMA, S3C24XX_SZ_DMA), [1] = DEFINE_RES_IRQ(IRQ_DMA0), [2] = DEFINE_RES_IRQ(IRQ_DMA1), [3] = DEFINE_RES_IRQ(IRQ_DMA2), [4] = DEFINE_RES_IRQ(IRQ_DMA3), }; #endif #if defined(CONFIG_CPU_S3C2410) || defined(CONFIG_CPU_S3C2442) static struct s3c24xx_dma_channel s3c2410_dma_channels[DMACH_MAX] = { [DMACH_XD0] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 0), }, [DMACH_XD1] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 1), }, [DMACH_SDI] = { S3C24XX_DMA_APB, false, S3C24XX_DMA_CHANREQ(2, 0) | S3C24XX_DMA_CHANREQ(2, 2) | S3C24XX_DMA_CHANREQ(1, 3), }, [DMACH_SPI0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 1), }, [DMACH_SPI1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 3), }, [DMACH_UART0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 0), }, [DMACH_UART1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 1), }, [DMACH_UART2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(0, 3), }, [DMACH_TIMER] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 0) | S3C24XX_DMA_CHANREQ(3, 2) | S3C24XX_DMA_CHANREQ(3, 3), }, [DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, 
S3C24XX_DMA_CHANREQ(2, 1) | S3C24XX_DMA_CHANREQ(1, 2), }, [DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(0, 2), }, [DMACH_USB_EP1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 0), }, [DMACH_USB_EP2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 1), }, [DMACH_USB_EP3] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 2), }, [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), }, }; static struct s3c24xx_dma_platdata s3c2410_dma_platdata = { .num_phy_channels = 4, .channels = s3c2410_dma_channels, .num_channels = DMACH_MAX, }; struct platform_device s3c2410_device_dma = { .name = "s3c2410-dma", .id = 0, .num_resources = ARRAY_SIZE(s3c2410_dma_resource), .resource = s3c2410_dma_resource, .dev = { .platform_data = &s3c2410_dma_platdata, }, }; #endif #ifdef CONFIG_CPU_S3C2412 static struct s3c24xx_dma_channel s3c2412_dma_channels[DMACH_MAX] = { [DMACH_XD0] = { S3C24XX_DMA_AHB, true, 17 }, [DMACH_XD1] = { S3C24XX_DMA_AHB, true, 18 }, [DMACH_SDI] = { S3C24XX_DMA_APB, false, 10 }, [DMACH_SPI0_RX] = { S3C24XX_DMA_APB, true, 1 }, [DMACH_SPI0_TX] = { S3C24XX_DMA_APB, true, 0 }, [DMACH_SPI1_RX] = { S3C24XX_DMA_APB, true, 3 }, [DMACH_SPI1_TX] = { S3C24XX_DMA_APB, true, 2 }, [DMACH_UART0] = { S3C24XX_DMA_APB, true, 19 }, [DMACH_UART1] = { S3C24XX_DMA_APB, true, 21 }, [DMACH_UART2] = { S3C24XX_DMA_APB, true, 23 }, [DMACH_UART0_SRC2] = { S3C24XX_DMA_APB, true, 20 }, [DMACH_UART1_SRC2] = { S3C24XX_DMA_APB, true, 22 }, [DMACH_UART2_SRC2] = { S3C24XX_DMA_APB, true, 24 }, [DMACH_TIMER] = { S3C24XX_DMA_APB, true, 9 }, [DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, 5 }, [DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, 4 }, [DMACH_USB_EP1] = { S3C24XX_DMA_APB, true, 13 }, [DMACH_USB_EP2] = { S3C24XX_DMA_APB, true, 14 }, [DMACH_USB_EP3] = { S3C24XX_DMA_APB, true, 15 }, [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, 16 }, }; static struct s3c24xx_dma_platdata s3c2412_dma_platdata = { .num_phy_channels = 4, .channels = s3c2412_dma_channels, .num_channels = 
DMACH_MAX, }; struct platform_device s3c2412_device_dma = { .name = "s3c2412-dma", .id = 0, .num_resources = ARRAY_SIZE(s3c2410_dma_resource), .resource = s3c2410_dma_resource, .dev = { .platform_data = &s3c2412_dma_platdata, }, }; #endif #if defined(CONFIG_CPU_S3C2440) static struct s3c24xx_dma_channel s3c2440_dma_channels[DMACH_MAX] = { [DMACH_XD0] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 0), }, [DMACH_XD1] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 1), }, [DMACH_SDI] = { S3C24XX_DMA_APB, false, S3C24XX_DMA_CHANREQ(2, 0) | S3C24XX_DMA_CHANREQ(6, 1) | S3C24XX_DMA_CHANREQ(2, 2) | S3C24XX_DMA_CHANREQ(1, 3), }, [DMACH_SPI0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 1), }, [DMACH_SPI1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 3), }, [DMACH_UART0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 0), }, [DMACH_UART1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 1), }, [DMACH_UART2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(0, 3), }, [DMACH_TIMER] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 0) | S3C24XX_DMA_CHANREQ(3, 2) | S3C24XX_DMA_CHANREQ(3, 3), }, [DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 1) | S3C24XX_DMA_CHANREQ(1, 2), }, [DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(5, 0) | S3C24XX_DMA_CHANREQ(0, 2), }, [DMACH_PCM_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(6, 0) | S3C24XX_DMA_CHANREQ(5, 2), }, [DMACH_PCM_OUT] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(5, 1) | S3C24XX_DMA_CHANREQ(6, 3), }, [DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(6, 2) | S3C24XX_DMA_CHANREQ(5, 3), }, [DMACH_USB_EP1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 0), }, [DMACH_USB_EP2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 1), }, [DMACH_USB_EP3] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 2), }, [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), }, }; static struct s3c24xx_dma_platdata s3c2440_dma_platdata = { .num_phy_channels = 4, 
.channels = s3c2440_dma_channels, .num_channels = DMACH_MAX, }; struct platform_device s3c2440_device_dma = { .name = "s3c2410-dma", .id = 0, .num_resources = ARRAY_SIZE(s3c2410_dma_resource), .resource = s3c2410_dma_resource, .dev = { .platform_data = &s3c2440_dma_platdata, }, }; #endif #if defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416) static struct resource s3c2443_dma_resource[] = { [0] = DEFINE_RES_MEM(S3C24XX_PA_DMA, S3C24XX_SZ_DMA), [1] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA0), [2] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA1), [3] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA2), [4] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA3), [5] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA4), [6] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA5), }; static struct s3c24xx_dma_channel s3c2443_dma_channels[DMACH_MAX] = { [DMACH_XD0] = { S3C24XX_DMA_AHB, true, 17 }, [DMACH_XD1] = { S3C24XX_DMA_AHB, true, 18 }, [DMACH_SDI] = { S3C24XX_DMA_APB, false, 10 }, [DMACH_SPI0_RX] = { S3C24XX_DMA_APB, true, 1 }, [DMACH_SPI0_TX] = { S3C24XX_DMA_APB, true, 0 }, [DMACH_SPI1_RX] = { S3C24XX_DMA_APB, true, 3 }, [DMACH_SPI1_TX] = { S3C24XX_DMA_APB, true, 2 }, [DMACH_UART0] = { S3C24XX_DMA_APB, true, 19 }, [DMACH_UART1] = { S3C24XX_DMA_APB, true, 21 }, [DMACH_UART2] = { S3C24XX_DMA_APB, true, 23 }, [DMACH_UART3] = { S3C24XX_DMA_APB, true, 25 }, [DMACH_UART0_SRC2] = { S3C24XX_DMA_APB, true, 20 }, [DMACH_UART1_SRC2] = { S3C24XX_DMA_APB, true, 22 }, [DMACH_UART2_SRC2] = { S3C24XX_DMA_APB, true, 24 }, [DMACH_UART3_SRC2] = { S3C24XX_DMA_APB, true, 26 }, [DMACH_TIMER] = { S3C24XX_DMA_APB, true, 9 }, [DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, 5 }, [DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, 4 }, [DMACH_PCM_IN] = { S3C24XX_DMA_APB, true, 28 }, [DMACH_PCM_OUT] = { S3C24XX_DMA_APB, true, 27 }, [DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, 29 }, }; static struct s3c24xx_dma_platdata s3c2443_dma_platdata = { .num_phy_channels = 6, .channels = s3c2443_dma_channels, .num_channels = DMACH_MAX, }; struct platform_device s3c2443_device_dma = { .name = "s3c2443-dma", 
.id = 0, .num_resources = ARRAY_SIZE(s3c2443_dma_resource), .resource = s3c2443_dma_resource, .dev = { .platform_data = &s3c2443_dma_platdata, }, }; #endif #if defined(CONFIG_COMMON_CLK) && defined(CONFIG_CPU_S3C2410) void __init s3c2410_init_clocks(int xtal) { s3c2410_common_clk_init(NULL, xtal, 0, S3C24XX_VA_CLKPWR); } #endif #ifdef CONFIG_CPU_S3C2412 void __init s3c2412_init_clocks(int xtal) { s3c2412_common_clk_init(NULL, xtal, 0, S3C24XX_VA_CLKPWR); } #endif #ifdef CONFIG_CPU_S3C2416 void __init s3c2416_init_clocks(int xtal) { s3c2443_common_clk_init(NULL, xtal, 0, S3C24XX_VA_CLKPWR); } #endif #if defined(CONFIG_COMMON_CLK) && defined(CONFIG_CPU_S3C2440) void __init s3c2440_init_clocks(int xtal) { s3c2410_common_clk_init(NULL, xtal, 1, S3C24XX_VA_CLKPWR); } #endif #if defined(CONFIG_COMMON_CLK) && defined(CONFIG_CPU_S3C2442) void __init s3c2442_init_clocks(int xtal) { s3c2410_common_clk_init(NULL, xtal, 2, S3C24XX_VA_CLKPWR); } #endif #ifdef CONFIG_CPU_S3C2443 void __init s3c2443_init_clocks(int xtal) { s3c2443_common_clk_init(NULL, xtal, 1, S3C24XX_VA_CLKPWR); } #endif #if defined(CONFIG_CPU_S3C2410) || defined(CONFIG_CPU_S3C2440) || \ defined(CONFIG_CPU_S3C2442) static struct resource s3c2410_dclk_resource[] = { [0] = DEFINE_RES_MEM(0x56000084, 0x4), }; struct platform_device s3c2410_device_dclk = { .name = "s3c2410-dclk", .id = 0, .num_resources = ARRAY_SIZE(s3c2410_dclk_resource), .resource = s3c2410_dclk_resource, }; #endif
gpl-2.0
OneEducation/kernel-rk310-lollipop-firefly
drivers/s390/net/qeth_core_main.c
1734
160513
/* * Copyright IBM Corp. 2007, 2009 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, * Thomas Spatzier <tspat@de.ibm.com>, * Frank Blaschka <frank.blaschka@de.ibm.com> */ #define KMSG_COMPONENT "qeth" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/mii.h> #include <linux/kthread.h> #include <linux/slab.h> #include <net/iucv/af_iucv.h> #include <asm/ebcdic.h> #include <asm/io.h> #include <asm/sysinfo.h> #include <asm/compat.h> #include "qeth_core.h" struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ /* N P A M L V H */ [QETH_DBF_SETUP] = {"qeth_setup", 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 128, 3, &debug_sprintf_view, NULL}, [QETH_DBF_CTRL] = {"qeth_control", 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, }; EXPORT_SYMBOL_GPL(qeth_dbf); struct qeth_card_list_struct qeth_core_card_list; EXPORT_SYMBOL_GPL(qeth_core_card_list); struct kmem_cache *qeth_core_header_cache; EXPORT_SYMBOL_GPL(qeth_core_header_cache); static struct kmem_cache *qeth_qdio_outbuf_cache; static struct device *qeth_core_root_dev; static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY; static struct lock_class_key qdio_out_skb_queue_key; static struct mutex qeth_mod_mutex; static void qeth_send_control_data_cb(struct qeth_channel *, struct qeth_cmd_buffer *); static int qeth_issue_next_read(struct qeth_card *); static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32); static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); static void qeth_free_qdio_buffers(struct qeth_card *); static void qeth_notify_skbs(struct 
qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify notification); static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum qeth_qdio_buffer_states newbufstate); static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); static struct workqueue_struct *qeth_wq; static void qeth_close_dev_handler(struct work_struct *work) { struct qeth_card *card; card = container_of(work, struct qeth_card, close_dev_work); QETH_CARD_TEXT(card, 2, "cldevhdl"); rtnl_lock(); dev_close(card->dev); rtnl_unlock(); ccwgroup_set_offline(card->gdev); } void qeth_close_dev(struct qeth_card *card) { QETH_CARD_TEXT(card, 2, "cldevsubm"); queue_work(qeth_wq, &card->close_dev_work); } EXPORT_SYMBOL_GPL(qeth_close_dev); static inline const char *qeth_get_cardname(struct qeth_card *card) { if (card->info.guestlan) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: return " Virtual NIC QDIO"; case QETH_CARD_TYPE_IQD: return " Virtual NIC Hiper"; case QETH_CARD_TYPE_OSM: return " Virtual NIC QDIO - OSM"; case QETH_CARD_TYPE_OSX: return " Virtual NIC QDIO - OSX"; default: return " unknown"; } } else { switch (card->info.type) { case QETH_CARD_TYPE_OSD: return " OSD Express"; case QETH_CARD_TYPE_IQD: return " HiperSockets"; case QETH_CARD_TYPE_OSN: return " OSN QDIO"; case QETH_CARD_TYPE_OSM: return " OSM QDIO"; case QETH_CARD_TYPE_OSX: return " OSX QDIO"; default: return " unknown"; } } return " n/a"; } /* max length to be returned: 14 */ const char *qeth_get_cardname_short(struct qeth_card *card) { if (card->info.guestlan) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: return "Virt.NIC QDIO"; case QETH_CARD_TYPE_IQD: return "Virt.NIC Hiper"; case QETH_CARD_TYPE_OSM: return "Virt.NIC OSM"; case QETH_CARD_TYPE_OSX: return "Virt.NIC OSX"; default: return "unknown"; } } else { switch (card->info.type) { case QETH_CARD_TYPE_OSD: switch 
(card->info.link_type) { case QETH_LINK_TYPE_FAST_ETH: return "OSD_100"; case QETH_LINK_TYPE_HSTR: return "HSTR"; case QETH_LINK_TYPE_GBIT_ETH: return "OSD_1000"; case QETH_LINK_TYPE_10GBIT_ETH: return "OSD_10GIG"; case QETH_LINK_TYPE_LANE_ETH100: return "OSD_FE_LANE"; case QETH_LINK_TYPE_LANE_TR: return "OSD_TR_LANE"; case QETH_LINK_TYPE_LANE_ETH1000: return "OSD_GbE_LANE"; case QETH_LINK_TYPE_LANE: return "OSD_ATM_LANE"; default: return "OSD_Express"; } case QETH_CARD_TYPE_IQD: return "HiperSockets"; case QETH_CARD_TYPE_OSN: return "OSN"; case QETH_CARD_TYPE_OSM: return "OSM_1000"; case QETH_CARD_TYPE_OSX: return "OSX_10GIG"; default: return "unknown"; } } return "n/a"; } void qeth_set_recovery_task(struct qeth_card *card) { card->recovery_task = current; } EXPORT_SYMBOL_GPL(qeth_set_recovery_task); void qeth_clear_recovery_task(struct qeth_card *card) { card->recovery_task = NULL; } EXPORT_SYMBOL_GPL(qeth_clear_recovery_task); static bool qeth_is_recovery_task(const struct qeth_card *card) { return card->recovery_task == current; } void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, int clear_start_mask) { unsigned long flags; spin_lock_irqsave(&card->thread_mask_lock, flags); card->thread_allowed_mask = threads; if (clear_start_mask) card->thread_start_mask &= threads; spin_unlock_irqrestore(&card->thread_mask_lock, flags); wake_up(&card->wait_q); } EXPORT_SYMBOL_GPL(qeth_set_allowed_threads); int qeth_threads_running(struct qeth_card *card, unsigned long threads) { unsigned long flags; int rc = 0; spin_lock_irqsave(&card->thread_mask_lock, flags); rc = (card->thread_running_mask & threads); spin_unlock_irqrestore(&card->thread_mask_lock, flags); return rc; } EXPORT_SYMBOL_GPL(qeth_threads_running); int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) { if (qeth_is_recovery_task(card)) return 0; return wait_event_interruptible(card->wait_q, qeth_threads_running(card, threads) == 0); } 
EXPORT_SYMBOL_GPL(qeth_wait_for_threads); void qeth_clear_working_pool_list(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry, *tmp; QETH_CARD_TEXT(card, 5, "clwrklst"); list_for_each_entry_safe(pool_entry, tmp, &card->qdio.in_buf_pool.entry_list, list){ list_del(&pool_entry->list); } } EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list); static int qeth_alloc_buffer_pool(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry; void *ptr; int i, j; QETH_CARD_TEXT(card, 5, "alocpool"); for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL); if (!pool_entry) { qeth_free_buffer_pool(card); return -ENOMEM; } for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { ptr = (void *) __get_free_page(GFP_KERNEL); if (!ptr) { while (j > 0) free_page((unsigned long) pool_entry->elements[--j]); kfree(pool_entry); qeth_free_buffer_pool(card); return -ENOMEM; } pool_entry->elements[j] = ptr; } list_add(&pool_entry->init_list, &card->qdio.init_pool.entry_list); } return 0; } int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) { QETH_CARD_TEXT(card, 2, "realcbp"); if ((card->state != CARD_STATE_DOWN) && (card->state != CARD_STATE_RECOVER)) return -EPERM; /* TODO: steel/add buffers from/to a running card's buffer pool (?) 
*/ qeth_clear_working_pool_list(card); qeth_free_buffer_pool(card); card->qdio.in_buf_pool.buf_count = bufcnt; card->qdio.init_pool.buf_count = bufcnt; return qeth_alloc_buffer_pool(card); } EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); static inline int qeth_cq_init(struct qeth_card *card) { int rc; if (card->options.cq == QETH_CQ_ENABLED) { QETH_DBF_TEXT(SETUP, 2, "cqinit"); memset(card->qdio.c_q->qdio_bufs, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); card->qdio.c_q->next_buf_to_init = 127; rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, card->qdio.no_in_queues - 1, 0, 127); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); goto out; } } rc = 0; out: return rc; } static inline int qeth_alloc_cq(struct qeth_card *card) { int rc; if (card->options.cq == QETH_CQ_ENABLED) { int i; struct qdio_outbuf_state *outbuf_states; QETH_DBF_TEXT(SETUP, 2, "cqon"); card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL); if (!card->qdio.c_q) { rc = -1; goto kmsg_out; } QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *)); for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { card->qdio.c_q->bufs[i].buffer = &card->qdio.c_q->qdio_bufs[i]; } card->qdio.no_in_queues = 2; card->qdio.out_bufstates = kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_outbuf_state), GFP_KERNEL); outbuf_states = card->qdio.out_bufstates; if (outbuf_states == NULL) { rc = -1; goto free_cq_out; } for (i = 0; i < card->qdio.no_out_queues; ++i) { card->qdio.out_qs[i]->bufstates = outbuf_states; outbuf_states += QDIO_MAX_BUFFERS_PER_Q; } } else { QETH_DBF_TEXT(SETUP, 2, "nocq"); card->qdio.c_q = NULL; card->qdio.no_in_queues = 1; } QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues); rc = 0; out: return rc; free_cq_out: kfree(card->qdio.c_q); card->qdio.c_q = NULL; kmsg_out: dev_err(&card->gdev->dev, "Failed to create completion queue\n"); goto out; } static inline void qeth_free_cq(struct qeth_card *card) { if (card->qdio.c_q) { 
--card->qdio.no_in_queues; kfree(card->qdio.c_q); card->qdio.c_q = NULL; } kfree(card->qdio.out_bufstates); card->qdio.out_bufstates = NULL; } static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, int delayed) { enum iucv_tx_notify n; switch (sbalf15) { case 0: n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK; break; case 4: case 16: case 17: case 18: n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE : TX_NOTIFY_UNREACHABLE; break; default: n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR : TX_NOTIFY_GENERALERROR; break; } return n; } static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, int forced_cleanup) { if (q->card->options.cq != QETH_CQ_ENABLED) return; if (q->bufs[bidx]->next_pending != NULL) { struct qeth_qdio_out_buffer *head = q->bufs[bidx]; struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending; while (c) { if (forced_cleanup || atomic_read(&c->state) == QETH_QDIO_BUF_HANDLED_DELAYED) { struct qeth_qdio_out_buffer *f = c; QETH_CARD_TEXT(f->q->card, 5, "fp"); QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f); /* release here to avoid interleaving between outbound tasklet and inbound tasklet regarding notifications and lifecycle */ qeth_release_skbs(c); c = f->next_pending; WARN_ON_ONCE(head->next_pending != f); head->next_pending = c; kmem_cache_free(qeth_qdio_outbuf_cache, f); } else { head = c; c = c->next_pending; } } } if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) == QETH_QDIO_BUF_HANDLED_DELAYED)) { /* for recovery situations */ q->bufs[bidx]->aob = q->bufstates[bidx].aob; qeth_init_qdio_out_buf(q, bidx); QETH_CARD_TEXT(q->card, 2, "clprecov"); } } static inline void qeth_qdio_handle_aob(struct qeth_card *card, unsigned long phys_aob_addr) { struct qaob *aob; struct qeth_qdio_out_buffer *buffer; enum iucv_tx_notify notification; aob = (struct qaob *) phys_to_virt(phys_aob_addr); QETH_CARD_TEXT(card, 5, "haob"); QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr); buffer = (struct 
qeth_qdio_out_buffer *) aob->user1; QETH_CARD_TEXT_(card, 5, "%lx", aob->user1); if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) { notification = TX_NOTIFY_OK; } else { WARN_ON_ONCE(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING); atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ); notification = TX_NOTIFY_DELAYED_OK; } if (aob->aorc != 0) { QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc); notification = qeth_compute_cq_notification(aob->aorc, 1); } qeth_notify_skbs(buffer->q, buffer, notification); buffer->aob = NULL; qeth_clear_output_buffer(buffer->q, buffer, QETH_QDIO_BUF_HANDLED_DELAYED); /* from here on: do not touch buffer anymore */ qdio_release_aob(aob); } static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) { return card->options.cq == QETH_CQ_ENABLED && card->qdio.c_q != NULL && queue != 0 && queue == card->qdio.no_in_queues - 1; } static int qeth_issue_next_read(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; QETH_CARD_TEXT(card, 5, "issnxrd"); if (card->read.state != CH_STATE_UP) return -EIO; iob = qeth_get_buffer(&card->read); if (!iob) { dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob " "available\n", dev_name(&card->gdev->dev)); return -ENOMEM; } qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); QETH_CARD_TEXT(card, 6, "noirqpnd"); rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, (addr_t) iob, 0, 0); if (rc) { QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! 
" "rc=%i\n", dev_name(&card->gdev->dev), rc); atomic_set(&card->read.irq_pending, 0); card->read_or_write_problem = 1; qeth_schedule_recovery(card); wake_up(&card->wait_q); } return rc; } static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) { struct qeth_reply *reply; reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); if (reply) { atomic_set(&reply->refcnt, 1); atomic_set(&reply->received, 0); reply->card = card; } return reply; } static void qeth_get_reply(struct qeth_reply *reply) { WARN_ON(atomic_read(&reply->refcnt) <= 0); atomic_inc(&reply->refcnt); } static void qeth_put_reply(struct qeth_reply *reply) { WARN_ON(atomic_read(&reply->refcnt) <= 0); if (atomic_dec_and_test(&reply->refcnt)) kfree(reply); } static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, struct qeth_card *card) { char *ipa_name; int com = cmd->hdr.command; ipa_name = qeth_get_ipa_cmd_name(com); if (rc) QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned " "x%X \"%s\"\n", ipa_name, com, dev_name(&card->gdev->dev), QETH_CARD_IFNAME(card), rc, qeth_get_ipa_msg(rc)); else QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n", ipa_name, com, dev_name(&card->gdev->dev), QETH_CARD_IFNAME(card)); } static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) { struct qeth_ipa_cmd *cmd = NULL; QETH_CARD_TEXT(card, 5, "chkipad"); if (IS_IPA(iob->data)) { cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); if (IS_IPA_REPLY(cmd)) { if (cmd->hdr.command != IPA_CMD_SETCCID && cmd->hdr.command != IPA_CMD_DELCCID && cmd->hdr.command != IPA_CMD_MODCCID && cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); return cmd; } else { switch (cmd->hdr.command) { case IPA_CMD_STOPLAN: if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) { dev_err(&card->gdev->dev, "Interface %s is down because the " "adjacent port is no longer in " "reflective relay mode\n", QETH_CARD_IFNAME(card)); 
qeth_close_dev(card); } else { dev_warn(&card->gdev->dev, "The link for interface %s on CHPID" " 0x%X failed\n", QETH_CARD_IFNAME(card), card->info.chpid); qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); } card->lan_online = 0; if (card->dev && netif_carrier_ok(card->dev)) netif_carrier_off(card->dev); return NULL; case IPA_CMD_STARTLAN: dev_info(&card->gdev->dev, "The link for %s on CHPID 0x%X has" " been restored\n", QETH_CARD_IFNAME(card), card->info.chpid); netif_carrier_on(card->dev); card->lan_online = 1; if (card->info.hwtrap) card->info.hwtrap = 2; qeth_schedule_recovery(card); return NULL; case IPA_CMD_MODCCID: return cmd; case IPA_CMD_REGISTER_LOCAL_ADDR: QETH_CARD_TEXT(card, 3, "irla"); break; case IPA_CMD_UNREGISTER_LOCAL_ADDR: QETH_CARD_TEXT(card, 3, "urla"); break; default: QETH_DBF_MESSAGE(2, "Received data is IPA " "but not a reply!\n"); break; } } } return cmd; } void qeth_clear_ipacmd_list(struct qeth_card *card) { struct qeth_reply *reply, *r; unsigned long flags; QETH_CARD_TEXT(card, 4, "clipalst"); spin_lock_irqsave(&card->lock, flags); list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { qeth_get_reply(reply); reply->rc = -EIO; atomic_inc(&reply->received); list_del_init(&reply->list); wake_up(&reply->wait_q); qeth_put_reply(reply); } spin_unlock_irqrestore(&card->lock, flags); atomic_set(&card->write.irq_pending, 0); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); static int qeth_check_idx_response(struct qeth_card *card, unsigned char *buffer) { if (!buffer) return 0; QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); if ((buffer[2] & 0xc0) == 0xc0) { QETH_DBF_MESSAGE(2, "received an IDX TERMINATE " "with cause code 0x%02x%s\n", buffer[4], ((buffer[4] == 0x22) ? 
" -- try another portname" : "")); QETH_CARD_TEXT(card, 2, "ckidxres"); QETH_CARD_TEXT(card, 2, " idxterm"); QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); if (buffer[4] == 0xf6) { dev_err(&card->gdev->dev, "The qeth device is not configured " "for the OSI layer required by z/VM\n"); return -EPERM; } return -EIO; } return 0; }

/*
 * Prepare the channel's CCW for a control-data transfer of @len bytes
 * from/to @iob: READ_CCW template for the read channel, WRITE_CCW
 * otherwise, with the data address set to the physical address of the
 * buffer.
 */
static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
	__u32 len)
{
	struct qeth_card *card;

	card = CARD_FROM_CDEV(channel->ccwdev);
	QETH_CARD_TEXT(card, 4, "setupccw");
	if (channel == &card->read)
		memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	else
		memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = len;
	channel->ccw.cda = (__u32) __pa(iob);
}

/*
 * Scan the ring of command buffers for a free one, starting at
 * io_buf_no; lock and zero it before handing it out. Caller must hold
 * channel->iob_lock. Returns NULL when all buffers are in use.
 */
static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
{
	__u8 index;

	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff");
	index = channel->io_buf_no;
	do {
		if (channel->iob[index].state == BUF_STATE_FREE) {
			channel->iob[index].state = BUF_STATE_LOCKED;
			channel->io_buf_no = (channel->io_buf_no + 1) %
				QETH_CMD_BUFFER_NO;
			memset(channel->iob[index].data, 0, QETH_BUFSIZE);
			return channel->iob + index;
		}
		index = (index + 1) % QETH_CMD_BUFFER_NO;
	} while (index != channel->io_buf_no);
	return NULL;
}

/*
 * Return a command buffer to the free pool: wipe its data, reset the
 * callback to the default control-data handler and wake anyone waiting
 * in qeth_wait_for_buffer().
 */
void qeth_release_buffer(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	unsigned long flags;

	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
	spin_lock_irqsave(&channel->iob_lock, flags);
	memset(iob->data, 0, QETH_BUFSIZE);
	iob->state = BUF_STATE_FREE;
	iob->callback = qeth_send_control_data_cb;
	iob->rc = 0;
	spin_unlock_irqrestore(&channel->iob_lock, flags);
	wake_up(&channel->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_release_buffer);

/* Locked wrapper around __qeth_get_buffer(); may return NULL. */
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->iob_lock, flags);
	buffer = __qeth_get_buffer(channel);
	spin_unlock_irqrestore(&channel->iob_lock, flags);
	return buffer;
}

/*
 * Sleep until a command buffer becomes free on @channel; woken by
 * qeth_release_buffer(). Never returns NULL.
 */
struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer;

	wait_event(channel->wait_q,
		((buffer = qeth_get_buffer(channel)) != NULL));
	return buffer;
}
EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);

/*
 * Force every command buffer of @channel back to the free state and
 * reset both ring cursors.
 */
void qeth_clear_cmd_buffers(struct qeth_channel *channel)
{
	int cnt;

	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
		qeth_release_buffer(channel, &channel->iob[cnt]);
	channel->buf_no = 0;
	channel->io_buf_no = 0;
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);

static void qeth_send_control_data_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob) { struct qeth_card *card; struct qeth_reply *reply, *r; struct qeth_ipa_cmd *cmd; unsigned long flags; int keep_reply; int rc = 0; card = CARD_FROM_CDEV(channel->ccwdev); QETH_CARD_TEXT(card, 4, "sndctlcb"); rc = qeth_check_idx_response(card, iob->data); switch (rc) { case 0: break; case -EIO: qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); /* fall through */ default: goto out; } cmd = qeth_check_ipa_data(card, iob); if ((cmd == NULL) && (card->state != CARD_STATE_DOWN)) goto out; /*in case of OSN : check if cmd is set */ if (card->info.type == QETH_CARD_TYPE_OSN && cmd && cmd->hdr.command != IPA_CMD_STARTLAN && card->osn_info.assist_cb != NULL) { card->osn_info.assist_cb(card->dev, cmd); goto out; } spin_lock_irqsave(&card->lock, flags); list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) || ((cmd) && (reply->seqno == cmd->hdr.seqno))) { qeth_get_reply(reply); list_del_init(&reply->list); spin_unlock_irqrestore(&card->lock, flags); keep_reply = 0; if (reply->callback != NULL) { if (cmd) { reply->offset = (__u16)((char *)cmd - (char *)iob->data); keep_reply = reply->callback(card, reply, (unsigned long)cmd); } else keep_reply = reply->callback(card, reply, (unsigned long)iob); } if (cmd) reply->rc = (u16) cmd->hdr.return_code; else if (iob->rc) reply->rc = iob->rc; if (keep_reply) {
spin_lock_irqsave(&card->lock, flags); list_add_tail(&reply->list, &card->cmd_waiter_list); spin_unlock_irqrestore(&card->lock, flags); } else { atomic_inc(&reply->received); wake_up(&reply->wait_q); } qeth_put_reply(reply); goto out; } } spin_unlock_irqrestore(&card->lock, flags); out: memcpy(&card->seqno.pdu_hdr_ack, QETH_PDU_HEADER_SEQ_NO(iob->data), QETH_SEQ_NO_LENGTH); qeth_release_buffer(channel, iob); }

/*
 * Allocate and initialize the command-buffer pool of a channel.
 * Buffers are GFP_DMA because they are handed to the CCW hardware.
 * On partial allocation failure all buffers allocated so far are
 * freed and -ENOMEM is returned; 0 on success.
 */
static int qeth_setup_channel(struct qeth_channel *channel)
{
	int cnt;

	QETH_DBF_TEXT(SETUP, 2, "setupch");
	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
		channel->iob[cnt].data =
			kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
		if (channel->iob[cnt].data == NULL)
			break;
		channel->iob[cnt].state = BUF_STATE_FREE;
		channel->iob[cnt].channel = channel;
		channel->iob[cnt].callback = qeth_send_control_data_cb;
		channel->iob[cnt].rc = 0;
	}
	if (cnt < QETH_CMD_BUFFER_NO) {
		/* roll back the buffers allocated before the failure */
		while (cnt-- > 0)
			kfree(channel->iob[cnt].data);
		return -ENOMEM;
	}
	channel->buf_no = 0;
	channel->io_buf_no = 0;
	atomic_set(&channel->irq_pending, 0);
	spin_lock_init(&channel->iob_lock);
	init_waitqueue_head(&channel->wait_q);
	return 0;
}

/*
 * Request that @thread be started: fails with -EPERM if the thread is
 * not allowed or its start bit is already set. All thread masks are
 * protected by thread_mask_lock.
 */
static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread) ||
	      (card->thread_start_mask & thread)) {
		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
		return -EPERM;
	}
	card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return 0;
}

/* Withdraw a pending start request and wake waiters on card->wait_q. */
void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);

void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) { unsigned long flags; spin_lock_irqsave(&card->thread_mask_lock,
flags); card->thread_running_mask &= ~thread; spin_unlock_irqrestore(&card->thread_mask_lock, flags); wake_up(&card->wait_q); } EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);

/*
 * Try to transition @thread from "start requested" to "running".
 * Returns 1 when the caller may run the thread (start bit consumed,
 * running bit set), 0 when no start was requested, and -EPERM when a
 * start was requested but the thread is disallowed or already running.
 */
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

/*
 * Sleep on card->wait_q until __qeth_do_run_thread() yields a
 * non-negative answer (i.e. until -EPERM clears), then return it.
 */
int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);

/*
 * Kick off the recovery thread via the card's kernel_thread_starter
 * work item, if a recovery start may be requested right now.
 */
void qeth_schedule_recovery(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "startrec");
	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);

static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) { int dstat, cstat; char *sense; struct qeth_card *card; sense = (char *) irb->ecw; cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; card = CARD_FROM_CDEV(cdev); if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { QETH_CARD_TEXT(card, 2, "CGENCHK"); dev_warn(&cdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n", dev_name(&cdev->dev), dstat, cstat); print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 16, 1, irb, 64, 1); return 1; } if (dstat & DEV_STAT_UNIT_CHECK) { if (sense[SENSE_RESETTING_EVENT_BYTE] & SENSE_RESETTING_EVENT_FLAG) { QETH_CARD_TEXT(card, 2, "REVIND"); return 1; } if
(sense[SENSE_COMMAND_REJECT_BYTE] & SENSE_COMMAND_REJECT_FLAG) { QETH_CARD_TEXT(card, 2, "CMDREJi"); return 1; } if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { QETH_CARD_TEXT(card, 2, "AFFE"); return 1; } if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { QETH_CARD_TEXT(card, 2, "ZEROSEN"); return 0; } QETH_CARD_TEXT(card, 2, "DGENCHK"); return 1; } return 0; } static long __qeth_check_irb_error(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { struct qeth_card *card; card = CARD_FROM_CDEV(cdev); if (!IS_ERR(irb)) return 0; switch (PTR_ERR(irb)) { case -EIO: QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", dev_name(&cdev->dev)); QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); break; case -ETIMEDOUT: dev_warn(&cdev->dev, "A hardware operation timed out" " on the device\n"); QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); if (intparm == QETH_RCD_PARM) { if (card && (card->data.ccwdev == cdev)) { card->data.state = CH_STATE_DOWN; wake_up(&card->wait_q); } } break; default: QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", dev_name(&cdev->dev), PTR_ERR(irb)); QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT(card, 2, " rc???"); } return PTR_ERR(irb); } static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { int rc; int cstat, dstat; struct qeth_cmd_buffer *buffer; struct qeth_channel *channel; struct qeth_card *card; struct qeth_cmd_buffer *iob; __u8 index; if (__qeth_check_irb_error(cdev, intparm, irb)) return; cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; card = CARD_FROM_CDEV(cdev); if (!card) return; QETH_CARD_TEXT(card, 5, "irq"); if (card->read.ccwdev == cdev) { channel = &card->read; QETH_CARD_TEXT(card, 5, "read"); } else if (card->write.ccwdev == cdev) { channel = &card->write; QETH_CARD_TEXT(card, 5, "write"); } else { channel = &card->data; QETH_CARD_TEXT(card, 5, "data"); } atomic_set(&channel->irq_pending, 0); 
if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) channel->state = CH_STATE_STOPPED; if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC)) channel->state = CH_STATE_HALTED; /*let's wake up immediately on data channel*/ if ((channel == &card->data) && (intparm != 0) && (intparm != QETH_RCD_PARM)) goto out; if (intparm == QETH_CLEAR_CHANNEL_PARM) { QETH_CARD_TEXT(card, 6, "clrchpar"); /* we don't have to handle this further */ intparm = 0; } if (intparm == QETH_HALT_CHANNEL_PARM) { QETH_CARD_TEXT(card, 6, "hltchpar"); /* we don't have to handle this further */ intparm = 0; } if ((dstat & DEV_STAT_UNIT_EXCEP) || (dstat & DEV_STAT_UNIT_CHECK) || (cstat)) { if (irb->esw.esw0.erw.cons) { dev_warn(&channel->ccwdev->dev, "The qeth device driver failed to recover " "an error on the device\n"); QETH_DBF_MESSAGE(2, "%s sense data available. cstat " "0x%X dstat 0x%X\n", dev_name(&channel->ccwdev->dev), cstat, dstat); print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); print_hex_dump(KERN_WARNING, "qeth: sense data ", DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1); } if (intparm == QETH_RCD_PARM) { channel->state = CH_STATE_DOWN; goto out; } rc = qeth_get_problem(cdev, irb); if (rc) { qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); goto out; } } if (intparm == QETH_RCD_PARM) { channel->state = CH_STATE_RCD_DONE; goto out; } if (intparm) { buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm); buffer->state = BUF_STATE_PROCESSED; } if (channel == &card->data) return; if (channel == &card->read && channel->state == CH_STATE_UP) qeth_issue_next_read(card); iob = channel->iob; index = channel->buf_no; while (iob[index].state == BUF_STATE_PROCESSED) { if (iob[index].callback != NULL) iob[index].callback(channel, iob + index); index = (index + 1) % QETH_CMD_BUFFER_NO; } channel->buf_no = index; out: wake_up(&card->wait_q); return; } static void qeth_notify_skbs(struct qeth_qdio_out_q *q, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify 
notification) { struct sk_buff *skb; if (skb_queue_empty(&buf->skb_list)) goto out; skb = skb_peek(&buf->skb_list); while (skb) { QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); if (skb->protocol == ETH_P_AF_IUCV) { if (skb->sk) { struct iucv_sock *iucv = iucv_sk(skb->sk); iucv->sk_txnotify(skb, notification); } } if (skb_queue_is_last(&buf->skb_list, skb)) skb = NULL; else skb = skb_queue_next(&buf->skb_list, skb); } out: return; } static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) { struct sk_buff *skb; struct iucv_sock *iucv; int notify_general_error = 0; if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) notify_general_error = 1; /* release may never happen from within CQ tasklet scope */ WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); skb = skb_dequeue(&buf->skb_list); while (skb) { QETH_CARD_TEXT(buf->q->card, 5, "skbr"); QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb); if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) { if (skb->sk) { iucv = iucv_sk(skb->sk); iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR); } } atomic_dec(&skb->users); dev_kfree_skb_any(skb); skb = skb_dequeue(&buf->skb_list); } } static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum qeth_qdio_buffer_states newbufstate) { int i; /* is PCI flag set on buffer? 
*/ if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) atomic_dec(&queue->set_pci_flags_count); if (newbufstate == QETH_QDIO_BUF_EMPTY) { qeth_release_skbs(buf); } for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { if (buf->buffer->element[i].addr && buf->is_header[i]) kmem_cache_free(qeth_core_header_cache, buf->buffer->element[i].addr); buf->is_header[i] = 0; buf->buffer->element[i].length = 0; buf->buffer->element[i].addr = NULL; buf->buffer->element[i].eflags = 0; buf->buffer->element[i].sflags = 0; } buf->buffer->element[15].eflags = 0; buf->buffer->element[15].sflags = 0; buf->next_element_to_fill = 0; atomic_set(&buf->state, newbufstate); } static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) { int j; for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (!q->bufs[j]) continue; qeth_cleanup_handled_pending(q, j, 1); qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY); if (free) { kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); q->bufs[j] = NULL; } } } void qeth_clear_qdio_buffers(struct qeth_card *card) { int i; QETH_CARD_TEXT(card, 2, "clearqdbf"); /* clear outbound buffers to free skbs */ for (i = 0; i < card->qdio.no_out_queues; ++i) { if (card->qdio.out_qs[i]) { qeth_clear_outq_buffers(card->qdio.out_qs[i], 0); } } } EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers); static void qeth_free_buffer_pool(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry, *tmp; int i = 0; list_for_each_entry_safe(pool_entry, tmp, &card->qdio.init_pool.entry_list, init_list){ for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) free_page((unsigned long)pool_entry->elements[i]); list_del(&pool_entry->init_list); kfree(pool_entry); } } static void qeth_free_qdio_buffers(struct qeth_card *card) { int i, j; if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == QETH_QDIO_UNINITIALIZED) return; qeth_free_cq(card); cancel_delayed_work_sync(&card->buffer_reclaim_work); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) 
dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); kfree(card->qdio.in_q); card->qdio.in_q = NULL; /* inbound buffer pool */ qeth_free_buffer_pool(card); /* free outbound qdio_qs */ if (card->qdio.out_qs) { for (i = 0; i < card->qdio.no_out_queues; ++i) { qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); kfree(card->qdio.out_qs[i]); } kfree(card->qdio.out_qs); card->qdio.out_qs = NULL; } } static void qeth_clean_channel(struct qeth_channel *channel) { int cnt; QETH_DBF_TEXT(SETUP, 2, "freech"); for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) kfree(channel->iob[cnt].data); } static void qeth_set_single_write_queues(struct qeth_card *card) { if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && (card->qdio.no_out_queues == 4)) qeth_free_qdio_buffers(card); card->qdio.no_out_queues = 1; if (card->qdio.default_out_queue != 0) dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); card->qdio.default_out_queue = 0; } static void qeth_set_multiple_write_queues(struct qeth_card *card) { if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && (card->qdio.no_out_queues == 1)) { qeth_free_qdio_buffers(card); card->qdio.default_out_queue = 2; } card->qdio.no_out_queues = 4; } static void qeth_update_from_chp_desc(struct qeth_card *card) { struct ccw_device *ccwdev; struct channelPath_dsc { u8 flags; u8 lsn; u8 desc; u8 chpid; u8 swla; u8 zeroes; u8 chla; u8 chpp; } *chp_dsc; QETH_DBF_TEXT(SETUP, 2, "chp_desc"); ccwdev = card->data.ccwdev; chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); if (!chp_dsc) goto out; card->info.func_level = 0x4100 + chp_dsc->desc; if (card->info.type == QETH_CARD_TYPE_IQD) goto out; /* CHPP field bit 6 == 1 -> single queue */ if ((chp_dsc->chpp & 0x02) == 0x02) qeth_set_single_write_queues(card); else qeth_set_multiple_write_queues(card); out: kfree(chp_dsc); QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); } static void qeth_init_qdio_info(struct 
qeth_card *card) { QETH_DBF_TEXT(SETUP, 4, "intqdinf"); atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); /* inbound */ card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; if (card->info.type == QETH_CARD_TYPE_IQD) card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; else card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); } static void qeth_set_intial_options(struct qeth_card *card) { card->options.route4.type = NO_ROUTER; card->options.route6.type = NO_ROUTER; card->options.fake_broadcast = 0; card->options.add_hhlen = DEFAULT_ADD_HHLEN; card->options.performance_stats = 0; card->options.rx_sg_cb = QETH_RX_SG_CB; card->options.isolation = ISOLATION_MODE_NONE; card->options.cq = QETH_CQ_DISABLED; } static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) { unsigned long flags; int rc = 0; spin_lock_irqsave(&card->thread_mask_lock, flags); QETH_CARD_TEXT_(card, 4, " %02x%02x%02x", (u8) card->thread_start_mask, (u8) card->thread_allowed_mask, (u8) card->thread_running_mask); rc = (card->thread_start_mask & thread); spin_unlock_irqrestore(&card->thread_mask_lock, flags); return rc; } static void qeth_start_kernel_thread(struct work_struct *work) { struct task_struct *ts; struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter); QETH_CARD_TEXT(card , 2, "strthrd"); if (card->read.state != CH_STATE_UP && card->write.state != CH_STATE_UP) return; if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { ts = kthread_run(card->discipline->recover, (void *)card, "qeth_recover"); if (IS_ERR(ts)) { qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); } } } static int qeth_setup_card(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 2, "setupcrd"); QETH_DBF_HEX(SETUP, 2, &card, 
sizeof(void *)); card->read.state = CH_STATE_DOWN; card->write.state = CH_STATE_DOWN; card->data.state = CH_STATE_DOWN; card->state = CARD_STATE_DOWN; card->lan_online = 0; card->read_or_write_problem = 0; card->dev = NULL; spin_lock_init(&card->vlanlock); spin_lock_init(&card->mclock); spin_lock_init(&card->lock); spin_lock_init(&card->ip_lock); spin_lock_init(&card->thread_mask_lock); mutex_init(&card->conf_mutex); mutex_init(&card->discipline_mutex); card->thread_start_mask = 0; card->thread_allowed_mask = 0; card->thread_running_mask = 0; INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); INIT_LIST_HEAD(&card->ip_list); INIT_LIST_HEAD(card->ip_tbd_list); INIT_LIST_HEAD(&card->cmd_waiter_list); init_waitqueue_head(&card->wait_q); /* initial options */ qeth_set_intial_options(card); /* IP address takeover */ INIT_LIST_HEAD(&card->ipato.entries); card->ipato.enabled = 0; card->ipato.invert4 = 0; card->ipato.invert6 = 0; /* init QDIO stuff */ qeth_init_qdio_info(card); INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); INIT_WORK(&card->close_dev_work, qeth_close_dev_handler); return 0; } static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) { struct qeth_card *card = container_of(slr, struct qeth_card, qeth_service_level); if (card->info.mcl_level[0]) seq_printf(m, "qeth: %s firmware level %s\n", CARD_BUS_ID(card), card->info.mcl_level); } static struct qeth_card *qeth_alloc_card(void) { struct qeth_card *card; QETH_DBF_TEXT(SETUP, 2, "alloccrd"); card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL); if (!card) goto out; QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); if (!card->ip_tbd_list) { QETH_DBF_TEXT(SETUP, 0, "iptbdnom"); goto out_card; } if (qeth_setup_channel(&card->read)) goto out_ip; if (qeth_setup_channel(&card->write)) goto out_channel; card->options.layer2 = -1; card->qeth_service_level.seq_print = 
		qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_channel:
	qeth_clean_channel(&card->read);
out_ip:
	kfree(card->ip_tbd_list);
out_card:
	kfree(card);
out:
	return NULL;
}

/*
 * Match the read device's CU type/model against the known_devices[] table
 * and configure card type, out-queue count and multicast behaviour.
 * Returns -ENOENT for unrecognized hardware.
 * NOTE(review): card->info.type is taken from the MODEL index of the table
 * row, not the TYPE index — looks intentional (table layout), but confirm.
 */
static int qeth_determine_card_type(struct qeth_card *card)
{
	int i = 0;

	QETH_DBF_TEXT(SETUP, 2, "detcdtyp");

	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
	while (known_devices[i][QETH_DEV_MODEL_IND]) {
		if ((CARD_RDEV(card)->id.dev_type ==
				known_devices[i][QETH_DEV_TYPE_IND]) &&
		    (CARD_RDEV(card)->id.dev_model ==
				known_devices[i][QETH_DEV_MODEL_IND])) {
			card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
			card->qdio.no_out_queues =
				known_devices[i][QETH_QUEUE_NO_IND];
			card->qdio.no_in_queues = 1;
			card->info.is_multicast_different =
				known_devices[i][QETH_MULTICAST_IND];
			qeth_update_from_chp_desc(card);
			return 0;
		}
		i++;
	}
	card->info.type = QETH_CARD_TYPE_UNKNOWN;
	dev_err(&card->gdev->dev, "The adapter hardware is of an "
		"unknown type\n");
	return -ENOENT;
}

/*
 * Issue a clear (ccw_device_clear) on the channel and wait — interruptible,
 * bounded by QETH_TIMEOUT — for the irq handler to move the channel to
 * CH_STATE_STOPPED; the channel is then marked CH_STATE_DOWN.
 */
static int qeth_clear_channel(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_card *card;
	int rc;

	card = CARD_FROM_CDEV(channel->ccwdev);
	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

/*
 * Issue a halt (ccw_device_halt) on the channel and wait for the irq
 * handler to report CH_STATE_HALTED. -ETIME if that never happens.
 */
static int qeth_halt_channel(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_card *card;
	int rc;

	card = CARD_FROM_CDEV(channel->ccwdev);
	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

/*
 * Halt all three channels; every channel is attempted even if an earlier
 * one fails, and the first error encountered is returned.
 */
static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(&card->read);
	rc2 = qeth_halt_channel(&card->write);
	rc3 = qeth_halt_channel(&card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

/* Clear all three channels; same all-channels/first-error policy as above. */
static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(&card->read);
	rc2 = qeth_clear_channel(&card->write);
	rc3 = qeth_clear_channel(&card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

/* Optionally halt, then clear, all channels of the card. */
static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

/*
 * Tear down the card's QDIO state: if QDIO is established, shut it down
 * (halt for IQD, clear otherwise) and free it; then halt/clear the CCW
 * channels and mark the card DOWN. A concurrent CLEANING state makes this
 * a no-op.
 */
int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (card->info.type == QETH_CARD_TYPE_IQD)
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		qdio_free(CARD_DDEV(card));
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	card->state = CARD_STATE_DOWN;
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);

/*
 * Read the device's configuration data via the RCD command on the data
 * channel. On success *buffer points to a kzalloc'ed buffer of *length
 * bytes that the CALLER must kfree; on failure both are zeroed.
 */
static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
		int *length)
{
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct qeth_channel *channel = &card->data;
	unsigned long flags;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf)
		return -ENOMEM;

	channel->ccw.cmd_code = ciw->cmd;
	channel->ccw.cda = (__u32) __pa(rcd_buf);
	channel->ccw.count = ciw->count;
	channel->ccw.flags = CCW_FLAG_SLI;
	channel->state = CH_STATE_RCD;
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
				       QETH_RCD_PARM, LPM_ANYPATH, 0,
				       QETH_RCD_TIMEOUT);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (!ret)
		wait_event(card->wait_q,
			   (channel->state == CH_STATE_RCD_DONE ||
			    channel->state == CH_STATE_DOWN));
	if (channel->state == CH_STATE_DOWN)
		ret = -EIO;
	else
		channel->state = CH_STATE_DOWN;
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	return ret;
}

/*
 * Extract chpid/unit address/CULA from the RCD buffer; guestlan is set when
 * bytes 0x10/0x11 are EBCDIC 'V','M' (z/VM guest LAN).
 */
static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgunit");
	card->info.chpid = prcd[30];
	card->info.unit_addr2 = prcd[31];
	card->info.cula = prcd[63];
	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
			       (prcd[0x11] == _ascebc['M']));
}

/*
 * Choose default blocking-threshold (blkt) values based on RCD bytes
 * 74..76; the 0xF0/0xF5/0xF6 pattern selects the tuned non-zero defaults.
 * NOTE(review): bytes presumably encode the machine type — confirm.
 */
static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgblkt");

	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
	    (prcd[76] == 0xF5 || prcd[76] == 0xF6)) {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	} else {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	}
}

/* Seed the well-known initial MPC handshake tokens. */
static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}

/* Pick the IDX function level matching the card type. */
static void qeth_init_func_level(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

/*
 * Second half of IDX activation: post a read CCW for the IDX reply and
 * wait for the channel to reach CH_STATE_UP (idx_reply_cb runs from the
 * irq handler and parses the reply). The irq_pending cmpxchg serializes
 * CCW starts on the channel.
 */
static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	int rc;
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "idxanswr");
	card = CARD_FROM_CDEV(channel->ccwdev);
	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	channel->ccw.count = QETH_BUFSIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);

	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		/* undo our irq_pending claim and release any other waiter */
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			 channel->state == CH_STATE_UP, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_UP) {
		rc = -ETIME;
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
		qeth_clear_cmd_buffers(channel);
	} else
		rc = 0;
	return rc;
}

/*
 * First half of IDX activation: build the IDX_ACTIVATE_READ/WRITE request
 * (port number, issuer token, function level, QDIO device addresses),
 * write it out, wait for CH_STATE_ACTIVATING, then fetch the answer via
 * qeth_idx_activate_get_answer(). Only the write channel advances the
 * transport-header sequence number.
 */
static int qeth_idx_activate_channel(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_card *card;
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	__u16 temp;
	__u8 tmp;
	int rc;
	struct ccw_dev_id temp_devid;

	card = CARD_FROM_CDEV(channel->ccwdev);

	QETH_DBF_TEXT(SETUP, 2, "idxactch");

	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = IDX_ACTIVATE_SIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);
	if (channel == &card->write) {
		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
		card->seqno.trans_hdr++;
	} else {
		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	}
	/* high bit marks the port number as valid */
	tmp = ((__u8)card->info.portno) | 0x80;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, sizeof(__u16));
	ccw_device_get_id(CARD_DDEV(card), &temp_devid);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);

	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
			rc);
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_ACTIVATING) {
		dev_warn(&channel->ccwdev->dev, "The qeth device driver"
			" failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
			dev_name(&channel->ccwdev->dev));
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
		qeth_clear_cmd_buffers(channel);
		return -ETIME;
	}
	return qeth_idx_activate_get_answer(channel, idx_reply_cb);
}

/*
 * Translate our local function level into the value the peer is expected
 * to echo back in its IDX reply.
 */
static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

/*
 * IRQ-context callback parsing the IDX reply on the WRITE channel:
 * validates the positive-reply flag and the peer function level (bit
 * 0x0100 masked out), then moves the channel to CH_STATE_UP.
 */
static void qeth_idx_write_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(SETUP , 2, "idxwrcb");

	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}
	card = CARD_FROM_CDEV(channel->ccwdev);

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
			dev_err(&card->write.ccwdev->dev,
				"The adapter is used exclusively by another "
				"host\n");
		else
			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
				" negative reply\n",
				dev_name(&card->write.ccwdev->dev));
		goto out;
	}
	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
			"function level mismatch (sent: 0x%x, received: "
			"0x%x)\n", dev_name(&card->write.ccwdev->dev),
			card->info.func_level, temp);
		goto out;
	}
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel, iob);
}

/*
 * IRQ-context callback parsing the IDX reply on the READ channel:
 * checks the IDX response, reports negative-reply causes, records the
 * issuer token and microcode level, then moves the channel to CH_STATE_UP.
 */
static void qeth_idx_read_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;
	QETH_DBF_TEXT(SETUP , 2, "idxrdcb");
	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}

	card = CARD_FROM_CDEV(channel->ccwdev);
	if (qeth_check_idx_response(card, iob->data))
		goto out;

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
		case QETH_IDX_ACT_ERR_EXCL:
			/*
			 * NOTE(review): this is the READ-channel callback but
			 * the error is reported against the WRITE channel's
			 * device — looks like copy/paste from the write cb;
			 * confirm whether card->read is intended here.
			 */
			dev_err(&card->write.ccwdev->dev,
				"The adapter is used exclusively by another "
				"host\n");
			break;
		case QETH_IDX_ACT_ERR_AUTH:
		case QETH_IDX_ACT_ERR_AUTH_USER:
			dev_err(&card->read.ccwdev->dev,
				"Setting the device online failed because of "
				"insufficient authorization\n");
			break;
		default:
			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
				" negative reply\n",
				dev_name(&card->read.ccwdev->dev));
		}
		QETH_CARD_TEXT_(card, 2, "idxread%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));
		goto out;
	}

	/*
	 * Temporary fix for a microcode bug: also force portname_required
	 * for OSD cards. To revert it, replace the OR below by AND.
	 */
	if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
	    (card->info.type == QETH_CARD_TYPE_OSD))
		card->info.portname_required = 1;

	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (temp != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
			"level mismatch (sent: 0x%x, received: 0x%x)\n",
			dev_name(&card->read.ccwdev->dev),
			card->info.func_level, temp);
		goto out;
	}
	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel, iob);
}

/*
 * Stamp the control buffer with the current transport/PDU sequence numbers
 * (advancing both counters) and set up the write-channel CCW for it.
 */
void qeth_prepare_control_data(struct qeth_card *card, int len,
		struct qeth_cmd_buffer *iob)
{
	qeth_setup_ccw(&card->write, iob->data, len);
	iob->callback = qeth_release_buffer;

	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
	       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.trans_hdr++;
	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
}
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);

/*
 * Synchronously send a control command on the write channel and wait for
 * the matching reply (delivered through reply_cb by the irq path). A
 * qeth_reply is queued on cmd_waiter_list before the CCW is started; on
 * start failure it is unlinked again. Waiting is either sleeping
 * (IPV4 SETIP, the one long-running ipassist) or a cpu_relax() poll loop
 * bounded by the timeout.
 */
int qeth_send_control_data(struct qeth_card *card, int len,
		struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
			unsigned long),
		void *reply_param)
{
	int rc;
	unsigned long flags;
	struct qeth_reply *reply = NULL;
	unsigned long timeout, event_timeout;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "sendctl");

	if (card->read_or_write_problem) {
		qeth_release_buffer(iob->channel, iob);
		return -EIO;
	}
	reply = qeth_alloc_reply(card);
	if (!reply) {
		return -ENOMEM;
	}
	reply->callback = reply_cb;
	reply->param = reply_param;
	if (card->state == CARD_STATE_DOWN)
		reply->seqno = QETH_IDX_COMMAND_SEQNO;
	else
		reply->seqno = card->seqno.ipa++;
	init_waitqueue_head(&reply->wait_q);
	spin_lock_irqsave(&card->lock, flags);
	list_add_tail(&reply->list, &card->cmd_waiter_list);
	spin_unlock_irqrestore(&card->lock, flags);
	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);

	/* busy-wait until we own the write channel's irq_pending slot */
	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
	qeth_prepare_control_data(card, len, iob);

	if (IS_IPA(iob->data))
		event_timeout = QETH_IPA_TIMEOUT;
	else
		event_timeout = QETH_TIMEOUT;
	timeout = jiffies + event_timeout;

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
			      (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
	if (rc) {
		QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
			"ccw_device_start rc = %i\n",
			dev_name(&card->write.ccwdev->dev), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		spin_lock_irqsave(&card->lock, flags);
		list_del_init(&reply->list);
		qeth_put_reply(reply);
		spin_unlock_irqrestore(&card->lock, flags);
		qeth_release_buffer(iob->channel, iob);
		atomic_set(&card->write.irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}

	/* we have only one long running ipassist, since we can ensure
	   process context of this command we can sleep */
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	if ((cmd->hdr.command == IPA_CMD_SETIP) &&
	    (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
		if (!wait_event_timeout(reply->wait_q,
					atomic_read(&reply->received),
					event_timeout))
			goto time_err;
	} else {
		while (!atomic_read(&reply->received)) {
			if (time_after(jiffies, timeout))
				goto time_err;
			cpu_relax();
		}
	}

	if (reply->rc == -EIO)
		goto error;
	rc = reply->rc;
	qeth_put_reply(reply);
	return rc;

time_err:
	reply->rc = -ETIME;
	spin_lock_irqsave(&reply->card->lock, flags);
	list_del_init(&reply->list);
	spin_unlock_irqrestore(&reply->card->lock, flags);
	atomic_inc(&reply->received);
error:
	atomic_set(&card->write.irq_pending, 0);
	qeth_release_buffer(iob->channel, iob);
	card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
	rc = reply->rc;
	qeth_put_reply(reply);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_control_data);

/* CM_ENABLE reply: record the filter token the adapter assigned. */
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
	return 0;
}

/* Send the CM_ENABLE MPC primitive (step 1 of the control handshake). */
static int qeth_cm_enable(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "cmenable");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
				    qeth_cm_enable_cb, NULL);
	return rc;
}

/* CM_SETUP reply: record the connection token the adapter assigned. */
static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;
	QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
	return 0;
}

/* Send the CM_SETUP MPC primitive (step 2 of the control handshake). */
static int qeth_cm_setup(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "cmsetup");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
				    qeth_cm_setup_cb, NULL);
	return rc;
}

/* Default MTU per card/link type when the adapter does not dictate one. */
static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_UNKNOWN:
		return 1500;
	case QETH_CARD_TYPE_IQD:
		return card->info.max_mtu;
	case QETH_CARD_TYPE_OSD:
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_HSTR:
		case QETH_LINK_TYPE_LANE_TR:
			return 2000;
		default:
			return 1492;
		}
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		return 1492;
	default:
		return 1500;
	}
}

/* Map a HiperSockets frame-size code to an MTU; 0 means "unknown code". */
static inline int qeth_get_mtu_outof_framesize(int framesize)
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}

/* Range-check an MTU; OSN/unknown cards accept anything. */
static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
	case QETH_CARD_TYPE_IQD:
		return ((mtu >= 576) &&
			(mtu <= card->info.max_mtu));
	case QETH_CARD_TYPE_OSN:
	case QETH_CARD_TYPE_UNKNOWN:
	default:
		return 1;
	}
}

/*
 * ULP_ENABLE reply: record the ULP filter token and derive MTU settings.
 * For IQD the adapter's frame-size code is authoritative (and a changed
 * frame size forces the qdio buffers to be re-allocated); other types use
 * the driver default plus the adapter's max. Also extracts the link type
 * when the DIFINFO length indicates it is present.
 */
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	__u8 link_type;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (card->info.type == QETH_CARD_TYPE_IQD) {
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
		if (!mtu) {
			iob->rc = -EINVAL;
			QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
			return 0;
		}
		if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
			/* frame size has changed */
			if (card->dev &&
			    ((card->dev->mtu == card->info.initial_mtu) ||
			     (card->dev->mtu > mtu)))
				card->dev->mtu = mtu;
			qeth_free_qdio_buffers(card);
		}
		card->info.initial_mtu = mtu;
		card->info.max_mtu = mtu;
		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
	} else {
		card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
		card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
			iob->data);
		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	}

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		card->info.link_type = link_type;
	} else
		card->info.link_type = 0;
	QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
	QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
	return 0;
}

/*
 * Send the ULP_ENABLE MPC primitive: selects port, protocol (OSN2/layer2/
 * TCPIP) and passes the portname.
 */
static int qeth_ulp_enable(struct qeth_card *card)
{
	int rc;
	char prot_type;
	struct qeth_cmd_buffer *iob;

	/*FIXME: trace view callbacks*/
	QETH_DBF_TEXT(SETUP, 2, "ulpenabl");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
		(__u8) card->info.portno;
	if (card->options.layer2)
		if (card->info.type == QETH_CARD_TYPE_OSN)
			prot_type = QETH_PROT_OSN2;
		else
			prot_type = QETH_PROT_LAYER2;
	else
		prot_type = QETH_PROT_TCPIP;

	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
	       card->info.portname, 9);
	rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
				    qeth_ulp_enable_cb, NULL);
	return rc;
}

/*
 * ULP_SETUP reply: record the connection token; a "00S" token signals an
 * OLM (Optimized Latency Mode) connection limit and fails with -EMLINK.
 */
static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
		QETH_DBF_TEXT(SETUP, 2, "olmlimit");
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
		iob->rc = -EMLINK;
	}
	QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
	return 0;
}

/*
 * Send the ULP_SETUP MPC primitive, carrying the exchanged tokens plus the
 * QDIO device's CU address and real device address.
 */
static int qeth_ulp_setup(struct qeth_card *card)
{
	int rc;
	__u16 temp;
	struct qeth_cmd_buffer *iob;
	struct ccw_dev_id dev_id;

	QETH_DBF_TEXT(SETUP, 2, "ulpsetup");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
				    qeth_ulp_setup_cb, NULL);
	return rc;
}

/*
 * Allocate and wire up one outbound buffer descriptor for slot bidx of
 * queue q; the previous occupant is kept as next_pending (CQ handling).
 * Uses GFP_ATOMIC — may be called from non-sleeping context.
 */
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
{
	int rc;
	struct qeth_qdio_out_buffer *newbuf;

	rc = 0;
	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
	if (!newbuf) {
		rc = -ENOMEM;
		goto out;
	}
	newbuf->buffer =
&q->qdio_bufs[bidx]; skb_queue_head_init(&newbuf->skb_list); lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); newbuf->q = q; newbuf->aob = NULL; newbuf->next_pending = q->bufs[bidx]; atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); q->bufs[bidx] = newbuf; if (q->bufstates) { q->bufstates[bidx].user = newbuf; QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx); QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf); QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf->next_pending); } out: return rc; } static int qeth_alloc_qdio_buffers(struct qeth_card *card) { int i, j; QETH_DBF_TEXT(SETUP, 2, "allcqdbf"); if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) return 0; card->qdio.in_q = kzalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL); if (!card->qdio.in_q) goto out_nomem; QETH_DBF_TEXT(SETUP, 2, "inq"); QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *)); memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q)); /* give inbound qeth_qdio_buffers their qdio_buffers */ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { card->qdio.in_q->bufs[i].buffer = &card->qdio.in_q->qdio_bufs[i]; card->qdio.in_q->bufs[i].rx_skb = NULL; } /* inbound buffer pool */ if (qeth_alloc_buffer_pool(card)) goto out_freeinq; /* outbound */ card->qdio.out_qs = kzalloc(card->qdio.no_out_queues * sizeof(struct qeth_qdio_out_q *), GFP_KERNEL); if (!card->qdio.out_qs) goto out_freepool; for (i = 0; i < card->qdio.no_out_queues; ++i) { card->qdio.out_qs[i] = kzalloc(sizeof(struct qeth_qdio_out_q), GFP_KERNEL); if (!card->qdio.out_qs[i]) goto out_freeoutq; QETH_DBF_TEXT_(SETUP, 2, "outq %i", i); QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *)); card->qdio.out_qs[i]->queue_no = i; /* give outbound qeth_qdio_buffers their qdio_buffers */ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL); if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j)) goto out_freeoutqbufs; } } /* completion */ 
if (qeth_alloc_cq(card)) goto out_freeoutq; return 0; out_freeoutqbufs: while (j > 0) { --j; kmem_cache_free(qeth_qdio_outbuf_cache, card->qdio.out_qs[i]->bufs[j]); card->qdio.out_qs[i]->bufs[j] = NULL; } out_freeoutq: while (i > 0) { kfree(card->qdio.out_qs[--i]); qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); } kfree(card->qdio.out_qs); card->qdio.out_qs = NULL; out_freepool: qeth_free_buffer_pool(card); out_freeinq: kfree(card->qdio.in_q); card->qdio.in_q = NULL; out_nomem: atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); return -ENOMEM; } static void qeth_create_qib_param_field(struct qeth_card *card, char *param_field) { param_field[0] = _ascebc['P']; param_field[1] = _ascebc['C']; param_field[2] = _ascebc['I']; param_field[3] = _ascebc['T']; *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card); *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card); *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card); } static void qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field) { param_field[16] = _ascebc['B']; param_field[17] = _ascebc['L']; param_field[18] = _ascebc['K']; param_field[19] = _ascebc['T']; *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total; *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet; *((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo; } static int qeth_qdio_activate(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 3, "qdioact"); return qdio_activate(CARD_DDEV(card)); } static int qeth_dm_act(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "dmact"); iob = qeth_wait_for_buffer(&card->write); memcpy(iob->data, DM_ACT, DM_ACT_SIZE); memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, 
NULL, NULL); return rc; } static int qeth_mpc_initialize(struct qeth_card *card) { int rc; QETH_DBF_TEXT(SETUP, 2, "mpcinit"); rc = qeth_issue_next_read(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); return rc; } rc = qeth_cm_enable(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); goto out_qdio; } rc = qeth_cm_setup(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); goto out_qdio; } rc = qeth_ulp_enable(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); goto out_qdio; } rc = qeth_ulp_setup(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_qdio; } rc = qeth_alloc_qdio_buffers(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_qdio; } rc = qeth_qdio_establish(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); qeth_free_qdio_buffers(card); goto out_qdio; } rc = qeth_qdio_activate(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); goto out_qdio; } rc = qeth_dm_act(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); goto out_qdio; } return 0; out_qdio: qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); return rc; } static void qeth_print_status_with_portname(struct qeth_card *card) { char dbf_text[15]; int i; sprintf(dbf_text, "%s", card->info.portname + 1); for (i = 0; i < 8; i++) dbf_text[i] = (char) _ebcasc[(__u8) dbf_text[i]]; dbf_text[8] = 0; dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n" "with link type %s (portname: %s)\n", qeth_get_cardname(card), (card->info.mcl_level[0]) ? " (level: " : "", (card->info.mcl_level[0]) ? card->info.mcl_level : "", (card->info.mcl_level[0]) ? ")" : "", qeth_get_cardname_short(card), dbf_text); } static void qeth_print_status_no_portname(struct qeth_card *card) { if (card->info.portname[0]) dev_info(&card->gdev->dev, "Device is a%s " "card%s%s%s\nwith link type %s " "(no portname needed by interface).\n", qeth_get_cardname(card), (card->info.mcl_level[0]) ? " (level: " : "", (card->info.mcl_level[0]) ? 
			card->info.mcl_level : "",
			(card->info.mcl_level[0]) ? ")" : "",
			qeth_get_cardname_short(card));
	else
		dev_info(&card->gdev->dev, "Device is a%s "
			"card%s%s%s\nwith link type %s.\n",
			qeth_get_cardname(card),
			(card->info.mcl_level[0]) ? " (level: " : "",
			(card->info.mcl_level[0]) ? card->info.mcl_level : "",
			(card->info.mcl_level[0]) ? ")" : "",
			qeth_get_cardname_short(card));
}

/*
 * Normalize the microcode level string for the card type, then log the
 * device identity (with or without portname).
 */
void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/*
		 * VM will use a non-zero first character to indicate a
		 * HiperSockets-like reporting of the level; OSA sets the
		 * first character to zero.
		 */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
			break;
		}
		/* fallthrough */
	case QETH_CARD_TYPE_IQD:
		if ((card->info.guestlan) ||
		    (card->info.mcl_level[0] & 0x80)) {
			/* convert the 4 EBCDIC level bytes to ASCII */
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	if (card->info.portname_required)
		qeth_print_status_with_portname(card);
	else
		qeth_print_status_no_portname(card);
}
EXPORT_SYMBOL_GPL(qeth_print_status_message);

/* Move all init-pool entries onto the working (free) buffer pool list. */
static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_CARD_TEXT(card, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

/*
 * Pick a pool entry whose pages are all free (page_count <= 1); if none
 * exists, take the first entry and replace its still-referenced pages with
 * freshly allocated ones (GFP_ATOMIC). Returns NULL when the pool is empty
 * or a replacement page cannot be allocated.
 */
static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
		struct qeth_card *card)
{
	struct list_head *plh;
	struct qeth_buffer_pool_entry *entry;
	int i, free;
	struct page *page;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
		entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			if (page_count(virt_to_page(entry->elements[i])) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
			   struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(virt_to_page(entry->elements[i])) > 1) {
			page = alloc_page(GFP_ATOMIC);
			if (!page) {
				return NULL;
			} else {
				free_page((unsigned long)entry->elements[i]);
				entry->elements[i] = page_address(page);
				if (card->options.performance_stats)
					card->perf_stats.sg_alloc_page_rx++;
			}
		}
	}
	list_del_init(&entry->list);
	return entry;
}

/*
 * Attach a free pool entry (and, with CQ enabled, a pre-allocated rx skb)
 * to an inbound qdio buffer and populate its SBAL elements. Returns 1 on
 * failure (no skb / no pool entry), 0 on success.
 */
static int qeth_init_input_buffer(struct qeth_card *card,
		struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
		if (!buf->rx_skb)
			return 1;
	}

	pool_entry = qeth_find_free_buffer_pool_entry(card);
	if (!pool_entry)
		return 1;

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run  out off
	 * buffers
	 */
	buf->pool_entry = pool_entry;
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr =  pool_entry->elements[i];
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	return 0;
}

/*
 * (Re)initialize the inbound and outbound QDIO queues for a fresh start:
 * hand buf_count-1 inbound buffers to the hardware, initialize the
 * completion queue, and reset every outbound queue's buffers/counters.
 */
int qeth_init_qdio_queues(struct qeth_card *card)
{
	int i, j;
	int rc;

	QETH_DBF_TEXT(SETUP, 2, "initqdqs");

	/* inbound queue */
	memset(card->qdio.in_q->qdio_bufs, 0,
	       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
	qeth_initialize_working_pool_list(card);
	/*give only as many buffers to hardware as we have buffer pool entries*/
	for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
		qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
	card->qdio.in_q->next_buf_to_init =
		card->qdio.in_buf_pool.buf_count - 1;
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
		     card->qdio.in_buf_pool.buf_count - 1);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return rc;
	}

	/* completion */
	rc = qeth_cq_init(card);
	if (rc) {
		return rc;
	}

	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		memset(card->qdio.out_qs[i]->qdio_bufs, 0,
		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			qeth_clear_output_buffer(card->qdio.out_qs[i],
					card->qdio.out_qs[i]->bufs[j],
					QETH_QDIO_BUF_EMPTY);
		}
		card->qdio.out_qs[i]->card = card;
		card->qdio.out_qs[i]->next_buf_to_fill = 0;
		card->qdio.out_qs[i]->do_pack = 0;
		atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
		atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
		atomic_set(&card->qdio.out_qs[i]->state,
			   QETH_OUT_Q_UNLOCKED);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);

/* Map a link type to the IPA adapter-type code (2 for HSTR, else 1). */
static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
{
	switch (link_type) {
	case QETH_LINK_TYPE_HSTR:
		return 2;
	default:
		return 1;
	}
}

/*
 * Zero-fill an IPA command and populate its header from the card state
 * (command code, sequence number, adapter type, port, protocol versions).
 */
static void qeth_fill_ipacmd_header(struct qeth_card *card,
		struct qeth_ipa_cmd *cmd, __u8 command,
		enum qeth_prot_versions prot)
{
	memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
	cmd->hdr.command = command;
	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
	cmd->hdr.seqno = card->seqno.ipa;
	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
	if (card->options.layer2)
		cmd->hdr.prim_version_no = 2;
	else
		cmd->hdr.prim_version_no = 1;
	cmd->hdr.param_count = 1;
cmd->hdr.prot_version = prot;
	cmd->hdr.ipa_supported = 0;
	cmd->hdr.ipa_enabled = 0;
}

/* Allocate a command buffer from the write channel and pre-fill its
 * IPA header for the given command and protocol version. */
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
		enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	iob = qeth_wait_for_buffer(&card->write);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);

	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);

/* Copy the PDU header template into the buffer, then patch in the
 * protocol type byte and the ULP connection token. */
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		char prot_type)
{
	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);

/* Send an IPA command and register its reply callback.  On a control
 * data timeout (-ETIME) the pending command list is cleared and a card
 * recovery is scheduled. */
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
			unsigned long),
		void *reply_param)
{
	int rc;
	char prot_type;

	QETH_CARD_TEXT(card, 4, "sendipa");

	/* Pick the protocol discriminator for the PDU header. */
	if (card->options.layer2)
		if (card->info.type == QETH_CARD_TYPE_OSN)
			prot_type = QETH_PROT_OSN2;
		else
			prot_type = QETH_PROT_LAYER2;
	else
		prot_type = QETH_PROT_TCPIP;
	qeth_prepare_ipa_cmd(card, iob, prot_type);
	rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
				    iob, reply_cb, reply_param);
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);

/* Issue the STARTLAN IPA command to activate the LAN interface. */
int qeth_send_startlan(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "strtlan");

	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_startlan);

/* Default SETADAPTERPARMS reply callback: when the main IPA header
 * reports success, propagate the subcommand's return code into it. */
static int qeth_default_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code == 0)
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
	return 0;
}

/* Reply callback for QUERY_COMMANDS_SUPPORTED: record the reported link
 * type and the set of supported adapter subcommands. */
static int qeth_query_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 3, "quyadpcb");

	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
		card->info.link_type =
		      cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
		QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
	}
	card->options.adp.supported_funcs =
		cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
	return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
}

/* Build a SETADAPTERPARMS command buffer for the given subcommand. */
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
		__u32 command, __u32 cmdlen)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
				     QETH_PROT_IPV4);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
	cmd->data.setadapterparms.hdr.command_code = command;
	cmd->data.setadapterparms.hdr.used_total = 1;
	cmd->data.setadapterparms.hdr.seq_no = 1;

	return iob;
}

/* Ask the adapter which SETADAPTERPARMS subcommands it supports. */
int qeth_query_setadapterparms(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 3, "queryadp");
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
				   sizeof(struct qeth_ipacmd_setadpparms));
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);

/* Reply callback for QIPASSIST: store the supported and enabled IPA
 * functions per IP protocol version. */
static int qeth_query_ipassists_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(SETUP, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;

	switch (cmd->hdr.return_code) {
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
		QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
		card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported_funcs |=
IPA_SETADAPTERPARMS; return -0; default: if (cmd->hdr.return_code) { QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled " "rc=%d\n", dev_name(&card->gdev->dev), cmd->hdr.return_code); return 0; } } if (cmd->hdr.prot_version == QETH_PROT_IPV4) { card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) { card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; } else QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected" "\n", dev_name(&card->gdev->dev)); return 0; } int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) { int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); return rc; } EXPORT_SYMBOL_GPL(qeth_query_ipassists); static int qeth_query_setdiagass_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; __u16 rc; cmd = (struct qeth_ipa_cmd *)data; rc = cmd->hdr.return_code; if (rc) QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); else card->info.diagass_support = cmd->data.diagass.ext; return 0; } static int qeth_query_setdiagass(struct qeth_card *card) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; QETH_DBF_TEXT(SETUP, 2, "qdiagass"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.diagass.subcmd_len = 16; cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); } static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) { unsigned long info = get_zeroed_page(GFP_KERNEL); struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 
*)info;
	struct ccw_dev_id ccwid;
	int level;

	/* Identification that is available even without the info page. */
	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	if (!info)
		return;

	/* LPAR number and guest name come from the STSI system-information
	 * blocks; both lookups are best-effort. */
	level = stsi(NULL, 0, 0, 0);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
		tid->lparnr = info222->lpar_number;
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
	return;
}

/* Reply callback for the hardware-trap command: just trace a non-zero
 * return code. */
static int qeth_hw_trap_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u16 rc;

	cmd = (struct qeth_ipa_cmd *)data;
	rc = cmd->hdr.return_code;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
	return 0;
}

/* Arm, disarm or trigger a hardware trap via the diagnostic assist. */
int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(SETUP, 2, "diagtrap");
	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	cmd->data.diagass.subcmd_len = 80;
	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		/* arming carries a trap id in the command payload */
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
			(struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_hw_trap);

/* Inspect a QDIO error for a buffer.  Returns 0 when there is no error
 * or the buffer was merely dropped (SBALF15 == 0x12, counted in
 * rx_dropped), 1 for a real error. */
int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
		unsigned int qdio_error, const char *dbftext)
{
	if (qdio_error) {
		QETH_CARD_TEXT(card, 2, dbftext);
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
				buf->element[15].sflags);
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
				buf->element[14].sflags);
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
		if ((buf->element[15].sflags) == 0x12) {
			card->stats.rx_dropped++;
			return 0;
		} else
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);

/* Delayed-work handler: retry requeueing input buffers that could not
 * be allocated earlier (memory shortage). */
void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(work, struct qeth_card,
		buffer_reclaim_work.work);

	QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
	qeth_queue_input_buffer(card, card->reclaim_index);
}

/* Refill the inbound queue with buffers, starting at 'index'. */
void qeth_queue_input_buffer(struct qeth_card *card, int index)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int count;
	int i;
	int rc;
	int newcount = 0;

	/* number of buffers between 'index' and the next init position,
	 * wrapping around the ring if needed */
	count = (index < queue->next_buf_to_init)?
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init - index) :
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packages */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			/* nothing could be initialized: if the whole pool is
			 * idle, defer to the reclaim worker */
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				card->reclaim_index = index;
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return;
		}

		/*
		 * according to old code it should be avoided to requeue all
		 * 128 buffers in order to benefit from PCI avoidance.
		 * this function keeps at least one buffer (the buffer at
		 * 'index') un-requeued -> this buffer is the first buffer that
		 * will be requeued the next time
		 */
		if (card->options.performance_stats) {
			card->perf_stats.inbound_do_qdio_cnt++;
			card->perf_stats.inbound_do_qdio_start_time =
				qeth_get_micros();
		}
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count);
		if (card->options.performance_stats)
			card->perf_stats.inbound_do_qdio_time +=
				qeth_get_micros() -
				card->perf_stats.inbound_do_qdio_start_time;
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
					  QDIO_MAX_BUFFERS_PER_Q;
	}
}
EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);

/* Classify an outbound completion error.  For IQD devices only SBALF15
 * decides whether there was an error; retryable SBALF15 codes (15..31)
 * map to QETH_SEND_ERROR_RETRY, everything else to a link failure. */
static int qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	if (card->info.type == QETH_CARD_TYPE_IQD) {
		if (sbalf15 == 0) {
			qdio_err = 0;
		} else {
			qdio_err = 1;
		}
	}
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return QETH_SEND_ERROR_NONE;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return QETH_SEND_ERROR_RETRY;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
			(u16)qdio_err, (u8)sbalf15);
	return QETH_SEND_ERROR_LINK_FAILURE;
}

/*
 * Switched to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK){
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			if (queue->card->options.performance_stats)
				queue->card->perf_stats.sc_dp_p++;
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;
	int flush_count = 0;

	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			if (queue->card->options.performance_stats)
				queue->card->perf_stats.sc_p_dp++;
			queue->do_pack = 0;
			/* flush packing buffers */
			buffer = queue->bufs[queue->next_buf_to_fill];
			if ((atomic_read(&buffer->state) ==
						QETH_QDIO_BUF_EMPTY) &&
			    (buffer->next_element_to_fill > 0)) {
				atomic_set(&buffer->state,
					   QETH_QDIO_BUF_PRIMED);
				flush_count++;
				queue->next_buf_to_fill =
					(queue->next_buf_to_fill + 1) %
					QDIO_MAX_BUFFERS_PER_Q;
			}
		}
	}
	return flush_count;
}

/*
 * Called to flush a packing buffer if no more pci flags are on the queue.
 * Checks if there is a packing buffer and prepares it to be flushed.
 * In that case returns 1, otherwise zero.
 */
static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
		return 1;
	}
	return 0;
}

/* Hand 'count' primed buffers starting at 'index' to the QDIO layer,
 * setting PCI request flags where needed to keep completions flowing. */
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_qdio_out_buffer *buf;
	int rc;
	int i;
	unsigned int qdio_flags;

	for (i = index; i < index + count; ++i) {
		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;

		if (queue->bufstates)
			queue->bufstates[bidx].user = buf;

		/* IQD devices need no PCI flag handling */
		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
			continue;

		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK
- QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |=
					SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |=
					SBAL_SFLAGS0_PCI_REQ;
			}
		}
	}

	queue->card->dev->trans_start = jiffies;
	if (queue->card->options.performance_stats) {
		queue->card->perf_stats.outbound_do_qdio_cnt++;
		queue->card->perf_stats.outbound_do_qdio_start_time =
			qeth_get_micros();
	}
	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	if (atomic_read(&queue->set_pci_flags_count))
		qdio_flags |= QDIO_FLAG_PCI_OUT;
	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count);
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.outbound_do_qdio_time +=
			qeth_get_micros() -
			queue->card->perf_stats.outbound_do_qdio_start_time;
	atomic_add(count, &queue->used_buffers);
	if (rc) {
		queue->card->stats.tx_errors += count;
		/* ignore temporary SIGA errors without busy condition */
		if (rc == -ENOBUFS)
			return;
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
		return;
	}
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.bufs_sent += count;
}

/* See whether the queue should leave packing mode or needs a packing
 * buffer flushed to get a PCI flag out; runs only when the out-queue
 * lock can be taken over from UNLOCKED. */
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	int index;
	int flush_cnt = 0;
	int q_was_packing = 0;

	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
				QETH_OUT_Q_UNLOCKED) {
			/*
			 * If we get in here, there was no action in
			 * do_send_packet. So, we check if there is a
			 * packing buffer to be flushed here.
			 */
			netif_stop_queue(queue->card->dev);
			index = queue->next_buf_to_fill;
			q_was_packing = queue->do_pack;
			/* queue->do_pack may change */
			barrier();
			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
			if (!flush_cnt &&
			    !atomic_read(&queue->set_pci_flags_count))
				flush_cnt +=
					qeth_flush_buffers_on_no_pci(queue);
			if (queue->card->options.performance_stats &&
			    q_was_packing)
				queue->card->perf_stats.bufs_sent_pack +=
					flush_cnt;
			if (flush_cnt)
				qeth_flush_buffers(queue, index, flush_cnt);
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		}
	}
}

/* QDIO poll hook: kick NAPI when the interface is up. */
void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
		unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	if (card->dev && (card->dev->flags & IFF_UP))
		napi_schedule(&card->napi);
}
EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);

/* Change the completion-queue setting.  Only allowed while the card is
 * DOWN or in RECOVER state, and only if CQ support is available; on a
 * real change all QDIO buffers are freed so they get re-allocated. */
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc;

	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	} else {
		if (card->options.cq == cq) {
			rc = 0;
			goto out;
		}

		if (card->state != CARD_STATE_DOWN &&
		    card->state != CARD_STATE_RECOVER) {
			rc = -1;
			goto out;
		}

		qeth_free_qdio_buffers(card);
		card->options.cq = cq;
		rc = 0;
	}
out:
	return rc;

}
EXPORT_SYMBOL_GPL(qeth_configure_cq);

static void
qeth_qdio_cq_handler(struct qeth_card *card,
		unsigned int qdio_err,
		unsigned int queue, int first_element, int count) {
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	if (!qeth_is_cq(card, queue))
		goto out;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_stop_queue(card->dev);
		qeth_schedule_recovery(card);
		goto out;
	}

	if (card->options.performance_stats) {
		card->perf_stats.cq_cnt++;
		card->perf_stats.cq_start_time = qeth_get_micros();
	}

	for (i = first_element; i < first_element + count; ++i) {
		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
		struct qdio_buffer *buffer = &cq->qdio_bufs[bidx];
		int e;

		e = 0;
		/* each used element carries the address of an async
		 * operation block (aob) to process, then is reset */
		while (buffer->element[e].addr) {
			unsigned long phys_aob_addr;

			phys_aob_addr = (unsigned long) buffer->element[e].addr;
			qeth_qdio_handle_aob(card, phys_aob_addr);
			buffer->element[e].addr = NULL;
			buffer->element[e].eflags = 0;
			buffer->element[e].sflags = 0;
			buffer->element[e].length = 0;

			++e;
		}

		buffer->element[15].eflags = 0;
		buffer->element[15].sflags = 0;
	}
	/* hand the processed buffers back to the CQ */
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
		     card->qdio.c_q->next_buf_to_init,
		     count);
	if (rc) {
		dev_warn(&card->gdev->dev,
			"QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}
	card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
					+ count) % QDIO_MAX_BUFFERS_PER_Q;

	netif_wake_queue(card->dev);

	if (card->options.performance_stats) {
		int delta_t = qeth_get_micros();
		delta_t -= card->perf_stats.cq_start_time;
		card->perf_stats.cq_time += delta_t;
	}
out:
	return;
}

/* Inbound QDIO interrupt: dispatch to the CQ handler, or schedule a
 * recovery on error for ordinary input queues. */
void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
		unsigned int queue, int first_elem, int count,
		unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	if (qeth_is_cq(card, queue))
		qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
	else if (qdio_err)
		qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);

/* Outbound QDIO completion handler: classify errors on finished
 * buffers, handle CQ-pending completions, and release the buffers. */
void qeth_qdio_output_handler(struct ccw_device *ccwdev,
		unsigned int qdio_error, int __queue, int first_element,
		int count, unsigned long card_ptr)
{
	struct qeth_card *card        = (struct qeth_card *) card_ptr;
	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
	struct qeth_qdio_out_buffer *buffer;
	int i;

	QETH_CARD_TEXT(card, 6, "qdouhdl");
	if (qdio_error & QDIO_ERROR_FATAL) {
		QETH_CARD_TEXT(card, 2, "achkcond");
		netif_stop_queue(card->dev);
		qeth_schedule_recovery(card);
		return;
	}
	if (card->options.performance_stats) {
		card->perf_stats.outbound_handler_cnt++;
		card->perf_stats.outbound_handler_start_time =
			qeth_get_micros();
	}
	for (i = first_element; i < (first_element + count); ++i) {
		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
		buffer = queue->bufs[bidx];
		qeth_handle_send_error(card, buffer, qdio_error);

		if (queue->bufstates &&
		    (queue->bufstates[bidx].flags &
		     QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
			/* completion is deferred to the CQ: move the buffer
			 * to PENDING and replace it in the ring */
			WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);

			if (atomic_cmpxchg(&buffer->state,
					   QETH_QDIO_BUF_PRIMED,
					   QETH_QDIO_BUF_PENDING) ==
				QETH_QDIO_BUF_PRIMED) {
				qeth_notify_skbs(queue, buffer,
						 TX_NOTIFY_PENDING);
			}
			buffer->aob = queue->bufstates[bidx].aob;
			QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
			QETH_CARD_TEXT(queue->card, 5, "aob");
			QETH_CARD_TEXT_(queue->card, 5, "%lx",
					virt_to_phys(buffer->aob));
			if (qeth_init_qdio_out_buf(queue, bidx)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
			}
		} else {
			if (card->options.cq == QETH_CQ_ENABLED) {
				enum iucv_tx_notify n;

				n = qeth_compute_cq_notification(
					buffer->buffer->element[15].sflags, 0);
				qeth_notify_skbs(queue, buffer, n);
			}

			qeth_clear_output_buffer(queue, buffer,
						 QETH_QDIO_BUF_EMPTY);
		}
		qeth_cleanup_handled_pending(queue, bidx, 0);
	}
	atomic_sub(count, &queue->used_buffers);
	/* check if we need to do something on this outbound queue */
	if (card->info.type != QETH_CARD_TYPE_IQD)
		qeth_check_outbound_queue(queue);

	netif_wake_queue(queue->card->dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_handler_time += qeth_get_micros() -
			card->perf_stats.outbound_handler_start_time;
}
EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);

/* Pick the outbound queue for a packet based on multicast settings and,
 * for IPv4, priority queueing on TOS bits or IP precedence. */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
			int ipv, int cast_type)
{
	if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
		     card->info.type == QETH_CARD_TYPE_OSX))
		return card->qdio.default_out_queue;
	switch (card->qdio.no_out_queues) {
	case 4:
		if (cast_type && card->info.is_multicast_different)
			return card->info.is_multicast_different &
				(card->qdio.no_out_queues - 1);
		if (card->qdio.do_prio_queueing && (ipv == 4)) {
			const u8 tos = ip_hdr(skb)->tos;

			if (card->qdio.do_prio_queueing ==
				QETH_PRIO_Q_ING_TOS) {
				if (tos & IP_TOS_NOTIMPORTANT)
					return 3;
				if (tos & IP_TOS_HIGHRELIABILITY)
					return 2;
				if (tos & IP_TOS_HIGHTHROUGHPUT)
					return 1;
				if (tos & IP_TOS_LOWDELAY)
					return 0;
			}
			if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
				return 3 - (tos >> 6);
		} else if (card->qdio.do_prio_queueing && (ipv == 6)) {
			/* TODO: IPv6!!! */
		}
		return card->qdio.default_out_queue;
	case 1: /* fallthrough for single-out-queue 1920-device */
	default:
		return card->qdio.default_out_queue;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);

/* Count the buffer elements needed to map all page fragments of 'skb',
 * accounting for fragments that cross page boundaries. */
int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, length, e, elements = 0;
	struct skb_frag_struct *frag;
	char *data;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		frag = &skb_shinfo(skb)->frags[cnt];
		data = (char *)page_to_phys(skb_frag_page(frag)) +
			frag->page_offset;
		length = frag->size;
		e = PFN_UP((unsigned long)data + length - 1) -
			PFN_DOWN((unsigned long)data);
		elements += e;
	}
	return elements;
}
EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);

/* Total buffer elements needed for 'skb' plus 'elems' extra elements;
 * returns 0 when the packet would exceed the per-buffer element limit. */
int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
		int elems)
{
	int dlen = skb->len - skb->data_len;
	int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
		PFN_DOWN((unsigned long)skb->data);

	elements_needed += qeth_get_elements_for_frags(skb);

	if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
			"(Number=%d / Length=%d). Discarded.\n",
			(elements_needed+elems), skb->len);
		return 0;
	}
	return elements_needed;
}
EXPORT_SYMBOL_GPL(qeth_get_elements_no);

/* If the qeth header of length 'len' would cross a page boundary, move
 * the packet data back into the headroom so header plus data start in
 * the same page.  Returns 1 when headroom is insufficient, 0 on success. */
int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
{
	int hroom, inpage, rest;

	if (((unsigned long)skb->data & PAGE_MASK) !=
	    (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
		hroom = skb_headroom(skb);
		inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
		rest = len - inpage;
		if (rest > hroom)
			return 1;
		memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
		skb->data -= rest;
		skb->tail -= rest;
		*hdr = (struct qeth_hdr *)skb->data;
		QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);

/* Map the linear part and all fragments of 'skb' into buffer elements,
 * setting FIRST/MIDDLE/LAST fragment flags.  'offset' >= 0 skips that
 * many bytes of linear data (the qeth header was placed separately);
 * 'is_tso' suppresses the FIRST_FRAG flag for the linear data. */
static inline void __qeth_fill_buffer(struct sk_buff *skb,
	struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
	int offset)
{
	int length = skb->len - skb->data_len;
	int length_here;
	int element;
	char *data;
	int first_lap, cnt;
	struct skb_frag_struct *frag;

	element = *next_element_to_fill;
	data = skb->data;
	first_lap = (is_tso == 0 ? 1 : 0);

	if (offset >= 0) {
		data = skb->data + offset;
		length -= offset;
		first_lap = 0;
	}

	while (length > 0) {
		/* length_here is the remaining amount of data in this page */
		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
		if (length < length_here)
			length_here = length;

		buffer->element[element].addr = data;
		buffer->element[element].length = length_here;
		length -= length_here;
		if (!length) {
			if (first_lap)
				if (skb_shinfo(skb)->nr_frags)
					buffer->element[element].eflags =
						SBAL_EFLAGS_FIRST_FRAG;
				else
					buffer->element[element].eflags = 0;
			else
				buffer->element[element].eflags =
				    SBAL_EFLAGS_MIDDLE_FRAG;
		} else {
			if (first_lap)
				buffer->element[element].eflags =
				    SBAL_EFLAGS_FIRST_FRAG;
			else
				buffer->element[element].eflags =
				    SBAL_EFLAGS_MIDDLE_FRAG;
		}
		data += length_here;
		element++;
		first_lap = 0;
	}

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		frag = &skb_shinfo(skb)->frags[cnt];
		data = (char *)page_to_phys(skb_frag_page(frag)) +
			frag->page_offset;
		length = frag->size;
		while (length > 0) {
			length_here = PAGE_SIZE -
				((unsigned long) data % PAGE_SIZE);
			if (length < length_here)
				length_here = length;

			buffer->element[element].addr = data;
			buffer->element[element].length = length_here;
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
			length -= length_here;
			data += length_here;
			element++;
		}
	}

	/* the final used element (if flagged) ends the fragment chain */
	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
	*next_element_to_fill = element;
}

/* Attach 'skb' to an output buffer.  Returns 1 when the buffer should
 * be flushed (non-packing mode, or a packed buffer became full). */
static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf, struct sk_buff *skb,
		struct qeth_hdr *hdr, int offset, int hd_len)
{
	struct qdio_buffer *buffer;
	int flush_cnt = 0, hdr_len, large_send = 0;

	buffer = buf->buffer;
	atomic_inc(&skb->users);
	skb_queue_tail(&buf->skb_list, skb);

	/*check first on TSO ....*/
	if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) {
		int element = buf->next_element_to_fill;

		hdr_len = sizeof(struct qeth_hdr_tso) +
			((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len;
		/*fill first buffer entry only with header information */
		buffer->element[element].addr = skb->data;
		buffer->element[element].length = hdr_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
		buf->next_element_to_fill++;
		skb->data += hdr_len;
		skb->len -= hdr_len;
		large_send = 1;
	}

	if (offset >= 0) {
		/* the qeth header lives outside the skb data; map it into
		 * its own element and remember it for cleanup */
		int element = buf->next_element_to_fill;
		buffer->element[element].addr = hdr;
		buffer->element[element].length = sizeof(struct qeth_hdr) +
							hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
		buf->is_header[element] = 1;
		buf->next_element_to_fill++;
	}

	__qeth_fill_buffer(skb, buffer, large_send,
		(int *)&buf->next_element_to_fill, offset);

	if (!queue->do_pack) {
		QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
		flush_cnt = 1;
	} else {
		QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer if full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt = 1;
		}
	}
	return flush_cnt;
}

/* Fast path: claim the queue lock, fill the next empty buffer with one
 * packet and flush it immediately.  Returns -EBUSY when the next buffer
 * is not empty. */
int qeth_do_send_packet_fast(struct qeth_card *card,
		struct qeth_qdio_out_q *queue, struct sk_buff *skb,
		struct qeth_hdr *hdr, int elements_needed,
		int offset, int hd_len)
{
	struct qeth_qdio_out_buffer *buffer;
	int index;

	/* spin until we get the queue ... */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	/* ... now we've got the queue */
	index = queue->next_buf_to_fill;
	buffer = queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		goto out;
	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
				  QDIO_MAX_BUFFERS_PER_Q;
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
	qeth_flush_buffers(queue, index, 1);
	return 0;
out:
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);

/* Packing-aware send path: may pack several packets into one buffer and
 * cooperates with the completion handler via the queue-state lock. */
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		struct sk_buff *skb, struct qeth_hdr *hdr,
		int elements_needed)
{
	struct qeth_qdio_out_buffer *buffer;
	int start_index;
	int flush_count = 0;
	int do_pack = 0;
	int tmp;
	int rc = 0;

	/* spin until we get the queue ... */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	start_index = queue->next_buf_to_fill;
	buffer = queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		return -EBUSY;
	}

	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		/* does packet fit in current buffer? */
		if ((QETH_MAX_BUFFER_ELEMENTS(card) -
		    buffer->next_element_to_fill) < elements_needed) {
			/* ...
no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
				(queue->next_buf_to_fill + 1) %
				QDIO_MAX_BUFFERS_PER_Q;
			buffer = queue->bufs[queue->next_buf_to_fill];
			/* we did a step forward, so check buffer state
			 * again */
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
						   flush_count);
				atomic_set(&queue->state,
						QETH_OUT_Q_UNLOCKED);
				return -EBUSY;
			}
		}
	}
	tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
	queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
				  QDIO_MAX_BUFFERS_PER_Q;
	flush_count += tmp;
	if (flush_count)
		qeth_flush_buffers(queue, start_index, flush_count);
	else if (!atomic_read(&queue->set_pci_flags_count))
		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
	/*
	 * queue->state will go from LOCKED -> UNLOCKED or from
	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
	 * (switch packing state or flush buffer to get another pci flag out).
	 * In that case we will enter this loop
	 */
	while (atomic_dec_return(&queue->state)) {
		flush_count = 0;
		start_index = queue->next_buf_to_fill;
		/* check if we can go back to non-packing state */
		flush_count += qeth_switch_to_nonpacking_if_needed(queue);
		/*
		 * check if we need to flush a packing buffer to get a pci
		 * flag out on the queue
		 */
		if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
			flush_count += qeth_flush_buffers_on_no_pci(queue);
		if (flush_count)
			qeth_flush_buffers(queue, start_index, flush_count);
	}
	/* at this point the queue is UNLOCKED again */
	if (queue->card->options.performance_stats && do_pack)
		queue->card->perf_stats.bufs_sent_pack += flush_count;

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);

/* Reply callback for SET_PROMISC_MODE: record the mode the adapter
 * ended up in (OFF on failure). */
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_ipacmd_setadpparms *setparms;

	QETH_CARD_TEXT(card, 4, "prmadpcb");

	cmd = (struct qeth_ipa_cmd *) data;
	setparms = &(cmd->data.setadapterparms);

	qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code);
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
	return 0;
}

/* Align the adapter's promiscuous mode with the netdev IFF_PROMISC
 * flag; no-op when they already agree. */
void qeth_setadp_promisc_mode(struct qeth_card *card)
{
	enum qeth_ipa_promisc_modes mode;
	struct net_device *dev = card->dev;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "setprom");

	if (((dev->flags & IFF_PROMISC) &&
	     (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
	    (!(dev->flags & IFF_PROMISC) &&
	     (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
		return;
	mode = SET_PROMISC_MODE_OFF;
	if (dev->flags & IFF_PROMISC)
		mode = SET_PROMISC_MODE_ON;
	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
			sizeof(struct qeth_ipacmd_setadpparms));
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);

/* ndo_change_mtu: validate the new MTU against hard bounds and the
 * adapter's capabilities before applying it. */
int qeth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct qeth_card *card;
	char dbf_text[15];

	card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "chgmtu");
	sprintf(dbf_text, "%8x", new_mtu);
	QETH_CARD_TEXT(card, 4, dbf_text);

	if (new_mtu < 64)
		return -EINVAL;
	if (new_mtu > 65535)
		return -EINVAL;
	if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) &&
	    (!qeth_mtu_is_valid(card, new_mtu)))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_change_mtu);

/* ndo_get_stats: hand out the card's statistics block. */
struct net_device_stats *qeth_get_stats(struct net_device *dev)
{
	struct qeth_card *card;

	card = dev->ml_priv;

	QETH_CARD_TEXT(card, 5, "getstat");

	return &card->stats;
}
EXPORT_SYMBOL_GPL(qeth_get_stats);

/* Reply callback for ALTER_MAC_ADDRESS (read MAC): copy the returned
 * address to the netdev unless a layer-2 MAC was already read. */
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4,
"chgmaccb"); cmd = (struct qeth_ipa_cmd *) data; if (!card->options.layer2 || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { memcpy(card->dev->dev_addr, &cmd->data.setadapterparms.data.change_addr.addr, OSA_ADDR_LEN); card->info.mac_bits |= QETH_LAYER2_MAC_READ; } qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); return 0; } int qeth_setadpparms_change_macaddr(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; QETH_CARD_TEXT(card, 4, "chgmac"); iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, sizeof(struct qeth_ipacmd_setadpparms)); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; memcpy(&cmd->data.setadapterparms.data.change_addr.addr, card->dev->dev_addr, OSA_ADDR_LEN); rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, NULL); return rc; } EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; struct qeth_set_access_ctrl *access_ctrl_req; int fallback = *(int *)reply->param; QETH_CARD_TEXT(card, 4, "setaccb"); cmd = (struct qeth_ipa_cmd *) data; access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; QETH_DBF_TEXT_(SETUP, 2, "setaccb"); QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); QETH_DBF_TEXT_(SETUP, 2, "rc=%d", cmd->data.setadapterparms.hdr.return_code); if (cmd->data.setadapterparms.hdr.return_code != SET_ACCESS_CTRL_RC_SUCCESS) QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n", card->gdev->dev.kobj.name, access_ctrl_req->subcmd_code, cmd->data.setadapterparms.hdr.return_code); switch (cmd->data.setadapterparms.hdr.return_code) { case SET_ACCESS_CTRL_RC_SUCCESS: if (card->options.isolation == ISOLATION_MODE_NONE) { dev_info(&card->gdev->dev, "QDIO data 
connection isolation is deactivated\n"); } else { dev_info(&card->gdev->dev, "QDIO data connection isolation is activated\n"); } break; case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already " "deactivated\n", dev_name(&card->gdev->dev)); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already" " activated\n", dev_name(&card->gdev->dev)); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: dev_err(&card->gdev->dev, "Adapter does not " "support QDIO data connection isolation\n"); break; case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: dev_err(&card->gdev->dev, "Adapter is dedicated. " "QDIO data connection isolation not supported\n"); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: dev_err(&card->gdev->dev, "TSO does not permit QDIO data connection isolation\n"); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: dev_err(&card->gdev->dev, "The adjacent switch port does not " "support reflective relay mode\n"); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_REFLREL_FAILED: dev_err(&card->gdev->dev, "The reflective relay mode cannot be " "enabled at the adjacent switch port"); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: dev_warn(&card->gdev->dev, "Turning off reflective relay mode " "at the adjacent switch failed\n"); break; default: /* this should never happen */ if (fallback) card->options.isolation = card->options.prev_isolation; break; } qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); return 0; } static int 
qeth_setadpparms_set_access_ctrl(struct qeth_card *card, enum qeth_ipa_isolation_modes isolation, int fallback) { int rc; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; struct qeth_set_access_ctrl *access_ctrl_req; QETH_CARD_TEXT(card, 4, "setacctl"); QETH_DBF_TEXT_(SETUP, 2, "setacctl"); QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, sizeof(struct qeth_ipacmd_setadpparms_hdr) + sizeof(struct qeth_set_access_ctrl)); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; access_ctrl_req->subcmd_code = isolation; rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, &fallback); QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc); return rc; } int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) { int rc = 0; QETH_CARD_TEXT(card, 4, "setactlo"); if ((card->info.type == QETH_CARD_TYPE_OSD || card->info.type == QETH_CARD_TYPE_OSX) && qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { rc = qeth_setadpparms_set_access_ctrl(card, card->options.isolation, fallback); if (rc) { QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n", card->gdev->dev.kobj.name, rc); rc = -EOPNOTSUPP; } } else if (card->options.isolation != ISOLATION_MODE_NONE) { card->options.isolation = ISOLATION_MODE_NONE; dev_err(&card->gdev->dev, "Adapter does not " "support QDIO data connection isolation\n"); rc = -EOPNOTSUPP; } return rc; } EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online); void qeth_tx_timeout(struct net_device *dev) { struct qeth_card *card; card = dev->ml_priv; QETH_CARD_TEXT(card, 4, "txtimeo"); card->stats.tx_errors++; qeth_schedule_recovery(card); } EXPORT_SYMBOL_GPL(qeth_tx_timeout); int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) { struct qeth_card *card = dev->ml_priv; int rc = 0; switch (regnum) { case MII_BMCR: /* Basic mode control register */ rc = BMCR_FULLDPLX; if 
((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && (card->info.link_type != QETH_LINK_TYPE_OSN) && (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH)) rc |= BMCR_SPEED100; break; case MII_BMSR: /* Basic mode status register */ rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | BMSR_100BASE4; break; case MII_PHYSID1: /* PHYS ID 1 */ rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | dev->dev_addr[2]; rc = (rc >> 5) & 0xFFFF; break; case MII_PHYSID2: /* PHYS ID 2 */ rc = (dev->dev_addr[2] << 10) & 0xFFFF; break; case MII_ADVERTISE: /* Advertisement control reg */ rc = ADVERTISE_ALL; break; case MII_LPA: /* Link partner ability reg */ rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | LPA_100BASE4 | LPA_LPACK; break; case MII_EXPANSION: /* Expansion register */ break; case MII_DCOUNTER: /* disconnect counter */ break; case MII_FCSCOUNTER: /* false carrier counter */ break; case MII_NWAYTEST: /* N-way auto-neg test register */ break; case MII_RERRCOUNTER: /* rx error counter */ rc = card->stats.rx_errors; break; case MII_SREVISION: /* silicon revision */ break; case MII_RESV1: /* reserved 1 */ break; case MII_LBRERROR: /* loopback, rx, bypass error */ break; case MII_PHYADDR: /* physical address */ break; case MII_RESV2: /* reserved 2 */ break; case MII_TPISTATUS: /* TPI status for 10mbps */ break; case MII_NCONFIG: /* network interface config */ break; default: break; } return rc; } EXPORT_SYMBOL_GPL(qeth_mdio_read); static int qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, int len, int (*reply_cb)(struct qeth_card *, struct qeth_reply *, unsigned long), void *reply_param) { u16 s1, s2; QETH_CARD_TEXT(card, 4, "sendsnmp"); memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); /* adjust PDU length fields in IPA_PDU_HEADER */ s1 = (u32) IPA_PDU_HEADER_SIZE + len; s2 
= (u32) len; memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2); memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2); memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2); memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2); return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob, reply_cb, reply_param); } static int qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long sdata) { struct qeth_ipa_cmd *cmd; struct qeth_arp_query_info *qinfo; struct qeth_snmp_cmd *snmp; unsigned char *data; __u16 data_len; QETH_CARD_TEXT(card, 3, "snpcmdcb"); cmd = (struct qeth_ipa_cmd *) sdata; data = (unsigned char *)((char *)cmd - reply->offset); qinfo = (struct qeth_arp_query_info *) reply->param; snmp = &cmd->data.setadapterparms.data.snmp; if (cmd->hdr.return_code) { QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code); return 0; } if (cmd->data.setadapterparms.hdr.return_code) { cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code); return 0; } data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); if (cmd->data.setadapterparms.hdr.seq_no == 1) data_len -= (__u16)((char *)&snmp->data - (char *)cmd); else data_len -= (__u16)((char *)&snmp->request - (char *)cmd); /* check if there is enough room in userspace */ if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM); cmd->hdr.return_code = IPA_RC_ENOMEM; return 0; } QETH_CARD_TEXT_(card, 4, "snore%i", cmd->data.setadapterparms.hdr.used_total); QETH_CARD_TEXT_(card, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no); /*copy entries to user buffer*/ if (cmd->data.setadapterparms.hdr.seq_no == 1) { memcpy(qinfo->udata + qinfo->udata_offset, (char *)snmp, data_len + offsetof(struct qeth_snmp_cmd, data)); qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data); } else { memcpy(qinfo->udata + qinfo->udata_offset, (char *)&snmp->request, data_len); } qinfo->udata_offset += data_len; 
/* check if all replies received ... */ QETH_CARD_TEXT_(card, 4, "srtot%i", cmd->data.setadapterparms.hdr.used_total); QETH_CARD_TEXT_(card, 4, "srseq%i", cmd->data.setadapterparms.hdr.seq_no); if (cmd->data.setadapterparms.hdr.seq_no < cmd->data.setadapterparms.hdr.used_total) return 1; return 0; } int qeth_snmp_command(struct qeth_card *card, char __user *udata) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; struct qeth_snmp_ureq *ureq; unsigned int req_len; struct qeth_arp_query_info qinfo = {0, }; int rc = 0; QETH_CARD_TEXT(card, 3, "snmpcmd"); if (card->info.guestlan) return -EOPNOTSUPP; if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && (!card->options.layer2)) { return -EOPNOTSUPP; } /* skip 4 bytes (data_len struct member) to get req_len */ if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) return -EFAULT; if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE - sizeof(struct qeth_ipacmd_hdr) - sizeof(struct qeth_ipacmd_setadpparms_hdr))) return -EINVAL; ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr)); if (IS_ERR(ureq)) { QETH_CARD_TEXT(card, 2, "snmpnome"); return PTR_ERR(ureq); } qinfo.udata_len = ureq->hdr.data_len; qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); if (!qinfo.udata) { kfree(ureq); return -ENOMEM; } qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, QETH_SNMP_SETADP_CMDLENGTH + req_len); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, qeth_snmp_command_cb, (void *)&qinfo); if (rc) QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n", QETH_CARD_IFNAME(card), rc); else { if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) rc = -EFAULT; } kfree(ureq); kfree(qinfo.udata); return rc; } EXPORT_SYMBOL_GPL(qeth_snmp_command); static int 
qeth_setadpparms_query_oat_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; struct qeth_qoat_priv *priv; char *resdata; int resdatalen; QETH_CARD_TEXT(card, 3, "qoatcb"); cmd = (struct qeth_ipa_cmd *)data; priv = (struct qeth_qoat_priv *)reply->param; resdatalen = cmd->data.setadapterparms.hdr.cmdlength; resdata = (char *)data + 28; if (resdatalen > (priv->buffer_len - priv->response_len)) { cmd->hdr.return_code = IPA_RC_FFFF; return 0; } memcpy((priv->buffer + priv->response_len), resdata, resdatalen); priv->response_len += resdatalen; if (cmd->data.setadapterparms.hdr.seq_no < cmd->data.setadapterparms.hdr.used_total) return 1; return 0; } int qeth_query_oat_command(struct qeth_card *card, char __user *udata) { int rc = 0; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; struct qeth_query_oat *oat_req; struct qeth_query_oat_data oat_data; struct qeth_qoat_priv priv; void __user *tmp; QETH_CARD_TEXT(card, 3, "qoatcmd"); if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { rc = -EOPNOTSUPP; goto out; } if (copy_from_user(&oat_data, udata, sizeof(struct qeth_query_oat_data))) { rc = -EFAULT; goto out; } priv.buffer_len = oat_data.buffer_len; priv.response_len = 0; priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL); if (!priv.buffer) { rc = -ENOMEM; goto out; } iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, sizeof(struct qeth_ipacmd_setadpparms_hdr) + sizeof(struct qeth_query_oat)); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); oat_req = &cmd->data.setadapterparms.data.query_oat; oat_req->subcmd_code = oat_data.command; rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); if (!rc) { if (is_compat_task()) tmp = compat_ptr(oat_data.ptr); else tmp = (void __user *)(unsigned long)oat_data.ptr; if (copy_to_user(tmp, priv.buffer, priv.response_len)) { rc = -EFAULT; goto out_free; } oat_data.response_len = priv.response_len; if (copy_to_user(udata, &oat_data, 
sizeof(struct qeth_query_oat_data))) rc = -EFAULT; } else if (rc == IPA_RC_FFFF) rc = -EFAULT; out_free: kfree(priv.buffer); out: return rc; } EXPORT_SYMBOL_GPL(qeth_query_oat_command); static inline int qeth_get_qdio_q_format(struct qeth_card *card) { switch (card->info.type) { case QETH_CARD_TYPE_IQD: return 2; default: return 0; } } static void qeth_determine_capabilities(struct qeth_card *card) { int rc; int length; char *prcd; struct ccw_device *ddev; int ddev_offline = 0; QETH_DBF_TEXT(SETUP, 2, "detcapab"); ddev = CARD_DDEV(card); if (!ddev->online) { ddev_offline = 1; rc = ccw_device_set_online(ddev); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); goto out; } } rc = qeth_read_conf_data(card, (void **) &prcd, &length); if (rc) { QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", dev_name(&card->gdev->dev), rc); QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_offline; } qeth_configure_unitaddr(card, prcd); if (ddev_offline) qeth_configure_blkt_default(card, prcd); kfree(prcd); rc = qdio_get_ssqd_desc(ddev, &card->ssqd); if (rc) QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt); QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac1); QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac3); QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt); if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { dev_info(&card->gdev->dev, "Completion Queueing supported\n"); } else { card->options.cq = QETH_CQ_NOTAVAILABLE; } out_offline: if (ddev_offline == 1) ccw_device_set_offline(ddev); out: return; } static inline void qeth_qdio_establish_cq(struct qeth_card *card, struct qdio_buffer **in_sbal_ptrs, void (**queue_start_poll) (struct ccw_device *, int, unsigned long)) { int i; if (card->options.cq == QETH_CQ_ENABLED) { int offset = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1); i = QDIO_MAX_BUFFERS_PER_Q * 
(card->qdio.no_in_queues - 1); for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { in_sbal_ptrs[offset + i] = (struct qdio_buffer *) virt_to_phys(card->qdio.c_q->bufs[i].buffer); } queue_start_poll[card->qdio.no_in_queues - 1] = NULL; } } static int qeth_qdio_establish(struct qeth_card *card) { struct qdio_initialize init_data; char *qib_param_field; struct qdio_buffer **in_sbal_ptrs; void (**queue_start_poll) (struct ccw_device *, int, unsigned long); struct qdio_buffer **out_sbal_ptrs; int i, j, k; int rc = 0; QETH_DBF_TEXT(SETUP, 2, "qdioest"); qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char), GFP_KERNEL); if (!qib_param_field) { rc = -ENOMEM; goto out_free_nothing; } qeth_create_qib_param_field(card, qib_param_field); qeth_create_qib_param_field_blkt(card, qib_param_field); in_sbal_ptrs = kzalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q * sizeof(void *), GFP_KERNEL); if (!in_sbal_ptrs) { rc = -ENOMEM; goto out_free_qib_param; } for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { in_sbal_ptrs[i] = (struct qdio_buffer *) virt_to_phys(card->qdio.in_q->bufs[i].buffer); } queue_start_poll = kzalloc(sizeof(void *) * card->qdio.no_in_queues, GFP_KERNEL); if (!queue_start_poll) { rc = -ENOMEM; goto out_free_in_sbals; } for (i = 0; i < card->qdio.no_in_queues; ++i) queue_start_poll[i] = card->discipline->start_poll; qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll); out_sbal_ptrs = kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q * sizeof(void *), GFP_KERNEL); if (!out_sbal_ptrs) { rc = -ENOMEM; goto out_free_queue_start_poll; } for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i) for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) { out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys( card->qdio.out_qs[i]->bufs[j]->buffer); } memset(&init_data, 0, sizeof(struct qdio_initialize)); init_data.cdev = CARD_DDEV(card); init_data.q_format = qeth_get_qdio_q_format(card); init_data.qib_param_field_format = 0; init_data.qib_param_field = 
qib_param_field; init_data.no_input_qs = card->qdio.no_in_queues; init_data.no_output_qs = card->qdio.no_out_queues; init_data.input_handler = card->discipline->input_handler; init_data.output_handler = card->discipline->output_handler; init_data.queue_start_poll_array = queue_start_poll; init_data.int_parm = (unsigned long) card; init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; init_data.output_sbal_state_array = card->qdio.out_bufstates; init_data.scan_threshold = (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32; if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { rc = qdio_allocate(&init_data); if (rc) { atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); goto out; } rc = qdio_establish(&init_data); if (rc) { atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); qdio_free(CARD_DDEV(card)); } } switch (card->options.cq) { case QETH_CQ_ENABLED: dev_info(&card->gdev->dev, "Completion Queue support enabled"); break; case QETH_CQ_DISABLED: dev_info(&card->gdev->dev, "Completion Queue support disabled"); break; default: break; } out: kfree(out_sbal_ptrs); out_free_queue_start_poll: kfree(queue_start_poll); out_free_in_sbals: kfree(in_sbal_ptrs); out_free_qib_param: kfree(qib_param_field); out_free_nothing: return rc; } static void qeth_core_free_card(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 2, "freecrd"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); qeth_clean_channel(&card->read); qeth_clean_channel(&card->write); if (card->dev) free_netdev(card->dev); kfree(card->ip_tbd_list); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); kfree(card); } void qeth_trace_features(struct qeth_card *card) { QETH_CARD_TEXT(card, 2, "features"); QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.supported_funcs); QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.enabled_funcs); QETH_CARD_TEXT_(card, 2, "%x", 
card->options.ipa6.supported_funcs); QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.enabled_funcs); QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.supported_funcs); QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.enabled_funcs); QETH_CARD_TEXT_(card, 2, "%x", card->info.diagass_support); } EXPORT_SYMBOL_GPL(qeth_trace_features); static struct ccw_device_id qeth_ids[] = { {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), .driver_info = QETH_CARD_TYPE_OSD}, {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), .driver_info = QETH_CARD_TYPE_IQD}, {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), .driver_info = QETH_CARD_TYPE_OSN}, {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), .driver_info = QETH_CARD_TYPE_OSM}, {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), .driver_info = QETH_CARD_TYPE_OSX}, {}, }; MODULE_DEVICE_TABLE(ccw, qeth_ids); static struct ccw_driver qeth_ccw_driver = { .driver = { .owner = THIS_MODULE, .name = "qeth", }, .ids = qeth_ids, .probe = ccwgroup_probe_ccwdev, .remove = ccwgroup_remove_ccwdev, }; int qeth_core_hardsetup_card(struct qeth_card *card) { int retries = 3; int rc; QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); atomic_set(&card->force_alloc_skb, 0); qeth_update_from_chp_desc(card); retry: if (retries < 3) QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", dev_name(&card->gdev->dev)); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); rc = ccw_device_set_online(CARD_RDEV(card)); if (rc) goto retriable; rc = ccw_device_set_online(CARD_WDEV(card)); if (rc) goto retriable; rc = ccw_device_set_online(CARD_DDEV(card)); if (rc) goto retriable; rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); retriable: if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break1"); return rc; } else if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); if (--retries < 0) goto out; else goto retry; } qeth_determine_capabilities(card); qeth_init_tokens(card); 
qeth_init_func_level(card); rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break2"); return rc; } else if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); if (--retries < 0) goto out; else goto retry; } rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb); if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break3"); return rc; } else if (rc) { QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); if (--retries < 0) goto out; else goto retry; } card->read_or_write_problem = 0; rc = qeth_mpc_initialize(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out; } card->options.ipa4.supported_funcs = 0; card->options.adp.supported_funcs = 0; card->info.diagass_support = 0; qeth_query_ipassists(card, QETH_PROT_IPV4); if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) qeth_query_setadapterparms(card); if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) qeth_query_setdiagass(card); return 0; out: dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " "an error on the device\n"); QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! 
rc=%d\n", dev_name(&card->gdev->dev), rc); return rc; } EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer, struct qdio_buffer_element *element, struct sk_buff **pskb, int offset, int *pfrag, int data_len) { struct page *page = virt_to_page(element->addr); if (*pskb == NULL) { if (qethbuffer->rx_skb) { /* only if qeth_card.options.cq == QETH_CQ_ENABLED */ *pskb = qethbuffer->rx_skb; qethbuffer->rx_skb = NULL; } else { *pskb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN); if (!(*pskb)) return -ENOMEM; } skb_reserve(*pskb, ETH_HLEN); if (data_len <= QETH_RX_PULL_LEN) { memcpy(skb_put(*pskb, data_len), element->addr + offset, data_len); } else { get_page(page); memcpy(skb_put(*pskb, QETH_RX_PULL_LEN), element->addr + offset, QETH_RX_PULL_LEN); skb_fill_page_desc(*pskb, *pfrag, page, offset + QETH_RX_PULL_LEN, data_len - QETH_RX_PULL_LEN); (*pskb)->data_len += data_len - QETH_RX_PULL_LEN; (*pskb)->len += data_len - QETH_RX_PULL_LEN; (*pskb)->truesize += data_len - QETH_RX_PULL_LEN; (*pfrag)++; } } else { get_page(page); skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len); (*pskb)->data_len += data_len; (*pskb)->len += data_len; (*pskb)->truesize += data_len; (*pfrag)++; } return 0; } struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, struct qeth_qdio_buffer *qethbuffer, struct qdio_buffer_element **__element, int *__offset, struct qeth_hdr **hdr) { struct qdio_buffer_element *element = *__element; struct qdio_buffer *buffer = qethbuffer->buffer; int offset = *__offset; struct sk_buff *skb = NULL; int skb_len = 0; void *data_ptr; int data_len; int headroom = 0; int use_rx_sg = 0; int frag = 0; /* qeth_hdr must not cross element boundaries */ if (element->length < offset + sizeof(struct qeth_hdr)) { if (qeth_is_last_sbale(element)) return NULL; element++; offset = 0; if (element->length < sizeof(struct qeth_hdr)) return NULL; } *hdr = element->addr + offset; offset += sizeof(struct 
qeth_hdr); switch ((*hdr)->hdr.l2.id) { case QETH_HEADER_TYPE_LAYER2: skb_len = (*hdr)->hdr.l2.pkt_length; break; case QETH_HEADER_TYPE_LAYER3: skb_len = (*hdr)->hdr.l3.length; headroom = ETH_HLEN; break; case QETH_HEADER_TYPE_OSN: skb_len = (*hdr)->hdr.osn.pdu_length; headroom = sizeof(struct qeth_hdr); break; default: break; } if (!skb_len) return NULL; if (((skb_len >= card->options.rx_sg_cb) && (!(card->info.type == QETH_CARD_TYPE_OSN)) && (!atomic_read(&card->force_alloc_skb))) || (card->options.cq == QETH_CQ_ENABLED)) { use_rx_sg = 1; } else { skb = dev_alloc_skb(skb_len + headroom); if (!skb) goto no_mem; if (headroom) skb_reserve(skb, headroom); } data_ptr = element->addr + offset; while (skb_len) { data_len = min(skb_len, (int)(element->length - offset)); if (data_len) { if (use_rx_sg) { if (qeth_create_skb_frag(qethbuffer, element, &skb, offset, &frag, data_len)) goto no_mem; } else { memcpy(skb_put(skb, data_len), data_ptr, data_len); } } skb_len -= data_len; if (skb_len) { if (qeth_is_last_sbale(element)) { QETH_CARD_TEXT(card, 4, "unexeob"); QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); dev_kfree_skb_any(skb); card->stats.rx_errors++; return NULL; } element++; offset = 0; data_ptr = element->addr; } else { offset += data_len; } } *__element = element; *__offset = offset; if (use_rx_sg && card->options.performance_stats) { card->perf_stats.sg_skbs_rx++; card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags; } return skb; no_mem: if (net_ratelimit()) { QETH_CARD_TEXT(card, 2, "noskbmem"); } card->stats.rx_dropped++; return NULL; } EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); static void qeth_unregister_dbf_views(void) { int x; for (x = 0; x < QETH_DBF_INFOS; x++) { debug_unregister(qeth_dbf[x].id); qeth_dbf[x].id = NULL; } } void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 
{ char dbf_txt_buf[32]; va_list args; if (level > id->level) return; va_start(args, fmt); vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); va_end(args); debug_text_event(id, level, dbf_txt_buf); } EXPORT_SYMBOL_GPL(qeth_dbf_longtext); static int qeth_register_dbf_views(void) { int ret; int x; for (x = 0; x < QETH_DBF_INFOS; x++) { /* register the areas */ qeth_dbf[x].id = debug_register(qeth_dbf[x].name, qeth_dbf[x].pages, qeth_dbf[x].areas, qeth_dbf[x].len); if (qeth_dbf[x].id == NULL) { qeth_unregister_dbf_views(); return -ENOMEM; } /* register a view */ ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); if (ret) { qeth_unregister_dbf_views(); return ret; } /* set a passing level */ debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); } return 0; } int qeth_core_load_discipline(struct qeth_card *card, enum qeth_discipline_id discipline) { int rc = 0; mutex_lock(&qeth_mod_mutex); switch (discipline) { case QETH_DISCIPLINE_LAYER3: card->discipline = try_then_request_module( symbol_get(qeth_l3_discipline), "qeth_l3"); break; case QETH_DISCIPLINE_LAYER2: card->discipline = try_then_request_module( symbol_get(qeth_l2_discipline), "qeth_l2"); break; } if (!card->discipline) { dev_err(&card->gdev->dev, "There is no kernel module to " "support discipline %d\n", discipline); rc = -EINVAL; } mutex_unlock(&qeth_mod_mutex); return rc; } void qeth_core_free_discipline(struct qeth_card *card) { if (card->options.layer2) symbol_put(qeth_l2_discipline); else symbol_put(qeth_l3_discipline); card->discipline = NULL; } static const struct device_type qeth_generic_devtype = { .name = "qeth_generic", .groups = qeth_generic_attr_groups, }; static const struct device_type qeth_osn_devtype = { .name = "qeth_osn", .groups = qeth_osn_attr_groups, }; #define DBF_NAME_LEN 20 struct qeth_dbf_entry { char dbf_name[DBF_NAME_LEN]; debug_info_t *dbf_info; struct list_head dbf_list; }; static LIST_HEAD(qeth_dbf_list); static DEFINE_MUTEX(qeth_dbf_list_mutex); static debug_info_t 
*qeth_get_dbf_entry(char *name) { struct qeth_dbf_entry *entry; debug_info_t *rc = NULL; mutex_lock(&qeth_dbf_list_mutex); list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { if (strcmp(entry->dbf_name, name) == 0) { rc = entry->dbf_info; break; } } mutex_unlock(&qeth_dbf_list_mutex); return rc; } static int qeth_add_dbf_entry(struct qeth_card *card, char *name) { struct qeth_dbf_entry *new_entry; card->debug = debug_register(name, 2, 1, 8); if (!card->debug) { QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); goto err; } if (debug_register_view(card->debug, &debug_hex_ascii_view)) goto err_dbg; new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); if (!new_entry) goto err_dbg; strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); new_entry->dbf_info = card->debug; mutex_lock(&qeth_dbf_list_mutex); list_add(&new_entry->dbf_list, &qeth_dbf_list); mutex_unlock(&qeth_dbf_list_mutex); return 0; err_dbg: debug_unregister(card->debug); err: return -ENOMEM; } static void qeth_clear_dbf_list(void) { struct qeth_dbf_entry *entry, *tmp; mutex_lock(&qeth_dbf_list_mutex); list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { list_del(&entry->dbf_list); debug_unregister(entry->dbf_info); kfree(entry); } mutex_unlock(&qeth_dbf_list_mutex); } static int qeth_core_probe_device(struct ccwgroup_device *gdev) { struct qeth_card *card; struct device *dev; int rc; unsigned long flags; char dbf_name[DBF_NAME_LEN]; QETH_DBF_TEXT(SETUP, 2, "probedev"); dev = &gdev->dev; if (!get_device(dev)) return -ENODEV; QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); card = qeth_alloc_card(); if (!card) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); rc = -ENOMEM; goto err_dev; } snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", dev_name(&gdev->dev)); card->debug = qeth_get_dbf_entry(dbf_name); if (!card->debug) { rc = qeth_add_dbf_entry(card, dbf_name); if (rc) goto err_card; } card->read.ccwdev = gdev->cdev[0]; card->write.ccwdev = gdev->cdev[1]; card->data.ccwdev = gdev->cdev[2]; 
dev_set_drvdata(&gdev->dev, card); card->gdev = gdev; gdev->cdev[0]->handler = qeth_irq; gdev->cdev[1]->handler = qeth_irq; gdev->cdev[2]->handler = qeth_irq; rc = qeth_determine_card_type(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); goto err_card; } rc = qeth_setup_card(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); goto err_card; } if (card->info.type == QETH_CARD_TYPE_OSN) gdev->dev.type = &qeth_osn_devtype; else gdev->dev.type = &qeth_generic_devtype; switch (card->info.type) { case QETH_CARD_TYPE_OSN: case QETH_CARD_TYPE_OSM: rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2); if (rc) goto err_card; rc = card->discipline->setup(card->gdev); if (rc) goto err_disc; case QETH_CARD_TYPE_OSD: case QETH_CARD_TYPE_OSX: default: break; } write_lock_irqsave(&qeth_core_card_list.rwlock, flags); list_add_tail(&card->list, &qeth_core_card_list.list); write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); qeth_determine_capabilities(card); return 0; err_disc: qeth_core_free_discipline(card); err_card: qeth_core_free_card(card); err_dev: put_device(dev); return rc; } static void qeth_core_remove_device(struct ccwgroup_device *gdev) { unsigned long flags; struct qeth_card *card = dev_get_drvdata(&gdev->dev); QETH_DBF_TEXT(SETUP, 2, "removedv"); if (card->discipline) { card->discipline->remove(gdev); qeth_core_free_discipline(card); } write_lock_irqsave(&qeth_core_card_list.rwlock, flags); list_del(&card->list); write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); qeth_core_free_card(card); dev_set_drvdata(&gdev->dev, NULL); put_device(&gdev->dev); return; } static int qeth_core_set_online(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc = 0; int def_discipline; if (!card->discipline) { if (card->info.type == QETH_CARD_TYPE_IQD) def_discipline = QETH_DISCIPLINE_LAYER3; else def_discipline = QETH_DISCIPLINE_LAYER2; rc = qeth_core_load_discipline(card, def_discipline); if (rc) goto err; rc 
= card->discipline->setup(card->gdev); if (rc) goto err; } rc = card->discipline->set_online(gdev); err: return rc; } static int qeth_core_set_offline(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); return card->discipline->set_offline(gdev); } static void qeth_core_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline && card->discipline->shutdown) card->discipline->shutdown(gdev); } static int qeth_core_prepare(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline && card->discipline->prepare) return card->discipline->prepare(gdev); return 0; } static void qeth_core_complete(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline && card->discipline->complete) card->discipline->complete(gdev); } static int qeth_core_freeze(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline && card->discipline->freeze) return card->discipline->freeze(gdev); return 0; } static int qeth_core_thaw(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline && card->discipline->thaw) return card->discipline->thaw(gdev); return 0; } static int qeth_core_restore(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline && card->discipline->restore) return card->discipline->restore(gdev); return 0; } static struct ccwgroup_driver qeth_core_ccwgroup_driver = { .driver = { .owner = THIS_MODULE, .name = "qeth", }, .setup = qeth_core_probe_device, .remove = qeth_core_remove_device, .set_online = qeth_core_set_online, .set_offline = qeth_core_set_offline, .shutdown = qeth_core_shutdown, .prepare = qeth_core_prepare, .complete = qeth_core_complete, .freeze = qeth_core_freeze, .thaw = qeth_core_thaw, .restore = qeth_core_restore, }; static 
ssize_t qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf, size_t count) { int err; err = ccwgroup_create_dev(qeth_core_root_dev, &qeth_core_ccwgroup_driver, 3, buf); return err ? err : count; } static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store); static struct attribute *qeth_drv_attrs[] = { &driver_attr_group.attr, NULL, }; static struct attribute_group qeth_drv_attr_group = { .attrs = qeth_drv_attrs, }; static const struct attribute_group *qeth_drv_attr_groups[] = { &qeth_drv_attr_group, NULL, }; static struct { const char str[ETH_GSTRING_LEN]; } qeth_ethtool_stats_keys[] = { /* 0 */{"rx skbs"}, {"rx buffers"}, {"tx skbs"}, {"tx buffers"}, {"tx skbs no packing"}, {"tx buffers no packing"}, {"tx skbs packing"}, {"tx buffers packing"}, {"tx sg skbs"}, {"tx sg frags"}, /* 10 */{"rx sg skbs"}, {"rx sg frags"}, {"rx sg page allocs"}, {"tx large kbytes"}, {"tx large count"}, {"tx pk state ch n->p"}, {"tx pk state ch p->n"}, {"tx pk watermark low"}, {"tx pk watermark high"}, {"queue 0 buffer usage"}, /* 20 */{"queue 1 buffer usage"}, {"queue 2 buffer usage"}, {"queue 3 buffer usage"}, {"rx poll time"}, {"rx poll count"}, {"rx do_QDIO time"}, {"rx do_QDIO count"}, {"tx handler time"}, {"tx handler count"}, {"tx time"}, /* 30 */{"tx count"}, {"tx do_QDIO time"}, {"tx do_QDIO count"}, {"tx csum"}, {"tx lin"}, {"cq handler count"}, {"cq handler time"} }; int qeth_core_get_sset_count(struct net_device *dev, int stringset) { switch (stringset) { case ETH_SS_STATS: return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN); default: return -EINVAL; } } EXPORT_SYMBOL_GPL(qeth_core_get_sset_count); void qeth_core_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct qeth_card *card = dev->ml_priv; data[0] = card->stats.rx_packets - card->perf_stats.initial_rx_packets; data[1] = card->perf_stats.bufs_rec; data[2] = card->stats.tx_packets - card->perf_stats.initial_tx_packets; data[3] = 
card->perf_stats.bufs_sent; data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets - card->perf_stats.skbs_sent_pack; data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack; data[6] = card->perf_stats.skbs_sent_pack; data[7] = card->perf_stats.bufs_sent_pack; data[8] = card->perf_stats.sg_skbs_sent; data[9] = card->perf_stats.sg_frags_sent; data[10] = card->perf_stats.sg_skbs_rx; data[11] = card->perf_stats.sg_frags_rx; data[12] = card->perf_stats.sg_alloc_page_rx; data[13] = (card->perf_stats.large_send_bytes >> 10); data[14] = card->perf_stats.large_send_cnt; data[15] = card->perf_stats.sc_dp_p; data[16] = card->perf_stats.sc_p_dp; data[17] = QETH_LOW_WATERMARK_PACK; data[18] = QETH_HIGH_WATERMARK_PACK; data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers); data[20] = (card->qdio.no_out_queues > 1) ? atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0; data[21] = (card->qdio.no_out_queues > 2) ? atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0; data[22] = (card->qdio.no_out_queues > 3) ? 
atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0; data[23] = card->perf_stats.inbound_time; data[24] = card->perf_stats.inbound_cnt; data[25] = card->perf_stats.inbound_do_qdio_time; data[26] = card->perf_stats.inbound_do_qdio_cnt; data[27] = card->perf_stats.outbound_handler_time; data[28] = card->perf_stats.outbound_handler_cnt; data[29] = card->perf_stats.outbound_time; data[30] = card->perf_stats.outbound_cnt; data[31] = card->perf_stats.outbound_do_qdio_time; data[32] = card->perf_stats.outbound_do_qdio_cnt; data[33] = card->perf_stats.tx_csum; data[34] = card->perf_stats.tx_lin; data[35] = card->perf_stats.cq_cnt; data[36] = card->perf_stats.cq_time; } EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_STATS: memcpy(data, &qeth_ethtool_stats_keys, sizeof(qeth_ethtool_stats_keys)); break; default: WARN_ON(1); break; } } EXPORT_SYMBOL_GPL(qeth_core_get_strings); void qeth_core_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct qeth_card *card = dev->ml_priv; strlcpy(info->driver, card->options.layer2 ? 
"qeth_l2" : "qeth_l3", sizeof(info->driver)); strlcpy(info->version, "1.0", sizeof(info->version)); strlcpy(info->fw_version, card->info.mcl_level, sizeof(info->fw_version)); snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s", CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card)); } EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); int qeth_core_ethtool_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct qeth_card *card = netdev->ml_priv; enum qeth_link_types link_type; if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) link_type = QETH_LINK_TYPE_10GBIT_ETH; else link_type = card->info.link_type; ecmd->transceiver = XCVR_INTERNAL; ecmd->supported = SUPPORTED_Autoneg; ecmd->advertising = ADVERTISED_Autoneg; ecmd->duplex = DUPLEX_FULL; ecmd->autoneg = AUTONEG_ENABLE; switch (link_type) { case QETH_LINK_TYPE_FAST_ETH: case QETH_LINK_TYPE_LANE_ETH100: ecmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_TP; ecmd->advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_TP; ecmd->speed = SPEED_100; ecmd->port = PORT_TP; break; case QETH_LINK_TYPE_GBIT_ETH: case QETH_LINK_TYPE_LANE_ETH1000: ecmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE; ecmd->speed = SPEED_1000; ecmd->port = PORT_FIBRE; break; case QETH_LINK_TYPE_10GBIT_ETH: ecmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | 
SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE; ecmd->speed = SPEED_10000; ecmd->port = PORT_FIBRE; break; default: ecmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_TP; ecmd->advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_TP; ecmd->speed = SPEED_10; ecmd->port = PORT_TP; } return 0; } EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings); static int __init qeth_core_init(void) { int rc; pr_info("loading core functions\n"); INIT_LIST_HEAD(&qeth_core_card_list.list); INIT_LIST_HEAD(&qeth_dbf_list); rwlock_init(&qeth_core_card_list.rwlock); mutex_init(&qeth_mod_mutex); qeth_wq = create_singlethread_workqueue("qeth_wq"); rc = qeth_register_dbf_views(); if (rc) goto out_err; qeth_core_root_dev = root_device_register("qeth"); rc = IS_ERR(qeth_core_root_dev) ? 
PTR_ERR(qeth_core_root_dev) : 0; if (rc) goto register_err; qeth_core_header_cache = kmem_cache_create("qeth_hdr", sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL); if (!qeth_core_header_cache) { rc = -ENOMEM; goto slab_err; } qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf", sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL); if (!qeth_qdio_outbuf_cache) { rc = -ENOMEM; goto cqslab_err; } rc = ccw_driver_register(&qeth_ccw_driver); if (rc) goto ccw_err; qeth_core_ccwgroup_driver.driver.groups = qeth_drv_attr_groups; rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); if (rc) goto ccwgroup_err; return 0; ccwgroup_err: ccw_driver_unregister(&qeth_ccw_driver); ccw_err: kmem_cache_destroy(qeth_qdio_outbuf_cache); cqslab_err: kmem_cache_destroy(qeth_core_header_cache); slab_err: root_device_unregister(qeth_core_root_dev); register_err: qeth_unregister_dbf_views(); out_err: pr_err("Initializing the qeth device driver failed\n"); return rc; } static void __exit qeth_core_exit(void) { qeth_clear_dbf_list(); destroy_workqueue(qeth_wq); ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); ccw_driver_unregister(&qeth_ccw_driver); kmem_cache_destroy(qeth_qdio_outbuf_cache); kmem_cache_destroy(qeth_core_header_cache); root_device_unregister(qeth_core_root_dev); qeth_unregister_dbf_views(); pr_info("core functions removed\n"); } module_init(qeth_core_init); module_exit(qeth_core_exit); MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); MODULE_DESCRIPTION("qeth core functions"); MODULE_LICENSE("GPL");
gpl-2.0
pgielda/linux-renesas
drivers/input/keyboard/opencores-kbd.c
2246
4205
/*
 * OpenCores Keyboard Controller Driver
 * http://www.opencores.org/project,keyboardcontroller
 *
 * Copyright 2007-2009 HV Sistemas S.L.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Per-device state: input device, MMIO window, IRQ and the keymap. */
struct opencores_kbd {
	struct input_dev *input;
	struct resource *addr_res;	/* MMIO region, kept for release on remove */
	void __iomem *addr;		/* mapped scancode register */
	int irq;
	unsigned short keycodes[128];	/* identity scancode->keycode table */
};

/*
 * Interrupt handler: the controller delivers one byte per key event.
 * Bit 7 set means key release; bits 6..0 carry the scancode, which
 * matches the KEY_* values directly (see the keymap loop in probe).
 */
static irqreturn_t opencores_kbd_isr(int irq, void *dev_id)
{
	struct opencores_kbd *opencores_kbd = dev_id;
	struct input_dev *input = opencores_kbd->input;
	unsigned char c;

	c = readb(opencores_kbd->addr);
	input_report_key(input, c & 0x7f, c & 0x80 ? 0 : 1);
	input_sync(input);

	return IRQ_HANDLED;
}

/*
 * Probe: map the controller's register window, set up an input device
 * with an identity keymap, and claim the rising-edge interrupt.
 */
static int opencores_kbd_probe(struct platform_device *pdev)
{
	struct input_dev *input;
	struct opencores_kbd *opencores_kbd;
	struct resource *res;
	int irq, i, error;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "missing board memory resource\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "missing board IRQ resource\n");
		return -EINVAL;
	}

	opencores_kbd = kzalloc(sizeof(*opencores_kbd), GFP_KERNEL);
	input = input_allocate_device();
	if (!opencores_kbd || !input) {
		dev_err(&pdev->dev, "failed to allocate device structures\n");
		error = -ENOMEM;
		goto err_free_mem;
	}

	/* Keep the original resource; 'res' is reused for the claimed region. */
	opencores_kbd->addr_res = res;
	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!res) {
		dev_err(&pdev->dev, "failed to request I/O memory\n");
		error = -EBUSY;
		goto err_free_mem;
	}

	opencores_kbd->addr = ioremap(res->start, resource_size(res));
	if (!opencores_kbd->addr) {
		dev_err(&pdev->dev, "failed to remap I/O memory\n");
		error = -ENXIO;
		goto err_rel_mem;
	}

	opencores_kbd->input = input;
	opencores_kbd->irq = irq;

	input->name = pdev->name;
	input->phys = "opencores-kbd/input0";
	input->dev.parent = &pdev->dev;

	input_set_drvdata(input, opencores_kbd);

	input->id.bustype = BUS_HOST;
	input->id.vendor = 0x0001;
	input->id.product = 0x0001;
	input->id.version = 0x0100;

	input->keycode = opencores_kbd->keycodes;
	input->keycodesize = sizeof(opencores_kbd->keycodes[0]);
	input->keycodemax = ARRAY_SIZE(opencores_kbd->keycodes);

	__set_bit(EV_KEY, input->evbit);

	for (i = 0; i < ARRAY_SIZE(opencores_kbd->keycodes); i++) {
		/*
		 * OpenCores controller happens to have scancodes match
		 * our KEY_* definitions.
		 */
		opencores_kbd->keycodes[i] = i;
		__set_bit(opencores_kbd->keycodes[i], input->keybit);
	}
	__clear_bit(KEY_RESERVED, input->keybit);

	error = request_irq(irq, &opencores_kbd_isr,
			    IRQF_TRIGGER_RISING, pdev->name, opencores_kbd);
	if (error) {
		dev_err(&pdev->dev, "unable to claim irq %d\n", irq);
		goto err_unmap_mem;
	}

	error = input_register_device(input);
	if (error) {
		dev_err(&pdev->dev, "unable to register input device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, opencores_kbd);

	return 0;

	/* Unwind in reverse order of acquisition. */
 err_free_irq:
	free_irq(irq, opencores_kbd);
 err_unmap_mem:
	iounmap(opencores_kbd->addr);
 err_rel_mem:
	release_mem_region(res->start, resource_size(res));
 err_free_mem:
	input_free_device(input);
	kfree(opencores_kbd);

	return error;
}

/* Remove: release resources in reverse order of probe. */
static int opencores_kbd_remove(struct platform_device *pdev)
{
	struct opencores_kbd *opencores_kbd = platform_get_drvdata(pdev);

	free_irq(opencores_kbd->irq, opencores_kbd);

	iounmap(opencores_kbd->addr);
	release_mem_region(opencores_kbd->addr_res->start,
			   resource_size(opencores_kbd->addr_res));
	input_unregister_device(opencores_kbd->input);
	kfree(opencores_kbd);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver opencores_kbd_device_driver = {
	.probe = opencores_kbd_probe,
	.remove = opencores_kbd_remove,
	.driver = {
		.name = "opencores-kbd",
	},
};
module_platform_driver(opencores_kbd_device_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Herrero <jherrero@hvsistemas.es>");
MODULE_DESCRIPTION("Keyboard driver for OpenCores Keyboard Controller");
gpl-2.0
rassillon/android_kernel_samsung_grandneove3g
arch/x86/mm/memtest.c
2246
3060
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/memblock.h>

static u64 patterns[] __initdata = {
	/* The first entry has to be 0 to leave memtest with zeroed memory */
	0,
	0xffffffffffffffffULL,
	0x5555555555555555ULL,
	0xaaaaaaaaaaaaaaaaULL,
	0x1111111111111111ULL,
	0x2222222222222222ULL,
	0x4444444444444444ULL,
	0x8888888888888888ULL,
	0x3333333333333333ULL,
	0x6666666666666666ULL,
	0x9999999999999999ULL,
	0xccccccccccccccccULL,
	0x7777777777777777ULL,
	0xbbbbbbbbbbbbbbbbULL,
	0xddddddddddddddddULL,
	0xeeeeeeeeeeeeeeeeULL,
	0x7a6c7258554e494cULL, /* yeah ;-) */
};

/* Log a faulty range and reserve it in memblock so it is never handed out. */
static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
{
	printk(KERN_INFO " %016llx bad mem addr %010llx - %010llx reserved\n",
	       (unsigned long long) pattern,
	       (unsigned long long) start_bad,
	       (unsigned long long) end_bad);
	memblock_reserve(start_bad, end_bad - start_bad);
}

/*
 * Fill [start_phys, start_phys + size) with @pattern in 8-byte words,
 * read it back, and reserve any contiguous range that fails to hold
 * the pattern.
 */
static void __init memtest(u64 pattern, u64 start_phys, u64 size)
{
	u64 *p, *start, *end;
	u64 start_bad, last_bad;
	u64 start_phys_aligned;
	const size_t incr = sizeof(pattern);

	/* Test from a pattern-aligned start so each step is one full word. */
	start_phys_aligned = ALIGN(start_phys, incr);
	start = __va(start_phys_aligned);
	end = start + (size - (start_phys_aligned - start_phys)) / incr;
	start_bad = 0;
	last_bad = 0;

	for (p = start; p < end; p++)
		*p = pattern;

	for (p = start; p < end; p++, start_phys_aligned += incr) {
		if (*p == pattern)
			continue;
		/* Extend the current bad range if this word is adjacent. */
		if (start_phys_aligned == last_bad + incr) {
			last_bad += incr;
			continue;
		}
		/* Otherwise flush the previous bad range and start a new one. */
		if (start_bad)
			reserve_bad_mem(pattern, start_bad, last_bad + incr);
		start_bad = last_bad = start_phys_aligned;
	}
	/* Flush the trailing bad range, if any. */
	if (start_bad)
		reserve_bad_mem(pattern, start_bad, last_bad + incr);
}

/* Run one pattern over every free memblock range, clamped to [start, end). */
static void __init do_one_pass(u64 pattern, u64 start, u64 end)
{
	u64 i;
	phys_addr_t this_start, this_end;

	for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
		this_start = clamp_t(phys_addr_t, this_start, start, end);
		this_end = clamp_t(phys_addr_t, this_end, start, end);
		if (this_start < this_end) {
			printk(KERN_INFO " %010llx - %010llx pattern %016llx\n",
			       (unsigned long long)this_start,
			       (unsigned long long)this_end,
			       (unsigned long long)cpu_to_be64(pattern));
			memtest(pattern, this_start, this_end - this_start);
		}
	}
}

/* default is disabled */
static int memtest_pattern __initdata;

/* "memtest" alone enables all patterns; "memtest=N" runs N passes. */
static int __init parse_memtest(char *arg)
{
	if (arg)
		memtest_pattern = simple_strtoul(arg, NULL, 0);
	else
		memtest_pattern = ARRAY_SIZE(patterns);

	return 0;
}

early_param("memtest", parse_memtest);

/*
 * Entry point, run during early boot before the page allocator is up.
 * Executes memtest_pattern passes, cycling through the pattern table.
 * The count-down loop terminates via unsigned wraparound (i goes from
 * 0 to UINT_MAX after the last pass), so the final pass uses
 * patterns[0] == 0 and leaves the tested memory zeroed.
 */
void __init early_memtest(unsigned long start, unsigned long end)
{
	unsigned int i;
	unsigned int idx = 0;

	if (!memtest_pattern)
		return;

	printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
	for (i = memtest_pattern-1; i < UINT_MAX; --i) {
		idx = i % ARRAY_SIZE(patterns);
		do_one_pass(patterns[idx], start, end);
	}
}
gpl-2.0
mitwo-dev/android_kernel_xiaomi_msm8960
scripts/dtc/dtc.c
2502
6942
/*
 * (C) Copyright David Gibson <dwg@au1.ibm.com>, IBM Corporation. 2005.
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include "dtc.h"
#include "srcpos.h"
#include "version_gen.h"

/*
 * Command line options
 */
int quiet;		/* Level of quietness */
int reservenum;		/* Number of memory reservation slots */
int minsize;		/* Minimum blob size */
int padsize;		/* Additional padding to blob */
int phandle_format = PHANDLE_BOTH;	/* Use linux,phandle or phandle properties */

/*
 * Recursively compute each node's full path (prefix + name) and the
 * length of its base name (the part before any '@' unit address).
 */
static void fill_fullpaths(struct node *tree, const char *prefix)
{
	struct node *child;
	const char *unit;

	tree->fullpath = join_path(prefix, tree->name);

	unit = strchr(tree->name, '@');
	if (unit)
		tree->basenamelen = unit - tree->name;
	else
		tree->basenamelen = strlen(tree->name);

	for_each_child(tree, child)
		fill_fullpaths(child, tree->fullpath);
}

/* Print command-line help to stderr and exit with status 3. */
static void __attribute__ ((noreturn)) usage(void)
{
	fprintf(stderr, "Usage:\n");
	fprintf(stderr, "\tdtc [options] <input file>\n");
	fprintf(stderr, "\nOptions:\n");
	fprintf(stderr, "\t-h\n");
	fprintf(stderr, "\t\tThis help text\n");
	fprintf(stderr, "\t-q\n");
	fprintf(stderr, "\t\tQuiet: -q suppress warnings, -qq errors, -qqq all\n");
	fprintf(stderr, "\t-I <input format>\n");
	fprintf(stderr, "\t\tInput formats are:\n");
	fprintf(stderr, "\t\t\tdts - device tree source text\n");
	fprintf(stderr, "\t\t\tdtb - device tree blob\n");
	fprintf(stderr, "\t\t\tfs - /proc/device-tree style directory\n");
	fprintf(stderr, "\t-o <output file>\n");
	fprintf(stderr, "\t-O <output format>\n");
	fprintf(stderr, "\t\tOutput formats are:\n");
	fprintf(stderr, "\t\t\tdts - device tree source text\n");
	fprintf(stderr, "\t\t\tdtb - device tree blob\n");
	fprintf(stderr, "\t\t\tasm - assembler source\n");
	fprintf(stderr, "\t-V <output version>\n");
	fprintf(stderr, "\t\tBlob version to produce, defaults to %d (relevant for dtb\n\t\tand asm output only)\n", DEFAULT_FDT_VERSION);
	fprintf(stderr, "\t-d <output dependency file>\n");
	fprintf(stderr, "\t-R <number>\n");
	fprintf(stderr, "\t\tMake space for <number> reserve map entries (relevant for \n\t\tdtb and asm output only)\n");
	fprintf(stderr, "\t-S <bytes>\n");
	fprintf(stderr, "\t\tMake the blob at least <bytes> long (extra space)\n");
	fprintf(stderr, "\t-p <bytes>\n");
	fprintf(stderr, "\t\tAdd padding to the blob of <bytes> long (extra space)\n");
	fprintf(stderr, "\t-b <number>\n");
	fprintf(stderr, "\t\tSet the physical boot cpu\n");
	fprintf(stderr, "\t-f\n");
	fprintf(stderr, "\t\tForce - try to produce output even if the input tree has errors\n");
	fprintf(stderr, "\t-s\n");
	fprintf(stderr, "\t\tSort nodes and properties before outputting (only useful for\n\t\tcomparing trees)\n");
	fprintf(stderr, "\t-v\n");
	fprintf(stderr, "\t\tPrint DTC version and exit\n");
	fprintf(stderr, "\t-H <phandle format>\n");
	fprintf(stderr, "\t\tphandle formats are:\n");
	fprintf(stderr, "\t\t\tlegacy - \"linux,phandle\" properties only\n");
	fprintf(stderr, "\t\t\tepapr - \"phandle\" properties only\n");
	fprintf(stderr, "\t\t\tboth - Both \"linux,phandle\" and \"phandle\" properties\n");
	exit(3);
}

/*
 * Parse options, read the tree from the chosen input format, run checks,
 * optionally sort, and emit the tree in the chosen output format.
 */
int main(int argc, char *argv[])
{
	struct boot_info *bi;
	const char *inform = "dts";
	const char *outform = "dts";
	const char *outname = "-";
	const char *depname = NULL;
	int force = 0, sort = 0;
	const char *arg;
	int opt;
	FILE *outf = NULL;
	int outversion = DEFAULT_FDT_VERSION;
	long long cmdline_boot_cpuid = -1;

	quiet = 0;
	reservenum = 0;
	minsize = 0;
	padsize = 0;

	/*
	 * NOTE(review): 'c' appears in the optstring but has no case below,
	 * so -c is silently accepted and ignored — confirm intended.
	 */
	while ((opt = getopt(argc, argv, "hI:O:o:V:d:R:S:p:fcqb:vH:s")) != EOF) {
		switch (opt) {
		case 'I':
			inform = optarg;
			break;
		case 'O':
			outform = optarg;
			break;
		case 'o':
			outname = optarg;
			break;
		case 'V':
			outversion = strtol(optarg, NULL, 0);
			break;
		case 'd':
			depname = optarg;
			break;
		case 'R':
			reservenum = strtol(optarg, NULL, 0);
			break;
		case 'S':
			minsize = strtol(optarg, NULL, 0);
			break;
		case 'p':
			padsize = strtol(optarg, NULL, 0);
			break;
		case 'f':
			force = 1;
			break;
		case 'q':
			quiet++;
			break;
		case 'b':
			cmdline_boot_cpuid = strtoll(optarg, NULL, 0);
			break;
		case 'v':
			printf("Version: %s\n", DTC_VERSION);
			exit(0);
		case 'H':
			if (streq(optarg, "legacy"))
				phandle_format = PHANDLE_LEGACY;
			else if (streq(optarg, "epapr"))
				phandle_format = PHANDLE_EPAPR;
			else if (streq(optarg, "both"))
				phandle_format = PHANDLE_BOTH;
			else
				die("Invalid argument \"%s\" to -H option\n",
				    optarg);
			break;
		case 's':
			sort = 1;
			break;
		case 'h':
		default:
			usage();
		}
	}

	/* Exactly one input file; "-" (stdin) if none given. */
	if (argc > (optind+1))
		usage();
	else if (argc < (optind+1))
		arg = "-";
	else
		arg = argv[optind];

	/* minsize and padsize are mutually exclusive */
	if (minsize && padsize)
		die("Can't set both -p and -S\n");

	if (minsize)
		fprintf(stderr, "DTC: Use of \"-S\" is deprecated; it will be removed soon, use \"-p\" instead\n");

	fprintf(stderr, "DTC: %s->%s on file \"%s\"\n",
		inform, outform, arg);

	if (depname) {
		depfile = fopen(depname, "w");
		if (!depfile)
			die("Couldn't open dependency file %s: %s\n", depname,
			    strerror(errno));
		fprintf(depfile, "%s:", outname);
	}

	if (streq(inform, "dts"))
		bi = dt_from_source(arg);
	else if (streq(inform, "fs"))
		bi = dt_from_fs(arg);
	else if(streq(inform, "dtb"))
		bi = dt_from_blob(arg);
	else
		die("Unknown input format \"%s\"\n", inform);

	if (depfile) {
		fputc('\n', depfile);
		fclose(depfile);
	}

	if (cmdline_boot_cpuid != -1)
		bi->boot_cpuid_phys = cmdline_boot_cpuid;

	fill_fullpaths(bi->dt, "");
	process_checks(force, bi);

	if (sort)
		sort_tree(bi);

	if (streq(outname, "-")) {
		outf = stdout;
	} else {
		outf = fopen(outname, "w");
		if (! outf)
			die("Couldn't open output file %s: %s\n",
			    outname, strerror(errno));
	}

	if (streq(outform, "dts")) {
		dt_to_source(outf, bi);
	} else if (streq(outform, "dtb")) {
		dt_to_blob(outf, bi, outversion);
	} else if (streq(outform, "asm")) {
		dt_to_asm(outf, bi, outversion);
	} else if (streq(outform, "null")) {
		/* do nothing */
	} else {
		die("Unknown output format \"%s\"\n", outform);
	}

	exit(0);
}
gpl-2.0
pst1337/cm11_mkc_boeffla
mm/mremap.c
2758
13496
/*
 * mm/mremap.c
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * Walk the page tables for @addr and return its pmd, or NULL if any
 * level is absent/bad.  Splits a transparent huge pmd first so callers
 * always see regular ptes.
 */
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	split_huge_page_pmd(mm, pmd);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pmd;
}

/*
 * Allocate (if necessary) the pud/pmd/pte levels for @addr in the
 * destination area.  Returns the pmd, or NULL on allocation failure.
 */
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	/* get_old_pmd() already split any huge pmd on the source side. */
	VM_BUG_ON(pmd_trans_huge(*pmd));
	if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr))
		return NULL;

	return pmd;
}

/*
 * Move the ptes covering [old_addr, old_end) under old_pmd over to
 * new_addr under new_pmd, holding both pte locks and notifying mmu
 * notifiers around the whole range.
 */
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	unsigned long old_start;

	old_start = old_addr;
	mmu_notifier_invalidate_range_start(vma->vm_mm,
					    old_start, old_end);
	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock truncate_pagecache
		 * out, since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		mutex_lock(&mapping->i_mmap_mutex);
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_clear_flush(vma, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);
	mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
}

#define LATENCY_LIMIT	(64 * PAGE_SIZE)

/*
 * Move up to @len bytes of page tables from old_addr in @vma to
 * new_addr in @new_vma, one pmd-bounded chunk (capped at LATENCY_LIMIT)
 * at a time.  Returns how many bytes were actually moved; a short
 * return means pmd allocation failed part way.
 */
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/* Clamp this chunk to the next pmd boundary on the old side. */
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		if (next - 1 > old_end)
			next = old_end;
		extent = next - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		/* ...and to the next pmd boundary on the new side. */
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
				new_vma, new_pmd, new_addr);
	}

	return len + old_addr - old_end;	/* how much done */
}

/*
 * Move @vma's [old_addr, old_addr+old_len) to new_addr/new_len: copy
 * the vma, move its page tables (rolling back on partial failure),
 * fix up VM accounting and unmap the old range.  Returns the new
 * address or a negative errno.
 */
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	mm->total_vm += new_len >> PAGE_SHIFT;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			mlock_vma_pages_range(new_vma, new_addr + old_len,
						       new_addr + new_len);
	}

	return new_addr;
}

/*
 * Validate that [addr, addr+old_len) sits inside one resizable vma and
 * that growing it to new_len is permitted (expandability, mlock limit,
 * address-space limit).  On success *p holds any newly charged pages.
 * Returns the vma or an ERR_PTR.
 */
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			goto Efault;
		/* Reject file-offset wraparound for the grown mapping. */
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			goto Einval;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}

/*
 * MREMAP_FIXED path: move/resize [addr, addr+old_len) to the exact
 * destination new_addr/new_len, unmapping whatever was there first.
 */
static unsigned long mremap_to(unsigned long addr,
	unsigned long old_len, unsigned long new_addr,
	unsigned long new_len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
	if (ret)
		goto out;

	/* MAP_FIXED semantics: clear the destination range first. */
	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	/* Shrinking: trim the tail of the old range in place. */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

/*
 * Can @vma grow in place by @delta bytes?  Checks for address overflow,
 * collision with the next vma, and arch/layout acceptance of the range.
 */
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	if (flags & MREMAP_FIXED) {
		if (flags & MREMAP_MAYMOVE)
			ret = mremap_to(addr, old_len, new_addr, new_len);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			mm->total_vm += pages;
			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				mlock_vma_pages_range(vma, addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
		if (ret)
			goto out;
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	/* On failure, undo any memory charge taken by vma_to_resize(). */
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	return ret;
}

/* mremap(2) entry point: do_mremap() under the write mmap_sem. */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
gpl-2.0
moonlightly/android_kernel_zte_msm8960
drivers/tty/serial/max3107-aava.c
2758
9277
/* * max3107.c - spi uart protocol driver for Maxim 3107 * Based on max3100.c * by Christian Pellegrin <chripell@evolware.org> * and max3110.c * by Feng Tang <feng.tang@intel.com> * * Copyright (C) Aavamobile 2009 * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * */ #include <linux/delay.h> #include <linux/device.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/spi/spi.h> #include <linux/freezer.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/sfi.h> #include <asm/mrst.h> #include "max3107.h" /* GPIO direction to input function */ static int max3107_gpio_direction_in(struct gpio_chip *chip, unsigned offset) { struct max3107_port *s = container_of(chip, struct max3107_port, chip); u16 buf[1]; /* Buffer for SPI transfer */ if (offset >= MAX3107_GPIO_COUNT) { dev_err(&s->spi->dev, "Invalid GPIO\n"); return -EINVAL; } /* Read current GPIO configuration register */ buf[0] = MAX3107_GPIOCFG_REG; /* Perform SPI transfer */ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) { dev_err(&s->spi->dev, "SPI transfer GPIO read failed\n"); return -EIO; } buf[0] &= MAX3107_SPI_RX_DATA_MASK; /* Set GPIO to input */ buf[0] &= ~(0x0001 << 
offset); /* Write new GPIO configuration register value */ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG); /* Perform SPI transfer */ if (max3107_rw(s, (u8 *)buf, NULL, 2)) { dev_err(&s->spi->dev, "SPI transfer GPIO write failed\n"); return -EIO; } return 0; } /* GPIO direction to output function */ static int max3107_gpio_direction_out(struct gpio_chip *chip, unsigned offset, int value) { struct max3107_port *s = container_of(chip, struct max3107_port, chip); u16 buf[2]; /* Buffer for SPI transfers */ if (offset >= MAX3107_GPIO_COUNT) { dev_err(&s->spi->dev, "Invalid GPIO\n"); return -EINVAL; } /* Read current GPIO configuration and data registers */ buf[0] = MAX3107_GPIOCFG_REG; buf[1] = MAX3107_GPIODATA_REG; /* Perform SPI transfer */ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) { dev_err(&s->spi->dev, "SPI transfer gpio failed\n"); return -EIO; } buf[0] &= MAX3107_SPI_RX_DATA_MASK; buf[1] &= MAX3107_SPI_RX_DATA_MASK; /* Set GPIO to output */ buf[0] |= (0x0001 << offset); /* Set value */ if (value) buf[1] |= (0x0001 << offset); else buf[1] &= ~(0x0001 << offset); /* Write new GPIO configuration and data register values */ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG); buf[1] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG); /* Perform SPI transfer */ if (max3107_rw(s, (u8 *)buf, NULL, 4)) { dev_err(&s->spi->dev, "SPI transfer for GPIO conf data w failed\n"); return -EIO; } return 0; } /* GPIO value query function */ static int max3107_gpio_get(struct gpio_chip *chip, unsigned offset) { struct max3107_port *s = container_of(chip, struct max3107_port, chip); u16 buf[1]; /* Buffer for SPI transfer */ if (offset >= MAX3107_GPIO_COUNT) { dev_err(&s->spi->dev, "Invalid GPIO\n"); return -EINVAL; } /* Read current GPIO data register */ buf[0] = MAX3107_GPIODATA_REG; /* Perform SPI transfer */ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) { dev_err(&s->spi->dev, "SPI transfer GPIO data r failed\n"); return -EIO; } buf[0] &= MAX3107_SPI_RX_DATA_MASK; /* Return value 
*/ return buf[0] & (0x0001 << offset); } /* GPIO value set function */ static void max3107_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct max3107_port *s = container_of(chip, struct max3107_port, chip); u16 buf[2]; /* Buffer for SPI transfers */ if (offset >= MAX3107_GPIO_COUNT) { dev_err(&s->spi->dev, "Invalid GPIO\n"); return; } /* Read current GPIO configuration registers*/ buf[0] = MAX3107_GPIODATA_REG; buf[1] = MAX3107_GPIOCFG_REG; /* Perform SPI transfer */ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) { dev_err(&s->spi->dev, "SPI transfer for GPIO data and config read failed\n"); return; } buf[0] &= MAX3107_SPI_RX_DATA_MASK; buf[1] &= MAX3107_SPI_RX_DATA_MASK; if (!(buf[1] & (0x0001 << offset))) { /* Configured as input, can't set value */ dev_warn(&s->spi->dev, "Trying to set value for input GPIO\n"); return; } /* Set value */ if (value) buf[0] |= (0x0001 << offset); else buf[0] &= ~(0x0001 << offset); /* Write new GPIO data register value */ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG); /* Perform SPI transfer */ if (max3107_rw(s, (u8 *)buf, NULL, 2)) dev_err(&s->spi->dev, "SPI transfer GPIO data w failed\n"); } /* GPIO chip data */ static struct gpio_chip max3107_gpio_chip = { .owner = THIS_MODULE, .direction_input = max3107_gpio_direction_in, .direction_output = max3107_gpio_direction_out, .get = max3107_gpio_get, .set = max3107_gpio_set, .can_sleep = 1, .base = MAX3107_GPIO_BASE, .ngpio = MAX3107_GPIO_COUNT, }; /** * max3107_aava_reset - reset on AAVA systems * @spi: The SPI device we are probing * * Reset the device ready for probing. 
*/ static int max3107_aava_reset(struct spi_device *spi) { /* Reset the chip */ if (gpio_request(MAX3107_RESET_GPIO, "max3107")) { pr_err("Requesting RESET GPIO failed\n"); return -EIO; } if (gpio_direction_output(MAX3107_RESET_GPIO, 0)) { pr_err("Setting RESET GPIO to 0 failed\n"); gpio_free(MAX3107_RESET_GPIO); return -EIO; } msleep(MAX3107_RESET_DELAY); if (gpio_direction_output(MAX3107_RESET_GPIO, 1)) { pr_err("Setting RESET GPIO to 1 failed\n"); gpio_free(MAX3107_RESET_GPIO); return -EIO; } gpio_free(MAX3107_RESET_GPIO); msleep(MAX3107_WAKEUP_DELAY); return 0; } static int max3107_aava_configure(struct max3107_port *s) { int retval; /* Initialize GPIO chip data */ s->chip = max3107_gpio_chip; s->chip.label = s->spi->modalias; s->chip.dev = &s->spi->dev; /* Add GPIO chip */ retval = gpiochip_add(&s->chip); if (retval) { dev_err(&s->spi->dev, "Adding GPIO chip failed\n"); return retval; } /* Temporary fix for EV2 boot problems, set modem reset to 0 */ max3107_gpio_direction_out(&s->chip, 3, 0); return 0; } #if 0 /* This will get enabled once we have the board stuff merged for this specific case */ static const struct baud_table brg13_ext[] = { { 300, MAX3107_BRG13_B300 }, { 600, MAX3107_BRG13_B600 }, { 1200, MAX3107_BRG13_B1200 }, { 2400, MAX3107_BRG13_B2400 }, { 4800, MAX3107_BRG13_B4800 }, { 9600, MAX3107_BRG13_B9600 }, { 19200, MAX3107_BRG13_B19200 }, { 57600, MAX3107_BRG13_B57600 }, { 115200, MAX3107_BRG13_B115200 }, { 230400, MAX3107_BRG13_B230400 }, { 460800, MAX3107_BRG13_B460800 }, { 921600, MAX3107_BRG13_B921600 }, { 0, 0 } }; static void max3107_aava_init(struct max3107_port *s) { /*override for AAVA SC specific*/ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) { if (get_koski_build_id() <= KOSKI_EV2) if (s->ext_clk) { s->brg_cfg = MAX3107_BRG13_B9600; s->baud_tbl = (struct baud_table *)brg13_ext; } } } #endif static int __devexit max3107_aava_remove(struct spi_device *spi) { struct max3107_port *s = dev_get_drvdata(&spi->dev); /* Remove GPIO chip */ 
if (gpiochip_remove(&s->chip)) dev_warn(&spi->dev, "Removing GPIO chip failed\n"); /* Then do the default remove */ return max3107_remove(spi); } /* Platform data */ static struct max3107_plat aava_plat_data = { .loopback = 0, .ext_clk = 1, /* .init = max3107_aava_init, */ .configure = max3107_aava_configure, .hw_suspend = max3107_hw_susp, .polled_mode = 0, .poll_time = 0, }; static int __devinit max3107_probe_aava(struct spi_device *spi) { int err = max3107_aava_reset(spi); if (err < 0) return err; return max3107_probe(spi, &aava_plat_data); } /* Spi driver data */ static struct spi_driver max3107_driver = { .driver = { .name = "aava-max3107", .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = max3107_probe_aava, .remove = __devexit_p(max3107_aava_remove), .suspend = max3107_suspend, .resume = max3107_resume, }; /* Driver init function */ static int __init max3107_init(void) { return spi_register_driver(&max3107_driver); } /* Driver exit function */ static void __exit max3107_exit(void) { spi_unregister_driver(&max3107_driver); } module_init(max3107_init); module_exit(max3107_exit); MODULE_DESCRIPTION("MAX3107 driver"); MODULE_AUTHOR("Aavamobile"); MODULE_ALIAS("spi:aava-max3107"); MODULE_LICENSE("GPL v2");
gpl-2.0
Arc-Team/android_kernel_htc_pyramid
mm/mremap.c
2758
13496
/* * mm/mremap.c * * (C) Copyright 1996 Linus Torvalds * * Address space accounting code <alan@lxorguk.ukuu.org.uk> * (C) Copyright 2002 Red Hat Inc, All Rights Reserved */ #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/ksm.h> #include <linux/mman.h> #include <linux/swap.h> #include <linux/capability.h> #include <linux/fs.h> #include <linux/highmem.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/mmu_notifier.h> #include <asm/uaccess.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include "internal.h" static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); if (pgd_none_or_clear_bad(pgd)) return NULL; pud = pud_offset(pgd, addr); if (pud_none_or_clear_bad(pud)) return NULL; pmd = pmd_offset(pud, addr); split_huge_page_pmd(mm, pmd); if (pmd_none_or_clear_bad(pmd)) return NULL; return pmd; } static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); if (!pud) return NULL; pmd = pmd_alloc(mm, pud, addr); if (!pmd) return NULL; VM_BUG_ON(pmd_trans_huge(*pmd)); if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr)) return NULL; return pmd; } static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, unsigned long old_addr, unsigned long old_end, struct vm_area_struct *new_vma, pmd_t *new_pmd, unsigned long new_addr) { struct address_space *mapping = NULL; struct mm_struct *mm = vma->vm_mm; pte_t *old_pte, *new_pte, pte; spinlock_t *old_ptl, *new_ptl; unsigned long old_start; old_start = old_addr; mmu_notifier_invalidate_range_start(vma->vm_mm, old_start, old_end); if (vma->vm_file) { /* * Subtle point from Rajesh Venkatasubramanian: before * moving file-based ptes, we must lock truncate_pagecache * out, since it might clean the dst vma before the src vma, * and we propagate 
stale pages into the dst afterward. */ mapping = vma->vm_file->f_mapping; mutex_lock(&mapping->i_mmap_mutex); } /* * We don't have to worry about the ordering of src and dst * pte locks because exclusive mmap_sem prevents deadlock. */ old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); new_pte = pte_offset_map(new_pmd, new_addr); new_ptl = pte_lockptr(mm, new_pmd); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); arch_enter_lazy_mmu_mode(); for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, new_pte++, new_addr += PAGE_SIZE) { if (pte_none(*old_pte)) continue; pte = ptep_clear_flush(vma, old_addr, old_pte); pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); set_pte_at(mm, new_addr, new_pte, pte); } arch_leave_lazy_mmu_mode(); if (new_ptl != old_ptl) spin_unlock(new_ptl); pte_unmap(new_pte - 1); pte_unmap_unlock(old_pte - 1, old_ptl); if (mapping) mutex_unlock(&mapping->i_mmap_mutex); mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end); } #define LATENCY_LIMIT (64 * PAGE_SIZE) unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len) { unsigned long extent, next, old_end; pmd_t *old_pmd, *new_pmd; old_end = old_addr + len; flush_cache_range(vma, old_addr, old_end); for (; old_addr < old_end; old_addr += extent, new_addr += extent) { cond_resched(); next = (old_addr + PMD_SIZE) & PMD_MASK; if (next - 1 > old_end) next = old_end; extent = next - old_addr; old_pmd = get_old_pmd(vma->vm_mm, old_addr); if (!old_pmd) continue; new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); if (!new_pmd) break; next = (new_addr + PMD_SIZE) & PMD_MASK; if (extent > next - new_addr) extent = next - new_addr; if (extent > LATENCY_LIMIT) extent = LATENCY_LIMIT; move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, new_pmd, new_addr); } return len + old_addr - old_end; /* how much done */ } static unsigned long 
move_vma(struct vm_area_struct *vma, unsigned long old_addr, unsigned long old_len, unsigned long new_len, unsigned long new_addr) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *new_vma; unsigned long vm_flags = vma->vm_flags; unsigned long new_pgoff; unsigned long moved_len; unsigned long excess = 0; unsigned long hiwater_vm; int split = 0; int err; /* * We'd prefer to avoid failure later on in do_munmap: * which may split one vma into three before unmapping. */ if (mm->map_count >= sysctl_max_map_count - 3) return -ENOMEM; /* * Advise KSM to break any KSM pages in the area to be moved: * it would be confusing if they were to turn up at the new * location, where they happen to coincide with different KSM * pages recently unmapped. But leave vma->vm_flags as it was, * so KSM can come around to merge on vma and new_vma afterwards. */ err = ksm_madvise(vma, old_addr, old_addr + old_len, MADV_UNMERGEABLE, &vm_flags); if (err) return err; new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff); if (!new_vma) return -ENOMEM; moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len); if (moved_len < old_len) { /* * On error, move entries back from new area to old, * which will succeed since page tables still there, * and then proceed to unmap new area instead of old. */ move_page_tables(new_vma, new_addr, vma, old_addr, moved_len); vma = new_vma; old_len = new_len; old_addr = new_addr; new_addr = -ENOMEM; } /* Conceal VM_ACCOUNT so old reservation is not undone */ if (vm_flags & VM_ACCOUNT) { vma->vm_flags &= ~VM_ACCOUNT; excess = vma->vm_end - vma->vm_start - old_len; if (old_addr > vma->vm_start && old_addr + old_len < vma->vm_end) split = 1; } /* * If we failed to move page tables we still do total_vm increment * since do_munmap() will decrement it by old_len == new_len. 
* * Since total_vm is about to be raised artificially high for a * moment, we need to restore high watermark afterwards: if stats * are taken meanwhile, total_vm and hiwater_vm appear too high. * If this were a serious issue, we'd add a flag to do_munmap(). */ hiwater_vm = mm->hiwater_vm; mm->total_vm += new_len >> PAGE_SHIFT; vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); if (do_munmap(mm, old_addr, old_len) < 0) { /* OOM: unable to split vma, just get accounts right */ vm_unacct_memory(excess >> PAGE_SHIFT); excess = 0; } mm->hiwater_vm = hiwater_vm; /* Restore VM_ACCOUNT if one or two pieces of vma left */ if (excess) { vma->vm_flags |= VM_ACCOUNT; if (split) vma->vm_next->vm_flags |= VM_ACCOUNT; } if (vm_flags & VM_LOCKED) { mm->locked_vm += new_len >> PAGE_SHIFT; if (new_len > old_len) mlock_vma_pages_range(new_vma, new_addr + old_len, new_addr + new_len); } return new_addr; } static struct vm_area_struct *vma_to_resize(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long *p) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = find_vma(mm, addr); if (!vma || vma->vm_start > addr) goto Efault; if (is_vm_hugetlb_page(vma)) goto Einval; /* We can't remap across vm area boundaries */ if (old_len > vma->vm_end - addr) goto Efault; /* Need to be careful about a growing mapping */ if (new_len > old_len) { unsigned long pgoff; if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) goto Efault; pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; pgoff += vma->vm_pgoff; if (pgoff + (new_len >> PAGE_SHIFT) < pgoff) goto Einval; } if (vma->vm_flags & VM_LOCKED) { unsigned long locked, lock_limit; locked = mm->locked_vm << PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK); locked += new_len - old_len; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) goto Eagain; } if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) goto Enomem; if (vma->vm_flags & VM_ACCOUNT) { unsigned long charged = (new_len - old_len) >> 
PAGE_SHIFT; if (security_vm_enough_memory(charged)) goto Efault; *p = charged; } return vma; Efault: /* very odd choice for most of the cases, but... */ return ERR_PTR(-EFAULT); Einval: return ERR_PTR(-EINVAL); Enomem: return ERR_PTR(-ENOMEM); Eagain: return ERR_PTR(-EAGAIN); } static unsigned long mremap_to(unsigned long addr, unsigned long old_len, unsigned long new_addr, unsigned long new_len) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long ret = -EINVAL; unsigned long charged = 0; unsigned long map_flags; if (new_addr & ~PAGE_MASK) goto out; if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) goto out; /* Check if the location we're moving into overlaps the * old location at all, and fail if it does. */ if ((new_addr <= addr) && (new_addr+new_len) > addr) goto out; if ((addr <= new_addr) && (addr+old_len) > new_addr) goto out; ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); if (ret) goto out; ret = do_munmap(mm, new_addr, new_len); if (ret) goto out; if (old_len >= new_len) { ret = do_munmap(mm, addr+new_len, old_len - new_len); if (ret && old_len != new_len) goto out; old_len = new_len; } vma = vma_to_resize(addr, old_len, new_len, &charged); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; } map_flags = MAP_FIXED; if (vma->vm_flags & VM_MAYSHARE) map_flags |= MAP_SHARED; ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT), map_flags); if (ret & ~PAGE_MASK) goto out1; ret = move_vma(vma, addr, old_len, new_len, new_addr); if (!(ret & ~PAGE_MASK)) goto out; out1: vm_unacct_memory(charged); out: return ret; } static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) { unsigned long end = vma->vm_end + delta; if (end < vma->vm_end) /* overflow */ return 0; if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */ return 0; if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, 0, MAP_FIXED) & ~PAGE_MASK) return 0; return 1; 
} /* * Expand (or shrink) an existing mapping, potentially moving it at the * same time (controlled by the MREMAP_MAYMOVE flag and available VM space) * * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise * This option implies MREMAP_MAYMOVE. */ unsigned long do_mremap(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long ret = -EINVAL; unsigned long charged = 0; if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) goto out; if (addr & ~PAGE_MASK) goto out; old_len = PAGE_ALIGN(old_len); new_len = PAGE_ALIGN(new_len); /* * We allow a zero old-len as a special case * for DOS-emu "duplicate shm area" thing. But * a zero new-len is nonsensical. */ if (!new_len) goto out; if (flags & MREMAP_FIXED) { if (flags & MREMAP_MAYMOVE) ret = mremap_to(addr, old_len, new_addr, new_len); goto out; } /* * Always allow a shrinking remap: that just unmaps * the unnecessary pages.. * do_munmap does all the needed commit accounting */ if (old_len >= new_len) { ret = do_munmap(mm, addr+new_len, old_len - new_len); if (ret && old_len != new_len) goto out; ret = addr; goto out; } /* * Ok, we need to grow.. */ vma = vma_to_resize(addr, old_len, new_len, &charged); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; } /* old_len exactly to the end of the area.. */ if (old_len == vma->vm_end - addr) { /* can we just expand the current mapping? */ if (vma_expandable(vma, new_len - old_len)) { int pages = (new_len - old_len) >> PAGE_SHIFT; if (vma_adjust(vma, vma->vm_start, addr + new_len, vma->vm_pgoff, NULL)) { ret = -ENOMEM; goto out; } mm->total_vm += pages; vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages); if (vma->vm_flags & VM_LOCKED) { mm->locked_vm += pages; mlock_vma_pages_range(vma, addr + old_len, addr + new_len); } ret = addr; goto out; } } /* * We weren't able to just expand or shrink the area, * we need to create a new one and move it.. 
*/ ret = -ENOMEM; if (flags & MREMAP_MAYMOVE) { unsigned long map_flags = 0; if (vma->vm_flags & VM_MAYSHARE) map_flags |= MAP_SHARED; new_addr = get_unmapped_area(vma->vm_file, 0, new_len, vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT), map_flags); if (new_addr & ~PAGE_MASK) { ret = new_addr; goto out; } ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); if (ret) goto out; ret = move_vma(vma, addr, old_len, new_len, new_addr); } out: if (ret & ~PAGE_MASK) vm_unacct_memory(charged); return ret; } SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr) { unsigned long ret; down_write(&current->mm->mmap_sem); ret = do_mremap(addr, old_len, new_len, flags, new_addr); up_write(&current->mm->mmap_sem); return ret; }
gpl-2.0
ashwinr64/android_kernel_motorola_msm8610
drivers/staging/rts5139/rts51x_transport.c
4806
27545
/* Driver for Realtek RTS51xx USB card reader * * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. * * Author: * wwang (wei_wang@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China * Maintainer: * Edwin Rong (edwin_rong@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China */ #include <linux/blkdev.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_device.h> #include "debug.h" #include "rts51x.h" #include "rts51x_chip.h" #include "rts51x_card.h" #include "rts51x_scsi.h" #include "rts51x_transport.h" #include "trace.h" /*********************************************************************** * Scatter-gather transfer buffer access routines ***********************************************************************/ /* Copy a buffer of length buflen to/from the srb's transfer buffer. * Update the **sgptr and *offset variables so that the next copy will * pick up from where this one left off. */ unsigned int rts51x_access_sglist(unsigned char *buffer, unsigned int buflen, void *sglist, void **sgptr, unsigned int *offset, enum xfer_buf_dir dir) { unsigned int cnt; struct scatterlist *sg = (struct scatterlist *)*sgptr; /* We have to go through the list one entry * at a time. 
Each s-g entry contains some number of pages, and * each page has to be kmap()'ed separately. If the page is already * in kernel-addressable memory then kmap() will return its address. * If the page is not directly accessible -- such as a user buffer * located in high memory -- then kmap() will map it to a temporary * position in the kernel's virtual address space. */ if (!sg) sg = (struct scatterlist *)sglist; /* This loop handles a single s-g list entry, which may * include multiple pages. Find the initial page structure * and the starting offset within the page, and update * the *offset and **sgptr values for the next loop. */ cnt = 0; while (cnt < buflen && sg) { struct page *page = sg_page(sg) + ((sg->offset + *offset) >> PAGE_SHIFT); unsigned int poff = (sg->offset + *offset) & (PAGE_SIZE - 1); unsigned int sglen = sg->length - *offset; if (sglen > buflen - cnt) { /* Transfer ends within this s-g entry */ sglen = buflen - cnt; *offset += sglen; } else { /* Transfer continues to next s-g entry */ *offset = 0; sg = sg_next(sg); } /* Transfer the data for all the pages in this * s-g entry. For each page: call kmap(), do the * transfer, and call kunmap() immediately after. */ while (sglen > 0) { unsigned int plen = min(sglen, (unsigned int) PAGE_SIZE - poff); unsigned char *ptr = kmap(page); if (dir == TO_XFER_BUF) memcpy(ptr + poff, buffer + cnt, plen); else memcpy(buffer + cnt, ptr + poff, plen); kunmap(page); /* Start at the beginning of the next page */ poff = 0; ++page; cnt += plen; sglen -= plen; } } *sgptr = sg; /* Return the amount actually transferred */ return cnt; } unsigned int rts51x_access_xfer_buf(unsigned char *buffer, unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **sgptr, unsigned int *offset, enum xfer_buf_dir dir) { return rts51x_access_sglist(buffer, buflen, (void *)scsi_sglist(srb), (void **)sgptr, offset, dir); } /* Store the contents of buffer into srb's transfer buffer and set the * SCSI residue. 
 */
/*
 * Copy up to @buflen bytes from the linear buffer @buffer into the
 * (possibly scattered) data buffer of @srb.  If less than the full SCSI
 * buffer was filled, record the shortfall as residue on the command.
 */
void rts51x_set_xfer_buf(unsigned char *buffer, unsigned int buflen,
			 struct scsi_cmnd *srb)
{
	unsigned int offset = 0;
	struct scatterlist *sg = NULL;

	buflen = min(buflen, scsi_bufflen(srb));
	buflen = rts51x_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
					TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

/*
 * Copy up to @buflen bytes out of the data buffer of @srb into the
 * linear buffer @buffer (the reverse of rts51x_set_xfer_buf), recording
 * any shortfall as residue.
 */
void rts51x_get_xfer_buf(unsigned char *buffer, unsigned int buflen,
			 struct scsi_cmnd *srb)
{
	unsigned int offset = 0;
	struct scatterlist *sg = NULL;

	buflen = min(buflen, scsi_bufflen(srb));
	buflen = rts51x_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
					FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

/* This is the completion handler which will wake us up when an URB
 * completes.  The urb's context is the completion that the submitter
 * is sleeping on (may be NULL for fire-and-forget URBs).
 */
static void urb_done_completion(struct urb *urb)
{
	struct completion *urb_done_ptr = urb->context;

	if (urb_done_ptr)
		complete(urb_done_ptr);
}

/* This is the common part of the URB message submission code
 *
 * All URBs from the driver involved in handling a queued scsi
 * command _must_ pass through this function (or something like it) for the
 * abort mechanisms to work properly.
 *
 * @timeout is in milliseconds; a value that rounds to zero jiffies means
 * "wait forever" (MAX_SCHEDULE_TIMEOUT).  Returns 0 on success or a
 * negative errno (-EIO on abort, -ETIMEDOUT, -EINTR, or the URB status).
 */
static int rts51x_msg_common(struct rts51x_chip *chip, struct urb *urb,
			     int timeout)
{
	struct rts51x_usb *rts51x = chip->usb;
	struct completion urb_done;
	long timeleft;
	int status;

	/* don't submit URBs during abort processing */
	if (test_bit(FLIDX_ABORTING, &rts51x->dflags))
		TRACE_RET(chip, -EIO);

	/* set up data structures for the wakeup system */
	init_completion(&urb_done);

	/* fill the common fields in the URB */
	urb->context = &urb_done;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->status = 0;

	/* we assume that if transfer_buffer isn't us->iobuf then it
	 * hasn't been mapped for DMA.  Yes, this is clunky, but it's
	 * easier than always having the caller tell us whether the
	 * transfer buffer has already been mapped.
	 */
	urb->transfer_flags = URB_NO_SETUP_DMA_MAP;
	if (urb->transfer_buffer == rts51x->iobuf) {
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		urb->transfer_dma = rts51x->iobuf_dma;
	}
	urb->setup_dma = rts51x->cr_dma;

	/* submit the URB */
	status = usb_submit_urb(urb, GFP_NOIO);
	if (status) {
		/* something went wrong */
		TRACE_RET(chip, status);
	}

	/* since the URB has been submitted successfully, it's now okay
	 * to cancel it */
	set_bit(FLIDX_URB_ACTIVE, &rts51x->dflags);

	/* did an abort occur during the submission? */
	if (test_bit(FLIDX_ABORTING, &rts51x->dflags)) {
		/* cancel the URB, if it hasn't been cancelled already */
		if (test_and_clear_bit(FLIDX_URB_ACTIVE, &rts51x->dflags)) {
			RTS51X_DEBUGP("-- cancelling URB\n");
			usb_unlink_urb(urb);
		}
	}

	/* wait for the completion of the URB */
	timeleft =
	    wait_for_completion_interruptible_timeout(&urb_done,
						      (timeout * HZ / 1000) ? :
						      MAX_SCHEDULE_TIMEOUT);

	clear_bit(FLIDX_URB_ACTIVE, &rts51x->dflags);

	if (timeleft <= 0) {
		/* timed out (0) or interrupted by a signal (<0): the URB is
		 * still in flight, so kill it synchronously before returning */
		RTS51X_DEBUGP("%s -- cancelling URB\n",
			      timeleft == 0 ? "Timeout" : "Signal");
		usb_kill_urb(urb);
		if (timeleft == 0)
			status = -ETIMEDOUT;
		else
			status = -EINTR;
	} else {
		status = urb->status;
	}
	return status;
}

/*
 * Interpret the results of a URB transfer
 *
 * Maps the usbcore result/actual-length pair onto the driver's STATUS_*
 * codes.  On any failure on a non-control pipe, the hardware error state
 * is cleared via rts51x_clear_hw_error() before returning.
 */
static int interpret_urb_result(struct rts51x_chip *chip, unsigned int pipe,
				unsigned int length, int result,
				unsigned int partial)
{
	int retval = STATUS_SUCCESS;

	/* RTS51X_DEBUGP("Status code %d; transferred %u/%u\n",
				result, partial, length); */
	switch (result) {
		/* no error code; did we send all the data? */
	case 0:
		if (partial != length) {
			RTS51X_DEBUGP("-- short transfer\n");
			TRACE_RET(chip, STATUS_TRANS_SHORT);
		}
		/* RTS51X_DEBUGP("-- transfer complete\n"); */
		return STATUS_SUCCESS;
		/* stalled */
	case -EPIPE:
		/* for control endpoints, (used by CB[I]) a stall indicates
		 * a failed command */
		if (usb_pipecontrol(pipe)) {
			RTS51X_DEBUGP("-- stall on control pipe\n");
			TRACE_RET(chip, STATUS_STALLED);
		}
		/* for other sorts of endpoint, clear the stall */
		RTS51X_DEBUGP("clearing endpoint halt for pipe 0x%x\n", pipe);
		if (rts51x_clear_halt(chip, pipe) < 0)
			TRACE_RET(chip, STATUS_ERROR);
		retval = STATUS_STALLED;
		TRACE_GOTO(chip, Exit);
		/* babble - the device tried to send more than
		 * we wanted to read */
	case -EOVERFLOW:
		RTS51X_DEBUGP("-- babble\n");
		retval = STATUS_TRANS_LONG;
		TRACE_GOTO(chip, Exit);
		/* the transfer was cancelled by abort,
		 * disconnect, or timeout */
	case -ECONNRESET:
		RTS51X_DEBUGP("-- transfer cancelled\n");
		retval = STATUS_ERROR;
		TRACE_GOTO(chip, Exit);
		/* short scatter-gather read transfer */
	case -EREMOTEIO:
		RTS51X_DEBUGP("-- short read transfer\n");
		retval = STATUS_TRANS_SHORT;
		TRACE_GOTO(chip, Exit);
		/* abort or disconnect in progress */
	case -EIO:
		RTS51X_DEBUGP("-- abort or disconnect in progress\n");
		retval = STATUS_ERROR;
		TRACE_GOTO(chip, Exit);
	case -ETIMEDOUT:
		RTS51X_DEBUGP("-- time out\n");
		retval = STATUS_TIMEDOUT;
		TRACE_GOTO(chip, Exit);
		/* the catch-all error case */
	default:
		RTS51X_DEBUGP("-- unknown error\n");
		retval = STATUS_ERROR;
		TRACE_GOTO(chip, Exit);
	}

Exit:
	if ((retval != STATUS_SUCCESS) && !usb_pipecontrol(pipe))
		rts51x_clear_hw_error(chip);

	return retval;
}

/*
 * Synchronous control transfer using the chip's pre-allocated control URB
 * and setup packet (rts51x->cr).  Returns a STATUS_* code.
 */
int rts51x_ctrl_transfer(struct rts51x_chip *chip, unsigned int pipe,
			 u8 request, u8 requesttype, u16 value, u16 index,
			 void *data, u16 size, int timeout)
{
	struct rts51x_usb *rts51x = chip->usb;
	int result;

	RTS51X_DEBUGP("%s: rq=%02x rqtype=%02x value=%04x index=%02x len=%u\n",
		      __func__, request, requesttype, value, index, size);

	/* fill in the devrequest structure */
	rts51x->cr->bRequestType = requesttype;
	rts51x->cr->bRequest = request;
	rts51x->cr->wValue = cpu_to_le16(value);
	rts51x->cr->wIndex = cpu_to_le16(index);
	rts51x->cr->wLength = cpu_to_le16(size);

	/* fill and submit the URB */
	usb_fill_control_urb(rts51x->current_urb, rts51x->pusb_dev, pipe,
			     (unsigned char *)rts51x->cr, data, size,
			     urb_done_completion, NULL);
	result = rts51x_msg_common(chip, rts51x->current_urb, timeout);

	return interpret_urb_result(chip, pipe, size, result,
				    rts51x->current_urb->actual_length);
}

/*
 * Clear a halted (stalled) endpoint: send CLEAR_FEATURE(ENDPOINT_HALT)
 * over the control pipe, then reset the host-side endpoint state.
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
int rts51x_clear_halt(struct rts51x_chip *chip, unsigned int pipe)
{
	int result;
	int endp = usb_pipeendpoint(pipe);

	if (usb_pipein(pipe))
		endp |= USB_DIR_IN;

	result = rts51x_ctrl_transfer(chip, SND_CTRL_PIPE(chip),
				      USB_REQ_CLEAR_FEATURE,
				      USB_RECIP_ENDPOINT, USB_ENDPOINT_HALT,
				      endp, NULL, 0, 3000);
	if (result != STATUS_SUCCESS)
		TRACE_RET(chip, STATUS_FAIL);

	usb_reset_endpoint(chip->usb->pusb_dev, endp);

	return STATUS_SUCCESS;
}

/* Convenience alias: resetting a pipe is just clearing its halt. */
int rts51x_reset_pipe(struct rts51x_chip *chip, char pipe)
{
	return rts51x_clear_halt(chip, pipe);
}

/*
 * Release the URB array of a scatter-gather request.  io->entries is
 * consumed (counted down) by the free loop; io->urbs and io->dev are
 * cleared so the request can be reused.
 */
static void rts51x_sg_clean(struct usb_sg_request *io)
{
	if (io->urbs) {
		while (io->entries--)
			usb_free_urb(io->urbs[io->entries]);
		kfree(io->urbs);
		io->urbs = NULL;
	}
#if 0				/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) */
	if (io->dev->dev.dma_mask != NULL)
		usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
				    io->sg, io->nents);
#endif
	io->dev = NULL;
}

#if 0
/*
 * Per-URB completion handler for the partial scatter-gather path below.
 * Accumulates transferred bytes, unlinks remaining URBs on the first hard
 * error, and signals rts51x_sg_wait() when the last URB finishes.
 */
static void rts51x_sg_complete(struct urb *urb)
{
	struct usb_sg_request *io = urb->context;
	int status = urb->status;

	spin_lock(&io->lock);

	/* In 2.5 we require hcds' endpoint queues not to progress after fault
	 * reports, until the completion callback (this!) returns.  That lets
	 * device driver code (like this routine) unlink queued urbs first,
	 * if it needs to, since the HC won't work on them at all.  So it's
	 * not possible for page N+1 to overwrite page N, and so on.
	 *
	 * That's only for "hard" faults; "soft" faults (unlinks) sometimes
	 * complete before the HCD can get requests away from hardware,
	 * though never during cleanup after a hard fault.
	 */
	if (io->status
	    && (io->status != -ECONNRESET
		|| status != -ECONNRESET) && urb->actual_length) {
		dev_err(io->dev->bus->controller,
			"dev %s ep%d%s scatterlist error %d/%d\n",
			io->dev->devpath,
			usb_endpoint_num(&urb->ep->desc),
			usb_urb_dir_in(urb) ? "in" : "out",
			status, io->status);
		/* BUG (); */
	}

	if (io->status == 0 && status && status != -ECONNRESET) {
		int i, found, retval;

		io->status = status;

		/* the previous urbs, and this one, completed already.
		 * unlink pending urbs so they won't rx/tx bad data.
		 * careful: unlink can sometimes be synchronous...
		 */
		spin_unlock(&io->lock);
		for (i = 0, found = 0; i < io->entries; i++) {
			if (!io->urbs[i] || !io->urbs[i]->dev)
				continue;
			if (found) {
				retval = usb_unlink_urb(io->urbs[i]);
				if (retval != -EINPROGRESS &&
				    retval != -ENODEV && retval != -EBUSY)
					dev_err(&io->dev->dev,
						"%s, unlink --> %d\n",
						__func__, retval);
			} else if (urb == io->urbs[i])
				found = 1;
		}
		spin_lock(&io->lock);
	}
	urb->dev = NULL;

	/* on the last completion, signal usb_sg_wait() */
	io->bytes += urb->actual_length;
	io->count--;
	if (!io->count)
		complete(&io->complete);

	spin_unlock(&io->lock);
}

/* This function is ported from usb_sg_init, which can transfer
 * sg list partially.  @sgptr/@offset carry the resume position across
 * calls; on return they point at the first untransferred byte.
 * NOTE(review): the (unsigned int) casts of pointers in the debug prints
 * truncate on 64-bit — debug-only, but worth fixing if re-enabled.
 */
int rts51x_sg_init_partial(struct usb_sg_request *io, struct usb_device *dev,
			   unsigned pipe, unsigned period, void *buf,
			   struct scatterlist **sgptr, unsigned int *offset,
			   int nents, size_t length, gfp_t mem_flags)
{
	int i;
	int urb_flags;
	int dma;
	struct scatterlist *sg = *sgptr, *first_sg;

	first_sg = (struct scatterlist *)buf;
	if (!sg)
		sg = first_sg;

	if (!io || !dev || !sg || usb_pipecontrol(pipe) || usb_pipeisoc(pipe)
	    || (nents <= 0))
		return -EINVAL;

	spin_lock_init(&io->lock);
	io->dev = dev;
	io->pipe = pipe;
	io->sg = first_sg;	/* used by unmap */
	io->nents = nents;

	RTS51X_DEBUGP("Before map, sg address: 0x%x\n", (unsigned int)sg);
	RTS51X_DEBUGP("Before map, dev address: 0x%x\n", (unsigned int)dev);

	/* not all host controllers use DMA (like the mainstream pci ones);
	 * they can use PIO (sl811) or be software over another transport.
	 */
	dma = (dev->dev.dma_mask != NULL);
	if (dma) {
		/* map the whole sg list, because here we only know the
		 * total nents */
		io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
						first_sg, nents);
	} else {
		io->entries = nents;
	}

	/* initialize all the urbs we'll use */
	if (io->entries <= 0)
		return io->entries;

	io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
	if (!io->urbs)
		goto nomem;

	urb_flags = URB_NO_INTERRUPT;
	if (dma)
		urb_flags |= URB_NO_TRANSFER_DMA_MAP;
	if (usb_pipein(pipe))
		urb_flags |= URB_SHORT_NOT_OK;

	RTS51X_DEBUGP("io->entries = %d\n", io->entries);

	/* carve @length bytes out of the sg list, one URB per (partial)
	 * entry, starting *offset bytes into the current entry */
	for (i = 0; (sg != NULL) && (length > 0); i++) {
		unsigned len;

		RTS51X_DEBUGP("sg address: 0x%x\n", (unsigned int)sg);
		RTS51X_DEBUGP("length = %d, *offset = %d\n", length, *offset);

		io->urbs[i] = usb_alloc_urb(0, mem_flags);
		if (!io->urbs[i]) {
			io->entries = i;
			goto nomem;
		}

		io->urbs[i]->dev = NULL;
		io->urbs[i]->pipe = pipe;
		io->urbs[i]->interval = period;
		io->urbs[i]->transfer_flags = urb_flags;

		io->urbs[i]->complete = rts51x_sg_complete;
		io->urbs[i]->context = io;

		if (dma) {
			io->urbs[i]->transfer_dma =
			    sg_dma_address(sg) + *offset;
			len = sg_dma_len(sg) - *offset;
			io->urbs[i]->transfer_buffer = NULL;
			RTS51X_DEBUGP(" -- sg entry dma length = %d\n",
				      sg_dma_len(sg));
		} else {
			/* hc may use _only_ transfer_buffer */
			io->urbs[i]->transfer_buffer = sg_virt(sg) + *offset;
			len = sg->length - *offset;
			RTS51X_DEBUGP(" -- sg entry length = %d\n",
				      sg->length);
		}

		if (length >= len) {
			/* this entry is fully consumed; move to the next */
			*offset = 0;
			io->urbs[i]->transfer_buffer_length = len;
			length -= len;
			sg = sg_next(sg);
		} else {
			/* stop mid-entry; remember where for the next call */
			*offset += length;
			io->urbs[i]->transfer_buffer_length = length;
			length = 0;
		}
		if (length == 0)
			io->entries = i + 1;
#if 0
		if (length) {
			len = min_t(unsigned, len, length);
			length -= len;
			if (length == 0) {
				io->entries = i + 1;
				*offset += len;
			} else {
				*offset = 0;
			}
		}
#endif
	}
	RTS51X_DEBUGP("In %s, urb count: %d\n", __func__, i);
	/* allow an interrupt only on completion of the final URB */
	io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;

	RTS51X_DEBUGP("sg address stored in sgptr: 0x%x\n", (unsigned int)sg);
	*sgptr = sg;

	/* transaction state */
	io->count = io->entries;
	io->status = 0;
	io->bytes = 0;
	init_completion(&io->complete);
	return 0;

nomem:
	rts51x_sg_clean(io);
	return -ENOMEM;
}
#endif

/* Thin wrapper around the usbcore scatter-gather initializer. */
int rts51x_sg_init(struct usb_sg_request *io, struct usb_device *dev,
		   unsigned pipe, unsigned period, struct scatterlist *sg,
		   int nents, size_t length, gfp_t mem_flags)
{
	return usb_sg_init(io, dev, pipe, period, sg, nents, length,
			   mem_flags);
}

/*
 * Submit the URBs of an initialized sg request and wait for them all to
 * complete.  @timeout is in milliseconds (0 rounds to "wait forever").
 * On timeout or signal the request is cancelled; the URB array is always
 * cleaned up before returning io->status.
 */
int rts51x_sg_wait(struct usb_sg_request *io, int timeout)
{
	long timeleft;
	int i;
	int entries = io->entries;

	/* queue the urbs. */
	spin_lock_irq(&io->lock);
	i = 0;
	while (i < entries && !io->status) {
		int retval;

		io->urbs[i]->dev = io->dev;
		retval = usb_submit_urb(io->urbs[i], GFP_ATOMIC);

		/* after we submit, let completions or cancelations fire;
		 * we handshake using io->status.
		 */
		spin_unlock_irq(&io->lock);
		switch (retval) {
			/* maybe we retrying will recover */
		case -ENXIO:	/* hc didn't queue this one */
		case -EAGAIN:
		case -ENOMEM:
			io->urbs[i]->dev = NULL;
			retval = 0;
			yield();
			break;

			/* no error? continue immediately.
			 *
			 * NOTE: to work better with UHCI (4K I/O buffer may
			 * need 3K of TDs) it may be good to limit how many
			 * URBs are queued at once; N milliseconds?
			 */
		case 0:
			++i;
			cpu_relax();
			break;

			/* fail any uncompleted urbs */
		default:
			io->urbs[i]->dev = NULL;
			io->urbs[i]->status = retval;
			dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
				__func__, retval);
			usb_sg_cancel(io);
		}
		spin_lock_irq(&io->lock);
		if (retval && (io->status == 0 || io->status == -ECONNRESET))
			io->status = retval;
	}
	/* account for the URBs that were never submitted */
	io->count -= entries - i;
	if (io->count == 0)
		complete(&io->complete);
	spin_unlock_irq(&io->lock);

	timeleft =
	    wait_for_completion_interruptible_timeout(&io->complete,
						      (timeout * HZ / 1000) ? :
						      MAX_SCHEDULE_TIMEOUT);
	if (timeleft <= 0) {
		RTS51X_DEBUGP("%s -- cancelling SG request\n",
			      timeleft == 0 ? "Timeout" : "Signal");
		usb_sg_cancel(io);
		if (timeleft == 0)
			io->status = -ETIMEDOUT;
		else
			io->status = -EINTR;
	}

	rts51x_sg_clean(io);
	return io->status;
}

/*
 * Transfer a scatter-gather list via bulk transfer
 *
 * This function does basically the same thing as usb_stor_bulk_transfer_buf()
 * above, but it uses the usbcore scatter-gather library.
 */
static int rts51x_bulk_transfer_sglist(struct rts51x_chip *chip,
				       unsigned int pipe,
				       struct scatterlist *sg, int num_sg,
				       unsigned int length,
				       unsigned int *act_len, int timeout)
{
	int result;

	/* don't submit s-g requests during abort processing */
	if (test_bit(FLIDX_ABORTING, &chip->usb->dflags))
		TRACE_RET(chip, STATUS_ERROR);

	/* initialize the scatter-gather request block */
	RTS51X_DEBUGP("%s: xfer %u bytes, %d entries\n", __func__,
		      length, num_sg);
	result = rts51x_sg_init(&chip->usb->current_sg,
				chip->usb->pusb_dev, pipe, 0, sg, num_sg,
				length, GFP_NOIO);
	if (result) {
		RTS51X_DEBUGP("rts51x_sg_init returned %d\n", result);
		TRACE_RET(chip, STATUS_ERROR);
	}

	/* since the block has been initialized successfully, it's now
	 * okay to cancel it */
	set_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);

	/* did an abort occur during the submission? */
	if (test_bit(FLIDX_ABORTING, &chip->usb->dflags)) {
		/* cancel the request, if it hasn't been cancelled already */
		if (test_and_clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags)) {
			RTS51X_DEBUGP("-- cancelling sg request\n");
			usb_sg_cancel(&chip->usb->current_sg);
		}
	}

	/* wait for the completion of the transfer */
	result = rts51x_sg_wait(&chip->usb->current_sg, timeout);
	clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);

	/* result = us->current_sg.status; */
	if (act_len)
		*act_len = chip->usb->current_sg.bytes;
	return interpret_urb_result(chip, pipe, length, result,
				    chip->usb->current_sg.bytes);
}

#if 0
/*
 * Same as rts51x_bulk_transfer_sglist() but resumable: uses the partial
 * sg initializer so a transfer can pick up mid-entry via @sgptr/@offset.
 * Currently compiled out.
 */
static int rts51x_bulk_transfer_sglist_partial(struct rts51x_chip *chip,
					       unsigned int pipe, void *buf,
					       struct scatterlist **sgptr,
					       unsigned int *offset,
					       int num_sg, unsigned int length,
					       unsigned int *act_len,
					       int timeout)
{
	int result;

	/* don't submit s-g requests during abort processing */
	if (test_bit(FLIDX_ABORTING, &chip->usb->dflags))
		TRACE_RET(chip, STATUS_ERROR);

	/* initialize the scatter-gather request block */
	RTS51X_DEBUGP("%s: xfer %u bytes, %d entries\n", __func__,
		      length, num_sg);
	result = rts51x_sg_init_partial(&chip->usb->current_sg,
					chip->usb->pusb_dev, pipe, 0, buf,
					sgptr, offset, num_sg, length,
					GFP_NOIO);
	if (result) {
		RTS51X_DEBUGP("rts51x_sg_init_partial returned %d\n", result);
		TRACE_RET(chip, STATUS_ERROR);
	}

	/* since the block has been initialized successfully, it's now
	 * okay to cancel it */
	set_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);

	/* did an abort occur during the submission? */
	if (test_bit(FLIDX_ABORTING, &chip->usb->dflags)) {
		/* cancel the request, if it hasn't been cancelled already */
		if (test_and_clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags)) {
			RTS51X_DEBUGP("-- cancelling sg request\n");
			usb_sg_cancel(&chip->usb->current_sg);
		}
	}

	/* wait for the completion of the transfer */
	result = rts51x_sg_wait(&chip->usb->current_sg, timeout);
	clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);

	/* result = us->current_sg.status; */
	if (act_len)
		*act_len = chip->usb->current_sg.bytes;
	return interpret_urb_result(chip, pipe, length, result,
				    chip->usb->current_sg.bytes);
}
#endif

/*
 * Synchronous bulk transfer of a single linear buffer using the chip's
 * pre-allocated URB.  Stores the transferred byte count in *act_len if
 * requested and returns a STATUS_* code.
 */
int rts51x_bulk_transfer_buf(struct rts51x_chip *chip, unsigned int pipe,
			     void *buf, unsigned int length,
			     unsigned int *act_len, int timeout)
{
	int result;

	/* fill and submit the URB */
	usb_fill_bulk_urb(chip->usb->current_urb, chip->usb->pusb_dev, pipe,
			  buf, length, urb_done_completion, NULL);
	result = rts51x_msg_common(chip, chip->usb->current_urb, timeout);

	/* store the actual length of the data transferred */
	if (act_len)
		*act_len = chip->usb->current_urb->actual_length;
	return interpret_urb_result(chip, pipe, length, result,
				    chip->usb->current_urb->actual_length);
}

/*
 * Generic data transfer entry point.  If @use_sg is non-zero, @buf is a
 * scatterlist with @use_sg entries; otherwise it is a linear buffer.
 * The timeout is clamped to a minimum of 600 ms.
 */
int rts51x_transfer_data(struct rts51x_chip *chip, unsigned int pipe,
			 void *buf, unsigned int len, int use_sg,
			 unsigned int *act_len, int timeout)
{
	int result;

	if (timeout < 600)
		timeout = 600;

	if (use_sg) {
		result =
		    rts51x_bulk_transfer_sglist(chip, pipe,
						(struct scatterlist *)buf,
						use_sg, len, act_len, timeout);
	} else {
		result =
		    rts51x_bulk_transfer_buf(chip, pipe, buf, len, act_len,
					     timeout);
	}

	return result;
}

/*
 * Resumable variant of rts51x_transfer_data(): @ptr/@offset track the
 * current position so successive calls continue where the last one ended.
 * For the sg case the data is staged through a temporary linear buffer
 * (kmalloc'd per call) instead of the compiled-out partial-sg path.
 */
int rts51x_transfer_data_partial(struct rts51x_chip *chip, unsigned int pipe,
				 void *buf, void **ptr, unsigned int *offset,
				 unsigned int len, int use_sg,
				 unsigned int *act_len, int timeout)
{
	int result;

	if (timeout < 600)
		timeout = 600;

	if (use_sg) {
		void *tmp_buf = kmalloc(len, GFP_KERNEL);
		if (!tmp_buf)
			TRACE_RET(chip, STATUS_NOMEM);

		/* outbound: gather from the sg list into the bounce buffer */
		if (usb_pipeout(pipe)) {
			rts51x_access_sglist(tmp_buf, len, buf, ptr, offset,
					     FROM_XFER_BUF);
		}
		result =
		    rts51x_bulk_transfer_buf(chip, pipe, tmp_buf, len,
					     act_len, timeout);
		if (result == STATUS_SUCCESS) {
			/* inbound: scatter the bounce buffer back out */
			if (usb_pipein(pipe)) {
				rts51x_access_sglist(tmp_buf, len, buf, ptr,
						     offset, TO_XFER_BUF);
			}
		}

		kfree(tmp_buf);
#if 0
		result =
		    rts51x_bulk_transfer_sglist_partial(chip, pipe, buf,
							(struct scatterlist **)
							ptr, offset, use_sg,
							len, act_len, timeout);
#endif
	} else {
		/* linear buffer: advance *offset by the bytes transferred
		 * (or by len when the caller doesn't ask for act_len) */
		unsigned int step = 0;
		if (offset)
			step = *offset;
		result =
		    rts51x_bulk_transfer_buf(chip, pipe, buf + step, len,
					     act_len, timeout);
		if (act_len)
			step += *act_len;
		else
			step += len;
		if (offset)
			*offset = step;
	}

	return result;
}

/*
 * Read the 2-byte endpoint-complete status word from the device's
 * interrupt-in endpoint into *status (50 ms timeout).
 * NOTE(review): the byte order of *status as seen by callers is not
 * established here — confirm against the consumers of this value.
 */
int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status)
{
	unsigned int pipe = RCV_INTR_PIPE(chip);
	struct usb_host_endpoint *ep;
	struct completion urb_done;
	int result;

	if (!status)
		TRACE_RET(chip, STATUS_ERROR);

	/* set up data structures for the wakeup system */
	init_completion(&urb_done);

	ep = chip->usb->pusb_dev->ep_in[usb_pipeendpoint(pipe)];

	/* fill and submit the URB */
	/* We set interval to 1 here, so the polling interval is controlled
	 * by our polling thread */
	usb_fill_int_urb(chip->usb->intr_urb, chip->usb->pusb_dev, pipe,
			 status, 2, urb_done_completion, &urb_done, 1);
	result = rts51x_msg_common(chip, chip->usb->intr_urb, 50);

	return interpret_urb_result(chip, pipe, 2, result,
				    chip->usb->intr_urb->actual_length);
}

/* Canned SCSI sense data: NOT READY / MEDIUM NOT PRESENT (key 0x02, ASC 0x3A) */
u8 media_not_present[] = {
	0x70, 0, 0x02, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0
};

/* Canned SCSI sense data: ILLEGAL REQUEST / INVALID FIELD IN CDB (0x05/0x24) */
u8 invalid_cmd_field[] = {
	0x70, 0, 0x05, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0
};

/*
 * Execute a queued SCSI command: handle suspend-state short-circuits
 * (answer TEST UNIT READY / ALLOW MEDIUM REMOVAL from canned data while
 * suspended, or wake the device for anything else), then dispatch to the
 * SCSI handler and translate its verdict into srb->result / sense data.
 */
void rts51x_invoke_transport(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	int result;

#ifdef CONFIG_PM
	if (chip->option.ss_en) {
		if (srb->cmnd[0] == TEST_UNIT_READY) {
			/* while suspended, report readiness without waking
			 * the device */
			if (RTS51X_CHK_STAT(chip, STAT_SS)) {
				if (check_fake_card_ready(chip,
							  SCSI_LUN(srb))) {
					srb->result = SAM_STAT_GOOD;
				} else {
					srb->result =
					    SAM_STAT_CHECK_CONDITION;
					memcpy(srb->sense_buffer,
					       media_not_present, SENSE_SIZE);
				}
				return;
			}
		} else if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
			if (RTS51X_CHK_STAT(chip, STAT_SS)) {
				int prevent = srb->cmnd[4] & 0x1;

				/* can't honor PREVENT while suspended */
				if (prevent) {
					srb->result =
					    SAM_STAT_CHECK_CONDITION;
					memcpy(srb->sense_buffer,
					       invalid_cmd_field, SENSE_SIZE);
				} else {
					srb->result = SAM_STAT_GOOD;
				}
				return;
			}
		} else {
			if (RTS51X_CHK_STAT(chip, STAT_SS) ||
			    RTS51X_CHK_STAT(chip, STAT_SS_PRE)) {
				/* Wake up device */
				RTS51X_DEBUGP("Try to wake up device\n");
				chip->resume_from_scsi = 1;

				rts51x_try_to_exit_ss(chip);

				if (RTS51X_CHK_STAT(chip, STAT_SS)) {
					wait_timeout(3000);

					rts51x_init_chip(chip);
					rts51x_init_cards(chip);
				}
			}
		}
	}
#endif

	result = rts51x_scsi_handler(srb, chip);

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		RTS51X_DEBUGP("-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto Handle_Errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
		       (unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
		       sizeof(struct sense_data_t));
	}

	return;

	/* Error and abort processing: try to resynchronize with the device
	 * by issuing a port reset.  If that fails, try a class-specific
	 * device reset. */
Handle_Errors:
	return;
}
gpl-2.0