repo_name
string
path
string
copies
string
size
string
content
string
license
string
wuxianlin/android_kernel_zte_pluto
arch/x86/platform/mrst/vrtc.c
5260
4049
/* * vrtc.c: Driver for virtual RTC device on Intel MID platform * * (C) Copyright 2009 Intel Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. * * Note: * VRTC is emulated by system controller firmware, the real HW * RTC is located in the PMIC device. SCU FW shadows PMIC RTC * in a memory mapped IO space that is visible to the host IA * processor. * * This driver is based on RTC CMOS driver. */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/init.h> #include <linux/sfi.h> #include <linux/platform_device.h> #include <asm/mrst.h> #include <asm/mrst-vrtc.h> #include <asm/time.h> #include <asm/fixmap.h> static unsigned char __iomem *vrtc_virt_base; unsigned char vrtc_cmos_read(unsigned char reg) { unsigned char retval; /* vRTC's registers range from 0x0 to 0xD */ if (reg > 0xd || !vrtc_virt_base) return 0xff; lock_cmos_prefix(reg); retval = __raw_readb(vrtc_virt_base + (reg << 2)); lock_cmos_suffix(reg); return retval; } EXPORT_SYMBOL_GPL(vrtc_cmos_read); void vrtc_cmos_write(unsigned char val, unsigned char reg) { if (reg > 0xd || !vrtc_virt_base) return; lock_cmos_prefix(reg); __raw_writeb(val, vrtc_virt_base + (reg << 2)); lock_cmos_suffix(reg); } EXPORT_SYMBOL_GPL(vrtc_cmos_write); unsigned long vrtc_get_time(void) { u8 sec, min, hour, mday, mon; unsigned long flags; u32 year; spin_lock_irqsave(&rtc_lock, flags); while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP)) cpu_relax(); sec = vrtc_cmos_read(RTC_SECONDS); min = vrtc_cmos_read(RTC_MINUTES); hour = vrtc_cmos_read(RTC_HOURS); mday = vrtc_cmos_read(RTC_DAY_OF_MONTH); mon = vrtc_cmos_read(RTC_MONTH); year = vrtc_cmos_read(RTC_YEAR); spin_unlock_irqrestore(&rtc_lock, flags); /* vRTC YEAR reg contains the offset to 1972 */ year += 1972; printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d " "mon: %d year: %d\n", sec, min, hour, 
mday, mon, year); return mktime(year, mon, mday, hour, min, sec); } /* Only care about the minutes and seconds */ int vrtc_set_mmss(unsigned long nowtime) { int real_sec, real_min; unsigned long flags; int vrtc_min; spin_lock_irqsave(&rtc_lock, flags); vrtc_min = vrtc_cmos_read(RTC_MINUTES); real_sec = nowtime % 60; real_min = nowtime / 60; if (((abs(real_min - vrtc_min) + 15)/30) & 1) real_min += 30; real_min %= 60; vrtc_cmos_write(real_sec, RTC_SECONDS); vrtc_cmos_write(real_min, RTC_MINUTES); spin_unlock_irqrestore(&rtc_lock, flags); return 0; } void __init mrst_rtc_init(void) { unsigned long vrtc_paddr; sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc); vrtc_paddr = sfi_mrtc_array[0].phys_addr; if (!sfi_mrtc_num || !vrtc_paddr) return; vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC, vrtc_paddr); x86_platform.get_wallclock = vrtc_get_time; x86_platform.set_wallclock = vrtc_set_mmss; } /* * The Moorestown platform has a memory mapped virtual RTC device that emulates * the programming interface of the RTC. */ static struct resource vrtc_resources[] = { [0] = { .flags = IORESOURCE_MEM, }, [1] = { .flags = IORESOURCE_IRQ, } }; static struct platform_device vrtc_device = { .name = "rtc_mrst", .id = -1, .resource = vrtc_resources, .num_resources = ARRAY_SIZE(vrtc_resources), }; /* Register the RTC device if appropriate */ static int __init mrst_device_create(void) { /* No Moorestown, no device */ if (!mrst_identify_cpu()) return -ENODEV; /* No timer, no device */ if (!sfi_mrtc_num) return -ENODEV; /* iomem resource */ vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr; vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr + MRST_VRTC_MAP_SZ; /* irq resource */ vrtc_resources[1].start = sfi_mrtc_array[0].irq; vrtc_resources[1].end = sfi_mrtc_array[0].irq; return platform_device_register(&vrtc_device); } module_init(mrst_device_create);
gpl-2.0
Jackeagle/CAF-Kernel
drivers/gpu/drm/radeon/r600_blit_kms.c
5260
24257
/* * Copyright 2009 Advanced Micro Devices, Inc. * Copyright 2009 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* */ #include "drmP.h" #include "drm.h" #include "radeon_drm.h" #include "radeon.h" #include "r600d.h" #include "r600_blit_shaders.h" #include "radeon_blit_common.h" /* emits 21 on rv770+, 23 on r600 */ static void set_render_target(struct radeon_device *rdev, int format, int w, int h, u64 gpu_addr) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 cb_color_info; int pitch, slice; h = ALIGN(h, 8); if (h < 8) h = 8; cb_color_info = CB_FORMAT(format) | CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) | CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); pitch = (w / 8) - 1; slice = ((w * h) / 64) - 1; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, gpu_addr >> 8); if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) { radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0)); radeon_ring_write(ring, 2 << 0); } radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, (pitch << 0) | (slice << 10)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, cb_color_info); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); 
radeon_ring_write(ring, 0); } /* emits 5dw */ static void cp_set_surface_sync(struct radeon_device *rdev, u32 sync_type, u32 size, u64 mc_addr) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 cp_coher_size; if (size == 0xffffffff) cp_coher_size = 0xffffffff; else cp_coher_size = ((size + 255) >> 8); radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); radeon_ring_write(ring, sync_type); radeon_ring_write(ring, cp_coher_size); radeon_ring_write(ring, mc_addr >> 8); radeon_ring_write(ring, 10); /* poll interval */ } /* emits 21dw + 1 surface sync = 26dw */ static void set_shaders(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u64 gpu_addr; u32 sq_pgm_resources; /* setup shader regs */ sq_pgm_resources = (1 << 0); /* VS */ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, sq_pgm_resources); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); /* PS */ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, sq_pgm_resources | (1 << 28)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, 
(SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 2); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, 0); gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); } /* emits 9 + 1 sync (5) = 14*/ static void set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 sq_vtx_constant_word2; sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) | SQ_VTXC_STRIDE(16); #ifdef __BIG_ENDIAN sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32); #endif radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7)); radeon_ring_write(ring, 0x460); radeon_ring_write(ring, gpu_addr & 0xffffffff); radeon_ring_write(ring, 48 - 1); radeon_ring_write(ring, sq_vtx_constant_word2); radeon_ring_write(ring, 1 << 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30); if ((rdev->family == CHIP_RV610) || (rdev->family == CHIP_RV620) || (rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880) || (rdev->family == CHIP_RV710)) cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, 48, gpu_addr); else cp_set_surface_sync(rdev, PACKET3_VC_ACTION_ENA, 48, gpu_addr); } /* emits 9 */ static void set_tex_resource(struct radeon_device *rdev, int format, int w, int h, int pitch, u64 gpu_addr, u32 size) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; if (h < 1) h = 1; sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) | S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) | S_038000_TEX_WIDTH(w - 1); sq_tex_resource_word1 = 
S_038004_DATA_FORMAT(format); sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1); sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) | S_038010_DST_SEL_X(SQ_SEL_X) | S_038010_DST_SEL_Y(SQ_SEL_Y) | S_038010_DST_SEL_Z(SQ_SEL_Z) | S_038010_DST_SEL_W(SQ_SEL_W); cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, size, gpu_addr); radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7)); radeon_ring_write(ring, 0); radeon_ring_write(ring, sq_tex_resource_word0); radeon_ring_write(ring, sq_tex_resource_word1); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, gpu_addr >> 8); radeon_ring_write(ring, sq_tex_resource_word4); radeon_ring_write(ring, 0); radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30); } /* emits 12 */ static void set_scissors(struct radeon_device *rdev, int x1, int y1, int x2, int y2) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, (x1 << 0) | (y1 << 16)); radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); } /* emits 10 */ static void draw_auto(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); radeon_ring_write(ring, DI_PT_RECTLIST); 
radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0)); radeon_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 2) | #endif DI_INDEX_SIZE_16_BIT); radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0)); radeon_ring_write(ring, 1); radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); radeon_ring_write(ring, 3); radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX); } /* emits 14 */ static void set_default_state(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2; u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2; int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs; int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; u64 gpu_addr; int dwords; switch (rdev->family) { case CHIP_R600: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV630: case CHIP_RV635: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 40; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV610: case CHIP_RV620: case CHIP_RS780: case CHIP_RS880: default: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV670: num_ps_gprs = 144; num_vs_gprs = 40; num_temp_gprs = 4; 
num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV770: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 256; num_vs_stack_entries = 256; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV730: case CHIP_RV740: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV710: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 48; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; } if ((rdev->family == CHIP_RV610) || (rdev->family == CHIP_RV620) || (rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880) || (rdev->family == CHIP_RV710)) sq_config = 0; else sq_config = VC_ENABLE; sq_config |= (DX9_CONSTS | ALU_INST_PREFER_VECTOR | PS_PRIO(0) | VS_PRIO(1) | GS_PRIO(2) | ES_PRIO(3)); sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) | NUM_VS_GPRS(num_vs_gprs) | NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) | NUM_ES_GPRS(num_es_gprs)); sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) | NUM_VS_THREADS(num_vs_threads) | NUM_GS_THREADS(num_gs_threads) | NUM_ES_THREADS(num_es_threads)); sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); sq_stack_resource_mgmt_2 = 
(NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | NUM_ES_STACK_ENTRIES(num_es_stack_entries)); /* emit an IB pointing at default state */ dwords = ALIGN(rdev->r600_blit.state_len, 0x10); gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); radeon_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 0) | #endif (gpu_addr & 0xFFFFFFFC)); radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF); radeon_ring_write(ring, dwords); /* SQ config */ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6)); radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); radeon_ring_write(ring, sq_config); radeon_ring_write(ring, sq_gpr_resource_mgmt_1); radeon_ring_write(ring, sq_gpr_resource_mgmt_2); radeon_ring_write(ring, sq_thread_resource_mgmt); radeon_ring_write(ring, sq_stack_resource_mgmt_1); radeon_ring_write(ring, sq_stack_resource_mgmt_2); } #define I2F_MAX_BITS 15 #define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1) #define I2F_SHIFT (24 - I2F_MAX_BITS) /* * Converts unsigned integer into 32-bit IEEE floating point representation. * Conversion is not universal and only works for the range from 0 * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary, * I2F_MAX_BITS can be increased, but that will add to the loop iterations * and slow us down. Conversion is done by shifting the input and counting * down until the first 1 reaches bit position 23. The resulting counter * and the shifted input are, respectively, the exponent and the fraction. * The sign is always zero. 
*/ static uint32_t i2f(uint32_t input) { u32 result, i, exponent, fraction; WARN_ON_ONCE(input > I2F_MAX_INPUT); if ((input & I2F_MAX_INPUT) == 0) result = 0; else { exponent = 126 + I2F_MAX_BITS; fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT; for (i = 0; i < I2F_MAX_BITS; i++) { if (fraction & 0x800000) break; else { fraction = fraction << 1; exponent = exponent - 1; } } result = exponent << 23 | (fraction & 0x7fffff); } return result; } int r600_blit_init(struct radeon_device *rdev) { u32 obj_size; int i, r, dwords; void *ptr; u32 packet2s[16]; int num_packet2s = 0; rdev->r600_blit.primitives.set_render_target = set_render_target; rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync; rdev->r600_blit.primitives.set_shaders = set_shaders; rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource; rdev->r600_blit.primitives.set_tex_resource = set_tex_resource; rdev->r600_blit.primitives.set_scissors = set_scissors; rdev->r600_blit.primitives.draw_auto = draw_auto; rdev->r600_blit.primitives.set_default_state = set_default_state; rdev->r600_blit.ring_size_common = 40; /* shaders + def state */ rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */ rdev->r600_blit.ring_size_common += 5; /* done copy */ rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */ rdev->r600_blit.ring_size_per_loop = 76; /* set_render_target emits 2 extra dwords on rv6xx */ if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) rdev->r600_blit.ring_size_per_loop += 2; rdev->r600_blit.max_dim = 8192; /* pin copy shader into vram if already initialized */ if (rdev->r600_blit.shader_obj) goto done; mutex_init(&rdev->r600_blit.mutex); rdev->r600_blit.state_offset = 0; if (rdev->family >= CHIP_RV770) rdev->r600_blit.state_len = r7xx_default_size; else rdev->r600_blit.state_len = r6xx_default_size; dwords = rdev->r600_blit.state_len; while (dwords & 0xf) { packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); dwords++; } obj_size = dwords * 4; 
obj_size = ALIGN(obj_size, 256); rdev->r600_blit.vs_offset = obj_size; obj_size += r6xx_vs_size * 4; obj_size = ALIGN(obj_size, 256); rdev->r600_blit.ps_offset = obj_size; obj_size += r6xx_ps_size * 4; obj_size = ALIGN(obj_size, 256); r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &rdev->r600_blit.shader_obj); if (r) { DRM_ERROR("r600 failed to allocate shader\n"); return r; } DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n", obj_size, rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (unlikely(r != 0)) return r; r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); if (r) { DRM_ERROR("failed to map blit object %d\n", r); return r; } if (rdev->family >= CHIP_RV770) memcpy_toio(ptr + rdev->r600_blit.state_offset, r7xx_default_state, rdev->r600_blit.state_len * 4); else memcpy_toio(ptr + rdev->r600_blit.state_offset, r6xx_default_state, rdev->r600_blit.state_len * 4); if (num_packet2s) memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), packet2s, num_packet2s * 4); for (i = 0; i < r6xx_vs_size; i++) *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]); for (i = 0; i < r6xx_ps_size; i++) *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]); radeon_bo_kunmap(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); done: r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (unlikely(r != 0)) return r; r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, &rdev->r600_blit.shader_gpu_addr); radeon_bo_unreserve(rdev->r600_blit.shader_obj); if (r) { dev_err(rdev->dev, "(%d) pin blit object failed\n", r); return r; } radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); return 0; } void r600_blit_fini(struct radeon_device *rdev) { int r; radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); if 
(rdev->r600_blit.shader_obj == NULL) return; /* If we can't reserve the bo, unref should be enough to destroy * it when it becomes idle. */ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (!r) { radeon_bo_unpin(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); } radeon_bo_unref(&rdev->r600_blit.shader_obj); } static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size) { int r; r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->r600_blit.vb_ib, size); if (r) { DRM_ERROR("failed to get IB for vertex buffer\n"); return r; } rdev->r600_blit.vb_total = size; rdev->r600_blit.vb_used = 0; return 0; } static void r600_vb_ib_put(struct radeon_device *rdev) { radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); } static unsigned r600_blit_create_rect(unsigned num_gpu_pages, int *width, int *height, int max_dim) { unsigned max_pages; unsigned pages = num_gpu_pages; int w, h; if (num_gpu_pages == 0) { /* not supposed to be called with no pages, but just in case */ h = 0; w = 0; pages = 0; WARN_ON(1); } else { int rect_order = 2; h = RECT_UNIT_H; while (num_gpu_pages / rect_order) { h *= 2; rect_order *= 4; if (h >= max_dim) { h = max_dim; break; } } max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H); if (pages > max_pages) pages = max_pages; w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h; w = (w / RECT_UNIT_W) * RECT_UNIT_W; pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H); BUG_ON(pages == 0); } DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages); /* return width and height only of the caller wants it */ if (height) *height = h; if (width) *width = w; return pages; } int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; int r; int ring_size; int num_loops = 0; int dwords_per_loop = rdev->r600_blit.ring_size_per_loop; /* num loops */ while (num_gpu_pages) { 
num_gpu_pages -= r600_blit_create_rect(num_gpu_pages, NULL, NULL, rdev->r600_blit.max_dim); num_loops++; } /* 48 bytes for vertex per loop */ r = r600_vb_ib_get(rdev, (num_loops*48)+256); if (r) return r; /* calculate number of loops correctly */ ring_size = num_loops * dwords_per_loop; ring_size += rdev->r600_blit.ring_size_common; r = radeon_ring_lock(rdev, ring, ring_size); if (r) return r; rdev->r600_blit.primitives.set_default_state(rdev); rdev->r600_blit.primitives.set_shaders(rdev); return 0; } void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) { int r; if (rdev->r600_blit.vb_ib) r600_vb_ib_put(rdev); if (fence) r = radeon_fence_emit(rdev, fence); radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); } void r600_kms_blit_copy(struct radeon_device *rdev, u64 src_gpu_addr, u64 dst_gpu_addr, unsigned num_gpu_pages) { u64 vb_gpu_addr; u32 *vb; DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, num_gpu_pages, rdev->r600_blit.vb_used); vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); while (num_gpu_pages) { int w, h; unsigned size_in_bytes; unsigned pages_per_loop = r600_blit_create_rect(num_gpu_pages, &w, &h, rdev->r600_blit.max_dim); size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE; DRM_DEBUG("rectangle w=%d h=%d\n", w, h); if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { WARN_ON(1); } vb[0] = 0; vb[1] = 0; vb[2] = 0; vb[3] = 0; vb[4] = 0; vb[5] = i2f(h); vb[6] = 0; vb[7] = i2f(h); vb[8] = i2f(w); vb[9] = i2f(h); vb[10] = i2f(w); vb[11] = i2f(h); rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8, w, h, w, src_gpu_addr, size_in_bytes); rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8, w, h, dst_gpu_addr); rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h); vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr); 
rdev->r600_blit.primitives.draw_auto(rdev); rdev->r600_blit.primitives.cp_set_surface_sync(rdev, PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, size_in_bytes, dst_gpu_addr); vb += 12; rdev->r600_blit.vb_used += 4*12; src_gpu_addr += size_in_bytes; dst_gpu_addr += size_in_bytes; num_gpu_pages -= pages_per_loop; } }
gpl-2.0
TheNotOnly/linux-3.5
net/netfilter/ipset/ip_set_getport.c
7308
3479
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Get Layer-4 data from the packets */ #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/icmp.h> #include <linux/icmpv6.h> #include <linux/sctp.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/ip.h> #include <net/ipv6.h> #include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/export.h> /* We must handle non-linear skbs */ static bool get_port(const struct sk_buff *skb, int protocol, unsigned int protooff, bool src, __be16 *port, u8 *proto) { switch (protocol) { case IPPROTO_TCP: { struct tcphdr _tcph; const struct tcphdr *th; th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph); if (th == NULL) /* No choice either */ return false; *port = src ? th->source : th->dest; break; } case IPPROTO_SCTP: { sctp_sctphdr_t _sh; const sctp_sctphdr_t *sh; sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh); if (sh == NULL) /* No choice either */ return false; *port = src ? sh->source : sh->dest; break; } case IPPROTO_UDP: case IPPROTO_UDPLITE: { struct udphdr _udph; const struct udphdr *uh; uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph); if (uh == NULL) /* No choice either */ return false; *port = src ? 
uh->source : uh->dest; break; } case IPPROTO_ICMP: { struct icmphdr _ich; const struct icmphdr *ic; ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich); if (ic == NULL) return false; *port = (__force __be16)htons((ic->type << 8) | ic->code); break; } case IPPROTO_ICMPV6: { struct icmp6hdr _ich; const struct icmp6hdr *ic; ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich); if (ic == NULL) return false; *port = (__force __be16) htons((ic->icmp6_type << 8) | ic->icmp6_code); break; } default: break; } *proto = protocol; return true; } bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto) { const struct iphdr *iph = ip_hdr(skb); unsigned int protooff = ip_hdrlen(skb); int protocol = iph->protocol; /* See comments at tcp_match in ip_tables.c */ if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET)) return false; return get_port(skb, protocol, protooff, src, port, proto); } EXPORT_SYMBOL_GPL(ip_set_get_ip4_port); #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto) { int protoff; u8 nexthdr; __be16 frag_off; nexthdr = ipv6_hdr(skb)->nexthdr; protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off); if (protoff < 0) return false; return get_port(skb, nexthdr, protoff, src, port, proto); } EXPORT_SYMBOL_GPL(ip_set_get_ip6_port); #endif bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port) { bool ret; u8 proto; switch (pf) { case NFPROTO_IPV4: ret = ip_set_get_ip4_port(skb, src, port, &proto); break; case NFPROTO_IPV6: ret = ip_set_get_ip6_port(skb, src, port, &proto); break; default: return false; } if (!ret) return ret; switch (proto) { case IPPROTO_TCP: case IPPROTO_UDP: return true; default: return false; } } EXPORT_SYMBOL_GPL(ip_set_get_ip_port);
gpl-2.0
cuteprince/kernel_3.4_pico
drivers/crypto/caam/jr.c
7820
14645
/*
 * CAAM/SEC 4.x transport/backend driver
 * JobR backend functionality
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 */

#include "compat.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
#include "intern.h"

/*
 * Main per-ring interrupt handler.
 *
 * Reads and acknowledges the ring's interrupt status, masks further ring
 * interrupts, and defers output-ring processing to the per-CPU tasklet
 * (caam_jr_dequeue).  Returns IRQ_NONE when the status register is clear
 * (shared-IRQ line belongs to someone else), IRQ_HANDLED otherwise.
 */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses, kick
	 * tasklet if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * If JobR error, we got more development work to do
	 * Flag a bug now, but we really need to shut down and
	 * restart the queue (and fix code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts; re-enabled by caam_jr_dequeue() when done */
	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	/* pin to the current CPU's tasklet while scheduling it */
	preempt_disable();
	tasklet_schedule(&jrp->irqtask[smp_processor_id()]);
	preempt_enable();

	return IRQ_HANDLED;
}

/*
 * Deferred service handler, run as interrupt-fired tasklet.
 *
 * Drains the hardware output ring: for each completed job it matches the
 * hardware-reported descriptor DMA address against the software shadow
 * ring (entinfo[]), unmaps the descriptor, advances the ring indices
 * (handling out-of-order completion by only moving the tail past entries
 * already marked done), tells the hardware the entry was removed, and then
 * invokes the submitter's callback with the lock dropped.
 */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	void *userarg;
	unsigned long flags;

	spin_lock_irqsave(&jrp->outlock, flags);

	head = ACCESS_ONCE(jrp->head);
	sw_idx = tail = jrp->tail;

	while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
	       rd_reg32(&jrp->rregs->outring_used)) {

		hw_idx = jrp->out_ring_read_index;
		/* find the software entry matching this hardware completion */
		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			smp_read_barrier_depends();

			if (jrp->outring[hw_idx].desc ==
			    jrp->entinfo[sw_idx].desc_addr_dma)
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params for use outside of lock */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = jrp->outring[hw_idx].jrstatus;

		/* order entinfo reads above against index update below */
		smp_mb();

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail.  Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
				smp_read_barrier_depends();
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		/* set done: tell hardware one output-ring entry was consumed */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		spin_unlock_irqrestore(&jrp->outlock, flags);

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);

		/* re-take the lock and refresh indices for the next pass */
		spin_lock_irqsave(&jrp->outlock, flags);

		head = ACCESS_ONCE(jrp->head);
		sw_idx = tail = jrp->tail;
	}

	spin_unlock_irqrestore(&jrp->outlock, flags);

	/* reenable / unmask IRQs (masked in caam_jr_interrupt) */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
}

/**
 * caam_jr_register() - Alloc a ring for someone to use as needed. Returns
 * an ordinal of the rings allocated, else returns -ENODEV if no rings
 * are available.
 * @ctrldev: points to the controller level dev (parent) that
 *           owns rings available for use.
 * @rdev:    points to where a pointer to the newly allocated queue's
 *           dev can be written to if successful; set to NULL on failure.
 **/
int caam_jr_register(struct device *ctrldev, struct device **rdev)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_drv_private_jr *jrpriv = NULL;
	unsigned long flags;
	int ring;

	/* Lock, if free ring - assign, unlock */
	spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
		jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
		if (jrpriv->assign == JOBR_UNASSIGNED) {
			jrpriv->assign = JOBR_ASSIGNED;
			*rdev = ctrlpriv->jrdev[ring];
			spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock,
					       flags);
			return ring;
		}
	}

	/* If assigned, write dev where caller needs it */
	spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
	*rdev = NULL;

	return -ENODEV;
}
EXPORT_SYMBOL(caam_jr_register);

/**
 * caam_jr_deregister() - Deregister an API and release the queue.
 * Returns 0 if OK, -EBUSY if queue still contains pending entries
 * or unprocessed results at the time of the call
 * @rdev - points to the dev that identifies the queue to
 *         be released.
 **/
int caam_jr_deregister(struct device *rdev)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
	struct caam_drv_private *ctrlpriv;
	unsigned long flags;

	/* Get the owning controller's private space */
	ctrlpriv = dev_get_drvdata(jrpriv->parentdev);

	/*
	 * Make sure ring empty before release: no pending output
	 * entries, and the input ring fully available again.
	 * NOTE(review): this check is done outside jr_alloc_lock, so a
	 * concurrent enqueue could race it — presumably callers quiesce
	 * submissions first; confirm against the API users.
	 */
	if (rd_reg32(&jrpriv->rregs->outring_used) ||
	    (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
		return -EBUSY;

	/* Release ring */
	spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
	jrpriv->assign = JOBR_UNASSIGNED;
	spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);

	return 0;
}
EXPORT_SYMBOL(caam_jr_deregister);

/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used. This device should have
 *        been assigned prior by caam_jr_register().
 * @desc: points to a job descriptor that execute our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
 *        where:
 *        @dev:    contains the job ring device that processed this
 *                 response.
 *        @desc:   descriptor that initiated the request, same as
 *                 "desc" being argued to caam_jr_enqueue().
 *        @status: untranslated status received from CAAM. See the
 *                 reference manual for a detailed description of
 *                 error meaning, or see the JRSTA definitions in the
 *                 register header file
 *        @areq:   optional pointer to an argument passed with the
 *                 original request
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc, u32 status,
				void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	unsigned long flags;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	/* descriptor length (in words) is encoded in its header word */
	desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_irqsave(&jrp->inplock, flags);

	head = jrp->head;
	tail = ACCESS_ONCE(jrp->tail);

	/* reject when either hardware or the software shadow ring is full */
	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_irqrestore(&jrp->inplock, flags);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	/* record completion bookkeeping before publishing to hardware */
	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;

	/* entinfo/inpring writes must be visible before index updates */
	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	/* ring contents must be visible before the doorbell write below */
	wmb();

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_irqrestore(&jrp->inplock, flags);

	return 0;
}
EXPORT_SYMBOL(caam_jr_enqueue);

/*
 * Flush and reset one job ring.
 *
 * Masks ring interrupts, issues a flush (required before reset), polls for
 * flush completion, then issues the reset proper and polls it clear.
 * Returns 0 on success, -EIO if either poll times out.  Interrupts are
 * left unmasked on success, masked on failure.
 */
static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/*
	 * mask interrupts since we are going to poll
	 * for reset completion status
	 */
	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	/* initiate flush (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* initiate reset */
	timeout = 100000;
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* unmask interrupts */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	return 0;
}

/*
 * Init JobR independent of platform property detection.
 *
 * Sets up per-CPU tasklets and the IRQ line, resets the ring hardware,
 * allocates and DMA-maps the input/output rings and the software shadow
 * ring, programs the ring base/size registers, and selects interrupt
 * coalescing.  Returns 0 on success or a negative errno.
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	/* Connect job ring interrupt handler. */
	for_each_possible_cpu(i)
		tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
			     (unsigned long)dev);

	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
			    "caam-jobr", dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		irq_dispose_mapping(jrp->irq);
		jrp->irq = 0;
		return -EINVAL;
	}

	error = caam_reset_hw_jr(dev);
	if (error)
		return error;

	/* GFP_DMA: rings must be reachable by the CAAM DMA engine */
	jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH,
			       GFP_KERNEL | GFP_DMA);
	jrp->outring = kzalloc(sizeof(struct jr_outentry) *
			       JOBR_DEPTH, GFP_KERNEL | GFP_DMA);

	jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
			       GFP_KERNEL);

	if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
	    (jrp->entinfo == NULL)) {
		dev_err(dev, "can't allocate job rings for %d\n",
			jrp->ridx);
		return -ENOMEM;
	}

	/*
	 * Non-zero sentinel so an unused entry can never match a
	 * completion's DMA address in caam_jr_dequeue() (0 means "done").
	 */
	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	inpbusaddr = dma_map_single(dev, jrp->inpring,
				    sizeof(u32 *) * JOBR_DEPTH,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, inpbusaddr)) {
		dev_err(dev, "caam_jr_init(): can't map input ring\n");
		kfree(jrp->inpring);
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -EIO;
	}

	outbusaddr = dma_map_single(dev, jrp->outring,
				    sizeof(struct jr_outentry) * JOBR_DEPTH,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, outbusaddr)) {
		dev_err(dev, "caam_jr_init(): can't map output ring\n");
		dma_unmap_single(dev, inpbusaddr,
				 sizeof(u32 *) * JOBR_DEPTH,
				 DMA_BIDIRECTIONAL);
		kfree(jrp->inpring);
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -EIO;
	}

	jrp->inp_ring_write_index = 0;
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	jrp->ringsize = JOBR_DEPTH;

	spin_lock_init(&jrp->inplock);
	spin_lock_init(&jrp->outlock);

	/* Select interrupt coalescing parameters */
	setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
		  (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		  (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));

	jrp->assign = JOBR_UNASSIGNED;
	return 0;
}

/*
 * Shutdown JobR independent of platform property code.
 *
 * Resets the ring hardware, kills the per-CPU tasklets, releases the IRQ,
 * unmaps and frees the ring buffers.  Returns the result of the hardware
 * reset (resources are released regardless).
 */
int caam_jr_shutdown(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	dma_addr_t inpbusaddr, outbusaddr;
	int ret, i;

	ret = caam_reset_hw_jr(dev);

	for_each_possible_cpu(i)
		tasklet_kill(&jrp->irqtask[i]);

	/* Release interrupt */
	free_irq(jrp->irq, dev);

	/* Free rings: read back the bus addresses programmed at init */
	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
	dma_unmap_single(dev, outbusaddr,
			 sizeof(struct jr_outentry) * JOBR_DEPTH,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, inpbusaddr,
			 sizeof(u32 *) * JOBR_DEPTH,
			 DMA_BIDIRECTIONAL);
	kfree(jrp->outring);
	kfree(jrp->inpring);
	kfree(jrp->entinfo);

	return ret;
}

/*
 * Probe routine for each detected JobR subsystem. It assumes that
 * property detection was picked up externally.
 *
 * Allocates the ring's private data, derives its register block from the
 * controller's already-iomapped space plus the node's "reg" offset,
 * creates a child platform device for the ring, resolves its IRQ, and
 * runs the platform-independent init.  Returns 0 or a negative errno.
 */
int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
		  int ring)
{
	struct device *ctrldev, *jrdev;
	struct platform_device *jr_pdev;
	struct caam_drv_private *ctrlpriv;
	struct caam_drv_private_jr *jrpriv;
	u32 *jroffset;
	int error;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);

	jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
			 GFP_KERNEL);
	if (jrpriv == NULL) {
		dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
			ring);
		return -ENOMEM;
	}
	jrpriv->parentdev = ctrldev; /* point back to parent */
	jrpriv->ridx = ring; /* save ring identity relative to detection */

	/*
	 * Derive a pointer to the detected JobRs regs
	 * Driver has already iomapped the entire space, we just
	 * need to add in the offset to this JobR. Don't know if I
	 * like this long-term, but it'll run
	 * NOTE(review): of_get_property() result is not NULL-checked
	 * before the *jroffset dereference — confirm the binding
	 * guarantees a "reg" property on these nodes.
	 */
	jroffset = (u32 *)of_get_property(np, "reg", NULL);
	jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
							 + *jroffset);

	/* Build a local dev for each detected queue */
	jr_pdev = of_platform_device_create(np, NULL, ctrldev);
	if (jr_pdev == NULL) {
		kfree(jrpriv);
		return -EINVAL;
	}
	jrdev = &jr_pdev->dev;
	dev_set_drvdata(jrdev, jrpriv);
	ctrlpriv->jrdev[ring] = jrdev;

	/* Identify the interrupt */
	jrpriv->irq = of_irq_to_resource(np, 0, NULL);

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error) {
		kfree(jrpriv);
		return error;
	}

	return error;
}
gpl-2.0
Team-SennyC2/senny_kernel-3.4
drivers/net/wireless/rtlwifi/rtl8192de/table.c
9612
36915
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * * Created on 2010/12/23, 6:38 *****************************************************************************/ #include <linux/types.h> #include "table.h" u32 rtl8192de_phy_reg_2tarray[PHY_REG_2T_ARRAYLENGTH] = { 0x024, 0x0011800d, 0x028, 0x00ffdb83, 0x014, 0x088ba955, 0x010, 0x49022b03, 0x800, 0x80040002, 0x804, 0x00000003, 0x808, 0x0000fc00, 0x80c, 0x0000000a, 0x810, 0x80706388, 0x814, 0x020c3d10, 0x818, 0x02200385, 0x81c, 0x00000000, 0x820, 0x01000100, 0x824, 0x00390004, 0x828, 0x01000100, 0x82c, 0x00390004, 0x830, 0x27272727, 0x834, 0x27272727, 0x838, 0x27272727, 0x83c, 0x27272727, 0x840, 0x00010000, 0x844, 0x00010000, 0x848, 0x27272727, 0x84c, 0x27272727, 0x850, 0x00000000, 0x854, 0x00000000, 0x858, 0x569a569a, 0x85c, 0x0c1b25a4, 0x860, 0x66e60230, 0x864, 0x061f0130, 0x868, 0x27272727, 0x86c, 0x272b2b2b, 0x870, 0x07000700, 0x874, 0x22188000, 0x878, 0x08080808, 0x87c, 0x00007ff8, 0x880, 0xc0083070, 0x884, 0x00000cd5, 0x888, 0x00000000, 0x88c, 0xcc0000c0, 0x890, 0x00000800, 0x894, 0xfffffffe, 0x898, 0x40302010, 0x89c, 0x00706050, 0x900, 0x00000000, 0x904, 0x00000023, 0x908, 0x00000000, 0x90c, 0x81121313, 0xa00, 0x00d047c8, 0xa04, 0x80ff000c, 0xa08, 0x8c838300, 0xa0c, 0x2e68120f, 0xa10, 0x9500bb78, 0xa14, 0x11144028, 0xa18, 0x00881117, 0xa1c, 0x89140f00, 0xa20, 0x1a1b0000, 0xa24, 0x090e1317, 0xa28, 0x00000204, 0xa2c, 0x00d30000, 0xa70, 0x101fbf00, 0xa74, 0x00000007, 0xc00, 0x40071d40, 0xc04, 0x03a05633, 0xc08, 0x001000e4, 0xc0c, 0x6c6c6c6c, 0xc10, 0x08800000, 0xc14, 0x40000100, 0xc18, 0x08800000, 0xc1c, 0x40000100, 0xc20, 0x00000000, 0xc24, 0x00000000, 0xc28, 0x00000000, 0xc2c, 0x00000000, 0xc30, 0x69e9ac44, 0xc34, 0x469652cf, 0xc38, 0x49795994, 0xc3c, 0x0a979718, 0xc40, 0x1f7c403f, 0xc44, 0x000100b7, 0xc48, 0xec020107, 0xc4c, 0x007f037f, 0xc50, 0x69543420, 0xc54, 0x43bc009e, 0xc58, 0x69543420, 0xc5c, 0x433c00a8, 0xc60, 0x00000000, 0xc64, 0x5116848b, 0xc68, 0x47c00bff, 0xc6c, 0x00000036, 0xc70, 0x2c7f000d, 0xc74, 
0x058610db, 0xc78, 0x0000001f, 0xc7c, 0x40b95612, 0xc80, 0x40000100, 0xc84, 0x20f60000, 0xc88, 0x40000100, 0xc8c, 0x20e00000, 0xc90, 0x00121820, 0xc94, 0x00000007, 0xc98, 0x00121820, 0xc9c, 0x00007f7f, 0xca0, 0x00000000, 0xca4, 0x00000080, 0xca8, 0x00000000, 0xcac, 0x00000000, 0xcb0, 0x00000000, 0xcb4, 0x00000000, 0xcb8, 0x00000000, 0xcbc, 0x28000000, 0xcc0, 0x00000000, 0xcc4, 0x00000000, 0xcc8, 0x00000000, 0xccc, 0x00000000, 0xcd0, 0x00000000, 0xcd4, 0x00000000, 0xcd8, 0x64b11e20, 0xcdc, 0xe8767533, 0xce0, 0x00222222, 0xce4, 0x00000000, 0xce8, 0x37644302, 0xcec, 0x2f97d40c, 0xd00, 0x00080740, 0xd04, 0x00020403, 0xd08, 0x0000907f, 0xd0c, 0x20010201, 0xd10, 0xa0633333, 0xd14, 0x3333bc43, 0xd18, 0x7a8f5b6b, 0xd2c, 0xcc979975, 0xd30, 0x00000000, 0xd34, 0x80608404, 0xd38, 0x00000000, 0xd3c, 0x00027293, 0xd40, 0x00000000, 0xd44, 0x00000000, 0xd48, 0x00000000, 0xd4c, 0x00000000, 0xd50, 0x6437140a, 0xd54, 0x00000000, 0xd58, 0x00000000, 0xd5c, 0x30032064, 0xd60, 0x4653de68, 0xd64, 0x04518a3c, 0xd68, 0x00002101, 0xd6c, 0x2a201c16, 0xd70, 0x1812362e, 0xd74, 0x322c2220, 0xd78, 0x000e3c24, 0xe00, 0x2a2a2a2a, 0xe04, 0x2a2a2a2a, 0xe08, 0x03902a2a, 0xe10, 0x2a2a2a2a, 0xe14, 0x2a2a2a2a, 0xe18, 0x2a2a2a2a, 0xe1c, 0x2a2a2a2a, 0xe28, 0x00000000, 0xe30, 0x1000dc1f, 0xe34, 0x10008c1f, 0xe38, 0x02140102, 0xe3c, 0x681604c2, 0xe40, 0x01007c00, 0xe44, 0x01004800, 0xe48, 0xfb000000, 0xe4c, 0x000028d1, 0xe50, 0x1000dc1f, 0xe54, 0x10008c1f, 0xe58, 0x02140102, 0xe5c, 0x28160d05, 0xe60, 0x00000010, 0xe68, 0x001b25a4, 0xe6c, 0x63db25a4, 0xe70, 0x63db25a4, 0xe74, 0x0c126da4, 0xe78, 0x0c126da4, 0xe7c, 0x0c126da4, 0xe80, 0x0c126da4, 0xe84, 0x63db25a4, 0xe88, 0x0c126da4, 0xe8c, 0x63db25a4, 0xed0, 0x63db25a4, 0xed4, 0x63db25a4, 0xed8, 0x63db25a4, 0xedc, 0x001b25a4, 0xee0, 0x001b25a4, 0xeec, 0x6fdb25a4, 0xf14, 0x00000003, 0xf1c, 0x00000064, 0xf4c, 0x00000004, 0xf00, 0x00000300, }; u32 rtl8192de_phy_reg_array_pg[PHY_REG_ARRAY_PG_LENGTH] = { 0xe00, 0xffffffff, 0x07090c0c, 0xe04, 0xffffffff, 0x01020405, 
0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x0b0c0c0e, 0xe14, 0xffffffff, 0x01030506, 0xe18, 0xffffffff, 0x0b0c0d0e, 0xe1c, 0xffffffff, 0x01030509, 0x830, 0xffffffff, 0x07090c0c, 0x834, 0xffffffff, 0x01020405, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x0b0c0c0e, 0x848, 0xffffffff, 0x01030506, 0x84c, 0xffffffff, 0x0b0c0d0e, 0x868, 0xffffffff, 0x01030509, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x06060606, 0xe14, 0xffffffff, 0x00020406, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x04040404, 0x834, 0xffffffff, 0x00020204, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x06060606, 0x848, 0xffffffff, 0x00020406, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 
0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x04040404, 0x834, 0xffffffff, 0x00020204, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x00000000, 0xe04, 0xffffffff, 0x00000000, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x00000000, 0xe14, 0xffffffff, 0x00000000, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x00000000, 0x834, 0xffffffff, 0x00000000, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x00000000, 0x848, 0xffffffff, 0x00000000, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x08080808, 0xe14, 0xffffffff, 0x00040408, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x04040404, 0x834, 0xffffffff, 0x00020204, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x08080808, 0x848, 0xffffffff, 0x00040408, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 
0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x08080808, 0xe14, 0xffffffff, 0x00040408, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x04040404, 0x834, 0xffffffff, 0x00020204, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x08080808, 0x848, 0xffffffff, 0x00040408, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x08080808, 0xe14, 0xffffffff, 0x00040408, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x04040404, 0x834, 0xffffffff, 0x00020204, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x08080808, 0x848, 0xffffffff, 0x00040408, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x08080808, 0xe14, 0xffffffff, 0x00040408, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x04040404, 0x834, 0xffffffff, 0x00020204, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x08080808, 0x848, 0xffffffff, 0x00040408, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x08080808, 0xe14, 0xffffffff, 0x00040408, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x04040404, 0x834, 0xffffffff, 0x00020204, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x08080808, 0x848, 0xffffffff, 0x00040408, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 
0x86c, 0xffffff00, 0x00000000, 0xe10, 0xffffffff, 0x08080808, 0xe14, 0xffffffff, 0x00040408, 0xe18, 0xffffffff, 0x00000000, 0xe1c, 0xffffffff, 0x00000000, 0x830, 0xffffffff, 0x04040404, 0x834, 0xffffffff, 0x00020204, 0x838, 0xffffff00, 0x00000000, 0x86c, 0x000000ff, 0x00000000, 0x83c, 0xffffffff, 0x08080808, 0x848, 0xffffffff, 0x00040408, 0x84c, 0xffffffff, 0x00000000, 0x868, 0xffffffff, 0x00000000, }; u32 rtl8192de_radioa_2tarray[RADIOA_2T_ARRAYLENGTH] = { 0x000, 0x00030000, 0x001, 0x00030000, 0x002, 0x00000000, 0x003, 0x00018c63, 0x004, 0x00018c63, 0x008, 0x00084000, 0x00b, 0x0001c000, 0x00e, 0x00018c67, 0x00f, 0x00000851, 0x014, 0x00021440, 0x018, 0x00017524, 0x019, 0x00000000, 0x01d, 0x000a1290, 0x023, 0x00001558, 0x01a, 0x00030a99, 0x01b, 0x00040b00, 0x01c, 0x000fc339, 0x03a, 0x000a57eb, 0x03b, 0x00020000, 0x03c, 0x000ff454, 0x020, 0x0000aa52, 0x021, 0x00054000, 0x040, 0x0000aa52, 0x041, 0x00014000, 0x025, 0x000803be, 0x026, 0x000fc638, 0x027, 0x00077c18, 0x028, 0x000de471, 0x029, 0x000d7110, 0x02a, 0x0008cb04, 0x02b, 0x0004128b, 0x02c, 0x00001840, 0x043, 0x0002444f, 0x044, 0x0001adb0, 0x045, 0x00056467, 0x046, 0x0008992c, 0x047, 0x0000452c, 0x048, 0x000f9c43, 0x049, 0x00002e0c, 0x04a, 0x000546eb, 0x04b, 0x0008966c, 0x04c, 0x0000dde9, 0x018, 0x00007401, 0x000, 0x00070000, 0x012, 0x000dc000, 0x012, 0x00090000, 0x012, 0x00051000, 0x012, 0x00012000, 0x013, 0x000287b7, 0x013, 0x000247ab, 0x013, 0x0002079f, 0x013, 0x0001c793, 0x013, 0x0001839b, 0x013, 0x00014392, 0x013, 0x0001019a, 0x013, 0x0000c191, 0x013, 0x00008194, 0x013, 0x000040a0, 0x013, 0x00000018, 0x015, 0x0000f424, 0x015, 0x0004f424, 0x015, 0x0008f424, 0x016, 0x000e1330, 0x016, 0x000a1330, 0x016, 0x00061330, 0x016, 0x00021330, 0x018, 0x00017524, 0x000, 0x00070000, 0x012, 0x000cf000, 0x012, 0x000bc000, 0x012, 0x00078000, 0x012, 0x00000000, 0x013, 0x000287bc, 0x013, 0x000247b0, 0x013, 0x000203b4, 0x013, 0x0001c3a8, 0x013, 0x000181b4, 0x013, 0x000141a8, 0x013, 0x000100b0, 0x013, 0x0000c0a4, 0x013, 
0x0000b02c, 0x013, 0x00004020, 0x013, 0x00000014, 0x015, 0x0000f4c3, 0x015, 0x0004f4c3, 0x015, 0x0008f4c3, 0x016, 0x000e085f, 0x016, 0x000a085f, 0x016, 0x0006085f, 0x016, 0x0002085f, 0x018, 0x00037524, 0x000, 0x00070000, 0x012, 0x000cf000, 0x012, 0x000bc000, 0x012, 0x00078000, 0x012, 0x00000000, 0x013, 0x000287bc, 0x013, 0x000247b0, 0x013, 0x000203b4, 0x013, 0x0001c3a8, 0x013, 0x000181b4, 0x013, 0x000141a8, 0x013, 0x000100b0, 0x013, 0x0000c0a4, 0x013, 0x0000b02c, 0x013, 0x00004020, 0x013, 0x00000014, 0x015, 0x0000f4c3, 0x015, 0x0004f4c3, 0x015, 0x0008f4c3, 0x016, 0x000e085f, 0x016, 0x000a085f, 0x016, 0x0006085f, 0x016, 0x0002085f, 0x018, 0x00057568, 0x000, 0x00070000, 0x012, 0x000cf000, 0x012, 0x000bc000, 0x012, 0x00078000, 0x012, 0x00000000, 0x013, 0x000287bc, 0x013, 0x000247b0, 0x013, 0x000203b4, 0x013, 0x0001c3a8, 0x013, 0x000181b4, 0x013, 0x000141a8, 0x013, 0x000100b0, 0x013, 0x0000c0a4, 0x013, 0x0000b02c, 0x013, 0x00004020, 0x013, 0x00000014, 0x015, 0x0000f4c3, 0x015, 0x0004f4c3, 0x015, 0x0008f4c3, 0x016, 0x000e085f, 0x016, 0x000a085f, 0x016, 0x0006085f, 0x016, 0x0002085f, 0x030, 0x0004470f, 0x031, 0x00044ff0, 0x032, 0x00000070, 0x033, 0x000dd480, 0x034, 0x000ffac0, 0x035, 0x000b80c0, 0x036, 0x00077000, 0x037, 0x00064ff2, 0x038, 0x000e7661, 0x039, 0x00000e90, 0x000, 0x00030000, 0x018, 0x0000f401, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x01e, 0x00088009, 0x01f, 0x00080003, 0x0fe, 0x00000000, 0x01e, 0x00088001, 0x01f, 0x00080000, 0x0fe, 0x00000000, 0x018, 0x00097524, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x02b, 0x00041289, 0x0fe, 0x00000000, 0x02d, 0x0006aaaa, 0x02e, 0x000b4d01, 0x02d, 0x00080000, 0x02e, 0x00004d02, 0x02d, 0x00095555, 0x02e, 0x00054d03, 0x02d, 0x000aaaaa, 0x02e, 0x000b4d04, 0x02d, 0x000c0000, 0x02e, 0x00004d05, 0x02d, 0x000d5555, 0x02e, 0x00054d06, 0x02d, 0x000eaaaa, 0x02e, 0x000b4d07, 0x02d, 0x00000000, 0x02e, 0x00005108, 0x02d, 0x00015555, 0x02e, 0x00055109, 0x02d, 0x0002aaaa, 0x02e, 0x000b510a, 0x02d, 
0x00040000, 0x02e, 0x0000510b, 0x02d, 0x00055555, 0x02e, 0x0005510c, }; u32 rtl8192de_radiob_2tarray[RADIOB_2T_ARRAYLENGTH] = { 0x000, 0x00030000, 0x001, 0x00030000, 0x002, 0x00000000, 0x003, 0x00018c63, 0x004, 0x00018c63, 0x008, 0x00084000, 0x00b, 0x0001c000, 0x00e, 0x00018c67, 0x00f, 0x00000851, 0x014, 0x00021440, 0x018, 0x00007401, 0x019, 0x00000060, 0x01d, 0x000a1290, 0x023, 0x00001558, 0x01a, 0x00030a99, 0x01b, 0x00040b00, 0x01c, 0x000fc339, 0x03a, 0x000a57eb, 0x03b, 0x00020000, 0x03c, 0x000ff454, 0x020, 0x0000aa52, 0x021, 0x00054000, 0x040, 0x0000aa52, 0x041, 0x00014000, 0x025, 0x000803be, 0x026, 0x000fc638, 0x027, 0x00077c18, 0x028, 0x000d1c31, 0x029, 0x000d7110, 0x02a, 0x000aeb04, 0x02b, 0x0004128b, 0x02c, 0x00001840, 0x043, 0x0002444f, 0x044, 0x0001adb0, 0x045, 0x00056467, 0x046, 0x0008992c, 0x047, 0x0000452c, 0x048, 0x000f9c43, 0x049, 0x00002e0c, 0x04a, 0x000546eb, 0x04b, 0x0008966c, 0x04c, 0x0000dde9, 0x018, 0x00007401, 0x000, 0x00070000, 0x012, 0x000dc000, 0x012, 0x00090000, 0x012, 0x00051000, 0x012, 0x00012000, 0x013, 0x000287b7, 0x013, 0x000247ab, 0x013, 0x0002079f, 0x013, 0x0001c793, 0x013, 0x0001839b, 0x013, 0x00014392, 0x013, 0x0001019a, 0x013, 0x0000c191, 0x013, 0x00008194, 0x013, 0x000040a0, 0x013, 0x00000018, 0x015, 0x0000f424, 0x015, 0x0004f424, 0x015, 0x0008f424, 0x016, 0x000e1330, 0x016, 0x000a1330, 0x016, 0x00061330, 0x016, 0x00021330, 0x018, 0x00017524, 0x000, 0x00070000, 0x012, 0x000cf000, 0x012, 0x000bc000, 0x012, 0x00078000, 0x012, 0x00000000, 0x013, 0x000287bc, 0x013, 0x000247b0, 0x013, 0x000203b4, 0x013, 0x0001c3a8, 0x013, 0x000181b4, 0x013, 0x000141a8, 0x013, 0x000100b0, 0x013, 0x0000c0a4, 0x013, 0x0000b02c, 0x013, 0x00004020, 0x013, 0x00000014, 0x015, 0x0000f4c3, 0x015, 0x0004f4c3, 0x015, 0x0008f4c3, 0x016, 0x000e085f, 0x016, 0x000a085f, 0x016, 0x0006085f, 0x016, 0x0002085f, 0x018, 0x00037524, 0x000, 0x00070000, 0x012, 0x000cf000, 0x012, 0x000bc000, 0x012, 0x00078000, 0x012, 0x00000000, 0x013, 0x000287bc, 0x013, 0x000247b0, 0x013, 
0x000203b4, 0x013, 0x0001c3a8, 0x013, 0x000181b4, 0x013, 0x000141a8, 0x013, 0x000100b0, 0x013, 0x0000c0a4, 0x013, 0x0000b02c, 0x013, 0x00004020, 0x013, 0x00000014, 0x015, 0x0000f4c3, 0x015, 0x0004f4c3, 0x015, 0x0008f4c3, 0x016, 0x000e085f, 0x016, 0x000a085f, 0x016, 0x0006085f, 0x016, 0x0002085f, 0x018, 0x00057524, 0x000, 0x00070000, 0x012, 0x000cf000, 0x012, 0x000bc000, 0x012, 0x00078000, 0x012, 0x00000000, 0x013, 0x000287bc, 0x013, 0x000247b0, 0x013, 0x000203b4, 0x013, 0x0001c3a8, 0x013, 0x000181b4, 0x013, 0x000141a8, 0x013, 0x000100b0, 0x013, 0x0000c0a4, 0x013, 0x0000b02c, 0x013, 0x00004020, 0x013, 0x00000014, 0x015, 0x0000f4c3, 0x015, 0x0004f4c3, 0x015, 0x0008f4c3, 0x016, 0x000e085f, 0x016, 0x000a085f, 0x016, 0x0006085f, 0x016, 0x0002085f, 0x030, 0x0004470f, 0x031, 0x00044ff0, 0x032, 0x00000070, 0x033, 0x000dd480, 0x034, 0x000ffac0, 0x035, 0x000b80c0, 0x036, 0x00077000, 0x037, 0x00064ff2, 0x038, 0x000e7661, 0x039, 0x00000e90, 0x000, 0x00030000, 0x018, 0x0000f401, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x01e, 0x00088009, 0x01f, 0x00080003, 0x0fe, 0x00000000, 0x01e, 0x00088001, 0x01f, 0x00080000, 0x0fe, 0x00000000, 0x018, 0x00087401, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x02b, 0x00041289, 0x0fe, 0x00000000, 0x02d, 0x00066666, 0x02e, 0x00064001, 0x02d, 0x00091111, 0x02e, 0x00014002, 0x02d, 0x000bbbbb, 0x02e, 0x000b4003, 0x02d, 0x000e6666, 0x02e, 0x00064004, 0x02d, 0x00088888, 0x02e, 0x00084005, 0x02d, 0x0009dddd, 0x02e, 0x000d4006, 0x02d, 0x000b3333, 0x02e, 0x00034007, 0x02d, 0x00048888, 0x02e, 0x00084408, 0x02d, 0x000bbbbb, 0x02e, 0x000b4409, 0x02d, 0x000e6666, 0x02e, 0x0006440a, 0x02d, 0x00011111, 0x02e, 0x0001480b, 0x02d, 0x0003bbbb, 0x02e, 0x000b480c, 0x02d, 0x00066666, 0x02e, 0x0006480d, 0x02d, 0x000ccccc, 0x02e, 0x000c480e, }; u32 rtl8192de_radioa_2t_int_paarray[RADIOA_2T_INT_PA_ARRAYLENGTH] = { 0x000, 0x00030000, 0x001, 0x00030000, 0x002, 0x00000000, 0x003, 0x00018c63, 0x004, 0x00018c63, 0x008, 0x00084000, 0x00b, 0x0001c000, 0x00e, 
0x00018c67, 0x00f, 0x00000851, 0x014, 0x00021440, 0x018, 0x00017524, 0x019, 0x00000000, 0x01d, 0x000a1290, 0x023, 0x00001558, 0x01a, 0x00030a99, 0x01b, 0x00040b00, 0x01c, 0x000fc339, 0x03a, 0x000a57eb, 0x03b, 0x00020000, 0x03c, 0x000ff454, 0x020, 0x0000aa52, 0x021, 0x00054000, 0x040, 0x0000aa52, 0x041, 0x00014000, 0x025, 0x000803be, 0x026, 0x000fc638, 0x027, 0x00077c18, 0x028, 0x000de471, 0x029, 0x000d7110, 0x02a, 0x0008eb04, 0x02b, 0x0004128b, 0x02c, 0x00001840, 0x043, 0x0002444f, 0x044, 0x0001adb0, 0x045, 0x00056467, 0x046, 0x0008992c, 0x047, 0x0000452c, 0x048, 0x000c0443, 0x049, 0x00000730, 0x04a, 0x00050f0f, 0x04b, 0x000896ee, 0x04c, 0x0000ddee, 0x018, 0x00007401, 0x000, 0x00070000, 0x012, 0x000dc000, 0x012, 0x00090000, 0x012, 0x00051000, 0x012, 0x00012000, 0x013, 0x000287b7, 0x013, 0x000247ab, 0x013, 0x0002079f, 0x013, 0x0001c793, 0x013, 0x0001839b, 0x013, 0x00014392, 0x013, 0x0001019a, 0x013, 0x0000c191, 0x013, 0x00008194, 0x013, 0x000040a0, 0x013, 0x00000018, 0x015, 0x0000f424, 0x015, 0x0004f424, 0x015, 0x0008f424, 0x016, 0x000e1330, 0x016, 0x000a1330, 0x016, 0x00061330, 0x016, 0x00021330, 0x018, 0x00017524, 0x000, 0x00070000, 0x012, 0x000cf000, 0x012, 0x000bc000, 0x012, 0x00078000, 0x012, 0x00000000, 0x013, 0x000287bf, 0x013, 0x000247b3, 0x013, 0x000207a7, 0x013, 0x0001c79b, 0x013, 0x0001839f, 0x013, 0x00014393, 0x013, 0x00010399, 0x013, 0x0000c38d, 0x013, 0x00008199, 0x013, 0x0000418d, 0x013, 0x00000099, 0x015, 0x0000f495, 0x015, 0x0004f495, 0x015, 0x0008f495, 0x016, 0x000e1874, 0x016, 0x000a1874, 0x016, 0x00061874, 0x016, 0x00021874, 0x018, 0x00037564, 0x000, 0x00070000, 0x012, 0x000cf000, 0x012, 0x000bc000, 0x012, 0x00078000, 0x012, 0x00000000, 0x013, 0x000287bf, 0x013, 0x000247b3, 0x013, 0x000207a7, 0x013, 0x0001c79b, 0x013, 0x0001839f, 0x013, 0x00014393, 0x013, 0x00010399, 0x013, 0x0000c38d, 0x013, 0x00008199, 0x013, 0x0000418d, 0x013, 0x00000099, 0x015, 0x0000f495, 0x015, 0x0004f495, 0x015, 0x0008f495, 0x016, 0x000e1874, 0x016, 0x000a1874, 0x016, 
0x00061874, 0x016, 0x00021874, 0x018, 0x00057595, 0x000, 0x00070000, 0x012, 0x000cf000, 0x012, 0x000bc000, 0x012, 0x00078000, 0x012, 0x00000000, 0x013, 0x000287bf, 0x013, 0x000247b3, 0x013, 0x000207a7, 0x013, 0x0001c79b, 0x013, 0x0001839f, 0x013, 0x00014393, 0x013, 0x00010399, 0x013, 0x0000c38d, 0x013, 0x00008199, 0x013, 0x0000418d, 0x013, 0x00000099, 0x015, 0x0000f495, 0x015, 0x0004f495, 0x015, 0x0008f495, 0x016, 0x000e1874, 0x016, 0x000a1874, 0x016, 0x00061874, 0x016, 0x00021874, 0x030, 0x0004470f, 0x031, 0x00044ff0, 0x032, 0x00000070, 0x033, 0x000dd480, 0x034, 0x000ffac0, 0x035, 0x000b80c0, 0x036, 0x00077000, 0x037, 0x00064ff2, 0x038, 0x000e7661, 0x039, 0x00000e90, 0x000, 0x00030000, 0x018, 0x0000f401, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x01e, 0x00088009, 0x01f, 0x00080003, 0x0fe, 0x00000000, 0x01e, 0x00088001, 0x01f, 0x00080000, 0x0fe, 0x00000000, 0x018, 0x00097524, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x02b, 0x00041289, 0x0fe, 0x00000000, 0x02d, 0x0006aaaa, 0x02e, 0x000b4d01, 0x02d, 0x00080000, 0x02e, 0x00004d02, 0x02d, 0x00095555, 0x02e, 0x00054d03, 0x02d, 0x000aaaaa, 0x02e, 0x000b4d04, 0x02d, 0x000c0000, 0x02e, 0x00004d05, 0x02d, 0x000d5555, 0x02e, 0x00054d06, 0x02d, 0x000eaaaa, 0x02e, 0x000b4d07, 0x02d, 0x00000000, 0x02e, 0x00005108, 0x02d, 0x00015555, 0x02e, 0x00055109, 0x02d, 0x0002aaaa, 0x02e, 0x000b510a, 0x02d, 0x00040000, 0x02e, 0x0000510b, 0x02d, 0x00055555, 0x02e, 0x0005510c, }; u32 rtl8192de_radiob_2t_int_paarray[RADIOB_2T_INT_PA_ARRAYLENGTH] = { 0x000, 0x00030000, 0x001, 0x00030000, 0x002, 0x00000000, 0x003, 0x00018c63, 0x004, 0x00018c63, 0x008, 0x00084000, 0x00b, 0x0001c000, 0x00e, 0x00018c67, 0x00f, 0x00000851, 0x014, 0x00021440, 0x018, 0x00007401, 0x019, 0x00000060, 0x01d, 0x000a1290, 0x023, 0x00001558, 0x01a, 0x00030a99, 0x01b, 0x00040b00, 0x01c, 0x000fc339, 0x03a, 0x000a57eb, 0x03b, 0x00020000, 0x03c, 0x000ff454, 0x020, 0x0000aa52, 0x021, 0x00054000, 0x040, 0x0000aa52, 0x041, 0x00014000, 0x025, 
0x000803be, 0x026, 0x000fc638, 0x027, 0x00077c18, 0x028, 0x000d1c31, 0x029, 0x000d7110, 0x02a, 0x000aeb04, 0x02b, 0x0004128b, 0x02c, 0x00001840, 0x043, 0x0002444f, 0x044, 0x0001adb0, 0x045, 0x00056467, 0x046, 0x0008992c, 0x047, 0x0000452c, 0x048, 0x000c0443, 0x049, 0x00000730, 0x04a, 0x00050f0f, 0x04b, 0x000896ee, 0x04c, 0x0000ddee, 0x018, 0x00007401, 0x000, 0x00070000, 0x012, 0x000dc000, 0x012, 0x00090000, 0x012, 0x00051000, 0x012, 0x00012000, 0x013, 0x000287b7, 0x013, 0x000247ab, 0x013, 0x0002079f, 0x013, 0x0001c793, 0x013, 0x0001839b, 0x013, 0x00014392, 0x013, 0x0001019a, 0x013, 0x0000c191, 0x013, 0x00008194, 0x013, 0x000040a0, 0x013, 0x00000018, 0x015, 0x0000f424, 0x015, 0x0004f424, 0x015, 0x0008f424, 0x016, 0x000e1330, 0x016, 0x000a1330, 0x016, 0x00061330, 0x016, 0x00021330, 0x018, 0x00017524, 0x000, 0x00070000, 0x012, 0x000cf000, 0x012, 0x000bc000, 0x012, 0x00078000, 0x012, 0x00000000, 0x013, 0x000287bf, 0x013, 0x000247b3, 0x013, 0x000207a7, 0x013, 0x0001c79b, 0x013, 0x0001839f, 0x013, 0x00014393, 0x013, 0x00010399, 0x013, 0x0000c38d, 0x013, 0x00008199, 0x013, 0x0000418d, 0x013, 0x00000099, 0x015, 0x0000f495, 0x015, 0x0004f495, 0x015, 0x0008f495, 0x016, 0x000e1874, 0x016, 0x000a1874, 0x016, 0x00061874, 0x016, 0x00021874, 0x018, 0x00037564, 0x000, 0x00070000, 0x012, 0x000cf000, 0x012, 0x000bc000, 0x012, 0x00078000, 0x012, 0x00000000, 0x013, 0x000287bf, 0x013, 0x000247b3, 0x013, 0x000207a7, 0x013, 0x0001c79b, 0x013, 0x0001839f, 0x013, 0x00014393, 0x013, 0x00010399, 0x013, 0x0000c38d, 0x013, 0x00008199, 0x013, 0x0000418d, 0x013, 0x00000099, 0x015, 0x0000f495, 0x015, 0x0004f495, 0x015, 0x0008f495, 0x016, 0x000e1874, 0x016, 0x000a1874, 0x016, 0x00061874, 0x016, 0x00021874, 0x018, 0x00057595, 0x000, 0x00070000, 0x012, 0x000cf000, 0x012, 0x000bc000, 0x012, 0x00078000, 0x012, 0x00000000, 0x013, 0x000287bf, 0x013, 0x000247b3, 0x013, 0x000207a7, 0x013, 0x0001c79b, 0x013, 0x0001839f, 0x013, 0x00014393, 0x013, 0x00010399, 0x013, 0x0000c38d, 0x013, 0x00008199, 0x013, 
0x0000418d, 0x013, 0x00000099, 0x015, 0x0000f495, 0x015, 0x0004f495, 0x015, 0x0008f495, 0x016, 0x000e1874, 0x016, 0x000a1874, 0x016, 0x00061874, 0x016, 0x00021874, 0x030, 0x0004470f, 0x031, 0x00044ff0, 0x032, 0x00000070, 0x033, 0x000dd480, 0x034, 0x000ffac0, 0x035, 0x000b80c0, 0x036, 0x00077000, 0x037, 0x00064ff2, 0x038, 0x000e7661, 0x039, 0x00000e90, 0x000, 0x00030000, 0x018, 0x0000f401, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x01e, 0x00088009, 0x01f, 0x00080003, 0x0fe, 0x00000000, 0x01e, 0x00088001, 0x01f, 0x00080000, 0x0fe, 0x00000000, 0x018, 0x00087401, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x0fe, 0x00000000, 0x02b, 0x00041289, 0x0fe, 0x00000000, 0x02d, 0x00066666, 0x02e, 0x00064001, 0x02d, 0x00091111, 0x02e, 0x00014002, 0x02d, 0x000bbbbb, 0x02e, 0x000b4003, 0x02d, 0x000e6666, 0x02e, 0x00064004, 0x02d, 0x00088888, 0x02e, 0x00084005, 0x02d, 0x0009dddd, 0x02e, 0x000d4006, 0x02d, 0x000b3333, 0x02e, 0x00034007, 0x02d, 0x00048888, 0x02e, 0x00084408, 0x02d, 0x000bbbbb, 0x02e, 0x000b4409, 0x02d, 0x000e6666, 0x02e, 0x0006440a, 0x02d, 0x00011111, 0x02e, 0x0001480b, 0x02d, 0x0003bbbb, 0x02e, 0x000b480c, 0x02d, 0x00066666, 0x02e, 0x0006480d, 0x02d, 0x000ccccc, 0x02e, 0x000c480e, }; u32 rtl8192de_mac_2tarray[MAC_2T_ARRAYLENGTH] = { 0x420, 0x00000080, 0x423, 0x00000000, 0x430, 0x00000000, 0x431, 0x00000000, 0x432, 0x00000000, 0x433, 0x00000001, 0x434, 0x00000004, 0x435, 0x00000005, 0x436, 0x00000006, 0x437, 0x00000007, 0x438, 0x00000000, 0x439, 0x00000000, 0x43a, 0x00000000, 0x43b, 0x00000001, 0x43c, 0x00000004, 0x43d, 0x00000005, 0x43e, 0x00000006, 0x43f, 0x00000007, 0x440, 0x00000050, 0x441, 0x00000001, 0x442, 0x00000000, 0x444, 0x00000015, 0x445, 0x000000f0, 0x446, 0x0000000f, 0x447, 0x00000000, 0x462, 0x00000008, 0x463, 0x00000003, 0x4c8, 0x000000ff, 0x4c9, 0x00000008, 0x4cc, 0x000000ff, 0x4cd, 0x000000ff, 0x4ce, 0x00000001, 0x500, 0x00000026, 0x501, 0x000000a2, 0x502, 0x0000002f, 0x503, 0x00000000, 0x504, 0x00000028, 0x505, 0x000000a3, 0x506, 0x0000005e, 0x507, 
0x00000000, 0x508, 0x0000002b, 0x509, 0x000000a4, 0x50a, 0x0000005e, 0x50b, 0x00000000, 0x50c, 0x0000004f, 0x50d, 0x000000a4, 0x50e, 0x00000000, 0x50f, 0x00000000, 0x512, 0x0000001c, 0x514, 0x0000000a, 0x515, 0x00000010, 0x516, 0x0000000a, 0x517, 0x00000010, 0x51a, 0x00000016, 0x524, 0x0000000f, 0x525, 0x0000004f, 0x546, 0x00000040, 0x547, 0x00000000, 0x550, 0x00000010, 0x551, 0x00000010, 0x559, 0x00000002, 0x55a, 0x00000002, 0x55d, 0x000000ff, 0x605, 0x00000030, 0x608, 0x0000000e, 0x609, 0x0000002a, 0x652, 0x00000020, 0x63c, 0x0000000a, 0x63d, 0x0000000a, 0x63e, 0x0000000e, 0x63f, 0x0000000e, 0x66e, 0x00000005, 0x700, 0x00000021, 0x701, 0x00000043, 0x702, 0x00000065, 0x703, 0x00000087, 0x708, 0x00000021, 0x709, 0x00000043, 0x70a, 0x00000065, 0x70b, 0x00000087, }; u32 rtl8192de_agctab_array[AGCTAB_ARRAYLENGTH] = { 0xc78, 0x7b000001, 0xc78, 0x7b010001, 0xc78, 0x7b020001, 0xc78, 0x7b030001, 0xc78, 0x7b040001, 0xc78, 0x7b050001, 0xc78, 0x7b060001, 0xc78, 0x7a070001, 0xc78, 0x79080001, 0xc78, 0x78090001, 0xc78, 0x770a0001, 0xc78, 0x760b0001, 0xc78, 0x750c0001, 0xc78, 0x740d0001, 0xc78, 0x730e0001, 0xc78, 0x720f0001, 0xc78, 0x71100001, 0xc78, 0x70110001, 0xc78, 0x6f120001, 0xc78, 0x6e130001, 0xc78, 0x6d140001, 0xc78, 0x6c150001, 0xc78, 0x6b160001, 0xc78, 0x6a170001, 0xc78, 0x69180001, 0xc78, 0x68190001, 0xc78, 0x671a0001, 0xc78, 0x661b0001, 0xc78, 0x651c0001, 0xc78, 0x641d0001, 0xc78, 0x631e0001, 0xc78, 0x621f0001, 0xc78, 0x61200001, 0xc78, 0x60210001, 0xc78, 0x49220001, 0xc78, 0x48230001, 0xc78, 0x47240001, 0xc78, 0x46250001, 0xc78, 0x45260001, 0xc78, 0x44270001, 0xc78, 0x43280001, 0xc78, 0x42290001, 0xc78, 0x412a0001, 0xc78, 0x402b0001, 0xc78, 0x262c0001, 0xc78, 0x252d0001, 0xc78, 0x242e0001, 0xc78, 0x232f0001, 0xc78, 0x22300001, 0xc78, 0x21310001, 0xc78, 0x20320001, 0xc78, 0x06330001, 0xc78, 0x05340001, 0xc78, 0x04350001, 0xc78, 0x03360001, 0xc78, 0x02370001, 0xc78, 0x01380001, 0xc78, 0x00390001, 0xc78, 0x003a0001, 0xc78, 0x003b0001, 0xc78, 0x003c0001, 0xc78, 
0x003d0001, 0xc78, 0x003e0001, 0xc78, 0x003f0001, 0xc78, 0x7b400001, 0xc78, 0x7b410001, 0xc78, 0x7a420001, 0xc78, 0x79430001, 0xc78, 0x78440001, 0xc78, 0x77450001, 0xc78, 0x76460001, 0xc78, 0x75470001, 0xc78, 0x74480001, 0xc78, 0x73490001, 0xc78, 0x724a0001, 0xc78, 0x714b0001, 0xc78, 0x704c0001, 0xc78, 0x6f4d0001, 0xc78, 0x6e4e0001, 0xc78, 0x6d4f0001, 0xc78, 0x6c500001, 0xc78, 0x6b510001, 0xc78, 0x6a520001, 0xc78, 0x69530001, 0xc78, 0x68540001, 0xc78, 0x67550001, 0xc78, 0x66560001, 0xc78, 0x65570001, 0xc78, 0x64580001, 0xc78, 0x63590001, 0xc78, 0x625a0001, 0xc78, 0x615b0001, 0xc78, 0x605c0001, 0xc78, 0x485d0001, 0xc78, 0x475e0001, 0xc78, 0x465f0001, 0xc78, 0x45600001, 0xc78, 0x44610001, 0xc78, 0x43620001, 0xc78, 0x42630001, 0xc78, 0x41640001, 0xc78, 0x40650001, 0xc78, 0x27660001, 0xc78, 0x26670001, 0xc78, 0x25680001, 0xc78, 0x24690001, 0xc78, 0x236a0001, 0xc78, 0x226b0001, 0xc78, 0x216c0001, 0xc78, 0x206d0001, 0xc78, 0x206e0001, 0xc78, 0x206f0001, 0xc78, 0x20700001, 0xc78, 0x20710001, 0xc78, 0x20720001, 0xc78, 0x20730001, 0xc78, 0x20740001, 0xc78, 0x20750001, 0xc78, 0x20760001, 0xc78, 0x20770001, 0xc78, 0x20780001, 0xc78, 0x20790001, 0xc78, 0x207a0001, 0xc78, 0x207b0001, 0xc78, 0x207c0001, 0xc78, 0x207d0001, 0xc78, 0x207e0001, 0xc78, 0x207f0001, 0xc78, 0x38000002, 0xc78, 0x38010002, 0xc78, 0x38020002, 0xc78, 0x38030002, 0xc78, 0x38040002, 0xc78, 0x38050002, 0xc78, 0x38060002, 0xc78, 0x38070002, 0xc78, 0x38080002, 0xc78, 0x3c090002, 0xc78, 0x3e0a0002, 0xc78, 0x400b0002, 0xc78, 0x440c0002, 0xc78, 0x480d0002, 0xc78, 0x4c0e0002, 0xc78, 0x500f0002, 0xc78, 0x52100002, 0xc78, 0x56110002, 0xc78, 0x5a120002, 0xc78, 0x5e130002, 0xc78, 0x60140002, 0xc78, 0x60150002, 0xc78, 0x60160002, 0xc78, 0x62170002, 0xc78, 0x62180002, 0xc78, 0x62190002, 0xc78, 0x621a0002, 0xc78, 0x621b0002, 0xc78, 0x621c0002, 0xc78, 0x621d0002, 0xc78, 0x621e0002, 0xc78, 0x621f0002, 0xc78, 0x32000044, 0xc78, 0x32010044, 0xc78, 0x32020044, 0xc78, 0x32030044, 0xc78, 0x32040044, 0xc78, 0x32050044, 0xc78, 
0x32060044, 0xc78, 0x32070044, 0xc78, 0x32080044, 0xc78, 0x34090044, 0xc78, 0x350a0044, 0xc78, 0x360b0044, 0xc78, 0x370c0044, 0xc78, 0x380d0044, 0xc78, 0x390e0044, 0xc78, 0x3a0f0044, 0xc78, 0x3e100044, 0xc78, 0x42110044, 0xc78, 0x44120044, 0xc78, 0x46130044, 0xc78, 0x4a140044, 0xc78, 0x4e150044, 0xc78, 0x50160044, 0xc78, 0x55170044, 0xc78, 0x5a180044, 0xc78, 0x5e190044, 0xc78, 0x641a0044, 0xc78, 0x6e1b0044, 0xc78, 0x6e1c0044, 0xc78, 0x6e1d0044, 0xc78, 0x6e1e0044, 0xc78, 0x6e1f0044, 0xc78, 0x6e1f0000, }; u32 rtl8192de_agctab_5garray[AGCTAB_5G_ARRAYLENGTH] = { 0xc78, 0x7b000001, 0xc78, 0x7b010001, 0xc78, 0x7a020001, 0xc78, 0x79030001, 0xc78, 0x78040001, 0xc78, 0x77050001, 0xc78, 0x76060001, 0xc78, 0x75070001, 0xc78, 0x74080001, 0xc78, 0x73090001, 0xc78, 0x720a0001, 0xc78, 0x710b0001, 0xc78, 0x700c0001, 0xc78, 0x6f0d0001, 0xc78, 0x6e0e0001, 0xc78, 0x6d0f0001, 0xc78, 0x6c100001, 0xc78, 0x6b110001, 0xc78, 0x6a120001, 0xc78, 0x69130001, 0xc78, 0x68140001, 0xc78, 0x67150001, 0xc78, 0x66160001, 0xc78, 0x65170001, 0xc78, 0x64180001, 0xc78, 0x63190001, 0xc78, 0x621a0001, 0xc78, 0x611b0001, 0xc78, 0x601c0001, 0xc78, 0x481d0001, 0xc78, 0x471e0001, 0xc78, 0x461f0001, 0xc78, 0x45200001, 0xc78, 0x44210001, 0xc78, 0x43220001, 0xc78, 0x42230001, 0xc78, 0x41240001, 0xc78, 0x40250001, 0xc78, 0x27260001, 0xc78, 0x26270001, 0xc78, 0x25280001, 0xc78, 0x24290001, 0xc78, 0x232a0001, 0xc78, 0x222b0001, 0xc78, 0x212c0001, 0xc78, 0x202d0001, 0xc78, 0x202e0001, 0xc78, 0x202f0001, 0xc78, 0x20300001, 0xc78, 0x20310001, 0xc78, 0x20320001, 0xc78, 0x20330001, 0xc78, 0x20340001, 0xc78, 0x20350001, 0xc78, 0x20360001, 0xc78, 0x20370001, 0xc78, 0x20380001, 0xc78, 0x20390001, 0xc78, 0x203a0001, 0xc78, 0x203b0001, 0xc78, 0x203c0001, 0xc78, 0x203d0001, 0xc78, 0x203e0001, 0xc78, 0x203f0001, 0xc78, 0x32000044, 0xc78, 0x32010044, 0xc78, 0x32020044, 0xc78, 0x32030044, 0xc78, 0x32040044, 0xc78, 0x32050044, 0xc78, 0x32060044, 0xc78, 0x32070044, 0xc78, 0x32080044, 0xc78, 0x34090044, 0xc78, 0x350a0044, 0xc78, 
0x360b0044, 0xc78, 0x370c0044, 0xc78, 0x380d0044, 0xc78, 0x390e0044, 0xc78, 0x3a0f0044, 0xc78, 0x3e100044, 0xc78, 0x42110044, 0xc78, 0x44120044, 0xc78, 0x46130044, 0xc78, 0x4a140044, 0xc78, 0x4e150044, 0xc78, 0x50160044, 0xc78, 0x55170044, 0xc78, 0x5a180044, 0xc78, 0x5e190044, 0xc78, 0x641a0044, 0xc78, 0x6e1b0044, 0xc78, 0x6e1c0044, 0xc78, 0x6e1d0044, 0xc78, 0x6e1e0044, 0xc78, 0x6e1f0044, 0xc78, 0x6e1f0000, }; u32 rtl8192de_agctab_2garray[AGCTAB_2G_ARRAYLENGTH] = { 0xc78, 0x7b000001, 0xc78, 0x7b010001, 0xc78, 0x7b020001, 0xc78, 0x7b030001, 0xc78, 0x7b040001, 0xc78, 0x7b050001, 0xc78, 0x7b060001, 0xc78, 0x7a070001, 0xc78, 0x79080001, 0xc78, 0x78090001, 0xc78, 0x770a0001, 0xc78, 0x760b0001, 0xc78, 0x750c0001, 0xc78, 0x740d0001, 0xc78, 0x730e0001, 0xc78, 0x720f0001, 0xc78, 0x71100001, 0xc78, 0x70110001, 0xc78, 0x6f120001, 0xc78, 0x6e130001, 0xc78, 0x6d140001, 0xc78, 0x6c150001, 0xc78, 0x6b160001, 0xc78, 0x6a170001, 0xc78, 0x69180001, 0xc78, 0x68190001, 0xc78, 0x671a0001, 0xc78, 0x661b0001, 0xc78, 0x651c0001, 0xc78, 0x641d0001, 0xc78, 0x631e0001, 0xc78, 0x621f0001, 0xc78, 0x61200001, 0xc78, 0x60210001, 0xc78, 0x49220001, 0xc78, 0x48230001, 0xc78, 0x47240001, 0xc78, 0x46250001, 0xc78, 0x45260001, 0xc78, 0x44270001, 0xc78, 0x43280001, 0xc78, 0x42290001, 0xc78, 0x412a0001, 0xc78, 0x402b0001, 0xc78, 0x262c0001, 0xc78, 0x252d0001, 0xc78, 0x242e0001, 0xc78, 0x232f0001, 0xc78, 0x22300001, 0xc78, 0x21310001, 0xc78, 0x20320001, 0xc78, 0x06330001, 0xc78, 0x05340001, 0xc78, 0x04350001, 0xc78, 0x03360001, 0xc78, 0x02370001, 0xc78, 0x01380001, 0xc78, 0x00390001, 0xc78, 0x003a0001, 0xc78, 0x003b0001, 0xc78, 0x003c0001, 0xc78, 0x003d0001, 0xc78, 0x003e0001, 0xc78, 0x003f0001, 0xc78, 0x38000002, 0xc78, 0x38010002, 0xc78, 0x38020002, 0xc78, 0x38030002, 0xc78, 0x38040002, 0xc78, 0x38050002, 0xc78, 0x38060002, 0xc78, 0x38070002, 0xc78, 0x38080002, 0xc78, 0x3c090002, 0xc78, 0x3e0a0002, 0xc78, 0x400b0002, 0xc78, 0x440c0002, 0xc78, 0x480d0002, 0xc78, 0x4c0e0002, 0xc78, 0x500f0002, 0xc78, 
0x52100002, 0xc78, 0x56110002, 0xc78, 0x5a120002, 0xc78, 0x5e130002, 0xc78, 0x60140002, 0xc78, 0x60150002, 0xc78, 0x60160002, 0xc78, 0x62170002, 0xc78, 0x62180002, 0xc78, 0x62190002, 0xc78, 0x621a0002, 0xc78, 0x621b0002, 0xc78, 0x621c0002, 0xc78, 0x621d0002, 0xc78, 0x621e0002, 0xc78, 0x621f0002, 0xc78, 0x6e1f0000, };
gpl-2.0
Jackeagle/android_kernel_htc_dlxub1
drivers/video/riva/riva_hw.c
12940
79994
/***************************************************************************\ |* *| |* Copyright 1993-1999 NVIDIA, Corporation. All rights reserved. *| |* *| |* NOTICE TO USER: The source code is copyrighted under U.S. and *| |* international laws. Users and possessors of this source code are *| |* hereby granted a nonexclusive, royalty-free copyright license to *| |* use this code in individual and commercial software. *| |* *| |* Any use of this source code must include, in the user documenta- *| |* tion and internal comments to the code, notices to the end user *| |* as follows: *| |* *| |* Copyright 1993-1999 NVIDIA, Corporation. All rights reserved. *| |* *| |* NVIDIA, CORPORATION MAKES NO REPRESENTATION ABOUT THE SUITABILITY *| |* OF THIS SOURCE CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" *| |* WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. NVIDIA, CORPOR- *| |* ATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOURCE CODE, *| |* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGE- *| |* MENT, AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL *| |* NVIDIA, CORPORATION BE LIABLE FOR ANY SPECIAL, INDIRECT, INCI- *| |* DENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RE- *| |* SULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION *| |* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF *| |* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE. *| |* *| |* U.S. Government End Users. This source code is a "commercial *| |* item," as that term is defined at 48 C.F.R. 2.101 (OCT 1995), *| |* consisting of "commercial computer software" and "commercial *| |* computer software documentation," as such terms are used in *| |* 48 C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Govern- *| |* ment only as a commercial end item. Consistent with 48 C.F.R. *| |* 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), *| |* all U.S. 
Government End Users acquire the source code with only *| |* those rights set forth herein. *| |* *| \***************************************************************************/ /* * GPL licensing note -- nVidia is allowing a liberal interpretation of * the documentation restriction above, to merely say that this nVidia's * copyright and disclaimer should be included with all code derived * from this source. -- Jeff Garzik <jgarzik@pobox.com>, 01/Nov/99 */ /* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/riva_hw.c,v 1.33 2002/08/05 20:47:06 mvojkovi Exp $ */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include "riva_hw.h" #include "riva_tbl.h" #include "nv_type.h" /* * This file is an OS-agnostic file used to make RIVA 128 and RIVA TNT * operate identically (except TNT has more memory and better 3D quality. */ static int nv3Busy ( RIVA_HW_INST *chip ) { return ((NV_RD32(&chip->Rop->FifoFree, 0) < chip->FifoEmptyCount) || NV_RD32(&chip->PGRAPH[0x000006B0/4], 0) & 0x01); } static int nv4Busy ( RIVA_HW_INST *chip ) { return ((NV_RD32(&chip->Rop->FifoFree, 0) < chip->FifoEmptyCount) || NV_RD32(&chip->PGRAPH[0x00000700/4], 0) & 0x01); } static int nv10Busy ( RIVA_HW_INST *chip ) { return ((NV_RD32(&chip->Rop->FifoFree, 0) < chip->FifoEmptyCount) || NV_RD32(&chip->PGRAPH[0x00000700/4], 0) & 0x01); } static void vgaLockUnlock ( RIVA_HW_INST *chip, int Lock ) { U008 cr11; VGA_WR08(chip->PCIO, 0x3D4, 0x11); cr11 = VGA_RD08(chip->PCIO, 0x3D5); if(Lock) cr11 |= 0x80; else cr11 &= ~0x80; VGA_WR08(chip->PCIO, 0x3D5, cr11); } static void nv3LockUnlock ( RIVA_HW_INST *chip, int Lock ) { VGA_WR08(chip->PVIO, 0x3C4, 0x06); VGA_WR08(chip->PVIO, 0x3C5, Lock ? 0x99 : 0x57); vgaLockUnlock(chip, Lock); } static void nv4LockUnlock ( RIVA_HW_INST *chip, int Lock ) { VGA_WR08(chip->PCIO, 0x3D4, 0x1F); VGA_WR08(chip->PCIO, 0x3D5, Lock ? 
0x99 : 0x57); vgaLockUnlock(chip, Lock); } static int ShowHideCursor ( RIVA_HW_INST *chip, int ShowHide ) { int cursor; cursor = chip->CurrentState->cursor1; chip->CurrentState->cursor1 = (chip->CurrentState->cursor1 & 0xFE) | (ShowHide & 0x01); VGA_WR08(chip->PCIO, 0x3D4, 0x31); VGA_WR08(chip->PCIO, 0x3D5, chip->CurrentState->cursor1); return (cursor & 0x01); } /****************************************************************************\ * * * The video arbitration routines calculate some "magic" numbers. Fixes * * the snow seen when accessing the framebuffer without it. * * It just works (I hope). * * * \****************************************************************************/ #define DEFAULT_GR_LWM 100 #define DEFAULT_VID_LWM 100 #define DEFAULT_GR_BURST_SIZE 256 #define DEFAULT_VID_BURST_SIZE 128 #define VIDEO 0 #define GRAPHICS 1 #define MPORT 2 #define ENGINE 3 #define GFIFO_SIZE 320 #define GFIFO_SIZE_128 256 #define MFIFO_SIZE 120 #define VFIFO_SIZE 256 typedef struct { int gdrain_rate; int vdrain_rate; int mdrain_rate; int gburst_size; int vburst_size; char vid_en; char gr_en; int wcmocc, wcgocc, wcvocc, wcvlwm, wcglwm; int by_gfacc; char vid_only_once; char gr_only_once; char first_vacc; char first_gacc; char first_macc; int vocc; int gocc; int mocc; char cur; char engine_en; char converged; int priority; } nv3_arb_info; typedef struct { int graphics_lwm; int video_lwm; int graphics_burst_size; int video_burst_size; int graphics_hi_priority; int media_hi_priority; int rtl_values; int valid; } nv3_fifo_info; typedef struct { char pix_bpp; char enable_video; char gr_during_vid; char enable_mp; int memory_width; int video_scale; int pclk_khz; int mclk_khz; int mem_page_miss; int mem_latency; char mem_aligned; } nv3_sim_state; typedef struct { int graphics_lwm; int video_lwm; int graphics_burst_size; int video_burst_size; int valid; } nv4_fifo_info; typedef struct { int pclk_khz; int mclk_khz; int nvclk_khz; char mem_page_miss; char mem_latency; int 
memory_width; char enable_video; char gr_during_vid; char pix_bpp; char mem_aligned; char enable_mp; } nv4_sim_state; typedef struct { int graphics_lwm; int video_lwm; int graphics_burst_size; int video_burst_size; int valid; } nv10_fifo_info; typedef struct { int pclk_khz; int mclk_khz; int nvclk_khz; char mem_page_miss; char mem_latency; u32 memory_type; int memory_width; char enable_video; char gr_during_vid; char pix_bpp; char mem_aligned; char enable_mp; } nv10_sim_state; static int nv3_iterate(nv3_fifo_info *res_info, nv3_sim_state * state, nv3_arb_info *ainfo) { int iter = 0; int tmp; int vfsize, mfsize, gfsize; int mburst_size = 32; int mmisses, gmisses, vmisses; int misses; int vlwm, glwm, mlwm; int last, next, cur; int max_gfsize ; long ns; vlwm = 0; glwm = 0; mlwm = 0; vfsize = 0; gfsize = 0; cur = ainfo->cur; mmisses = 2; gmisses = 2; vmisses = 2; if (ainfo->gburst_size == 128) max_gfsize = GFIFO_SIZE_128; else max_gfsize = GFIFO_SIZE; max_gfsize = GFIFO_SIZE; while (1) { if (ainfo->vid_en) { if (ainfo->wcvocc > ainfo->vocc) ainfo->wcvocc = ainfo->vocc; if (ainfo->wcvlwm > vlwm) ainfo->wcvlwm = vlwm ; ns = 1000000 * ainfo->vburst_size/(state->memory_width/8)/state->mclk_khz; vfsize = ns * ainfo->vdrain_rate / 1000000; vfsize = ainfo->wcvlwm - ainfo->vburst_size + vfsize; } if (state->enable_mp) { if (ainfo->wcmocc > ainfo->mocc) ainfo->wcmocc = ainfo->mocc; } if (ainfo->gr_en) { if (ainfo->wcglwm > glwm) ainfo->wcglwm = glwm ; if (ainfo->wcgocc > ainfo->gocc) ainfo->wcgocc = ainfo->gocc; ns = 1000000 * (ainfo->gburst_size/(state->memory_width/8))/state->mclk_khz; gfsize = (ns * (long) ainfo->gdrain_rate)/1000000; gfsize = ainfo->wcglwm - ainfo->gburst_size + gfsize; } mfsize = 0; if (!state->gr_during_vid && ainfo->vid_en) if (ainfo->vid_en && (ainfo->vocc < 0) && !ainfo->vid_only_once) next = VIDEO; else if (ainfo->mocc < 0) next = MPORT; else if (ainfo->gocc< ainfo->by_gfacc) next = GRAPHICS; else return (0); else switch (ainfo->priority) { case 
VIDEO: if (ainfo->vid_en && ainfo->vocc<0 && !ainfo->vid_only_once) next = VIDEO; else if (ainfo->gr_en && ainfo->gocc<0 && !ainfo->gr_only_once) next = GRAPHICS; else if (ainfo->mocc<0) next = MPORT; else return (0); break; case GRAPHICS: if (ainfo->gr_en && ainfo->gocc<0 && !ainfo->gr_only_once) next = GRAPHICS; else if (ainfo->vid_en && ainfo->vocc<0 && !ainfo->vid_only_once) next = VIDEO; else if (ainfo->mocc<0) next = MPORT; else return (0); break; default: if (ainfo->mocc<0) next = MPORT; else if (ainfo->gr_en && ainfo->gocc<0 && !ainfo->gr_only_once) next = GRAPHICS; else if (ainfo->vid_en && ainfo->vocc<0 && !ainfo->vid_only_once) next = VIDEO; else return (0); break; } last = cur; cur = next; iter++; switch (cur) { case VIDEO: if (last==cur) misses = 0; else if (ainfo->first_vacc) misses = vmisses; else misses = 1; ainfo->first_vacc = 0; if (last!=cur) { ns = 1000000 * (vmisses*state->mem_page_miss + state->mem_latency)/state->mclk_khz; vlwm = ns * ainfo->vdrain_rate/ 1000000; vlwm = ainfo->vocc - vlwm; } ns = 1000000*(misses*state->mem_page_miss + ainfo->vburst_size)/(state->memory_width/8)/state->mclk_khz; ainfo->vocc = ainfo->vocc + ainfo->vburst_size - ns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gocc - ns*ainfo->gdrain_rate/1000000; ainfo->mocc = ainfo->mocc - ns*ainfo->mdrain_rate/1000000; break; case GRAPHICS: if (last==cur) misses = 0; else if (ainfo->first_gacc) misses = gmisses; else misses = 1; ainfo->first_gacc = 0; if (last!=cur) { ns = 1000000*(gmisses*state->mem_page_miss + state->mem_latency)/state->mclk_khz ; glwm = ns * ainfo->gdrain_rate/1000000; glwm = ainfo->gocc - glwm; } ns = 1000000*(misses*state->mem_page_miss + ainfo->gburst_size/(state->memory_width/8))/state->mclk_khz; ainfo->vocc = ainfo->vocc + 0 - ns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gocc + ainfo->gburst_size - ns*ainfo->gdrain_rate/1000000; ainfo->mocc = ainfo->mocc + 0 - ns*ainfo->mdrain_rate/1000000; break; default: if (last==cur) misses = 0; else if 
(ainfo->first_macc) misses = mmisses; else misses = 1; ainfo->first_macc = 0; ns = 1000000*(misses*state->mem_page_miss + mburst_size/(state->memory_width/8))/state->mclk_khz; ainfo->vocc = ainfo->vocc + 0 - ns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gocc + 0 - ns*ainfo->gdrain_rate/1000000; ainfo->mocc = ainfo->mocc + mburst_size - ns*ainfo->mdrain_rate/1000000; break; } if (iter>100) { ainfo->converged = 0; return (1); } ns = 1000000*ainfo->gburst_size/(state->memory_width/8)/state->mclk_khz; tmp = ns * ainfo->gdrain_rate/1000000; if (abs(ainfo->gburst_size) + ((abs(ainfo->wcglwm) + 16 ) & ~0x7) - tmp > max_gfsize) { ainfo->converged = 0; return (1); } ns = 1000000*ainfo->vburst_size/(state->memory_width/8)/state->mclk_khz; tmp = ns * ainfo->vdrain_rate/1000000; if (abs(ainfo->vburst_size) + (abs(ainfo->wcvlwm + 32) & ~0xf) - tmp> VFIFO_SIZE) { ainfo->converged = 0; return (1); } if (abs(ainfo->gocc) > max_gfsize) { ainfo->converged = 0; return (1); } if (abs(ainfo->vocc) > VFIFO_SIZE) { ainfo->converged = 0; return (1); } if (abs(ainfo->mocc) > MFIFO_SIZE) { ainfo->converged = 0; return (1); } if (abs(vfsize) > VFIFO_SIZE) { ainfo->converged = 0; return (1); } if (abs(gfsize) > max_gfsize) { ainfo->converged = 0; return (1); } if (abs(mfsize) > MFIFO_SIZE) { ainfo->converged = 0; return (1); } } } static char nv3_arb(nv3_fifo_info * res_info, nv3_sim_state * state, nv3_arb_info *ainfo) { long ens, vns, mns, gns; int mmisses, gmisses, vmisses, eburst_size, mburst_size; int refresh_cycle; refresh_cycle = 0; refresh_cycle = 2*(state->mclk_khz/state->pclk_khz) + 5; mmisses = 2; if (state->mem_aligned) gmisses = 2; else gmisses = 3; vmisses = 2; eburst_size = state->memory_width * 1; mburst_size = 32; gns = 1000000 * (gmisses*state->mem_page_miss + state->mem_latency)/state->mclk_khz; ainfo->by_gfacc = gns*ainfo->gdrain_rate/1000000; ainfo->wcmocc = 0; ainfo->wcgocc = 0; ainfo->wcvocc = 0; ainfo->wcvlwm = 0; ainfo->wcglwm = 0; ainfo->engine_en = 1; 
ainfo->converged = 1; if (ainfo->engine_en) { ens = 1000000*(state->mem_page_miss + eburst_size/(state->memory_width/8) +refresh_cycle)/state->mclk_khz; ainfo->mocc = state->enable_mp ? 0-ens*ainfo->mdrain_rate/1000000 : 0; ainfo->vocc = ainfo->vid_en ? 0-ens*ainfo->vdrain_rate/1000000 : 0; ainfo->gocc = ainfo->gr_en ? 0-ens*ainfo->gdrain_rate/1000000 : 0; ainfo->cur = ENGINE; ainfo->first_vacc = 1; ainfo->first_gacc = 1; ainfo->first_macc = 1; nv3_iterate(res_info, state,ainfo); } if (state->enable_mp) { mns = 1000000 * (mmisses*state->mem_page_miss + mburst_size/(state->memory_width/8) + refresh_cycle)/state->mclk_khz; ainfo->mocc = state->enable_mp ? 0 : mburst_size - mns*ainfo->mdrain_rate/1000000; ainfo->vocc = ainfo->vid_en ? 0 : 0- mns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gr_en ? 0: 0- mns*ainfo->gdrain_rate/1000000; ainfo->cur = MPORT; ainfo->first_vacc = 1; ainfo->first_gacc = 1; ainfo->first_macc = 0; nv3_iterate(res_info, state,ainfo); } if (ainfo->gr_en) { ainfo->first_vacc = 1; ainfo->first_gacc = 0; ainfo->first_macc = 1; gns = 1000000*(gmisses*state->mem_page_miss + ainfo->gburst_size/(state->memory_width/8) + refresh_cycle)/state->mclk_khz; ainfo->gocc = ainfo->gburst_size - gns*ainfo->gdrain_rate/1000000; ainfo->vocc = ainfo->vid_en? 0-gns*ainfo->vdrain_rate/1000000 : 0; ainfo->mocc = state->enable_mp ? 0-gns*ainfo->mdrain_rate/1000000: 0; ainfo->cur = GRAPHICS; nv3_iterate(res_info, state,ainfo); } if (ainfo->vid_en) { ainfo->first_vacc = 0; ainfo->first_gacc = 1; ainfo->first_macc = 1; vns = 1000000*(vmisses*state->mem_page_miss + ainfo->vburst_size/(state->memory_width/8) + refresh_cycle)/state->mclk_khz; ainfo->vocc = ainfo->vburst_size - vns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gr_en? (0-vns*ainfo->gdrain_rate/1000000) : 0; ainfo->mocc = state->enable_mp? 
0-vns*ainfo->mdrain_rate/1000000 :0 ; ainfo->cur = VIDEO; nv3_iterate(res_info, state, ainfo); } if (ainfo->converged) { res_info->graphics_lwm = (int)abs(ainfo->wcglwm) + 16; res_info->video_lwm = (int)abs(ainfo->wcvlwm) + 32; res_info->graphics_burst_size = ainfo->gburst_size; res_info->video_burst_size = ainfo->vburst_size; res_info->graphics_hi_priority = (ainfo->priority == GRAPHICS); res_info->media_hi_priority = (ainfo->priority == MPORT); if (res_info->video_lwm > 160) { res_info->graphics_lwm = 256; res_info->video_lwm = 128; res_info->graphics_burst_size = 64; res_info->video_burst_size = 64; res_info->graphics_hi_priority = 0; res_info->media_hi_priority = 0; ainfo->converged = 0; return (0); } if (res_info->video_lwm > 128) { res_info->video_lwm = 128; } return (1); } else { res_info->graphics_lwm = 256; res_info->video_lwm = 128; res_info->graphics_burst_size = 64; res_info->video_burst_size = 64; res_info->graphics_hi_priority = 0; res_info->media_hi_priority = 0; return (0); } } static char nv3_get_param(nv3_fifo_info *res_info, nv3_sim_state * state, nv3_arb_info *ainfo) { int done, g,v, p; done = 0; for (p=0; p < 2; p++) { for (g=128 ; g > 32; g= g>> 1) { for (v=128; v >=32; v = v>> 1) { ainfo->priority = p; ainfo->gburst_size = g; ainfo->vburst_size = v; done = nv3_arb(res_info, state,ainfo); if (done && (g==128)) if ((res_info->graphics_lwm + g) > 256) done = 0; if (done) goto Done; } } } Done: return done; } static void nv3CalcArbitration ( nv3_fifo_info * res_info, nv3_sim_state * state ) { nv3_fifo_info save_info; nv3_arb_info ainfo; char res_gr, res_vid; ainfo.gr_en = 1; ainfo.vid_en = state->enable_video; ainfo.vid_only_once = 0; ainfo.gr_only_once = 0; ainfo.gdrain_rate = (int) state->pclk_khz * (state->pix_bpp/8); ainfo.vdrain_rate = (int) state->pclk_khz * 2; if (state->video_scale != 0) ainfo.vdrain_rate = ainfo.vdrain_rate/state->video_scale; ainfo.mdrain_rate = 33000; res_info->rtl_values = 0; if (!state->gr_during_vid && 
state->enable_video) { ainfo.gr_only_once = 1; ainfo.gr_en = 1; ainfo.gdrain_rate = 0; res_vid = nv3_get_param(res_info, state, &ainfo); res_vid = ainfo.converged; save_info.video_lwm = res_info->video_lwm; save_info.video_burst_size = res_info->video_burst_size; ainfo.vid_en = 1; ainfo.vid_only_once = 1; ainfo.gr_en = 1; ainfo.gdrain_rate = (int) state->pclk_khz * (state->pix_bpp/8); ainfo.vdrain_rate = 0; res_gr = nv3_get_param(res_info, state, &ainfo); res_gr = ainfo.converged; res_info->video_lwm = save_info.video_lwm; res_info->video_burst_size = save_info.video_burst_size; res_info->valid = res_gr & res_vid; } else { if (!ainfo.gr_en) ainfo.gdrain_rate = 0; if (!ainfo.vid_en) ainfo.vdrain_rate = 0; res_gr = nv3_get_param(res_info, state, &ainfo); res_info->valid = ainfo.converged; } } static void nv3UpdateArbitrationSettings ( unsigned VClk, unsigned pixelDepth, unsigned *burst, unsigned *lwm, RIVA_HW_INST *chip ) { nv3_fifo_info fifo_data; nv3_sim_state sim_data; unsigned int M, N, P, pll, MClk; pll = NV_RD32(&chip->PRAMDAC0[0x00000504/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; MClk = (N * chip->CrystalFreqKHz / M) >> P; sim_data.pix_bpp = (char)pixelDepth; sim_data.enable_video = 0; sim_data.enable_mp = 0; sim_data.video_scale = 1; sim_data.memory_width = (NV_RD32(&chip->PEXTDEV[0x00000000/4], 0) & 0x10) ? 
128 : 64; sim_data.memory_width = 128; sim_data.mem_latency = 9; sim_data.mem_aligned = 1; sim_data.mem_page_miss = 11; sim_data.gr_during_vid = 0; sim_data.pclk_khz = VClk; sim_data.mclk_khz = MClk; nv3CalcArbitration(&fifo_data, &sim_data); if (fifo_data.valid) { int b = fifo_data.graphics_burst_size >> 4; *burst = 0; while (b >>= 1) (*burst)++; *lwm = fifo_data.graphics_lwm >> 3; } else { *lwm = 0x24; *burst = 0x2; } } static void nv4CalcArbitration ( nv4_fifo_info *fifo, nv4_sim_state *arb ) { int data, pagemiss, cas,width, video_enable, color_key_enable, bpp, align; int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs; int found, mclk_extra, mclk_loop, cbs, m1, p1; int mclk_freq, pclk_freq, nvclk_freq, mp_enable; int us_m, us_n, us_p, video_drain_rate, crtc_drain_rate; int vpm_us, us_video, vlwm, video_fill_us, cpm_us, us_crt,clwm; int craw, vraw; fifo->valid = 1; pclk_freq = arb->pclk_khz; mclk_freq = arb->mclk_khz; nvclk_freq = arb->nvclk_khz; pagemiss = arb->mem_page_miss; cas = arb->mem_latency; width = arb->memory_width >> 6; video_enable = arb->enable_video; color_key_enable = arb->gr_during_vid; bpp = arb->pix_bpp; align = arb->mem_aligned; mp_enable = arb->enable_mp; clwm = 0; vlwm = 0; cbs = 128; pclks = 2; nvclks = 2; nvclks += 2; nvclks += 1; mclks = 5; mclks += 3; mclks += 1; mclks += cas; mclks += 1; mclks += 1; mclks += 1; mclks += 1; mclk_extra = 3; nvclks += 2; nvclks += 1; nvclks += 1; nvclks += 1; if (mp_enable) mclks+=4; nvclks += 0; pclks += 0; found = 0; vbs = 0; while (found != 1) { fifo->valid = 1; found = 1; mclk_loop = mclks+mclk_extra; us_m = mclk_loop *1000*1000 / mclk_freq; us_n = nvclks*1000*1000 / nvclk_freq; us_p = nvclks*1000*1000 / pclk_freq; if (video_enable) { video_drain_rate = pclk_freq * 2; crtc_drain_rate = pclk_freq * bpp/8; vpagemiss = 2; vpagemiss += 1; crtpagemiss = 2; vpm_us = (vpagemiss * pagemiss)*1000*1000/mclk_freq; if (nvclk_freq * 2 > mclk_freq * width) video_fill_us = cbs*1000*1000 / 16 / nvclk_freq ; else 
/* Continuation of nv4CalcArbitration(): inside the convergence loop,
 * completing the video-enabled branch. */
                video_fill_us = cbs*1000*1000 / (8 * width) / mclk_freq;
            us_video = vpm_us + us_m + us_n + us_p + video_fill_us;
            vlwm = us_video * video_drain_rate/(1000*1000);
            vlwm++;                 /* round up: fixed point <= float - 1 */
            /* Shrink the video burst as the watermark grows. */
            vbs = 128;
            if (vlwm > 128)
                vbs = 64;
            if (vlwm > (256-64))
                vbs = 32;
            if (nvclk_freq * 2 > mclk_freq * width)
                video_fill_us = vbs *1000*1000/ 16 / nvclk_freq ;
            else
                video_fill_us = vbs*1000*1000 / (8 * width) / mclk_freq;
            cpm_us = crtpagemiss * pagemiss *1000*1000/ mclk_freq;
            us_crt = us_video +video_fill_us +cpm_us +us_m + us_n +us_p ;
            clwm = us_crt * crtc_drain_rate/(1000*1000);
            clwm++;
        }
        else
        {
            /* CRT only: one page-miss path, no video terms. */
            crtc_drain_rate = pclk_freq * bpp/8;
            crtpagemiss = 2;
            crtpagemiss += 1;
            cpm_us = crtpagemiss * pagemiss *1000*1000/ mclk_freq;
            us_crt = cpm_us + us_m + us_n + us_p ;
            clwm = us_crt * crtc_drain_rate/(1000*1000);
            clwm++;
        }
        /* Overfill check: can the FIFO drain the burst overshoot in time? */
        m1 = clwm + cbs - 512;
        p1 = m1 * pclk_freq / mclk_freq;
        p1 = p1 * bpp / 8;
        if ((p1 < m1) && (m1 > 0))
        {
            fifo->valid = 0;
            found = 0;
            if (mclk_extra ==0)
                found = 1;      /* margin exhausted — stop retrying */
            mclk_extra--;
        }
        else if (video_enable)
        {
            if ((clwm > 511) || (vlwm > 255))
            {
                fifo->valid = 0;
                found = 0;
                if (mclk_extra ==0)
                    found = 1;
                mclk_extra--;
            }
        }
        else
        {
            if (clwm > 519)     /* CRT-only watermark limit */
            {
                fifo->valid = 0;
                found = 0;
                if (mclk_extra ==0)
                    found = 1;
                mclk_extra--;
            }
        }
        craw = clwm;
        vraw = vlwm;
        /* Clamp to minimums and publish the results. */
        if (clwm < 384)
            clwm = 384;
        if (vlwm < 128)
            vlwm = 128;
        data = (int)(clwm);
        fifo->graphics_lwm = data;
        fifo->graphics_burst_size = 128;
        data = (int)((vlwm+15));
        fifo->video_lwm = data;
        fifo->video_burst_size = vbs;
    }
}

/*
 * nv4UpdateArbitrationSettings - program FIFO arbitration for NV4 (TNT).
 * @VClk:       pixel clock in kHz
 * @pixelDepth: bits per pixel
 * @burst:      out: graphics burst size encoded as log2(size >> 4)
 * @lwm:        out: graphics low-water mark in 8-byte units
 * @chip:       hardware instance
 *
 * Reads MClk/NVClk from their PLLs and memory timing from PFB config,
 * then runs nv4CalcArbitration().  The outputs are only written when
 * the calculation converged.
 */
static void nv4UpdateArbitrationSettings
(
    unsigned      VClk,
    unsigned      pixelDepth,
    unsigned     *burst,
    unsigned     *lwm,
    RIVA_HW_INST *chip
)
{
    nv4_fifo_info fifo_data;
    nv4_sim_state sim_data;
    unsigned int M, N, P, pll, MClk, NVClk, cfg1;

    /* Memory clock PLL. */
    pll = NV_RD32(&chip->PRAMDAC0[0x00000504/4], 0);
    M = (pll >> 0) & 0xFF;
    N = (pll >> 8) & 0xFF;
    P = (pll >> 16) & 0x0F;
    MClk = (N * chip->CrystalFreqKHz / M) >> P;
    /* Core clock PLL (expression continues on the next chunk line). */
    pll = NV_RD32(&chip->PRAMDAC0[0x00000500/4], 0);
    M = (pll >> 0) & 0xFF;
    N = (pll >> 8) & 0xFF;
    P = (pll >> 16) & 0x0F;
    NVClk = (N * chip->CrystalFreqKHz /
/* Continuation of nv4UpdateArbitrationSettings(): finish the NVClk
 * PLL computation started on the previous chunk line. */
             M) >> P;
    cfg1 = NV_RD32(&chip->PFB[0x00000204/4], 0);
    sim_data.pix_bpp       = (char)pixelDepth;
    sim_data.enable_video  = 0;
    sim_data.enable_mp     = 0;
    /* Strap bit 4: 128- vs 64-bit memory bus. */
    sim_data.memory_width  = (NV_RD32(&chip->PEXTDEV[0x00000000/4], 0) & 0x10) ? 128 : 64;
    sim_data.mem_latency   = (char)cfg1 & 0x0F;
    sim_data.mem_aligned   = 1;
    sim_data.mem_page_miss = (char)(((cfg1 >> 4) &0x0F) + ((cfg1 >> 31) & 0x01));
    sim_data.gr_during_vid = 0;
    sim_data.pclk_khz      = VClk;
    sim_data.mclk_khz      = MClk;
    sim_data.nvclk_khz     = NVClk;
    nv4CalcArbitration(&fifo_data, &sim_data);
    if (fifo_data.valid)
    {
        int b = fifo_data.graphics_burst_size >> 4;
        *burst = 0;
        while (b >>= 1)
            (*burst)++;
        *lwm = fifo_data.graphics_lwm >> 3;
    }
}

/*
 * nv10CalcArbitration - compute FIFO watermarks / burst sizes (NV10+).
 * @fifo: out: watermarks, burst sizes and a 'valid' flag
 * @arb:  in: clocks (kHz), memory type/width/timing and display params
 *
 * Iterates, decrementing min_mclk_extra or halving the CRT burst size
 * on each failed attempt, until the watermarks fit the 1024-entry FIFO.
 */
static void nv10CalcArbitration
(
    nv10_fifo_info *fifo,
    nv10_sim_state *arb
)
{
    int data, pagemiss, cas,width, video_enable, color_key_enable, bpp, align;
    int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs;
    int nvclk_fill, us_extra;
    int found, mclk_extra, mclk_loop, cbs, m1;
    int mclk_freq, pclk_freq, nvclk_freq, mp_enable;
    int us_m, us_m_min, us_n, us_p, video_drain_rate, crtc_drain_rate;
    int vus_m, vus_n, vus_p;
    int vpm_us, us_video, vlwm, cpm_us, us_crt,clwm;
    int clwm_rnd_down;
    int craw, m2us, us_pipe, us_pipe_min, vus_pipe, p1clk, p2;
    int pclks_2_top_fifo, min_mclk_extra;
    int us_min_mclk_extra;

    fifo->valid = 1;
    pclk_freq = arb->pclk_khz; /* freq in KHz */
    mclk_freq = arb->mclk_khz;
    nvclk_freq = arb->nvclk_khz;
    pagemiss = arb->mem_page_miss;
    cas = arb->mem_latency;
    width = arb->memory_width/64;   /* bus width in 64-bit units */
    video_enable = arb->enable_video;
    color_key_enable = arb->gr_during_vid;
    bpp = arb->pix_bpp;
    align = arb->mem_aligned;
    mp_enable = arb->enable_mp;
    clwm = 0;
    vlwm = 1024;
    cbs = 512;
    vbs = 512;
    /* Fixed pipeline latencies (comment continues on the next chunk line). */
    pclks = 4; /* lwm detect. */
    nvclks = 3; /* lwm -> sync. */
    nvclks += 2; /* fbi bus cycles (1 req + 1 busy) */
    mclks = 1; /* 2 edge sync. may be very close to edge so just put one.
*/
    /* (closes the comment opened at the end of the previous chunk line) */
    mclks += 1; /* arb_hp_req */
    mclks += 5; /* ap_hp_req tiling pipeline */
    mclks += 2; /* tc_req latency fifo */
    mclks += 2; /* fb_cas_n_ memory request to fbio block */
    mclks += 7; /* sm_d_rdv data returned from fbio block */
    /* fb.rd.d.Put_gc need to accumulate 256 bits for read */
    if (arb->memory_type == 0)
        if (arb->memory_width == 64) /* 64 bit bus */
            mclks += 4;
        else
            mclks += 2;
    else
        if (arb->memory_width == 64) /* 64 bit bus */
            mclks += 2;
        else
            mclks += 1;
    /* Margin depends on video and bus width; relaxed on retry below. */
    if ((!video_enable) && (arb->memory_width == 128))
    {
        mclk_extra = (bpp == 32) ? 31 : 42; /* Margin of error */
        min_mclk_extra = 17;
    }
    else
    {
        mclk_extra = (bpp == 32) ? 8 : 4; /* Margin of error */
        /* mclk_extra = 4; */ /* Margin of error */
        min_mclk_extra = 18;
    }
    nvclks += 1; /* 2 edge sync. may be very close to edge so just put one. */
    nvclks += 1; /* fbi_d_rdv_n */
    nvclks += 1; /* Fbi_d_rdata */
    nvclks += 1; /* crtfifo load */
    if(mp_enable)
        mclks+=4; /* Mp can get in with a burst of 8. */
    /* Extra clocks determined by heuristics */
    nvclks += 0;
    pclks += 0;
    found = 0;
    while(found != 1)
    {
        fifo->valid = 1;
        found = 1;
        mclk_loop = mclks+mclk_extra;
        us_m = mclk_loop *1000*1000 / mclk_freq; /* Mclk latency in us */
        us_m_min = mclks * 1000*1000 / mclk_freq; /* Minimum Mclk latency in us */
        us_min_mclk_extra = min_mclk_extra *1000*1000 / mclk_freq;
        us_n = nvclks*1000*1000 / nvclk_freq;/* nvclk latency in us */
        us_p = pclks*1000*1000 / pclk_freq;/* nvclk latency in us */
        us_pipe = us_m + us_n + us_p;
        us_pipe_min = us_m_min + us_n + us_p;
        us_extra = 0;
        /* Separate latency terms for the video read path. */
        vus_m = mclk_loop *1000*1000 / mclk_freq; /* Mclk latency in us */
        vus_n = (4)*1000*1000 / nvclk_freq;/* nvclk latency in us */
        vus_p = 0*1000*1000 / pclk_freq;/* pclk latency in us */
        vus_pipe = vus_m + vus_n + vus_p;
        if(video_enable)
        {
            video_drain_rate = pclk_freq * 4; /* MB/s */
            crtc_drain_rate = pclk_freq * bpp/8; /* MB/s */
            vpagemiss = 1; /* self generating page miss */
            vpagemiss += 1; /* One higher priority before */
            crtpagemiss = 2; /* self generating page
miss */
            /* (closes the comment opened on the previous chunk line) */
            if(mp_enable)
                crtpagemiss += 1; /* if MA0 conflict */
            vpm_us = (vpagemiss * pagemiss)*1000*1000/mclk_freq;
            us_video = vpm_us + vus_m; /* Video has separate read return path */
            cpm_us = crtpagemiss  * pagemiss *1000*1000/ mclk_freq;
            us_crt =
                us_video  /* Wait for video */
                +cpm_us /* CRT Page miss */
                +us_m + us_n +us_p /* other latency */
                ;
            clwm = us_crt * crtc_drain_rate/(1000*1000);
            clwm++; /* fixed point <= float_point - 1. Fixes that */
        }
        else
        {
            crtc_drain_rate = pclk_freq * bpp/8; /* bpp * pclk/8 */
            crtpagemiss = 1; /* self generating page miss */
            crtpagemiss += 1; /* MA0 page miss */
            if(mp_enable)
                crtpagemiss += 1; /* if MA0 conflict */
            cpm_us = crtpagemiss  * pagemiss *1000*1000/ mclk_freq;
            us_crt = cpm_us + us_m + us_n + us_p ;
            clwm = us_crt * crtc_drain_rate/(1000*1000);
            clwm++; /* fixed point <= float_point - 1. Fixes that */
            /*
            //
            // Another concern, only for high pclks so don't do this
            // with video:
            // What happens if the latency to fetch the cbs is so large that
            // fifo empties. In that case we need to have an alternate clwm value
            // based off the total burst fetch
            //
            us_crt = (cbs * 1000 * 1000)/ (8*width)/mclk_freq ;
            us_crt = us_crt + us_m + us_n + us_p + (4 * 1000 * 1000)/mclk_freq;
            clwm_mt = us_crt * crtc_drain_rate/(1000*1000);
            clwm_mt ++;
            if(clwm_mt > clwm)
                clwm = clwm_mt;
            */
            /* Finally, a heuristic check when width == 64 bits */
            if(width == 1){
                nvclk_fill = nvclk_freq * 8;
                if(crtc_drain_rate * 100 >= nvclk_fill * 102)
                        clwm = 0xfff; /*Large number to fail */
                else if(crtc_drain_rate * 100 >= nvclk_fill * 98) {
                    clwm = 1024;
                    cbs = 512;
                    us_extra = (cbs * 1000 * 1000)/ (8*width)/mclk_freq ;
                }
            }
        }
        /*
          Overfill check:
        */
        clwm_rnd_down = ((int)clwm/8)*8;
        if (clwm_rnd_down < clwm)
            clwm += 8;
        m1 = clwm + cbs -  1024; /* Amount of overfill */
        m2us = us_pipe_min + us_min_mclk_extra;
        pclks_2_top_fifo = (1024-clwm)/(8*width);  /* pclk cycles to drain */
        p1clk = m2us * pclk_freq/(1000*1000);
        p2 = p1clk * bpp / 8; /* bytes drained.
*/
        /* (closes the comment opened at the end of the previous chunk line) */
        if((p2 < m1) && (m1 > 0)) {
            /* Overfill cannot drain in time: shrink the burst or fail. */
            fifo->valid = 0;
            found = 0;
            if(min_mclk_extra == 0)   {
                if(cbs <= 32) {
                    found = 1; /* Can't adjust anymore! */
                } else {
                    cbs = cbs/2;  /* reduce the burst size */
                }
            } else {
                min_mclk_extra--;
            }
        } else {
            if (clwm > 1023){ /* Have some margin */
                fifo->valid = 0;
                found = 0;
                if(min_mclk_extra == 0)
                    found = 1; /* Can't adjust anymore! */
                else
                    min_mclk_extra--;
            }
        }
        craw = clwm;
        /* Keep the watermark high enough to cover the burst. */
        if(clwm < (1024-cbs+8))
            clwm = 1024-cbs+8;
        data = (int)(clwm);
        /*  printf("CRT LWM: %f bytes, prog: 0x%x, bs: 256\n", clwm, data ); */
        fifo->graphics_lwm = data;
        fifo->graphics_burst_size = cbs;
        /*  printf("VID LWM: %f bytes, prog: 0x%x, bs: %d\n, ", vlwm, data, vbs ); */
        /* Video watermark/burst are fixed on NV10+. */
        fifo->video_lwm = 1024;
        fifo->video_burst_size = 512;
    }
}

/*
 * nv10UpdateArbitrationSettings - program FIFO arbitration for NV10+.
 * @VClk:       pixel clock in kHz
 * @pixelDepth: bits per pixel
 * @burst:      out: graphics burst size encoded as log2(size >> 4)
 * @lwm:        out: graphics low-water mark in 8-byte units
 * @chip:       hardware instance
 *
 * Reads MClk/NVClk from their PLLs, memory type/width from PFB/PEXTDEV
 * straps and timing from PFB config, then runs nv10CalcArbitration().
 */
static void nv10UpdateArbitrationSettings
(
    unsigned      VClk,
    unsigned      pixelDepth,
    unsigned     *burst,
    unsigned     *lwm,
    RIVA_HW_INST *chip
)
{
    nv10_fifo_info fifo_data;
    nv10_sim_state sim_data;
    unsigned int M, N, P, pll, MClk, NVClk, cfg1;

    /* Memory clock PLL. */
    pll = NV_RD32(&chip->PRAMDAC0[0x00000504/4], 0);
    M = (pll >> 0) & 0xFF;
    N = (pll >> 8) & 0xFF;
    P = (pll >> 16) & 0x0F;
    MClk = (N * chip->CrystalFreqKHz / M) >> P;
    /* Core clock PLL. */
    pll = NV_RD32(&chip->PRAMDAC0[0x00000500/4], 0);
    M = (pll >> 0) & 0xFF;
    N = (pll >> 8) & 0xFF;
    P = (pll >> 16) & 0x0F;
    NVClk = (N * chip->CrystalFreqKHz / M) >> P;
    cfg1 = NV_RD32(&chip->PFB[0x00000204/4], 0);
    sim_data.pix_bpp      = (char)pixelDepth;
    sim_data.enable_video = 0;
    sim_data.enable_mp    = 0;
    sim_data.memory_type  = (NV_RD32(&chip->PFB[0x00000200/4], 0) & 0x01) ? 1 : 0;
    /* Strap bit 4 selects the bus width (expression continues on the
     * next chunk line). */
    sim_data.memory_width = (NV_RD32(&chip->PEXTDEV[0x00000000/4], 0) & 0x10) ?
/* Continuation of nv10UpdateArbitrationSettings(): finish the ternary
 * started on the previous chunk line. */
                            128 : 64;
    sim_data.mem_latency   = (char)cfg1 & 0x0F;
    sim_data.mem_aligned   = 1;
    sim_data.mem_page_miss = (char)(((cfg1 >> 4) &0x0F) + ((cfg1 >> 31) & 0x01));
    sim_data.gr_during_vid = 0;
    sim_data.pclk_khz      = VClk;
    sim_data.mclk_khz      = MClk;
    sim_data.nvclk_khz     = NVClk;
    nv10CalcArbitration(&fifo_data, &sim_data);
    if (fifo_data.valid)
    {
        int b = fifo_data.graphics_burst_size >> 4;
        *burst = 0;
        while (b >>= 1)
            (*burst)++;
        *lwm = fifo_data.graphics_lwm >> 3;
    }
}

/*
 * nForceUpdateArbitrationSettings - arbitration for nForce IGP chips.
 * @VClk:       pixel clock in kHz
 * @pixelDepth: bits per pixel
 * @burst:      out: graphics burst size encoded as log2(size >> 4)
 * @lwm:        out: graphics low-water mark in 8-byte units
 * @chip:       hardware instance
 *
 * The integrated GPU shares system memory, so MClk and the memory type
 * come from the nForce chipset's PCI config space (devices 0:3 and 0:1)
 * rather than from GPU PLL/strap registers.
 */
static void nForceUpdateArbitrationSettings
(
    unsigned      VClk,
    unsigned      pixelDepth,
    unsigned     *burst,
    unsigned     *lwm,
    RIVA_HW_INST *chip
)
{
    nv10_fifo_info fifo_data;
    nv10_sim_state sim_data;
    unsigned int M, N, P, pll, MClk, NVClk;
    unsigned int uMClkPostDiv;
    struct pci_dev *dev;

    /* Memory clock post-divider lives in chipset PCI config 0x6C. */
    dev = pci_get_bus_and_slot(0, 3);
    pci_read_config_dword(dev, 0x6C, &uMClkPostDiv);
    pci_dev_put(dev);
    uMClkPostDiv = (uMClkPostDiv >> 8) & 0xf;
    if(!uMClkPostDiv) uMClkPostDiv = 4;
    MClk = 400000 / uMClkPostDiv;    /* base 400 MHz, in kHz */
    /* Core clock PLL still comes from the GPU. */
    pll = NV_RD32(&chip->PRAMDAC0[0x00000500/4], 0);
    M = (pll >> 0) & 0xFF;
    N = (pll >> 8) & 0xFF;
    P = (pll >> 16) & 0x0F;
    NVClk = (N * chip->CrystalFreqKHz / M) >> P;
    sim_data.pix_bpp      = (char)pixelDepth;
    sim_data.enable_video = 0;
    sim_data.enable_mp    = 0;
    /* Memory type (DDR bit) from chipset PCI config 0x7C. */
    dev = pci_get_bus_and_slot(0, 1);
    pci_read_config_dword(dev, 0x7C, &sim_data.memory_type);
    pci_dev_put(dev);
    sim_data.memory_type   = (sim_data.memory_type >> 12) & 1;
    sim_data.memory_width  = 64;
    sim_data.mem_latency   = 3;
    sim_data.mem_aligned   = 1;
    sim_data.mem_page_miss = 10;
    sim_data.gr_during_vid = 0;
    sim_data.pclk_khz      = VClk;
    sim_data.mclk_khz      = MClk;
    sim_data.nvclk_khz     = NVClk;
    nv10CalcArbitration(&fifo_data, &sim_data);
    if (fifo_data.valid)
    {
        int b = fifo_data.graphics_burst_size >> 4;
        *burst = 0;
        while (b >>= 1)
            (*burst)++;
        *lwm = fifo_data.graphics_lwm >> 3;
    }
}

/****************************************************************************\
*                                                                            *
*                          RIVA Mode State Routines                          *
*                                                                            *
\****************************************************************************/

/*
 * Calculate the Video Clock
parameters for the PLL.
 */
/*
 * CalcVClock - find M/N/P PLL coefficients closest to a target clock.
 * @clockIn:  requested pixel clock (kHz)
 * @clockOut: out: achieved clock (kHz)
 * @mOut/@nOut/@pOut: out: chosen PLL coefficients
 * @chip:     hardware instance (crystal frequency, architecture, limits)
 *
 * Exhaustive search over P and M (ranges depend on crystal and whether
 * the chip is an NV3), minimizing |achieved - requested|.
 */
static int CalcVClock
(
    int           clockIn,
    int          *clockOut,
    int          *mOut,
    int          *nOut,
    int          *pOut,
    RIVA_HW_INST *chip
)
{
    unsigned lowM, highM, highP;
    unsigned DeltaNew, DeltaOld;
    unsigned VClk, Freq;
    unsigned M, N, P;

    DeltaOld = 0xFFFFFFFF;
    VClk = (unsigned)clockIn;
    /* M range depends on the crystal; NV3 allows one less at the top. */
    if (chip->CrystalFreqKHz == 13500)
    {
        lowM  = 7;
        highM = 13 - (chip->Architecture == NV_ARCH_03);
    }
    else
    {
        lowM  = 8;
        highM = 14 - (chip->Architecture == NV_ARCH_03);
    }
    highP = 4 - (chip->Architecture == NV_ARCH_03);
    for (P = 0; P <= highP; P ++)
    {
        Freq = VClk << P;
        /* VCO must stay within its legal operating range. */
        if ((Freq >= 128000) && (Freq <= chip->MaxVClockFreqKHz))
        {
            for (M = lowM; M <= highM; M++)
            {
                N = (VClk << P) * M / chip->CrystalFreqKHz;
                if(N <= 255) {
                    Freq = (chip->CrystalFreqKHz * N / M) >> P;
                    if (Freq > VClk)
                        DeltaNew = Freq - VClk;
                    else
                        DeltaNew = VClk - Freq;
                    if (DeltaNew < DeltaOld)
                    {
                        *mOut     = M;
                        *nOut     = N;
                        *pOut     = P;
                        *clockOut = Freq;
                        DeltaOld  = DeltaNew;
                    }
                }
            }
        }
    }
    /* non-zero: M/N/P/clock values assigned.  zero: error (not set) */
    return (DeltaOld != 0xFFFFFFFF);
}

/*
 * Calculate extended mode parameters (SVGA) and save in a
 * mode state structure.
 */
/*
 * CalcStateExt - fill a RIVA_HW_STATE for the requested display mode.
 * Returns 0 on success, -EINVAL if no usable pixel clock was found.
 * Per-architecture arbitration settings, cursor, PLL select and config
 * registers are computed here; the state is applied later by
 * LoadStateExt().
 */
int CalcStateExt
(
    RIVA_HW_INST  *chip,
    RIVA_HW_STATE *state,
    int            bpp,
    int            width,
    int            hDisplaySize,
    int            height,
    int            dotClock
)
{
    int pixelDepth;
    int uninitialized_var(VClk),uninitialized_var(m),
        uninitialized_var(n),	uninitialized_var(p);

    /*
     * Save mode parameters.
     */
    state->bpp    = bpp;    /* this is not bitsPerPixel, it's 8,15,16,32 */
    state->width  = width;
    state->height = height;
    /*
     * Extended RIVA registers.
     */
    pixelDepth = (bpp + 1)/8;   /* bytes per pixel */
    if (!CalcVClock(dotClock, &VClk, &m, &n, &p, chip))
    	return -EINVAL;
    switch (chip->Architecture)
    {
        case NV_ARCH_03:
            nv3UpdateArbitrationSettings(VClk,
                                         pixelDepth * 8,
                                        &(state->arbitration0),
                                        &(state->arbitration1),
                                         chip);
            state->cursor0  = 0x00;
            state->cursor1  = 0x78;
            state->cursor2  = 0x00000000;
            state->pllsel   = 0x10010100;
            /* (ternary continues on the next chunk line) */
            state->config   = ((width + 31)/32)
                            | (((pixelDepth > 2) ?
/* Continuation of CalcStateExt(): finish the NV3 config ternary. */
                               3 : pixelDepth) << 8) | 0x1000;
            state->general  = 0x00100100;
            state->repaint1 = hDisplaySize < 1280 ? 0x06 : 0x02;
            break;
        case NV_ARCH_04:
            nv4UpdateArbitrationSettings(VClk,
                                         pixelDepth * 8,
                                        &(state->arbitration0),
                                        &(state->arbitration1),
                                         chip);
            state->cursor0  = 0x00;
            state->cursor1  = 0xFC;
            state->cursor2  = 0x00000000;
            state->pllsel   = 0x10000700;
            state->config   = 0x00001114;
            state->general  = bpp == 16 ? 0x00101100 : 0x00100100;
            state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
            break;
        case NV_ARCH_10:
        case NV_ARCH_20:
        case NV_ARCH_30:
            /* Integrated nForce GPUs need the PCI-config based path. */
            if((chip->Chipset == NV_CHIP_IGEFORCE2) ||
               (chip->Chipset == NV_CHIP_0x01F0))
            {
                nForceUpdateArbitrationSettings(VClk,
                                          pixelDepth * 8,
                                         &(state->arbitration0),
                                         &(state->arbitration1),
                                          chip);
            } else {
                nv10UpdateArbitrationSettings(VClk,
                                          pixelDepth * 8,
                                         &(state->arbitration0),
                                         &(state->arbitration1),
                                          chip);
            }
            /* Hardware cursor position registers are derived from the
             * cursor surface offset. */
            state->cursor0  = 0x80 | (chip->CursorStart >> 17);
            state->cursor1  = (chip->CursorStart >> 11) << 2;
            state->cursor2  = chip->CursorStart >> 24;
            state->pllsel   = 0x10000700;
            state->config   = NV_RD32(&chip->PFB[0x00000200/4], 0);
            state->general  = bpp == 16 ? 0x00101100 : 0x00100100;
            state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
            break;
    }

    /* Paul Richards: below if block borks things in kernel for some reason */
    /* Tony: Below is needed to set hardware in DirectColor */
    if((bpp != 8) && (chip->Architecture != NV_ARCH_03))
        state->general |= 0x00000030;

    state->vpll     = (p << 16) | (n << 8) | m;
    state->repaint0 = (((width/8)*pixelDepth) & 0x700) >> 3;
    state->pixel    = pixelDepth > 2   ? 3    : pixelDepth;
    state->offset0  =
    state->offset1  =
    state->offset2  =
    state->offset3  = 0;
    state->pitch0   =
    state->pitch1   =
    state->pitch2   =
    state->pitch3   = pixelDepth * width;
    return 0;
}

/*
 * Load fixed function state and pre-calculated/stored state.
 */
/*
 * Table-driven register loaders.  Each tbl##Table##dev array holds
 * { word offset, value } pairs; sizeof/8 gives the pair count.  The
 * "#if 0" versions wrote through the mapping directly; the live ones
 * go through NV_WR32() for proper MMIO accessors.
 */
#if 0
#define LOAD_FIXED_STATE(tbl,dev)                                       \
    for (i = 0; i < sizeof(tbl##Table##dev)/8; i++)                 \
        chip->dev[tbl##Table##dev[i][0]] = tbl##Table##dev[i][1]
#define LOAD_FIXED_STATE_8BPP(tbl,dev)                                  \
    for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++)            \
        chip->dev[tbl##Table##dev##_8BPP[i][0]] = tbl##Table##dev##_8BPP[i][1]
#define LOAD_FIXED_STATE_15BPP(tbl,dev)                                 \
    for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++)           \
        chip->dev[tbl##Table##dev##_15BPP[i][0]] = tbl##Table##dev##_15BPP[i][1]
#define LOAD_FIXED_STATE_16BPP(tbl,dev)                                 \
    for (i = 0; i < sizeof(tbl##Table##dev##_16BPP)/8; i++)           \
        chip->dev[tbl##Table##dev##_16BPP[i][0]] = tbl##Table##dev##_16BPP[i][1]
#define LOAD_FIXED_STATE_32BPP(tbl,dev)                                 \
    for (i = 0; i < sizeof(tbl##Table##dev##_32BPP)/8; i++)           \
        chip->dev[tbl##Table##dev##_32BPP[i][0]] = tbl##Table##dev##_32BPP[i][1]
#endif

#define LOAD_FIXED_STATE(tbl,dev)                                       \
    for (i = 0; i < sizeof(tbl##Table##dev)/8; i++)                 \
        NV_WR32(&chip->dev[tbl##Table##dev[i][0]], 0, tbl##Table##dev[i][1])
#define LOAD_FIXED_STATE_8BPP(tbl,dev)                                  \
    for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++)            \
        NV_WR32(&chip->dev[tbl##Table##dev##_8BPP[i][0]], 0, tbl##Table##dev##_8BPP[i][1])
#define LOAD_FIXED_STATE_15BPP(tbl,dev)                                 \
    for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++)           \
        NV_WR32(&chip->dev[tbl##Table##dev##_15BPP[i][0]], 0, tbl##Table##dev##_15BPP[i][1])
#define LOAD_FIXED_STATE_16BPP(tbl,dev)                                 \
    for (i = 0; i < sizeof(tbl##Table##dev##_16BPP)/8; i++)           \
        NV_WR32(&chip->dev[tbl##Table##dev##_16BPP[i][0]], 0, tbl##Table##dev##_16BPP[i][1])
#define LOAD_FIXED_STATE_32BPP(tbl,dev)                                 \
    for (i = 0; i < sizeof(tbl##Table##dev##_32BPP)/8; i++)           \
        NV_WR32(&chip->dev[tbl##Table##dev##_32BPP[i][0]], 0, tbl##Table##dev##_32BPP[i][1])

/*
 * UpdateFifoState - (re)bind the FIFO object tables for this chip and
 * set up the Tri03/Tri05 engine pointers at FIFO offset 0xE000.
 */
static void UpdateFifoState
(
    RIVA_HW_INST  *chip
)
{
    int i;

    switch (chip->Architecture)
    {
        case NV_ARCH_04:
            LOAD_FIXED_STATE(nv4,FIFO);
            chip->Tri03 = NULL;
            chip->Tri05 = (RivaTexturedTriangle05 __iomem *)&(chip->FIFO[0x0000E000/4]);
            break;
        case NV_ARCH_10:
        case
/* Continuation of UpdateFifoState(): remaining architecture labels. */
             NV_ARCH_20:
        case NV_ARCH_30:
            /*
             * Initialize state for the RivaTriangle3D05 routines.
             */
            LOAD_FIXED_STATE(nv10tri05,PGRAPH);
            LOAD_FIXED_STATE(nv10,FIFO);
            chip->Tri03 = NULL;
            chip->Tri05 = (RivaTexturedTriangle05 __iomem *)&(chip->FIFO[0x0000E000/4]);
            break;
    }
}

/*
 * LoadStateExt - apply a pre-computed RIVA_HW_STATE to the hardware:
 * fixed-function tables, per-bpp PRAMIN/PGRAPH tables, surface
 * offsets/pitches and (further down) the CRTC mode registers.
 */
static void LoadStateExt
(
    RIVA_HW_INST  *chip,
    RIVA_HW_STATE *state
)
{
    int i;

    /*
     * Load HW fixed function state.
     */
    LOAD_FIXED_STATE(Riva,PMC);
    LOAD_FIXED_STATE(Riva,PTIMER);
    switch (chip->Architecture)
    {
        case NV_ARCH_03:
            /*
             * Make sure frame buffer config gets set before loading PRAMIN.
             */
            NV_WR32(chip->PFB, 0x00000200, state->config);
            LOAD_FIXED_STATE(nv3,PFIFO);
            LOAD_FIXED_STATE(nv3,PRAMIN);
            LOAD_FIXED_STATE(nv3,PGRAPH);
            switch (state->bpp)
            {
                case 15:
                case 16:
                    LOAD_FIXED_STATE_15BPP(nv3,PRAMIN);
                    LOAD_FIXED_STATE_15BPP(nv3,PGRAPH);
                    chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]);
                    break;
                case 24:
                case 32:
                    LOAD_FIXED_STATE_32BPP(nv3,PRAMIN);
                    LOAD_FIXED_STATE_32BPP(nv3,PGRAPH);
                    chip->Tri03 = NULL;
                    break;
                case 8:
                default:
                    LOAD_FIXED_STATE_8BPP(nv3,PRAMIN);
                    LOAD_FIXED_STATE_8BPP(nv3,PGRAPH);
                    chip->Tri03 = NULL;
                    break;
            }
            /* Populate the NV3 instance-memory page table. */
            for (i = 0x00000; i < 0x00800; i++)
                NV_WR32(&chip->PRAMIN[0x00000502 + i], 0, (i << 12) | 0x03);
            NV_WR32(chip->PGRAPH, 0x00000630, state->offset0);
            NV_WR32(chip->PGRAPH, 0x00000634, state->offset1);
            NV_WR32(chip->PGRAPH, 0x00000638, state->offset2);
            NV_WR32(chip->PGRAPH, 0x0000063C, state->offset3);
            NV_WR32(chip->PGRAPH, 0x00000650, state->pitch0);
            NV_WR32(chip->PGRAPH, 0x00000654, state->pitch1);
            NV_WR32(chip->PGRAPH, 0x00000658, state->pitch2);
            NV_WR32(chip->PGRAPH, 0x0000065C, state->pitch3);
            break;
        case NV_ARCH_04:
            /*
             * Make sure frame buffer config gets set before loading PRAMIN.
             */
            NV_WR32(chip->PFB, 0x00000200, state->config);
            LOAD_FIXED_STATE(nv4,PFIFO);
            LOAD_FIXED_STATE(nv4,PRAMIN);
            LOAD_FIXED_STATE(nv4,PGRAPH);
            /* Per-depth object tables; Tri03 only exists at 15/16 bpp. */
            switch (state->bpp)
            {
                case 15:
                    LOAD_FIXED_STATE_15BPP(nv4,PRAMIN);
                    LOAD_FIXED_STATE_15BPP(nv4,PGRAPH);
                    chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]);
                    break;
                case 16:
                    LOAD_FIXED_STATE_16BPP(nv4,PRAMIN);
                    LOAD_FIXED_STATE_16BPP(nv4,PGRAPH);
                    chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]);
                    break;
                case 24:
                case 32:
                    LOAD_FIXED_STATE_32BPP(nv4,PRAMIN);
                    LOAD_FIXED_STATE_32BPP(nv4,PGRAPH);
                    chip->Tri03 = NULL;
                    break;
                case 8:
                default:
                    LOAD_FIXED_STATE_8BPP(nv4,PRAMIN);
                    LOAD_FIXED_STATE_8BPP(nv4,PGRAPH);
                    chip->Tri03 = NULL;
                    break;
            }
            NV_WR32(chip->PGRAPH, 0x00000640, state->offset0);
            NV_WR32(chip->PGRAPH, 0x00000644, state->offset1);
            NV_WR32(chip->PGRAPH, 0x00000648, state->offset2);
            NV_WR32(chip->PGRAPH, 0x0000064C, state->offset3);
            NV_WR32(chip->PGRAPH, 0x00000670, state->pitch0);
            NV_WR32(chip->PGRAPH, 0x00000674, state->pitch1);
            NV_WR32(chip->PGRAPH, 0x00000678, state->pitch2);
            NV_WR32(chip->PGRAPH, 0x0000067C, state->pitch3);
            break;
        case NV_ARCH_10:
        case NV_ARCH_20:
        case NV_ARCH_30:
            /* Dual-head chips: select the owning CRTC before unlocking. */
            if(chip->twoHeads) {
               VGA_WR08(chip->PCIO, 0x03D4, 0x44);
               VGA_WR08(chip->PCIO, 0x03D5, state->crtcOwner);
               chip->LockUnlock(chip, 0);
            }
            LOAD_FIXED_STATE(nv10,PFIFO);
            LOAD_FIXED_STATE(nv10,PRAMIN);
            LOAD_FIXED_STATE(nv10,PGRAPH);
            switch (state->bpp)
            {
                case 15:
                    LOAD_FIXED_STATE_15BPP(nv10,PRAMIN);
                    LOAD_FIXED_STATE_15BPP(nv10,PGRAPH);
                    chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]);
                    break;
                case 16:
                    LOAD_FIXED_STATE_16BPP(nv10,PRAMIN);
                    LOAD_FIXED_STATE_16BPP(nv10,PGRAPH);
                    chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]);
                    break;
                case 24:
                case 32:
                    LOAD_FIXED_STATE_32BPP(nv10,PRAMIN);
                    LOAD_FIXED_STATE_32BPP(nv10,PGRAPH);
                    chip->Tri03 = NULL;
                    break;
                case 8:
                default:
                    LOAD_FIXED_STATE_8BPP(nv10,PRAMIN);
                    LOAD_FIXED_STATE_8BPP(nv10,PGRAPH);
                    chip->Tri03 = NULL;
/* Continuation of LoadStateExt(): NV10+ branch. */
                    break;
            }
            /* Surface offset/pitch registers moved between NV10 and NV20+. */
            if(chip->Architecture == NV_ARCH_10) {
                NV_WR32(chip->PGRAPH, 0x00000640, state->offset0);
                NV_WR32(chip->PGRAPH, 0x00000644, state->offset1);
                NV_WR32(chip->PGRAPH, 0x00000648, state->offset2);
                NV_WR32(chip->PGRAPH, 0x0000064C, state->offset3);
                NV_WR32(chip->PGRAPH, 0x00000670, state->pitch0);
                NV_WR32(chip->PGRAPH, 0x00000674, state->pitch1);
                NV_WR32(chip->PGRAPH, 0x00000678, state->pitch2);
                NV_WR32(chip->PGRAPH, 0x0000067C, state->pitch3);
                NV_WR32(chip->PGRAPH, 0x00000680, state->pitch3);
            } else {
                NV_WR32(chip->PGRAPH, 0x00000820, state->offset0);
                NV_WR32(chip->PGRAPH, 0x00000824, state->offset1);
                NV_WR32(chip->PGRAPH, 0x00000828, state->offset2);
                NV_WR32(chip->PGRAPH, 0x0000082C, state->offset3);
                NV_WR32(chip->PGRAPH, 0x00000850, state->pitch0);
                NV_WR32(chip->PGRAPH, 0x00000854, state->pitch1);
                NV_WR32(chip->PGRAPH, 0x00000858, state->pitch2);
                NV_WR32(chip->PGRAPH, 0x0000085C, state->pitch3);
                NV_WR32(chip->PGRAPH, 0x00000860, state->pitch3);
                NV_WR32(chip->PGRAPH, 0x00000864, state->pitch3);
                /* Mirror the PFB config into PGRAPH. */
                NV_WR32(chip->PGRAPH, 0x000009A4, NV_RD32(chip->PFB, 0x00000200));
                NV_WR32(chip->PGRAPH, 0x000009A8, NV_RD32(chip->PFB, 0x00000204));
            }
            if(chip->twoHeads) {
                NV_WR32(chip->PCRTC0, 0x00000860, state->head);
                NV_WR32(chip->PCRTC0, 0x00002860, state->head2);
            }
            NV_WR32(chip->PRAMDAC, 0x00000404, NV_RD32(chip->PRAMDAC, 0x00000404) | (1 << 25));
            /* PMC engine enables / interrupt masks. */
            NV_WR32(chip->PMC, 0x00008704, 1);
            NV_WR32(chip->PMC, 0x00008140, 0);
            NV_WR32(chip->PMC, 0x00008920, 0);
            NV_WR32(chip->PMC, 0x00008924, 0);
            NV_WR32(chip->PMC, 0x00008908, 0x01ffffff);
            NV_WR32(chip->PMC, 0x0000890C, 0x01ffffff);
            NV_WR32(chip->PMC, 0x00001588, 0);
            /* Clear the PFB tiling regions... */
            NV_WR32(chip->PFB, 0x00000240, 0);
            NV_WR32(chip->PFB, 0x00000250, 0);
            NV_WR32(chip->PFB, 0x00000260, 0);
            NV_WR32(chip->PFB, 0x00000270, 0);
            NV_WR32(chip->PFB, 0x00000280, 0);
            NV_WR32(chip->PFB, 0x00000290, 0);
            NV_WR32(chip->PFB, 0x000002A0, 0);
            NV_WR32(chip->PFB, 0x000002B0, 0);
            /* ...then mirror them into PGRAPH (run continues below). */
            NV_WR32(chip->PGRAPH, 0x00000B00, NV_RD32(chip->PFB, 0x00000240));
            NV_WR32(chip->PGRAPH, 0x00000B04,
/* Continuation of LoadStateExt(): copy the PFB tiling window registers
 * (0x240-0x2BC) into their PGRAPH shadows (0xB00-0xB7C), one word at
 * a time. */
                                   NV_RD32(chip->PFB, 0x00000244));
            NV_WR32(chip->PGRAPH, 0x00000B08, NV_RD32(chip->PFB, 0x00000248));
            NV_WR32(chip->PGRAPH, 0x00000B0C, NV_RD32(chip->PFB, 0x0000024C));
            NV_WR32(chip->PGRAPH, 0x00000B10, NV_RD32(chip->PFB, 0x00000250));
            NV_WR32(chip->PGRAPH, 0x00000B14, NV_RD32(chip->PFB, 0x00000254));
            NV_WR32(chip->PGRAPH, 0x00000B18, NV_RD32(chip->PFB, 0x00000258));
            NV_WR32(chip->PGRAPH, 0x00000B1C, NV_RD32(chip->PFB, 0x0000025C));
            NV_WR32(chip->PGRAPH, 0x00000B20, NV_RD32(chip->PFB, 0x00000260));
            NV_WR32(chip->PGRAPH, 0x00000B24, NV_RD32(chip->PFB, 0x00000264));
            NV_WR32(chip->PGRAPH, 0x00000B28, NV_RD32(chip->PFB, 0x00000268));
            NV_WR32(chip->PGRAPH, 0x00000B2C, NV_RD32(chip->PFB, 0x0000026C));
            NV_WR32(chip->PGRAPH, 0x00000B30, NV_RD32(chip->PFB, 0x00000270));
            NV_WR32(chip->PGRAPH, 0x00000B34, NV_RD32(chip->PFB, 0x00000274));
            NV_WR32(chip->PGRAPH, 0x00000B38, NV_RD32(chip->PFB, 0x00000278));
            NV_WR32(chip->PGRAPH, 0x00000B3C, NV_RD32(chip->PFB, 0x0000027C));
            NV_WR32(chip->PGRAPH, 0x00000B40, NV_RD32(chip->PFB, 0x00000280));
            NV_WR32(chip->PGRAPH, 0x00000B44, NV_RD32(chip->PFB, 0x00000284));
            NV_WR32(chip->PGRAPH, 0x00000B48, NV_RD32(chip->PFB, 0x00000288));
            NV_WR32(chip->PGRAPH, 0x00000B4C, NV_RD32(chip->PFB, 0x0000028C));
            NV_WR32(chip->PGRAPH, 0x00000B50, NV_RD32(chip->PFB, 0x00000290));
            NV_WR32(chip->PGRAPH, 0x00000B54, NV_RD32(chip->PFB, 0x00000294));
            NV_WR32(chip->PGRAPH, 0x00000B58, NV_RD32(chip->PFB, 0x00000298));
            NV_WR32(chip->PGRAPH, 0x00000B5C, NV_RD32(chip->PFB, 0x0000029C));
            NV_WR32(chip->PGRAPH, 0x00000B60, NV_RD32(chip->PFB, 0x000002A0));
            NV_WR32(chip->PGRAPH, 0x00000B64, NV_RD32(chip->PFB, 0x000002A4));
            NV_WR32(chip->PGRAPH, 0x00000B68, NV_RD32(chip->PFB, 0x000002A8));
            NV_WR32(chip->PGRAPH, 0x00000B6C, NV_RD32(chip->PFB, 0x000002AC));
            NV_WR32(chip->PGRAPH, 0x00000B70, NV_RD32(chip->PFB, 0x000002B0));
            NV_WR32(chip->PGRAPH, 0x00000B74, NV_RD32(chip->PFB, 0x000002B4));
            NV_WR32(chip->PGRAPH, 0x00000B78, NV_RD32(chip->PFB, 0x000002B8));
            NV_WR32(chip->PGRAPH,
/* Continuation of LoadStateExt(): last tiling-shadow copy, then clear
 * the PGRAPH context tables through the indexed F50/F54 port. */
                                  0x00000B7C, NV_RD32(chip->PFB, 0x000002BC));
            NV_WR32(chip->PGRAPH, 0x00000F40, 0x10000000);
            NV_WR32(chip->PGRAPH, 0x00000F44, 0x00000000);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000040);
            NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000008);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000200);
            for (i = 0; i < (3*16); i++)
                NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000040);
            NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000800);
            for (i = 0; i < (16*16); i++)
                NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000);
            NV_WR32(chip->PGRAPH, 0x00000F40, 0x30000000);
            NV_WR32(chip->PGRAPH, 0x00000F44, 0x00000004);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00006400);
            for (i = 0; i < (59*4); i++)
                NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00006800);
            for (i = 0; i < (47*4); i++)
                NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00006C00);
            for (i = 0; i < (3*4); i++)
                NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00007000);
            for (i = 0; i < (19*4); i++)
                NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00007400);
            for (i = 0; i < (12*4); i++)
                NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00007800);
            for (i = 0; i < (12*4); i++)
                NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00004400);
            for (i = 0; i < (8*4); i++)
                NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000000);
            for (i = 0; i < 16; i++)
                NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000);
            NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000040);
            for (i = 0; i < 4; i++)
                NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000);
            NV_WR32(chip->PCRTC, 0x00000810, state->cursorConfig);
            if(chip->flatPanel) {
               /* The dither control register moved between chip revs. */
               if((chip->Chipset & 0x0ff0) == 0x0110) {
                   NV_WR32(chip->PRAMDAC, 0x0528, state->dither);
               } else
               if((chip->Chipset & 0x0ff0) >= 0x0170) {
                   NV_WR32(chip->PRAMDAC,
/* Continuation of LoadStateExt(): finish the flat-panel dither write,
 * then load the CRTC mode state. */
                                   0x083C, state->dither);
               }

               VGA_WR08(chip->PCIO, 0x03D4, 0x53);
               VGA_WR08(chip->PCIO, 0x03D5, 0);
               VGA_WR08(chip->PCIO, 0x03D4, 0x54);
               VGA_WR08(chip->PCIO, 0x03D5, 0);
               VGA_WR08(chip->PCIO, 0x03D4, 0x21);
               VGA_WR08(chip->PCIO, 0x03D5, 0xfa);
            }

            VGA_WR08(chip->PCIO, 0x03D4, 0x41);
            VGA_WR08(chip->PCIO, 0x03D5, state->extra);
    }
    LOAD_FIXED_STATE(Riva,FIFO);
    UpdateFifoState(chip);
    /*
     * Load HW mode state.
     */
    /* Extended CRTC registers, written via the 0x3D4/0x3D5 index/data pair. */
    VGA_WR08(chip->PCIO, 0x03D4, 0x19);
    VGA_WR08(chip->PCIO, 0x03D5, state->repaint0);
    VGA_WR08(chip->PCIO, 0x03D4, 0x1A);
    VGA_WR08(chip->PCIO, 0x03D5, state->repaint1);
    VGA_WR08(chip->PCIO, 0x03D4, 0x25);
    VGA_WR08(chip->PCIO, 0x03D5, state->screen);
    VGA_WR08(chip->PCIO, 0x03D4, 0x28);
    VGA_WR08(chip->PCIO, 0x03D5, state->pixel);
    VGA_WR08(chip->PCIO, 0x03D4, 0x2D);
    VGA_WR08(chip->PCIO, 0x03D5, state->horiz);
    VGA_WR08(chip->PCIO, 0x03D4, 0x1B);
    VGA_WR08(chip->PCIO, 0x03D5, state->arbitration0);
    VGA_WR08(chip->PCIO, 0x03D4, 0x20);
    VGA_WR08(chip->PCIO, 0x03D5, state->arbitration1);
    VGA_WR08(chip->PCIO, 0x03D4, 0x30);
    VGA_WR08(chip->PCIO, 0x03D5, state->cursor0);
    VGA_WR08(chip->PCIO, 0x03D4, 0x31);
    VGA_WR08(chip->PCIO, 0x03D5, state->cursor1);
    VGA_WR08(chip->PCIO, 0x03D4, 0x2F);
    VGA_WR08(chip->PCIO, 0x03D5, state->cursor2);
    VGA_WR08(chip->PCIO, 0x03D4, 0x39);
    VGA_WR08(chip->PCIO, 0x03D5, state->interlace);
    if(!chip->flatPanel) {
        /* CRT path: program the pixel PLL(s) directly. */
        NV_WR32(chip->PRAMDAC0, 0x00000508, state->vpll);
        NV_WR32(chip->PRAMDAC0, 0x0000050C, state->pllsel);
        if(chip->twoHeads)
            NV_WR32(chip->PRAMDAC0, 0x00000520, state->vpll2);
    } else {
        /* Flat panel: only the scaler needs updating. */
        NV_WR32(chip->PRAMDAC, 0x00000848 , state->scale);
    }
    NV_WR32(chip->PRAMDAC, 0x00000600 , state->general);
    /*
     * Turn off VBlank enable and reset.
     */
    NV_WR32(chip->PCRTC, 0x00000140, 0);
    NV_WR32(chip->PCRTC, 0x00000100, chip->VBlankBit);
    /*
     * Set interrupt enable.
     */
    NV_WR32(chip->PMC, 0x00000140, chip->EnableIRQ & 0x01);
    /*
     * Set current state pointer.
     */
    chip->CurrentState = state;
    /*
     * Reset FIFO free and empty counts.
     */
    chip->FifoFreeCount  = 0;
    /* Free count from first subchannel */
    chip->FifoEmptyCount = NV_RD32(&chip->Rop->FifoFree, 0);
}

/*
 * UnloadStateExt - read the current hardware state back into @state,
 * mirroring what LoadStateExt() writes (extended CRTC registers, PLLs,
 * surface offsets/pitches, and dual-head / flat-panel extras).
 */
static void UnloadStateExt
(
    RIVA_HW_INST  *chip,
    RIVA_HW_STATE *state
)
{
    /*
     * Save current HW state.
     */
    VGA_WR08(chip->PCIO, 0x03D4, 0x19);
    state->repaint0     = VGA_RD08(chip->PCIO, 0x03D5);
    VGA_WR08(chip->PCIO, 0x03D4, 0x1A);
    state->repaint1     = VGA_RD08(chip->PCIO, 0x03D5);
    VGA_WR08(chip->PCIO, 0x03D4, 0x25);
    state->screen       = VGA_RD08(chip->PCIO, 0x03D5);
    VGA_WR08(chip->PCIO, 0x03D4, 0x28);
    state->pixel        = VGA_RD08(chip->PCIO, 0x03D5);
    VGA_WR08(chip->PCIO, 0x03D4, 0x2D);
    state->horiz        = VGA_RD08(chip->PCIO, 0x03D5);
    VGA_WR08(chip->PCIO, 0x03D4, 0x1B);
    state->arbitration0 = VGA_RD08(chip->PCIO, 0x03D5);
    VGA_WR08(chip->PCIO, 0x03D4, 0x20);
    state->arbitration1 = VGA_RD08(chip->PCIO, 0x03D5);
    VGA_WR08(chip->PCIO, 0x03D4, 0x30);
    state->cursor0      = VGA_RD08(chip->PCIO, 0x03D5);
    VGA_WR08(chip->PCIO, 0x03D4, 0x31);
    state->cursor1      = VGA_RD08(chip->PCIO, 0x03D5);
    VGA_WR08(chip->PCIO, 0x03D4, 0x2F);
    state->cursor2      = VGA_RD08(chip->PCIO, 0x03D5);
    VGA_WR08(chip->PCIO, 0x03D4, 0x39);
    state->interlace    = VGA_RD08(chip->PCIO, 0x03D5);
    state->vpll         = NV_RD32(chip->PRAMDAC0, 0x00000508);
    state->vpll2        = NV_RD32(chip->PRAMDAC0, 0x00000520);
    state->pllsel       = NV_RD32(chip->PRAMDAC0, 0x0000050C);
    state->general      = NV_RD32(chip->PRAMDAC, 0x00000600);
    state->scale        = NV_RD32(chip->PRAMDAC, 0x00000848);
    state->config       = NV_RD32(chip->PFB, 0x00000200);
    switch (chip->Architecture)
    {
        case NV_ARCH_03:
            state->offset0  = NV_RD32(chip->PGRAPH, 0x00000630);
            state->offset1  = NV_RD32(chip->PGRAPH, 0x00000634);
            state->offset2  = NV_RD32(chip->PGRAPH, 0x00000638);
            state->offset3  = NV_RD32(chip->PGRAPH, 0x0000063C);
            state->pitch0   = NV_RD32(chip->PGRAPH, 0x00000650);
            state->pitch1   = NV_RD32(chip->PGRAPH, 0x00000654);
            state->pitch2   = NV_RD32(chip->PGRAPH, 0x00000658);
            state->pitch3   = NV_RD32(chip->PGRAPH, 0x0000065C);
            break;
        case NV_ARCH_04:
            state->offset0  = NV_RD32(chip->PGRAPH, 0x00000640);
/* Continuation of UnloadStateExt(): finish the NV4 case, then NV10+. */
            state->offset1  = NV_RD32(chip->PGRAPH, 0x00000644);
            state->offset2  = NV_RD32(chip->PGRAPH, 0x00000648);
            state->offset3  = NV_RD32(chip->PGRAPH, 0x0000064C);
            state->pitch0   = NV_RD32(chip->PGRAPH, 0x00000670);
            state->pitch1   = NV_RD32(chip->PGRAPH, 0x00000674);
            state->pitch2   = NV_RD32(chip->PGRAPH, 0x00000678);
            state->pitch3   = NV_RD32(chip->PGRAPH, 0x0000067C);
            break;
        case NV_ARCH_10:
        case NV_ARCH_20:
        case NV_ARCH_30:
            state->offset0  = NV_RD32(chip->PGRAPH, 0x00000640);
            state->offset1  = NV_RD32(chip->PGRAPH, 0x00000644);
            state->offset2  = NV_RD32(chip->PGRAPH, 0x00000648);
            state->offset3  = NV_RD32(chip->PGRAPH, 0x0000064C);
            state->pitch0   = NV_RD32(chip->PGRAPH, 0x00000670);
            state->pitch1   = NV_RD32(chip->PGRAPH, 0x00000674);
            state->pitch2   = NV_RD32(chip->PGRAPH, 0x00000678);
            state->pitch3   = NV_RD32(chip->PGRAPH, 0x0000067C);
            if(chip->twoHeads) {
               state->head     = NV_RD32(chip->PCRTC0, 0x00000860);
               state->head2    = NV_RD32(chip->PCRTC0, 0x00002860);
               VGA_WR08(chip->PCIO, 0x03D4, 0x44);
               state->crtcOwner = VGA_RD08(chip->PCIO, 0x03D5);
            }
            VGA_WR08(chip->PCIO, 0x03D4, 0x41);
            state->extra = VGA_RD08(chip->PCIO, 0x03D5);
            state->cursorConfig = NV_RD32(chip->PCRTC, 0x00000810);
            /* Dither register location depends on the chip revision. */
            if((chip->Chipset & 0x0ff0) == 0x0110) {
                state->dither = NV_RD32(chip->PRAMDAC, 0x0528);
            } else
            if((chip->Chipset & 0x0ff0) >= 0x0170) {
                state->dither = NV_RD32(chip->PRAMDAC, 0x083C);
            }
            break;
    }
}

/*
 * SetStartAddress - set the scanout start offset (NV4+: single 32-bit
 * register in PCRTC).
 */
static void SetStartAddress
(
    RIVA_HW_INST *chip,
    unsigned      start
)
{
    NV_WR32(chip->PCRTC, 0x800, start);
}

/*
 * SetStartAddress3 - set the scanout start offset on NV3, which splits
 * the address across the standard VGA CRTC registers plus extended
 * bits, with sub-dword panning via the attribute controller.
 */
static void SetStartAddress3
(
    RIVA_HW_INST *chip,
    unsigned      start
)
{
    int offset = start >> 2;         /* dword-aligned part */
    int pan    = (start & 3) << 1;   /* remaining pixels, as pan value */
    unsigned char tmp;

    /*
     * Unlock extended registers.
     */
    chip->LockUnlock(chip, 0);
    /*
     * Set start address.
*/ VGA_WR08(chip->PCIO, 0x3D4, 0x0D); VGA_WR08(chip->PCIO, 0x3D5, offset); offset >>= 8; VGA_WR08(chip->PCIO, 0x3D4, 0x0C); VGA_WR08(chip->PCIO, 0x3D5, offset); offset >>= 8; VGA_WR08(chip->PCIO, 0x3D4, 0x19); tmp = VGA_RD08(chip->PCIO, 0x3D5); VGA_WR08(chip->PCIO, 0x3D5, (offset & 0x01F) | (tmp & ~0x1F)); VGA_WR08(chip->PCIO, 0x3D4, 0x2D); tmp = VGA_RD08(chip->PCIO, 0x3D5); VGA_WR08(chip->PCIO, 0x3D5, (offset & 0x60) | (tmp & ~0x60)); /* * 4 pixel pan register. */ offset = VGA_RD08(chip->PCIO, chip->IO + 0x0A); VGA_WR08(chip->PCIO, 0x3C0, 0x13); VGA_WR08(chip->PCIO, 0x3C0, pan); } static void nv3SetSurfaces2D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); RIVA_FIFO_FREE(*chip,Tri03,5); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000003); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000004); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000013); } static void nv4SetSurfaces2D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000003); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000004); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000014); } static void nv10SetSurfaces2D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000003); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000004); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000014); } static void nv3SetSurfaces3D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); RIVA_FIFO_FREE(*chip,Tri03,5); 
NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000005); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000006); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000013); } static void nv4SetSurfaces3D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000005); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000006); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000014); } static void nv10SetSurfaces3D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface3D __iomem *Surfaces3D = (RivaSurface3D __iomem *)&(chip->FIFO[0x0000E000/4]); RIVA_FIFO_FREE(*chip,Tri03,4); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000007); NV_WR32(&Surfaces3D->RenderBufferOffset, 0, surf0); NV_WR32(&Surfaces3D->ZBufferOffset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000014); } /****************************************************************************\ * * * Probe RIVA Chip Configuration * * * \****************************************************************************/ static void nv3GetConfig ( RIVA_HW_INST *chip ) { /* * Fill in chip configuration. */ if (NV_RD32(&chip->PFB[0x00000000/4], 0) & 0x00000020) { if (((NV_RD32(chip->PMC, 0x00000000) & 0xF0) == 0x20) && ((NV_RD32(chip->PMC, 0x00000000) & 0x0F) >= 0x02)) { /* * SDRAM 128 ZX. */ chip->RamBandwidthKBytesPerSec = 800000; switch (NV_RD32(chip->PFB, 0x00000000) & 0x03) { case 2: chip->RamAmountKBytes = 1024 * 4; break; case 1: chip->RamAmountKBytes = 1024 * 2; break; default: chip->RamAmountKBytes = 1024 * 8; break; } } else { chip->RamBandwidthKBytesPerSec = 1000000; chip->RamAmountKBytes = 1024 * 8; } } else { /* * SGRAM 128. 
*/ chip->RamBandwidthKBytesPerSec = 1000000; switch (NV_RD32(chip->PFB, 0x00000000) & 0x00000003) { case 0: chip->RamAmountKBytes = 1024 * 8; break; case 2: chip->RamAmountKBytes = 1024 * 4; break; default: chip->RamAmountKBytes = 1024 * 2; break; } } chip->CrystalFreqKHz = (NV_RD32(chip->PEXTDEV, 0x00000000) & 0x00000040) ? 14318 : 13500; chip->CURSOR = &(chip->PRAMIN[0x00008000/4 - 0x0800/4]); chip->VBlankBit = 0x00000100; chip->MaxVClockFreqKHz = 256000; /* * Set chip functions. */ chip->Busy = nv3Busy; chip->ShowHideCursor = ShowHideCursor; chip->LoadStateExt = LoadStateExt; chip->UnloadStateExt = UnloadStateExt; chip->SetStartAddress = SetStartAddress3; chip->SetSurfaces2D = nv3SetSurfaces2D; chip->SetSurfaces3D = nv3SetSurfaces3D; chip->LockUnlock = nv3LockUnlock; } static void nv4GetConfig ( RIVA_HW_INST *chip ) { /* * Fill in chip configuration. */ if (NV_RD32(chip->PFB, 0x00000000) & 0x00000100) { chip->RamAmountKBytes = ((NV_RD32(chip->PFB, 0x00000000) >> 12) & 0x0F) * 1024 * 2 + 1024 * 2; } else { switch (NV_RD32(chip->PFB, 0x00000000) & 0x00000003) { case 0: chip->RamAmountKBytes = 1024 * 32; break; case 1: chip->RamAmountKBytes = 1024 * 4; break; case 2: chip->RamAmountKBytes = 1024 * 8; break; case 3: default: chip->RamAmountKBytes = 1024 * 16; break; } } switch ((NV_RD32(chip->PFB, 0x00000000) >> 3) & 0x00000003) { case 3: chip->RamBandwidthKBytesPerSec = 800000; break; default: chip->RamBandwidthKBytesPerSec = 1000000; break; } chip->CrystalFreqKHz = (NV_RD32(chip->PEXTDEV, 0x00000000) & 0x00000040) ? 14318 : 13500; chip->CURSOR = &(chip->PRAMIN[0x00010000/4 - 0x0800/4]); chip->VBlankBit = 0x00000001; chip->MaxVClockFreqKHz = 350000; /* * Set chip functions. 
*/ chip->Busy = nv4Busy; chip->ShowHideCursor = ShowHideCursor; chip->LoadStateExt = LoadStateExt; chip->UnloadStateExt = UnloadStateExt; chip->SetStartAddress = SetStartAddress; chip->SetSurfaces2D = nv4SetSurfaces2D; chip->SetSurfaces3D = nv4SetSurfaces3D; chip->LockUnlock = nv4LockUnlock; } static void nv10GetConfig ( RIVA_HW_INST *chip, unsigned int chipset ) { struct pci_dev* dev; u32 amt; #ifdef __BIG_ENDIAN /* turn on big endian register access */ if(!(NV_RD32(chip->PMC, 0x00000004) & 0x01000001)) NV_WR32(chip->PMC, 0x00000004, 0x01000001); #endif /* * Fill in chip configuration. */ if(chipset == NV_CHIP_IGEFORCE2) { dev = pci_get_bus_and_slot(0, 1); pci_read_config_dword(dev, 0x7C, &amt); pci_dev_put(dev); chip->RamAmountKBytes = (((amt >> 6) & 31) + 1) * 1024; } else if(chipset == NV_CHIP_0x01F0) { dev = pci_get_bus_and_slot(0, 1); pci_read_config_dword(dev, 0x84, &amt); pci_dev_put(dev); chip->RamAmountKBytes = (((amt >> 4) & 127) + 1) * 1024; } else { switch ((NV_RD32(chip->PFB, 0x0000020C) >> 20) & 0x000000FF) { case 0x02: chip->RamAmountKBytes = 1024 * 2; break; case 0x04: chip->RamAmountKBytes = 1024 * 4; break; case 0x08: chip->RamAmountKBytes = 1024 * 8; break; case 0x10: chip->RamAmountKBytes = 1024 * 16; break; case 0x20: chip->RamAmountKBytes = 1024 * 32; break; case 0x40: chip->RamAmountKBytes = 1024 * 64; break; case 0x80: chip->RamAmountKBytes = 1024 * 128; break; default: chip->RamAmountKBytes = 1024 * 16; break; } } switch ((NV_RD32(chip->PFB, 0x00000000) >> 3) & 0x00000003) { case 3: chip->RamBandwidthKBytesPerSec = 800000; break; default: chip->RamBandwidthKBytesPerSec = 1000000; break; } chip->CrystalFreqKHz = (NV_RD32(chip->PEXTDEV, 0x0000) & (1 << 6)) ? 
14318 : 13500; switch (chipset & 0x0ff0) { case 0x0170: case 0x0180: case 0x01F0: case 0x0250: case 0x0280: case 0x0300: case 0x0310: case 0x0320: case 0x0330: case 0x0340: if(NV_RD32(chip->PEXTDEV, 0x0000) & (1 << 22)) chip->CrystalFreqKHz = 27000; break; default: break; } chip->CursorStart = (chip->RamAmountKBytes - 128) * 1024; chip->CURSOR = NULL; /* can't set this here */ chip->VBlankBit = 0x00000001; chip->MaxVClockFreqKHz = 350000; /* * Set chip functions. */ chip->Busy = nv10Busy; chip->ShowHideCursor = ShowHideCursor; chip->LoadStateExt = LoadStateExt; chip->UnloadStateExt = UnloadStateExt; chip->SetStartAddress = SetStartAddress; chip->SetSurfaces2D = nv10SetSurfaces2D; chip->SetSurfaces3D = nv10SetSurfaces3D; chip->LockUnlock = nv4LockUnlock; switch(chipset & 0x0ff0) { case 0x0110: case 0x0170: case 0x0180: case 0x01F0: case 0x0250: case 0x0280: case 0x0300: case 0x0310: case 0x0320: case 0x0330: case 0x0340: chip->twoHeads = TRUE; break; default: chip->twoHeads = FALSE; break; } } int RivaGetConfig ( RIVA_HW_INST *chip, unsigned int chipset ) { /* * Save this so future SW know whats it's dealing with. */ chip->Version = RIVA_SW_VERSION; /* * Chip specific configuration. */ switch (chip->Architecture) { case NV_ARCH_03: nv3GetConfig(chip); break; case NV_ARCH_04: nv4GetConfig(chip); break; case NV_ARCH_10: case NV_ARCH_20: case NV_ARCH_30: nv10GetConfig(chip, chipset); break; default: return (-1); } chip->Chipset = chipset; /* * Fill in FIFO pointers. 
*/ chip->Rop = (RivaRop __iomem *)&(chip->FIFO[0x00000000/4]); chip->Clip = (RivaClip __iomem *)&(chip->FIFO[0x00002000/4]); chip->Patt = (RivaPattern __iomem *)&(chip->FIFO[0x00004000/4]); chip->Pixmap = (RivaPixmap __iomem *)&(chip->FIFO[0x00006000/4]); chip->Blt = (RivaScreenBlt __iomem *)&(chip->FIFO[0x00008000/4]); chip->Bitmap = (RivaBitmap __iomem *)&(chip->FIFO[0x0000A000/4]); chip->Line = (RivaLine __iomem *)&(chip->FIFO[0x0000C000/4]); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); return (0); }
gpl-2.0
AICP/kernel_lge_mako
drivers/video/riva/riva_hw.c
12940
79994
/***************************************************************************\ |* *| |* Copyright 1993-1999 NVIDIA, Corporation. All rights reserved. *| |* *| |* NOTICE TO USER: The source code is copyrighted under U.S. and *| |* international laws. Users and possessors of this source code are *| |* hereby granted a nonexclusive, royalty-free copyright license to *| |* use this code in individual and commercial software. *| |* *| |* Any use of this source code must include, in the user documenta- *| |* tion and internal comments to the code, notices to the end user *| |* as follows: *| |* *| |* Copyright 1993-1999 NVIDIA, Corporation. All rights reserved. *| |* *| |* NVIDIA, CORPORATION MAKES NO REPRESENTATION ABOUT THE SUITABILITY *| |* OF THIS SOURCE CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" *| |* WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. NVIDIA, CORPOR- *| |* ATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOURCE CODE, *| |* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGE- *| |* MENT, AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL *| |* NVIDIA, CORPORATION BE LIABLE FOR ANY SPECIAL, INDIRECT, INCI- *| |* DENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RE- *| |* SULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION *| |* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF *| |* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE. *| |* *| |* U.S. Government End Users. This source code is a "commercial *| |* item," as that term is defined at 48 C.F.R. 2.101 (OCT 1995), *| |* consisting of "commercial computer software" and "commercial *| |* computer software documentation," as such terms are used in *| |* 48 C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Govern- *| |* ment only as a commercial end item. Consistent with 48 C.F.R. *| |* 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), *| |* all U.S. 
Government End Users acquire the source code with only *| |* those rights set forth herein. *| |* *| \***************************************************************************/ /* * GPL licensing note -- nVidia is allowing a liberal interpretation of * the documentation restriction above, to merely say that this nVidia's * copyright and disclaimer should be included with all code derived * from this source. -- Jeff Garzik <jgarzik@pobox.com>, 01/Nov/99 */ /* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/riva_hw.c,v 1.33 2002/08/05 20:47:06 mvojkovi Exp $ */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include "riva_hw.h" #include "riva_tbl.h" #include "nv_type.h" /* * This file is an OS-agnostic file used to make RIVA 128 and RIVA TNT * operate identically (except TNT has more memory and better 3D quality. */ static int nv3Busy ( RIVA_HW_INST *chip ) { return ((NV_RD32(&chip->Rop->FifoFree, 0) < chip->FifoEmptyCount) || NV_RD32(&chip->PGRAPH[0x000006B0/4], 0) & 0x01); } static int nv4Busy ( RIVA_HW_INST *chip ) { return ((NV_RD32(&chip->Rop->FifoFree, 0) < chip->FifoEmptyCount) || NV_RD32(&chip->PGRAPH[0x00000700/4], 0) & 0x01); } static int nv10Busy ( RIVA_HW_INST *chip ) { return ((NV_RD32(&chip->Rop->FifoFree, 0) < chip->FifoEmptyCount) || NV_RD32(&chip->PGRAPH[0x00000700/4], 0) & 0x01); } static void vgaLockUnlock ( RIVA_HW_INST *chip, int Lock ) { U008 cr11; VGA_WR08(chip->PCIO, 0x3D4, 0x11); cr11 = VGA_RD08(chip->PCIO, 0x3D5); if(Lock) cr11 |= 0x80; else cr11 &= ~0x80; VGA_WR08(chip->PCIO, 0x3D5, cr11); } static void nv3LockUnlock ( RIVA_HW_INST *chip, int Lock ) { VGA_WR08(chip->PVIO, 0x3C4, 0x06); VGA_WR08(chip->PVIO, 0x3C5, Lock ? 0x99 : 0x57); vgaLockUnlock(chip, Lock); } static void nv4LockUnlock ( RIVA_HW_INST *chip, int Lock ) { VGA_WR08(chip->PCIO, 0x3D4, 0x1F); VGA_WR08(chip->PCIO, 0x3D5, Lock ? 
0x99 : 0x57); vgaLockUnlock(chip, Lock); } static int ShowHideCursor ( RIVA_HW_INST *chip, int ShowHide ) { int cursor; cursor = chip->CurrentState->cursor1; chip->CurrentState->cursor1 = (chip->CurrentState->cursor1 & 0xFE) | (ShowHide & 0x01); VGA_WR08(chip->PCIO, 0x3D4, 0x31); VGA_WR08(chip->PCIO, 0x3D5, chip->CurrentState->cursor1); return (cursor & 0x01); } /****************************************************************************\ * * * The video arbitration routines calculate some "magic" numbers. Fixes * * the snow seen when accessing the framebuffer without it. * * It just works (I hope). * * * \****************************************************************************/ #define DEFAULT_GR_LWM 100 #define DEFAULT_VID_LWM 100 #define DEFAULT_GR_BURST_SIZE 256 #define DEFAULT_VID_BURST_SIZE 128 #define VIDEO 0 #define GRAPHICS 1 #define MPORT 2 #define ENGINE 3 #define GFIFO_SIZE 320 #define GFIFO_SIZE_128 256 #define MFIFO_SIZE 120 #define VFIFO_SIZE 256 typedef struct { int gdrain_rate; int vdrain_rate; int mdrain_rate; int gburst_size; int vburst_size; char vid_en; char gr_en; int wcmocc, wcgocc, wcvocc, wcvlwm, wcglwm; int by_gfacc; char vid_only_once; char gr_only_once; char first_vacc; char first_gacc; char first_macc; int vocc; int gocc; int mocc; char cur; char engine_en; char converged; int priority; } nv3_arb_info; typedef struct { int graphics_lwm; int video_lwm; int graphics_burst_size; int video_burst_size; int graphics_hi_priority; int media_hi_priority; int rtl_values; int valid; } nv3_fifo_info; typedef struct { char pix_bpp; char enable_video; char gr_during_vid; char enable_mp; int memory_width; int video_scale; int pclk_khz; int mclk_khz; int mem_page_miss; int mem_latency; char mem_aligned; } nv3_sim_state; typedef struct { int graphics_lwm; int video_lwm; int graphics_burst_size; int video_burst_size; int valid; } nv4_fifo_info; typedef struct { int pclk_khz; int mclk_khz; int nvclk_khz; char mem_page_miss; char mem_latency; int 
memory_width; char enable_video; char gr_during_vid; char pix_bpp; char mem_aligned; char enable_mp; } nv4_sim_state; typedef struct { int graphics_lwm; int video_lwm; int graphics_burst_size; int video_burst_size; int valid; } nv10_fifo_info; typedef struct { int pclk_khz; int mclk_khz; int nvclk_khz; char mem_page_miss; char mem_latency; u32 memory_type; int memory_width; char enable_video; char gr_during_vid; char pix_bpp; char mem_aligned; char enable_mp; } nv10_sim_state; static int nv3_iterate(nv3_fifo_info *res_info, nv3_sim_state * state, nv3_arb_info *ainfo) { int iter = 0; int tmp; int vfsize, mfsize, gfsize; int mburst_size = 32; int mmisses, gmisses, vmisses; int misses; int vlwm, glwm, mlwm; int last, next, cur; int max_gfsize ; long ns; vlwm = 0; glwm = 0; mlwm = 0; vfsize = 0; gfsize = 0; cur = ainfo->cur; mmisses = 2; gmisses = 2; vmisses = 2; if (ainfo->gburst_size == 128) max_gfsize = GFIFO_SIZE_128; else max_gfsize = GFIFO_SIZE; max_gfsize = GFIFO_SIZE; while (1) { if (ainfo->vid_en) { if (ainfo->wcvocc > ainfo->vocc) ainfo->wcvocc = ainfo->vocc; if (ainfo->wcvlwm > vlwm) ainfo->wcvlwm = vlwm ; ns = 1000000 * ainfo->vburst_size/(state->memory_width/8)/state->mclk_khz; vfsize = ns * ainfo->vdrain_rate / 1000000; vfsize = ainfo->wcvlwm - ainfo->vburst_size + vfsize; } if (state->enable_mp) { if (ainfo->wcmocc > ainfo->mocc) ainfo->wcmocc = ainfo->mocc; } if (ainfo->gr_en) { if (ainfo->wcglwm > glwm) ainfo->wcglwm = glwm ; if (ainfo->wcgocc > ainfo->gocc) ainfo->wcgocc = ainfo->gocc; ns = 1000000 * (ainfo->gburst_size/(state->memory_width/8))/state->mclk_khz; gfsize = (ns * (long) ainfo->gdrain_rate)/1000000; gfsize = ainfo->wcglwm - ainfo->gburst_size + gfsize; } mfsize = 0; if (!state->gr_during_vid && ainfo->vid_en) if (ainfo->vid_en && (ainfo->vocc < 0) && !ainfo->vid_only_once) next = VIDEO; else if (ainfo->mocc < 0) next = MPORT; else if (ainfo->gocc< ainfo->by_gfacc) next = GRAPHICS; else return (0); else switch (ainfo->priority) { case 
VIDEO: if (ainfo->vid_en && ainfo->vocc<0 && !ainfo->vid_only_once) next = VIDEO; else if (ainfo->gr_en && ainfo->gocc<0 && !ainfo->gr_only_once) next = GRAPHICS; else if (ainfo->mocc<0) next = MPORT; else return (0); break; case GRAPHICS: if (ainfo->gr_en && ainfo->gocc<0 && !ainfo->gr_only_once) next = GRAPHICS; else if (ainfo->vid_en && ainfo->vocc<0 && !ainfo->vid_only_once) next = VIDEO; else if (ainfo->mocc<0) next = MPORT; else return (0); break; default: if (ainfo->mocc<0) next = MPORT; else if (ainfo->gr_en && ainfo->gocc<0 && !ainfo->gr_only_once) next = GRAPHICS; else if (ainfo->vid_en && ainfo->vocc<0 && !ainfo->vid_only_once) next = VIDEO; else return (0); break; } last = cur; cur = next; iter++; switch (cur) { case VIDEO: if (last==cur) misses = 0; else if (ainfo->first_vacc) misses = vmisses; else misses = 1; ainfo->first_vacc = 0; if (last!=cur) { ns = 1000000 * (vmisses*state->mem_page_miss + state->mem_latency)/state->mclk_khz; vlwm = ns * ainfo->vdrain_rate/ 1000000; vlwm = ainfo->vocc - vlwm; } ns = 1000000*(misses*state->mem_page_miss + ainfo->vburst_size)/(state->memory_width/8)/state->mclk_khz; ainfo->vocc = ainfo->vocc + ainfo->vburst_size - ns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gocc - ns*ainfo->gdrain_rate/1000000; ainfo->mocc = ainfo->mocc - ns*ainfo->mdrain_rate/1000000; break; case GRAPHICS: if (last==cur) misses = 0; else if (ainfo->first_gacc) misses = gmisses; else misses = 1; ainfo->first_gacc = 0; if (last!=cur) { ns = 1000000*(gmisses*state->mem_page_miss + state->mem_latency)/state->mclk_khz ; glwm = ns * ainfo->gdrain_rate/1000000; glwm = ainfo->gocc - glwm; } ns = 1000000*(misses*state->mem_page_miss + ainfo->gburst_size/(state->memory_width/8))/state->mclk_khz; ainfo->vocc = ainfo->vocc + 0 - ns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gocc + ainfo->gburst_size - ns*ainfo->gdrain_rate/1000000; ainfo->mocc = ainfo->mocc + 0 - ns*ainfo->mdrain_rate/1000000; break; default: if (last==cur) misses = 0; else if 
(ainfo->first_macc) misses = mmisses; else misses = 1; ainfo->first_macc = 0; ns = 1000000*(misses*state->mem_page_miss + mburst_size/(state->memory_width/8))/state->mclk_khz; ainfo->vocc = ainfo->vocc + 0 - ns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gocc + 0 - ns*ainfo->gdrain_rate/1000000; ainfo->mocc = ainfo->mocc + mburst_size - ns*ainfo->mdrain_rate/1000000; break; } if (iter>100) { ainfo->converged = 0; return (1); } ns = 1000000*ainfo->gburst_size/(state->memory_width/8)/state->mclk_khz; tmp = ns * ainfo->gdrain_rate/1000000; if (abs(ainfo->gburst_size) + ((abs(ainfo->wcglwm) + 16 ) & ~0x7) - tmp > max_gfsize) { ainfo->converged = 0; return (1); } ns = 1000000*ainfo->vburst_size/(state->memory_width/8)/state->mclk_khz; tmp = ns * ainfo->vdrain_rate/1000000; if (abs(ainfo->vburst_size) + (abs(ainfo->wcvlwm + 32) & ~0xf) - tmp> VFIFO_SIZE) { ainfo->converged = 0; return (1); } if (abs(ainfo->gocc) > max_gfsize) { ainfo->converged = 0; return (1); } if (abs(ainfo->vocc) > VFIFO_SIZE) { ainfo->converged = 0; return (1); } if (abs(ainfo->mocc) > MFIFO_SIZE) { ainfo->converged = 0; return (1); } if (abs(vfsize) > VFIFO_SIZE) { ainfo->converged = 0; return (1); } if (abs(gfsize) > max_gfsize) { ainfo->converged = 0; return (1); } if (abs(mfsize) > MFIFO_SIZE) { ainfo->converged = 0; return (1); } } } static char nv3_arb(nv3_fifo_info * res_info, nv3_sim_state * state, nv3_arb_info *ainfo) { long ens, vns, mns, gns; int mmisses, gmisses, vmisses, eburst_size, mburst_size; int refresh_cycle; refresh_cycle = 0; refresh_cycle = 2*(state->mclk_khz/state->pclk_khz) + 5; mmisses = 2; if (state->mem_aligned) gmisses = 2; else gmisses = 3; vmisses = 2; eburst_size = state->memory_width * 1; mburst_size = 32; gns = 1000000 * (gmisses*state->mem_page_miss + state->mem_latency)/state->mclk_khz; ainfo->by_gfacc = gns*ainfo->gdrain_rate/1000000; ainfo->wcmocc = 0; ainfo->wcgocc = 0; ainfo->wcvocc = 0; ainfo->wcvlwm = 0; ainfo->wcglwm = 0; ainfo->engine_en = 1; 
ainfo->converged = 1; if (ainfo->engine_en) { ens = 1000000*(state->mem_page_miss + eburst_size/(state->memory_width/8) +refresh_cycle)/state->mclk_khz; ainfo->mocc = state->enable_mp ? 0-ens*ainfo->mdrain_rate/1000000 : 0; ainfo->vocc = ainfo->vid_en ? 0-ens*ainfo->vdrain_rate/1000000 : 0; ainfo->gocc = ainfo->gr_en ? 0-ens*ainfo->gdrain_rate/1000000 : 0; ainfo->cur = ENGINE; ainfo->first_vacc = 1; ainfo->first_gacc = 1; ainfo->first_macc = 1; nv3_iterate(res_info, state,ainfo); } if (state->enable_mp) { mns = 1000000 * (mmisses*state->mem_page_miss + mburst_size/(state->memory_width/8) + refresh_cycle)/state->mclk_khz; ainfo->mocc = state->enable_mp ? 0 : mburst_size - mns*ainfo->mdrain_rate/1000000; ainfo->vocc = ainfo->vid_en ? 0 : 0- mns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gr_en ? 0: 0- mns*ainfo->gdrain_rate/1000000; ainfo->cur = MPORT; ainfo->first_vacc = 1; ainfo->first_gacc = 1; ainfo->first_macc = 0; nv3_iterate(res_info, state,ainfo); } if (ainfo->gr_en) { ainfo->first_vacc = 1; ainfo->first_gacc = 0; ainfo->first_macc = 1; gns = 1000000*(gmisses*state->mem_page_miss + ainfo->gburst_size/(state->memory_width/8) + refresh_cycle)/state->mclk_khz; ainfo->gocc = ainfo->gburst_size - gns*ainfo->gdrain_rate/1000000; ainfo->vocc = ainfo->vid_en? 0-gns*ainfo->vdrain_rate/1000000 : 0; ainfo->mocc = state->enable_mp ? 0-gns*ainfo->mdrain_rate/1000000: 0; ainfo->cur = GRAPHICS; nv3_iterate(res_info, state,ainfo); } if (ainfo->vid_en) { ainfo->first_vacc = 0; ainfo->first_gacc = 1; ainfo->first_macc = 1; vns = 1000000*(vmisses*state->mem_page_miss + ainfo->vburst_size/(state->memory_width/8) + refresh_cycle)/state->mclk_khz; ainfo->vocc = ainfo->vburst_size - vns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gr_en? (0-vns*ainfo->gdrain_rate/1000000) : 0; ainfo->mocc = state->enable_mp? 
0-vns*ainfo->mdrain_rate/1000000 :0 ; ainfo->cur = VIDEO; nv3_iterate(res_info, state, ainfo); } if (ainfo->converged) { res_info->graphics_lwm = (int)abs(ainfo->wcglwm) + 16; res_info->video_lwm = (int)abs(ainfo->wcvlwm) + 32; res_info->graphics_burst_size = ainfo->gburst_size; res_info->video_burst_size = ainfo->vburst_size; res_info->graphics_hi_priority = (ainfo->priority == GRAPHICS); res_info->media_hi_priority = (ainfo->priority == MPORT); if (res_info->video_lwm > 160) { res_info->graphics_lwm = 256; res_info->video_lwm = 128; res_info->graphics_burst_size = 64; res_info->video_burst_size = 64; res_info->graphics_hi_priority = 0; res_info->media_hi_priority = 0; ainfo->converged = 0; return (0); } if (res_info->video_lwm > 128) { res_info->video_lwm = 128; } return (1); } else { res_info->graphics_lwm = 256; res_info->video_lwm = 128; res_info->graphics_burst_size = 64; res_info->video_burst_size = 64; res_info->graphics_hi_priority = 0; res_info->media_hi_priority = 0; return (0); } } static char nv3_get_param(nv3_fifo_info *res_info, nv3_sim_state * state, nv3_arb_info *ainfo) { int done, g,v, p; done = 0; for (p=0; p < 2; p++) { for (g=128 ; g > 32; g= g>> 1) { for (v=128; v >=32; v = v>> 1) { ainfo->priority = p; ainfo->gburst_size = g; ainfo->vburst_size = v; done = nv3_arb(res_info, state,ainfo); if (done && (g==128)) if ((res_info->graphics_lwm + g) > 256) done = 0; if (done) goto Done; } } } Done: return done; } static void nv3CalcArbitration ( nv3_fifo_info * res_info, nv3_sim_state * state ) { nv3_fifo_info save_info; nv3_arb_info ainfo; char res_gr, res_vid; ainfo.gr_en = 1; ainfo.vid_en = state->enable_video; ainfo.vid_only_once = 0; ainfo.gr_only_once = 0; ainfo.gdrain_rate = (int) state->pclk_khz * (state->pix_bpp/8); ainfo.vdrain_rate = (int) state->pclk_khz * 2; if (state->video_scale != 0) ainfo.vdrain_rate = ainfo.vdrain_rate/state->video_scale; ainfo.mdrain_rate = 33000; res_info->rtl_values = 0; if (!state->gr_during_vid && 
state->enable_video) { ainfo.gr_only_once = 1; ainfo.gr_en = 1; ainfo.gdrain_rate = 0; res_vid = nv3_get_param(res_info, state, &ainfo); res_vid = ainfo.converged; save_info.video_lwm = res_info->video_lwm; save_info.video_burst_size = res_info->video_burst_size; ainfo.vid_en = 1; ainfo.vid_only_once = 1; ainfo.gr_en = 1; ainfo.gdrain_rate = (int) state->pclk_khz * (state->pix_bpp/8); ainfo.vdrain_rate = 0; res_gr = nv3_get_param(res_info, state, &ainfo); res_gr = ainfo.converged; res_info->video_lwm = save_info.video_lwm; res_info->video_burst_size = save_info.video_burst_size; res_info->valid = res_gr & res_vid; } else { if (!ainfo.gr_en) ainfo.gdrain_rate = 0; if (!ainfo.vid_en) ainfo.vdrain_rate = 0; res_gr = nv3_get_param(res_info, state, &ainfo); res_info->valid = ainfo.converged; } } static void nv3UpdateArbitrationSettings ( unsigned VClk, unsigned pixelDepth, unsigned *burst, unsigned *lwm, RIVA_HW_INST *chip ) { nv3_fifo_info fifo_data; nv3_sim_state sim_data; unsigned int M, N, P, pll, MClk; pll = NV_RD32(&chip->PRAMDAC0[0x00000504/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; MClk = (N * chip->CrystalFreqKHz / M) >> P; sim_data.pix_bpp = (char)pixelDepth; sim_data.enable_video = 0; sim_data.enable_mp = 0; sim_data.video_scale = 1; sim_data.memory_width = (NV_RD32(&chip->PEXTDEV[0x00000000/4], 0) & 0x10) ? 
128 : 64; sim_data.memory_width = 128; sim_data.mem_latency = 9; sim_data.mem_aligned = 1; sim_data.mem_page_miss = 11; sim_data.gr_during_vid = 0; sim_data.pclk_khz = VClk; sim_data.mclk_khz = MClk; nv3CalcArbitration(&fifo_data, &sim_data); if (fifo_data.valid) { int b = fifo_data.graphics_burst_size >> 4; *burst = 0; while (b >>= 1) (*burst)++; *lwm = fifo_data.graphics_lwm >> 3; } else { *lwm = 0x24; *burst = 0x2; } } static void nv4CalcArbitration ( nv4_fifo_info *fifo, nv4_sim_state *arb ) { int data, pagemiss, cas,width, video_enable, color_key_enable, bpp, align; int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs; int found, mclk_extra, mclk_loop, cbs, m1, p1; int mclk_freq, pclk_freq, nvclk_freq, mp_enable; int us_m, us_n, us_p, video_drain_rate, crtc_drain_rate; int vpm_us, us_video, vlwm, video_fill_us, cpm_us, us_crt,clwm; int craw, vraw; fifo->valid = 1; pclk_freq = arb->pclk_khz; mclk_freq = arb->mclk_khz; nvclk_freq = arb->nvclk_khz; pagemiss = arb->mem_page_miss; cas = arb->mem_latency; width = arb->memory_width >> 6; video_enable = arb->enable_video; color_key_enable = arb->gr_during_vid; bpp = arb->pix_bpp; align = arb->mem_aligned; mp_enable = arb->enable_mp; clwm = 0; vlwm = 0; cbs = 128; pclks = 2; nvclks = 2; nvclks += 2; nvclks += 1; mclks = 5; mclks += 3; mclks += 1; mclks += cas; mclks += 1; mclks += 1; mclks += 1; mclks += 1; mclk_extra = 3; nvclks += 2; nvclks += 1; nvclks += 1; nvclks += 1; if (mp_enable) mclks+=4; nvclks += 0; pclks += 0; found = 0; vbs = 0; while (found != 1) { fifo->valid = 1; found = 1; mclk_loop = mclks+mclk_extra; us_m = mclk_loop *1000*1000 / mclk_freq; us_n = nvclks*1000*1000 / nvclk_freq; us_p = nvclks*1000*1000 / pclk_freq; if (video_enable) { video_drain_rate = pclk_freq * 2; crtc_drain_rate = pclk_freq * bpp/8; vpagemiss = 2; vpagemiss += 1; crtpagemiss = 2; vpm_us = (vpagemiss * pagemiss)*1000*1000/mclk_freq; if (nvclk_freq * 2 > mclk_freq * width) video_fill_us = cbs*1000*1000 / 16 / nvclk_freq ; else 
video_fill_us = cbs*1000*1000 / (8 * width) / mclk_freq; us_video = vpm_us + us_m + us_n + us_p + video_fill_us; vlwm = us_video * video_drain_rate/(1000*1000); vlwm++; vbs = 128; if (vlwm > 128) vbs = 64; if (vlwm > (256-64)) vbs = 32; if (nvclk_freq * 2 > mclk_freq * width) video_fill_us = vbs *1000*1000/ 16 / nvclk_freq ; else video_fill_us = vbs*1000*1000 / (8 * width) / mclk_freq; cpm_us = crtpagemiss * pagemiss *1000*1000/ mclk_freq; us_crt = us_video +video_fill_us +cpm_us +us_m + us_n +us_p ; clwm = us_crt * crtc_drain_rate/(1000*1000); clwm++; } else { crtc_drain_rate = pclk_freq * bpp/8; crtpagemiss = 2; crtpagemiss += 1; cpm_us = crtpagemiss * pagemiss *1000*1000/ mclk_freq; us_crt = cpm_us + us_m + us_n + us_p ; clwm = us_crt * crtc_drain_rate/(1000*1000); clwm++; } m1 = clwm + cbs - 512; p1 = m1 * pclk_freq / mclk_freq; p1 = p1 * bpp / 8; if ((p1 < m1) && (m1 > 0)) { fifo->valid = 0; found = 0; if (mclk_extra ==0) found = 1; mclk_extra--; } else if (video_enable) { if ((clwm > 511) || (vlwm > 255)) { fifo->valid = 0; found = 0; if (mclk_extra ==0) found = 1; mclk_extra--; } } else { if (clwm > 519) { fifo->valid = 0; found = 0; if (mclk_extra ==0) found = 1; mclk_extra--; } } craw = clwm; vraw = vlwm; if (clwm < 384) clwm = 384; if (vlwm < 128) vlwm = 128; data = (int)(clwm); fifo->graphics_lwm = data; fifo->graphics_burst_size = 128; data = (int)((vlwm+15)); fifo->video_lwm = data; fifo->video_burst_size = vbs; } } static void nv4UpdateArbitrationSettings ( unsigned VClk, unsigned pixelDepth, unsigned *burst, unsigned *lwm, RIVA_HW_INST *chip ) { nv4_fifo_info fifo_data; nv4_sim_state sim_data; unsigned int M, N, P, pll, MClk, NVClk, cfg1; pll = NV_RD32(&chip->PRAMDAC0[0x00000504/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; MClk = (N * chip->CrystalFreqKHz / M) >> P; pll = NV_RD32(&chip->PRAMDAC0[0x00000500/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; NVClk = (N * chip->CrystalFreqKHz / 
M) >> P; cfg1 = NV_RD32(&chip->PFB[0x00000204/4], 0); sim_data.pix_bpp = (char)pixelDepth; sim_data.enable_video = 0; sim_data.enable_mp = 0; sim_data.memory_width = (NV_RD32(&chip->PEXTDEV[0x00000000/4], 0) & 0x10) ? 128 : 64; sim_data.mem_latency = (char)cfg1 & 0x0F; sim_data.mem_aligned = 1; sim_data.mem_page_miss = (char)(((cfg1 >> 4) &0x0F) + ((cfg1 >> 31) & 0x01)); sim_data.gr_during_vid = 0; sim_data.pclk_khz = VClk; sim_data.mclk_khz = MClk; sim_data.nvclk_khz = NVClk; nv4CalcArbitration(&fifo_data, &sim_data); if (fifo_data.valid) { int b = fifo_data.graphics_burst_size >> 4; *burst = 0; while (b >>= 1) (*burst)++; *lwm = fifo_data.graphics_lwm >> 3; } } static void nv10CalcArbitration ( nv10_fifo_info *fifo, nv10_sim_state *arb ) { int data, pagemiss, cas,width, video_enable, color_key_enable, bpp, align; int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs; int nvclk_fill, us_extra; int found, mclk_extra, mclk_loop, cbs, m1; int mclk_freq, pclk_freq, nvclk_freq, mp_enable; int us_m, us_m_min, us_n, us_p, video_drain_rate, crtc_drain_rate; int vus_m, vus_n, vus_p; int vpm_us, us_video, vlwm, cpm_us, us_crt,clwm; int clwm_rnd_down; int craw, m2us, us_pipe, us_pipe_min, vus_pipe, p1clk, p2; int pclks_2_top_fifo, min_mclk_extra; int us_min_mclk_extra; fifo->valid = 1; pclk_freq = arb->pclk_khz; /* freq in KHz */ mclk_freq = arb->mclk_khz; nvclk_freq = arb->nvclk_khz; pagemiss = arb->mem_page_miss; cas = arb->mem_latency; width = arb->memory_width/64; video_enable = arb->enable_video; color_key_enable = arb->gr_during_vid; bpp = arb->pix_bpp; align = arb->mem_aligned; mp_enable = arb->enable_mp; clwm = 0; vlwm = 1024; cbs = 512; vbs = 512; pclks = 4; /* lwm detect. */ nvclks = 3; /* lwm -> sync. */ nvclks += 2; /* fbi bus cycles (1 req + 1 busy) */ mclks = 1; /* 2 edge sync. may be very close to edge so just put one. 
*/
    /* Remaining mclk cycles in the latency budget (see inline notes). */
    mclks += 1; /* arb_hp_req */
    mclks += 5; /* ap_hp_req tiling pipeline */
    mclks += 2; /* tc_req latency fifo */
    mclks += 2; /* fb_cas_n_ memory request to fbio block */
    mclks += 7; /* sm_d_rdv data returned from fbio block */
    /* fb.rd.d.Put_gc need to accumulate 256 bits for read */
    if (arb->memory_type == 0)
        if (arb->memory_width == 64) /* 64 bit bus */
            mclks += 4;
        else
            mclks += 2;
    else
        if (arb->memory_width == 64) /* 64 bit bus */
            mclks += 2;
        else
            mclks += 1;
    if ((!video_enable) && (arb->memory_width == 128))
    {
        mclk_extra = (bpp == 32) ? 31 : 42; /* Margin of error */
        min_mclk_extra = 17;
    }
    else
    {
        mclk_extra = (bpp == 32) ? 8 : 4; /* Margin of error */
        /* mclk_extra = 4; */ /* Margin of error */
        min_mclk_extra = 18;
    }
    nvclks += 1; /* 2 edge sync. may be very close to edge so just put one. */
    nvclks += 1; /* fbi_d_rdv_n */
    nvclks += 1; /* Fbi_d_rdata */
    nvclks += 1; /* crtfifo load */
    if(mp_enable)
        mclks+=4; /* Mp can get in with a burst of 8. */
    /* Extra clocks determined by heuristics */
    nvclks += 0;
    pclks += 0;
    found = 0;

    /* Search loop: keep trying until a consistent watermark/burst pair is
     * found, trading away mclk slack (and, further down, burst size) on
     * each failed attempt. */
    while(found != 1) {
        fifo->valid = 1;
        found = 1;
        mclk_loop = mclks+mclk_extra;
        us_m = mclk_loop *1000*1000 / mclk_freq; /* Mclk latency in us */
        us_m_min = mclks * 1000*1000 / mclk_freq; /* Minimum Mclk latency in us */
        us_min_mclk_extra = min_mclk_extra *1000*1000 / mclk_freq;
        us_n = nvclks*1000*1000 / nvclk_freq; /* nvclk latency in us */
        us_p = pclks*1000*1000 / pclk_freq; /* pclk latency in us */
        us_pipe = us_m + us_n + us_p;
        us_pipe_min = us_m_min + us_n + us_p;
        us_extra = 0;

        /* Same budget seen from the video unit's side. */
        vus_m = mclk_loop *1000*1000 / mclk_freq; /* Mclk latency in us */
        vus_n = (4)*1000*1000 / nvclk_freq; /* nvclk latency in us */
        vus_p = 0*1000*1000 / pclk_freq; /* pclk latency in us */
        vus_pipe = vus_m + vus_n + vus_p;

        if(video_enable) {
            video_drain_rate = pclk_freq * 4; /* MB/s */
            crtc_drain_rate = pclk_freq * bpp/8; /* MB/s */
            vpagemiss = 1; /* self generating page miss */
            vpagemiss += 1; /* One higher priority before */
            crtpagemiss = 2; /* self generating page
                                miss */
            if(mp_enable)
                crtpagemiss += 1; /* if MA0 conflict */
            vpm_us = (vpagemiss * pagemiss)*1000*1000/mclk_freq;
            us_video = vpm_us + vus_m; /* Video has separate read return path */
            cpm_us = crtpagemiss * pagemiss *1000*1000/ mclk_freq;
            us_crt =
              us_video  /* Wait for video */
              +cpm_us   /* CRT Page miss */
              +us_m + us_n +us_p /* other latency */
              ;
            clwm = us_crt * crtc_drain_rate/(1000*1000);
            clwm++; /* fixed point <= float_point - 1. Fixes that */
        } else {
            crtc_drain_rate = pclk_freq * bpp/8; /* bpp * pclk/8 */
            crtpagemiss = 1; /* self generating page miss */
            crtpagemiss += 1; /* MA0 page miss */
            if(mp_enable)
                crtpagemiss += 1; /* if MA0 conflict */
            cpm_us = crtpagemiss * pagemiss *1000*1000/ mclk_freq;
            us_crt = cpm_us + us_m + us_n + us_p ;
            clwm = us_crt * crtc_drain_rate/(1000*1000);
            clwm++; /* fixed point <= float_point - 1. Fixes that */
            /*
            // Another concern, only for high pclks so don't do this
            // with video:
            // What happens if the latency to fetch the cbs is so large that
            // fifo empties.  In that case we need to have an alternate clwm
            // value based off the total burst fetch:
            us_crt = (cbs * 1000 * 1000)/ (8*width)/mclk_freq ;
            us_crt = us_crt + us_m + us_n + us_p + (4 * 1000 * 1000)/mclk_freq;
            clwm_mt = us_crt * crtc_drain_rate/(1000*1000);
            clwm_mt ++;
            if(clwm_mt > clwm)
                clwm = clwm_mt;
            */
            /* Finally, a heuristic check when width == 64 bits */
            if(width == 1){
                nvclk_fill = nvclk_freq * 8;
                if(crtc_drain_rate * 100 >= nvclk_fill * 102)
                        clwm = 0xfff; /*Large number to fail */
                else if(crtc_drain_rate * 100 >= nvclk_fill * 98) {
                    clwm = 1024;
                    cbs = 512;
                    us_extra = (cbs * 1000 * 1000)/ (8*width)/mclk_freq ;
                }
            }
        }

        /* Overfill check: */
        clwm_rnd_down = ((int)clwm/8)*8;
        if (clwm_rnd_down < clwm)
            clwm += 8;
        m1 = clwm + cbs - 1024; /* Amount of overfill */
        m2us = us_pipe_min + us_min_mclk_extra;
        pclks_2_top_fifo = (1024-clwm)/(8*width); /* pclk cycles to drain */
        p1clk = m2us * pclk_freq/(1000*1000);
        p2 = p1clk * bpp / 8; /* bytes drained.
*/ if((p2 < m1) && (m1 > 0)) { fifo->valid = 0; found = 0; if(min_mclk_extra == 0) { if(cbs <= 32) { found = 1; /* Can't adjust anymore! */ } else { cbs = cbs/2; /* reduce the burst size */ } } else { min_mclk_extra--; } } else { if (clwm > 1023){ /* Have some margin */ fifo->valid = 0; found = 0; if(min_mclk_extra == 0) found = 1; /* Can't adjust anymore! */ else min_mclk_extra--; } } craw = clwm; if(clwm < (1024-cbs+8)) clwm = 1024-cbs+8; data = (int)(clwm); /* printf("CRT LWM: %f bytes, prog: 0x%x, bs: 256\n", clwm, data ); */ fifo->graphics_lwm = data; fifo->graphics_burst_size = cbs; /* printf("VID LWM: %f bytes, prog: 0x%x, bs: %d\n, ", vlwm, data, vbs ); */ fifo->video_lwm = 1024; fifo->video_burst_size = 512; } } static void nv10UpdateArbitrationSettings ( unsigned VClk, unsigned pixelDepth, unsigned *burst, unsigned *lwm, RIVA_HW_INST *chip ) { nv10_fifo_info fifo_data; nv10_sim_state sim_data; unsigned int M, N, P, pll, MClk, NVClk, cfg1; pll = NV_RD32(&chip->PRAMDAC0[0x00000504/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; MClk = (N * chip->CrystalFreqKHz / M) >> P; pll = NV_RD32(&chip->PRAMDAC0[0x00000500/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; NVClk = (N * chip->CrystalFreqKHz / M) >> P; cfg1 = NV_RD32(&chip->PFB[0x00000204/4], 0); sim_data.pix_bpp = (char)pixelDepth; sim_data.enable_video = 0; sim_data.enable_mp = 0; sim_data.memory_type = (NV_RD32(&chip->PFB[0x00000200/4], 0) & 0x01) ? 1 : 0; sim_data.memory_width = (NV_RD32(&chip->PEXTDEV[0x00000000/4], 0) & 0x10) ? 
128 : 64; sim_data.mem_latency = (char)cfg1 & 0x0F; sim_data.mem_aligned = 1; sim_data.mem_page_miss = (char)(((cfg1 >> 4) &0x0F) + ((cfg1 >> 31) & 0x01)); sim_data.gr_during_vid = 0; sim_data.pclk_khz = VClk; sim_data.mclk_khz = MClk; sim_data.nvclk_khz = NVClk; nv10CalcArbitration(&fifo_data, &sim_data); if (fifo_data.valid) { int b = fifo_data.graphics_burst_size >> 4; *burst = 0; while (b >>= 1) (*burst)++; *lwm = fifo_data.graphics_lwm >> 3; } } static void nForceUpdateArbitrationSettings ( unsigned VClk, unsigned pixelDepth, unsigned *burst, unsigned *lwm, RIVA_HW_INST *chip ) { nv10_fifo_info fifo_data; nv10_sim_state sim_data; unsigned int M, N, P, pll, MClk, NVClk; unsigned int uMClkPostDiv; struct pci_dev *dev; dev = pci_get_bus_and_slot(0, 3); pci_read_config_dword(dev, 0x6C, &uMClkPostDiv); pci_dev_put(dev); uMClkPostDiv = (uMClkPostDiv >> 8) & 0xf; if(!uMClkPostDiv) uMClkPostDiv = 4; MClk = 400000 / uMClkPostDiv; pll = NV_RD32(&chip->PRAMDAC0[0x00000500/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; NVClk = (N * chip->CrystalFreqKHz / M) >> P; sim_data.pix_bpp = (char)pixelDepth; sim_data.enable_video = 0; sim_data.enable_mp = 0; dev = pci_get_bus_and_slot(0, 1); pci_read_config_dword(dev, 0x7C, &sim_data.memory_type); pci_dev_put(dev); sim_data.memory_type = (sim_data.memory_type >> 12) & 1; sim_data.memory_width = 64; sim_data.mem_latency = 3; sim_data.mem_aligned = 1; sim_data.mem_page_miss = 10; sim_data.gr_during_vid = 0; sim_data.pclk_khz = VClk; sim_data.mclk_khz = MClk; sim_data.nvclk_khz = NVClk; nv10CalcArbitration(&fifo_data, &sim_data); if (fifo_data.valid) { int b = fifo_data.graphics_burst_size >> 4; *burst = 0; while (b >>= 1) (*burst)++; *lwm = fifo_data.graphics_lwm >> 3; } } /****************************************************************************\ * * * RIVA Mode State Routines * * * \****************************************************************************/ /* * Calculate the Video Clock 
 * parameters for the PLL.  Exhaustively searches M/N/P for the coefficient
 * set whose output frequency is closest to clockIn (KHz).
 */
static int CalcVClock
(
    int           clockIn,
    int          *clockOut,
    int          *mOut,
    int          *nOut,
    int          *pOut,
    RIVA_HW_INST *chip
)
{
    unsigned lowM, highM, highP;
    unsigned DeltaNew, DeltaOld;
    unsigned VClk, Freq;
    unsigned M, N, P;

    DeltaOld = 0xFFFFFFFF;
    VClk = (unsigned)clockIn;
    /* Legal M range depends on the crystal; NV3 allows one step less. */
    if (chip->CrystalFreqKHz == 13500)
    {
        lowM  = 7;
        highM = 13 - (chip->Architecture == NV_ARCH_03);
    }
    else
    {
        lowM  = 8;
        highM = 14 - (chip->Architecture == NV_ARCH_03);
    }
    highP = 4 - (chip->Architecture == NV_ARCH_03);
    for (P = 0; P <= highP; P ++)
    {
        Freq = VClk << P;
        /* Only consider P values that keep the VCO in range. */
        if ((Freq >= 128000) && (Freq <= chip->MaxVClockFreqKHz))
        {
            for (M = lowM; M <= highM; M++)
            {
                N = (VClk << P) * M / chip->CrystalFreqKHz;
                if(N <= 255) {
                    Freq = (chip->CrystalFreqKHz * N / M) >> P;
                    if (Freq > VClk)
                        DeltaNew = Freq - VClk;
                    else
                        DeltaNew = VClk - Freq;
                    /* Keep the closest match seen so far. */
                    if (DeltaNew < DeltaOld)
                    {
                        *mOut     = M;
                        *nOut     = N;
                        *pOut     = P;
                        *clockOut = Freq;
                        DeltaOld  = DeltaNew;
                    }
                }
            }
        }
    }
    /* non-zero: M/N/P/clock values assigned.  zero: error (not set) */
    return (DeltaOld != 0xFFFFFFFF);
}

/*
 * Calculate extended mode parameters (SVGA) and save in a
 * mode state structure.
 */
int CalcStateExt
(
    RIVA_HW_INST  *chip,
    RIVA_HW_STATE *state,
    int            bpp,
    int            width,
    int            hDisplaySize,
    int            height,
    int            dotClock
)
{
    int pixelDepth;
    int uninitialized_var(VClk),uninitialized_var(m),
        uninitialized_var(n), uninitialized_var(p);

    /*
     * Save mode parameters.
     */
    state->bpp    = bpp;    /* this is not bitsPerPixel, it's 8,15,16,32 */
    state->width  = width;
    state->height = height;
    /*
     * Extended RIVA registers.
     */
    pixelDepth = (bpp + 1)/8;
    if (!CalcVClock(dotClock, &VClk, &m, &n, &p, chip))
        return -EINVAL;

    switch (chip->Architecture)
    {
        case NV_ARCH_03:
            nv3UpdateArbitrationSettings(VClk,
                                         pixelDepth * 8,
                                         &(state->arbitration0),
                                         &(state->arbitration1),
                                         chip);
            state->cursor0  = 0x00;
            state->cursor1  = 0x78;
            state->cursor2  = 0x00000000;
            state->pllsel   = 0x10010100;
            state->config   = ((width + 31)/32)
                            | (((pixelDepth > 2) ? 3 : pixelDepth) << 8)
                            | 0x1000;
            state->general  = 0x00100100;
            state->repaint1 = hDisplaySize < 1280 ? 0x06 : 0x02;
            break;
        case NV_ARCH_04:
            nv4UpdateArbitrationSettings(VClk,
                                         pixelDepth * 8,
                                         &(state->arbitration0),
                                         &(state->arbitration1),
                                         chip);
            state->cursor0  = 0x00;
            state->cursor1  = 0xFC;
            state->cursor2  = 0x00000000;
            state->pllsel   = 0x10000700;
            state->config   = 0x00001114;
            state->general  = bpp == 16 ? 0x00101100 : 0x00100100;
            state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
            break;
        case NV_ARCH_10:
        case NV_ARCH_20:
        case NV_ARCH_30:
            /* Integrated (nForce) chips read memory info from PCI config
             * space, hence the separate arbitration routine. */
            if((chip->Chipset == NV_CHIP_IGEFORCE2) ||
               (chip->Chipset == NV_CHIP_0x01F0))
            {
                nForceUpdateArbitrationSettings(VClk,
                                         pixelDepth * 8,
                                         &(state->arbitration0),
                                         &(state->arbitration1),
                                         chip);
            } else {
                nv10UpdateArbitrationSettings(VClk,
                                         pixelDepth * 8,
                                         &(state->arbitration0),
                                         &(state->arbitration1),
                                         chip);
            }
            state->cursor0  = 0x80 | (chip->CursorStart >> 17);
            state->cursor1  = (chip->CursorStart >> 11) << 2;
            state->cursor2  = chip->CursorStart >> 24;
            state->pllsel   = 0x10000700;
            state->config   = NV_RD32(&chip->PFB[0x00000200/4], 0);
            state->general  = bpp == 16 ? 0x00101100 : 0x00100100;
            state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
            break;
    }

    /* Paul Richards: below if block borks things in kernel for some reason */
    /* Tony: Below is needed to set hardware in DirectColor */
    if((bpp != 8) && (chip->Architecture != NV_ARCH_03))
        state->general |= 0x00000030;

    state->vpll     = (p << 16) | (n << 8) | m;
    state->repaint0 = (((width/8)*pixelDepth) & 0x700) >> 3;
    state->pixel    = pixelDepth > 2 ? 3 : pixelDepth;
    state->offset0  =
    state->offset1  =
    state->offset2  =
    state->offset3  = 0;
    state->pitch0   =
    state->pitch1   =
    state->pitch2   =
    state->pitch3   = pixelDepth * width;
    return 0;
}

/*
 * Load fixed function state and pre-calculated/stored state.
*/ #if 0 #define LOAD_FIXED_STATE(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev)/8; i++) \ chip->dev[tbl##Table##dev[i][0]] = tbl##Table##dev[i][1] #define LOAD_FIXED_STATE_8BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++) \ chip->dev[tbl##Table##dev##_8BPP[i][0]] = tbl##Table##dev##_8BPP[i][1] #define LOAD_FIXED_STATE_15BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++) \ chip->dev[tbl##Table##dev##_15BPP[i][0]] = tbl##Table##dev##_15BPP[i][1] #define LOAD_FIXED_STATE_16BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_16BPP)/8; i++) \ chip->dev[tbl##Table##dev##_16BPP[i][0]] = tbl##Table##dev##_16BPP[i][1] #define LOAD_FIXED_STATE_32BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_32BPP)/8; i++) \ chip->dev[tbl##Table##dev##_32BPP[i][0]] = tbl##Table##dev##_32BPP[i][1] #endif #define LOAD_FIXED_STATE(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev)/8; i++) \ NV_WR32(&chip->dev[tbl##Table##dev[i][0]], 0, tbl##Table##dev[i][1]) #define LOAD_FIXED_STATE_8BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++) \ NV_WR32(&chip->dev[tbl##Table##dev##_8BPP[i][0]], 0, tbl##Table##dev##_8BPP[i][1]) #define LOAD_FIXED_STATE_15BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++) \ NV_WR32(&chip->dev[tbl##Table##dev##_15BPP[i][0]], 0, tbl##Table##dev##_15BPP[i][1]) #define LOAD_FIXED_STATE_16BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_16BPP)/8; i++) \ NV_WR32(&chip->dev[tbl##Table##dev##_16BPP[i][0]], 0, tbl##Table##dev##_16BPP[i][1]) #define LOAD_FIXED_STATE_32BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_32BPP)/8; i++) \ NV_WR32(&chip->dev[tbl##Table##dev##_32BPP[i][0]], 0, tbl##Table##dev##_32BPP[i][1]) static void UpdateFifoState ( RIVA_HW_INST *chip ) { int i; switch (chip->Architecture) { case NV_ARCH_04: LOAD_FIXED_STATE(nv4,FIFO); chip->Tri03 = NULL; chip->Tri05 = (RivaTexturedTriangle05 __iomem *)&(chip->FIFO[0x0000E000/4]); break; case NV_ARCH_10: case 
NV_ARCH_20: case NV_ARCH_30: /* * Initialize state for the RivaTriangle3D05 routines. */ LOAD_FIXED_STATE(nv10tri05,PGRAPH); LOAD_FIXED_STATE(nv10,FIFO); chip->Tri03 = NULL; chip->Tri05 = (RivaTexturedTriangle05 __iomem *)&(chip->FIFO[0x0000E000/4]); break; } } static void LoadStateExt ( RIVA_HW_INST *chip, RIVA_HW_STATE *state ) { int i; /* * Load HW fixed function state. */ LOAD_FIXED_STATE(Riva,PMC); LOAD_FIXED_STATE(Riva,PTIMER); switch (chip->Architecture) { case NV_ARCH_03: /* * Make sure frame buffer config gets set before loading PRAMIN. */ NV_WR32(chip->PFB, 0x00000200, state->config); LOAD_FIXED_STATE(nv3,PFIFO); LOAD_FIXED_STATE(nv3,PRAMIN); LOAD_FIXED_STATE(nv3,PGRAPH); switch (state->bpp) { case 15: case 16: LOAD_FIXED_STATE_15BPP(nv3,PRAMIN); LOAD_FIXED_STATE_15BPP(nv3,PGRAPH); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); break; case 24: case 32: LOAD_FIXED_STATE_32BPP(nv3,PRAMIN); LOAD_FIXED_STATE_32BPP(nv3,PGRAPH); chip->Tri03 = NULL; break; case 8: default: LOAD_FIXED_STATE_8BPP(nv3,PRAMIN); LOAD_FIXED_STATE_8BPP(nv3,PGRAPH); chip->Tri03 = NULL; break; } for (i = 0x00000; i < 0x00800; i++) NV_WR32(&chip->PRAMIN[0x00000502 + i], 0, (i << 12) | 0x03); NV_WR32(chip->PGRAPH, 0x00000630, state->offset0); NV_WR32(chip->PGRAPH, 0x00000634, state->offset1); NV_WR32(chip->PGRAPH, 0x00000638, state->offset2); NV_WR32(chip->PGRAPH, 0x0000063C, state->offset3); NV_WR32(chip->PGRAPH, 0x00000650, state->pitch0); NV_WR32(chip->PGRAPH, 0x00000654, state->pitch1); NV_WR32(chip->PGRAPH, 0x00000658, state->pitch2); NV_WR32(chip->PGRAPH, 0x0000065C, state->pitch3); break; case NV_ARCH_04: /* * Make sure frame buffer config gets set before loading PRAMIN. 
*/ NV_WR32(chip->PFB, 0x00000200, state->config); LOAD_FIXED_STATE(nv4,PFIFO); LOAD_FIXED_STATE(nv4,PRAMIN); LOAD_FIXED_STATE(nv4,PGRAPH); switch (state->bpp) { case 15: LOAD_FIXED_STATE_15BPP(nv4,PRAMIN); LOAD_FIXED_STATE_15BPP(nv4,PGRAPH); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); break; case 16: LOAD_FIXED_STATE_16BPP(nv4,PRAMIN); LOAD_FIXED_STATE_16BPP(nv4,PGRAPH); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); break; case 24: case 32: LOAD_FIXED_STATE_32BPP(nv4,PRAMIN); LOAD_FIXED_STATE_32BPP(nv4,PGRAPH); chip->Tri03 = NULL; break; case 8: default: LOAD_FIXED_STATE_8BPP(nv4,PRAMIN); LOAD_FIXED_STATE_8BPP(nv4,PGRAPH); chip->Tri03 = NULL; break; } NV_WR32(chip->PGRAPH, 0x00000640, state->offset0); NV_WR32(chip->PGRAPH, 0x00000644, state->offset1); NV_WR32(chip->PGRAPH, 0x00000648, state->offset2); NV_WR32(chip->PGRAPH, 0x0000064C, state->offset3); NV_WR32(chip->PGRAPH, 0x00000670, state->pitch0); NV_WR32(chip->PGRAPH, 0x00000674, state->pitch1); NV_WR32(chip->PGRAPH, 0x00000678, state->pitch2); NV_WR32(chip->PGRAPH, 0x0000067C, state->pitch3); break; case NV_ARCH_10: case NV_ARCH_20: case NV_ARCH_30: if(chip->twoHeads) { VGA_WR08(chip->PCIO, 0x03D4, 0x44); VGA_WR08(chip->PCIO, 0x03D5, state->crtcOwner); chip->LockUnlock(chip, 0); } LOAD_FIXED_STATE(nv10,PFIFO); LOAD_FIXED_STATE(nv10,PRAMIN); LOAD_FIXED_STATE(nv10,PGRAPH); switch (state->bpp) { case 15: LOAD_FIXED_STATE_15BPP(nv10,PRAMIN); LOAD_FIXED_STATE_15BPP(nv10,PGRAPH); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); break; case 16: LOAD_FIXED_STATE_16BPP(nv10,PRAMIN); LOAD_FIXED_STATE_16BPP(nv10,PGRAPH); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); break; case 24: case 32: LOAD_FIXED_STATE_32BPP(nv10,PRAMIN); LOAD_FIXED_STATE_32BPP(nv10,PGRAPH); chip->Tri03 = NULL; break; case 8: default: LOAD_FIXED_STATE_8BPP(nv10,PRAMIN); LOAD_FIXED_STATE_8BPP(nv10,PGRAPH); chip->Tri03 = NULL; 
break; } if(chip->Architecture == NV_ARCH_10) { NV_WR32(chip->PGRAPH, 0x00000640, state->offset0); NV_WR32(chip->PGRAPH, 0x00000644, state->offset1); NV_WR32(chip->PGRAPH, 0x00000648, state->offset2); NV_WR32(chip->PGRAPH, 0x0000064C, state->offset3); NV_WR32(chip->PGRAPH, 0x00000670, state->pitch0); NV_WR32(chip->PGRAPH, 0x00000674, state->pitch1); NV_WR32(chip->PGRAPH, 0x00000678, state->pitch2); NV_WR32(chip->PGRAPH, 0x0000067C, state->pitch3); NV_WR32(chip->PGRAPH, 0x00000680, state->pitch3); } else { NV_WR32(chip->PGRAPH, 0x00000820, state->offset0); NV_WR32(chip->PGRAPH, 0x00000824, state->offset1); NV_WR32(chip->PGRAPH, 0x00000828, state->offset2); NV_WR32(chip->PGRAPH, 0x0000082C, state->offset3); NV_WR32(chip->PGRAPH, 0x00000850, state->pitch0); NV_WR32(chip->PGRAPH, 0x00000854, state->pitch1); NV_WR32(chip->PGRAPH, 0x00000858, state->pitch2); NV_WR32(chip->PGRAPH, 0x0000085C, state->pitch3); NV_WR32(chip->PGRAPH, 0x00000860, state->pitch3); NV_WR32(chip->PGRAPH, 0x00000864, state->pitch3); NV_WR32(chip->PGRAPH, 0x000009A4, NV_RD32(chip->PFB, 0x00000200)); NV_WR32(chip->PGRAPH, 0x000009A8, NV_RD32(chip->PFB, 0x00000204)); } if(chip->twoHeads) { NV_WR32(chip->PCRTC0, 0x00000860, state->head); NV_WR32(chip->PCRTC0, 0x00002860, state->head2); } NV_WR32(chip->PRAMDAC, 0x00000404, NV_RD32(chip->PRAMDAC, 0x00000404) | (1 << 25)); NV_WR32(chip->PMC, 0x00008704, 1); NV_WR32(chip->PMC, 0x00008140, 0); NV_WR32(chip->PMC, 0x00008920, 0); NV_WR32(chip->PMC, 0x00008924, 0); NV_WR32(chip->PMC, 0x00008908, 0x01ffffff); NV_WR32(chip->PMC, 0x0000890C, 0x01ffffff); NV_WR32(chip->PMC, 0x00001588, 0); NV_WR32(chip->PFB, 0x00000240, 0); NV_WR32(chip->PFB, 0x00000250, 0); NV_WR32(chip->PFB, 0x00000260, 0); NV_WR32(chip->PFB, 0x00000270, 0); NV_WR32(chip->PFB, 0x00000280, 0); NV_WR32(chip->PFB, 0x00000290, 0); NV_WR32(chip->PFB, 0x000002A0, 0); NV_WR32(chip->PFB, 0x000002B0, 0); NV_WR32(chip->PGRAPH, 0x00000B00, NV_RD32(chip->PFB, 0x00000240)); NV_WR32(chip->PGRAPH, 0x00000B04, 
NV_RD32(chip->PFB, 0x00000244)); NV_WR32(chip->PGRAPH, 0x00000B08, NV_RD32(chip->PFB, 0x00000248)); NV_WR32(chip->PGRAPH, 0x00000B0C, NV_RD32(chip->PFB, 0x0000024C)); NV_WR32(chip->PGRAPH, 0x00000B10, NV_RD32(chip->PFB, 0x00000250)); NV_WR32(chip->PGRAPH, 0x00000B14, NV_RD32(chip->PFB, 0x00000254)); NV_WR32(chip->PGRAPH, 0x00000B18, NV_RD32(chip->PFB, 0x00000258)); NV_WR32(chip->PGRAPH, 0x00000B1C, NV_RD32(chip->PFB, 0x0000025C)); NV_WR32(chip->PGRAPH, 0x00000B20, NV_RD32(chip->PFB, 0x00000260)); NV_WR32(chip->PGRAPH, 0x00000B24, NV_RD32(chip->PFB, 0x00000264)); NV_WR32(chip->PGRAPH, 0x00000B28, NV_RD32(chip->PFB, 0x00000268)); NV_WR32(chip->PGRAPH, 0x00000B2C, NV_RD32(chip->PFB, 0x0000026C)); NV_WR32(chip->PGRAPH, 0x00000B30, NV_RD32(chip->PFB, 0x00000270)); NV_WR32(chip->PGRAPH, 0x00000B34, NV_RD32(chip->PFB, 0x00000274)); NV_WR32(chip->PGRAPH, 0x00000B38, NV_RD32(chip->PFB, 0x00000278)); NV_WR32(chip->PGRAPH, 0x00000B3C, NV_RD32(chip->PFB, 0x0000027C)); NV_WR32(chip->PGRAPH, 0x00000B40, NV_RD32(chip->PFB, 0x00000280)); NV_WR32(chip->PGRAPH, 0x00000B44, NV_RD32(chip->PFB, 0x00000284)); NV_WR32(chip->PGRAPH, 0x00000B48, NV_RD32(chip->PFB, 0x00000288)); NV_WR32(chip->PGRAPH, 0x00000B4C, NV_RD32(chip->PFB, 0x0000028C)); NV_WR32(chip->PGRAPH, 0x00000B50, NV_RD32(chip->PFB, 0x00000290)); NV_WR32(chip->PGRAPH, 0x00000B54, NV_RD32(chip->PFB, 0x00000294)); NV_WR32(chip->PGRAPH, 0x00000B58, NV_RD32(chip->PFB, 0x00000298)); NV_WR32(chip->PGRAPH, 0x00000B5C, NV_RD32(chip->PFB, 0x0000029C)); NV_WR32(chip->PGRAPH, 0x00000B60, NV_RD32(chip->PFB, 0x000002A0)); NV_WR32(chip->PGRAPH, 0x00000B64, NV_RD32(chip->PFB, 0x000002A4)); NV_WR32(chip->PGRAPH, 0x00000B68, NV_RD32(chip->PFB, 0x000002A8)); NV_WR32(chip->PGRAPH, 0x00000B6C, NV_RD32(chip->PFB, 0x000002AC)); NV_WR32(chip->PGRAPH, 0x00000B70, NV_RD32(chip->PFB, 0x000002B0)); NV_WR32(chip->PGRAPH, 0x00000B74, NV_RD32(chip->PFB, 0x000002B4)); NV_WR32(chip->PGRAPH, 0x00000B78, NV_RD32(chip->PFB, 0x000002B8)); NV_WR32(chip->PGRAPH, 
0x00000B7C, NV_RD32(chip->PFB, 0x000002BC)); NV_WR32(chip->PGRAPH, 0x00000F40, 0x10000000); NV_WR32(chip->PGRAPH, 0x00000F44, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000040); NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000008); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000200); for (i = 0; i < (3*16); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000040); NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000800); for (i = 0; i < (16*16); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F40, 0x30000000); NV_WR32(chip->PGRAPH, 0x00000F44, 0x00000004); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00006400); for (i = 0; i < (59*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00006800); for (i = 0; i < (47*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00006C00); for (i = 0; i < (3*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00007000); for (i = 0; i < (19*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00007400); for (i = 0; i < (12*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00007800); for (i = 0; i < (12*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00004400); for (i = 0; i < (8*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000000); for (i = 0; i < 16; i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000040); for (i = 0; i < 4; i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PCRTC, 0x00000810, state->cursorConfig); if(chip->flatPanel) { if((chip->Chipset & 0x0ff0) == 0x0110) { NV_WR32(chip->PRAMDAC, 0x0528, state->dither); } else if((chip->Chipset & 0x0ff0) >= 0x0170) { NV_WR32(chip->PRAMDAC, 
0x083C, state->dither); } VGA_WR08(chip->PCIO, 0x03D4, 0x53); VGA_WR08(chip->PCIO, 0x03D5, 0); VGA_WR08(chip->PCIO, 0x03D4, 0x54); VGA_WR08(chip->PCIO, 0x03D5, 0); VGA_WR08(chip->PCIO, 0x03D4, 0x21); VGA_WR08(chip->PCIO, 0x03D5, 0xfa); } VGA_WR08(chip->PCIO, 0x03D4, 0x41); VGA_WR08(chip->PCIO, 0x03D5, state->extra); } LOAD_FIXED_STATE(Riva,FIFO); UpdateFifoState(chip); /* * Load HW mode state. */ VGA_WR08(chip->PCIO, 0x03D4, 0x19); VGA_WR08(chip->PCIO, 0x03D5, state->repaint0); VGA_WR08(chip->PCIO, 0x03D4, 0x1A); VGA_WR08(chip->PCIO, 0x03D5, state->repaint1); VGA_WR08(chip->PCIO, 0x03D4, 0x25); VGA_WR08(chip->PCIO, 0x03D5, state->screen); VGA_WR08(chip->PCIO, 0x03D4, 0x28); VGA_WR08(chip->PCIO, 0x03D5, state->pixel); VGA_WR08(chip->PCIO, 0x03D4, 0x2D); VGA_WR08(chip->PCIO, 0x03D5, state->horiz); VGA_WR08(chip->PCIO, 0x03D4, 0x1B); VGA_WR08(chip->PCIO, 0x03D5, state->arbitration0); VGA_WR08(chip->PCIO, 0x03D4, 0x20); VGA_WR08(chip->PCIO, 0x03D5, state->arbitration1); VGA_WR08(chip->PCIO, 0x03D4, 0x30); VGA_WR08(chip->PCIO, 0x03D5, state->cursor0); VGA_WR08(chip->PCIO, 0x03D4, 0x31); VGA_WR08(chip->PCIO, 0x03D5, state->cursor1); VGA_WR08(chip->PCIO, 0x03D4, 0x2F); VGA_WR08(chip->PCIO, 0x03D5, state->cursor2); VGA_WR08(chip->PCIO, 0x03D4, 0x39); VGA_WR08(chip->PCIO, 0x03D5, state->interlace); if(!chip->flatPanel) { NV_WR32(chip->PRAMDAC0, 0x00000508, state->vpll); NV_WR32(chip->PRAMDAC0, 0x0000050C, state->pllsel); if(chip->twoHeads) NV_WR32(chip->PRAMDAC0, 0x00000520, state->vpll2); } else { NV_WR32(chip->PRAMDAC, 0x00000848 , state->scale); } NV_WR32(chip->PRAMDAC, 0x00000600 , state->general); /* * Turn off VBlank enable and reset. */ NV_WR32(chip->PCRTC, 0x00000140, 0); NV_WR32(chip->PCRTC, 0x00000100, chip->VBlankBit); /* * Set interrupt enable. */ NV_WR32(chip->PMC, 0x00000140, chip->EnableIRQ & 0x01); /* * Set current state pointer. */ chip->CurrentState = state; /* * Reset FIFO free and empty counts. 
*/ chip->FifoFreeCount = 0; /* Free count from first subchannel */ chip->FifoEmptyCount = NV_RD32(&chip->Rop->FifoFree, 0); } static void UnloadStateExt ( RIVA_HW_INST *chip, RIVA_HW_STATE *state ) { /* * Save current HW state. */ VGA_WR08(chip->PCIO, 0x03D4, 0x19); state->repaint0 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x1A); state->repaint1 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x25); state->screen = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x28); state->pixel = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x2D); state->horiz = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x1B); state->arbitration0 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x20); state->arbitration1 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x30); state->cursor0 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x31); state->cursor1 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x2F); state->cursor2 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x39); state->interlace = VGA_RD08(chip->PCIO, 0x03D5); state->vpll = NV_RD32(chip->PRAMDAC0, 0x00000508); state->vpll2 = NV_RD32(chip->PRAMDAC0, 0x00000520); state->pllsel = NV_RD32(chip->PRAMDAC0, 0x0000050C); state->general = NV_RD32(chip->PRAMDAC, 0x00000600); state->scale = NV_RD32(chip->PRAMDAC, 0x00000848); state->config = NV_RD32(chip->PFB, 0x00000200); switch (chip->Architecture) { case NV_ARCH_03: state->offset0 = NV_RD32(chip->PGRAPH, 0x00000630); state->offset1 = NV_RD32(chip->PGRAPH, 0x00000634); state->offset2 = NV_RD32(chip->PGRAPH, 0x00000638); state->offset3 = NV_RD32(chip->PGRAPH, 0x0000063C); state->pitch0 = NV_RD32(chip->PGRAPH, 0x00000650); state->pitch1 = NV_RD32(chip->PGRAPH, 0x00000654); state->pitch2 = NV_RD32(chip->PGRAPH, 0x00000658); state->pitch3 = NV_RD32(chip->PGRAPH, 0x0000065C); break; case NV_ARCH_04: state->offset0 = NV_RD32(chip->PGRAPH, 0x00000640); 
state->offset1 = NV_RD32(chip->PGRAPH, 0x00000644); state->offset2 = NV_RD32(chip->PGRAPH, 0x00000648); state->offset3 = NV_RD32(chip->PGRAPH, 0x0000064C); state->pitch0 = NV_RD32(chip->PGRAPH, 0x00000670); state->pitch1 = NV_RD32(chip->PGRAPH, 0x00000674); state->pitch2 = NV_RD32(chip->PGRAPH, 0x00000678); state->pitch3 = NV_RD32(chip->PGRAPH, 0x0000067C); break; case NV_ARCH_10: case NV_ARCH_20: case NV_ARCH_30: state->offset0 = NV_RD32(chip->PGRAPH, 0x00000640); state->offset1 = NV_RD32(chip->PGRAPH, 0x00000644); state->offset2 = NV_RD32(chip->PGRAPH, 0x00000648); state->offset3 = NV_RD32(chip->PGRAPH, 0x0000064C); state->pitch0 = NV_RD32(chip->PGRAPH, 0x00000670); state->pitch1 = NV_RD32(chip->PGRAPH, 0x00000674); state->pitch2 = NV_RD32(chip->PGRAPH, 0x00000678); state->pitch3 = NV_RD32(chip->PGRAPH, 0x0000067C); if(chip->twoHeads) { state->head = NV_RD32(chip->PCRTC0, 0x00000860); state->head2 = NV_RD32(chip->PCRTC0, 0x00002860); VGA_WR08(chip->PCIO, 0x03D4, 0x44); state->crtcOwner = VGA_RD08(chip->PCIO, 0x03D5); } VGA_WR08(chip->PCIO, 0x03D4, 0x41); state->extra = VGA_RD08(chip->PCIO, 0x03D5); state->cursorConfig = NV_RD32(chip->PCRTC, 0x00000810); if((chip->Chipset & 0x0ff0) == 0x0110) { state->dither = NV_RD32(chip->PRAMDAC, 0x0528); } else if((chip->Chipset & 0x0ff0) >= 0x0170) { state->dither = NV_RD32(chip->PRAMDAC, 0x083C); } break; } } static void SetStartAddress ( RIVA_HW_INST *chip, unsigned start ) { NV_WR32(chip->PCRTC, 0x800, start); } static void SetStartAddress3 ( RIVA_HW_INST *chip, unsigned start ) { int offset = start >> 2; int pan = (start & 3) << 1; unsigned char tmp; /* * Unlock extended registers. */ chip->LockUnlock(chip, 0); /* * Set start address. 
*/ VGA_WR08(chip->PCIO, 0x3D4, 0x0D); VGA_WR08(chip->PCIO, 0x3D5, offset); offset >>= 8; VGA_WR08(chip->PCIO, 0x3D4, 0x0C); VGA_WR08(chip->PCIO, 0x3D5, offset); offset >>= 8; VGA_WR08(chip->PCIO, 0x3D4, 0x19); tmp = VGA_RD08(chip->PCIO, 0x3D5); VGA_WR08(chip->PCIO, 0x3D5, (offset & 0x01F) | (tmp & ~0x1F)); VGA_WR08(chip->PCIO, 0x3D4, 0x2D); tmp = VGA_RD08(chip->PCIO, 0x3D5); VGA_WR08(chip->PCIO, 0x3D5, (offset & 0x60) | (tmp & ~0x60)); /* * 4 pixel pan register. */ offset = VGA_RD08(chip->PCIO, chip->IO + 0x0A); VGA_WR08(chip->PCIO, 0x3C0, 0x13); VGA_WR08(chip->PCIO, 0x3C0, pan); } static void nv3SetSurfaces2D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); RIVA_FIFO_FREE(*chip,Tri03,5); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000003); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000004); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000013); } static void nv4SetSurfaces2D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000003); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000004); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000014); } static void nv10SetSurfaces2D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000003); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000004); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000014); } static void nv3SetSurfaces3D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); RIVA_FIFO_FREE(*chip,Tri03,5); 
NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000005); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000006); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000013); } static void nv4SetSurfaces3D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000005); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000006); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000014); } static void nv10SetSurfaces3D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface3D __iomem *Surfaces3D = (RivaSurface3D __iomem *)&(chip->FIFO[0x0000E000/4]); RIVA_FIFO_FREE(*chip,Tri03,4); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000007); NV_WR32(&Surfaces3D->RenderBufferOffset, 0, surf0); NV_WR32(&Surfaces3D->ZBufferOffset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000014); } /****************************************************************************\ * * * Probe RIVA Chip Configuration * * * \****************************************************************************/ static void nv3GetConfig ( RIVA_HW_INST *chip ) { /* * Fill in chip configuration. */ if (NV_RD32(&chip->PFB[0x00000000/4], 0) & 0x00000020) { if (((NV_RD32(chip->PMC, 0x00000000) & 0xF0) == 0x20) && ((NV_RD32(chip->PMC, 0x00000000) & 0x0F) >= 0x02)) { /* * SDRAM 128 ZX. */ chip->RamBandwidthKBytesPerSec = 800000; switch (NV_RD32(chip->PFB, 0x00000000) & 0x03) { case 2: chip->RamAmountKBytes = 1024 * 4; break; case 1: chip->RamAmountKBytes = 1024 * 2; break; default: chip->RamAmountKBytes = 1024 * 8; break; } } else { chip->RamBandwidthKBytesPerSec = 1000000; chip->RamAmountKBytes = 1024 * 8; } } else { /* * SGRAM 128. 
*/ chip->RamBandwidthKBytesPerSec = 1000000; switch (NV_RD32(chip->PFB, 0x00000000) & 0x00000003) { case 0: chip->RamAmountKBytes = 1024 * 8; break; case 2: chip->RamAmountKBytes = 1024 * 4; break; default: chip->RamAmountKBytes = 1024 * 2; break; } } chip->CrystalFreqKHz = (NV_RD32(chip->PEXTDEV, 0x00000000) & 0x00000040) ? 14318 : 13500; chip->CURSOR = &(chip->PRAMIN[0x00008000/4 - 0x0800/4]); chip->VBlankBit = 0x00000100; chip->MaxVClockFreqKHz = 256000; /* * Set chip functions. */ chip->Busy = nv3Busy; chip->ShowHideCursor = ShowHideCursor; chip->LoadStateExt = LoadStateExt; chip->UnloadStateExt = UnloadStateExt; chip->SetStartAddress = SetStartAddress3; chip->SetSurfaces2D = nv3SetSurfaces2D; chip->SetSurfaces3D = nv3SetSurfaces3D; chip->LockUnlock = nv3LockUnlock; } static void nv4GetConfig ( RIVA_HW_INST *chip ) { /* * Fill in chip configuration. */ if (NV_RD32(chip->PFB, 0x00000000) & 0x00000100) { chip->RamAmountKBytes = ((NV_RD32(chip->PFB, 0x00000000) >> 12) & 0x0F) * 1024 * 2 + 1024 * 2; } else { switch (NV_RD32(chip->PFB, 0x00000000) & 0x00000003) { case 0: chip->RamAmountKBytes = 1024 * 32; break; case 1: chip->RamAmountKBytes = 1024 * 4; break; case 2: chip->RamAmountKBytes = 1024 * 8; break; case 3: default: chip->RamAmountKBytes = 1024 * 16; break; } } switch ((NV_RD32(chip->PFB, 0x00000000) >> 3) & 0x00000003) { case 3: chip->RamBandwidthKBytesPerSec = 800000; break; default: chip->RamBandwidthKBytesPerSec = 1000000; break; } chip->CrystalFreqKHz = (NV_RD32(chip->PEXTDEV, 0x00000000) & 0x00000040) ? 14318 : 13500; chip->CURSOR = &(chip->PRAMIN[0x00010000/4 - 0x0800/4]); chip->VBlankBit = 0x00000001; chip->MaxVClockFreqKHz = 350000; /* * Set chip functions. 
*/ chip->Busy = nv4Busy; chip->ShowHideCursor = ShowHideCursor; chip->LoadStateExt = LoadStateExt; chip->UnloadStateExt = UnloadStateExt; chip->SetStartAddress = SetStartAddress; chip->SetSurfaces2D = nv4SetSurfaces2D; chip->SetSurfaces3D = nv4SetSurfaces3D; chip->LockUnlock = nv4LockUnlock; } static void nv10GetConfig ( RIVA_HW_INST *chip, unsigned int chipset ) { struct pci_dev* dev; u32 amt; #ifdef __BIG_ENDIAN /* turn on big endian register access */ if(!(NV_RD32(chip->PMC, 0x00000004) & 0x01000001)) NV_WR32(chip->PMC, 0x00000004, 0x01000001); #endif /* * Fill in chip configuration. */ if(chipset == NV_CHIP_IGEFORCE2) { dev = pci_get_bus_and_slot(0, 1); pci_read_config_dword(dev, 0x7C, &amt); pci_dev_put(dev); chip->RamAmountKBytes = (((amt >> 6) & 31) + 1) * 1024; } else if(chipset == NV_CHIP_0x01F0) { dev = pci_get_bus_and_slot(0, 1); pci_read_config_dword(dev, 0x84, &amt); pci_dev_put(dev); chip->RamAmountKBytes = (((amt >> 4) & 127) + 1) * 1024; } else { switch ((NV_RD32(chip->PFB, 0x0000020C) >> 20) & 0x000000FF) { case 0x02: chip->RamAmountKBytes = 1024 * 2; break; case 0x04: chip->RamAmountKBytes = 1024 * 4; break; case 0x08: chip->RamAmountKBytes = 1024 * 8; break; case 0x10: chip->RamAmountKBytes = 1024 * 16; break; case 0x20: chip->RamAmountKBytes = 1024 * 32; break; case 0x40: chip->RamAmountKBytes = 1024 * 64; break; case 0x80: chip->RamAmountKBytes = 1024 * 128; break; default: chip->RamAmountKBytes = 1024 * 16; break; } } switch ((NV_RD32(chip->PFB, 0x00000000) >> 3) & 0x00000003) { case 3: chip->RamBandwidthKBytesPerSec = 800000; break; default: chip->RamBandwidthKBytesPerSec = 1000000; break; } chip->CrystalFreqKHz = (NV_RD32(chip->PEXTDEV, 0x0000) & (1 << 6)) ? 
14318 : 13500; switch (chipset & 0x0ff0) { case 0x0170: case 0x0180: case 0x01F0: case 0x0250: case 0x0280: case 0x0300: case 0x0310: case 0x0320: case 0x0330: case 0x0340: if(NV_RD32(chip->PEXTDEV, 0x0000) & (1 << 22)) chip->CrystalFreqKHz = 27000; break; default: break; } chip->CursorStart = (chip->RamAmountKBytes - 128) * 1024; chip->CURSOR = NULL; /* can't set this here */ chip->VBlankBit = 0x00000001; chip->MaxVClockFreqKHz = 350000; /* * Set chip functions. */ chip->Busy = nv10Busy; chip->ShowHideCursor = ShowHideCursor; chip->LoadStateExt = LoadStateExt; chip->UnloadStateExt = UnloadStateExt; chip->SetStartAddress = SetStartAddress; chip->SetSurfaces2D = nv10SetSurfaces2D; chip->SetSurfaces3D = nv10SetSurfaces3D; chip->LockUnlock = nv4LockUnlock; switch(chipset & 0x0ff0) { case 0x0110: case 0x0170: case 0x0180: case 0x01F0: case 0x0250: case 0x0280: case 0x0300: case 0x0310: case 0x0320: case 0x0330: case 0x0340: chip->twoHeads = TRUE; break; default: chip->twoHeads = FALSE; break; } } int RivaGetConfig ( RIVA_HW_INST *chip, unsigned int chipset ) { /* * Save this so future SW know whats it's dealing with. */ chip->Version = RIVA_SW_VERSION; /* * Chip specific configuration. */ switch (chip->Architecture) { case NV_ARCH_03: nv3GetConfig(chip); break; case NV_ARCH_04: nv4GetConfig(chip); break; case NV_ARCH_10: case NV_ARCH_20: case NV_ARCH_30: nv10GetConfig(chip, chipset); break; default: return (-1); } chip->Chipset = chipset; /* * Fill in FIFO pointers. 
*/ chip->Rop = (RivaRop __iomem *)&(chip->FIFO[0x00000000/4]); chip->Clip = (RivaClip __iomem *)&(chip->FIFO[0x00002000/4]); chip->Patt = (RivaPattern __iomem *)&(chip->FIFO[0x00004000/4]); chip->Pixmap = (RivaPixmap __iomem *)&(chip->FIFO[0x00006000/4]); chip->Blt = (RivaScreenBlt __iomem *)&(chip->FIFO[0x00008000/4]); chip->Bitmap = (RivaBitmap __iomem *)&(chip->FIFO[0x0000A000/4]); chip->Line = (RivaLine __iomem *)&(chip->FIFO[0x0000C000/4]); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); return (0); }
gpl-2.0
GuneetAtwal/android_kernel_iocean_mtk6589t
block/partitions/osf.c
12940
1925
/*
 * fs/partitions/osf.c
 *
 * Code extracted from drivers/block/genhd.c
 *
 * Copyright (C) 1991-1998  Linus Torvalds
 * Re-organised Feb 1998 Russell King
 */

#include "check.h"
#include "osf.h"

/* A BSD/OSF disklabel describes at most this many partitions. */
#define MAX_OSF_PARTITIONS 18

/*
 * Parse a BSD/OSF disklabel found in sector 0 of the device.
 *
 * Returns 1 when a valid label was found and its partitions were
 * registered, 0 when no (or an implausible) label is present, and
 * -1 when sector 0 could not be read at all.
 */
int osf_partition(struct parsed_partitions *state)
{
	/* On-disk layout of the OSF disklabel; all fields little-endian. */
	struct disklabel {
		__le32 d_magic;
		__le16 d_type,d_subtype;
		u8 d_typename[16];
		u8 d_packname[16];
		__le32 d_secsize;
		__le32 d_nsectors;
		__le32 d_ntracks;
		__le32 d_ncylinders;
		__le32 d_secpercyl;
		__le32 d_secprtunit;
		__le16 d_sparespertrack;
		__le16 d_sparespercyl;
		__le32 d_acylinders;
		__le16 d_rpm, d_interleave, d_trackskew, d_cylskew;
		__le32 d_headswitch, d_trkseek, d_flags;
		__le32 d_drivedata[5];
		__le32 d_spare[5];
		__le32 d_magic2;
		__le16 d_checksum;
		__le16 d_npartitions;
		__le32 d_bbsize, d_sbsize;
		struct d_partition {
			__le32 p_size;
			__le32 p_offset;
			__le32 p_fsize;
			u8 p_fstype;
			u8 p_frag;
			__le16 p_cpg;
		} d_partitions[MAX_OSF_PARTITIONS];
	} *label;
	struct d_partition *entry;
	unsigned int npartitions;
	unsigned char *buf;
	Sector sect;
	int slot = 1;
	int idx;

	buf = read_part_sector(state, 0, &sect);
	if (!buf)
		return -1;

	/* The label lives 64 bytes into the first sector. */
	label = (struct disklabel *) (buf + 64);

	/* Both magic numbers must match for the label to be trusted. */
	if (le32_to_cpu(label->d_magic) != DISKLABELMAGIC)
		goto out_nolabel;
	if (le32_to_cpu(label->d_magic2) != DISKLABELMAGIC)
		goto out_nolabel;

	/*
	 * Reject corrupt labels claiming more partitions than the
	 * on-disk table can hold; reading past d_partitions[] would
	 * walk off the end of the sector buffer.
	 */
	npartitions = le16_to_cpu(label->d_npartitions);
	if (npartitions > MAX_OSF_PARTITIONS)
		goto out_nolabel;

	entry = label->d_partitions;
	for (idx = 0; idx < npartitions; idx++, entry++) {
		if (slot == state->limit)
			break;
		/* Zero-sized entries are holes in the table; skip them
		 * but keep the slot numbering aligned with the table. */
		if (le32_to_cpu(entry->p_size))
			put_partition(state, slot,
				le32_to_cpu(entry->p_offset),
				le32_to_cpu(entry->p_size));
		slot++;
	}
	strlcat(state->pp_buf, "\n", PAGE_SIZE);
	put_dev_sector(sect);
	return 1;

out_nolabel:
	put_dev_sector(sect);
	return 0;
}
gpl-2.0
sleshepic/l900_MK4_Kernel
drivers/misc/sec_jack.c
141
22648
/* drivers/misc/sec_jack.c * * Copyright (C) 2010 Samsung Electronics Co.Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/input.h> #include <linux/platform_device.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/switch.h> #include <linux/input.h> #include <linux/timer.h> #include <linux/wakelock.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/gpio_event.h> #include <linux/sec_jack.h> #include <plat/adc.h> #if defined(CONFIG_STMPE811_ADC) #define SEC_JACK_ADC_CH 4 #else #if defined(CONFIG_MACH_TAB3) #define SEC_JACK_ADC_CH 0 #else #define SEC_JACK_ADC_CH 3 #endif #endif #define SEC_JACK_SAMPLE_SIZE 5 #define MAX_ZONE_LIMIT 10 /* keep this value if you support double-pressed concept */ #if defined(CONFIG_TARGET_LOCALE_KOR) #define SEND_KEY_CHECK_TIME_MS 20 /* 20ms - GB VOC in KOR*/ #elif defined(CONFIG_MACH_Q1_BD) || defined(CONFIG_MACH_P4NOTE) || defined(CONFIG_MACH_SP7160LTE) /* 27ms, total delay is approximately double more because hrtimer is called twice by gpio input driver, new sec spec total delay is 60ms +/-10ms */ #define SEND_KEY_CHECK_TIME_MS 27 #else #define SEND_KEY_CHECK_TIME_MS 40 /* 40ms */ #endif #define WAKE_LOCK_TIME (HZ * 5) /* 5 sec */ #define EAR_CHECK_LOOP_CNT 10 #if defined(CONFIG_MACH_PX) || defined(CONFIG_MACH_P4NOTE) || defined(CONFIG_MACH_SP7160LTE) \ || defined(CONFIG_MACH_GC1) || 
defined(CONFIG_MACH_TAB3) #define JACK_CLASS_NAME "audio" #define JACK_DEV_NAME "earjack" #else #define JACK_CLASS_NAME "jack" #define JACK_DEV_NAME "jack_selector" #endif #define JACK_RESELECTOR_NAME "jack_reselector" static struct class *jack_class; static struct device *jack_dev; static struct device *jack_reselector; static bool recheck_jack; struct sec_jack_info { struct s3c_adc_client *padc; struct sec_jack_platform_data *pdata; struct delayed_work jack_detect_work; struct work_struct buttons_work; struct workqueue_struct *queue; struct input_dev *input_dev; struct wake_lock det_wake_lock; struct sec_jack_zone *zone; struct input_handler handler; struct input_handle handle; struct input_device_id ids[2]; int det_irq; int dev_id; int pressed; int pressed_code; struct platform_device *send_key_dev; unsigned int cur_jack_type; int det_status; }; /* with some modifications like moving all the gpio structs inside * the platform data and getting the name for the switch and * gpio_event from the platform data, the driver could support more than * one headset jack, but currently user space is looking only for * one key file and switch for a headset so it'd be overkill and * untestable so we limit to one instantiation for now. 
*/ static atomic_t instantiated = ATOMIC_INIT(0); /* sysfs name HeadsetObserver.java looks for to track headset state */ struct switch_dev switch_jack_detection = { .name = "h2w", }; /* To support AT+FCESTEST=1 */ struct switch_dev switch_sendend = { .name = "send_end", }; static struct gpio_event_direct_entry sec_jack_key_map[] = { { .code = KEY_UNKNOWN, }, }; static struct gpio_event_input_info sec_jack_key_info = { .info.func = gpio_event_input_func, .info.no_suspend = true, .type = EV_KEY, .debounce_time.tv64 = SEND_KEY_CHECK_TIME_MS * NSEC_PER_MSEC, .keymap = sec_jack_key_map, .keymap_size = ARRAY_SIZE(sec_jack_key_map) }; static struct gpio_event_info *sec_jack_input_info[] = { &sec_jack_key_info.info, }; static struct gpio_event_platform_data sec_jack_input_data = { .name = "sec_jack", .info = sec_jack_input_info, .info_count = ARRAY_SIZE(sec_jack_input_info), }; static int sec_jack_get_adc_data(struct s3c_adc_client *padc) { int adc_data; int adc_max = 0; int adc_min = 0xFFFF; int adc_total = 0; int adc_retry_cnt = 0; int i; for (i = 0; i < SEC_JACK_SAMPLE_SIZE; i++) { #if defined(CONFIG_STMPE811_ADC) adc_data = stmpe811_get_adc_data(SEC_JACK_ADC_CH); #else adc_data = s3c_adc_read(padc, SEC_JACK_ADC_CH); #endif if (adc_data < 0) { adc_retry_cnt++; if (adc_retry_cnt > 10) return adc_data; } if (i != 0) { if (adc_data > adc_max) adc_max = adc_data; else if (adc_data < adc_min) adc_min = adc_data; } else { adc_max = adc_data; adc_min = adc_data; } adc_total += adc_data; } return (adc_total - adc_max - adc_min) / (SEC_JACK_SAMPLE_SIZE - 2); } /* gpio_input driver does not support to read adc value. * We use input filter to support 3-buttons of headset * without changing gpio_input driver. 
*/ static bool sec_jack_buttons_filter(struct input_handle *handle, unsigned int type, unsigned int code, int value) { struct sec_jack_info *hi = handle->handler->private; if (hi->det_status == true) return false; if (type != EV_KEY || code != KEY_UNKNOWN) return false; hi->pressed = value; /* This is called in timer handler of gpio_input driver. * We use workqueue to read adc value. */ queue_work(hi->queue, &hi->buttons_work); return true; } static int sec_jack_buttons_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { struct sec_jack_info *hi; struct sec_jack_platform_data *pdata; struct sec_jack_buttons_zone *btn_zones; int err; int i; /* bind input_handler to input device related to only sec_jack */ if (dev->name != sec_jack_input_data.name) return -ENODEV; hi = handler->private; pdata = hi->pdata; btn_zones = pdata->buttons_zones; hi->input_dev = dev; hi->handle.dev = dev; hi->handle.handler = handler; hi->handle.open = 0; hi->handle.name = "sec_jack_buttons"; err = input_register_handle(&hi->handle); if (err) { pr_err("%s: Failed to register sec_jack buttons handle, " "error %d\n", __func__, err); goto err_register_handle; } err = input_open_device(&hi->handle); if (err) { pr_err("%s: Failed to open input device, error %d\n", __func__, err); goto err_open_device; } for (i = 0; i < pdata->num_buttons_zones; i++) input_set_capability(dev, EV_KEY, btn_zones[i].code); return 0; err_open_device: input_unregister_handle(&hi->handle); err_register_handle: return err; } static void sec_jack_buttons_disconnect(struct input_handle *handle) { input_close_device(handle); input_unregister_handle(handle); } static void sec_jack_set_type(struct sec_jack_info *hi, int jack_type) { struct sec_jack_platform_data *pdata = hi->pdata; /* this can happen during slow inserts where we think we identified * the type but then we get another interrupt and do it again */ if (jack_type == hi->cur_jack_type) { if (jack_type != 
SEC_HEADSET_4POLE) pdata->set_micbias_state(false); return; } if (jack_type == SEC_HEADSET_4POLE) { /* for a 4 pole headset, enable detection of send/end key */ if (hi->send_key_dev == NULL) /* enable to get events again */ hi->send_key_dev = platform_device_register_data(NULL, GPIO_EVENT_DEV_NAME, hi->dev_id, &sec_jack_input_data, sizeof(sec_jack_input_data)); } else { /* for all other jacks, disable send/end key detection */ if (hi->send_key_dev != NULL) { /* disable to prevent false events on next insert */ platform_device_unregister(hi->send_key_dev); hi->send_key_dev = NULL; } /* micbias is left enabled for 4pole and disabled otherwise */ pdata->set_micbias_state(false); } /* if user inserted ear jack slowly, different jack event can occur * sometimes because irq_thread is defined IRQ_ONESHOT, detach status * can be ignored sometimes so in that case, driver inform detach * event to user side */ switch_set_state(&switch_jack_detection, SEC_JACK_NO_DEVICE); hi->cur_jack_type = jack_type; pr_info("%s : jack_type = %d\n", __func__, jack_type); switch_set_state(&switch_jack_detection, jack_type); } static void handle_jack_not_inserted(struct sec_jack_info *hi) { sec_jack_set_type(hi, SEC_JACK_NO_DEVICE); hi->pdata->set_micbias_state(false); } static void determine_jack_type(struct sec_jack_info *hi) { struct sec_jack_platform_data *pdata = hi->pdata; struct sec_jack_zone *zones = pdata->zones; int size = pdata->num_zones; int count[MAX_ZONE_LIMIT] = {0}; int adc; int i; unsigned npolarity = !pdata->det_active_high; /* set mic bias to enable adc */ pdata->set_micbias_state(true); while (gpio_get_value(pdata->det_gpio) ^ npolarity) { adc = sec_jack_get_adc_data(hi->padc); #if defined(CONFIG_TARGET_LOCALE_KOR) pr_info("%s: adc = %d\n", __func__, adc); #else pr_debug("%s: adc = %d\n", __func__, adc); #endif if (adc < 0) break; /* determine the type of headset based on the * adc value. An adc value can fall in various * ranges or zones. 
Within some ranges, the type * can be returned immediately. Within others, the * value is considered unstable and we need to sample * a few more types (up to the limit determined by * the range) before we return the type for that range. */ for (i = 0; i < size; i++) { if (adc <= zones[i].adc_high) { if (++count[i] > zones[i].check_count) { if (recheck_jack == true && i == 4) { pr_info("%s : something wrong connection!\n", __func__); handle_jack_not_inserted(hi); recheck_jack = false; return; } sec_jack_set_type(hi, zones[i].jack_type); return; } msleep(zones[i].delay_ms); break; } } } recheck_jack = false; /* jack removed before detection complete */ pr_debug("%s : jack removed before detection complete\n", __func__); handle_jack_not_inserted(hi); } /* thread run whenever the headset detect state changes (either insertion * or removal). */ static irqreturn_t sec_jack_detect_irq_thread(int irq, void *dev_id) { struct sec_jack_info *hi = dev_id; struct sec_jack_platform_data *pdata = hi->pdata; unsigned npolarity = !pdata->det_active_high; int curr_data; int pre_data; int loopcnt; int check_loop_cnt = EAR_CHECK_LOOP_CNT; hi->det_status = true; /* prevent suspend to allow user space to respond to switch */ wake_lock_timeout(&hi->det_wake_lock, WAKE_LOCK_TIME); /* debounce headset jack. don't try to determine the type of * headset until the detect state is true for a while. */ pre_data = 0; loopcnt = 0; while (true) { curr_data = gpio_get_value(pdata->det_gpio); if (pre_data == curr_data) loopcnt++; else loopcnt = 0; pre_data = curr_data; if (loopcnt >= check_loop_cnt) { if (!curr_data ^ npolarity) { /* jack not detected. 
*/ handle_jack_not_inserted(hi); hi->det_status = false; return IRQ_HANDLED; } break; } msleep(20); } /* jack presence was detected the whole time, figure out which type */ determine_jack_type(hi); hi->det_status = false; return IRQ_HANDLED; } /* thread run whenever the button of headset is pressed or released */ void sec_jack_buttons_work(struct work_struct *work) { struct sec_jack_info *hi = container_of(work, struct sec_jack_info, buttons_work); struct sec_jack_platform_data *pdata = hi->pdata; struct sec_jack_buttons_zone *btn_zones = pdata->buttons_zones; int adc; int i; /* prevent suspend to allow user space to respond to switch */ wake_lock_timeout(&hi->det_wake_lock, WAKE_LOCK_TIME); /* when button is released */ if (hi->pressed == 0) { input_report_key(hi->input_dev, hi->pressed_code, 0); switch_set_state(&switch_sendend, 0); input_sync(hi->input_dev); pr_info("%s: earkey is released\n", __func__); pr_debug("keycode=%d\n", hi->pressed_code); return; } /* when button is pressed */ adc = sec_jack_get_adc_data(hi->padc); for (i = 0; i < pdata->num_buttons_zones; i++) if (adc >= btn_zones[i].adc_low && adc <= btn_zones[i].adc_high) { hi->pressed_code = btn_zones[i].code; input_report_key(hi->input_dev, btn_zones[i].code, 1); switch_set_state(&switch_sendend, 1); input_sync(hi->input_dev); pr_info("%s: earkey is pressed (adc:%d)\n", __func__, adc); pr_debug("keycode=%d, is pressed\n", btn_zones[i].code); return; } pr_warn("%s: key is skipped. 
ADC value is %d\n", __func__, adc); } static ssize_t select_jack_show(struct device *dev, struct device_attribute *attr, char *buf) { pr_info("%s : operate nothing\n", __func__); return 0; } static ssize_t select_jack_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct sec_jack_info *hi = dev_get_drvdata(dev); struct sec_jack_platform_data *pdata = hi->pdata; int value = 0; sscanf(buf, "%d", &value); pr_err("%s: User selection : 0X%x", __func__, value); if (value == SEC_HEADSET_4POLE) { pdata->set_micbias_state(true); msleep(100); } sec_jack_set_type(hi, value); return size; } #if defined(CONFIG_MACH_PX) || defined(CONFIG_MACH_P4NOTE) || defined(CONFIG_MACH_SP7160LTE) \ || defined(CONFIG_MACH_GC1) || defined(CONFIG_MACH_TAB3) static ssize_t earjack_key_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct sec_jack_info *hi = dev_get_drvdata(dev); int value = 0; if (hi->pressed <= 0) value = 0; else value = 1; return sprintf(buf, "%d\n", value); } static ssize_t earjack_key_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { pr_info("%s : operate nothing\n", __func__); return size; } static ssize_t earjack_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct sec_jack_info *hi = dev_get_drvdata(dev); int value = 0; if (hi->cur_jack_type == SEC_HEADSET_4POLE) value = 1; else value = 0; return sprintf(buf, "%d\n", value); } static ssize_t earjack_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { pr_info("%s : operate nothing\n", __func__); return size; } static DEVICE_ATTR(key_state, S_IRUGO | S_IWUSR | S_IWGRP, earjack_key_state_show, earjack_key_state_store); static DEVICE_ATTR(state, S_IRUGO | S_IWUSR | S_IWGRP, earjack_state_show, earjack_state_store); #endif static DEVICE_ATTR(select_jack, S_IRUGO | S_IWUSR | S_IWGRP, select_jack_show, select_jack_store); static ssize_t 
reselect_jack_show(struct device *dev, struct device_attribute *attr, char *buf) { pr_info("%s : operate nothing\n", __func__); return 0; } static ssize_t reselect_jack_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct sec_jack_info *hi = dev_get_drvdata(dev); struct sec_jack_platform_data *pdata = hi->pdata; int value = 0; sscanf(buf, "%d", &value); pr_err("%s: User reselection : 0X%x", __func__, value); if (value == 1) { recheck_jack = true; determine_jack_type(hi); } return size; } static DEVICE_ATTR(reselect_jack, S_IRUGO | S_IWUSR | S_IWGRP, reselect_jack_show, reselect_jack_store); static int sec_jack_probe(struct platform_device *pdev) { struct sec_jack_info *hi; struct sec_jack_platform_data *pdata = pdev->dev.platform_data; int ret; pr_info("%s : Registering jack driver\n", __func__); if (!pdata) { pr_err("%s : pdata is NULL.\n", __func__); return -ENODEV; } if (!pdata->zones || !pdata->set_micbias_state || pdata->num_zones > MAX_ZONE_LIMIT) { pr_err("%s : need to check pdata\n", __func__); return -ENODEV; } if (atomic_xchg(&instantiated, 1)) { pr_err("%s : already instantiated, can only have one\n", __func__); return -ENODEV; } sec_jack_key_map[0].gpio = pdata->send_end_gpio; /* If no other keys in pdata, make all keys default to KEY_MEDIA */ if (pdata->num_buttons_zones == 0) sec_jack_key_map[0].code = KEY_MEDIA; hi = kzalloc(sizeof(struct sec_jack_info), GFP_KERNEL); if (hi == NULL) { pr_err("%s : Failed to allocate memory.\n", __func__); ret = -ENOMEM; goto err_kzalloc; } hi->pdata = pdata; /* make the id of our gpio_event device the same as our platform device, * which makes it the responsiblity of the board file to make sure * it is unique relative to other gpio_event devices */ hi->dev_id = pdev->id; ret = gpio_request(pdata->det_gpio, "ear_jack_detect"); if (ret) { pr_err("%s : gpio_request failed for %d\n", __func__, pdata->det_gpio); goto err_gpio_request; } ret = 
switch_dev_register(&switch_jack_detection); if (ret < 0) { pr_err("%s : Failed to register switch device\n", __func__); goto err_switch_dev_register; } ret = switch_dev_register(&switch_sendend); if (ret < 0) { printk(KERN_ERR "SEC JACK: Failed to register switch device\n"); goto err_switch_dev_register_send_end; } wake_lock_init(&hi->det_wake_lock, WAKE_LOCK_SUSPEND, "sec_jack_det"); INIT_WORK(&hi->buttons_work, sec_jack_buttons_work); hi->queue = create_singlethread_workqueue("sec_jack_wq"); if (hi->queue == NULL) { ret = -ENOMEM; pr_err("%s: Failed to create workqueue\n", __func__); goto err_create_wq_failed; } hi->det_irq = gpio_to_irq(pdata->det_gpio); jack_class = class_create(THIS_MODULE, JACK_CLASS_NAME); if (IS_ERR(jack_class)) pr_err("Failed to create class(sec_jack)\n"); /* support PBA function test */ jack_dev = device_create(jack_class, NULL, 0, hi, JACK_DEV_NAME); if (IS_ERR(jack_dev)) pr_err("Failed to create device(sec_jack)!= %ld\n", IS_ERR(jack_dev)); if (device_create_file(jack_dev, &dev_attr_select_jack) < 0) pr_err("Failed to create device file(%s)!\n", dev_attr_select_jack.attr.name); jack_reselector = device_create(jack_class, NULL, 0, hi, JACK_RESELECTOR_NAME); if (IS_ERR(jack_reselector)) pr_err("Failed to create device(sec_jack)!= %ld\n", IS_ERR(jack_reselector)); if (device_create_file(jack_reselector, &dev_attr_reselect_jack) < 0) pr_err("Failed to create device file(%s)!\n", dev_attr_reselect_jack.attr.name); #if defined(CONFIG_MACH_PX) || defined(CONFIG_MACH_P4NOTE) || defined(CONFIG_MACH_SP7160LTE) \ || defined(CONFIG_MACH_GC1) || defined(CONFIG_MACH_TAB3) if (device_create_file(jack_dev, &dev_attr_key_state) < 0) pr_err("Failed to create device file (%s)!\n", dev_attr_key_state.attr.name); if (device_create_file(jack_dev, &dev_attr_state) < 0) pr_err("Failed to create device file (%s)!\n", dev_attr_state.attr.name); #endif set_bit(EV_KEY, hi->ids[0].evbit); hi->ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT; hi->handler.filter = 
sec_jack_buttons_filter; hi->handler.connect = sec_jack_buttons_connect; hi->handler.disconnect = sec_jack_buttons_disconnect; hi->handler.name = "sec_jack_buttons"; hi->handler.id_table = hi->ids; hi->handler.private = hi; /* Register adc client */ hi->padc = s3c_adc_register(pdev, NULL, NULL, 0); if (IS_ERR(hi->padc)) { dev_err(&pdev->dev, "cannot register adc\n"); ret = PTR_ERR(hi->padc); goto err_register_adc; } ret = input_register_handler(&hi->handler); if (ret) { pr_err("%s : Failed to register_handler\n", __func__); goto err_register_input_handler; } #if defined(CONFIG_MACH_TAB3) if (pdata->send_end_active_high == true) sec_jack_key_info.flags = 1; #endif ret = request_threaded_irq(hi->det_irq, NULL, sec_jack_detect_irq_thread, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "sec_headset_detect", hi); if (ret) { pr_err("%s : Failed to request_irq.\n", __func__); goto err_request_detect_irq; } /* to handle insert/removal when we're sleeping in a call */ ret = enable_irq_wake(hi->det_irq); if (ret) { pr_err("%s : Failed to enable_irq_wake.\n", __func__); goto err_enable_irq_wake; } dev_set_drvdata(&pdev->dev, hi); /* Prove current earjack state */ determine_jack_type(hi); return 0; err_enable_irq_wake: free_irq(hi->det_irq, hi); err_request_detect_irq: input_unregister_handler(&hi->handler); err_register_input_handler: s3c_adc_release(hi->padc); err_register_adc: destroy_workqueue(hi->queue); err_create_wq_failed: wake_lock_destroy(&hi->det_wake_lock); switch_dev_unregister(&switch_sendend); err_switch_dev_register_send_end: switch_dev_unregister(&switch_jack_detection); err_switch_dev_register: gpio_free(pdata->det_gpio); err_gpio_request: kfree(hi); err_kzalloc: atomic_set(&instantiated, 0); return ret; } static int sec_jack_remove(struct platform_device *pdev) { struct sec_jack_info *hi = dev_get_drvdata(&pdev->dev); pr_info("%s :\n", __func__); disable_irq_wake(hi->det_irq); free_irq(hi->det_irq, hi); destroy_workqueue(hi->queue); if 
(hi->send_key_dev) { platform_device_unregister(hi->send_key_dev); hi->send_key_dev = NULL; } input_unregister_handler(&hi->handler); wake_lock_destroy(&hi->det_wake_lock); switch_dev_unregister(&switch_sendend); switch_dev_unregister(&switch_jack_detection); gpio_free(hi->pdata->det_gpio); s3c_adc_release(hi->padc); kfree(hi); atomic_set(&instantiated, 0); return 0; } static int sec_jack_suspend(struct device *dev) { struct sec_jack_info *hi = dev_get_drvdata(dev); int ret; ret = enable_irq_wake(hi->det_irq); pr_info("%s: enable_irq_wake(%d)\n", __func__, ret); disable_irq(hi->det_irq); return 0; } static int sec_jack_resume(struct device *dev) { struct sec_jack_info *hi = dev_get_drvdata(dev); int ret; ret = disable_irq_wake(hi->det_irq); pr_info("%s: disable_irq_wake(%d)\n", __func__, ret); enable_irq(hi->det_irq); return 0; } static const struct dev_pm_ops sec_jack_dev_pm_ops = { .suspend = sec_jack_suspend, .resume = sec_jack_resume, }; static struct platform_driver sec_jack_driver = { .probe = sec_jack_probe, .remove = sec_jack_remove, .driver = { .name = "sec_jack", .owner = THIS_MODULE, .pm = &sec_jack_dev_pm_ops, }, }; static int __init sec_jack_init(void) { int ret; ret = platform_driver_register(&sec_jack_driver); if (ret) pr_err("%s: Failed to add sec jack driver\n", __func__); return ret; } static void __exit sec_jack_exit(void) { platform_driver_unregister(&sec_jack_driver); } module_init(sec_jack_init); module_exit(sec_jack_exit); MODULE_AUTHOR("ms17.kim@samsung.com"); MODULE_DESCRIPTION("Samsung Electronics Corp Ear-Jack detection driver"); MODULE_LICENSE("GPL");
gpl-2.0
tjstyle/android_kernel_fih_msm7x30
drivers/media/video/pvrusb2/pvrusb2-v4l2.c
909
32696
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/version.h> #include "pvrusb2-context.h" #include "pvrusb2-hdw.h" #include "pvrusb2.h" #include "pvrusb2-debug.h" #include "pvrusb2-v4l2.h" #include "pvrusb2-ioread.h" #include <linux/videodev2.h> #include <media/v4l2-dev.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> struct pvr2_v4l2_dev; struct pvr2_v4l2_fh; struct pvr2_v4l2; struct pvr2_v4l2_dev { struct video_device devbase; /* MUST be first! 
*/ struct pvr2_v4l2 *v4lp; struct pvr2_context_stream *stream; /* Information about this device: */ enum pvr2_config config; /* Expected stream format */ int v4l_type; /* V4L defined type for this device node */ enum pvr2_v4l_type minor_type; /* pvr2-understood minor device type */ }; struct pvr2_v4l2_fh { struct pvr2_channel channel; struct pvr2_v4l2_dev *pdi; enum v4l2_priority prio; struct pvr2_ioread *rhp; struct file *file; struct pvr2_v4l2 *vhead; struct pvr2_v4l2_fh *vnext; struct pvr2_v4l2_fh *vprev; wait_queue_head_t wait_data; int fw_mode_flag; /* Map contiguous ordinal value to input id */ unsigned char *input_map; unsigned int input_cnt; }; struct pvr2_v4l2 { struct pvr2_channel channel; struct pvr2_v4l2_fh *vfirst; struct pvr2_v4l2_fh *vlast; struct v4l2_prio_state prio; /* streams - Note that these must be separately, individually, * allocated pointers. This is because the v4l core is going to * manage their deletion - separately, individually... */ struct pvr2_v4l2_dev *dev_video; struct pvr2_v4l2_dev *dev_radio; }; static int video_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1}; module_param_array(video_nr, int, NULL, 0444); MODULE_PARM_DESC(video_nr, "Offset for device's video dev minor"); static int radio_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1}; module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(radio_nr, "Offset for device's radio dev minor"); static int vbi_nr[PVR_NUM] = {[0 ... 
PVR_NUM-1] = -1}; module_param_array(vbi_nr, int, NULL, 0444); MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor"); static struct v4l2_capability pvr_capability ={ .driver = "pvrusb2", .card = "Hauppauge WinTV pvr-usb2", .bus_info = "usb", .version = KERNEL_VERSION(0, 9, 0), .capabilities = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO | V4L2_CAP_READWRITE), .reserved = {0,0,0,0} }; static struct v4l2_fmtdesc pvr_fmtdesc [] = { { .index = 0, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = V4L2_FMT_FLAG_COMPRESSED, .description = "MPEG1/2", // This should really be V4L2_PIX_FMT_MPEG, but xawtv // breaks when I do that. .pixelformat = 0, // V4L2_PIX_FMT_MPEG, .reserved = { 0, 0, 0, 0 } } }; #define PVR_FORMAT_PIX 0 #define PVR_FORMAT_VBI 1 static struct v4l2_format pvr_format [] = { [PVR_FORMAT_PIX] = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt = { .pix = { .width = 720, .height = 576, // This should really be V4L2_PIX_FMT_MPEG, // but xawtv breaks when I do that. .pixelformat = 0, // V4L2_PIX_FMT_MPEG, .field = V4L2_FIELD_INTERLACED, .bytesperline = 0, // doesn't make sense // here //FIXME : Don't know what to put here... .sizeimage = (32*1024), .colorspace = 0, // doesn't make sense here .priv = 0 } } }, [PVR_FORMAT_VBI] = { .type = V4L2_BUF_TYPE_VBI_CAPTURE, .fmt = { .vbi = { .sampling_rate = 27000000, .offset = 248, .samples_per_line = 1443, .sample_format = V4L2_PIX_FMT_GREY, .start = { 0, 0 }, .count = { 0, 0 }, .flags = 0, .reserved = { 0, 0 } } } } }; /* * pvr_ioctl() * * This is part of Video 4 Linux API. The procedure handles ioctl() calls. 
* */ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) { struct pvr2_v4l2_fh *fh = file->private_data; struct pvr2_v4l2 *vp = fh->vhead; struct pvr2_v4l2_dev *pdi = fh->pdi; struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; long ret = -EINVAL; if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) { v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),cmd); } if (!pvr2_hdw_dev_ok(hdw)) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "ioctl failed - bad or no context"); return -EFAULT; } /* check priority */ switch (cmd) { case VIDIOC_S_CTRL: case VIDIOC_S_STD: case VIDIOC_S_INPUT: case VIDIOC_S_TUNER: case VIDIOC_S_FREQUENCY: ret = v4l2_prio_check(&vp->prio, fh->prio); if (ret) return ret; } switch (cmd) { case VIDIOC_QUERYCAP: { struct v4l2_capability *cap = arg; memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability)); strlcpy(cap->bus_info,pvr2_hdw_get_bus_info(hdw), sizeof(cap->bus_info)); strlcpy(cap->card,pvr2_hdw_get_desc(hdw),sizeof(cap->card)); ret = 0; break; } case VIDIOC_G_PRIORITY: { enum v4l2_priority *p = arg; *p = v4l2_prio_max(&vp->prio); ret = 0; break; } case VIDIOC_S_PRIORITY: { enum v4l2_priority *prio = arg; ret = v4l2_prio_change(&vp->prio, &fh->prio, *prio); break; } case VIDIOC_ENUMSTD: { struct v4l2_standard *vs = (struct v4l2_standard *)arg; int idx = vs->index; ret = pvr2_hdw_get_stdenum_value(hdw,vs,idx+1); break; } case VIDIOC_G_STD: { int val = 0; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),&val); *(v4l2_std_id *)arg = val; break; } case VIDIOC_S_STD: { ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR), *(v4l2_std_id *)arg); break; } case VIDIOC_ENUMINPUT: { struct pvr2_ctrl *cptr; struct v4l2_input *vi = (struct v4l2_input *)arg; struct v4l2_input tmp; unsigned int cnt; int val; cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT); memset(&tmp,0,sizeof(tmp)); tmp.index = vi->index; ret = 0; if (vi->index >= fh->input_cnt) { ret = -EINVAL; break; } val = fh->input_map[vi->index]; switch 
(val) { case PVR2_CVAL_INPUT_TV: case PVR2_CVAL_INPUT_DTV: case PVR2_CVAL_INPUT_RADIO: tmp.type = V4L2_INPUT_TYPE_TUNER; break; case PVR2_CVAL_INPUT_SVIDEO: case PVR2_CVAL_INPUT_COMPOSITE: tmp.type = V4L2_INPUT_TYPE_CAMERA; break; default: ret = -EINVAL; break; } if (ret < 0) break; cnt = 0; pvr2_ctrl_get_valname(cptr,val, tmp.name,sizeof(tmp.name)-1,&cnt); tmp.name[cnt] = 0; /* Don't bother with audioset, since this driver currently always switches the audio whenever the video is switched. */ /* Handling std is a tougher problem. It doesn't make sense in cases where a device might be multi-standard. We could just copy out the current value for the standard, but it can change over time. For now just leave it zero. */ memcpy(vi, &tmp, sizeof(tmp)); ret = 0; break; } case VIDIOC_G_INPUT: { unsigned int idx; struct pvr2_ctrl *cptr; struct v4l2_input *vi = (struct v4l2_input *)arg; int val; cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT); val = 0; ret = pvr2_ctrl_get_value(cptr,&val); vi->index = 0; for (idx = 0; idx < fh->input_cnt; idx++) { if (fh->input_map[idx] == val) { vi->index = idx; break; } } break; } case VIDIOC_S_INPUT: { struct v4l2_input *vi = (struct v4l2_input *)arg; if (vi->index >= fh->input_cnt) { ret = -ERANGE; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT), fh->input_map[vi->index]); break; } case VIDIOC_ENUMAUDIO: { /* pkt: FIXME: We are returning one "fake" input here which could very well be called "whatever_we_like". This is for apps that want to see an audio input just to feel comfortable, as well as to test if it can do stereo or sth. There is actually no guarantee that the actual audio input cannot change behind the app's back, but most applications should not mind that either. 
Hopefully, mplayer people will work with us on this (this whole mess is to support mplayer pvr://), or Hans will come up with a more standard way to say "we have inputs but we don 't want you to change them independent of video" which will sort this mess. */ struct v4l2_audio *vin = arg; ret = -EINVAL; if (vin->index > 0) break; strncpy(vin->name, "PVRUSB2 Audio",14); vin->capability = V4L2_AUDCAP_STEREO; ret = 0; break; break; } case VIDIOC_G_AUDIO: { /* pkt: FIXME: see above comment (VIDIOC_ENUMAUDIO) */ struct v4l2_audio *vin = arg; memset(vin,0,sizeof(*vin)); vin->index = 0; strncpy(vin->name, "PVRUSB2 Audio",14); vin->capability = V4L2_AUDCAP_STEREO; ret = 0; break; } case VIDIOC_S_AUDIO: { ret = -EINVAL; break; } case VIDIOC_G_TUNER: { struct v4l2_tuner *vt = (struct v4l2_tuner *)arg; if (vt->index != 0) break; /* Only answer for the 1st tuner */ pvr2_hdw_execute_tuner_poll(hdw); ret = pvr2_hdw_get_tuner_status(hdw,vt); break; } case VIDIOC_S_TUNER: { struct v4l2_tuner *vt=(struct v4l2_tuner *)arg; if (vt->index != 0) break; ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE), vt->audmode); break; } case VIDIOC_S_FREQUENCY: { const struct v4l2_frequency *vf = (struct v4l2_frequency *)arg; unsigned long fv; struct v4l2_tuner vt; int cur_input; struct pvr2_ctrl *ctrlp; ret = pvr2_hdw_get_tuner_status(hdw,&vt); if (ret != 0) break; ctrlp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT); ret = pvr2_ctrl_get_value(ctrlp,&cur_input); if (ret != 0) break; if (vf->type == V4L2_TUNER_RADIO) { if (cur_input != PVR2_CVAL_INPUT_RADIO) { pvr2_ctrl_set_value(ctrlp, PVR2_CVAL_INPUT_RADIO); } } else { if (cur_input == PVR2_CVAL_INPUT_RADIO) { pvr2_ctrl_set_value(ctrlp, PVR2_CVAL_INPUT_TV); } } fv = vf->frequency; if (vt.capability & V4L2_TUNER_CAP_LOW) { fv = (fv * 125) / 2; } else { fv = fv * 62500; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),fv); break; } case VIDIOC_G_FREQUENCY: { struct v4l2_frequency *vf = (struct 
v4l2_frequency *)arg; int val = 0; int cur_input; struct v4l2_tuner vt; ret = pvr2_hdw_get_tuner_status(hdw,&vt); if (ret != 0) break; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY), &val); if (ret != 0) break; pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT), &cur_input); if (cur_input == PVR2_CVAL_INPUT_RADIO) { vf->type = V4L2_TUNER_RADIO; } else { vf->type = V4L2_TUNER_ANALOG_TV; } if (vt.capability & V4L2_TUNER_CAP_LOW) { val = (val * 2) / 125; } else { val /= 62500; } vf->frequency = val; break; } case VIDIOC_ENUM_FMT: { struct v4l2_fmtdesc *fd = (struct v4l2_fmtdesc *)arg; /* Only one format is supported : mpeg.*/ if (fd->index != 0) break; memcpy(fd, pvr_fmtdesc, sizeof(struct v4l2_fmtdesc)); ret = 0; break; } case VIDIOC_G_FMT: { struct v4l2_format *vf = (struct v4l2_format *)arg; int val; switch(vf->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: memcpy(vf, &pvr_format[PVR_FORMAT_PIX], sizeof(struct v4l2_format)); val = 0; pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES), &val); vf->fmt.pix.width = val; val = 0; pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES), &val); vf->fmt.pix.height = val; ret = 0; break; case V4L2_BUF_TYPE_VBI_CAPTURE: // ????? 
Still need to figure out to do VBI correctly ret = -EINVAL; break; default: ret = -EINVAL; break; } break; } case VIDIOC_TRY_FMT: case VIDIOC_S_FMT: { struct v4l2_format *vf = (struct v4l2_format *)arg; ret = 0; switch(vf->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: { int lmin,lmax,ldef; struct pvr2_ctrl *hcp,*vcp; int h = vf->fmt.pix.height; int w = vf->fmt.pix.width; hcp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES); vcp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES); lmin = pvr2_ctrl_get_min(hcp); lmax = pvr2_ctrl_get_max(hcp); pvr2_ctrl_get_def(hcp, &ldef); if (w == -1) { w = ldef; } else if (w < lmin) { w = lmin; } else if (w > lmax) { w = lmax; } lmin = pvr2_ctrl_get_min(vcp); lmax = pvr2_ctrl_get_max(vcp); pvr2_ctrl_get_def(vcp, &ldef); if (h == -1) { h = ldef; } else if (h < lmin) { h = lmin; } else if (h > lmax) { h = lmax; } memcpy(vf, &pvr_format[PVR_FORMAT_PIX], sizeof(struct v4l2_format)); vf->fmt.pix.width = w; vf->fmt.pix.height = h; if (cmd == VIDIOC_S_FMT) { pvr2_ctrl_set_value(hcp,vf->fmt.pix.width); pvr2_ctrl_set_value(vcp,vf->fmt.pix.height); } } break; case V4L2_BUF_TYPE_VBI_CAPTURE: // ????? Still need to figure out to do VBI correctly ret = -EINVAL; break; default: ret = -EINVAL; break; } break; } case VIDIOC_STREAMON: { if (!fh->pdi->stream) { /* No stream defined for this node. This means that we're not currently allowed to stream from this node. */ ret = -EPERM; break; } ret = pvr2_hdw_set_stream_type(hdw,pdi->config); if (ret < 0) return ret; ret = pvr2_hdw_set_streaming(hdw,!0); break; } case VIDIOC_STREAMOFF: { if (!fh->pdi->stream) { /* No stream defined for this node. This means that we're not currently allowed to stream from this node. 
*/ ret = -EPERM; break; } ret = pvr2_hdw_set_streaming(hdw,0); break; } case VIDIOC_QUERYCTRL: { struct pvr2_ctrl *cptr; int val; struct v4l2_queryctrl *vc = (struct v4l2_queryctrl *)arg; ret = 0; if (vc->id & V4L2_CTRL_FLAG_NEXT_CTRL) { cptr = pvr2_hdw_get_ctrl_nextv4l( hdw,(vc->id & ~V4L2_CTRL_FLAG_NEXT_CTRL)); if (cptr) vc->id = pvr2_ctrl_get_v4lid(cptr); } else { cptr = pvr2_hdw_get_ctrl_v4l(hdw,vc->id); } if (!cptr) { pvr2_trace(PVR2_TRACE_V4LIOCTL, "QUERYCTRL id=0x%x not implemented here", vc->id); ret = -EINVAL; break; } pvr2_trace(PVR2_TRACE_V4LIOCTL, "QUERYCTRL id=0x%x mapping name=%s (%s)", vc->id,pvr2_ctrl_get_name(cptr), pvr2_ctrl_get_desc(cptr)); strlcpy(vc->name,pvr2_ctrl_get_desc(cptr),sizeof(vc->name)); vc->flags = pvr2_ctrl_get_v4lflags(cptr); pvr2_ctrl_get_def(cptr, &val); vc->default_value = val; switch (pvr2_ctrl_get_type(cptr)) { case pvr2_ctl_enum: vc->type = V4L2_CTRL_TYPE_MENU; vc->minimum = 0; vc->maximum = pvr2_ctrl_get_cnt(cptr) - 1; vc->step = 1; break; case pvr2_ctl_bool: vc->type = V4L2_CTRL_TYPE_BOOLEAN; vc->minimum = 0; vc->maximum = 1; vc->step = 1; break; case pvr2_ctl_int: vc->type = V4L2_CTRL_TYPE_INTEGER; vc->minimum = pvr2_ctrl_get_min(cptr); vc->maximum = pvr2_ctrl_get_max(cptr); vc->step = 1; break; default: pvr2_trace(PVR2_TRACE_V4LIOCTL, "QUERYCTRL id=0x%x name=%s not mappable", vc->id,pvr2_ctrl_get_name(cptr)); ret = -EINVAL; break; } break; } case VIDIOC_QUERYMENU: { struct v4l2_querymenu *vm = (struct v4l2_querymenu *)arg; unsigned int cnt = 0; ret = pvr2_ctrl_get_valname(pvr2_hdw_get_ctrl_v4l(hdw,vm->id), vm->index, vm->name,sizeof(vm->name)-1, &cnt); vm->name[cnt] = 0; break; } case VIDIOC_G_CTRL: { struct v4l2_control *vc = (struct v4l2_control *)arg; int val = 0; ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id), &val); vc->value = val; break; } case VIDIOC_S_CTRL: { struct v4l2_control *vc = (struct v4l2_control *)arg; ret = pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id), vc->value); break; } case 
VIDIOC_G_EXT_CTRLS: { struct v4l2_ext_controls *ctls = (struct v4l2_ext_controls *)arg; struct v4l2_ext_control *ctrl; unsigned int idx; int val; ret = 0; for (idx = 0; idx < ctls->count; idx++) { ctrl = ctls->controls + idx; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),&val); if (ret) { ctls->error_idx = idx; break; } /* Ensure that if read as a 64 bit value, the user will still get a hopefully sane value */ ctrl->value64 = 0; ctrl->value = val; } break; } case VIDIOC_S_EXT_CTRLS: { struct v4l2_ext_controls *ctls = (struct v4l2_ext_controls *)arg; struct v4l2_ext_control *ctrl; unsigned int idx; ret = 0; for (idx = 0; idx < ctls->count; idx++) { ctrl = ctls->controls + idx; ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id), ctrl->value); if (ret) { ctls->error_idx = idx; break; } } break; } case VIDIOC_TRY_EXT_CTRLS: { struct v4l2_ext_controls *ctls = (struct v4l2_ext_controls *)arg; struct v4l2_ext_control *ctrl; struct pvr2_ctrl *pctl; unsigned int idx; /* For the moment just validate that the requested control actually exists. 
*/ ret = 0; for (idx = 0; idx < ctls->count; idx++) { ctrl = ctls->controls + idx; pctl = pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id); if (!pctl) { ret = -EINVAL; ctls->error_idx = idx; break; } } break; } case VIDIOC_CROPCAP: { struct v4l2_cropcap *cap = (struct v4l2_cropcap *)arg; if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = -EINVAL; break; } ret = pvr2_hdw_get_cropcap(hdw, cap); cap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* paranoia */ break; } case VIDIOC_G_CROP: { struct v4l2_crop *crop = (struct v4l2_crop *)arg; int val = 0; if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = -EINVAL; break; } ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL), &val); if (ret != 0) { ret = -EINVAL; break; } crop->c.left = val; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT), &val); if (ret != 0) { ret = -EINVAL; break; } crop->c.top = val; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW), &val); if (ret != 0) { ret = -EINVAL; break; } crop->c.width = val; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH), &val); if (ret != 0) { ret = -EINVAL; break; } crop->c.height = val; } case VIDIOC_S_CROP: { struct v4l2_crop *crop = (struct v4l2_crop *)arg; struct v4l2_cropcap cap; if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = -EINVAL; break; } cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL), crop->c.left); if (ret != 0) { ret = -EINVAL; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT), crop->c.top); if (ret != 0) { ret = -EINVAL; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW), crop->c.width); if (ret != 0) { ret = -EINVAL; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH), crop->c.height); if (ret != 0) { ret = -EINVAL; break; } } case VIDIOC_LOG_STATUS: { pvr2_hdw_trigger_module_log(hdw); ret = 0; break; } #ifdef 
CONFIG_VIDEO_ADV_DEBUG case VIDIOC_DBG_S_REGISTER: case VIDIOC_DBG_G_REGISTER: { u64 val; struct v4l2_dbg_register *req = (struct v4l2_dbg_register *)arg; if (cmd == VIDIOC_DBG_S_REGISTER) val = req->val; ret = pvr2_hdw_register_access( hdw, &req->match, req->reg, cmd == VIDIOC_DBG_S_REGISTER, &val); if (cmd == VIDIOC_DBG_G_REGISTER) req->val = val; break; } #endif default : ret = v4l_compat_translate_ioctl(file, cmd, arg, pvr2_v4l2_do_ioctl); } pvr2_hdw_commit_ctl(hdw); if (ret < 0) { if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) { pvr2_trace(PVR2_TRACE_V4LIOCTL, "pvr2_v4l2_do_ioctl failure, ret=%ld", ret); } else { if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) { pvr2_trace(PVR2_TRACE_V4LIOCTL, "pvr2_v4l2_do_ioctl failure, ret=%ld" " command was:", ret); v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw), cmd); } } } else { pvr2_trace(PVR2_TRACE_V4LIOCTL, "pvr2_v4l2_do_ioctl complete, ret=%ld (0x%lx)", ret, ret); } return ret; } static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip) { struct pvr2_hdw *hdw = dip->v4lp->channel.mc_head->hdw; enum pvr2_config cfg = dip->config; char msg[80]; unsigned int mcnt; /* Construct the unregistration message *before* we actually perform the unregistration step. By doing it this way we don't have to worry about potentially touching deleted resources. */ mcnt = scnprintf(msg, sizeof(msg) - 1, "pvrusb2: unregistered device %s [%s]", video_device_node_name(&dip->devbase), pvr2_config_get_name(cfg)); msg[mcnt] = 0; pvr2_hdw_v4l_store_minor_number(hdw,dip->minor_type,-1); /* Paranoia */ dip->v4lp = NULL; dip->stream = NULL; /* Actual deallocation happens later when all internal references are gone. 
*/ video_unregister_device(&dip->devbase); printk(KERN_INFO "%s\n", msg); } static void pvr2_v4l2_dev_disassociate_parent(struct pvr2_v4l2_dev *dip) { if (!dip) return; if (!dip->devbase.parent) return; dip->devbase.parent = NULL; device_move(&dip->devbase.dev, NULL, DPM_ORDER_NONE); } static void pvr2_v4l2_destroy_no_lock(struct pvr2_v4l2 *vp) { if (vp->dev_video) { pvr2_v4l2_dev_destroy(vp->dev_video); vp->dev_video = NULL; } if (vp->dev_radio) { pvr2_v4l2_dev_destroy(vp->dev_radio); vp->dev_radio = NULL; } pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr2_v4l2 id=%p",vp); pvr2_channel_done(&vp->channel); kfree(vp); } static void pvr2_video_device_release(struct video_device *vdev) { struct pvr2_v4l2_dev *dev; dev = container_of(vdev,struct pvr2_v4l2_dev,devbase); kfree(dev); } static void pvr2_v4l2_internal_check(struct pvr2_channel *chp) { struct pvr2_v4l2 *vp; vp = container_of(chp,struct pvr2_v4l2,channel); if (!vp->channel.mc_head->disconnect_flag) return; pvr2_v4l2_dev_disassociate_parent(vp->dev_video); pvr2_v4l2_dev_disassociate_parent(vp->dev_radio); if (vp->vfirst) return; pvr2_v4l2_destroy_no_lock(vp); } static long pvr2_v4l2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return video_usercopy(file, cmd, arg, pvr2_v4l2_do_ioctl); } static int pvr2_v4l2_release(struct file *file) { struct pvr2_v4l2_fh *fhp = file->private_data; struct pvr2_v4l2 *vp = fhp->vhead; struct pvr2_hdw *hdw = fhp->channel.mc_head->hdw; pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_release"); if (fhp->rhp) { struct pvr2_stream *sp; pvr2_hdw_set_streaming(hdw,0); sp = pvr2_ioread_get_stream(fhp->rhp); if (sp) pvr2_stream_set_callback(sp,NULL,NULL); pvr2_ioread_destroy(fhp->rhp); fhp->rhp = NULL; } v4l2_prio_close(&vp->prio, fhp->prio); file->private_data = NULL; if (fhp->vnext) { fhp->vnext->vprev = fhp->vprev; } else { vp->vlast = fhp->vprev; } if (fhp->vprev) { fhp->vprev->vnext = fhp->vnext; } else { vp->vfirst = fhp->vnext; } fhp->vnext = NULL; fhp->vprev = NULL; 
fhp->vhead = NULL; pvr2_channel_done(&fhp->channel); pvr2_trace(PVR2_TRACE_STRUCT, "Destroying pvr_v4l2_fh id=%p",fhp); if (fhp->input_map) { kfree(fhp->input_map); fhp->input_map = NULL; } kfree(fhp); if (vp->channel.mc_head->disconnect_flag && !vp->vfirst) { pvr2_v4l2_destroy_no_lock(vp); } return 0; } static int pvr2_v4l2_open(struct file *file) { struct pvr2_v4l2_dev *dip; /* Our own context pointer */ struct pvr2_v4l2_fh *fhp; struct pvr2_v4l2 *vp; struct pvr2_hdw *hdw; unsigned int input_mask = 0; unsigned int input_cnt,idx; int ret = 0; dip = container_of(video_devdata(file),struct pvr2_v4l2_dev,devbase); vp = dip->v4lp; hdw = vp->channel.hdw; pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_open"); if (!pvr2_hdw_dev_ok(hdw)) { pvr2_trace(PVR2_TRACE_OPEN_CLOSE, "pvr2_v4l2_open: hardware not ready"); return -EIO; } fhp = kzalloc(sizeof(*fhp),GFP_KERNEL); if (!fhp) { return -ENOMEM; } init_waitqueue_head(&fhp->wait_data); fhp->pdi = dip; pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_v4l2_fh id=%p",fhp); pvr2_channel_init(&fhp->channel,vp->channel.mc_head); if (dip->v4l_type == VFL_TYPE_RADIO) { /* Opening device as a radio, legal input selection subset is just the radio. */ input_mask = (1 << PVR2_CVAL_INPUT_RADIO); } else { /* Opening the main V4L device, legal input selection subset includes all analog inputs. 
*/ input_mask = ((1 << PVR2_CVAL_INPUT_RADIO) | (1 << PVR2_CVAL_INPUT_TV) | (1 << PVR2_CVAL_INPUT_COMPOSITE) | (1 << PVR2_CVAL_INPUT_SVIDEO)); } ret = pvr2_channel_limit_inputs(&fhp->channel,input_mask); if (ret) { pvr2_channel_done(&fhp->channel); pvr2_trace(PVR2_TRACE_STRUCT, "Destroying pvr_v4l2_fh id=%p (input mask error)", fhp); kfree(fhp); return ret; } input_mask &= pvr2_hdw_get_input_available(hdw); input_cnt = 0; for (idx = 0; idx < (sizeof(input_mask) << 3); idx++) { if (input_mask & (1 << idx)) input_cnt++; } fhp->input_cnt = input_cnt; fhp->input_map = kzalloc(input_cnt,GFP_KERNEL); if (!fhp->input_map) { pvr2_channel_done(&fhp->channel); pvr2_trace(PVR2_TRACE_STRUCT, "Destroying pvr_v4l2_fh id=%p (input map failure)", fhp); kfree(fhp); return -ENOMEM; } input_cnt = 0; for (idx = 0; idx < (sizeof(input_mask) << 3); idx++) { if (!(input_mask & (1 << idx))) continue; fhp->input_map[input_cnt++] = idx; } fhp->vnext = NULL; fhp->vprev = vp->vlast; if (vp->vlast) { vp->vlast->vnext = fhp; } else { vp->vfirst = fhp; } vp->vlast = fhp; fhp->vhead = vp; fhp->file = file; file->private_data = fhp; v4l2_prio_open(&vp->prio, &fhp->prio); fhp->fw_mode_flag = pvr2_hdw_cpufw_get_enabled(hdw); return 0; } static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp) { wake_up(&fhp->wait_data); } static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh) { int ret; struct pvr2_stream *sp; struct pvr2_hdw *hdw; if (fh->rhp) return 0; if (!fh->pdi->stream) { /* No stream defined for this node. This means that we're not currently allowed to stream from this node. */ return -EPERM; } /* First read() attempt. Try to claim the stream and start it... 
*/ if ((ret = pvr2_channel_claim_stream(&fh->channel, fh->pdi->stream)) != 0) { /* Someone else must already have it */ return ret; } fh->rhp = pvr2_channel_create_mpeg_stream(fh->pdi->stream); if (!fh->rhp) { pvr2_channel_claim_stream(&fh->channel,NULL); return -ENOMEM; } hdw = fh->channel.mc_head->hdw; sp = fh->pdi->stream->stream; pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh); pvr2_hdw_set_stream_type(hdw,fh->pdi->config); if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret; return pvr2_ioread_set_enabled(fh->rhp,!0); } static ssize_t pvr2_v4l2_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) { struct pvr2_v4l2_fh *fh = file->private_data; int ret; if (fh->fw_mode_flag) { struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; char *tbuf; int c1,c2; int tcnt = 0; unsigned int offs = *ppos; tbuf = kmalloc(PAGE_SIZE,GFP_KERNEL); if (!tbuf) return -ENOMEM; while (count) { c1 = count; if (c1 > PAGE_SIZE) c1 = PAGE_SIZE; c2 = pvr2_hdw_cpufw_get(hdw,offs,tbuf,c1); if (c2 < 0) { tcnt = c2; break; } if (!c2) break; if (copy_to_user(buff,tbuf,c2)) { tcnt = -EFAULT; break; } offs += c2; tcnt += c2; buff += c2; count -= c2; *ppos += c2; } kfree(tbuf); return tcnt; } if (!fh->rhp) { ret = pvr2_v4l2_iosetup(fh); if (ret) { return ret; } } for (;;) { ret = pvr2_ioread_read(fh->rhp,buff,count); if (ret >= 0) break; if (ret != -EAGAIN) break; if (file->f_flags & O_NONBLOCK) break; /* Doing blocking I/O. Wait here. 
*/ ret = wait_event_interruptible( fh->wait_data, pvr2_ioread_avail(fh->rhp) >= 0); if (ret < 0) break; } return ret; } static unsigned int pvr2_v4l2_poll(struct file *file, poll_table *wait) { unsigned int mask = 0; struct pvr2_v4l2_fh *fh = file->private_data; int ret; if (fh->fw_mode_flag) { mask |= POLLIN | POLLRDNORM; return mask; } if (!fh->rhp) { ret = pvr2_v4l2_iosetup(fh); if (ret) return POLLERR; } poll_wait(file,&fh->wait_data,wait); if (pvr2_ioread_avail(fh->rhp) >= 0) { mask |= POLLIN | POLLRDNORM; } return mask; } static const struct v4l2_file_operations vdev_fops = { .owner = THIS_MODULE, .open = pvr2_v4l2_open, .release = pvr2_v4l2_release, .read = pvr2_v4l2_read, .ioctl = pvr2_v4l2_ioctl, .poll = pvr2_v4l2_poll, }; static struct video_device vdev_template = { .fops = &vdev_fops, }; static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip, struct pvr2_v4l2 *vp, int v4l_type) { struct usb_device *usbdev; int mindevnum; int unit_number; int *nr_ptr = NULL; dip->v4lp = vp; usbdev = pvr2_hdw_get_dev(vp->channel.mc_head->hdw); dip->v4l_type = v4l_type; switch (v4l_type) { case VFL_TYPE_GRABBER: dip->stream = &vp->channel.mc_head->video_stream; dip->config = pvr2_config_mpeg; dip->minor_type = pvr2_v4l_type_video; nr_ptr = video_nr; if (!dip->stream) { pr_err(KBUILD_MODNAME ": Failed to set up pvrusb2 v4l video dev" " due to missing stream instance\n"); return; } break; case VFL_TYPE_VBI: dip->config = pvr2_config_vbi; dip->minor_type = pvr2_v4l_type_vbi; nr_ptr = vbi_nr; break; case VFL_TYPE_RADIO: dip->stream = &vp->channel.mc_head->video_stream; dip->config = pvr2_config_mpeg; dip->minor_type = pvr2_v4l_type_radio; nr_ptr = radio_nr; break; default: /* Bail out (this should be impossible) */ pr_err(KBUILD_MODNAME ": Failed to set up pvrusb2 v4l dev" " due to unrecognized config\n"); return; } memcpy(&dip->devbase,&vdev_template,sizeof(vdev_template)); dip->devbase.release = pvr2_video_device_release; mindevnum = -1; unit_number = 
pvr2_hdw_get_unit_number(vp->channel.mc_head->hdw); if (nr_ptr && (unit_number >= 0) && (unit_number < PVR_NUM)) { mindevnum = nr_ptr[unit_number]; } dip->devbase.parent = &usbdev->dev; if ((video_register_device(&dip->devbase, dip->v4l_type, mindevnum) < 0) && (video_register_device(&dip->devbase, dip->v4l_type, -1) < 0)) { pr_err(KBUILD_MODNAME ": Failed to register pvrusb2 v4l device\n"); } printk(KERN_INFO "pvrusb2: registered device %s [%s]\n", video_device_node_name(&dip->devbase), pvr2_config_get_name(dip->config)); pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw, dip->minor_type,dip->devbase.minor); } struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *mnp) { struct pvr2_v4l2 *vp; vp = kzalloc(sizeof(*vp),GFP_KERNEL); if (!vp) return vp; pvr2_channel_init(&vp->channel,mnp); pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr2_v4l2 id=%p",vp); vp->channel.check_func = pvr2_v4l2_internal_check; /* register streams */ vp->dev_video = kzalloc(sizeof(*vp->dev_video),GFP_KERNEL); if (!vp->dev_video) goto fail; pvr2_v4l2_dev_init(vp->dev_video,vp,VFL_TYPE_GRABBER); if (pvr2_hdw_get_input_available(vp->channel.mc_head->hdw) & (1 << PVR2_CVAL_INPUT_RADIO)) { vp->dev_radio = kzalloc(sizeof(*vp->dev_radio),GFP_KERNEL); if (!vp->dev_radio) goto fail; pvr2_v4l2_dev_init(vp->dev_radio,vp,VFL_TYPE_RADIO); } return vp; fail: pvr2_trace(PVR2_TRACE_STRUCT,"Failure creating pvr2_v4l2 id=%p",vp); pvr2_v4l2_destroy_no_lock(vp); return NULL; } /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 75 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
HRTKernel/Hacker-Kernel-H850
sound/firewire/bebob/bebob_midi.c
909
4076
/*
 * bebob_midi.c - a part of driver for BeBoB based devices
 *
 * Copyright (c) 2013-2014 Takashi Sakamoto
 *
 * Licensed under the terms of the GNU General Public License, version 2.
 */

#include "bebob.h"

/*
 * Open a MIDI capture substream.
 *
 * Grabs the stream lock so the configuration cannot change underneath us,
 * accounts for the new substream and (re)starts the duplex AMDTP streams.
 *
 * NOTE: ALSA rawmidi does not call the close callback when open fails, so
 * everything acquired here must be rolled back on the error path; the
 * original code leaked the substream counter when stream start failed.
 */
static int midi_capture_open(struct snd_rawmidi_substream *substream)
{
	struct snd_bebob *bebob = substream->rmidi->private_data;
	int err;

	err = snd_bebob_stream_lock_try(bebob);
	if (err < 0)
		goto end;

	atomic_inc(&bebob->capture_substreams);
	err = snd_bebob_stream_start_duplex(bebob, 0);
	if (err < 0) {
		/* Undo the increment: close() will not run after a
		 * failed open, so the counter would leak otherwise. */
		atomic_dec(&bebob->capture_substreams);
		snd_bebob_stream_lock_release(bebob);
	}
end:
	return err;
}

/*
 * Open a MIDI playback substream.
 *
 * Mirror image of midi_capture_open(); see the note there about rolling
 * back the substream counter on failure.
 */
static int midi_playback_open(struct snd_rawmidi_substream *substream)
{
	struct snd_bebob *bebob = substream->rmidi->private_data;
	int err;

	err = snd_bebob_stream_lock_try(bebob);
	if (err < 0)
		goto end;

	atomic_inc(&bebob->playback_substreams);
	err = snd_bebob_stream_start_duplex(bebob, 0);
	if (err < 0) {
		/* See midi_capture_open(): avoid counter leak on error. */
		atomic_dec(&bebob->playback_substreams);
		snd_bebob_stream_lock_release(bebob);
	}
end:
	return err;
}

/*
 * Close a MIDI capture substream: drop the substream accounting, stop the
 * duplex streams if nothing else needs them, and release the stream lock
 * taken at open time.
 */
static int midi_capture_close(struct snd_rawmidi_substream *substream)
{
	struct snd_bebob *bebob = substream->rmidi->private_data;

	atomic_dec(&bebob->capture_substreams);
	snd_bebob_stream_stop_duplex(bebob);

	snd_bebob_stream_lock_release(bebob);
	return 0;
}

/*
 * Close a MIDI playback substream; mirror image of midi_capture_close().
 */
static int midi_playback_close(struct snd_rawmidi_substream *substream)
{
	struct snd_bebob *bebob = substream->rmidi->private_data;

	atomic_dec(&bebob->playback_substreams);
	snd_bebob_stream_stop_duplex(bebob);

	snd_bebob_stream_lock_release(bebob);
	return 0;
}

/*
 * Start/stop delivery of incoming MIDI bytes by attaching (up) or
 * detaching (!up) this substream to the port slot of the tx (device to
 * host) AMDTP stream.  Runs under bebob->lock because the trigger
 * callback may be invoked from any context.
 */
static void midi_capture_trigger(struct snd_rawmidi_substream *substrm, int up)
{
	struct snd_bebob *bebob = substrm->rmidi->private_data;
	unsigned long flags;

	spin_lock_irqsave(&bebob->lock, flags);

	if (up)
		amdtp_stream_midi_trigger(&bebob->tx_stream,
					  substrm->number, substrm);
	else
		amdtp_stream_midi_trigger(&bebob->tx_stream,
					  substrm->number, NULL);

	spin_unlock_irqrestore(&bebob->lock, flags);
}

/*
 * Start/stop transmission of outgoing MIDI bytes on the rx (host to
 * device) AMDTP stream; mirror image of midi_capture_trigger().
 */
static void midi_playback_trigger(struct snd_rawmidi_substream *substrm, int up)
{
	struct snd_bebob *bebob = substrm->rmidi->private_data;
	unsigned long flags;

	spin_lock_irqsave(&bebob->lock, flags);

	if (up)
		amdtp_stream_midi_trigger(&bebob->rx_stream,
					  substrm->number, substrm);
	else
		amdtp_stream_midi_trigger(&bebob->rx_stream,
					  substrm->number, NULL);

	spin_unlock_irqrestore(&bebob->lock, flags);
}

static struct snd_rawmidi_ops midi_capture_ops = {
	.open		= midi_capture_open,
	.close		= midi_capture_close,
	.trigger	= midi_capture_trigger,
};

static struct snd_rawmidi_ops midi_playback_ops = {
	.open		= midi_playback_open,
	.close		= midi_playback_close,
	.trigger	= midi_playback_trigger,
};

/*
 * Give each substream of the rawmidi stream a human-readable name of the
 * form "<card shortname> MIDI <n>", with n counted from 1.
 */
static void set_midi_substream_names(struct snd_bebob *bebob,
				     struct snd_rawmidi_str *str)
{
	struct snd_rawmidi_substream *subs;

	list_for_each_entry(subs, &str->substreams, list) {
		snprintf(subs->name, sizeof(subs->name),
			 "%s MIDI %d",
			 bebob->card->shortname, subs->number + 1);
	}
}

/*
 * snd_bebob_create_midi_devices - register the rawmidi device for a unit
 * @bebob: the BeBoB unit; midi_input_ports/midi_output_ports must already
 *         be discovered.
 *
 * Creates one rawmidi device with as many input/output substreams as the
 * hardware reports, wires up the ops tables above, and marks the device
 * duplex-capable when both directions exist.
 *
 * Return: 0 on success or a negative error code from snd_rawmidi_new().
 */
int snd_bebob_create_midi_devices(struct snd_bebob *bebob)
{
	struct snd_rawmidi *rmidi;
	struct snd_rawmidi_str *str;
	int err;

	/* create midi ports */
	err = snd_rawmidi_new(bebob->card, bebob->card->driver, 0,
			      bebob->midi_output_ports, bebob->midi_input_ports,
			      &rmidi);
	if (err < 0)
		return err;

	snprintf(rmidi->name, sizeof(rmidi->name),
		 "%s MIDI", bebob->card->shortname);
	rmidi->private_data = bebob;

	if (bebob->midi_input_ports > 0) {
		rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT;

		snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT,
				    &midi_capture_ops);

		str = &rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT];

		set_midi_substream_names(bebob, str);
	}

	if (bebob->midi_output_ports > 0) {
		rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT;

		snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT,
				    &midi_playback_ops);

		str = &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT];

		set_midi_substream_names(bebob, str);
	}

	if ((bebob->midi_output_ports > 0) && (bebob->midi_input_ports > 0))
		rmidi->info_flags |= SNDRV_RAWMIDI_INFO_DUPLEX;

	return 0;
}
gpl-2.0
MinimalOS/android_kernel_moto_shamu
arch/sh/boards/mach-ecovec24/setup.c
1933
36404
/* * Copyright (C) 2009 Renesas Solutions Corp. * * Kuninori Morimoto <morimoto.kuninori@renesas.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mmc/host.h> #include <linux/mmc/sh_mmcif.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/mtd/physmap.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/usb/r8a66597.h> #include <linux/usb/renesas_usbhs.h> #include <linux/i2c.h> #include <linux/i2c/tsc2007.h> #include <linux/spi/spi.h> #include <linux/spi/sh_msiof.h> #include <linux/spi/mmc_spi.h> #include <linux/input.h> #include <linux/input/sh_keysc.h> #include <linux/sh_eth.h> #include <linux/sh_intc.h> #include <linux/videodev2.h> #include <video/sh_mobile_lcdc.h> #include <sound/sh_fsi.h> #include <sound/simple_card.h> #include <media/sh_mobile_ceu.h> #include <media/soc_camera.h> #include <media/tw9910.h> #include <media/mt9t112.h> #include <asm/heartbeat.h> #include <asm/clock.h> #include <asm/suspend.h> #include <cpu/sh7724.h> /* * Address Interface BusWidth *----------------------------------------- * 0x0000_0000 uboot 16bit * 0x0004_0000 Linux romImage 16bit * 0x0014_0000 MTD for Linux 16bit * 0x0400_0000 Internal I/O 16/32bit * 0x0800_0000 DRAM 32bit * 0x1800_0000 MFI 16bit */ /* SWITCH *------------------------------ * DS2[1] = FlashROM write protect ON : write protect * OFF : No write protect * DS2[2] = RMII / TS, SCIF ON : RMII * OFF : TS, SCIF3 * DS2[3] = Camera / Video ON : Camera * OFF : NTSC/PAL (IN) * DS2[5] = NTSC_OUT Clock ON : On board OSC * OFF : SH7724 DV_CLK * DS2[6-7] = MMC / SD ON-OFF : SD * OFF-ON : MMC */ /* * FSI - DA7210 * * it needs amixer settings for playing * * 
amixer set 'HeadPhone' 80 * amixer set 'Out Mixer Left DAC Left' on * amixer set 'Out Mixer Right DAC Right' on */ /* Heartbeat */ static unsigned char led_pos[] = { 0, 1, 2, 3 }; static struct heartbeat_data heartbeat_data = { .nr_bits = 4, .bit_pos = led_pos, }; static struct resource heartbeat_resource = { .start = 0xA405012C, /* PTG */ .end = 0xA405012E - 1, .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, }; static struct platform_device heartbeat_device = { .name = "heartbeat", .id = -1, .dev = { .platform_data = &heartbeat_data, }, .num_resources = 1, .resource = &heartbeat_resource, }; /* MTD */ static struct mtd_partition nor_flash_partitions[] = { { .name = "boot loader", .offset = 0, .size = (5 * 1024 * 1024), .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "free-area", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct physmap_flash_data nor_flash_data = { .width = 2, .parts = nor_flash_partitions, .nr_parts = ARRAY_SIZE(nor_flash_partitions), }; static struct resource nor_flash_resources[] = { [0] = { .name = "NOR Flash", .start = 0x00000000, .end = 0x03ffffff, .flags = IORESOURCE_MEM, } }; static struct platform_device nor_flash_device = { .name = "physmap-flash", .resource = nor_flash_resources, .num_resources = ARRAY_SIZE(nor_flash_resources), .dev = { .platform_data = &nor_flash_data, }, }; /* SH Eth */ #define SH_ETH_ADDR (0xA4600000) static struct resource sh_eth_resources[] = { [0] = { .start = SH_ETH_ADDR, .end = SH_ETH_ADDR + 0x1FC, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xd60), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct sh_eth_plat_data sh_eth_plat = { .phy = 0x1f, /* SMSC LAN8700 */ .edmac_endian = EDMAC_LITTLE_ENDIAN, .register_type = SH_ETH_REG_FAST_SH4, .phy_interface = PHY_INTERFACE_MODE_MII, .ether_link_active_low = 1 }; static struct platform_device sh_eth_device = { .name = "sh-eth", .id = 0, .dev = { .platform_data = &sh_eth_plat, }, .num_resources = 
ARRAY_SIZE(sh_eth_resources), .resource = sh_eth_resources, }; /* USB0 host */ static void usb0_port_power(int port, int power) { gpio_set_value(GPIO_PTB4, power); } static struct r8a66597_platdata usb0_host_data = { .on_chip = 1, .port_power = usb0_port_power, }; static struct resource usb0_host_resources[] = { [0] = { .start = 0xa4d80000, .end = 0xa4d80124 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xa20), .end = evt2irq(0xa20), .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, }, }; static struct platform_device usb0_host_device = { .name = "r8a66597_hcd", .id = 0, .dev = { .dma_mask = NULL, /* not use dma */ .coherent_dma_mask = 0xffffffff, .platform_data = &usb0_host_data, }, .num_resources = ARRAY_SIZE(usb0_host_resources), .resource = usb0_host_resources, }; /* USB1 host/function */ static void usb1_port_power(int port, int power) { gpio_set_value(GPIO_PTB5, power); } static struct r8a66597_platdata usb1_common_data = { .on_chip = 1, .port_power = usb1_port_power, }; static struct resource usb1_common_resources[] = { [0] = { .start = 0xa4d90000, .end = 0xa4d90124 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xa40), .end = evt2irq(0xa40), .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, }, }; static struct platform_device usb1_common_device = { /* .name will be added in arch_setup */ .id = 1, .dev = { .dma_mask = NULL, /* not use dma */ .coherent_dma_mask = 0xffffffff, .platform_data = &usb1_common_data, }, .num_resources = ARRAY_SIZE(usb1_common_resources), .resource = usb1_common_resources, }; /* * USBHS */ static int usbhs_get_id(struct platform_device *pdev) { return gpio_get_value(GPIO_PTB3); } static int usbhs_phy_reset(struct platform_device *pdev) { /* enable vbus if HOST */ if (!gpio_get_value(GPIO_PTB3)) gpio_set_value(GPIO_PTB5, 1); return 0; } static struct renesas_usbhs_platform_info usbhs_info = { .platform_callback = { .get_id = usbhs_get_id, .phy_reset = usbhs_phy_reset, }, .driver_param = { .buswait_bwait = 4, 
.detection_delay = 5, .d0_tx_id = SHDMA_SLAVE_USB1D0_TX, .d0_rx_id = SHDMA_SLAVE_USB1D0_RX, .d1_tx_id = SHDMA_SLAVE_USB1D1_TX, .d1_rx_id = SHDMA_SLAVE_USB1D1_RX, }, }; static struct resource usbhs_resources[] = { [0] = { .start = 0xa4d90000, .end = 0xa4d90124 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xa40), .end = evt2irq(0xa40), .flags = IORESOURCE_IRQ, }, }; static struct platform_device usbhs_device = { .name = "renesas_usbhs", .id = 1, .dev = { .dma_mask = NULL, /* not use dma */ .coherent_dma_mask = 0xffffffff, .platform_data = &usbhs_info, }, .num_resources = ARRAY_SIZE(usbhs_resources), .resource = usbhs_resources, }; /* LCDC */ static const struct fb_videomode ecovec_lcd_modes[] = { { .name = "Panel", .xres = 800, .yres = 480, .left_margin = 220, .right_margin = 110, .hsync_len = 70, .upper_margin = 20, .lower_margin = 5, .vsync_len = 5, .sync = 0, /* hsync and vsync are active low */ }, }; static const struct fb_videomode ecovec_dvi_modes[] = { { .name = "DVI", .xres = 1280, .yres = 720, .left_margin = 220, .right_margin = 110, .hsync_len = 40, .upper_margin = 20, .lower_margin = 5, .vsync_len = 5, .sync = 0, /* hsync and vsync are active low */ }, }; static int ecovec24_set_brightness(int brightness) { gpio_set_value(GPIO_PTR1, brightness); return 0; } static struct sh_mobile_lcdc_info lcdc_info = { .ch[0] = { .interface_type = RGB18, .chan = LCDC_CHAN_MAINLCD, .fourcc = V4L2_PIX_FMT_RGB565, .panel_cfg = { /* 7.0 inch */ .width = 152, .height = 91, }, .bl_info = { .name = "sh_mobile_lcdc_bl", .max_brightness = 1, .set_brightness = ecovec24_set_brightness, }, } }; static struct resource lcdc_resources[] = { [0] = { .name = "LCDC", .start = 0xfe940000, .end = 0xfe942fff, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xf40), .flags = IORESOURCE_IRQ, }, }; static struct platform_device lcdc_device = { .name = "sh_mobile_lcdc_fb", .num_resources = ARRAY_SIZE(lcdc_resources), .resource = lcdc_resources, .dev = { .platform_data = 
&lcdc_info, }, }; /* CEU0 */ static struct sh_mobile_ceu_info sh_mobile_ceu0_info = { .flags = SH_CEU_FLAG_USE_8BIT_BUS, }; static struct resource ceu0_resources[] = { [0] = { .name = "CEU0", .start = 0xfe910000, .end = 0xfe91009f, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x880), .flags = IORESOURCE_IRQ, }, [2] = { /* place holder for contiguous memory */ }, }; static struct platform_device ceu0_device = { .name = "sh_mobile_ceu", .id = 0, /* "ceu0" clock */ .num_resources = ARRAY_SIZE(ceu0_resources), .resource = ceu0_resources, .dev = { .platform_data = &sh_mobile_ceu0_info, }, }; /* CEU1 */ static struct sh_mobile_ceu_info sh_mobile_ceu1_info = { .flags = SH_CEU_FLAG_USE_8BIT_BUS, }; static struct resource ceu1_resources[] = { [0] = { .name = "CEU1", .start = 0xfe914000, .end = 0xfe91409f, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x9e0), .flags = IORESOURCE_IRQ, }, [2] = { /* place holder for contiguous memory */ }, }; static struct platform_device ceu1_device = { .name = "sh_mobile_ceu", .id = 1, /* "ceu1" clock */ .num_resources = ARRAY_SIZE(ceu1_resources), .resource = ceu1_resources, .dev = { .platform_data = &sh_mobile_ceu1_info, }, }; /* I2C device */ static struct i2c_board_info i2c0_devices[] = { { I2C_BOARD_INFO("da7210", 0x1a), }, }; static struct i2c_board_info i2c1_devices[] = { { I2C_BOARD_INFO("r2025sd", 0x32), }, { I2C_BOARD_INFO("lis3lv02d", 0x1c), .irq = evt2irq(0x620), } }; /* KEYSC */ static struct sh_keysc_info keysc_info = { .mode = SH_KEYSC_MODE_1, .scan_timing = 3, .delay = 50, .kycr2_delay = 100, .keycodes = { KEY_1, 0, 0, 0, 0, KEY_2, 0, 0, 0, 0, KEY_3, 0, 0, 0, 0, KEY_4, 0, 0, 0, 0, KEY_5, 0, 0, 0, 0, KEY_6, 0, 0, 0, 0, }, }; static struct resource keysc_resources[] = { [0] = { .name = "KEYSC", .start = 0x044b0000, .end = 0x044b000f, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xbe0), .flags = IORESOURCE_IRQ, }, }; static struct platform_device keysc_device = { .name = "sh_keysc", .id = 0, /* keysc0 
clock */ .num_resources = ARRAY_SIZE(keysc_resources), .resource = keysc_resources, .dev = { .platform_data = &keysc_info, }, }; /* TouchScreen */ #define IRQ0 evt2irq(0x600) static int ts_get_pendown_state(void) { int val = 0; gpio_free(GPIO_FN_INTC_IRQ0); gpio_request(GPIO_PTZ0, NULL); gpio_direction_input(GPIO_PTZ0); val = gpio_get_value(GPIO_PTZ0); gpio_free(GPIO_PTZ0); gpio_request(GPIO_FN_INTC_IRQ0, NULL); return val ? 0 : 1; } static int ts_init(void) { gpio_request(GPIO_FN_INTC_IRQ0, NULL); return 0; } static struct tsc2007_platform_data tsc2007_info = { .model = 2007, .x_plate_ohms = 180, .get_pendown_state = ts_get_pendown_state, .init_platform_hw = ts_init, }; static struct i2c_board_info ts_i2c_clients = { I2C_BOARD_INFO("tsc2007", 0x48), .type = "tsc2007", .platform_data = &tsc2007_info, .irq = IRQ0, }; static struct regulator_consumer_supply cn12_power_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), }; static struct regulator_init_data cn12_power_init_data = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(cn12_power_consumers), .consumer_supplies = cn12_power_consumers, }; static struct fixed_voltage_config cn12_power_info = { .supply_name = "CN12 SD/MMC Vdd", .microvolts = 3300000, .gpio = GPIO_PTB7, .enable_high = 1, .init_data = &cn12_power_init_data, }; static struct platform_device cn12_power = { .name = "reg-fixed-voltage", .id = 0, .dev = { .platform_data = &cn12_power_info, }, }; #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) /* SDHI0 */ static struct regulator_consumer_supply sdhi0_power_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), }; static struct regulator_init_data sdhi0_power_init_data = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, 
.num_consumer_supplies = ARRAY_SIZE(sdhi0_power_consumers), .consumer_supplies = sdhi0_power_consumers, }; static struct fixed_voltage_config sdhi0_power_info = { .supply_name = "CN11 SD/MMC Vdd", .microvolts = 3300000, .gpio = GPIO_PTB6, .enable_high = 1, .init_data = &sdhi0_power_init_data, }; static struct platform_device sdhi0_power = { .name = "reg-fixed-voltage", .id = 1, .dev = { .platform_data = &sdhi0_power_info, }, }; static void sdhi0_set_pwr(struct platform_device *pdev, int state) { static int power_gpio = -EINVAL; if (power_gpio < 0) { int ret = gpio_request(GPIO_PTB6, NULL); if (!ret) { power_gpio = GPIO_PTB6; gpio_direction_output(power_gpio, 0); } } /* * Toggle the GPIO regardless, whether we managed to grab it above or * the fixed regulator driver did. */ gpio_set_value(GPIO_PTB6, state); } static int sdhi0_get_cd(struct platform_device *pdev) { return !gpio_get_value(GPIO_PTY7); } static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, .set_pwr = sdhi0_set_pwr, .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD | MMC_CAP_NEEDS_POLL, .get_cd = sdhi0_get_cd, }; static struct resource sdhi0_resources[] = { [0] = { .name = "SDHI0", .start = 0x04ce0000, .end = 0x04ce00ff, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xe80), .flags = IORESOURCE_IRQ, }, }; static struct platform_device sdhi0_device = { .name = "sh_mobile_sdhi", .num_resources = ARRAY_SIZE(sdhi0_resources), .resource = sdhi0_resources, .id = 0, .dev = { .platform_data = &sdhi0_info, }, }; static void cn12_set_pwr(struct platform_device *pdev, int state) { static int power_gpio = -EINVAL; if (power_gpio < 0) { int ret = gpio_request(GPIO_PTB7, NULL); if (!ret) { power_gpio = GPIO_PTB7; gpio_direction_output(power_gpio, 0); } } /* * Toggle the GPIO regardless, whether we managed to grab it above or * the fixed regulator driver did. 
*/ gpio_set_value(GPIO_PTB7, state); } #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) /* SDHI1 */ static int sdhi1_get_cd(struct platform_device *pdev) { return !gpio_get_value(GPIO_PTW7); } static struct sh_mobile_sdhi_info sdhi1_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD | MMC_CAP_NEEDS_POLL, .set_pwr = cn12_set_pwr, .get_cd = sdhi1_get_cd, }; static struct resource sdhi1_resources[] = { [0] = { .name = "SDHI1", .start = 0x04cf0000, .end = 0x04cf00ff, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x4e0), .flags = IORESOURCE_IRQ, }, }; static struct platform_device sdhi1_device = { .name = "sh_mobile_sdhi", .num_resources = ARRAY_SIZE(sdhi1_resources), .resource = sdhi1_resources, .id = 1, .dev = { .platform_data = &sdhi1_info, }, }; #endif /* CONFIG_MMC_SH_MMCIF */ #else /* MMC SPI */ static int mmc_spi_get_ro(struct device *dev) { return gpio_get_value(GPIO_PTY6); } static int mmc_spi_get_cd(struct device *dev) { return !gpio_get_value(GPIO_PTY7); } static void mmc_spi_setpower(struct device *dev, unsigned int maskval) { gpio_set_value(GPIO_PTB6, maskval ? 
1 : 0); } static struct mmc_spi_platform_data mmc_spi_info = { .get_ro = mmc_spi_get_ro, .get_cd = mmc_spi_get_cd, .caps = MMC_CAP_NEEDS_POLL, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, /* 3.3V only */ .setpower = mmc_spi_setpower, }; static struct spi_board_info spi_bus[] = { { .modalias = "mmc_spi", .platform_data = &mmc_spi_info, .max_speed_hz = 5000000, .mode = SPI_MODE_0, .controller_data = (void *) GPIO_PTM4, }, }; /* MSIOF0 */ static struct sh_msiof_spi_info msiof0_data = { .num_chipselect = 1, }; static struct resource msiof0_resources[] = { [0] = { .name = "MSIOF0", .start = 0xa4c40000, .end = 0xa4c40063, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xc80), .flags = IORESOURCE_IRQ, }, }; static struct platform_device msiof0_device = { .name = "spi_sh_msiof", .id = 0, /* MSIOF0 */ .dev = { .platform_data = &msiof0_data, }, .num_resources = ARRAY_SIZE(msiof0_resources), .resource = msiof0_resources, }; #endif /* I2C Video/Camera */ static struct i2c_board_info i2c_camera[] = { { I2C_BOARD_INFO("tw9910", 0x45), }, { /* 1st camera */ I2C_BOARD_INFO("mt9t112", 0x3c), }, { /* 2nd camera */ I2C_BOARD_INFO("mt9t112", 0x3c), }, }; /* tw9910 */ static int tw9910_power(struct device *dev, int mode) { int val = mode ? 
0 : 1; gpio_set_value(GPIO_PTU2, val); if (mode) mdelay(100); return 0; } static struct tw9910_video_info tw9910_info = { .buswidth = SOCAM_DATAWIDTH_8, .mpout = TW9910_MPO_FIELD, }; static struct soc_camera_link tw9910_link = { .i2c_adapter_id = 0, .bus_id = 1, .power = tw9910_power, .board_info = &i2c_camera[0], .priv = &tw9910_info, }; /* mt9t112 */ static int mt9t112_power1(struct device *dev, int mode) { gpio_set_value(GPIO_PTA3, mode); if (mode) mdelay(100); return 0; } static struct mt9t112_camera_info mt9t112_info1 = { .flags = MT9T112_FLAG_PCLK_RISING_EDGE | MT9T112_FLAG_DATAWIDTH_8, .divider = { 0x49, 0x6, 0, 6, 0, 9, 9, 6, 0 }, /* for 24MHz */ }; static struct soc_camera_link mt9t112_link1 = { .i2c_adapter_id = 0, .power = mt9t112_power1, .bus_id = 0, .board_info = &i2c_camera[1], .priv = &mt9t112_info1, }; static int mt9t112_power2(struct device *dev, int mode) { gpio_set_value(GPIO_PTA4, mode); if (mode) mdelay(100); return 0; } static struct mt9t112_camera_info mt9t112_info2 = { .flags = MT9T112_FLAG_PCLK_RISING_EDGE | MT9T112_FLAG_DATAWIDTH_8, .divider = { 0x49, 0x6, 0, 6, 0, 9, 9, 6, 0 }, /* for 24MHz */ }; static struct soc_camera_link mt9t112_link2 = { .i2c_adapter_id = 1, .power = mt9t112_power2, .bus_id = 1, .board_info = &i2c_camera[2], .priv = &mt9t112_info2, }; static struct platform_device camera_devices[] = { { .name = "soc-camera-pdrv", .id = 0, .dev = { .platform_data = &tw9910_link, }, }, { .name = "soc-camera-pdrv", .id = 1, .dev = { .platform_data = &mt9t112_link1, }, }, { .name = "soc-camera-pdrv", .id = 2, .dev = { .platform_data = &mt9t112_link2, }, }, }; /* FSI */ static struct resource fsi_resources[] = { [0] = { .name = "FSI", .start = 0xFE3C0000, .end = 0xFE3C021d, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xf80), .flags = IORESOURCE_IRQ, }, }; static struct platform_device fsi_device = { .name = "sh_fsi", .id = 0, .num_resources = ARRAY_SIZE(fsi_resources), .resource = fsi_resources, }; static struct 
asoc_simple_card_info fsi_da7210_info = { .name = "DA7210", .card = "FSIB-DA7210", .codec = "da7210.0-001a", .platform = "sh_fsi.0", .daifmt = SND_SOC_DAIFMT_I2S, .cpu_dai = { .name = "fsib-dai", .fmt = SND_SOC_DAIFMT_CBS_CFS | SND_SOC_DAIFMT_IB_NF, }, .codec_dai = { .name = "da7210-hifi", .fmt = SND_SOC_DAIFMT_CBM_CFM, }, }; static struct platform_device fsi_da7210_device = { .name = "asoc-simple-card", .dev = { .platform_data = &fsi_da7210_info, }, }; /* IrDA */ static struct resource irda_resources[] = { [0] = { .name = "IrDA", .start = 0xA45D0000, .end = 0xA45D0049, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x480), .flags = IORESOURCE_IRQ, }, }; static struct platform_device irda_device = { .name = "sh_sir", .num_resources = ARRAY_SIZE(irda_resources), .resource = irda_resources, }; #include <media/ak881x.h> #include <media/sh_vou.h> static struct ak881x_pdata ak881x_pdata = { .flags = AK881X_IF_MODE_SLAVE, }; static struct i2c_board_info ak8813 = { I2C_BOARD_INFO("ak8813", 0x20), .platform_data = &ak881x_pdata, }; static struct sh_vou_pdata sh_vou_pdata = { .bus_fmt = SH_VOU_BUS_8BIT, .flags = SH_VOU_HSYNC_LOW | SH_VOU_VSYNC_LOW, .board_info = &ak8813, .i2c_adap = 0, }; static struct resource sh_vou_resources[] = { [0] = { .start = 0xfe960000, .end = 0xfe962043, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x8e0), .flags = IORESOURCE_IRQ, }, }; static struct platform_device vou_device = { .name = "sh-vou", .id = -1, .num_resources = ARRAY_SIZE(sh_vou_resources), .resource = sh_vou_resources, .dev = { .platform_data = &sh_vou_pdata, }, }; #if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE) /* SH_MMCIF */ static void mmcif_down_pwr(struct platform_device *pdev) { cn12_set_pwr(pdev, 0); } static struct resource sh_mmcif_resources[] = { [0] = { .name = "SH_MMCIF", .start = 0xA4CA0000, .end = 0xA4CA00FF, .flags = IORESOURCE_MEM, }, [1] = { /* MMC2I */ .start = evt2irq(0x5a0), .flags = IORESOURCE_IRQ, }, [2] = { /* MMC3I */ 
.start = evt2irq(0x5c0), .flags = IORESOURCE_IRQ, }, }; static struct sh_mmcif_plat_data sh_mmcif_plat = { .set_pwr = cn12_set_pwr, .down_pwr = mmcif_down_pwr, .sup_pclk = 0, /* SH7724: Max Pclk/2 */ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_NEEDS_POLL, .ocr = MMC_VDD_32_33 | MMC_VDD_33_34, }; static struct platform_device sh_mmcif_device = { .name = "sh_mmcif", .id = 0, .dev = { .platform_data = &sh_mmcif_plat, }, .num_resources = ARRAY_SIZE(sh_mmcif_resources), .resource = sh_mmcif_resources, }; #endif static struct platform_device *ecovec_devices[] __initdata = { &heartbeat_device, &nor_flash_device, &sh_eth_device, &usb0_host_device, &usb1_common_device, &usbhs_device, &lcdc_device, &ceu0_device, &ceu1_device, &keysc_device, &cn12_power, #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) &sdhi0_power, &sdhi0_device, #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) &sdhi1_device, #endif #else &msiof0_device, #endif &camera_devices[0], &camera_devices[1], &camera_devices[2], &fsi_device, &fsi_da7210_device, &irda_device, &vou_device, #if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE) &sh_mmcif_device, #endif }; #ifdef CONFIG_I2C #define EEPROM_ADDR 0x50 static u8 mac_read(struct i2c_adapter *a, u8 command) { struct i2c_msg msg[2]; u8 buf; int ret; msg[0].addr = EEPROM_ADDR; msg[0].flags = 0; msg[0].len = 1; msg[0].buf = &command; msg[1].addr = EEPROM_ADDR; msg[1].flags = I2C_M_RD; msg[1].len = 1; msg[1].buf = &buf; ret = i2c_transfer(a, msg, 2); if (ret < 0) { printk(KERN_ERR "error %d\n", ret); buf = 0xff; } return buf; } static void __init sh_eth_init(struct sh_eth_plat_data *pd) { struct i2c_adapter *a = i2c_get_adapter(1); int i; if (!a) { pr_err("can not get I2C 1\n"); return; } /* read MAC address from EEPROM */ for (i = 0; i < sizeof(pd->mac_addr); i++) { pd->mac_addr[i] = mac_read(a, 0x10 + i); msleep(10); } i2c_put_adapter(a); } #else static void __init sh_eth_init(struct 
sh_eth_plat_data *pd) { pr_err("unable to read sh_eth MAC address\n"); } #endif #define PORT_HIZA 0xA4050158 #define IODRIVEA 0xA405018A extern char ecovec24_sdram_enter_start; extern char ecovec24_sdram_enter_end; extern char ecovec24_sdram_leave_start; extern char ecovec24_sdram_leave_end; static int __init arch_setup(void) { struct clk *clk; bool cn12_enabled = false; /* register board specific self-refresh code */ sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF | SUSP_SH_RSTANDBY, &ecovec24_sdram_enter_start, &ecovec24_sdram_enter_end, &ecovec24_sdram_leave_start, &ecovec24_sdram_leave_end); /* enable STATUS0, STATUS2 and PDSTATUS */ gpio_request(GPIO_FN_STATUS0, NULL); gpio_request(GPIO_FN_STATUS2, NULL); gpio_request(GPIO_FN_PDSTATUS, NULL); /* enable SCIFA0 */ gpio_request(GPIO_FN_SCIF0_TXD, NULL); gpio_request(GPIO_FN_SCIF0_RXD, NULL); /* enable debug LED */ gpio_request(GPIO_PTG0, NULL); gpio_request(GPIO_PTG1, NULL); gpio_request(GPIO_PTG2, NULL); gpio_request(GPIO_PTG3, NULL); gpio_direction_output(GPIO_PTG0, 0); gpio_direction_output(GPIO_PTG1, 0); gpio_direction_output(GPIO_PTG2, 0); gpio_direction_output(GPIO_PTG3, 0); __raw_writew((__raw_readw(PORT_HIZA) & ~(0x1 << 1)) , PORT_HIZA); /* enable SH-Eth */ gpio_request(GPIO_PTA1, NULL); gpio_direction_output(GPIO_PTA1, 1); mdelay(20); gpio_request(GPIO_FN_RMII_RXD0, NULL); gpio_request(GPIO_FN_RMII_RXD1, NULL); gpio_request(GPIO_FN_RMII_TXD0, NULL); gpio_request(GPIO_FN_RMII_TXD1, NULL); gpio_request(GPIO_FN_RMII_REF_CLK, NULL); gpio_request(GPIO_FN_RMII_TX_EN, NULL); gpio_request(GPIO_FN_RMII_RX_ER, NULL); gpio_request(GPIO_FN_RMII_CRS_DV, NULL); gpio_request(GPIO_FN_MDIO, NULL); gpio_request(GPIO_FN_MDC, NULL); gpio_request(GPIO_FN_LNKSTA, NULL); /* enable USB */ __raw_writew(0x0000, 0xA4D80000); __raw_writew(0x0000, 0xA4D90000); gpio_request(GPIO_PTB3, NULL); gpio_request(GPIO_PTB4, NULL); gpio_request(GPIO_PTB5, NULL); gpio_direction_input(GPIO_PTB3); gpio_direction_output(GPIO_PTB4, 0); 
gpio_direction_output(GPIO_PTB5, 0); __raw_writew(0x0600, 0xa40501d4); __raw_writew(0x0600, 0xa4050192); if (gpio_get_value(GPIO_PTB3)) { printk(KERN_INFO "USB1 function is selected\n"); usb1_common_device.name = "r8a66597_udc"; } else { printk(KERN_INFO "USB1 host is selected\n"); usb1_common_device.name = "r8a66597_hcd"; } /* enable LCDC */ gpio_request(GPIO_FN_LCDD23, NULL); gpio_request(GPIO_FN_LCDD22, NULL); gpio_request(GPIO_FN_LCDD21, NULL); gpio_request(GPIO_FN_LCDD20, NULL); gpio_request(GPIO_FN_LCDD19, NULL); gpio_request(GPIO_FN_LCDD18, NULL); gpio_request(GPIO_FN_LCDD17, NULL); gpio_request(GPIO_FN_LCDD16, NULL); gpio_request(GPIO_FN_LCDD15, NULL); gpio_request(GPIO_FN_LCDD14, NULL); gpio_request(GPIO_FN_LCDD13, NULL); gpio_request(GPIO_FN_LCDD12, NULL); gpio_request(GPIO_FN_LCDD11, NULL); gpio_request(GPIO_FN_LCDD10, NULL); gpio_request(GPIO_FN_LCDD9, NULL); gpio_request(GPIO_FN_LCDD8, NULL); gpio_request(GPIO_FN_LCDD7, NULL); gpio_request(GPIO_FN_LCDD6, NULL); gpio_request(GPIO_FN_LCDD5, NULL); gpio_request(GPIO_FN_LCDD4, NULL); gpio_request(GPIO_FN_LCDD3, NULL); gpio_request(GPIO_FN_LCDD2, NULL); gpio_request(GPIO_FN_LCDD1, NULL); gpio_request(GPIO_FN_LCDD0, NULL); gpio_request(GPIO_FN_LCDDISP, NULL); gpio_request(GPIO_FN_LCDHSYN, NULL); gpio_request(GPIO_FN_LCDDCK, NULL); gpio_request(GPIO_FN_LCDVSYN, NULL); gpio_request(GPIO_FN_LCDDON, NULL); gpio_request(GPIO_FN_LCDLCLK, NULL); __raw_writew((__raw_readw(PORT_HIZA) & ~0x0001), PORT_HIZA); gpio_request(GPIO_PTE6, NULL); gpio_request(GPIO_PTU1, NULL); gpio_request(GPIO_PTR1, NULL); gpio_request(GPIO_PTA2, NULL); gpio_direction_input(GPIO_PTE6); gpio_direction_output(GPIO_PTU1, 0); gpio_direction_output(GPIO_PTR1, 0); gpio_direction_output(GPIO_PTA2, 0); /* I/O buffer drive ability is high */ __raw_writew((__raw_readw(IODRIVEA) & ~0x00c0) | 0x0080 , IODRIVEA); if (gpio_get_value(GPIO_PTE6)) { /* DVI */ lcdc_info.clock_source = LCDC_CLK_EXTERNAL; lcdc_info.ch[0].clock_divider = 1; 
lcdc_info.ch[0].lcd_modes = ecovec_dvi_modes; lcdc_info.ch[0].num_modes = ARRAY_SIZE(ecovec_dvi_modes); gpio_set_value(GPIO_PTA2, 1); gpio_set_value(GPIO_PTU1, 1); } else { /* Panel */ lcdc_info.clock_source = LCDC_CLK_PERIPHERAL; lcdc_info.ch[0].clock_divider = 2; lcdc_info.ch[0].lcd_modes = ecovec_lcd_modes; lcdc_info.ch[0].num_modes = ARRAY_SIZE(ecovec_lcd_modes); gpio_set_value(GPIO_PTR1, 1); /* FIXME * * LCDDON control is needed for Panel, * but current sh_mobile_lcdc driver doesn't control it. * It is temporary correspondence */ gpio_request(GPIO_PTF4, NULL); gpio_direction_output(GPIO_PTF4, 1); /* enable TouchScreen */ i2c_register_board_info(0, &ts_i2c_clients, 1); irq_set_irq_type(IRQ0, IRQ_TYPE_LEVEL_LOW); } /* enable CEU0 */ gpio_request(GPIO_FN_VIO0_D15, NULL); gpio_request(GPIO_FN_VIO0_D14, NULL); gpio_request(GPIO_FN_VIO0_D13, NULL); gpio_request(GPIO_FN_VIO0_D12, NULL); gpio_request(GPIO_FN_VIO0_D11, NULL); gpio_request(GPIO_FN_VIO0_D10, NULL); gpio_request(GPIO_FN_VIO0_D9, NULL); gpio_request(GPIO_FN_VIO0_D8, NULL); gpio_request(GPIO_FN_VIO0_D7, NULL); gpio_request(GPIO_FN_VIO0_D6, NULL); gpio_request(GPIO_FN_VIO0_D5, NULL); gpio_request(GPIO_FN_VIO0_D4, NULL); gpio_request(GPIO_FN_VIO0_D3, NULL); gpio_request(GPIO_FN_VIO0_D2, NULL); gpio_request(GPIO_FN_VIO0_D1, NULL); gpio_request(GPIO_FN_VIO0_D0, NULL); gpio_request(GPIO_FN_VIO0_VD, NULL); gpio_request(GPIO_FN_VIO0_CLK, NULL); gpio_request(GPIO_FN_VIO0_FLD, NULL); gpio_request(GPIO_FN_VIO0_HD, NULL); platform_resource_setup_memory(&ceu0_device, "ceu0", 4 << 20); /* enable CEU1 */ gpio_request(GPIO_FN_VIO1_D7, NULL); gpio_request(GPIO_FN_VIO1_D6, NULL); gpio_request(GPIO_FN_VIO1_D5, NULL); gpio_request(GPIO_FN_VIO1_D4, NULL); gpio_request(GPIO_FN_VIO1_D3, NULL); gpio_request(GPIO_FN_VIO1_D2, NULL); gpio_request(GPIO_FN_VIO1_D1, NULL); gpio_request(GPIO_FN_VIO1_D0, NULL); gpio_request(GPIO_FN_VIO1_FLD, NULL); gpio_request(GPIO_FN_VIO1_HD, NULL); gpio_request(GPIO_FN_VIO1_VD, NULL); 
gpio_request(GPIO_FN_VIO1_CLK, NULL); platform_resource_setup_memory(&ceu1_device, "ceu1", 4 << 20); /* enable KEYSC */ gpio_request(GPIO_FN_KEYOUT5_IN5, NULL); gpio_request(GPIO_FN_KEYOUT4_IN6, NULL); gpio_request(GPIO_FN_KEYOUT3, NULL); gpio_request(GPIO_FN_KEYOUT2, NULL); gpio_request(GPIO_FN_KEYOUT1, NULL); gpio_request(GPIO_FN_KEYOUT0, NULL); gpio_request(GPIO_FN_KEYIN0, NULL); /* enable user debug switch */ gpio_request(GPIO_PTR0, NULL); gpio_request(GPIO_PTR4, NULL); gpio_request(GPIO_PTR5, NULL); gpio_request(GPIO_PTR6, NULL); gpio_direction_input(GPIO_PTR0); gpio_direction_input(GPIO_PTR4); gpio_direction_input(GPIO_PTR5); gpio_direction_input(GPIO_PTR6); /* SD-card slot CN11 */ /* Card-detect, used on CN11, either with SDHI0 or with SPI */ gpio_request(GPIO_PTY7, NULL); gpio_direction_input(GPIO_PTY7); #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) /* enable SDHI0 on CN11 (needs DS2.4 set to ON) */ gpio_request(GPIO_FN_SDHI0WP, NULL); gpio_request(GPIO_FN_SDHI0CMD, NULL); gpio_request(GPIO_FN_SDHI0CLK, NULL); gpio_request(GPIO_FN_SDHI0D3, NULL); gpio_request(GPIO_FN_SDHI0D2, NULL); gpio_request(GPIO_FN_SDHI0D1, NULL); gpio_request(GPIO_FN_SDHI0D0, NULL); #else /* enable MSIOF0 on CN11 (needs DS2.4 set to OFF) */ gpio_request(GPIO_FN_MSIOF0_TXD, NULL); gpio_request(GPIO_FN_MSIOF0_RXD, NULL); gpio_request(GPIO_FN_MSIOF0_TSCK, NULL); gpio_request(GPIO_PTM4, NULL); /* software CS control of TSYNC pin */ gpio_direction_output(GPIO_PTM4, 1); /* active low CS */ gpio_request(GPIO_PTB6, NULL); /* 3.3V power control */ gpio_direction_output(GPIO_PTB6, 0); /* disable power by default */ gpio_request(GPIO_PTY6, NULL); /* write protect */ gpio_direction_input(GPIO_PTY6); spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus)); #endif /* MMC/SD-card slot CN12 */ #if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE) /* enable MMCIF (needs DS2.6,7 set to OFF,ON) */ gpio_request(GPIO_FN_MMC_D7, NULL); gpio_request(GPIO_FN_MMC_D6, NULL); 
gpio_request(GPIO_FN_MMC_D5, NULL); gpio_request(GPIO_FN_MMC_D4, NULL); gpio_request(GPIO_FN_MMC_D3, NULL); gpio_request(GPIO_FN_MMC_D2, NULL); gpio_request(GPIO_FN_MMC_D1, NULL); gpio_request(GPIO_FN_MMC_D0, NULL); gpio_request(GPIO_FN_MMC_CLK, NULL); gpio_request(GPIO_FN_MMC_CMD, NULL); cn12_enabled = true; #elif defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) /* enable SDHI1 on CN12 (needs DS2.6,7 set to ON,OFF) */ gpio_request(GPIO_FN_SDHI1WP, NULL); gpio_request(GPIO_FN_SDHI1CMD, NULL); gpio_request(GPIO_FN_SDHI1CLK, NULL); gpio_request(GPIO_FN_SDHI1D3, NULL); gpio_request(GPIO_FN_SDHI1D2, NULL); gpio_request(GPIO_FN_SDHI1D1, NULL); gpio_request(GPIO_FN_SDHI1D0, NULL); /* Card-detect, used on CN12 with SDHI1 */ gpio_request(GPIO_PTW7, NULL); gpio_direction_input(GPIO_PTW7); cn12_enabled = true; #endif if (cn12_enabled) /* I/O buffer drive ability is high for CN12 */ __raw_writew((__raw_readw(IODRIVEA) & ~0x3000) | 0x2000, IODRIVEA); /* enable Video */ gpio_request(GPIO_PTU2, NULL); gpio_direction_output(GPIO_PTU2, 1); /* enable Camera */ gpio_request(GPIO_PTA3, NULL); gpio_request(GPIO_PTA4, NULL); gpio_direction_output(GPIO_PTA3, 0); gpio_direction_output(GPIO_PTA4, 0); /* enable FSI */ gpio_request(GPIO_FN_FSIMCKB, NULL); gpio_request(GPIO_FN_FSIIBSD, NULL); gpio_request(GPIO_FN_FSIOBSD, NULL); gpio_request(GPIO_FN_FSIIBBCK, NULL); gpio_request(GPIO_FN_FSIIBLRCK, NULL); gpio_request(GPIO_FN_FSIOBBCK, NULL); gpio_request(GPIO_FN_FSIOBLRCK, NULL); gpio_request(GPIO_FN_CLKAUDIOBO, NULL); /* set SPU2 clock to 83.4 MHz */ clk = clk_get(NULL, "spu_clk"); if (!IS_ERR(clk)) { clk_set_rate(clk, clk_round_rate(clk, 83333333)); clk_put(clk); } /* change parent of FSI B */ clk = clk_get(NULL, "fsib_clk"); if (!IS_ERR(clk)) { /* 48kHz dummy clock was used to make sure 1/1 divide */ clk_set_rate(&sh7724_fsimckb_clk, 48000); clk_set_parent(clk, &sh7724_fsimckb_clk); clk_set_rate(clk, 48000); clk_put(clk); } gpio_request(GPIO_PTU0, NULL); 
gpio_direction_output(GPIO_PTU0, 0); mdelay(20); /* enable motion sensor */ gpio_request(GPIO_FN_INTC_IRQ1, NULL); gpio_direction_input(GPIO_FN_INTC_IRQ1); /* set VPU clock to 166 MHz */ clk = clk_get(NULL, "vpu_clk"); if (!IS_ERR(clk)) { clk_set_rate(clk, clk_round_rate(clk, 166000000)); clk_put(clk); } /* enable IrDA */ gpio_request(GPIO_FN_IRDA_OUT, NULL); gpio_request(GPIO_FN_IRDA_IN, NULL); gpio_request(GPIO_PTU5, NULL); gpio_direction_output(GPIO_PTU5, 0); /* enable I2C device */ i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); i2c_register_board_info(1, i2c1_devices, ARRAY_SIZE(i2c1_devices)); #if defined(CONFIG_VIDEO_SH_VOU) || defined(CONFIG_VIDEO_SH_VOU_MODULE) /* VOU */ gpio_request(GPIO_FN_DV_D15, NULL); gpio_request(GPIO_FN_DV_D14, NULL); gpio_request(GPIO_FN_DV_D13, NULL); gpio_request(GPIO_FN_DV_D12, NULL); gpio_request(GPIO_FN_DV_D11, NULL); gpio_request(GPIO_FN_DV_D10, NULL); gpio_request(GPIO_FN_DV_D9, NULL); gpio_request(GPIO_FN_DV_D8, NULL); gpio_request(GPIO_FN_DV_CLKI, NULL); gpio_request(GPIO_FN_DV_CLK, NULL); gpio_request(GPIO_FN_DV_VSYNC, NULL); gpio_request(GPIO_FN_DV_HSYNC, NULL); /* AK8813 power / reset sequence */ gpio_request(GPIO_PTG4, NULL); gpio_request(GPIO_PTU3, NULL); /* Reset */ gpio_direction_output(GPIO_PTG4, 0); /* Power down */ gpio_direction_output(GPIO_PTU3, 1); udelay(10); /* Power up, reset */ gpio_set_value(GPIO_PTU3, 0); udelay(10); /* Remove reset */ gpio_set_value(GPIO_PTG4, 1); #endif return platform_add_devices(ecovec_devices, ARRAY_SIZE(ecovec_devices)); } arch_initcall(arch_setup); static int __init devices_setup(void) { sh_eth_init(&sh_eth_plat); return 0; } device_initcall(devices_setup); static struct sh_machine_vector mv_ecovec __initmv = { .mv_name = "R0P7724 (EcoVec)", };
gpl-2.0
SlimRoms/kernel_samsung_msm8660
drivers/media/dvb/frontends/ves1x93.c
4749
14027
/* Driver for VES1893 and VES1993 QPSK Demodulators Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de> Copyright (C) 2001 Ronny Strutz <3des@elitedvb.de> Copyright (C) 2002 Dennis Noermann <dennis.noermann@noernet.de> Copyright (C) 2002-2003 Andreas Oberritter <obi@linuxtv.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/delay.h> #include "dvb_frontend.h" #include "ves1x93.h" struct ves1x93_state { struct i2c_adapter* i2c; /* configuration settings */ const struct ves1x93_config* config; struct dvb_frontend frontend; /* previous uncorrected block counter */ fe_spectral_inversion_t inversion; u8 *init_1x93_tab; u8 *init_1x93_wtab; u8 tab_size; u8 demod_type; }; static int debug; #define dprintk if (debug) printk #define DEMOD_VES1893 0 #define DEMOD_VES1993 1 static u8 init_1893_tab [] = { 0x01, 0xa4, 0x35, 0x80, 0x2a, 0x0b, 0x55, 0xc4, 0x09, 0x69, 0x00, 0x86, 0x4c, 0x28, 0x7f, 0x00, 0x00, 0x81, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x21, 0xb0, 0x14, 0x00, 0xdc, 0x00, 0x81, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x7f, 0x00 }; static u8 init_1993_tab [] = { 0x00, 0x9c, 0x35, 0x80, 0x6a, 0x09, 0x72, 0x8c, 0x09, 0x6b, 
0x00, 0x00, 0x4c, 0x08, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x40, 0x21, 0xb0, 0x00, 0x00, 0x00, 0x10, 0x81, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x0e, 0x80, 0x00 }; static u8 init_1893_wtab[] = { 1,1,1,1,1,1,1,1, 1,1,0,0,1,1,0,0, 0,1,0,0,0,0,0,0, 1,0,1,1,0,0,0,1, 1,1,1,0,0,0,0,0, 0,0,1,1,0,0,0,0, 1,1,1,0,1,1 }; static u8 init_1993_wtab[] = { 1,1,1,1,1,1,1,1, 1,1,0,0,1,1,0,0, 0,1,0,0,0,0,0,0, 1,1,1,1,0,0,0,1, 1,1,1,0,0,0,0,0, 0,0,1,1,0,0,0,0, 1,1,1,0,1,1,1,1, 1,1,1,1,1 }; static int ves1x93_writereg (struct ves1x93_state* state, u8 reg, u8 data) { u8 buf [] = { 0x00, reg, data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 3 }; int err; if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) { dprintk ("%s: writereg error (err == %i, reg == 0x%02x, data == 0x%02x)\n", __func__, err, reg, data); return -EREMOTEIO; } return 0; } static u8 ves1x93_readreg (struct ves1x93_state* state, u8 reg) { int ret; u8 b0 [] = { 0x00, reg }; u8 b1 [] = { 0 }; struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 2 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer (state->i2c, msg, 2); if (ret != 2) return ret; return b1[0]; } static int ves1x93_clr_bit (struct ves1x93_state* state) { msleep(10); ves1x93_writereg (state, 0, state->init_1x93_tab[0] & 0xfe); ves1x93_writereg (state, 0, state->init_1x93_tab[0]); msleep(50); return 0; } static int ves1x93_set_inversion (struct ves1x93_state* state, fe_spectral_inversion_t inversion) { u8 val; /* * inversion on/off are interchanged because i and q seem to * be swapped on the hardware */ switch (inversion) { case INVERSION_OFF: val = 0xc0; break; case INVERSION_ON: val = 0x80; break; case INVERSION_AUTO: val = 0x00; break; default: return -EINVAL; } return 
ves1x93_writereg (state, 0x0c, (state->init_1x93_tab[0x0c] & 0x3f) | val); } static int ves1x93_set_fec (struct ves1x93_state* state, fe_code_rate_t fec) { if (fec == FEC_AUTO) return ves1x93_writereg (state, 0x0d, 0x08); else if (fec < FEC_1_2 || fec > FEC_8_9) return -EINVAL; else return ves1x93_writereg (state, 0x0d, fec - FEC_1_2); } static fe_code_rate_t ves1x93_get_fec (struct ves1x93_state* state) { return FEC_1_2 + ((ves1x93_readreg (state, 0x0d) >> 4) & 0x7); } static int ves1x93_set_symbolrate (struct ves1x93_state* state, u32 srate) { u32 BDR; u32 ratio; u8 ADCONF, FCONF, FNR, AGCR; u32 BDRI; u32 tmp; u32 FIN; dprintk("%s: srate == %d\n", __func__, (unsigned int) srate); if (srate > state->config->xin/2) srate = state->config->xin/2; if (srate < 500000) srate = 500000; #define MUL (1UL<<26) FIN = (state->config->xin + 6000) >> 4; tmp = srate << 6; ratio = tmp / FIN; tmp = (tmp % FIN) << 8; ratio = (ratio << 8) + tmp / FIN; tmp = (tmp % FIN) << 8; ratio = (ratio << 8) + tmp / FIN; FNR = 0xff; if (ratio < MUL/3) FNR = 0; if (ratio < (MUL*11)/50) FNR = 1; if (ratio < MUL/6) FNR = 2; if (ratio < MUL/9) FNR = 3; if (ratio < MUL/12) FNR = 4; if (ratio < (MUL*11)/200) FNR = 5; if (ratio < MUL/24) FNR = 6; if (ratio < (MUL*27)/1000) FNR = 7; if (ratio < MUL/48) FNR = 8; if (ratio < (MUL*137)/10000) FNR = 9; if (FNR == 0xff) { ADCONF = 0x89; FCONF = 0x80; FNR = 0; } else { ADCONF = 0x81; FCONF = 0x88 | (FNR >> 1) | ((FNR & 0x01) << 5); /*FCONF = 0x80 | ((FNR & 0x01) << 5) | (((FNR > 1) & 0x03) << 3) | ((FNR >> 1) & 0x07);*/ } BDR = (( (ratio << (FNR >> 1)) >> 4) + 1) >> 1; BDRI = ( ((FIN << 8) / ((srate << (FNR >> 1)) >> 2)) + 1) >> 1; dprintk("FNR= %d\n", FNR); dprintk("ratio= %08x\n", (unsigned int) ratio); dprintk("BDR= %08x\n", (unsigned int) BDR); dprintk("BDRI= %02x\n", (unsigned int) BDRI); if (BDRI > 0xff) BDRI = 0xff; ves1x93_writereg (state, 0x06, 0xff & BDR); ves1x93_writereg (state, 0x07, 0xff & (BDR >> 8)); ves1x93_writereg (state, 0x08, 0x0f & (BDR 
>> 16)); ves1x93_writereg (state, 0x09, BDRI); ves1x93_writereg (state, 0x20, ADCONF); ves1x93_writereg (state, 0x21, FCONF); AGCR = state->init_1x93_tab[0x05]; if (state->config->invert_pwm) AGCR |= 0x20; if (srate < 6000000) AGCR |= 0x80; else AGCR &= ~0x80; ves1x93_writereg (state, 0x05, AGCR); /* ves1993 hates this, will lose lock */ if (state->demod_type != DEMOD_VES1993) ves1x93_clr_bit (state); return 0; } static int ves1x93_init (struct dvb_frontend* fe) { struct ves1x93_state* state = fe->demodulator_priv; int i; int val; dprintk("%s: init chip\n", __func__); for (i = 0; i < state->tab_size; i++) { if (state->init_1x93_wtab[i]) { val = state->init_1x93_tab[i]; if (state->config->invert_pwm && (i == 0x05)) val |= 0x20; /* invert PWM */ ves1x93_writereg (state, i, val); } } return 0; } static int ves1x93_set_voltage (struct dvb_frontend* fe, fe_sec_voltage_t voltage) { struct ves1x93_state* state = fe->demodulator_priv; switch (voltage) { case SEC_VOLTAGE_13: return ves1x93_writereg (state, 0x1f, 0x20); case SEC_VOLTAGE_18: return ves1x93_writereg (state, 0x1f, 0x30); case SEC_VOLTAGE_OFF: return ves1x93_writereg (state, 0x1f, 0x00); default: return -EINVAL; } } static int ves1x93_read_status(struct dvb_frontend* fe, fe_status_t* status) { struct ves1x93_state* state = fe->demodulator_priv; u8 sync = ves1x93_readreg (state, 0x0e); /* * The ves1893 sometimes returns sync values that make no sense, * because, e.g., the SIGNAL bit is 0, while some of the higher * bits are 1 (and how can there be a CARRIER w/o a SIGNAL?). * Tests showed that the VITERBI and SYNC bits are returned * reliably, while the SIGNAL and CARRIER bits ar sometimes wrong. * If such a case occurs, we read the value again, until we get a * valid value. 
*/ int maxtry = 10; /* just for safety - let's not get stuck here */ while ((sync & 0x03) != 0x03 && (sync & 0x0c) && maxtry--) { msleep(10); sync = ves1x93_readreg (state, 0x0e); } *status = 0; if (sync & 1) *status |= FE_HAS_SIGNAL; if (sync & 2) *status |= FE_HAS_CARRIER; if (sync & 4) *status |= FE_HAS_VITERBI; if (sync & 8) *status |= FE_HAS_SYNC; if ((sync & 0x1f) == 0x1f) *status |= FE_HAS_LOCK; return 0; } static int ves1x93_read_ber(struct dvb_frontend* fe, u32* ber) { struct ves1x93_state* state = fe->demodulator_priv; *ber = ves1x93_readreg (state, 0x15); *ber |= (ves1x93_readreg (state, 0x16) << 8); *ber |= ((ves1x93_readreg (state, 0x17) & 0x0F) << 16); *ber *= 10; return 0; } static int ves1x93_read_signal_strength(struct dvb_frontend* fe, u16* strength) { struct ves1x93_state* state = fe->demodulator_priv; u8 signal = ~ves1x93_readreg (state, 0x0b); *strength = (signal << 8) | signal; return 0; } static int ves1x93_read_snr(struct dvb_frontend* fe, u16* snr) { struct ves1x93_state* state = fe->demodulator_priv; u8 _snr = ~ves1x93_readreg (state, 0x1c); *snr = (_snr << 8) | _snr; return 0; } static int ves1x93_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) { struct ves1x93_state* state = fe->demodulator_priv; *ucblocks = ves1x93_readreg (state, 0x18) & 0x7f; if (*ucblocks == 0x7f) *ucblocks = 0xffffffff; /* counter overflow... */ ves1x93_writereg (state, 0x18, 0x00); /* reset the counter */ ves1x93_writereg (state, 0x18, 0x80); /* dto. 
*/ return 0; } static int ves1x93_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p) { struct ves1x93_state* state = fe->demodulator_priv; if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe, p); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } ves1x93_set_inversion (state, p->inversion); ves1x93_set_fec (state, p->u.qpsk.fec_inner); ves1x93_set_symbolrate (state, p->u.qpsk.symbol_rate); state->inversion = p->inversion; return 0; } static int ves1x93_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p) { struct ves1x93_state* state = fe->demodulator_priv; int afc; afc = ((int)((char)(ves1x93_readreg (state, 0x0a) << 1)))/2; afc = (afc * (int)(p->u.qpsk.symbol_rate/1000/8))/16; p->frequency -= afc; /* * inversion indicator is only valid * if auto inversion was used */ if (state->inversion == INVERSION_AUTO) p->inversion = (ves1x93_readreg (state, 0x0f) & 2) ? INVERSION_OFF : INVERSION_ON; p->u.qpsk.fec_inner = ves1x93_get_fec (state); /* XXX FIXME: timing offset !! 
*/ return 0; } static int ves1x93_sleep(struct dvb_frontend* fe) { struct ves1x93_state* state = fe->demodulator_priv; return ves1x93_writereg (state, 0x00, 0x08); } static void ves1x93_release(struct dvb_frontend* fe) { struct ves1x93_state* state = fe->demodulator_priv; kfree(state); } static int ves1x93_i2c_gate_ctrl(struct dvb_frontend* fe, int enable) { struct ves1x93_state* state = fe->demodulator_priv; if (enable) { return ves1x93_writereg(state, 0x00, 0x11); } else { return ves1x93_writereg(state, 0x00, 0x01); } } static struct dvb_frontend_ops ves1x93_ops; struct dvb_frontend* ves1x93_attach(const struct ves1x93_config* config, struct i2c_adapter* i2c) { struct ves1x93_state* state = NULL; u8 identity; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct ves1x93_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->config = config; state->i2c = i2c; state->inversion = INVERSION_OFF; /* check if the demod is there + identify it */ identity = ves1x93_readreg(state, 0x1e); switch (identity) { case 0xdc: /* VES1893A rev1 */ printk("ves1x93: Detected ves1893a rev1\n"); state->demod_type = DEMOD_VES1893; state->init_1x93_tab = init_1893_tab; state->init_1x93_wtab = init_1893_wtab; state->tab_size = sizeof(init_1893_tab); break; case 0xdd: /* VES1893A rev2 */ printk("ves1x93: Detected ves1893a rev2\n"); state->demod_type = DEMOD_VES1893; state->init_1x93_tab = init_1893_tab; state->init_1x93_wtab = init_1893_wtab; state->tab_size = sizeof(init_1893_tab); break; case 0xde: /* VES1993 */ printk("ves1x93: Detected ves1993\n"); state->demod_type = DEMOD_VES1993; state->init_1x93_tab = init_1993_tab; state->init_1x93_wtab = init_1993_wtab; state->tab_size = sizeof(init_1993_tab); break; default: goto error; } /* create dvb_frontend */ memcpy(&state->frontend.ops, &ves1x93_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); return NULL; } static 
struct dvb_frontend_ops ves1x93_ops = { .info = { .name = "VLSI VES1x93 DVB-S", .type = FE_QPSK, .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 125, /* kHz for QPSK frontends */ .frequency_tolerance = 29500, .symbol_rate_min = 1000000, .symbol_rate_max = 45000000, /* .symbol_rate_tolerance = ???,*/ .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK }, .release = ves1x93_release, .init = ves1x93_init, .sleep = ves1x93_sleep, .i2c_gate_ctrl = ves1x93_i2c_gate_ctrl, .set_frontend = ves1x93_set_frontend, .get_frontend = ves1x93_get_frontend, .read_status = ves1x93_read_status, .read_ber = ves1x93_read_ber, .read_signal_strength = ves1x93_read_signal_strength, .read_snr = ves1x93_read_snr, .read_ucblocks = ves1x93_read_ucblocks, .set_voltage = ves1x93_set_voltage, }; module_param(debug, int, 0644); MODULE_DESCRIPTION("VLSI VES1x93 DVB-S Demodulator driver"); MODULE_AUTHOR("Ralph Metzler"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ves1x93_attach);
gpl-2.0
poondog/kangaroo-m7-mkv
arch/arm/mach-omap2/omap_l3_smx.c
5005
7441
/* * OMAP3XXX L3 Interconnect Driver * * Copyright (C) 2011 Texas Corporation * Felipe Balbi <balbi@ti.com> * Santosh Shilimkar <santosh.shilimkar@ti.com> * Sricharan <r.sricharan@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/io.h> #include "omap_l3_smx.h" static inline u64 omap3_l3_readll(void __iomem *base, u16 reg) { return __raw_readll(base + reg); } static inline void omap3_l3_writell(void __iomem *base, u16 reg, u64 value) { __raw_writell(value, base + reg); } static inline enum omap3_l3_code omap3_l3_decode_error_code(u64 error) { return (error & 0x0f000000) >> L3_ERROR_LOG_CODE; } static inline u32 omap3_l3_decode_addr(u64 error_addr) { return error_addr & 0xffffffff; } static inline unsigned omap3_l3_decode_cmd(u64 error) { return (error & 0x07) >> L3_ERROR_LOG_CMD; } static inline enum omap3_l3_initiator_id omap3_l3_decode_initid(u64 error) { return (error & 0xff00) >> L3_ERROR_LOG_INITID; } static inline unsigned omap3_l3_decode_req_info(u64 error) { return (error >> 32) & 0xffff; } static char *omap3_l3_code_string(u8 code) { switch (code) { case OMAP_L3_CODE_NOERROR: return "No Error"; case OMAP_L3_CODE_UNSUP_CMD: return "Unsupported Command"; case OMAP_L3_CODE_ADDR_HOLE: 
return "Address Hole"; case OMAP_L3_CODE_PROTECT_VIOLATION: return "Protection Violation"; case OMAP_L3_CODE_IN_BAND_ERR: return "In-band Error"; case OMAP_L3_CODE_REQ_TOUT_NOT_ACCEPT: return "Request Timeout Not Accepted"; case OMAP_L3_CODE_REQ_TOUT_NO_RESP: return "Request Timeout, no response"; default: return "UNKNOWN error"; } } static char *omap3_l3_initiator_string(u8 initid) { switch (initid) { case OMAP_L3_LCD: return "LCD"; case OMAP_L3_SAD2D: return "SAD2D"; case OMAP_L3_IA_MPU_SS_1: case OMAP_L3_IA_MPU_SS_2: case OMAP_L3_IA_MPU_SS_3: case OMAP_L3_IA_MPU_SS_4: case OMAP_L3_IA_MPU_SS_5: return "MPU"; case OMAP_L3_IA_IVA_SS_1: case OMAP_L3_IA_IVA_SS_2: case OMAP_L3_IA_IVA_SS_3: return "IVA_SS"; case OMAP_L3_IA_IVA_SS_DMA_1: case OMAP_L3_IA_IVA_SS_DMA_2: case OMAP_L3_IA_IVA_SS_DMA_3: case OMAP_L3_IA_IVA_SS_DMA_4: case OMAP_L3_IA_IVA_SS_DMA_5: case OMAP_L3_IA_IVA_SS_DMA_6: return "IVA_SS_DMA"; case OMAP_L3_IA_SGX: return "SGX"; case OMAP_L3_IA_CAM_1: case OMAP_L3_IA_CAM_2: case OMAP_L3_IA_CAM_3: return "CAM"; case OMAP_L3_IA_DAP: return "DAP"; case OMAP_L3_SDMA_WR_1: case OMAP_L3_SDMA_WR_2: return "SDMA_WR"; case OMAP_L3_SDMA_RD_1: case OMAP_L3_SDMA_RD_2: case OMAP_L3_SDMA_RD_3: case OMAP_L3_SDMA_RD_4: return "SDMA_RD"; case OMAP_L3_USBOTG: return "USB_OTG"; case OMAP_L3_USBHOST: return "USB_HOST"; default: return "UNKNOWN Initiator"; } } /* * omap3_l3_block_irq - handles a register block's irq * @l3: struct omap3_l3 * * @base: register block base address * @error: L3_ERROR_LOG register of our block * * Called in hard-irq context. Caller should take care of locking * * OMAP36xx TRM gives, on page 2001, Figure 9-10, the Typical Error * Analysis Sequence, we are following that sequence here, please * refer to that Figure for more information on the subject. 
*/ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3, u64 error, int error_addr) { u8 code = omap3_l3_decode_error_code(error); u8 initid = omap3_l3_decode_initid(error); u8 multi = error & L3_ERROR_LOG_MULTI; u32 address = omap3_l3_decode_addr(error_addr); WARN(true, "%s seen by %s %s at address %x\n", omap3_l3_code_string(code), omap3_l3_initiator_string(initid), multi ? "Multiple Errors" : "", address); return IRQ_HANDLED; } static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) { struct omap3_l3 *l3 = _l3; u64 status, clear; u64 error; u64 error_addr; u64 err_source = 0; void __iomem *base; int int_type; irqreturn_t ret = IRQ_NONE; int_type = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR; if (!int_type) { status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0); /* * if we have a timeout error, there's nothing we can * do besides rebooting the board. So let's BUG on any * of such errors and handle the others. timeout error * is severe and not expected to occur. */ BUG_ON(status & L3_STATUS_0_TIMEOUT_MASK); } else { status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_1); /* No timeout error for debug sources */ } /* identify the error source */ err_source = __ffs(status); base = l3->rt + omap3_l3_bases[int_type][err_source]; error = omap3_l3_readll(base, L3_ERROR_LOG); if (error) { error_addr = omap3_l3_readll(base, L3_ERROR_LOG_ADDR); ret |= omap3_l3_block_irq(l3, error, error_addr); } /* Clear the status register */ clear = (L3_AGENT_STATUS_CLEAR_IA << int_type) | L3_AGENT_STATUS_CLEAR_TA; omap3_l3_writell(base, L3_AGENT_STATUS, clear); /* clear the error log register */ omap3_l3_writell(base, L3_ERROR_LOG, error); return ret; } static int __init omap3_l3_probe(struct platform_device *pdev) { struct omap3_l3 *l3; struct resource *res; int ret; l3 = kzalloc(sizeof(*l3), GFP_KERNEL); if (!l3) return -ENOMEM; platform_set_drvdata(pdev, l3); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "couldn't find 
resource\n"); ret = -ENODEV; goto err0; } l3->rt = ioremap(res->start, resource_size(res)); if (!l3->rt) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err0; } l3->debug_irq = platform_get_irq(pdev, 0); ret = request_irq(l3->debug_irq, omap3_l3_app_irq, IRQF_DISABLED | IRQF_TRIGGER_RISING, "l3-debug-irq", l3); if (ret) { dev_err(&pdev->dev, "couldn't request debug irq\n"); goto err1; } l3->app_irq = platform_get_irq(pdev, 1); ret = request_irq(l3->app_irq, omap3_l3_app_irq, IRQF_DISABLED | IRQF_TRIGGER_RISING, "l3-app-irq", l3); if (ret) { dev_err(&pdev->dev, "couldn't request app irq\n"); goto err2; } return 0; err2: free_irq(l3->debug_irq, l3); err1: iounmap(l3->rt); err0: kfree(l3); return ret; } static int __exit omap3_l3_remove(struct platform_device *pdev) { struct omap3_l3 *l3 = platform_get_drvdata(pdev); free_irq(l3->app_irq, l3); free_irq(l3->debug_irq, l3); iounmap(l3->rt); kfree(l3); return 0; } static struct platform_driver omap3_l3_driver = { .remove = __exit_p(omap3_l3_remove), .driver = { .name = "omap_l3_smx", }, }; static int __init omap3_l3_init(void) { return platform_driver_probe(&omap3_l3_driver, omap3_l3_probe); } postcore_initcall_sync(omap3_l3_init); static void __exit omap3_l3_exit(void) { platform_driver_unregister(&omap3_l3_driver); } module_exit(omap3_l3_exit);
gpl-2.0
ChronoMonochrome/android_kernel_ste-3.4
arch/x86/pci/mmconfig_64.c
5005
2899
/* * mmconfig.c - Low-level direct PCI config space access via MMCONFIG * * This is an 64bit optimized version that always keeps the full mmconfig * space mapped. This allows lockless config space operation. */ #include <linux/pci.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/bitmap.h> #include <asm/e820.h> #include <asm/pci_x86.h> #define PREFIX "PCI: " static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn) { struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus); if (cfg && cfg->virt) return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12)); return NULL; } static int pci_mmcfg_read(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 *value) { char __iomem *addr; /* Why do we have this when nobody checks it. How about a BUG()!? -AK */ if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) { err: *value = -1; return -EINVAL; } addr = pci_dev_base(seg, bus, devfn); if (!addr) goto err; switch (len) { case 1: *value = mmio_config_readb(addr + reg); break; case 2: *value = mmio_config_readw(addr + reg); break; case 4: *value = mmio_config_readl(addr + reg); break; } return 0; } static int pci_mmcfg_write(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 value) { char __iomem *addr; /* Why do we have this when nobody checks it. How about a BUG()!? 
-AK */ if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) return -EINVAL; addr = pci_dev_base(seg, bus, devfn); if (!addr) return -EINVAL; switch (len) { case 1: mmio_config_writeb(addr + reg, value); break; case 2: mmio_config_writew(addr + reg, value); break; case 4: mmio_config_writel(addr + reg, value); break; } return 0; } static const struct pci_raw_ops pci_mmcfg = { .read = pci_mmcfg_read, .write = pci_mmcfg_write, }; static void __iomem * __init mcfg_ioremap(struct pci_mmcfg_region *cfg) { void __iomem *addr; u64 start, size; int num_buses; start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus); num_buses = cfg->end_bus - cfg->start_bus + 1; size = PCI_MMCFG_BUS_OFFSET(num_buses); addr = ioremap_nocache(start, size); if (addr) addr -= PCI_MMCFG_BUS_OFFSET(cfg->start_bus); return addr; } int __init pci_mmcfg_arch_init(void) { struct pci_mmcfg_region *cfg; list_for_each_entry(cfg, &pci_mmcfg_list, list) { cfg->virt = mcfg_ioremap(cfg); if (!cfg->virt) { printk(KERN_ERR PREFIX "can't map MMCONFIG at %pR\n", &cfg->res); pci_mmcfg_arch_free(); return 0; } } raw_pci_ext_ops = &pci_mmcfg; return 1; } void __init pci_mmcfg_arch_free(void) { struct pci_mmcfg_region *cfg; list_for_each_entry(cfg, &pci_mmcfg_list, list) { if (cfg->virt) { iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus)); cfg->virt = NULL; } } }
gpl-2.0
clemsyn/asusOC
drivers/input/touchscreen/elo.c
9101
9104
/* * Elo serial touchscreen driver * * Copyright (c) 2004 Vojtech Pavlik */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ /* * This driver can handle serial Elo touchscreens using either the Elo standard * 'E271-2210' 10-byte protocol, Elo legacy 'E281A-4002' 6-byte protocol, Elo * legacy 'E271-140' 4-byte protocol and Elo legacy 'E261-280' 3-byte protocol. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #include <linux/ctype.h> #define DRIVER_DESC "Elo serial touchscreen driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Definitions & global arrays. */ #define ELO_MAX_LENGTH 10 #define ELO10_PACKET_LEN 8 #define ELO10_TOUCH 0x03 #define ELO10_PRESSURE 0x80 #define ELO10_LEAD_BYTE 'U' #define ELO10_ID_CMD 'i' #define ELO10_TOUCH_PACKET 'T' #define ELO10_ACK_PACKET 'A' #define ELI10_ID_PACKET 'I' /* * Per-touchscreen data. 
*/ struct elo { struct input_dev *dev; struct serio *serio; struct mutex cmd_mutex; struct completion cmd_done; int id; int idx; unsigned char expected_packet; unsigned char csum; unsigned char data[ELO_MAX_LENGTH]; unsigned char response[ELO10_PACKET_LEN]; char phys[32]; }; static void elo_process_data_10(struct elo *elo, unsigned char data) { struct input_dev *dev = elo->dev; elo->data[elo->idx] = data; switch (elo->idx++) { case 0: elo->csum = 0xaa; if (data != ELO10_LEAD_BYTE) { dev_dbg(&elo->serio->dev, "unsynchronized data: 0x%02x\n", data); elo->idx = 0; } break; case 9: elo->idx = 0; if (data != elo->csum) { dev_dbg(&elo->serio->dev, "bad checksum: 0x%02x, expected 0x%02x\n", data, elo->csum); break; } if (elo->data[1] != elo->expected_packet) { if (elo->data[1] != ELO10_TOUCH_PACKET) dev_dbg(&elo->serio->dev, "unexpected packet: 0x%02x\n", elo->data[1]); break; } if (likely(elo->data[1] == ELO10_TOUCH_PACKET)) { input_report_abs(dev, ABS_X, (elo->data[4] << 8) | elo->data[3]); input_report_abs(dev, ABS_Y, (elo->data[6] << 8) | elo->data[5]); if (elo->data[2] & ELO10_PRESSURE) input_report_abs(dev, ABS_PRESSURE, (elo->data[8] << 8) | elo->data[7]); input_report_key(dev, BTN_TOUCH, elo->data[2] & ELO10_TOUCH); input_sync(dev); } else if (elo->data[1] == ELO10_ACK_PACKET) { if (elo->data[2] == '0') elo->expected_packet = ELO10_TOUCH_PACKET; complete(&elo->cmd_done); } else { memcpy(elo->response, &elo->data[1], ELO10_PACKET_LEN); elo->expected_packet = ELO10_ACK_PACKET; } break; } elo->csum += data; } static void elo_process_data_6(struct elo *elo, unsigned char data) { struct input_dev *dev = elo->dev; elo->data[elo->idx] = data; switch (elo->idx++) { case 0: if ((data & 0xc0) != 0xc0) elo->idx = 0; break; case 1: if ((data & 0xc0) != 0x80) elo->idx = 0; break; case 2: if ((data & 0xc0) != 0x40) elo->idx = 0; break; case 3: if (data & 0xc0) { elo->idx = 0; break; } input_report_abs(dev, ABS_X, ((elo->data[0] & 0x3f) << 6) | (elo->data[1] & 0x3f)); 
input_report_abs(dev, ABS_Y, ((elo->data[2] & 0x3f) << 6) | (elo->data[3] & 0x3f)); if (elo->id == 2) { input_report_key(dev, BTN_TOUCH, 1); input_sync(dev); elo->idx = 0; } break; case 4: if (data) { input_sync(dev); elo->idx = 0; } break; case 5: if ((data & 0xf0) == 0) { input_report_abs(dev, ABS_PRESSURE, elo->data[5]); input_report_key(dev, BTN_TOUCH, !!elo->data[5]); } input_sync(dev); elo->idx = 0; break; } } static void elo_process_data_3(struct elo *elo, unsigned char data) { struct input_dev *dev = elo->dev; elo->data[elo->idx] = data; switch (elo->idx++) { case 0: if ((data & 0x7f) != 0x01) elo->idx = 0; break; case 2: input_report_key(dev, BTN_TOUCH, !(elo->data[1] & 0x80)); input_report_abs(dev, ABS_X, elo->data[1]); input_report_abs(dev, ABS_Y, elo->data[2]); input_sync(dev); elo->idx = 0; break; } } static irqreturn_t elo_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct elo *elo = serio_get_drvdata(serio); switch (elo->id) { case 0: elo_process_data_10(elo, data); break; case 1: case 2: elo_process_data_6(elo, data); break; case 3: elo_process_data_3(elo, data); break; } return IRQ_HANDLED; } static int elo_command_10(struct elo *elo, unsigned char *packet) { int rc = -1; int i; unsigned char csum = 0xaa + ELO10_LEAD_BYTE; mutex_lock(&elo->cmd_mutex); serio_pause_rx(elo->serio); elo->expected_packet = toupper(packet[0]); init_completion(&elo->cmd_done); serio_continue_rx(elo->serio); if (serio_write(elo->serio, ELO10_LEAD_BYTE)) goto out; for (i = 0; i < ELO10_PACKET_LEN; i++) { csum += packet[i]; if (serio_write(elo->serio, packet[i])) goto out; } if (serio_write(elo->serio, csum)) goto out; wait_for_completion_timeout(&elo->cmd_done, HZ); if (elo->expected_packet == ELO10_TOUCH_PACKET) { /* We are back in reporting mode, the command was ACKed */ memcpy(packet, elo->response, ELO10_PACKET_LEN); rc = 0; } out: mutex_unlock(&elo->cmd_mutex); return rc; } static int elo_setup_10(struct elo *elo) { static const char 
*elo_types[] = { "Accu", "Dura", "Intelli", "Carroll" }; struct input_dev *dev = elo->dev; unsigned char packet[ELO10_PACKET_LEN] = { ELO10_ID_CMD }; if (elo_command_10(elo, packet)) return -1; dev->id.version = (packet[5] << 8) | packet[4]; input_set_abs_params(dev, ABS_X, 96, 4000, 0, 0); input_set_abs_params(dev, ABS_Y, 96, 4000, 0, 0); if (packet[3] & ELO10_PRESSURE) input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0); dev_info(&elo->serio->dev, "%sTouch touchscreen, fw: %02x.%02x, features: 0x%02x, controller: 0x%02x\n", elo_types[(packet[1] -'0') & 0x03], packet[5], packet[4], packet[3], packet[7]); return 0; } /* * elo_disconnect() is the opposite of elo_connect() */ static void elo_disconnect(struct serio *serio) { struct elo *elo = serio_get_drvdata(serio); input_get_device(elo->dev); input_unregister_device(elo->dev); serio_close(serio); serio_set_drvdata(serio, NULL); input_put_device(elo->dev); kfree(elo); } /* * elo_connect() is the routine that is called when someone adds a * new serio device that supports Gunze protocol and registers it as * an input device. 
*/ static int elo_connect(struct serio *serio, struct serio_driver *drv) { struct elo *elo; struct input_dev *input_dev; int err; elo = kzalloc(sizeof(struct elo), GFP_KERNEL); input_dev = input_allocate_device(); if (!elo || !input_dev) { err = -ENOMEM; goto fail1; } elo->serio = serio; elo->id = serio->id.id; elo->dev = input_dev; elo->expected_packet = ELO10_TOUCH_PACKET; mutex_init(&elo->cmd_mutex); init_completion(&elo->cmd_done); snprintf(elo->phys, sizeof(elo->phys), "%s/input0", serio->phys); input_dev->name = "Elo Serial TouchScreen"; input_dev->phys = elo->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_ELO; input_dev->id.product = elo->id; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); serio_set_drvdata(serio, elo); err = serio_open(serio, drv); if (err) goto fail2; switch (elo->id) { case 0: /* 10-byte protocol */ if (elo_setup_10(elo)) goto fail3; break; case 1: /* 6-byte protocol */ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0); case 2: /* 4-byte protocol */ input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0); input_set_abs_params(input_dev, ABS_Y, 96, 4000, 0, 0); break; case 3: /* 3-byte protocol */ input_set_abs_params(input_dev, ABS_X, 0, 255, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, 255, 0, 0); break; } err = input_register_device(elo->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(elo); return err; } /* * The serio driver structure. 
*/ static struct serio_device_id elo_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_ELO, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, elo_serio_ids); static struct serio_driver elo_drv = { .driver = { .name = "elo", }, .description = DRIVER_DESC, .id_table = elo_serio_ids, .interrupt = elo_interrupt, .connect = elo_connect, .disconnect = elo_disconnect, }; /* * The functions for inserting/removing us as a module. */ static int __init elo_init(void) { return serio_register_driver(&elo_drv); } static void __exit elo_exit(void) { serio_unregister_driver(&elo_drv); } module_init(elo_init); module_exit(elo_exit);
gpl-2.0
randomblame/3.1.10_a50x
drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
12685
2567
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* This source file is specifically designed to interface with the v4l-dvb cs53l32a module. */ #include "pvrusb2-cs53l32a.h" #include "pvrusb2-hdw-internal.h" #include "pvrusb2-debug.h" #include <linux/videodev2.h> #include <media/v4l2-common.h> #include <linux/errno.h> struct routing_scheme { const int *def; unsigned int cnt; }; static const int routing_scheme1[] = { [PVR2_CVAL_INPUT_TV] = 2, /* 1 or 2 seems to work here */ [PVR2_CVAL_INPUT_RADIO] = 2, [PVR2_CVAL_INPUT_COMPOSITE] = 0, [PVR2_CVAL_INPUT_SVIDEO] = 0, }; static const struct routing_scheme routing_def1 = { .def = routing_scheme1, .cnt = ARRAY_SIZE(routing_scheme1), }; static const struct routing_scheme *routing_schemes[] = { [PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1, }; void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) { if (hdw->input_dirty || hdw->force_dirty) { const struct routing_scheme *sp; unsigned int sid = hdw->hdw_desc->signal_routing_scheme; u32 input; pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)", hdw->input_val); sp = (sid < ARRAY_SIZE(routing_schemes)) ? 
routing_schemes[sid] : NULL; if ((sp == NULL) || (hdw->input_val < 0) || (hdw->input_val >= sp->cnt)) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "*** WARNING *** subdev v4l2 set_input:" " Invalid routing scheme (%u)" " and/or input (%d)", sid, hdw->input_val); return; } input = sp->def[hdw->input_val]; sd->ops->audio->s_routing(sd, input, 0, 0); } } /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 70 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
ashleyjune/SM-G360T1_kernel
drivers/ide/ide-devsets.c
12941
3915
#include <linux/kernel.h> #include <linux/gfp.h> #include <linux/ide.h> DEFINE_MUTEX(ide_setting_mtx); ide_devset_get(io_32bit, io_32bit); static int set_io_32bit(ide_drive_t *drive, int arg) { if (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT) return -EPERM; if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1)) return -EINVAL; drive->io_32bit = arg; return 0; } ide_devset_get_flag(ksettings, IDE_DFLAG_KEEP_SETTINGS); static int set_ksettings(ide_drive_t *drive, int arg) { if (arg < 0 || arg > 1) return -EINVAL; if (arg) drive->dev_flags |= IDE_DFLAG_KEEP_SETTINGS; else drive->dev_flags &= ~IDE_DFLAG_KEEP_SETTINGS; return 0; } ide_devset_get_flag(using_dma, IDE_DFLAG_USING_DMA); static int set_using_dma(ide_drive_t *drive, int arg) { #ifdef CONFIG_BLK_DEV_IDEDMA int err = -EPERM; if (arg < 0 || arg > 1) return -EINVAL; if (ata_id_has_dma(drive->id) == 0) goto out; if (drive->hwif->dma_ops == NULL) goto out; err = 0; if (arg) { if (ide_set_dma(drive)) err = -EIO; } else ide_dma_off(drive); out: return err; #else if (arg < 0 || arg > 1) return -EINVAL; return -EPERM; #endif } /* * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away */ static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio) { switch (req_pio) { case 202: case 201: case 200: case 102: case 101: case 100: return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0; case 9: case 8: return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0; case 7: case 6: return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 
1 : 0; default: return 0; } } static int set_pio_mode(ide_drive_t *drive, int arg) { ide_hwif_t *hwif = drive->hwif; const struct ide_port_ops *port_ops = hwif->port_ops; if (arg < 0 || arg > 255) return -EINVAL; if (port_ops == NULL || port_ops->set_pio_mode == NULL || (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)) return -ENOSYS; if (set_pio_mode_abuse(drive->hwif, arg)) { drive->pio_mode = arg + XFER_PIO_0; if (arg == 8 || arg == 9) { unsigned long flags; /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */ spin_lock_irqsave(&hwif->lock, flags); port_ops->set_pio_mode(hwif, drive); spin_unlock_irqrestore(&hwif->lock, flags); } else port_ops->set_pio_mode(hwif, drive); } else { int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); ide_set_pio(drive, arg); if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) { if (keep_dma) ide_dma_on(drive); } } return 0; } ide_devset_get_flag(unmaskirq, IDE_DFLAG_UNMASK); static int set_unmaskirq(ide_drive_t *drive, int arg) { if (drive->dev_flags & IDE_DFLAG_NO_UNMASK) return -EPERM; if (arg < 0 || arg > 1) return -EINVAL; if (arg) drive->dev_flags |= IDE_DFLAG_UNMASK; else drive->dev_flags &= ~IDE_DFLAG_UNMASK; return 0; } ide_ext_devset_rw_sync(io_32bit, io_32bit); ide_ext_devset_rw_sync(keepsettings, ksettings); ide_ext_devset_rw_sync(unmaskirq, unmaskirq); ide_ext_devset_rw_sync(using_dma, using_dma); __IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode); int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting, int arg) { struct request_queue *q = drive->queue; struct request *rq; int ret = 0; if (!(setting->flags & DS_SYNC)) return setting->set(drive, arg); rq = blk_get_request(q, READ, __GFP_WAIT); rq->cmd_type = REQ_TYPE_SPECIAL; rq->cmd_len = 5; rq->cmd[0] = REQ_DEVSET_EXEC; *(int *)&rq->cmd[1] = arg; rq->special = setting->set; if (blk_execute_rq(q, NULL, rq, 0)) ret = rq->errors; blk_put_request(rq); return ret; } ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq) { int 
err, (*setfunc)(ide_drive_t *, int) = rq->special; err = setfunc(drive, *(int *)&rq->cmd[1]); if (err) rq->errors = err; ide_complete_rq(drive, err, blk_rq_bytes(rq)); return ide_stopped; }
gpl-2.0
Flipkart/linux
arch/score/mm/pgtable.c
13709
1690
/* * arch/score/mm/pgtable-32.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Lennox Wu <lennox.wu@sunplusct.com> * Chen Liqin <liqin.chen@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/bootmem.h> #include <linux/init.h> #include <linux/pfn.h> #include <linux/mm.h> void pgd_init(unsigned long page) { unsigned long *p = (unsigned long *) page; int i; for (i = 0; i < USER_PTRS_PER_PGD; i += 8) { p[i + 0] = (unsigned long) invalid_pte_table; p[i + 1] = (unsigned long) invalid_pte_table; p[i + 2] = (unsigned long) invalid_pte_table; p[i + 3] = (unsigned long) invalid_pte_table; p[i + 4] = (unsigned long) invalid_pte_table; p[i + 5] = (unsigned long) invalid_pte_table; p[i + 6] = (unsigned long) invalid_pte_table; p[i + 7] = (unsigned long) invalid_pte_table; } } void __init pagetable_init(void) { /* Initialize the entire pgd. */ pgd_init((unsigned long)swapper_pg_dir); }
gpl-2.0
christianeisendle/linux
drivers/cpufreq/arm_big_little.c
142
16322
/* * ARM big.LITTLE Platforms CPUFreq support * * Copyright (C) 2013 ARM Ltd. * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> * * Copyright (C) 2013 Linaro. * Viresh Kumar <viresh.kumar@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/clk.h> #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/cpumask.h> #include <linux/export.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of_platform.h> #include <linux/pm_opp.h> #include <linux/slab.h> #include <linux/topology.h> #include <linux/types.h> #include "arm_big_little.h" /* Currently we support only two clusters */ #define A15_CLUSTER 0 #define A7_CLUSTER 1 #define MAX_CLUSTERS 2 #ifdef CONFIG_BL_SWITCHER #include <asm/bL_switcher.h> static bool bL_switching_enabled; #define is_bL_switching_enabled() bL_switching_enabled #define set_switching_enabled(x) (bL_switching_enabled = (x)) #else #define is_bL_switching_enabled() false #define set_switching_enabled(x) do { } while (0) #define bL_switch_request(...) do { } while (0) #define bL_switcher_put_enabled() do { } while (0) #define bL_switcher_get_enabled() do { } while (0) #endif #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq) #define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? 
freq >> 1 : freq) static struct cpufreq_arm_bL_ops *arm_bL_ops; static struct clk *clk[MAX_CLUSTERS]; static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1]; static atomic_t cluster_usage[MAX_CLUSTERS + 1]; static unsigned int clk_big_min; /* (Big) clock frequencies */ static unsigned int clk_little_max; /* Maximum clock frequency (Little) */ static DEFINE_PER_CPU(unsigned int, physical_cluster); static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq); static struct mutex cluster_lock[MAX_CLUSTERS]; static inline int raw_cpu_to_cluster(int cpu) { return topology_physical_package_id(cpu); } static inline int cpu_to_cluster(int cpu) { return is_bL_switching_enabled() ? MAX_CLUSTERS : raw_cpu_to_cluster(cpu); } static unsigned int find_cluster_maxfreq(int cluster) { int j; u32 max_freq = 0, cpu_freq; for_each_online_cpu(j) { cpu_freq = per_cpu(cpu_last_req_freq, j); if ((cluster == per_cpu(physical_cluster, j)) && (max_freq < cpu_freq)) max_freq = cpu_freq; } pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster, max_freq); return max_freq; } static unsigned int clk_get_cpu_rate(unsigned int cpu) { u32 cur_cluster = per_cpu(physical_cluster, cpu); u32 rate = clk_get_rate(clk[cur_cluster]) / 1000; /* For switcher we use virtual A7 clock rates */ if (is_bL_switching_enabled()) rate = VIRT_FREQ(cur_cluster, rate); pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu, cur_cluster, rate); return rate; } static unsigned int bL_cpufreq_get_rate(unsigned int cpu) { if (is_bL_switching_enabled()) { pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq, cpu)); return per_cpu(cpu_last_req_freq, cpu); } else { return clk_get_cpu_rate(cpu); } } static unsigned int bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate) { u32 new_rate, prev_rate; int ret; bool bLs = is_bL_switching_enabled(); mutex_lock(&cluster_lock[new_cluster]); if (bLs) { prev_rate = per_cpu(cpu_last_req_freq, cpu); per_cpu(cpu_last_req_freq, cpu) = rate; 
per_cpu(physical_cluster, cpu) = new_cluster; new_rate = find_cluster_maxfreq(new_cluster); new_rate = ACTUAL_FREQ(new_cluster, new_rate); } else { new_rate = rate; } pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n", __func__, cpu, old_cluster, new_cluster, new_rate); ret = clk_set_rate(clk[new_cluster], new_rate * 1000); if (!ret) { /* * FIXME: clk_set_rate hasn't returned an error here however it * may be that clk_change_rate failed due to hardware or * firmware issues and wasn't able to report that due to the * current design of the clk core layer. To work around this * problem we will read back the clock rate and check it is * correct. This needs to be removed once clk core is fixed. */ if (clk_get_rate(clk[new_cluster]) != new_rate * 1000) ret = -EIO; } if (WARN_ON(ret)) { pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret, new_cluster); if (bLs) { per_cpu(cpu_last_req_freq, cpu) = prev_rate; per_cpu(physical_cluster, cpu) = old_cluster; } mutex_unlock(&cluster_lock[new_cluster]); return ret; } mutex_unlock(&cluster_lock[new_cluster]); /* Recalc freq for old cluster when switching clusters */ if (old_cluster != new_cluster) { pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n", __func__, cpu, old_cluster, new_cluster); /* Switch cluster */ bL_switch_request(cpu, new_cluster); mutex_lock(&cluster_lock[old_cluster]); /* Set freq of old cluster if there are cpus left on it */ new_rate = find_cluster_maxfreq(old_cluster); new_rate = ACTUAL_FREQ(old_cluster, new_rate); if (new_rate) { pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n", __func__, old_cluster, new_rate); if (clk_set_rate(clk[old_cluster], new_rate * 1000)) pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n", __func__, ret, old_cluster); } mutex_unlock(&cluster_lock[old_cluster]); } return 0; } /* Set clock frequency */ static int bL_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) { u32 cpu = policy->cpu, cur_cluster, new_cluster, 
actual_cluster; unsigned int freqs_new; cur_cluster = cpu_to_cluster(cpu); new_cluster = actual_cluster = per_cpu(physical_cluster, cpu); freqs_new = freq_table[cur_cluster][index].frequency; if (is_bL_switching_enabled()) { if ((actual_cluster == A15_CLUSTER) && (freqs_new < clk_big_min)) { new_cluster = A7_CLUSTER; } else if ((actual_cluster == A7_CLUSTER) && (freqs_new > clk_little_max)) { new_cluster = A15_CLUSTER; } } return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new); } static inline u32 get_table_count(struct cpufreq_frequency_table *table) { int count; for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++) ; return count; } /* get the minimum frequency in the cpufreq_frequency_table */ static inline u32 get_table_min(struct cpufreq_frequency_table *table) { struct cpufreq_frequency_table *pos; uint32_t min_freq = ~0; cpufreq_for_each_entry(pos, table) if (pos->frequency < min_freq) min_freq = pos->frequency; return min_freq; } /* get the maximum frequency in the cpufreq_frequency_table */ static inline u32 get_table_max(struct cpufreq_frequency_table *table) { struct cpufreq_frequency_table *pos; uint32_t max_freq = 0; cpufreq_for_each_entry(pos, table) if (pos->frequency > max_freq) max_freq = pos->frequency; return max_freq; } static int merge_cluster_tables(void) { int i, j, k = 0, count = 1; struct cpufreq_frequency_table *table; for (i = 0; i < MAX_CLUSTERS; i++) count += get_table_count(freq_table[i]); table = kzalloc(sizeof(*table) * count, GFP_KERNEL); if (!table) return -ENOMEM; freq_table[MAX_CLUSTERS] = table; /* Add in reverse order to get freqs in increasing order */ for (i = MAX_CLUSTERS - 1; i >= 0; i--) { for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END; j++) { table[k].frequency = VIRT_FREQ(i, freq_table[i][j].frequency); pr_debug("%s: index: %d, freq: %d\n", __func__, k, table[k].frequency); k++; } } table[k].driver_data = k; table[k].frequency = CPUFREQ_TABLE_END; pr_debug("%s: End, table: 
%p, count: %d\n", __func__, table, k); return 0; } static void _put_cluster_clk_and_freq_table(struct device *cpu_dev) { u32 cluster = raw_cpu_to_cluster(cpu_dev->id); if (!freq_table[cluster]) return; clk_put(clk[cluster]); dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); if (arm_bL_ops->free_opp_table) arm_bL_ops->free_opp_table(cpu_dev); dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); } static void put_cluster_clk_and_freq_table(struct device *cpu_dev) { u32 cluster = cpu_to_cluster(cpu_dev->id); int i; if (atomic_dec_return(&cluster_usage[cluster])) return; if (cluster < MAX_CLUSTERS) return _put_cluster_clk_and_freq_table(cpu_dev); for_each_present_cpu(i) { struct device *cdev = get_cpu_device(i); if (!cdev) { pr_err("%s: failed to get cpu%d device\n", __func__, i); return; } _put_cluster_clk_and_freq_table(cdev); } /* free virtual table */ kfree(freq_table[cluster]); } static int _get_cluster_clk_and_freq_table(struct device *cpu_dev) { u32 cluster = raw_cpu_to_cluster(cpu_dev->id); int ret; if (freq_table[cluster]) return 0; ret = arm_bL_ops->init_opp_table(cpu_dev); if (ret) { dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n", __func__, cpu_dev->id, ret); goto out; } ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]); if (ret) { dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n", __func__, cpu_dev->id, ret); goto free_opp_table; } clk[cluster] = clk_get(cpu_dev, NULL); if (!IS_ERR(clk[cluster])) { dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n", __func__, clk[cluster], freq_table[cluster], cluster); return 0; } dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n", __func__, cpu_dev->id, cluster); ret = PTR_ERR(clk[cluster]); dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); free_opp_table: if (arm_bL_ops->free_opp_table) arm_bL_ops->free_opp_table(cpu_dev); out: dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__, cluster); 
return ret; } static int get_cluster_clk_and_freq_table(struct device *cpu_dev) { u32 cluster = cpu_to_cluster(cpu_dev->id); int i, ret; if (atomic_inc_return(&cluster_usage[cluster]) != 1) return 0; if (cluster < MAX_CLUSTERS) { ret = _get_cluster_clk_and_freq_table(cpu_dev); if (ret) atomic_dec(&cluster_usage[cluster]); return ret; } /* * Get data for all clusters and fill virtual cluster with a merge of * both */ for_each_present_cpu(i) { struct device *cdev = get_cpu_device(i); if (!cdev) { pr_err("%s: failed to get cpu%d device\n", __func__, i); return -ENODEV; } ret = _get_cluster_clk_and_freq_table(cdev); if (ret) goto put_clusters; } ret = merge_cluster_tables(); if (ret) goto put_clusters; /* Assuming 2 cluster, set clk_big_min and clk_little_max */ clk_big_min = get_table_min(freq_table[0]); clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1])); pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n", __func__, cluster, clk_big_min, clk_little_max); return 0; put_clusters: for_each_present_cpu(i) { struct device *cdev = get_cpu_device(i); if (!cdev) { pr_err("%s: failed to get cpu%d device\n", __func__, i); return -ENODEV; } _put_cluster_clk_and_freq_table(cdev); } atomic_dec(&cluster_usage[cluster]); return ret; } /* Per-CPU initialization */ static int bL_cpufreq_init(struct cpufreq_policy *policy) { u32 cur_cluster = cpu_to_cluster(policy->cpu); struct device *cpu_dev; int ret; cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu); return -ENODEV; } ret = get_cluster_clk_and_freq_table(cpu_dev); if (ret) return ret; ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]); if (ret) { dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n", policy->cpu, cur_cluster); put_cluster_clk_and_freq_table(cpu_dev); return ret; } if (cur_cluster < MAX_CLUSTERS) { int cpu; cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); for_each_cpu(cpu, 
policy->cpus) per_cpu(physical_cluster, cpu) = cur_cluster; } else { /* Assumption: during init, we are always running on A15 */ per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER; } if (arm_bL_ops->get_transition_latency) policy->cpuinfo.transition_latency = arm_bL_ops->get_transition_latency(cpu_dev); else policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; if (is_bL_switching_enabled()) per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); return 0; } static int bL_cpufreq_exit(struct cpufreq_policy *policy) { struct device *cpu_dev; cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu); return -ENODEV; } put_cluster_clk_and_freq_table(cpu_dev); dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu); return 0; } static struct cpufreq_driver bL_cpufreq_driver = { .name = "arm-big-little", .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = cpufreq_generic_frequency_table_verify, .target_index = bL_cpufreq_set_target, .get = bL_cpufreq_get_rate, .init = bL_cpufreq_init, .exit = bL_cpufreq_exit, .attr = cpufreq_generic_attr, }; #ifdef CONFIG_BL_SWITCHER static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb, unsigned long action, void *_arg) { pr_debug("%s: action: %ld\n", __func__, action); switch (action) { case BL_NOTIFY_PRE_ENABLE: case BL_NOTIFY_PRE_DISABLE: cpufreq_unregister_driver(&bL_cpufreq_driver); break; case BL_NOTIFY_POST_ENABLE: set_switching_enabled(true); cpufreq_register_driver(&bL_cpufreq_driver); break; case BL_NOTIFY_POST_DISABLE: set_switching_enabled(false); cpufreq_register_driver(&bL_cpufreq_driver); break; default: return NOTIFY_DONE; } return NOTIFY_OK; } static struct notifier_block bL_switcher_notifier = { .notifier_call = bL_cpufreq_switcher_notifier, }; static int __bLs_register_notifier(void) { return 
bL_switcher_register_notifier(&bL_switcher_notifier); } static int __bLs_unregister_notifier(void) { return bL_switcher_unregister_notifier(&bL_switcher_notifier); } #else static int __bLs_register_notifier(void) { return 0; } static int __bLs_unregister_notifier(void) { return 0; } #endif int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) { int ret, i; if (arm_bL_ops) { pr_debug("%s: Already registered: %s, exiting\n", __func__, arm_bL_ops->name); return -EBUSY; } if (!ops || !strlen(ops->name) || !ops->init_opp_table) { pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__); return -ENODEV; } arm_bL_ops = ops; set_switching_enabled(bL_switcher_get_enabled()); for (i = 0; i < MAX_CLUSTERS; i++) mutex_init(&cluster_lock[i]); ret = cpufreq_register_driver(&bL_cpufreq_driver); if (ret) { pr_info("%s: Failed registering platform driver: %s, err: %d\n", __func__, ops->name, ret); arm_bL_ops = NULL; } else { ret = __bLs_register_notifier(); if (ret) { cpufreq_unregister_driver(&bL_cpufreq_driver); arm_bL_ops = NULL; } else { pr_info("%s: Registered platform driver: %s\n", __func__, ops->name); } } bL_switcher_put_enabled(); return ret; } EXPORT_SYMBOL_GPL(bL_cpufreq_register); void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops) { if (arm_bL_ops != ops) { pr_err("%s: Registered with: %s, can't unregister, exiting\n", __func__, arm_bL_ops->name); return; } bL_switcher_get_enabled(); __bLs_unregister_notifier(); cpufreq_unregister_driver(&bL_cpufreq_driver); bL_switcher_put_enabled(); pr_info("%s: Un-registered platform driver: %s\n", __func__, arm_bL_ops->name); arm_bL_ops = NULL; } EXPORT_SYMBOL_GPL(bL_cpufreq_unregister); MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
wanghao-xznu/linux-2.6-imx
sound/pci/hda/hda_generic.c
142
140540
/* * Universal Interface for Intel High Definition Audio Codec * * Generic widget tree parser * * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de> * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/sort.h> #include <linux/delay.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/bitops.h> #include <sound/core.h> #include <sound/jack.h> #include "hda_codec.h" #include "hda_local.h" #include "hda_auto_parser.h" #include "hda_jack.h" #include "hda_beep.h" #include "hda_generic.h" /* initialize hda_gen_spec struct */ int snd_hda_gen_spec_init(struct hda_gen_spec *spec) { snd_array_init(&spec->kctls, sizeof(struct snd_kcontrol_new), 32); snd_array_init(&spec->paths, sizeof(struct nid_path), 8); snd_array_init(&spec->loopback_list, sizeof(struct hda_amp_list), 8); mutex_init(&spec->pcm_mutex); return 0; } EXPORT_SYMBOL_HDA(snd_hda_gen_spec_init); struct snd_kcontrol_new * snd_hda_gen_add_kctl(struct hda_gen_spec *spec, const char *name, const struct snd_kcontrol_new *temp) { struct snd_kcontrol_new *knew = snd_array_new(&spec->kctls); if (!knew) return NULL; *knew = *temp; if (name) knew->name = kstrdup(name, GFP_KERNEL); else if (knew->name) knew->name = kstrdup(knew->name, GFP_KERNEL); if (!knew->name) return NULL; return knew; 
} EXPORT_SYMBOL_HDA(snd_hda_gen_add_kctl); static void free_kctls(struct hda_gen_spec *spec) { if (spec->kctls.list) { struct snd_kcontrol_new *kctl = spec->kctls.list; int i; for (i = 0; i < spec->kctls.used; i++) kfree(kctl[i].name); } snd_array_free(&spec->kctls); } void snd_hda_gen_spec_free(struct hda_gen_spec *spec) { if (!spec) return; free_kctls(spec); snd_array_free(&spec->paths); snd_array_free(&spec->loopback_list); } EXPORT_SYMBOL_HDA(snd_hda_gen_spec_free); /* * store user hints */ static void parse_user_hints(struct hda_codec *codec) { struct hda_gen_spec *spec = codec->spec; int val; val = snd_hda_get_bool_hint(codec, "jack_detect"); if (val >= 0) codec->no_jack_detect = !val; val = snd_hda_get_bool_hint(codec, "inv_jack_detect"); if (val >= 0) codec->inv_jack_detect = !!val; val = snd_hda_get_bool_hint(codec, "trigger_sense"); if (val >= 0) codec->no_trigger_sense = !val; val = snd_hda_get_bool_hint(codec, "inv_eapd"); if (val >= 0) codec->inv_eapd = !!val; val = snd_hda_get_bool_hint(codec, "pcm_format_first"); if (val >= 0) codec->pcm_format_first = !!val; val = snd_hda_get_bool_hint(codec, "sticky_stream"); if (val >= 0) codec->no_sticky_stream = !val; val = snd_hda_get_bool_hint(codec, "spdif_status_reset"); if (val >= 0) codec->spdif_status_reset = !!val; val = snd_hda_get_bool_hint(codec, "pin_amp_workaround"); if (val >= 0) codec->pin_amp_workaround = !!val; val = snd_hda_get_bool_hint(codec, "single_adc_amp"); if (val >= 0) codec->single_adc_amp = !!val; val = snd_hda_get_bool_hint(codec, "auto_mute"); if (val >= 0) spec->suppress_auto_mute = !val; val = snd_hda_get_bool_hint(codec, "auto_mic"); if (val >= 0) spec->suppress_auto_mic = !val; val = snd_hda_get_bool_hint(codec, "line_in_auto_switch"); if (val >= 0) spec->line_in_auto_switch = !!val; val = snd_hda_get_bool_hint(codec, "need_dac_fix"); if (val >= 0) spec->need_dac_fix = !!val; val = snd_hda_get_bool_hint(codec, "primary_hp"); if (val >= 0) spec->no_primary_hp = !val; val = 
snd_hda_get_bool_hint(codec, "multi_cap_vol"); if (val >= 0) spec->multi_cap_vol = !!val; val = snd_hda_get_bool_hint(codec, "inv_dmic_split"); if (val >= 0) spec->inv_dmic_split = !!val; val = snd_hda_get_bool_hint(codec, "indep_hp"); if (val >= 0) spec->indep_hp = !!val; val = snd_hda_get_bool_hint(codec, "add_stereo_mix_input"); if (val >= 0) spec->add_stereo_mix_input = !!val; /* the following two are just for compatibility */ val = snd_hda_get_bool_hint(codec, "add_out_jack_modes"); if (val >= 0) spec->add_jack_modes = !!val; val = snd_hda_get_bool_hint(codec, "add_in_jack_modes"); if (val >= 0) spec->add_jack_modes = !!val; val = snd_hda_get_bool_hint(codec, "add_jack_modes"); if (val >= 0) spec->add_jack_modes = !!val; val = snd_hda_get_bool_hint(codec, "power_down_unused"); if (val >= 0) spec->power_down_unused = !!val; val = snd_hda_get_bool_hint(codec, "add_hp_mic"); if (val >= 0) spec->hp_mic = !!val; val = snd_hda_get_bool_hint(codec, "hp_mic_detect"); if (val >= 0) spec->suppress_hp_mic_detect = !val; if (!snd_hda_get_int_hint(codec, "mixer_nid", &val)) spec->mixer_nid = val; } /* * pin control value accesses */ #define update_pin_ctl(codec, pin, val) \ snd_hda_codec_update_cache(codec, pin, 0, \ AC_VERB_SET_PIN_WIDGET_CONTROL, val) /* restore the pinctl based on the cached value */ static inline void restore_pin_ctl(struct hda_codec *codec, hda_nid_t pin) { update_pin_ctl(codec, pin, snd_hda_codec_get_pin_target(codec, pin)); } /* set the pinctl target value and write it if requested */ static void set_pin_target(struct hda_codec *codec, hda_nid_t pin, unsigned int val, bool do_write) { if (!pin) return; val = snd_hda_correct_pin_ctl(codec, pin, val); snd_hda_codec_set_pin_target(codec, pin, val); if (do_write) update_pin_ctl(codec, pin, val); } /* set pinctl target values for all given pins */ static void set_pin_targets(struct hda_codec *codec, int num_pins, hda_nid_t *pins, unsigned int val) { int i; for (i = 0; i < num_pins; i++) 
set_pin_target(codec, pins[i], val, false); } /* * parsing paths */ /* return the position of NID in the list, or -1 if not found */ static int find_idx_in_nid_list(hda_nid_t nid, const hda_nid_t *list, int nums) { int i; for (i = 0; i < nums; i++) if (list[i] == nid) return i; return -1; } /* return true if the given NID is contained in the path */ static bool is_nid_contained(struct nid_path *path, hda_nid_t nid) { return find_idx_in_nid_list(nid, path->path, path->depth) >= 0; } static struct nid_path *get_nid_path(struct hda_codec *codec, hda_nid_t from_nid, hda_nid_t to_nid, int anchor_nid) { struct hda_gen_spec *spec = codec->spec; int i; for (i = 0; i < spec->paths.used; i++) { struct nid_path *path = snd_array_elem(&spec->paths, i); if (path->depth <= 0) continue; if ((!from_nid || path->path[0] == from_nid) && (!to_nid || path->path[path->depth - 1] == to_nid)) { if (!anchor_nid || (anchor_nid > 0 && is_nid_contained(path, anchor_nid)) || (anchor_nid < 0 && !is_nid_contained(path, anchor_nid))) return path; } } return NULL; } /* get the path between the given NIDs; * passing 0 to either @pin or @dac behaves as a wildcard */ struct nid_path *snd_hda_get_nid_path(struct hda_codec *codec, hda_nid_t from_nid, hda_nid_t to_nid) { return get_nid_path(codec, from_nid, to_nid, 0); } EXPORT_SYMBOL_HDA(snd_hda_get_nid_path); /* get the index number corresponding to the path instance; * the index starts from 1, for easier checking the invalid value */ int snd_hda_get_path_idx(struct hda_codec *codec, struct nid_path *path) { struct hda_gen_spec *spec = codec->spec; struct nid_path *array = spec->paths.list; ssize_t idx; if (!spec->paths.used) return 0; idx = path - array; if (idx < 0 || idx >= spec->paths.used) return 0; return idx + 1; } EXPORT_SYMBOL_HDA(snd_hda_get_path_idx); /* get the path instance corresponding to the given index number */ struct nid_path *snd_hda_get_path_from_idx(struct hda_codec *codec, int idx) { struct hda_gen_spec *spec = codec->spec; if 
(idx <= 0 || idx > spec->paths.used) return NULL; return snd_array_elem(&spec->paths, idx - 1); } EXPORT_SYMBOL_HDA(snd_hda_get_path_from_idx); /* check whether the given DAC is already found in any existing paths */ static bool is_dac_already_used(struct hda_codec *codec, hda_nid_t nid) { struct hda_gen_spec *spec = codec->spec; int i; for (i = 0; i < spec->paths.used; i++) { struct nid_path *path = snd_array_elem(&spec->paths, i); if (path->path[0] == nid) return true; } return false; } /* check whether the given two widgets can be connected */ static bool is_reachable_path(struct hda_codec *codec, hda_nid_t from_nid, hda_nid_t to_nid) { if (!from_nid || !to_nid) return false; return snd_hda_get_conn_index(codec, to_nid, from_nid, true) >= 0; } /* nid, dir and idx */ #define AMP_VAL_COMPARE_MASK (0xffff | (1U << 18) | (0x0f << 19)) /* check whether the given ctl is already assigned in any path elements */ static bool is_ctl_used(struct hda_codec *codec, unsigned int val, int type) { struct hda_gen_spec *spec = codec->spec; int i; val &= AMP_VAL_COMPARE_MASK; for (i = 0; i < spec->paths.used; i++) { struct nid_path *path = snd_array_elem(&spec->paths, i); if ((path->ctls[type] & AMP_VAL_COMPARE_MASK) == val) return true; } return false; } /* check whether a control with the given (nid, dir, idx) was assigned */ static bool is_ctl_associated(struct hda_codec *codec, hda_nid_t nid, int dir, int idx, int type) { unsigned int val = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir); return is_ctl_used(codec, val, type); } static void print_nid_path(const char *pfx, struct nid_path *path) { char buf[40]; int i; buf[0] = 0; for (i = 0; i < path->depth; i++) { char tmp[4]; sprintf(tmp, ":%02x", path->path[i]); strlcat(buf, tmp, sizeof(buf)); } snd_printdd("%s path: depth=%d %s\n", pfx, path->depth, buf); } /* called recursively */ static bool __parse_nid_path(struct hda_codec *codec, hda_nid_t from_nid, hda_nid_t to_nid, int anchor_nid, struct nid_path *path, int depth) { const 
hda_nid_t *conn; int i, nums; if (to_nid == anchor_nid) anchor_nid = 0; /* anchor passed */ else if (to_nid == (hda_nid_t)(-anchor_nid)) return false; /* hit the exclusive nid */ nums = snd_hda_get_conn_list(codec, to_nid, &conn); for (i = 0; i < nums; i++) { if (conn[i] != from_nid) { /* special case: when from_nid is 0, * try to find an empty DAC */ if (from_nid || get_wcaps_type(get_wcaps(codec, conn[i])) != AC_WID_AUD_OUT || is_dac_already_used(codec, conn[i])) continue; } /* anchor is not requested or already passed? */ if (anchor_nid <= 0) goto found; } if (depth >= MAX_NID_PATH_DEPTH) return false; for (i = 0; i < nums; i++) { unsigned int type; type = get_wcaps_type(get_wcaps(codec, conn[i])); if (type == AC_WID_AUD_OUT || type == AC_WID_AUD_IN || type == AC_WID_PIN) continue; if (__parse_nid_path(codec, from_nid, conn[i], anchor_nid, path, depth + 1)) goto found; } return false; found: path->path[path->depth] = conn[i]; path->idx[path->depth + 1] = i; if (nums > 1 && get_wcaps_type(get_wcaps(codec, to_nid)) != AC_WID_AUD_MIX) path->multi[path->depth + 1] = 1; path->depth++; return true; } /* parse the widget path from the given nid to the target nid; * when @from_nid is 0, try to find an empty DAC; * when @anchor_nid is set to a positive value, only paths through the widget * with the given value are evaluated. * when @anchor_nid is set to a negative value, paths through the widget * with the negative of given value are excluded, only other paths are chosen. * when @anchor_nid is zero, no special handling about path selection. */ bool snd_hda_parse_nid_path(struct hda_codec *codec, hda_nid_t from_nid, hda_nid_t to_nid, int anchor_nid, struct nid_path *path) { if (__parse_nid_path(codec, from_nid, to_nid, anchor_nid, path, 1)) { path->path[path->depth] = to_nid; path->depth++; return true; } return false; } EXPORT_SYMBOL_HDA(snd_hda_parse_nid_path); /* * parse the path between the given NIDs and add to the path list. 
* if no valid path is found, return NULL */
struct nid_path *
snd_hda_add_new_path(struct hda_codec *codec, hda_nid_t from_nid,
		     hda_nid_t to_nid, int anchor_nid)
{
	struct hda_gen_spec *spec = codec->spec;
	struct nid_path *path;

	if (from_nid && to_nid && !is_reachable_path(codec, from_nid, to_nid))
		return NULL;

	/* check whether the path has been already added */
	path = get_nid_path(codec, from_nid, to_nid, anchor_nid);
	if (path)
		return path;

	path = snd_array_new(&spec->paths);
	if (!path)
		return NULL;
	memset(path, 0, sizeof(*path));
	if (snd_hda_parse_nid_path(codec, from_nid, to_nid, anchor_nid, path))
		return path;
	/* push back: parsing failed, drop the freshly allocated array slot */
	spec->paths.used--;
	return NULL;
}
EXPORT_SYMBOL_HDA(snd_hda_add_new_path);

/* clear the given path as invalid so that it won't be picked up later */
static void invalidate_nid_path(struct hda_codec *codec, int idx)
{
	struct nid_path *path = snd_hda_get_path_from_idx(codec, idx);
	if (!path)
		return;
	memset(path, 0, sizeof(*path));
}

/* look for an empty DAC slot reaching the given pin;
 * digital-capability of the DAC must match @is_digital
 */
static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin,
			      bool is_digital)
{
	struct hda_gen_spec *spec = codec->spec;
	bool cap_digital;
	int i;

	for (i = 0; i < spec->num_all_dacs; i++) {
		hda_nid_t nid = spec->all_dacs[i];
		if (!nid || is_dac_already_used(codec, nid))
			continue;
		cap_digital = !!(get_wcaps(codec, nid) & AC_WCAP_DIGITAL);
		if (is_digital != cap_digital)
			continue;
		if (is_reachable_path(codec, nid, pin))
			return nid;
	}
	return 0;
}

/* replace the channels in the composed amp value with the given number */
static unsigned int amp_val_replace_channels(unsigned int val, unsigned int chs)
{
	/* channel bits occupy bits 16-17 of the composed amp value */
	val &= ~(0x3U << 16);
	val |= chs << 16;
	return val;
}

/* check whether the widget has the given amp capability for the direction */
static bool check_amp_caps(struct hda_codec *codec, hda_nid_t nid,
			   int dir, unsigned int bits)
{
	if (!nid)
		return false;
	if (get_wcaps(codec, nid) & (1 << (dir + 1)))
		if (query_amp_caps(codec, nid, dir) & bits)
			return true;
	return false;
}

/* check whether two widgets have identical amp caps for the given direction */
static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1,
			  hda_nid_t nid2, int dir)
{
	if (!(get_wcaps(codec, nid1) & (1 << (dir + 1))))
		return !(get_wcaps(codec, nid2) & (1 << (dir + 1)));
	return (query_amp_caps(codec, nid1, dir) ==
		query_amp_caps(codec, nid2, dir));
}

#define nid_has_mute(codec, nid, dir) \
	check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
#define nid_has_volume(codec, nid, dir) \
	check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)

/* look for a widget suitable for assigning a mute switch in the path */
static hda_nid_t look_for_out_mute_nid(struct hda_codec *codec,
				       struct nid_path *path)
{
	int i;

	/* scan from the pin side backwards; input-amp mute is acceptable
	 * only for intermediate widgets (neither first nor last)
	 */
	for (i = path->depth - 1; i >= 0; i--) {
		if (nid_has_mute(codec, path->path[i], HDA_OUTPUT))
			return path->path[i];
		if (i != path->depth - 1 && i != 0 &&
		    nid_has_mute(codec, path->path[i], HDA_INPUT))
			return path->path[i];
	}
	return 0;
}

/* look for a widget suitable for assigning a volume ctl in the path */
static hda_nid_t look_for_out_vol_nid(struct hda_codec *codec,
				      struct nid_path *path)
{
	int i;

	for (i = path->depth - 1; i >= 0; i--) {
		if (nid_has_volume(codec, path->path[i], HDA_OUTPUT))
			return path->path[i];
	}
	return 0;
}

/*
 * path activation / deactivation
 */

/* can have the amp-in capability? */
static bool has_amp_in(struct hda_codec *codec, struct nid_path *path, int idx)
{
	hda_nid_t nid = path->path[idx];
	unsigned int caps = get_wcaps(codec, nid);
	unsigned int type = get_wcaps_type(caps);

	if (!(caps & AC_WCAP_IN_AMP))
		return false;
	if (type == AC_WID_PIN && idx > 0) /* only for input pins */
		return false;
	return true;
}

/* can have the amp-out capability?
 */
static bool has_amp_out(struct hda_codec *codec, struct nid_path *path, int idx)
{
	hda_nid_t nid = path->path[idx];
	unsigned int caps = get_wcaps(codec, nid);
	unsigned int type = get_wcaps_type(caps);

	if (!(caps & AC_WCAP_OUT_AMP))
		return false;
	if (type == AC_WID_PIN && !idx) /* only for output pins */
		return false;
	return true;
}

/* check whether the given (nid,dir,idx) is active */
static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid,
			  unsigned int dir, unsigned int idx)
{
	struct hda_gen_spec *spec = codec->spec;
	int i, n;

	for (n = 0; n < spec->paths.used; n++) {
		struct nid_path *path = snd_array_elem(&spec->paths, n);
		if (!path->active)
			continue;
		for (i = 0; i < path->depth; i++) {
			if (path->path[i] == nid) {
				/* for HDA_OUTPUT any appearance counts;
				 * for input direction the idx must match
				 */
				if (dir == HDA_OUTPUT || path->idx[i] == idx)
					return true;
				break;
			}
		}
	}
	return false;
}

/* check whether the NID is referred by any active paths */
#define is_active_nid_for_any(codec, nid) \
	is_active_nid(codec, nid, HDA_OUTPUT, 0)

/* get the default amp value for the target state */
static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
				   int dir, unsigned int caps, bool enable)
{
	unsigned int val = 0;

	if (caps & AC_AMPCAP_NUM_STEPS) {
		/* set to 0dB */
		if (enable)
			val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT;
	}
	if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
		if (!enable)
			val |= HDA_AMP_MUTE;
	}
	return val;
}

/* initialize the amp value (only at the first time) */
static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
{
	unsigned int caps = query_amp_caps(codec, nid, dir);
	int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
	snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
}

/* calculate amp value mask we can modify;
 * if the given amp is controlled by mixers, don't touch it
 */
static unsigned int get_amp_mask_to_modify(struct hda_codec *codec,
					   hda_nid_t nid, int dir, int idx,
					   unsigned int caps)
{
	unsigned int mask = 0xff;

	if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
		/* bit 7 = mute bit, owned by a mute mixer ctl if assigned */
		if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL))
			mask &= ~0x80;
	}
	if (caps & AC_AMPCAP_NUM_STEPS) {
		/* bits 0-6 = volume steps, owned by a vol/boost ctl */
		if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_VOL_CTL) ||
		    is_ctl_associated(codec, nid, dir, idx, NID_PATH_BOOST_CTL))
			mask &= ~0x7f;
	}
	return mask;
}

/* update the amp of (nid,dir,idx) toward the enable/disable state,
 * touching only the bits not owned by user-visible mixer controls
 */
static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
			 int idx, int idx_to_check, bool enable)
{
	unsigned int caps;
	unsigned int mask, val;

	/* don't mute an amp still referenced by another active path */
	if (!enable && is_active_nid(codec, nid, dir, idx_to_check))
		return;

	caps = query_amp_caps(codec, nid, dir);
	val = get_amp_val_to_activate(codec, nid, dir, caps, enable);
	mask = get_amp_mask_to_modify(codec, nid, dir, idx_to_check, caps);
	if (!mask)
		return;

	val &= mask;
	snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val);
}

/* activate/deactivate the output amp of the i-th widget in the path */
static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
			     int i, bool enable)
{
	hda_nid_t nid = path->path[i];
	init_amp(codec, nid, HDA_OUTPUT, 0);
	activate_amp(codec, nid, HDA_OUTPUT, 0, 0, enable);
}

/* activate/deactivate the input amp(s) of the i-th widget in the path */
static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
			    int i, bool enable, bool add_aamix)
{
	struct hda_gen_spec *spec = codec->spec;
	const hda_nid_t *conn;
	int n, nums, idx;
	int type;
	hda_nid_t nid = path->path[i];

	nums = snd_hda_get_conn_list(codec, nid, &conn);
	type = get_wcaps_type(get_wcaps(codec, nid));
	if (type == AC_WID_PIN ||
	    (type == AC_WID_AUD_IN && codec->single_adc_amp)) {
		/* pins and single-amp ADCs have only one input amp */
		nums = 1;
		idx = 0;
	} else
		idx = path->idx[i];

	for (n = 0; n < nums; n++)
		init_amp(codec, nid, HDA_INPUT, n);

	/* here is a little bit tricky in comparison with activate_amp_out();
	 * when aa-mixer is available, we need to enable the path as well
	 */
	for (n = 0; n < nums; n++) {
		if (n != idx && (!add_aamix || conn[n] != spec->mixer_merge_nid))
			continue;
		activate_amp(codec, nid, HDA_INPUT, n, idx, enable);
	}
}

/* activate or deactivate the given path
 * if @add_aamix is set, enable the input from aa-mix NID as well (if any)
 */
void snd_hda_activate_path(struct hda_codec *codec,
struct nid_path *path, bool enable, bool add_aamix)
{
	struct hda_gen_spec *spec = codec->spec;
	int i;

	if (!enable)
		path->active = false;

	/* walk from the pin side back to the source widget */
	for (i = path->depth - 1; i >= 0; i--) {
		hda_nid_t nid = path->path[i];
		if (enable && spec->power_down_unused) {
			/* make sure the widget is powered up */
			if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D0))
				snd_hda_codec_write(codec, nid, 0,
						    AC_VERB_SET_POWER_STATE,
						    AC_PWRST_D0);
		}
		if (enable && path->multi[i])
			snd_hda_codec_write_cache(codec, nid, 0,
						  AC_VERB_SET_CONNECT_SEL,
						  path->idx[i]);
		if (has_amp_in(codec, path, i))
			activate_amp_in(codec, path, i, enable, add_aamix);
		if (has_amp_out(codec, path, i))
			activate_amp_out(codec, path, i, enable);
	}

	if (enable)
		path->active = true;
}
EXPORT_SYMBOL_HDA(snd_hda_activate_path);

/* if the given path is inactive, put widgets into D3 (only if suitable) */
static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path)
{
	struct hda_gen_spec *spec = codec->spec;
	bool changed = false;
	int i;

	if (!spec->power_down_unused || path->active)
		return;

	for (i = 0; i < path->depth; i++) {
		hda_nid_t nid = path->path[i];
		/* only power down widgets not used by any active path */
		if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3) &&
		    !is_active_nid_for_any(codec, nid)) {
			snd_hda_codec_write(codec, nid, 0,
					    AC_VERB_SET_POWER_STATE,
					    AC_PWRST_D3);
			changed = true;
		}
	}

	if (changed) {
		/* let the power state settle, then sync via a read-back */
		msleep(10);
		snd_hda_codec_read(codec, path->path[0], 0,
				   AC_VERB_GET_POWER_STATE, 0);
	}
}

/* turn on/off EAPD on the given pin */
static void set_pin_eapd(struct hda_codec *codec, hda_nid_t pin, bool enable)
{
	struct hda_gen_spec *spec = codec->spec;
	if (spec->own_eapd_ctl ||
	    !(snd_hda_query_pin_caps(codec, pin) & AC_PINCAP_EAPD))
		return;
	if (codec->inv_eapd)
		enable = !enable;
	if (spec->keep_eapd_on && !enable)
		return;
	snd_hda_codec_update_cache(codec, pin, 0,
				   AC_VERB_SET_EAPD_BTLENABLE,
				   enable ? 0x02 : 0x00);
}

/* re-initialize the path specified by the given path index */
static void resume_path_from_idx(struct hda_codec *codec, int path_idx)
{
	struct nid_path *path = snd_hda_get_path_from_idx(codec, path_idx);
	if (path)
		snd_hda_activate_path(codec, path, path->active, false);
}

/*
 * Helper functions for creating mixer ctl elements
 */

/* indices into control_templates[] below */
enum {
	HDA_CTL_WIDGET_VOL,
	HDA_CTL_WIDGET_MUTE,
	HDA_CTL_BIND_MUTE,
};
static const struct snd_kcontrol_new control_templates[] = {
	HDA_CODEC_VOLUME(NULL, 0, 0, 0),
	HDA_CODEC_MUTE(NULL, 0, 0, 0),
	HDA_BIND_MUTE(NULL, 0, 0, 0),
};

/* add dynamic controls from template */
static struct snd_kcontrol_new *
add_control(struct hda_gen_spec *spec, int type, const char *name,
	    int cidx, unsigned long val)
{
	struct snd_kcontrol_new *knew;

	knew = snd_hda_gen_add_kctl(spec, name, &control_templates[type]);
	if (!knew)
		return NULL;
	knew->index = cidx;
	if (get_amp_nid_(val))
		knew->subdevice = HDA_SUBDEV_AMP_FLAG;
	knew->private_value = val;
	return knew;
}

/* add a control whose name is composed as "<pfx> <dir> <sfx>" */
static int add_control_with_pfx(struct hda_gen_spec *spec, int type,
				const char *pfx, const char *dir,
				const char *sfx, int cidx, unsigned long val)
{
	char name[44];
	snprintf(name, sizeof(name), "%s %s %s", pfx, dir, sfx);
	if (!add_control(spec, type, name, cidx, val))
		return -ENOMEM;
	return 0;
}

#define add_pb_vol_ctrl(spec, type, pfx, val)			\
	add_control_with_pfx(spec, type, pfx, "Playback", "Volume", 0, val)
#define add_pb_sw_ctrl(spec, type, pfx, val)			\
	add_control_with_pfx(spec, type, pfx, "Playback", "Switch", 0, val)
#define __add_pb_vol_ctrl(spec, type, pfx, cidx, val)			\
	add_control_with_pfx(spec, type, pfx, "Playback", "Volume", cidx, val)
#define __add_pb_sw_ctrl(spec, type, pfx, cidx, val)			\
	add_control_with_pfx(spec, type, pfx, "Playback", "Switch", cidx, val)

/* create a playback volume ctl for the path's assigned volume widget */
static int add_vol_ctl(struct hda_codec *codec, const char *pfx, int cidx,
		       unsigned int chs, struct nid_path *path)
{
	unsigned int val;
	if (!path)
		return 0;
	val = path->ctls[NID_PATH_VOL_CTL];
	if (!val)
		return 0;
	val =
amp_val_replace_channels(val, chs);
	return __add_pb_vol_ctrl(codec->spec, HDA_CTL_WIDGET_VOL, pfx, cidx, val);
}

/* return the channel bits suitable for the given path->ctls[] */
static int get_default_ch_nums(struct hda_codec *codec, struct nid_path *path,
			       int type)
{
	int chs = 1; /* mono (left only) */
	if (path) {
		hda_nid_t nid = get_amp_nid_(path->ctls[type]);
		if (nid && (get_wcaps(codec, nid) & AC_WCAP_STEREO))
			chs = 3; /* stereo */
	}
	return chs;
}

/* create a volume ctl with the channel count taken from the widget caps */
static int add_stereo_vol(struct hda_codec *codec, const char *pfx, int cidx,
			  struct nid_path *path)
{
	int chs = get_default_ch_nums(codec, path, NID_PATH_VOL_CTL);
	return add_vol_ctl(codec, pfx, cidx, chs, path);
}

/* create a mute-switch for the given mixer widget;
 * if it has multiple sources (e.g. DAC and loopback), create a bind-mute
 */
static int add_sw_ctl(struct hda_codec *codec, const char *pfx, int cidx,
		      unsigned int chs, struct nid_path *path)
{
	unsigned int val;
	int type = HDA_CTL_WIDGET_MUTE;

	if (!path)
		return 0;
	val = path->ctls[NID_PATH_MUTE_CTL];
	if (!val)
		return 0;
	val = amp_val_replace_channels(val, chs);
	if (get_amp_direction_(val) == HDA_INPUT) {
		hda_nid_t nid = get_amp_nid_(val);
		int nums = snd_hda_get_num_conns(codec, nid);
		if (nums > 1) {
			/* bind-mute over all inputs; encode the input count
			 * in the upper bits of the private value
			 */
			type = HDA_CTL_BIND_MUTE;
			val |= nums << 19;
		}
	}
	return __add_pb_sw_ctrl(codec->spec, type, pfx, cidx, val);
}

/* create a mute switch with the channel count taken from the widget caps */
static int add_stereo_sw(struct hda_codec *codec, const char *pfx,
			 int cidx, struct nid_path *path)
{
	int chs = get_default_ch_nums(codec, path, NID_PATH_MUTE_CTL);
	return add_sw_ctl(codec, pfx, cidx, chs, path);
}

/* any ctl assigned to the path with the given index? */
static bool path_has_mixer(struct hda_codec *codec, int path_idx, int ctl_type)
{
	struct nid_path *path = snd_hda_get_path_from_idx(codec, path_idx);
	return path && path->ctls[ctl_type];
}

static const char * const channel_name[4] = {
	"Front", "Surround", "CLFE", "Side"
};

/* give some appropriate ctl name prefix for the given line out channel */
static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
				    int *index, int ctl_type)
{
	struct hda_gen_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;

	*index = 0;
	if (cfg->line_outs == 1 && !spec->multi_ios &&
	    !cfg->hp_outs && !cfg->speaker_outs)
		return spec->vmaster_mute.hook ? "PCM" : "Master";

	/* if there is really a single DAC used in the whole output paths,
	 * use it master (or "PCM" if a vmaster hook is present)
	 */
	if (spec->multiout.num_dacs == 1 && !spec->mixer_nid &&
	    !spec->multiout.hp_out_nid[0] && !spec->multiout.extra_out_nid[0])
		return spec->vmaster_mute.hook ? "PCM" : "Master";

	/* multi-io channels */
	if (ch >= cfg->line_outs)
		return channel_name[ch];

	switch (cfg->line_out_type) {
	case AUTO_PIN_SPEAKER_OUT:
		/* if the primary channel vol/mute is shared with HP volume,
		 * don't name it as Speaker
		 */
		if (!ch && cfg->hp_outs &&
		    !path_has_mixer(codec, spec->hp_paths[0], ctl_type))
			break;
		if (cfg->line_outs == 1)
			return "Speaker";
		if (cfg->line_outs == 2)
			return ch ?
"Bass Speaker" : "Speaker";
		break;
	case AUTO_PIN_HP_OUT:
		/* if the primary channel vol/mute is shared with spk volume,
		 * don't name it as Headphone
		 */
		if (!ch && cfg->speaker_outs &&
		    !path_has_mixer(codec, spec->speaker_paths[0], ctl_type))
			break;
		/* for multi-io case, only the primary out */
		if (ch && spec->multi_ios)
			break;
		*index = ch;
		return "Headphone";
	}

	/* for a single channel output, we don't have to name the channel */
	if (cfg->line_outs == 1 && !spec->multi_ios)
		return "PCM";

	if (ch >= ARRAY_SIZE(channel_name)) {
		snd_BUG();
		return "PCM";
	}

	return channel_name[ch];
}

/*
 * Parse output paths
 */

/* badness definition */
enum {
	/* No primary DAC is found for the main output */
	BAD_NO_PRIMARY_DAC = 0x10000,
	/* No DAC is found for the extra output */
	BAD_NO_DAC = 0x4000,
	/* No possible multi-ios */
	BAD_MULTI_IO = 0x120,
	/* No individual DAC for extra output */
	BAD_NO_EXTRA_DAC = 0x102,
	/* No individual DAC for extra surrounds */
	BAD_NO_EXTRA_SURR_DAC = 0x101,
	/* Primary DAC shared with main surrounds */
	BAD_SHARED_SURROUND = 0x100,
	/* No independent HP possible */
	BAD_NO_INDEP_HP = 0x10,
	/* Primary DAC shared with main CLFE */
	BAD_SHARED_CLFE = 0x10,
	/* Primary DAC shared with extra surrounds */
	BAD_SHARED_EXTRA_SURROUND = 0x10,
	/* Volume widget is shared */
	BAD_SHARED_VOL = 0x10,
};

/* look for widgets in the given path which are appropriate for
 * volume and mute controls, and assign the values to ctls[].
 *
 * When no appropriate widget is found in the path, the badness value
 * is incremented depending on the situation.  The function returns the
 * total badness for both volume and mute controls.
 */
static int assign_out_path_ctls(struct hda_codec *codec, struct nid_path *path)
{
	hda_nid_t nid;
	unsigned int val;
	int badness = 0;

	if (!path)
		return BAD_SHARED_VOL * 2;

	if (path->ctls[NID_PATH_VOL_CTL] ||
	    path->ctls[NID_PATH_MUTE_CTL])
		return 0; /* already evaluated */

	nid = look_for_out_vol_nid(codec, path);
	if (nid) {
		val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
		if (is_ctl_used(codec, val, NID_PATH_VOL_CTL))
			badness += BAD_SHARED_VOL;
		else
			path->ctls[NID_PATH_VOL_CTL] = val;
	} else
		badness += BAD_SHARED_VOL;
	nid = look_for_out_mute_nid(codec, path);
	if (nid) {
		unsigned int wid_type = get_wcaps_type(get_wcaps(codec, nid));
		/* pins and audio-outs mute via the output amp; other
		 * widget types mute via the input amp
		 */
		if (wid_type == AC_WID_PIN || wid_type == AC_WID_AUD_OUT ||
		    nid_has_mute(codec, nid, HDA_OUTPUT))
			val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
		else
			val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT);
		if (is_ctl_used(codec, val, NID_PATH_MUTE_CTL))
			badness += BAD_SHARED_VOL;
		else
			path->ctls[NID_PATH_MUTE_CTL] = val;
	} else
		badness += BAD_SHARED_VOL;
	return badness;
}

const struct badness_table hda_main_out_badness = {
	.no_primary_dac = BAD_NO_PRIMARY_DAC,
	.no_dac = BAD_NO_DAC,
	.shared_primary = BAD_NO_PRIMARY_DAC,
	.shared_surr = BAD_SHARED_SURROUND,
	.shared_clfe = BAD_SHARED_CLFE,
	.shared_surr_main = BAD_SHARED_SURROUND,
};
EXPORT_SYMBOL_HDA(hda_main_out_badness);

const struct badness_table hda_extra_out_badness = {
	.no_primary_dac = BAD_NO_DAC,
	.no_dac = BAD_NO_DAC,
	.shared_primary = BAD_NO_EXTRA_DAC,
	.shared_surr = BAD_SHARED_EXTRA_SURROUND,
	.shared_clfe = BAD_SHARED_EXTRA_SURROUND,
	.shared_surr_main = BAD_NO_EXTRA_SURR_DAC,
};
EXPORT_SYMBOL_HDA(hda_extra_out_badness);

/* get the DAC of the primary output corresponding to the given array index */
static hda_nid_t get_primary_out(struct hda_codec *codec, int idx)
{
	struct hda_gen_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;

	if (cfg->line_outs > idx)
		return spec->private_dac_nids[idx];
	/* indices beyond line_outs refer to multi-io slots */
	idx -= cfg->line_outs;
	if (spec->multi_ios > idx)
		return
spec->multi_io[idx].dac;
	return 0;
}

/* return the DAC if it's reachable, otherwise zero */
static inline hda_nid_t try_dac(struct hda_codec *codec,
				hda_nid_t dac, hda_nid_t pin)
{
	return is_reachable_path(codec, dac, pin) ? dac : 0;
}

/* try to assign DACs to pins and return the resultant badness */
static int try_assign_dacs(struct hda_codec *codec, int num_outs,
			   const hda_nid_t *pins, hda_nid_t *dacs,
			   int *path_idx,
			   const struct badness_table *bad)
{
	struct hda_gen_spec *spec = codec->spec;
	int i, j;
	int badness = 0;
	hda_nid_t dac;

	if (!num_outs)
		return 0;

	for (i = 0; i < num_outs; i++) {
		struct nid_path *path;
		hda_nid_t pin = pins[i];

		path = snd_hda_get_path_from_idx(codec, path_idx[i]);
		if (path) {
			/* a path was already set up for this pin */
			badness += assign_out_path_ctls(codec, path);
			continue;
		}

		dacs[i] = look_for_dac(codec, pin, false);
		if (!dacs[i] && !i) {
			/* try to steal the DAC of surrounds for the front */
			for (j = 1; j < num_outs; j++) {
				if (is_reachable_path(codec, dacs[j], pin)) {
					dacs[0] = dacs[j];
					dacs[j] = 0;
					invalidate_nid_path(codec, path_idx[j]);
					path_idx[j] = 0;
					break;
				}
			}
		}
		dac = dacs[i];
		if (!dac) {
			/* no dedicated DAC: try sharing another output's DAC,
			 * adding the corresponding badness penalty
			 */
			if (num_outs > 2)
				dac = try_dac(codec, get_primary_out(codec, i), pin);
			if (!dac)
				dac = try_dac(codec, dacs[0], pin);
			if (!dac)
				dac = try_dac(codec, get_primary_out(codec, i), pin);
			if (dac) {
				if (!i)
					badness += bad->shared_primary;
				else if (i == 1)
					badness += bad->shared_surr;
				else
					badness += bad->shared_clfe;
			} else if (is_reachable_path(codec,
						     spec->private_dac_nids[0],
						     pin)) {
				dac = spec->private_dac_nids[0];
				badness += bad->shared_surr_main;
			} else if (!i)
				badness += bad->no_primary_dac;
			else
				badness += bad->no_dac;
		}
		if (!dac)
			continue;
		path = snd_hda_add_new_path(codec, dac, pin, -spec->mixer_nid);
		if (!path && !i && spec->mixer_nid) {
			/* try with aamix */
			path = snd_hda_add_new_path(codec, dac, pin, 0);
		}
		if (!path) {
			dac = dacs[i] = 0;
			badness += bad->no_dac;
		} else {
			/* print_nid_path("output", path); */
			path->active = true;
			path_idx[i] = snd_hda_get_path_idx(codec, path);
			badness += assign_out_path_ctls(codec, path);
		}
	}

	return badness;
}

/* return NID if the given pin has only a single connection to a certain DAC */
static hda_nid_t get_dac_if_single(struct hda_codec *codec, hda_nid_t pin)
{
	struct hda_gen_spec *spec = codec->spec;
	int i;
	hda_nid_t nid_found = 0;

	for (i = 0; i < spec->num_all_dacs; i++) {
		hda_nid_t nid = spec->all_dacs[i];
		if (!nid || is_dac_already_used(codec, nid))
			continue;
		if (is_reachable_path(codec, nid, pin)) {
			if (nid_found)
				return 0; /* more than one: not single */
			nid_found = nid;
		}
	}
	return nid_found;
}

/* check whether the given pin can be a multi-io pin */
static bool can_be_multiio_pin(struct hda_codec *codec,
			       unsigned int location, hda_nid_t nid)
{
	unsigned int defcfg, caps;

	defcfg = snd_hda_codec_get_pincfg(codec, nid);
	if (get_defcfg_connect(defcfg) != AC_JACK_PORT_COMPLEX)
		return false;
	if (location && get_defcfg_location(defcfg) != location)
		return false;
	caps = snd_hda_query_pin_caps(codec, nid);
	if (!(caps & AC_PINCAP_OUT))
		return false;
	return true;
}

/* count the number of input pins that are capable to be multi-io */
static int count_multiio_pins(struct hda_codec *codec, hda_nid_t reference_pin)
{
	struct hda_gen_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	unsigned int defcfg = snd_hda_codec_get_pincfg(codec, reference_pin);
	unsigned int location = get_defcfg_location(defcfg);
	int type, i;
	int num_pins = 0;

	for (type = AUTO_PIN_LINE_IN; type >= AUTO_PIN_MIC; type--) {
		for (i = 0; i < cfg->num_inputs; i++) {
			if (cfg->inputs[i].type != type)
				continue;
			if (can_be_multiio_pin(codec, location,
					       cfg->inputs[i].pin))
				num_pins++;
		}
	}
	return num_pins;
}

/*
 * multi-io helper
 *
 * When hardwired is set, try to fill only hardwired pins, and returns
 * zero if any pins are filled, non-zero if nothing found.
 * When hardwired is off, try to fill possible input pins, and returns
 * the badness value.
 */
static int fill_multi_ios(struct hda_codec *codec,
			  hda_nid_t reference_pin,
			  bool hardwired)
{
	struct hda_gen_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int type, i, j, num_pins, old_pins;
	unsigned int defcfg = snd_hda_codec_get_pincfg(codec, reference_pin);
	unsigned int location = get_defcfg_location(defcfg);
	int badness = 0;
	struct nid_path *path;

	old_pins = spec->multi_ios;
	if (old_pins >= 2)
		goto end_fill;

	num_pins = count_multiio_pins(codec, reference_pin);
	if (num_pins < 2)
		goto end_fill;

	for (type = AUTO_PIN_LINE_IN; type >= AUTO_PIN_MIC; type--) {
		for (i = 0; i < cfg->num_inputs; i++) {
			hda_nid_t nid = cfg->inputs[i].pin;
			hda_nid_t dac = 0;

			if (cfg->inputs[i].type != type)
				continue;
			if (!can_be_multiio_pin(codec, location, nid))
				continue;
			/* skip pins already registered as multi-io */
			for (j = 0; j < spec->multi_ios; j++) {
				if (nid == spec->multi_io[j].pin)
					break;
			}
			if (j < spec->multi_ios)
				continue;

			if (hardwired)
				dac = get_dac_if_single(codec, nid);
			else if (!dac)
				dac = look_for_dac(codec, nid, false);
			if (!dac) {
				badness++;
				continue;
			}
			path = snd_hda_add_new_path(codec, dac, nid,
						    -spec->mixer_nid);
			if (!path) {
				badness++;
				continue;
			}
			/* print_nid_path("multiio", path); */
			spec->multi_io[spec->multi_ios].pin = nid;
			spec->multi_io[spec->multi_ios].dac = dac;
			spec->out_paths[cfg->line_outs + spec->multi_ios] =
				snd_hda_get_path_idx(codec, path);
			spec->multi_ios++;
			if (spec->multi_ios >= 2)
				break;
		}
	}
 end_fill:
	if (badness)
		badness = BAD_MULTI_IO;
	if (old_pins == spec->multi_ios) {
		if (hardwired)
			return 1; /* nothing found */
		else
			return badness; /* no badness if nothing found */
	}
	if (!hardwired && spec->multi_ios < 2) {
		/* cancel newly assigned paths */
		spec->paths.used -= spec->multi_ios - old_pins;
		spec->multi_ios = old_pins;
		return badness;
	}

	/* assign volume and mute controls */
	for (i = old_pins; i < spec->multi_ios; i++) {
		path = snd_hda_get_path_from_idx(codec,
						 spec->out_paths[cfg->line_outs + i]);
		badness += assign_out_path_ctls(codec, path);
	}

	return badness;
}

/*
map DACs for all pins in the list if they are single connections */
static bool map_singles(struct hda_codec *codec, int outs,
			const hda_nid_t *pins, hda_nid_t *dacs, int *path_idx)
{
	struct hda_gen_spec *spec = codec->spec;
	int i;
	bool found = false;
	for (i = 0; i < outs; i++) {
		struct nid_path *path;
		hda_nid_t dac;
		if (dacs[i])
			continue;
		dac = get_dac_if_single(codec, pins[i]);
		if (!dac)
			continue;
		path = snd_hda_add_new_path(codec, dac, pins[i],
					    -spec->mixer_nid);
		if (!path && !i && spec->mixer_nid)
			path = snd_hda_add_new_path(codec, dac, pins[i], 0);
		if (path) {
			dacs[i] = dac;
			found = true;
			/* print_nid_path("output", path); */
			path->active = true;
			path_idx[i] = snd_hda_get_path_idx(codec, path);
		}
	}
	return found;
}

/* create a new path including aamix if available, and return its index */
static int check_aamix_out_path(struct hda_codec *codec, int path_idx)
{
	struct hda_gen_spec *spec = codec->spec;
	struct nid_path *path;
	hda_nid_t path_dac, dac, pin;

	path = snd_hda_get_path_from_idx(codec, path_idx);
	if (!path || !path->depth ||
	    is_nid_contained(path, spec->mixer_nid))
		return 0;
	path_dac = path->path[0];
	dac = spec->private_dac_nids[0];
	pin = path->path[path->depth - 1];
	path = snd_hda_add_new_path(codec, dac, pin, spec->mixer_nid);
	if (!path) {
		/* fall back to the original DAC or any extra-out DAC */
		if (dac != path_dac)
			dac = path_dac;
		else if (spec->multiout.hp_out_nid[0])
			dac = spec->multiout.hp_out_nid[0];
		else if (spec->multiout.extra_out_nid[0])
			dac = spec->multiout.extra_out_nid[0];
		else
			dac = 0;
		if (dac)
			path = snd_hda_add_new_path(codec, dac, pin,
						    spec->mixer_nid);
	}
	if (!path)
		return 0;
	/* print_nid_path("output-aamix", path); */
	path->active = false; /* unused as default */
	return snd_hda_get_path_idx(codec, path);
}

/* check whether the independent HP is available with the current config */
static bool indep_hp_possible(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	struct nid_path *path;
	int i, idx;

	if (cfg->line_out_type == AUTO_PIN_HP_OUT)
		idx = spec->out_paths[0];
	else
		idx = spec->hp_paths[0];
	path = snd_hda_get_path_from_idx(codec, idx);
	if (!path)
		return false;

	/* assume no path conflicts unless aamix is involved */
	if (!spec->mixer_nid || !is_nid_contained(path, spec->mixer_nid))
		return true;

	/* check whether output paths contain aamix */
	for (i = 0; i < cfg->line_outs; i++) {
		if (spec->out_paths[i] == idx)
			break;
		path = snd_hda_get_path_from_idx(codec, spec->out_paths[i]);
		if (path && is_nid_contained(path, spec->mixer_nid))
			return false;
	}
	for (i = 0; i < cfg->speaker_outs; i++) {
		path = snd_hda_get_path_from_idx(codec, spec->speaker_paths[i]);
		if (path && is_nid_contained(path, spec->mixer_nid))
			return false;
	}

	return true;
}

/* fill the empty entries in the dac array for speaker/hp with the
 * shared dac pointed by the paths
 */
static void refill_shared_dacs(struct hda_codec *codec, int num_outs,
			       hda_nid_t *dacs, int *path_idx)
{
	struct nid_path *path;
	int i;

	for (i = 0; i < num_outs; i++) {
		if (dacs[i])
			continue;
		path = snd_hda_get_path_from_idx(codec, path_idx[i]);
		if (!path)
			continue;
		dacs[i] = path->path[0];
	}
}

/* fill in the dac_nids table from the parsed pin configuration */
static int fill_and_eval_dacs(struct hda_codec *codec,
			      bool fill_hardwired,
			      bool fill_mio_first)
{
	struct hda_gen_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int i, err, badness;

	/* set num_dacs once to full for look_for_dac() */
	spec->multiout.num_dacs = cfg->line_outs;
	spec->multiout.dac_nids = spec->private_dac_nids;
	memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
	memset(spec->multiout.hp_out_nid, 0, sizeof(spec->multiout.hp_out_nid));
	memset(spec->multiout.extra_out_nid, 0, sizeof(spec->multiout.extra_out_nid));
	spec->multi_ios = 0;
	snd_array_free(&spec->paths);

	/* clear path indices */
	memset(spec->out_paths, 0, sizeof(spec->out_paths));
	memset(spec->hp_paths, 0, sizeof(spec->hp_paths));
	memset(spec->speaker_paths, 0, sizeof(spec->speaker_paths));
	memset(spec->aamix_out_paths, 0, sizeof(spec->aamix_out_paths));
	memset(spec->digout_paths, 0, sizeof(spec->digout_paths));
	memset(spec->input_paths, 0, sizeof(spec->input_paths));
	memset(spec->loopback_paths, 0, sizeof(spec->loopback_paths));
	memset(&spec->digin_path, 0, sizeof(spec->digin_path));

	badness = 0;

	/* fill hard-wired DACs first; iterate until no new mapping is found,
	 * since mapping one pin may make another pin's DAC choice unique
	 */
	if (fill_hardwired) {
		bool mapped;
		do {
			mapped = map_singles(codec, cfg->line_outs,
					     cfg->line_out_pins,
					     spec->private_dac_nids,
					     spec->out_paths);
			mapped |= map_singles(codec, cfg->hp_outs,
					      cfg->hp_pins,
					      spec->multiout.hp_out_nid,
					      spec->hp_paths);
			mapped |= map_singles(codec, cfg->speaker_outs,
					      cfg->speaker_pins,
					      spec->multiout.extra_out_nid,
					      spec->speaker_paths);
			if (fill_mio_first && cfg->line_outs == 1 &&
			    cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
				err = fill_multi_ios(codec, cfg->line_out_pins[0], true);
				if (!err)
					mapped = true;
			}
		} while (mapped);
	}

	badness += try_assign_dacs(codec, cfg->line_outs, cfg->line_out_pins,
				   spec->private_dac_nids, spec->out_paths,
				   spec->main_out_badness);

	if (fill_mio_first &&
	    cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
		/* try to fill multi-io first */
		err = fill_multi_ios(codec, cfg->line_out_pins[0], false);
		if (err < 0)
			return err;
		/* we don't count badness at this stage yet */
	}

	if (cfg->line_out_type != AUTO_PIN_HP_OUT) {
		err = try_assign_dacs(codec, cfg->hp_outs, cfg->hp_pins,
				      spec->multiout.hp_out_nid,
				      spec->hp_paths,
				      spec->extra_out_badness);
		if (err < 0)
			return err;
		badness += err;
	}
	if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
		err = try_assign_dacs(codec, cfg->speaker_outs,
				      cfg->speaker_pins,
				      spec->multiout.extra_out_nid,
				      spec->speaker_paths,
				      spec->extra_out_badness);
		if (err < 0)
			return err;
		badness += err;
	}
	if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
		err = fill_multi_ios(codec, cfg->line_out_pins[0], false);
		if (err < 0)
			return err;
		badness += err;
	}

	if (spec->mixer_nid) {
		spec->aamix_out_paths[0] =
			check_aamix_out_path(codec, spec->out_paths[0]);
		if (cfg->line_out_type != AUTO_PIN_HP_OUT)
			spec->aamix_out_paths[1] =
				check_aamix_out_path(codec, spec->hp_paths[0]);
		if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
			spec->aamix_out_paths[2] =
				check_aamix_out_path(codec, spec->speaker_paths[0]);
	}

	if (cfg->hp_outs && cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
		if (count_multiio_pins(codec, cfg->hp_pins[0]) >= 2)
			spec->multi_ios = 1; /* give badness */

	/* re-count num_dacs and squash invalid entries */
	spec->multiout.num_dacs = 0;
	for (i = 0; i < cfg->line_outs; i++) {
		if (spec->private_dac_nids[i])
			spec->multiout.num_dacs++;
		else {
			memmove(spec->private_dac_nids + i,
				spec->private_dac_nids + i + 1,
				sizeof(hda_nid_t) * (cfg->line_outs - i - 1));
			spec->private_dac_nids[cfg->line_outs - 1] = 0;
		}
	}

	spec->ext_channel_count = spec->min_channel_count =
		spec->multiout.num_dacs * 2;

	if (spec->multi_ios == 2) {
		for (i = 0; i < 2; i++)
			spec->private_dac_nids[spec->multiout.num_dacs++] =
				spec->multi_io[i].dac;
	} else if (spec->multi_ios) {
		spec->multi_ios = 0;
		badness += BAD_MULTI_IO;
	}

	if (spec->indep_hp && !indep_hp_possible(codec))
		badness += BAD_NO_INDEP_HP;

	/* re-fill the shared DAC for speaker / headphone */
	if (cfg->line_out_type != AUTO_PIN_HP_OUT)
		refill_shared_dacs(codec, cfg->hp_outs,
				   spec->multiout.hp_out_nid,
				   spec->hp_paths);
	if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
		refill_shared_dacs(codec, cfg->speaker_outs,
				   spec->multiout.extra_out_nid,
				   spec->speaker_paths);

	return badness;
}

#define DEBUG_BADNESS

#ifdef DEBUG_BADNESS
#define debug_badness	snd_printdd
#else
#define debug_badness(...)
#endif

#ifdef DEBUG_BADNESS
/* print the path registered at the given path index, if any */
static inline void print_nid_path_idx(struct hda_codec *codec,
				      const char *pfx, int idx)
{
	struct nid_path *path;

	path = snd_hda_get_path_from_idx(codec, idx);
	if (path)
		print_nid_path(pfx, path);
}

/* dump the current output pin/DAC/path assignment for debugging */
static void debug_show_configs(struct hda_codec *codec,
			       struct auto_pin_cfg *cfg)
{
	struct hda_gen_spec *spec = codec->spec;
	static const char * const lo_type[3] = { "LO", "SP", "HP" };
	int i;

	debug_badness("multi_outs = %x/%x/%x/%x : %x/%x/%x/%x (type %s)\n",
		      cfg->line_out_pins[0], cfg->line_out_pins[1],
		      cfg->line_out_pins[2], cfg->line_out_pins[3],
		      spec->multiout.dac_nids[0],
		      spec->multiout.dac_nids[1],
		      spec->multiout.dac_nids[2],
		      spec->multiout.dac_nids[3],
		      lo_type[cfg->line_out_type]);
	for (i = 0; i < cfg->line_outs; i++)
		print_nid_path_idx(codec, "  out", spec->out_paths[i]);
	if (spec->multi_ios > 0)
		debug_badness("multi_ios(%d) = %x/%x : %x/%x\n",
			      spec->multi_ios,
			      spec->multi_io[0].pin, spec->multi_io[1].pin,
			      spec->multi_io[0].dac, spec->multi_io[1].dac);
	for (i = 0; i < spec->multi_ios; i++)
		print_nid_path_idx(codec, "  mio",
				   spec->out_paths[cfg->line_outs + i]);
	if (cfg->hp_outs)
		debug_badness("hp_outs = %x/%x/%x/%x : %x/%x/%x/%x\n",
			      cfg->hp_pins[0], cfg->hp_pins[1],
			      cfg->hp_pins[2], cfg->hp_pins[3],
			      spec->multiout.hp_out_nid[0],
			      spec->multiout.hp_out_nid[1],
			      spec->multiout.hp_out_nid[2],
			      spec->multiout.hp_out_nid[3]);
	for (i = 0; i < cfg->hp_outs; i++)
		print_nid_path_idx(codec, "  hp ", spec->hp_paths[i]);
	if (cfg->speaker_outs)
		debug_badness("spk_outs = %x/%x/%x/%x : %x/%x/%x/%x\n",
			      cfg->speaker_pins[0], cfg->speaker_pins[1],
			      cfg->speaker_pins[2], cfg->speaker_pins[3],
			      spec->multiout.extra_out_nid[0],
			      spec->multiout.extra_out_nid[1],
			      spec->multiout.extra_out_nid[2],
			      spec->multiout.extra_out_nid[3]);
	for (i = 0; i < cfg->speaker_outs; i++)
		print_nid_path_idx(codec, "  spk", spec->speaker_paths[i]);
	for (i = 0; i < 3; i++)
		print_nid_path_idx(codec, "  mix", spec->aamix_out_paths[i]);
}
#else
#define debug_show_configs(codec, cfg) /* NOP */
#endif

/* find all available DACs of the codec */
static void fill_all_dac_nids(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	int i;
	hda_nid_t nid = codec->start_nid;

	spec->num_all_dacs = 0;
	memset(spec->all_dacs, 0, sizeof(spec->all_dacs));
	/* walk every widget node; collect non-digital audio-out widgets */
	for (i = 0; i < codec->num_nodes; i++, nid++) {
		if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_AUD_OUT)
			continue;
		if (spec->num_all_dacs >= ARRAY_SIZE(spec->all_dacs)) {
			snd_printk(KERN_ERR "hda: Too many DACs!\n");
			break;
		}
		spec->all_dacs[spec->num_all_dacs++] = nid;
	}
}

/* Try the possible output-assignment variants (hardwired vs. non-hardwired
 * DAC filling, multi-io-first ordering, and an HP<->speaker role swap),
 * score each with fill_and_eval_dacs() "badness", and keep the best
 * configuration.  On exit the pin-control targets for line/HP/speaker pins
 * are initialized and the vmaster volume NID is detected.
 * Returns 0 on success or a negative error code.
 */
static int parse_output_paths(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	struct auto_pin_cfg *best_cfg;
	unsigned int val;
	int best_badness = INT_MAX;
	int badness;
	bool fill_hardwired = true, fill_mio_first = true;
	bool best_wired = true, best_mio = true;
	bool hp_spk_swapped = false;

	/* keep a snapshot of the best-scoring config; cfg itself is
	 * mutated during the search below
	 */
	best_cfg = kmalloc(sizeof(*best_cfg), GFP_KERNEL);
	if (!best_cfg)
		return -ENOMEM;
	*best_cfg = *cfg;

	for (;;) {
		badness = fill_and_eval_dacs(codec, fill_hardwired,
					     fill_mio_first);
		if (badness < 0) {
			kfree(best_cfg);
			return badness;
		}
		debug_badness("==> lo_type=%d, wired=%d, mio=%d, badness=0x%x\n",
			      cfg->line_out_type, fill_hardwired,
			      fill_mio_first, badness);
		debug_show_configs(codec, cfg);
		if (badness < best_badness) {
			best_badness = badness;
			*best_cfg = *cfg;
			best_wired = fill_hardwired;
			best_mio = fill_mio_first;
		}
		if (!badness)
			break;	/* perfect config found, stop searching */
		/* toggle search knobs in order: mio ordering first,
		 * then hardwired filling, then the HP/speaker swap
		 */
		fill_mio_first = !fill_mio_first;
		if (!fill_mio_first)
			continue;
		fill_hardwired = !fill_hardwired;
		if (!fill_hardwired)
			continue;
		if (hp_spk_swapped)
			break;	/* already tried the swap; give up */
		hp_spk_swapped = true;
		/* swap the primary output role: HP-as-primary becomes
		 * speaker-as-primary (and vice versa below)
		 */
		if (cfg->speaker_outs > 0 &&
		    cfg->line_out_type == AUTO_PIN_HP_OUT) {
			cfg->hp_outs = cfg->line_outs;
			memcpy(cfg->hp_pins, cfg->line_out_pins,
			       sizeof(cfg->hp_pins));
			cfg->line_outs = cfg->speaker_outs;
			memcpy(cfg->line_out_pins, cfg->speaker_pins,
			       sizeof(cfg->speaker_pins));
			cfg->speaker_outs = 0;
			memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins));
			cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
			fill_hardwired = true;
			continue;
		}
		if (cfg->hp_outs > 0 &&
		    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
			cfg->speaker_outs = cfg->line_outs;
			memcpy(cfg->speaker_pins, cfg->line_out_pins,
			       sizeof(cfg->speaker_pins));
			cfg->line_outs = cfg->hp_outs;
			memcpy(cfg->line_out_pins, cfg->hp_pins,
			       sizeof(cfg->hp_pins));
			cfg->hp_outs = 0;
			memset(cfg->hp_pins, 0, sizeof(cfg->hp_pins));
			cfg->line_out_type = AUTO_PIN_HP_OUT;
			fill_hardwired = true;
			continue;
		}
		break;
	}

	if (badness) {
		/* the last evaluated config wasn't the winner; re-apply
		 * the best snapshot and re-fill the DACs accordingly
		 */
		debug_badness("==> restoring best_cfg\n");
		*cfg = *best_cfg;
		fill_and_eval_dacs(codec, best_wired, best_mio);
	}
	debug_badness("==> Best config: lo_type=%d, wired=%d, mio=%d\n",
		      cfg->line_out_type, best_wired, best_mio);
	debug_show_configs(codec, cfg);

	if (cfg->line_out_pins[0]) {
		struct nid_path *path;
		path = snd_hda_get_path_from_idx(codec, spec->out_paths[0]);
		if (path)
			spec->vmaster_nid = look_for_out_vol_nid(codec, path);
		if (spec->vmaster_nid)
			snd_hda_set_vmaster_tlv(codec, spec->vmaster_nid,
						HDA_OUTPUT, spec->vmaster_tlv);
	}

	/* set initial pinctl targets */
	if (spec->prefer_hp_amp || cfg->line_out_type == AUTO_PIN_HP_OUT)
		val = PIN_HP;
	else
		val = PIN_OUT;
	set_pin_targets(codec, cfg->line_outs, cfg->line_out_pins, val);
	if (cfg->line_out_type != AUTO_PIN_HP_OUT)
		set_pin_targets(codec, cfg->hp_outs, cfg->hp_pins, PIN_HP);
	if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
		val = spec->prefer_hp_amp ? PIN_HP : PIN_OUT;
		set_pin_targets(codec, cfg->speaker_outs,
				cfg->speaker_pins, val);
	}

	/* clear indep_hp flag if not available */
	if (spec->indep_hp && !indep_hp_possible(codec))
		spec->indep_hp = 0;

	kfree(best_cfg);
	return 0;
}

/* add playback controls from the parsed DAC table */
static int create_multi_out_ctls(struct hda_codec *codec,
				 const struct auto_pin_cfg *cfg)
{
	struct hda_gen_spec *spec = codec->spec;
	int i, err, noutputs;

	noutputs = cfg->line_outs;
	if (spec->multi_ios > 0 && cfg->line_outs < 3)
		noutputs += spec->multi_ios;

	for (i = 0; i < noutputs; i++) {
		const char *name;
		int index;
		struct nid_path *path;

		path = snd_hda_get_path_from_idx(codec, spec->out_paths[i]);
		if (!path)
			continue;

		name = get_line_out_pfx(codec, i, &index, NID_PATH_VOL_CTL);
		if (!name || !strcmp(name, "CLFE")) {
			/* Center/LFE */
			err = add_vol_ctl(codec, "Center", 0, 1, path);
			if (err < 0)
				return err;
			err = add_vol_ctl(codec, "LFE", 0, 2, path);
			if (err < 0)
				return err;
		} else {
			err = add_stereo_vol(codec, name, index, path);
			if (err < 0)
				return err;
		}

		name = get_line_out_pfx(codec, i, &index, NID_PATH_MUTE_CTL);
		if (!name || !strcmp(name, "CLFE")) {
			err = add_sw_ctl(codec, "Center", 0, 1, path);
			if (err < 0)
				return err;
			err = add_sw_ctl(codec, "LFE", 0, 2, path);
			if (err < 0)
				return err;
		} else {
			err = add_stereo_sw(codec, name, index, path);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

/* Create stereo volume and mute controls for a single extra output path;
 * quietly succeeds when the path index resolves to nothing.
 */
static int create_extra_out(struct hda_codec *codec, int path_idx,
			    const char *pfx, int cidx)
{
	struct nid_path *path;
	int err;

	path = snd_hda_get_path_from_idx(codec, path_idx);
	if (!path)
		return 0;
	err = add_stereo_vol(codec, pfx, cidx, path);
	if (err < 0)
		return err;
	err = add_stereo_sw(codec, pfx, cidx, path);
	if (err < 0)
		return err;
	return 0;
}

/* add playback controls for speaker and HP outputs */
static int create_extra_outs(struct hda_codec *codec, int num_pins,
			     const int *paths, const char *pfx)
{
	int i;

	for (i = 0; i < num_pins; i++) {
		const char *name;
		char tmp[44];
		int err, idx = 0;
		if
 (num_pins == 2 && i == 1 && !strcmp(pfx, "Speaker"))
			/* second of exactly two speakers gets a special name */
			name = "Bass Speaker";
		else if (num_pins >= 3) {
			snprintf(tmp, sizeof(tmp), "%s %s",
				 pfx, channel_name[i]);
			name = tmp;
		} else {
			name = pfx;
			idx = i;
		}
		err = create_extra_out(codec, paths[i], name, idx);
		if (err < 0)
			return err;
	}
	return 0;
}

/* Create mixer controls for all headphone output paths. */
static int create_hp_out_ctls(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	return create_extra_outs(codec, spec->autocfg.hp_outs,
				 spec->hp_paths,
				 "Headphone");
}

/* Create mixer controls for all speaker output paths. */
static int create_speaker_out_ctls(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	return create_extra_outs(codec, spec->autocfg.speaker_outs,
				 spec->speaker_paths,
				 "Speaker");
}

/*
 * independent HP controls
 */

static void call_hp_automute(struct hda_codec *codec,
			     struct hda_jack_tbl *jack);

/* info callback: boolean enum ("Disabled"/"Enabled" style helper) */
static int indep_hp_info(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_info *uinfo)
{
	return snd_hda_enum_bool_helper_info(kcontrol, uinfo);
}

/* get callback: report the current independent-HP enable state */
static int indep_hp_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	ucontrol->value.enumerated.item[0] = spec->indep_hp_enabled;
	return 0;
}

static void update_aamix_paths(struct hda_codec *codec, bool do_mix,
			       int nomix_path_idx, int mix_path_idx,
			       int out_type);

/* put callback: switch independent-HP mode; refuses with -EBUSY while any
 * PCM stream is active since the DAC assignment changes underneath.
 */
static int indep_hp_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	unsigned int select = ucontrol->value.enumerated.item[0];
	int ret = 0;

	mutex_lock(&spec->pcm_mutex);
	if (spec->active_streams) {
		ret = -EBUSY;
		goto unlock;
	}

	if (spec->indep_hp_enabled != select) {
		hda_nid_t *dacp;
		/* pick which DAC slot gets cleared/restored depending on
		 * whether HP is the primary output
		 */
		if (spec->autocfg.line_out_type == AUTO_PIN_HP_OUT)
			dacp = &spec->private_dac_nids[0];
		else
			dacp = &spec->multiout.hp_out_nid[0];

		/* update HP aamix paths in case it conflicts with indep HP */
		if (spec->have_aamix_ctl) {
			if (spec->autocfg.line_out_type == AUTO_PIN_HP_OUT)
				update_aamix_paths(codec, spec->aamix_mode,
						   spec->out_paths[0],
						   spec->aamix_out_paths[0],
						   spec->autocfg.line_out_type);
			else
				update_aamix_paths(codec, spec->aamix_mode,
						   spec->hp_paths[0],
						   spec->aamix_out_paths[1],
						   AUTO_PIN_HP_OUT);
		}

		spec->indep_hp_enabled = select;
		if (spec->indep_hp_enabled)
			*dacp = 0;	/* detach HP DAC from multiout */
		else
			*dacp = spec->alt_dac_nid;

		call_hp_automute(codec, NULL);
		ret = 1;	/* value changed */
	}
 unlock:
	mutex_unlock(&spec->pcm_mutex);
	return ret;
}

static const struct snd_kcontrol_new indep_hp_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Independent HP",
	.info = indep_hp_info,
	.get = indep_hp_get,
	.put = indep_hp_put,
};

/* Add the "Independent HP" mixer switch when the feature is enabled and a
 * dedicated HP DAC exists; clears spec->indep_hp otherwise.
 */
static int create_indep_hp_ctls(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	hda_nid_t dac;

	if (!spec->indep_hp)
		return 0;
	if (spec->autocfg.line_out_type == AUTO_PIN_HP_OUT)
		dac = spec->multiout.dac_nids[0];
	else
		dac = spec->multiout.hp_out_nid[0];
	if (!dac) {
		spec->indep_hp = 0;
		return 0;
	}

	spec->indep_hp_enabled = false;
	spec->alt_dac_nid = dac;
	if (!snd_hda_gen_add_kctl(spec, NULL, &indep_hp_ctl))
		return -ENOMEM;
	return 0;
}

/*
 * channel mode enum control
 */

/* info callback: enumerate "Nch" items from min_channel_count upward,
 * one item per available multi-io pin plus the base mode
 */
static int ch_mode_info(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_info *uinfo)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	int chs;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = spec->multi_ios + 1;
	if (uinfo->value.enumerated.item > spec->multi_ios)
		uinfo->value.enumerated.item = spec->multi_ios;
	chs = uinfo->value.enumerated.item * 2 + spec->min_channel_count;
	sprintf(uinfo->value.enumerated.name, "%dch", chs);
	return 0;
}

/* get callback: derive the enum index back from the channel count */
static int ch_mode_get(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	ucontrol->value.enumerated.item[0] =
		(spec->ext_channel_count - spec->min_channel_count) / 2;
	return 0;
}

static inline struct nid_path *
get_multiio_path(struct hda_codec *codec, int idx)
{
	/* multi-io paths are stored in out_paths[] after the line-out paths */
	struct hda_gen_spec *spec = codec->spec;
	return snd_hda_get_path_from_idx(codec,
		spec->out_paths[spec->autocfg.line_outs + idx]);
}

static void update_automute_all(struct hda_codec *codec);

/* Default value to be passed as aamix argument for snd_hda_activate_path();
 * used for output paths
 */
static bool aamix_default(struct hda_gen_spec *spec)
{
	return !spec->have_aamix_ctl || spec->aamix_mode;
}

/* Retask a multi-io pin as output (or restore it as input) and activate
 * or deactivate the corresponding path.  Returns 0 or -EINVAL when the
 * path index is stale.
 */
static int set_multi_io(struct hda_codec *codec, int idx, bool output)
{
	struct hda_gen_spec *spec = codec->spec;
	hda_nid_t nid = spec->multi_io[idx].pin;
	struct nid_path *path;

	path = get_multiio_path(codec, idx);
	if (!path)
		return -EINVAL;

	if (path->active == output)
		return 0;	/* already in the requested state */

	if (output) {
		set_pin_target(codec, nid, PIN_OUT, true);
		snd_hda_activate_path(codec, path, true, aamix_default(spec));
		set_pin_eapd(codec, nid, true);
	} else {
		/* restore the saved input pinctl (ctl_in) for the pin */
		set_pin_eapd(codec, nid, false);
		snd_hda_activate_path(codec, path, false, aamix_default(spec));
		set_pin_target(codec, nid, spec->multi_io[idx].ctl_in, true);
		path_power_down_sync(codec, path);
	}

	/* update jack retasking in case it modifies any of them */
	update_automute_all(codec);

	return 0;
}

/* put callback: switch the channel mode by retasking multi-io pins */
static int ch_mode_put(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	int i, ch;

	ch = ucontrol->value.enumerated.item[0];
	if (ch < 0 || ch > spec->multi_ios)
		return -EINVAL;
	if (ch == (spec->ext_channel_count - spec->min_channel_count) / 2)
		return 0;	/* unchanged */
	spec->ext_channel_count = ch * 2 + spec->min_channel_count;
	/* first ch pins become outputs, the rest go back to inputs */
	for (i = 0; i < spec->multi_ios; i++)
		set_multi_io(codec, i, i < ch);
	spec->multiout.max_channels = max(spec->ext_channel_count,
					  spec->const_channel_count);
	if (spec->need_dac_fix)
		spec->multiout.num_dacs = spec->multiout.max_channels / 2;
	return 1;
}

static const struct snd_kcontrol_new channel_mode_enum = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Channel Mode",
	.info = ch_mode_info,
	.get = ch_mode_get,
	.put = ch_mode_put,
};

/* Add the "Channel Mode" control only when multi-io pins were found. */
static int create_multi_channel_mode(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;

	if (spec->multi_ios > 0) {
		if (!snd_hda_gen_add_kctl(spec, NULL, &channel_mode_enum))
			return -ENOMEM;
	}
	return 0;
}

/*
 * aamix loopback enable/disable switch
 */

#define loopback_mixing_info	indep_hp_info

/* get callback: report the current aamix (analog loopback) mode */
static int loopback_mixing_get(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	ucontrol->value.enumerated.item[0] = spec->aamix_mode;
	return 0;
}

/* Switch one output between its direct (no-mix) path and its aamix path:
 * deactivate the losing path, activate the winning one, and power down
 * the loser.  Both path indices must resolve or the call is a no-op.
 */
static void update_aamix_paths(struct hda_codec *codec, bool do_mix,
			       int nomix_path_idx, int mix_path_idx,
			       int out_type)
{
	struct hda_gen_spec *spec = codec->spec;
	struct nid_path *nomix_path, *mix_path;

	nomix_path = snd_hda_get_path_from_idx(codec, nomix_path_idx);
	mix_path = snd_hda_get_path_from_idx(codec, mix_path_idx);
	if (!nomix_path || !mix_path)
		return;

	/* if HP aamix path is driven from a different DAC and the
	 * independent HP mode is ON, can't turn on aamix path
	 */
	if (out_type == AUTO_PIN_HP_OUT && spec->indep_hp_enabled &&
	    mix_path->path[0] != spec->alt_dac_nid)
		do_mix = false;

	if (do_mix) {
		snd_hda_activate_path(codec, nomix_path, false, true);
		snd_hda_activate_path(codec, mix_path, true, true);
		path_power_down_sync(codec, nomix_path);
	} else {
		snd_hda_activate_path(codec, mix_path, false, false);
		snd_hda_activate_path(codec, nomix_path, true, false);
		path_power_down_sync(codec, mix_path);
	}
}

/* put callback: flip loopback mixing for line-out, HP and speaker paths */
static int loopback_mixing_put(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	unsigned int val = ucontrol->value.enumerated.item[0];

	if (val == spec->aamix_mode)
		return 0;	/* unchanged */
	spec->aamix_mode = val;
	update_aamix_paths(codec, val, spec->out_paths[0],
			   spec->aamix_out_paths[0],
			   spec->autocfg.line_out_type);
	update_aamix_paths(codec, val, spec->hp_paths[0],
			   spec->aamix_out_paths[1],
			   AUTO_PIN_HP_OUT);
	update_aamix_paths(codec, val, spec->speaker_paths[0],
			   spec->aamix_out_paths[2],
			   AUTO_PIN_SPEAKER_OUT);
	return 1;
}

static const struct snd_kcontrol_new loopback_mixing_enum = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Loopback Mixing",
	.info = loopback_mixing_info,
	.get = loopback_mixing_get,
	.put = loopback_mixing_put,
};

/* Add the "Loopback Mixing" control when a mixer widget exists and at
 * least one aamix output path was parsed; sets have_aamix_ctl on success.
 */
static int create_loopback_mixing_ctl(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;

	if (!spec->mixer_nid)
		return 0;
	if (!(spec->aamix_out_paths[0] || spec->aamix_out_paths[1] ||
	      spec->aamix_out_paths[2]))
		return 0;
	if (!snd_hda_gen_add_kctl(spec, NULL, &loopback_mixing_enum))
		return -ENOMEM;
	spec->have_aamix_ctl = 1;
	return 0;
}

/*
 * shared headphone/mic handling
 */

static void call_update_outputs(struct hda_codec *codec);

/* for shared I/O, change the pin-control accordingly */
static void update_hp_mic(struct hda_codec *codec, int adc_mux, bool force)
{
	struct hda_gen_spec *spec = codec->spec;
	bool as_mic;
	unsigned int val;
	hda_nid_t pin;

	pin = spec->hp_mic_pin;
	/* mic mode is selected when the capture mux points at the HP pin */
	as_mic = spec->cur_mux[adc_mux] == spec->hp_mic_mux_idx;

	if (!force) {
		/* skip the update when the pin is already in the right mode */
		val = snd_hda_codec_get_pin_target(codec, pin);
		if (as_mic) {
			if (val & PIN_IN)
				return;
		} else {
			if (val & PIN_OUT)
				return;
		}
	}

	val = snd_hda_get_default_vref(codec, pin);
	/* if the HP pin doesn't support VREF and the codec driver gives an
	 * alternative pin, set up the VREF on that pin instead
	 */
	if (val == AC_PINCTL_VREF_HIZ && spec->shared_mic_vref_pin) {
		const hda_nid_t vref_pin = spec->shared_mic_vref_pin;
		unsigned int vref_val = snd_hda_get_default_vref(codec, vref_pin);
		if (vref_val != AC_PINCTL_VREF_HIZ)
			snd_hda_set_pin_ctl_cache(codec, vref_pin,
						  PIN_IN | (as_mic ?
 vref_val : 0));
	}

	if (!spec->hp_mic_jack_modes) {
		/* no user-visible jack-mode control: flip the pinctl here */
		if (as_mic)
			val |= PIN_IN;
		else
			val = PIN_HP;
		set_pin_target(codec, pin, val, true);
		call_hp_automute(codec, NULL);
	}
}

/* create a shared input with the headphone out */
static int create_hp_mic(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	unsigned int defcfg;
	hda_nid_t nid;

	if (!spec->hp_mic) {
		if (spec->suppress_hp_mic_detect)
			return 0;
		/* automatic detection: only if no input or a single internal
		 * input pin is found, try to detect the shared hp/mic
		 */
		if (cfg->num_inputs > 1)
			return 0;
		else if (cfg->num_inputs == 1) {
			defcfg = snd_hda_codec_get_pincfg(codec, cfg->inputs[0].pin);
			if (snd_hda_get_input_pin_attr(defcfg) != INPUT_PIN_ATTR_INT)
				return 0;
		}
	}

	spec->hp_mic = 0; /* clear once */
	if (cfg->num_inputs >= AUTO_CFG_MAX_INS)
		return 0;

	/* prefer the primary HP pin as the shared jack */
	nid = 0;
	if (cfg->line_out_type == AUTO_PIN_HP_OUT && cfg->line_outs > 0)
		nid = cfg->line_out_pins[0];
	else if (cfg->hp_outs > 0)
		nid = cfg->hp_pins[0];
	if (!nid)
		return 0;

	if (!(snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_IN))
		return 0; /* no input */

	/* register the HP pin as an extra mic input */
	cfg->inputs[cfg->num_inputs].pin = nid;
	cfg->inputs[cfg->num_inputs].type = AUTO_PIN_MIC;
	cfg->inputs[cfg->num_inputs].is_headphone_mic = 1;
	cfg->num_inputs++;
	spec->hp_mic = 1;
	spec->hp_mic_pin = nid;
	/* we can't handle auto-mic together with HP-mic */
	spec->suppress_auto_mic = 1;
	snd_printdd("hda-codec: Enable shared I/O jack on NID 0x%x\n", nid);
	return 0;
}

/*
 * output jack mode
 */

static int create_hp_mic_jack_mode(struct hda_codec *codec, hda_nid_t pin);

static const char * const out_jack_texts[] = {
	"Line Out", "Headphone Out",
};

/* info callback: two fixed items, "Line Out" / "Headphone Out" */
static int out_jack_mode_info(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_info *uinfo)
{
	return snd_hda_enum_helper_info(kcontrol, uinfo, 2, out_jack_texts);
}

/* get callback: 1 when the pin target is PIN_HP, otherwise 0 */
static int out_jack_mode_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	hda_nid_t nid = kcontrol->private_value;
	if (snd_hda_codec_get_pin_target(codec, nid) == PIN_HP)
		ucontrol->value.enumerated.item[0] = 1;
	else
		ucontrol->value.enumerated.item[0] = 0;
	return 0;
}

/* put callback: write PIN_HP or PIN_OUT into the cached pinctl */
static int out_jack_mode_put(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	hda_nid_t nid = kcontrol->private_value;
	unsigned int val;

	val = ucontrol->value.enumerated.item[0] ? PIN_HP : PIN_OUT;
	if (snd_hda_codec_get_pin_target(codec, nid) == val)
		return 0;
	snd_hda_set_pin_ctl_cache(codec, nid, val);
	return 1;
}

static const struct snd_kcontrol_new out_jack_mode_enum = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.info = out_jack_mode_info,
	.get = out_jack_mode_get,
	.put = out_jack_mode_put,
};

/* Return true when a control with the given name and index was already
 * registered in spec->kctls.
 */
static bool find_kctl_name(struct hda_codec *codec, const char *name, int idx)
{
	struct hda_gen_spec *spec = codec->spec;
	int i;

	for (i = 0; i < spec->kctls.used; i++) {
		struct snd_kcontrol_new *kctl = snd_array_elem(&spec->kctls, i);
		if (!strcmp(kctl->name, name) && kctl->index == idx)
			return true;
	}
	return false;
}

/* Build a unique "<pin label> Jack Mode" control name, bumping the index
 * until no clash with an existing control remains.
 */
static void get_jack_mode_name(struct hda_codec *codec, hda_nid_t pin,
			       char *name, size_t name_len)
{
	struct hda_gen_spec *spec = codec->spec;
	int idx = 0;

	snd_hda_get_pin_label(codec, pin, &spec->autocfg, name, name_len, &idx);
	strlcat(name, " Jack Mode", name_len);

	for (; find_kctl_name(codec, name, idx); idx++)
		;
}

/* Number of selectable output modes for the pin: 2 (line/HP) when the pin
 * has both output and HP-drive capability and jack modes are enabled,
 * otherwise 1.
 */
static int get_out_jack_num_items(struct hda_codec *codec, hda_nid_t pin)
{
	struct hda_gen_spec *spec = codec->spec;
	if (spec->add_jack_modes) {
		unsigned int pincap = snd_hda_query_pin_caps(codec, pin);
		if ((pincap & AC_PINCAP_OUT) && (pincap & AC_PINCAP_HP_DRV))
			return 2;
	}
	return 1;
}

/* Create "... Jack Mode" controls for the given output pins; the shared
 * hp/mic pin gets the combined HP-mic jack mode control instead.
 */
static int create_out_jack_modes(struct hda_codec *codec, int num_pins,
				 hda_nid_t *pins)
{
	struct hda_gen_spec *spec = codec->spec;
	int i;

	for (i = 0; i < num_pins; i++) {
		hda_nid_t pin = pins[i];
		if (pin == spec->hp_mic_pin) {
			int ret = create_hp_mic_jack_mode(codec, pin);
			if (ret < 0)
				return ret;
			continue;
		}
		if
 (get_out_jack_num_items(codec, pin) > 1) {
			struct snd_kcontrol_new *knew;
			char name[44];
			get_jack_mode_name(codec, pin, name, sizeof(name));
			knew = snd_hda_gen_add_kctl(spec, name,
						    &out_jack_mode_enum);
			if (!knew)
				return -ENOMEM;
			/* pin NID is stashed in private_value for callbacks */
			knew->private_value = pin;
		}
	}
	return 0;
}

/*
 * input jack mode
 */

/* from AC_PINCTL_VREF_HIZ to AC_PINCTL_VREF_100 */
#define NUM_VREFS	6

static const char * const vref_texts[NUM_VREFS] = {
	"Line In", "Mic 50pc Bias", "Mic 0V Bias",
	"", "Mic 80pc Bias", "Mic 100pc Bias"
};

/* Return the VREF capability bitmask of the pin, with the unusual GRD and
 * 100% VREFs masked out.
 */
static unsigned int get_vref_caps(struct hda_codec *codec, hda_nid_t pin)
{
	unsigned int pincap;

	pincap = snd_hda_query_pin_caps(codec, pin);
	pincap = (pincap & AC_PINCAP_VREF) >> AC_PINCAP_VREF_SHIFT;
	/* filter out unusual vrefs */
	pincap &= ~(AC_PINCAP_VREF_GRD | AC_PINCAP_VREF_100);
	return pincap;
}

/* convert from the enum item index to the vref ctl index (0=HIZ, 1=50%...) */
static int get_vref_idx(unsigned int vref_caps, unsigned int item_idx)
{
	unsigned int i, n = 0;

	for (i = 0; i < NUM_VREFS; i++) {
		if (vref_caps & (1 << i)) {
			if (n == item_idx)
				return i;
			n++;
		}
	}
	return 0;
}

/* convert back from the vref ctl index to the enum item index */
static int cvt_from_vref_idx(unsigned int vref_caps, unsigned int idx)
{
	unsigned int i, n = 0;

	for (i = 0; i < NUM_VREFS; i++) {
		if (i == idx)
			return n;
		if (vref_caps & (1 << i))
			n++;
	}
	return 0;
}

/* info callback: enumerate only the VREFs the pin actually supports */
static int in_jack_mode_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	hda_nid_t nid = kcontrol->private_value;
	unsigned int vref_caps = get_vref_caps(codec, nid);

	snd_hda_enum_helper_info(kcontrol, uinfo, hweight32(vref_caps),
				 vref_texts);
	/* set the right text */
	strcpy(uinfo->value.enumerated.name,
	       vref_texts[get_vref_idx(vref_caps,
				       uinfo->value.enumerated.item)]);
	return 0;
}

/* get callback: map the cached pinctl VREF bits back to an enum index */
static int in_jack_mode_get(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	hda_nid_t nid = kcontrol->private_value;
	unsigned int vref_caps = get_vref_caps(codec, nid);
	unsigned int idx;

	idx = snd_hda_codec_get_pin_target(codec, nid) & AC_PINCTL_VREFEN;
	ucontrol->value.enumerated.item[0] = cvt_from_vref_idx(vref_caps, idx);
	return 0;
}

/* put callback: replace the VREF bits of the cached pinctl */
static int in_jack_mode_put(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	hda_nid_t nid = kcontrol->private_value;
	unsigned int vref_caps = get_vref_caps(codec, nid);
	unsigned int val, idx;

	val = snd_hda_codec_get_pin_target(codec, nid);
	idx = cvt_from_vref_idx(vref_caps, val & AC_PINCTL_VREFEN);
	if (idx == ucontrol->value.enumerated.item[0])
		return 0;

	val &= ~AC_PINCTL_VREFEN;
	val |= get_vref_idx(vref_caps, ucontrol->value.enumerated.item[0]);
	snd_hda_set_pin_ctl_cache(codec, nid, val);
	return 1;
}

static const struct snd_kcontrol_new in_jack_mode_enum = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.info = in_jack_mode_info,
	.get = in_jack_mode_get,
	.put = in_jack_mode_put,
};

/* Number of selectable input modes (supported VREFs) for the pin, at
 * least 1.
 */
static int get_in_jack_num_items(struct hda_codec *codec, hda_nid_t pin)
{
	struct hda_gen_spec *spec = codec->spec;
	int nitems = 0;
	if (spec->add_jack_modes)
		nitems = hweight32(get_vref_caps(codec, pin));
	return nitems ? nitems : 1;
}

/* Create a "... Jack Mode" VREF control for an input pin; skipped for the
 * shared hp/mic pin and for fixed (internal) pins.
 */
static int create_in_jack_mode(struct hda_codec *codec, hda_nid_t pin)
{
	struct hda_gen_spec *spec = codec->spec;
	struct snd_kcontrol_new *knew;
	char name[44];
	unsigned int defcfg;

	if (pin == spec->hp_mic_pin)
		return 0; /* already done in create_out_jack_mode() */

	/* no jack mode for fixed pins */
	defcfg = snd_hda_codec_get_pincfg(codec, pin);
	if (snd_hda_get_input_pin_attr(defcfg) == INPUT_PIN_ATTR_INT)
		return 0;

	/* no multiple vref caps? */
	if (get_in_jack_num_items(codec, pin) <= 1)
		return 0;

	get_jack_mode_name(codec, pin, name, sizeof(name));
	knew = snd_hda_gen_add_kctl(spec, name, &in_jack_mode_enum);
	if (!knew)
		return -ENOMEM;
	knew->private_value = pin;
	return 0;
}

/*
 * HP/mic shared jack mode
 */

/* info callback: the item list is the concatenation of the output modes
 * (line/HP) followed by the input (VREF) modes of the shared pin
 */
static int hp_mic_jack_mode_info(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	hda_nid_t nid = kcontrol->private_value;
	int out_jacks = get_out_jack_num_items(codec, nid);
	int in_jacks = get_in_jack_num_items(codec, nid);
	const char *text = NULL;
	int idx;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = out_jacks + in_jacks;
	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
	idx = uinfo->value.enumerated.item;
	if (idx < out_jacks) {
		if (out_jacks > 1)
			text = out_jack_texts[idx];
		else
			text = "Headphone Out";
	} else {
		idx -= out_jacks;
		if (in_jacks > 1) {
			unsigned int vref_caps = get_vref_caps(codec, nid);
			text = vref_texts[get_vref_idx(vref_caps, idx)];
		} else
			text = "Mic In";
	}

	strcpy(uinfo->value.enumerated.name, text);
	return 0;
}

/* Derive the current enum index of the shared jack from the cached
 * pinctl: output modes first, then VREF-based input modes.
 */
static int get_cur_hp_mic_jack_mode(struct hda_codec *codec, hda_nid_t nid)
{
	int out_jacks = get_out_jack_num_items(codec, nid);
	int in_jacks = get_in_jack_num_items(codec, nid);
	unsigned int val = snd_hda_codec_get_pin_target(codec, nid);
	int idx = 0;

	if (val & PIN_OUT) {
		if (out_jacks > 1 && val == PIN_HP)
			idx = 1;
	} else if (val & PIN_IN) {
		idx = out_jacks;
		if (in_jacks > 1) {
			unsigned int vref_caps = get_vref_caps(codec, nid);
			val &= AC_PINCTL_VREFEN;
			idx += cvt_from_vref_idx(vref_caps, val);
		}
	}
	return idx;
}

/* get callback for the combined HP-mic jack mode control */
static int hp_mic_jack_mode_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	hda_nid_t nid = kcontrol->private_value;
	ucontrol->value.enumerated.item[0] =
 get_cur_hp_mic_jack_mode(codec, nid);
	return 0;
}

/* put callback: translate the enum index into a pinctl value (output
 * mode or PIN_IN plus the selected VREF) and apply it, then re-run the
 * automute logic.
 */
static int hp_mic_jack_mode_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	hda_nid_t nid = kcontrol->private_value;
	int out_jacks = get_out_jack_num_items(codec, nid);
	int in_jacks = get_in_jack_num_items(codec, nid);
	unsigned int val, oldval, idx;

	oldval = get_cur_hp_mic_jack_mode(codec, nid);
	idx = ucontrol->value.enumerated.item[0];
	if (oldval == idx)
		return 0;

	if (idx < out_jacks) {
		if (out_jacks > 1)
			val = idx ? PIN_HP : PIN_OUT;
		else
			val = PIN_HP;
	} else {
		idx -= out_jacks;
		if (in_jacks > 1) {
			unsigned int vref_caps = get_vref_caps(codec, nid);
			val = snd_hda_codec_get_pin_target(codec, nid);
			val &= ~(AC_PINCTL_VREFEN | PIN_HP);
			val |= get_vref_idx(vref_caps, idx) | PIN_IN;
		} else
			val = snd_hda_get_default_vref(codec, nid);
	}
	snd_hda_set_pin_ctl_cache(codec, nid, val);
	call_hp_automute(codec, NULL);

	return 1;
}

static const struct snd_kcontrol_new hp_mic_jack_mode_enum = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.info = hp_mic_jack_mode_info,
	.get = hp_mic_jack_mode_get,
	.put = hp_mic_jack_mode_put,
};

/* Add the "Headphone Mic Jack Mode" control for the shared hp/mic pin
 * when it offers more than one output or input mode.
 */
static int create_hp_mic_jack_mode(struct hda_codec *codec, hda_nid_t pin)
{
	struct hda_gen_spec *spec = codec->spec;
	struct snd_kcontrol_new *knew;

	if (get_out_jack_num_items(codec, pin) <= 1 &&
	    get_in_jack_num_items(codec, pin) <= 1)
		return 0; /* no need */
	knew = snd_hda_gen_add_kctl(spec, "Headphone Mic Jack Mode",
				    &hp_mic_jack_mode_enum);
	if (!knew)
		return -ENOMEM;
	knew->private_value = pin;
	spec->hp_mic_jack_modes = 1;
	return 0;
}

/*
 * Parse input paths
 */

/* add the powersave loopback-list entry */
static int add_loopback_list(struct hda_gen_spec *spec, hda_nid_t mix, int idx)
{
	struct hda_amp_list *list;

	list = snd_array_new(&spec->loopback_list);
	if (!list)
		return -ENOMEM;
	list->nid = mix;
	list->dir = HDA_INPUT;
	list->idx = idx;
	/* re-point amplist: snd_array_new() may have reallocated the list */
	spec->loopback.amplist = spec->loopback_list.list;
	return 0;
}

/* create input playback/capture controls for the given pin */
static int new_analog_input(struct hda_codec *codec, int input_idx,
			    hda_nid_t pin, const char *ctlname, int ctlidx,
			    hda_nid_t mix_nid)
{
	struct hda_gen_spec *spec = codec->spec;
	struct nid_path *path;
	unsigned int val;
	int err, idx;

	if (!nid_has_volume(codec, mix_nid, HDA_INPUT) &&
	    !nid_has_mute(codec, mix_nid, HDA_INPUT))
		return 0; /* no need for analog loopback */

	path = snd_hda_add_new_path(codec, pin, mix_nid, 0);
	if (!path)
		return -EINVAL;
	print_nid_path("loopback", path);
	spec->loopback_paths[input_idx] = snd_hda_get_path_idx(codec, path);

	/* the mixer input index is the last connection index of the path */
	idx = path->idx[path->depth - 1];
	if (nid_has_volume(codec, mix_nid, HDA_INPUT)) {
		val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
		err = __add_pb_vol_ctrl(spec, HDA_CTL_WIDGET_VOL, ctlname, ctlidx, val);
		if (err < 0)
			return err;
		path->ctls[NID_PATH_VOL_CTL] = val;
	}

	if (nid_has_mute(codec, mix_nid, HDA_INPUT)) {
		val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
		err = __add_pb_sw_ctrl(spec, HDA_CTL_WIDGET_MUTE, ctlname, ctlidx, val);
		if (err < 0)
			return err;
		path->ctls[NID_PATH_MUTE_CTL] = val;
	}

	path->active = true;
	err = add_loopback_list(spec, mix_nid, idx);
	if (err < 0)
		return err;

	/* set up an extra path from the mixer to the merge widget once,
	 * when the two differ
	 */
	if (spec->mixer_nid != spec->mixer_merge_nid &&
	    !spec->loopback_merge_path) {
		path = snd_hda_add_new_path(codec, spec->mixer_nid,
					    spec->mixer_merge_nid, 0);
		if (path) {
			print_nid_path("loopback-merge", path);
			path->active = true;
			spec->loopback_merge_path =
				snd_hda_get_path_idx(codec, path);
		}
	}

	return 0;
}

/* Return non-zero when the pin widget has input capability. */
static int is_input_pin(struct hda_codec *codec, hda_nid_t nid)
{
	unsigned int pincap = snd_hda_query_pin_caps(codec, nid);
	return (pincap & AC_PINCAP_IN) != 0;
}

/* Parse the codec tree and retrieve ADCs */
static int fill_adc_nids(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	hda_nid_t nid;
	hda_nid_t *adc_nids = spec->adc_nids;
	int max_nums = ARRAY_SIZE(spec->adc_nids);
	int i, nums = 0;

	nid = codec->start_nid;
	for (i = 0; i < codec->num_nodes; i++, nid++) {
		unsigned int caps = get_wcaps(codec, nid);
		int type = get_wcaps_type(caps);

		/* only analog audio-in widgets qualify */
		if (type != AC_WID_AUD_IN || (caps & AC_WCAP_DIGITAL))
			continue;
		adc_nids[nums] = nid;
		if (++nums >= max_nums)
			break;
	}
	spec->num_adc_nids = nums;

	/* copy the detected ADCs to all_adcs[] */
	spec->num_all_adcs = nums;
	memcpy(spec->all_adcs, spec->adc_nids, nums * sizeof(hda_nid_t));

	return nums;
}

/* filter out invalid adc_nids that don't give all active input pins;
 * if needed, check whether dynamic ADC-switching is available
 */
static int check_dyn_adc_switch(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	struct hda_input_mux *imux = &spec->input_mux;
	unsigned int ok_bits;
	int i, n, nums;

	/* ok_bits collects the ADCs that can reach every imux input */
	nums = 0;
	ok_bits = 0;
	for (n = 0; n < spec->num_adc_nids; n++) {
		for (i = 0; i < imux->num_items; i++) {
			if (!spec->input_paths[i][n])
				break;
		}
		if (i >= imux->num_items) {
			ok_bits |= (1 << n);
			nums++;
		}
	}

	if (!ok_bits) {
		/* check whether ADC-switch is possible */
		for (i = 0; i < imux->num_items; i++) {
			for (n = 0; n < spec->num_adc_nids; n++) {
				if (spec->input_paths[i][n]) {
					spec->dyn_adc_idx[i] = n;
					break;
				}
			}
		}

		snd_printdd("hda-codec: enabling ADC switching\n");
		spec->dyn_adc_switch = 1;
	} else if (nums != spec->num_adc_nids) {
		/* shrink the invalid adcs and input paths */
		nums = 0;
		for (n = 0; n < spec->num_adc_nids; n++) {
			if (!(ok_bits & (1 << n)))
				continue;
			if (n != nums) {
				spec->adc_nids[nums] = spec->adc_nids[n];
				for (i = 0; i < imux->num_items; i++) {
					invalidate_nid_path(codec,
						spec->input_paths[i][nums]);
					spec->input_paths[i][nums] =
						spec->input_paths[i][n];
				}
			}
			nums++;
		}
		spec->num_adc_nids = nums;
	}

	if (imux->num_items == 1 ||
	    (imux->num_items == 2 && spec->hp_mic)) {
		snd_printdd("hda-codec: reducing to a single ADC\n");
		spec->num_adc_nids = 1; /* reduce to a single ADC */
	}

	/* single index for individual volumes ctls */
	if (!spec->dyn_adc_switch && spec->multi_cap_vol)
		spec->num_adc_nids = 1;

	return 0;
}

/* parse capture source paths from the given pin and create imux items */
static int
parse_capture_source(struct hda_codec *codec, hda_nid_t pin, int cfg_idx, int num_adcs, const char *label, int anchor) { struct hda_gen_spec *spec = codec->spec; struct hda_input_mux *imux = &spec->input_mux; int imux_idx = imux->num_items; bool imux_added = false; int c; for (c = 0; c < num_adcs; c++) { struct nid_path *path; hda_nid_t adc = spec->adc_nids[c]; if (!is_reachable_path(codec, pin, adc)) continue; path = snd_hda_add_new_path(codec, pin, adc, anchor); if (!path) continue; print_nid_path("input", path); spec->input_paths[imux_idx][c] = snd_hda_get_path_idx(codec, path); if (!imux_added) { if (spec->hp_mic_pin == pin) spec->hp_mic_mux_idx = imux->num_items; spec->imux_pins[imux->num_items] = pin; snd_hda_add_imux_item(imux, label, cfg_idx, NULL); imux_added = true; } } return 0; } /* * create playback/capture controls for input pins */ /* fill the label for each input at first */ static int fill_input_pin_labels(struct hda_codec *codec) { struct hda_gen_spec *spec = codec->spec; const struct auto_pin_cfg *cfg = &spec->autocfg; int i; for (i = 0; i < cfg->num_inputs; i++) { hda_nid_t pin = cfg->inputs[i].pin; const char *label; int j, idx; if (!is_input_pin(codec, pin)) continue; label = hda_get_autocfg_input_label(codec, cfg, i); idx = 0; for (j = i - 1; j >= 0; j--) { if (spec->input_labels[j] && !strcmp(spec->input_labels[j], label)) { idx = spec->input_label_idxs[j] + 1; break; } } spec->input_labels[i] = label; spec->input_label_idxs[i] = idx; } return 0; } #define CFG_IDX_MIX 99 /* a dummy cfg->input idx for stereo mix */ static int create_input_ctls(struct hda_codec *codec) { struct hda_gen_spec *spec = codec->spec; const struct auto_pin_cfg *cfg = &spec->autocfg; hda_nid_t mixer = spec->mixer_nid; int num_adcs; int i, err; unsigned int val; num_adcs = fill_adc_nids(codec); if (num_adcs < 0) return 0; err = fill_input_pin_labels(codec); if (err < 0) return err; for (i = 0; i < cfg->num_inputs; i++) { hda_nid_t pin; pin = cfg->inputs[i].pin; if 
(!is_input_pin(codec, pin))
			continue;

		/* mic pins additionally get their default VREF bias */
		val = PIN_IN;
		if (cfg->inputs[i].type == AUTO_PIN_MIC)
			val |= snd_hda_get_default_vref(codec, pin);
		if (pin != spec->hp_mic_pin)
			set_pin_target(codec, pin, val, false);

		if (mixer) {
			if (is_reachable_path(codec, pin, mixer)) {
				err = new_analog_input(codec, i, pin,
						       spec->input_labels[i],
						       spec->input_label_idxs[i],
						       mixer);
				if (err < 0)
					return err;
			}
		}

		/* negative anchor (-mixer) — see parse_capture_source() */
		err = parse_capture_source(codec, pin, i, num_adcs,
					   spec->input_labels[i], -mixer);
		if (err < 0)
			return err;

		if (spec->add_jack_modes) {
			err = create_in_jack_mode(codec, pin);
			if (err < 0)
				return err;
		}
	}

	/* optionally expose the loopback mixer itself as "Stereo Mix" */
	if (mixer && spec->add_stereo_mix_input) {
		err = parse_capture_source(codec, mixer, CFG_IDX_MIX,
					   num_adcs, "Stereo Mix", 0);
		if (err < 0)
			return err;
	}

	return 0;
}

/*
 * input source mux
 */

/* get the input path specified by the given adc and imux indices */
static struct nid_path *get_input_path(struct hda_codec *codec,
				       int adc_idx, int imux_idx)
{
	struct hda_gen_spec *spec = codec->spec;

	/* sanity guards; out-of-range indices indicate a driver bug */
	if (imux_idx < 0 || imux_idx >= HDA_MAX_NUM_INPUTS) {
		snd_BUG();
		return NULL;
	}
	if (spec->dyn_adc_switch)
		adc_idx = spec->dyn_adc_idx[imux_idx];
	if (adc_idx < 0 || adc_idx >= AUTO_CFG_MAX_INS) {
		snd_BUG();
		return NULL;
	}
	return snd_hda_get_path_from_idx(codec,
					 spec->input_paths[imux_idx][adc_idx]);
}

static int mux_select(struct hda_codec *codec, unsigned int adc_idx,
		      unsigned int idx);

static int mux_enum_info(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_info *uinfo)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	return snd_hda_input_mux_info(&spec->input_mux, uinfo);
}

static int mux_enum_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	/* the ctls are created at once with multiple counts */
	unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);

	ucontrol->value.enumerated.item[0] = spec->cur_mux[adc_idx];
return 0;
}

static int mux_enum_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	return mux_select(codec, adc_idx,
			  ucontrol->value.enumerated.item[0]);
}

/* control template for the capture source selector */
static const struct snd_kcontrol_new cap_src_temp = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Input Source",
	.info = mux_enum_info,
	.get = mux_enum_get,
	.put = mux_enum_put,
};

/*
 * capture volume and capture switch ctls
 */

typedef int (*put_call_t)(struct snd_kcontrol *kcontrol,
			  struct snd_ctl_elem_value *ucontrol);

/* call the given amp update function for all amps in the imux list at once */
static int cap_put_caller(struct snd_kcontrol *kcontrol,
			  struct snd_ctl_elem_value *ucontrol,
			  put_call_t func, int type)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	const struct hda_input_mux *imux;
	struct nid_path *path;
	int i, adc_idx, err = 0;

	imux = &spec->input_mux;
	adc_idx = kcontrol->id.index;
	mutex_lock(&codec->control_mutex);
	/* we use the cache-only update at first since multiple input paths
	 * may share the same amp; by updating only caches, the redundant
	 * writes to hardware can be reduced.
*/
	codec->cached_write = 1;
	for (i = 0; i < imux->num_items; i++) {
		path = get_input_path(codec, adc_idx, i);
		if (!path || !path->ctls[type])
			continue;
		kcontrol->private_value = path->ctls[type];
		err = func(kcontrol, ucontrol);
		if (err < 0)
			goto error;
	}
 error:
	codec->cached_write = 0;
	mutex_unlock(&codec->control_mutex);
	snd_hda_codec_flush_cache(codec); /* flush the updates */
	if (err >= 0 && spec->cap_sync_hook)
		spec->cap_sync_hook(codec, ucontrol);
	return err;
}

/* capture volume ctl callbacks */
#define cap_vol_info	snd_hda_mixer_amp_volume_info
#define cap_vol_get	snd_hda_mixer_amp_volume_get
#define cap_vol_tlv	snd_hda_mixer_amp_tlv

static int cap_vol_put(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_value *ucontrol)
{
	return cap_put_caller(kcontrol, ucontrol,
			      snd_hda_mixer_amp_volume_put,
			      NID_PATH_VOL_CTL);
}

static const struct snd_kcontrol_new cap_vol_temp = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Capture Volume",
	.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
		   SNDRV_CTL_ELEM_ACCESS_TLV_READ |
		   SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK),
	.info = cap_vol_info,
	.get = cap_vol_get,
	.put = cap_vol_put,
	.tlv = { .c = cap_vol_tlv },
};

/* capture switch ctl callbacks */
#define cap_sw_info	snd_ctl_boolean_stereo_info
#define cap_sw_get	snd_hda_mixer_amp_switch_get

static int cap_sw_put(struct snd_kcontrol *kcontrol,
		      struct snd_ctl_elem_value *ucontrol)
{
	return cap_put_caller(kcontrol, ucontrol,
			      snd_hda_mixer_amp_switch_put,
			      NID_PATH_MUTE_CTL);
}

static const struct snd_kcontrol_new cap_sw_temp = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Capture Switch",
	.info = cap_sw_info,
	.get = cap_sw_get,
	.put = cap_sw_put,
};

/* pick up volume/mute amp controls along the given capture path,
 * examining up to three widgets from the ADC side
 */
static int parse_capvol_in_path(struct hda_codec *codec, struct nid_path *path)
{
	hda_nid_t nid;
	int i, depth;

	path->ctls[NID_PATH_VOL_CTL] = path->ctls[NID_PATH_MUTE_CTL] = 0;
	for (depth = 0; depth < 3; depth++) {
		if (depth >= path->depth)
			return -EINVAL;
		/* walk backwards from the ADC end of the path */
		i = path->depth - depth - 1;
		nid = path->path[i];
		if (!path->ctls[NID_PATH_VOL_CTL]) {
if (nid_has_volume(codec, nid, HDA_OUTPUT))
				path->ctls[NID_PATH_VOL_CTL] =
					HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
			else if (nid_has_volume(codec, nid, HDA_INPUT)) {
				int idx = path->idx[i];
				if (!depth && codec->single_adc_amp)
					idx = 0;
				path->ctls[NID_PATH_VOL_CTL] =
					HDA_COMPOSE_AMP_VAL(nid, 3, idx, HDA_INPUT);
			}
		}
		if (!path->ctls[NID_PATH_MUTE_CTL]) {
			if (nid_has_mute(codec, nid, HDA_OUTPUT))
				path->ctls[NID_PATH_MUTE_CTL] =
					HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
			else if (nid_has_mute(codec, nid, HDA_INPUT)) {
				int idx = path->idx[i];
				if (!depth && codec->single_adc_amp)
					idx = 0;
				path->ctls[NID_PATH_MUTE_CTL] =
					HDA_COMPOSE_AMP_VAL(nid, 3, idx, HDA_INPUT);
			}
		}
	}
	return 0;
}

/* does this pin need the inverted-dmic (split channel) handling?
 * true only for internal mic pins when inv_dmic_split is enabled
 */
static bool is_inv_dmic_pin(struct hda_codec *codec, hda_nid_t nid)
{
	struct hda_gen_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	unsigned int val;
	int i;

	if (!spec->inv_dmic_split)
		return false;
	for (i = 0; i < cfg->num_inputs; i++) {
		if (cfg->inputs[i].pin != nid)
			continue;
		if (cfg->inputs[i].type != AUTO_PIN_MIC)
			return false;
		val = snd_hda_codec_get_pincfg(codec, nid);
		return snd_hda_get_input_pin_attr(val) == INPUT_PIN_ATTR_INT;
	}
	return false;
}

/* capture switch put callback for a single control with hook call */
static int cap_single_sw_put(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	int ret;

	ret = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
	if (ret < 0)
		return ret;

	if (spec->cap_sync_hook)
		spec->cap_sync_hook(codec, ucontrol);

	return ret;
}

/* add a single capture volume or switch control; for an inverted dmic a
 * separate "Inverted" control for the right channel is created as well
 */
static int add_single_cap_ctl(struct hda_codec *codec, const char *label,
			      int idx, bool is_switch, unsigned int ctl,
			      bool inv_dmic)
{
	struct hda_gen_spec *spec = codec->spec;
	char tmpname[44];
	int type = is_switch ? HDA_CTL_WIDGET_MUTE : HDA_CTL_WIDGET_VOL;
	const char *sfx = is_switch ? "Switch" : "Volume";
	/* chs 1 = left channel only (inv dmic), 3 = stereo */
	unsigned int chs = inv_dmic ?
1 : 3;
	struct snd_kcontrol_new *knew;

	if (!ctl)
		return 0;
	if (label)
		snprintf(tmpname, sizeof(tmpname),
			 "%s Capture %s", label, sfx);
	else
		snprintf(tmpname, sizeof(tmpname),
			 "Capture %s", sfx);
	knew = add_control(spec, type, tmpname, idx,
			   amp_val_replace_channels(ctl, chs));
	if (!knew)
		return -ENOMEM;
	if (is_switch)
		knew->put = cap_single_sw_put;
	if (!inv_dmic)
		return 0;

	/* Make independent right kcontrol */
	if (label)
		snprintf(tmpname, sizeof(tmpname),
			 "Inverted %s Capture %s", label, sfx);
	else
		snprintf(tmpname, sizeof(tmpname),
			 "Inverted Capture %s", sfx);
	knew = add_control(spec, type, tmpname, idx,
			   amp_val_replace_channels(ctl, 2));
	if (!knew)
		return -ENOMEM;
	if (is_switch)
		knew->put = cap_single_sw_put;
	return 0;
}

/* create single (and simple) capture volume and switch controls */
static int create_single_cap_vol_ctl(struct hda_codec *codec, int idx,
				     unsigned int vol_ctl, unsigned int sw_ctl,
				     bool inv_dmic)
{
	int err;
	err = add_single_cap_ctl(codec, NULL, idx, false, vol_ctl, inv_dmic);
	if (err < 0)
		return err;
	err = add_single_cap_ctl(codec, NULL, idx, true, sw_ctl, inv_dmic);
	if (err < 0)
		return err;
	return 0;
}

/* create bound capture volume and switch controls */
static int create_bind_cap_vol_ctl(struct hda_codec *codec, int idx,
				   unsigned int vol_ctl, unsigned int sw_ctl)
{
	struct hda_gen_spec *spec = codec->spec;
	struct snd_kcontrol_new *knew;

	if (vol_ctl) {
		knew = snd_hda_gen_add_kctl(spec, NULL, &cap_vol_temp);
		if (!knew)
			return -ENOMEM;
		knew->index = idx;
		knew->private_value = vol_ctl;
		knew->subdevice = HDA_SUBDEV_AMP_FLAG;
	}
	if (sw_ctl) {
		knew = snd_hda_gen_add_kctl(spec, NULL, &cap_sw_temp);
		if (!knew)
			return -ENOMEM;
		knew->index = idx;
		knew->private_value = sw_ctl;
		knew->subdevice = HDA_SUBDEV_AMP_FLAG;
	}
	return 0;
}

/* return the vol ctl when used first in the imux list */
static unsigned int get_first_cap_ctl(struct hda_codec *codec, int idx,
				      int type)
{
	struct nid_path *path;
	unsigned int ctl;
	int i;

	path = get_input_path(codec, 0, idx);
	if
(!path)
		return 0;
	ctl = path->ctls[type];
	if (!ctl)
		return 0;
	/* suppress the ctl if the same amp was already exposed by an
	 * earlier imux item
	 */
	for (i = 0; i < idx - 1; i++) {
		path = get_input_path(codec, 0, i);
		if (path && path->ctls[type] == ctl)
			return 0;
	}
	return ctl;
}

/* create individual capture volume and switch controls per input */
static int create_multi_cap_vol_ctl(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	struct hda_input_mux *imux = &spec->input_mux;
	int i, err, type;

	for (i = 0; i < imux->num_items; i++) {
		bool inv_dmic;
		int idx;

		idx = imux->items[i].index;
		if (idx >= spec->autocfg.num_inputs)
			continue;
		inv_dmic = is_inv_dmic_pin(codec, spec->imux_pins[i]);
		/* type 0 = volume, type 1 = switch (is_switch flag) */
		for (type = 0; type < 2; type++) {
			err = add_single_cap_ctl(codec,
						 spec->input_labels[idx],
						 spec->input_label_idxs[idx],
						 type,
						 get_first_cap_ctl(codec, i, type),
						 inv_dmic);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

/* build the capture mixer controls (source enum + vol/sw per ADC) */
static int create_capture_mixers(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	struct hda_input_mux *imux = &spec->input_mux;
	int i, n, nums, err;

	if (spec->dyn_adc_switch)
		nums = 1;
	else
		nums = spec->num_adc_nids;

	if (!spec->auto_mic && imux->num_items > 1) {
		struct snd_kcontrol_new *knew;
		const char *name;
		name = nums > 1 ?
"Input Source" : "Capture Source";
		knew = snd_hda_gen_add_kctl(spec, name, &cap_src_temp);
		if (!knew)
			return -ENOMEM;
		knew->count = nums;
	}

	for (n = 0; n < nums; n++) {
		bool multi = false;
		bool multi_cap_vol = spec->multi_cap_vol;
		bool inv_dmic = false;
		int vol, sw;

		/* check whether all inputs of this ADC share the same
		 * volume/mute amp; if not, fall back to per-input ctls
		 */
		vol = sw = 0;
		for (i = 0; i < imux->num_items; i++) {
			struct nid_path *path;
			path = get_input_path(codec, n, i);
			if (!path)
				continue;
			parse_capvol_in_path(codec, path);
			if (!vol)
				vol = path->ctls[NID_PATH_VOL_CTL];
			else if (vol != path->ctls[NID_PATH_VOL_CTL]) {
				multi = true;
				if (!same_amp_caps(codec, vol,
				    path->ctls[NID_PATH_VOL_CTL], HDA_INPUT))
					multi_cap_vol = true;
			}
			if (!sw)
				sw = path->ctls[NID_PATH_MUTE_CTL];
			else if (sw != path->ctls[NID_PATH_MUTE_CTL]) {
				multi = true;
				if (!same_amp_caps(codec, sw,
				    path->ctls[NID_PATH_MUTE_CTL], HDA_INPUT))
					multi_cap_vol = true;
			}
			if (is_inv_dmic_pin(codec, spec->imux_pins[i]))
				inv_dmic = true;
		}

		if (!multi)
			err = create_single_cap_vol_ctl(codec, n, vol, sw,
							inv_dmic);
		else if (!multi_cap_vol)
			err = create_bind_cap_vol_ctl(codec, n, vol, sw);
		else
			err = create_multi_cap_vol_ctl(codec);
		if (err < 0)
			return err;
	}

	return 0;
}

/*
 * add mic boosts if needed
 */

/* check whether the given amp is feasible as a boost volume */
static bool check_boost_vol(struct hda_codec *codec, hda_nid_t nid,
			    int dir, int idx)
{
	unsigned int step;

	if (!nid_has_volume(codec, nid, dir) ||
	    is_ctl_associated(codec, nid, dir, idx, NID_PATH_VOL_CTL) ||
	    is_ctl_associated(codec, nid, dir, idx, NID_PATH_BOOST_CTL))
		return false;

	/* require a reasonably large step size (>= 0x20) to count as boost */
	step = (query_amp_caps(codec, nid, dir) & AC_AMPCAP_STEP_SIZE) >>
		AC_AMPCAP_STEP_SIZE_SHIFT;
	if (step < 0x20)
		return false;
	return true;
}

/* look for a boost amp in a widget close to the pin */
static unsigned int look_for_boost_amp(struct hda_codec *codec,
				       struct nid_path *path)
{
	unsigned int val = 0;
	hda_nid_t nid;
	int depth;

	/* scan up to three widgets starting from the pin side of the path */
	for (depth = 0; depth < 3; depth++) {
		if (depth >= path->depth - 1)
			break;
		nid = path->path[depth];
		if (depth && check_boost_vol(codec, nid,
HDA_OUTPUT, 0)) {
			val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
			break;
		} else if (check_boost_vol(codec, nid, HDA_INPUT,
					   path->idx[depth])) {
			val = HDA_COMPOSE_AMP_VAL(nid, 3, path->idx[depth],
						  HDA_INPUT);
			break;
		}
	}

	return val;
}

/* create "xxx Boost Volume" controls for mic/line-in imux items */
static int parse_mic_boost(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	struct hda_input_mux *imux = &spec->input_mux;
	int i;

	if (!spec->num_adc_nids)
		return 0;

	for (i = 0; i < imux->num_items; i++) {
		struct nid_path *path;
		unsigned int val;
		int idx;
		char boost_label[44];

		idx = imux->items[i].index;
		if (idx >= imux->num_items)
			continue;

		/* check only line-in and mic pins */
		if (cfg->inputs[idx].type > AUTO_PIN_LINE_IN)
			continue;

		path = get_input_path(codec, 0, i);
		if (!path)
			continue;

		val = look_for_boost_amp(codec, path);
		if (!val)
			continue;

		/* create a boost control */
		snprintf(boost_label, sizeof(boost_label),
			 "%s Boost Volume", spec->input_labels[idx]);
		if (!add_control(spec, HDA_CTL_WIDGET_VOL, boost_label,
				 spec->input_label_idxs[idx], val))
			return -ENOMEM;

		path->ctls[NID_PATH_BOOST_CTL] = val;
	}
	return 0;
}

/*
 * parse digital I/Os and set up NIDs in BIOS auto-parse mode
 */
static void parse_digital(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	struct nid_path *path;
	int i, nums;
	hda_nid_t dig_nid, pin;

	/* support multiple SPDIFs; the secondary is set up as a slave */
	nums = 0;
	for (i = 0; i < spec->autocfg.dig_outs; i++) {
		pin = spec->autocfg.dig_out_pins[i];
		dig_nid = look_for_dac(codec, pin, true);
		if (!dig_nid)
			continue;
		path = snd_hda_add_new_path(codec, dig_nid, pin, 0);
		if (!path)
			continue;
		print_nid_path("digout", path);
		path->active = true;
		spec->digout_paths[i] = snd_hda_get_path_idx(codec, path);
		set_pin_target(codec, pin, PIN_OUT, false);
		if (!nums) {
			spec->multiout.dig_out_nid = dig_nid;
			spec->dig_out_type = spec->autocfg.dig_out_type[0];
		} else {
			spec->multiout.slave_dig_outs = spec->slave_dig_outs;
			if (nums >=
ARRAY_SIZE(spec->slave_dig_outs) - 1)
				break;
			spec->slave_dig_outs[nums - 1] = dig_nid;
		}
		nums++;
	}

	if (spec->autocfg.dig_in_pin) {
		pin = spec->autocfg.dig_in_pin;
		/* scan all widgets for a digital audio-in converter that is
		 * reachable from the digital input pin
		 */
		dig_nid = codec->start_nid;
		for (i = 0; i < codec->num_nodes; i++, dig_nid++) {
			unsigned int wcaps = get_wcaps(codec, dig_nid);
			if (get_wcaps_type(wcaps) != AC_WID_AUD_IN)
				continue;
			if (!(wcaps & AC_WCAP_DIGITAL))
				continue;
			path = snd_hda_add_new_path(codec, pin, dig_nid, 0);
			if (path) {
				print_nid_path("digin", path);
				path->active = true;
				spec->dig_in_nid = dig_nid;
				spec->digin_path = snd_hda_get_path_idx(codec, path);
				set_pin_target(codec, pin, PIN_IN, false);
				break;
			}
		}
	}
}

/*
 * input MUX handling
 */

static bool dyn_adc_pcm_resetup(struct hda_codec *codec, int cur);

/* select the given imux item; either unmute exclusively or select the route */
static int mux_select(struct hda_codec *codec, unsigned int adc_idx,
		      unsigned int idx)
{
	struct hda_gen_spec *spec = codec->spec;
	const struct hda_input_mux *imux;
	struct nid_path *old_path, *path;

	imux = &spec->input_mux;
	if (!imux->num_items)
		return 0;
	if (idx >= imux->num_items)
		idx = imux->num_items - 1;
	if (spec->cur_mux[adc_idx] == idx)
		return 0;

	/* deactivate the previous route before switching over */
	old_path = get_input_path(codec, adc_idx, spec->cur_mux[adc_idx]);
	if (!old_path)
		return 0;
	if (old_path->active)
		snd_hda_activate_path(codec, old_path, false, false);

	spec->cur_mux[adc_idx] = idx;

	if (spec->hp_mic)
		update_hp_mic(codec, adc_idx, false);

	if (spec->dyn_adc_switch)
		dyn_adc_pcm_resetup(codec, idx);

	path = get_input_path(codec, adc_idx, idx);
	if (!path)
		return 0;
	if (path->active)
		return 0;
	snd_hda_activate_path(codec, path, true, false);
	if (spec->cap_sync_hook)
		spec->cap_sync_hook(codec, NULL);
	path_power_down_sync(codec, old_path);
	return 1;
}

/*
 * Jack detections for HP auto-mute and mic-switch
 */

/* check each pin in the given array; returns true if any of them is plugged */
static bool detect_jacks(struct hda_codec *codec, int num_pins, hda_nid_t *pins)
{
	int i, present = 0;

	for (i = 0; i <
num_pins; i++) {
		hda_nid_t nid = pins[i];
		if (!nid)
			break;
		/* don't detect pins retasked as inputs */
		if (snd_hda_codec_get_pin_target(codec, nid) & AC_PINCTL_IN_EN)
			continue;
		present |= snd_hda_jack_detect(codec, nid);
	}
	return present;
}

/* standard HP/line-out auto-mute helper */
static void do_automute(struct hda_codec *codec, int num_pins,
			hda_nid_t *pins, bool mute)
{
	struct hda_gen_spec *spec = codec->spec;
	int i;

	for (i = 0; i < num_pins; i++) {
		hda_nid_t nid = pins[i];
		unsigned int val, oldval;
		if (!nid)
			break;
		oldval = snd_hda_codec_get_pin_target(codec, nid);
		if (oldval & PIN_IN)
			continue; /* no mute for inputs */
		/* don't reset VREF value in case it's controlling
		 * the amp (see alc861_fixup_asus_amp_vref_0f())
		 */
		if (spec->keep_vref_in_automute)
			val = oldval & ~PIN_HP;
		else
			val = 0;
		if (!mute)
			val |= oldval;
		/* here we call update_pin_ctl() so that the pinctl is changed
		 * without changing the pinctl target value;
		 * the original target value will be still referred at the
		 * init / resume again
		 */
		update_pin_ctl(codec, nid, val);
		set_pin_eapd(codec, nid, !mute);
	}
}

/* Toggle outputs muting */
void snd_hda_gen_update_outputs(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	int on;

	/* Control HP pins/amps depending on master_mute state;
	 * in general, HP pins/amps control should be enabled in all cases,
	 * but currently set only for master_mute, just to be safe
	 */
	do_automute(codec, ARRAY_SIZE(spec->autocfg.hp_pins),
		    spec->autocfg.hp_pins, spec->master_mute);

	if (!spec->automute_speaker)
		on = 0;
	else
		on = spec->hp_jack_present | spec->line_jack_present;
	on |= spec->master_mute;
	spec->speaker_muted = on;
	do_automute(codec, ARRAY_SIZE(spec->autocfg.speaker_pins),
		    spec->autocfg.speaker_pins, on);

	/* toggle line-out mutes if needed, too */
	/* if LO is a copy of either HP or Speaker, don't need to handle it */
	if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0] ||
	    spec->autocfg.line_out_pins[0] == spec->autocfg.speaker_pins[0])
return;
	if (!spec->automute_lo)
		on = 0;
	else
		on = spec->hp_jack_present;
	on |= spec->master_mute;
	spec->line_out_muted = on;
	do_automute(codec, ARRAY_SIZE(spec->autocfg.line_out_pins),
		    spec->autocfg.line_out_pins, on);
}
EXPORT_SYMBOL_HDA(snd_hda_gen_update_outputs);

/* invoke the codec-specific automute hook if set, else the standard updater */
static void call_update_outputs(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	if (spec->automute_hook)
		spec->automute_hook(codec);
	else
		snd_hda_gen_update_outputs(codec);
}

/* standard HP-automute helper */
void snd_hda_gen_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack)
{
	struct hda_gen_spec *spec = codec->spec;
	hda_nid_t *pins = spec->autocfg.hp_pins;
	int num_pins = ARRAY_SIZE(spec->autocfg.hp_pins);

	/* No detection for the first HP jack during indep-HP mode */
	if (spec->indep_hp_enabled) {
		pins++;
		num_pins--;
	}

	spec->hp_jack_present = detect_jacks(codec, num_pins, pins);
	if (!spec->detect_hp || (!spec->automute_speaker && !spec->automute_lo))
		return;
	call_update_outputs(codec);
}
EXPORT_SYMBOL_HDA(snd_hda_gen_hp_automute);

/* standard line-out-automute helper */
void snd_hda_gen_line_automute(struct hda_codec *codec,
			       struct hda_jack_tbl *jack)
{
	struct hda_gen_spec *spec = codec->spec;

	if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
		return;
	/* check LO jack only when it's different from HP */
	if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0])
		return;

	spec->line_jack_present =
		detect_jacks(codec, ARRAY_SIZE(spec->autocfg.line_out_pins),
			     spec->autocfg.line_out_pins);
	if (!spec->automute_speaker || !spec->detect_lo)
		return;
	call_update_outputs(codec);
}
EXPORT_SYMBOL_HDA(snd_hda_gen_line_automute);

/* standard mic auto-switch helper */
void snd_hda_gen_mic_autoswitch(struct hda_codec *codec,
				struct hda_jack_tbl *jack)
{
	struct hda_gen_spec *spec = codec->spec;
	int i;

	if (!spec->auto_mic)
		return;

	/* scan entries from the end (highest attr) and fall back to
	 * entry 0 when nothing is plugged
	 */
	for (i = spec->am_num_entries - 1; i > 0; i--) {
		hda_nid_t pin = spec->am_entry[i].pin;
		/* don't detect pins retasked as outputs */
		if
(snd_hda_codec_get_pin_target(codec, pin) & AC_PINCTL_OUT_EN)
			continue;
		if (snd_hda_jack_detect(codec, pin)) {
			mux_select(codec, 0, spec->am_entry[i].idx);
			return;
		}
	}
	mux_select(codec, 0, spec->am_entry[0].idx);
}
EXPORT_SYMBOL_HDA(snd_hda_gen_mic_autoswitch);

/* call appropriate hooks */
static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack)
{
	struct hda_gen_spec *spec = codec->spec;
	if (spec->hp_automute_hook)
		spec->hp_automute_hook(codec, jack);
	else
		snd_hda_gen_hp_automute(codec, jack);
}

static void call_line_automute(struct hda_codec *codec,
			       struct hda_jack_tbl *jack)
{
	struct hda_gen_spec *spec = codec->spec;
	if (spec->line_automute_hook)
		spec->line_automute_hook(codec, jack);
	else
		snd_hda_gen_line_automute(codec, jack);
}

static void call_mic_autoswitch(struct hda_codec *codec,
				struct hda_jack_tbl *jack)
{
	struct hda_gen_spec *spec = codec->spec;
	if (spec->mic_autoswitch_hook)
		spec->mic_autoswitch_hook(codec, jack);
	else
		snd_hda_gen_mic_autoswitch(codec, jack);
}

/* update jack retasking */
static void update_automute_all(struct hda_codec *codec)
{
	call_hp_automute(codec, NULL);
	call_line_automute(codec, NULL);
	call_mic_autoswitch(codec, NULL);
}

/*
 * Auto-Mute mode mixer enum support
 */
static int automute_mode_info(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_info *uinfo)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	static const char * const texts3[] = {
		"Disabled", "Speaker Only", "Line Out+Speaker"
	};

	/* 3-way enum only when both speaker and line-out automute exist;
	 * otherwise a plain on/off boolean
	 */
	if (spec->automute_speaker_possible && spec->automute_lo_possible)
		return snd_hda_enum_helper_info(kcontrol, uinfo, 3, texts3);
	return snd_hda_enum_bool_helper_info(kcontrol, uinfo);
}

static int automute_mode_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;
	unsigned int val = 0;
	if (spec->automute_speaker)
		val++;
	if (spec->automute_lo)
		val++;
ucontrol->value.enumerated.item[0] = val;
	return 0;
}

static int automute_mode_put(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct hda_gen_spec *spec = codec->spec;

	switch (ucontrol->value.enumerated.item[0]) {
	case 0:
		/* disable all automutes */
		if (!spec->automute_speaker && !spec->automute_lo)
			return 0;
		spec->automute_speaker = 0;
		spec->automute_lo = 0;
		break;
	case 1:
		/* "Speaker Only" (or plain "On" for the boolean variant) */
		if (spec->automute_speaker_possible) {
			if (!spec->automute_lo && spec->automute_speaker)
				return 0;
			spec->automute_speaker = 1;
			spec->automute_lo = 0;
		} else if (spec->automute_lo_possible) {
			if (spec->automute_lo)
				return 0;
			spec->automute_lo = 1;
		} else
			return -EINVAL;
		break;
	case 2:
		/* "Line Out+Speaker": both automutes together */
		if (!spec->automute_lo_possible ||
		    !spec->automute_speaker_possible)
			return -EINVAL;
		if (spec->automute_speaker && spec->automute_lo)
			return 0;
		spec->automute_speaker = 1;
		spec->automute_lo = 1;
		break;
	default:
		return -EINVAL;
	}
	call_update_outputs(codec);
	return 1;
}

static const struct snd_kcontrol_new automute_mode_enum = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Auto-Mute Mode",
	.info = automute_mode_info,
	.get = automute_mode_get,
	.put = automute_mode_put,
};

static int add_automute_mode_enum(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;

	if (!snd_hda_gen_add_kctl(spec, NULL, &automute_mode_enum))
		return -ENOMEM;
	return 0;
}

/*
 * Check the availability of HP/line-out auto-mute;
 * Set up appropriately if really supported
 */
static int check_auto_mute_availability(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int present = 0;
	int i, err;

	if (spec->suppress_auto_mute)
		return 0;

	if (cfg->hp_pins[0])
		present++;
	if (cfg->line_out_pins[0])
		present++;
	if (cfg->speaker_pins[0])
		present++;
	if (present < 2) /* need two different output types */
		return 0;

	/* when the primary line-out is really a speaker, mirror it to the
	 * speaker pin list so automute can act on it
	 */
	if (!cfg->speaker_pins[0] &&
	    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
		memcpy(cfg->speaker_pins, cfg->line_out_pins,
sizeof(cfg->speaker_pins));
		cfg->speaker_outs = cfg->line_outs;
	}

	if (!cfg->hp_pins[0] &&
	    cfg->line_out_type == AUTO_PIN_HP_OUT) {
		memcpy(cfg->hp_pins, cfg->line_out_pins,
		       sizeof(cfg->hp_pins));
		cfg->hp_outs = cfg->line_outs;
	}

	/* enable jack-detection callbacks on all detectable HP pins */
	for (i = 0; i < cfg->hp_outs; i++) {
		hda_nid_t nid = cfg->hp_pins[i];
		if (!is_jack_detectable(codec, nid))
			continue;
		snd_printdd("hda-codec: Enable HP auto-muting on NID 0x%x\n",
			    nid);
		snd_hda_jack_detect_enable_callback(codec, nid,
						    HDA_GEN_HP_EVENT,
						    call_hp_automute);
		spec->detect_hp = 1;
	}

	if (cfg->line_out_type == AUTO_PIN_LINE_OUT && cfg->line_outs) {
		if (cfg->speaker_outs)
			for (i = 0; i < cfg->line_outs; i++) {
				hda_nid_t nid = cfg->line_out_pins[i];
				if (!is_jack_detectable(codec, nid))
					continue;
				snd_printdd("hda-codec: Enable Line-Out auto-muting on NID 0x%x\n", nid);
				snd_hda_jack_detect_enable_callback(codec, nid,
								    HDA_GEN_FRONT_EVENT,
								    call_line_automute);
				spec->detect_lo = 1;
			}
		spec->automute_lo_possible = spec->detect_hp;
	}

	spec->automute_speaker_possible = cfg->speaker_outs &&
		(spec->detect_hp || spec->detect_lo);

	spec->automute_lo = spec->automute_lo_possible;
	spec->automute_speaker = spec->automute_speaker_possible;

	if (spec->automute_speaker_possible || spec->automute_lo_possible) {
		/* create a control for automute mode */
		err = add_automute_mode_enum(codec);
		if (err < 0)
			return err;
	}
	return 0;
}

/* check whether all auto-mic pins are valid; setup indices if OK */
static bool auto_mic_check_imux(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	const struct hda_input_mux *imux;
	int i;

	imux = &spec->input_mux;
	for (i = 0; i < spec->am_num_entries; i++) {
		spec->am_entry[i].idx =
			find_idx_in_nid_list(spec->am_entry[i].pin,
					     spec->imux_pins,
					     imux->num_items);
		if (spec->am_entry[i].idx < 0)
			return false; /* no corresponding imux */
	}

	/* we don't need the jack detection for the first pin */
	for (i = 1; i < spec->am_num_entries; i++)
		snd_hda_jack_detect_enable_callback(codec,
						    spec->am_entry[i].pin,
						    HDA_GEN_MIC_EVENT,
call_mic_autoswitch);
	return true;
}

/* sort() comparator: order auto-mic entries by ascending pin attribute */
static int compare_attr(const void *ap, const void *bp)
{
	const struct automic_entry *a = ap;
	const struct automic_entry *b = bp;
	return (int)(a->attr - b->attr);
}

/*
 * Check the availability of auto-mic switch;
 * Set up if really supported
 */
static int check_auto_mic_availability(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	unsigned int types;
	int i, num_pins;

	if (spec->suppress_auto_mic)
		return 0;

	/* collect candidate pins; each location attribute may appear once */
	types = 0;
	num_pins = 0;
	for (i = 0; i < cfg->num_inputs; i++) {
		hda_nid_t nid = cfg->inputs[i].pin;
		unsigned int attr;
		attr = snd_hda_codec_get_pincfg(codec, nid);
		attr = snd_hda_get_input_pin_attr(attr);
		if (types & (1 << attr))
			return 0; /* already occupied */
		switch (attr) {
		case INPUT_PIN_ATTR_INT:
			if (cfg->inputs[i].type != AUTO_PIN_MIC)
				return 0; /* invalid type */
			break;
		case INPUT_PIN_ATTR_UNUSED:
			return 0; /* invalid entry */
		default:
			if (cfg->inputs[i].type > AUTO_PIN_LINE_IN)
				return 0; /* invalid type */
			if (!spec->line_in_auto_switch &&
			    cfg->inputs[i].type != AUTO_PIN_MIC)
				return 0; /* only mic is allowed */
			if (!is_jack_detectable(codec, nid))
				return 0; /* no unsol support */
			break;
		}
		if (num_pins >= MAX_AUTO_MIC_PINS)
			return 0;
		types |= (1 << attr);
		spec->am_entry[num_pins].pin = nid;
		spec->am_entry[num_pins].attr = attr;
		num_pins++;
	}

	if (num_pins < 2)
		return 0;

	spec->am_num_entries = num_pins;
	/* sort the am_entry in the order of attr so that the pin with a
	 * higher attr will be selected when the jack is plugged.
*/
	sort(spec->am_entry, num_pins, sizeof(spec->am_entry[0]),
	     compare_attr, NULL);

	if (!auto_mic_check_imux(codec))
		return 0;

	spec->auto_mic = 1;
	spec->num_adc_nids = 1;
	spec->cur_mux[0] = spec->am_entry[0].idx;
	snd_printdd("hda-codec: Enable auto-mic switch on NID 0x%x/0x%x/0x%x\n",
		    spec->am_entry[0].pin,
		    spec->am_entry[1].pin,
		    spec->am_entry[2].pin);

	return 0;
}

/* power_filter hook; make inactive widgets into power down */
static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
						  hda_nid_t nid,
						  unsigned int power_state)
{
	/* only demote to D3 widgets that are being set to D0, are below
	 * AC_WID_POWER, and belong to no currently active path
	 */
	if (power_state != AC_PWRST_D0)
		return power_state;
	if (get_wcaps_type(get_wcaps(codec, nid)) >= AC_WID_POWER)
		return power_state;
	if (is_active_nid_for_any(codec, nid))
		return power_state;
	return AC_PWRST_D3;
}

/*
 * Parse the given BIOS configuration and set up the hda_gen_spec
 *
 * return 1 if successful, 0 if the proper config is not found,
 * or a negative error code
 */
int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
				  struct auto_pin_cfg *cfg)
{
	struct hda_gen_spec *spec = codec->spec;
	int err;

	parse_user_hints(codec);

	if (spec->mixer_nid && !spec->mixer_merge_nid)
		spec->mixer_merge_nid = spec->mixer_nid;

	/* keep a private copy of the config so the caller's is untouched */
	if (cfg != &spec->autocfg) {
		spec->autocfg = *cfg;
		cfg = &spec->autocfg;
	}

	if (!spec->main_out_badness)
		spec->main_out_badness = &hda_main_out_badness;
	if (!spec->extra_out_badness)
		spec->extra_out_badness = &hda_extra_out_badness;

	fill_all_dac_nids(codec);

	if (!cfg->line_outs) {
		if (cfg->dig_outs || cfg->dig_in_pin) {
			spec->multiout.max_channels = 2;
			spec->no_analog = 1;
			goto dig_only;
		}
		return 0; /* can't find valid BIOS pin config */
	}

	if (!spec->no_primary_hp &&
	    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT &&
	    cfg->line_outs <= cfg->hp_outs) {
		/* use HP as primary out */
		cfg->speaker_outs = cfg->line_outs;
		memcpy(cfg->speaker_pins, cfg->line_out_pins,
		       sizeof(cfg->speaker_pins));
		cfg->line_outs = cfg->hp_outs;
		memcpy(cfg->line_out_pins, cfg->hp_pins,
		       sizeof(cfg->hp_pins));
		cfg->hp_outs = 0;
		memset(cfg->hp_pins, 0,
sizeof(cfg->hp_pins));
		cfg->line_out_type = AUTO_PIN_HP_OUT;
	}

	/* build output paths and their mixer controls */
	err = parse_output_paths(codec);
	if (err < 0)
		return err;
	err = create_multi_channel_mode(codec);
	if (err < 0)
		return err;
	err = create_multi_out_ctls(codec, cfg);
	if (err < 0)
		return err;
	err = create_hp_out_ctls(codec);
	if (err < 0)
		return err;
	err = create_speaker_out_ctls(codec);
	if (err < 0)
		return err;
	err = create_indep_hp_ctls(codec);
	if (err < 0)
		return err;
	err = create_loopback_mixing_ctl(codec);
	if (err < 0)
		return err;
	err = create_hp_mic(codec);
	if (err < 0)
		return err;
	err = create_input_ctls(codec);
	if (err < 0)
		return err;

	spec->const_channel_count = spec->ext_channel_count;
	/* check the multiple speaker and headphone pins */
	if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
		spec->const_channel_count = max(spec->const_channel_count,
						cfg->speaker_outs * 2);
	if (cfg->line_out_type != AUTO_PIN_HP_OUT)
		spec->const_channel_count = max(spec->const_channel_count,
						cfg->hp_outs * 2);
	spec->multiout.max_channels = max(spec->ext_channel_count,
					  spec->const_channel_count);

	err = check_auto_mute_availability(codec);
	if (err < 0)
		return err;

	err = check_dyn_adc_switch(codec);
	if (err < 0)
		return err;

	err = check_auto_mic_availability(codec);
	if (err < 0)
		return err;

	err = create_capture_mixers(codec);
	if (err < 0)
		return err;

	err = parse_mic_boost(codec);
	if (err < 0)
		return err;

	if (spec->add_jack_modes) {
		if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
			err = create_out_jack_modes(codec, cfg->line_outs,
						    cfg->line_out_pins);
			if (err < 0)
				return err;
		}
		if (cfg->line_out_type != AUTO_PIN_HP_OUT) {
			err = create_out_jack_modes(codec, cfg->hp_outs,
						    cfg->hp_pins);
			if (err < 0)
				return err;
		}
	}

 dig_only:
	parse_digital(codec);

	if (spec->power_down_unused)
		codec->power_filter = snd_hda_gen_path_power_filter;

	if (!spec->no_analog && spec->beep_nid) {
		err = snd_hda_attach_beep_device(codec, spec->beep_nid);
		if (err < 0)
			return err;
	}

	return 1;
}
EXPORT_SYMBOL_HDA(snd_hda_gen_parse_auto_config);

/*
 * Build control
elements */

/* slave controls for virtual master */
static const char * const slave_pfxs[] = {
	"Front", "Surround", "Center", "LFE", "Side",
	"Headphone", "Speaker", "Mono", "Line Out",
	"CLFE", "Bass Speaker", "PCM",
	"Speaker Front", "Speaker Surround", "Speaker CLFE", "Speaker Side",
	"Headphone Front", "Headphone Surround", "Headphone CLFE",
	"Headphone Side",
	NULL,
};

int snd_hda_gen_build_controls(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	int err;

	/* register all mixer elements collected during parsing */
	if (spec->kctls.used) {
		err = snd_hda_add_new_ctls(codec, spec->kctls.list);
		if (err < 0)
			return err;
	}

	if (spec->multiout.dig_out_nid) {
		err = snd_hda_create_dig_out_ctls(codec,
						  spec->multiout.dig_out_nid,
						  spec->multiout.dig_out_nid,
						  spec->pcm_rec[1].pcm_type);
		if (err < 0)
			return err;
		if (!spec->no_analog) {
			err = snd_hda_create_spdif_share_sw(codec,
							    &spec->multiout);
			if (err < 0)
				return err;
			spec->multiout.share_spdif = 1;
		}
	}
	if (spec->dig_in_nid) {
		err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in_nid);
		if (err < 0)
			return err;
	}

	/* if we have no master control, let's create it */
	if (!spec->no_analog &&
	    !snd_hda_find_mixer_ctl(codec, "Master Playback Volume")) {
		err = snd_hda_add_vmaster(codec, "Master Playback Volume",
					  spec->vmaster_tlv, slave_pfxs,
					  "Playback Volume");
		if (err < 0)
			return err;
	}
	if (!spec->no_analog &&
	    !snd_hda_find_mixer_ctl(codec, "Master Playback Switch")) {
		err = __snd_hda_add_vmaster(codec, "Master Playback Switch",
					    NULL, slave_pfxs,
					    "Playback Switch",
					    true, &spec->vmaster_mute.sw_kctl);
		if (err < 0)
			return err;
		if (spec->vmaster_mute.hook)
			snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute,
						 spec->vmaster_mute_enum);
	}

	free_kctls(spec); /* no longer needed */

	err = snd_hda_jack_add_kctls(codec, &spec->autocfg);
	if (err < 0)
		return err;

	return 0;
}
EXPORT_SYMBOL_HDA(snd_hda_gen_build_controls);

/*
 * PCM definitions
 */

/* invoke the optional per-codec playback-stream hook */
static void call_pcm_playback_hook(struct hda_pcm_stream *hinfo,
				   struct hda_codec *codec,
				   struct snd_pcm_substream *substream,
				   int action)
{
struct hda_gen_spec *spec = codec->spec; if (spec->pcm_playback_hook) spec->pcm_playback_hook(hinfo, codec, substream, action); } static void call_pcm_capture_hook(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream, int action) { struct hda_gen_spec *spec = codec->spec; if (spec->pcm_capture_hook) spec->pcm_capture_hook(hinfo, codec, substream, action); } /* * Analog playback callbacks */ static int playback_pcm_open(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; int err; mutex_lock(&spec->pcm_mutex); err = snd_hda_multi_out_analog_open(codec, &spec->multiout, substream, hinfo); if (!err) { spec->active_streams |= 1 << STREAM_MULTI_OUT; call_pcm_playback_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_OPEN); } mutex_unlock(&spec->pcm_mutex); return err; } static int playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; int err; err = snd_hda_multi_out_analog_prepare(codec, &spec->multiout, stream_tag, format, substream); if (!err) call_pcm_playback_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_PREPARE); return err; } static int playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; int err; err = snd_hda_multi_out_analog_cleanup(codec, &spec->multiout); if (!err) call_pcm_playback_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_CLEANUP); return err; } static int playback_pcm_close(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; mutex_lock(&spec->pcm_mutex); spec->active_streams &= ~(1 << STREAM_MULTI_OUT); call_pcm_playback_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_CLOSE); mutex_unlock(&spec->pcm_mutex); 
return 0; } static int capture_pcm_open(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { call_pcm_capture_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_OPEN); return 0; } static int capture_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { snd_hda_codec_setup_stream(codec, hinfo->nid, stream_tag, 0, format); call_pcm_capture_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_PREPARE); return 0; } static int capture_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { snd_hda_codec_cleanup_stream(codec, hinfo->nid); call_pcm_capture_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_CLEANUP); return 0; } static int capture_pcm_close(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { call_pcm_capture_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_CLOSE); return 0; } static int alt_playback_pcm_open(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; int err = 0; mutex_lock(&spec->pcm_mutex); if (!spec->indep_hp_enabled) err = -EBUSY; else spec->active_streams |= 1 << STREAM_INDEP_HP; call_pcm_playback_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_OPEN); mutex_unlock(&spec->pcm_mutex); return err; } static int alt_playback_pcm_close(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; mutex_lock(&spec->pcm_mutex); spec->active_streams &= ~(1 << STREAM_INDEP_HP); call_pcm_playback_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_CLOSE); mutex_unlock(&spec->pcm_mutex); return 0; } static int alt_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { 
snd_hda_codec_setup_stream(codec, hinfo->nid, stream_tag, 0, format); call_pcm_playback_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_PREPARE); return 0; } static int alt_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { snd_hda_codec_cleanup_stream(codec, hinfo->nid); call_pcm_playback_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_CLEANUP); return 0; } /* * Digital out */ static int dig_playback_pcm_open(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; return snd_hda_multi_out_dig_open(codec, &spec->multiout); } static int dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, stream_tag, format, substream); } static int dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout); } static int dig_playback_pcm_close(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; return snd_hda_multi_out_dig_close(codec, &spec->multiout); } /* * Analog capture */ #define alt_capture_pcm_open capture_pcm_open #define alt_capture_pcm_close capture_pcm_close static int alt_capture_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; snd_hda_codec_setup_stream(codec, spec->adc_nids[substream->number + 1], stream_tag, 0, format); call_pcm_capture_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_PREPARE); return 0; } static int 
alt_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; snd_hda_codec_cleanup_stream(codec, spec->adc_nids[substream->number + 1]); call_pcm_capture_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_CLEANUP); return 0; } /* */ static const struct hda_pcm_stream pcm_analog_playback = { .substreams = 1, .channels_min = 2, .channels_max = 8, /* NID is set in build_pcms */ .ops = { .open = playback_pcm_open, .close = playback_pcm_close, .prepare = playback_pcm_prepare, .cleanup = playback_pcm_cleanup }, }; static const struct hda_pcm_stream pcm_analog_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, /* NID is set in build_pcms */ .ops = { .open = capture_pcm_open, .close = capture_pcm_close, .prepare = capture_pcm_prepare, .cleanup = capture_pcm_cleanup }, }; static const struct hda_pcm_stream pcm_analog_alt_playback = { .substreams = 1, .channels_min = 2, .channels_max = 2, /* NID is set in build_pcms */ .ops = { .open = alt_playback_pcm_open, .close = alt_playback_pcm_close, .prepare = alt_playback_pcm_prepare, .cleanup = alt_playback_pcm_cleanup }, }; static const struct hda_pcm_stream pcm_analog_alt_capture = { .substreams = 2, /* can be overridden */ .channels_min = 2, .channels_max = 2, /* NID is set in build_pcms */ .ops = { .open = alt_capture_pcm_open, .close = alt_capture_pcm_close, .prepare = alt_capture_pcm_prepare, .cleanup = alt_capture_pcm_cleanup }, }; static const struct hda_pcm_stream pcm_digital_playback = { .substreams = 1, .channels_min = 2, .channels_max = 2, /* NID is set in build_pcms */ .ops = { .open = dig_playback_pcm_open, .close = dig_playback_pcm_close, .prepare = dig_playback_pcm_prepare, .cleanup = dig_playback_pcm_cleanup }, }; static const struct hda_pcm_stream pcm_digital_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, /* NID is set in build_pcms */ }; /* Used by build_pcms to flag that a PCM has no 
playback stream */ static const struct hda_pcm_stream pcm_null_stream = { .substreams = 0, .channels_min = 0, .channels_max = 0, }; /* * dynamic changing ADC PCM streams */ static bool dyn_adc_pcm_resetup(struct hda_codec *codec, int cur) { struct hda_gen_spec *spec = codec->spec; hda_nid_t new_adc = spec->adc_nids[spec->dyn_adc_idx[cur]]; if (spec->cur_adc && spec->cur_adc != new_adc) { /* stream is running, let's swap the current ADC */ __snd_hda_codec_cleanup_stream(codec, spec->cur_adc, 1); spec->cur_adc = new_adc; snd_hda_codec_setup_stream(codec, new_adc, spec->cur_adc_stream_tag, 0, spec->cur_adc_format); return true; } return false; } /* analog capture with dynamic dual-adc changes */ static int dyn_adc_capture_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; spec->cur_adc = spec->adc_nids[spec->dyn_adc_idx[spec->cur_mux[0]]]; spec->cur_adc_stream_tag = stream_tag; spec->cur_adc_format = format; snd_hda_codec_setup_stream(codec, spec->cur_adc, stream_tag, 0, format); return 0; } static int dyn_adc_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct hda_gen_spec *spec = codec->spec; snd_hda_codec_cleanup_stream(codec, spec->cur_adc); spec->cur_adc = 0; return 0; } static const struct hda_pcm_stream dyn_adc_pcm_analog_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, .nid = 0, /* fill later */ .ops = { .prepare = dyn_adc_capture_pcm_prepare, .cleanup = dyn_adc_capture_pcm_cleanup }, }; static void fill_pcm_stream_name(char *str, size_t len, const char *sfx, const char *chip_name) { char *p; if (*str) return; strlcpy(str, chip_name, len); /* drop non-alnum chars after a space */ for (p = strchr(str, ' '); p; p = strchr(p + 1, ' ')) { if (!isalnum(p[1])) { *p = 0; break; } } strlcat(str, sfx, len); } /* build PCM streams based on the 
parsed results */ int snd_hda_gen_build_pcms(struct hda_codec *codec) { struct hda_gen_spec *spec = codec->spec; struct hda_pcm *info = spec->pcm_rec; const struct hda_pcm_stream *p; bool have_multi_adcs; codec->num_pcms = 1; codec->pcm_info = info; if (spec->no_analog) goto skip_analog; fill_pcm_stream_name(spec->stream_name_analog, sizeof(spec->stream_name_analog), " Analog", codec->chip_name); info->name = spec->stream_name_analog; if (spec->multiout.num_dacs > 0) { p = spec->stream_analog_playback; if (!p) p = &pcm_analog_playback; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = *p; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->multiout.dac_nids[0]; info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max = spec->multiout.max_channels; if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT && spec->autocfg.line_outs == 2) info->stream[SNDRV_PCM_STREAM_PLAYBACK].chmap = snd_pcm_2_1_chmaps; } if (spec->num_adc_nids) { p = spec->stream_analog_capture; if (!p) { if (spec->dyn_adc_switch) p = &dyn_adc_pcm_analog_capture; else p = &pcm_analog_capture; } info->stream[SNDRV_PCM_STREAM_CAPTURE] = *p; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adc_nids[0]; } skip_analog: /* SPDIF for stream index #1 */ if (spec->multiout.dig_out_nid || spec->dig_in_nid) { fill_pcm_stream_name(spec->stream_name_digital, sizeof(spec->stream_name_digital), " Digital", codec->chip_name); codec->num_pcms = 2; codec->slave_dig_outs = spec->multiout.slave_dig_outs; info = spec->pcm_rec + 1; info->name = spec->stream_name_digital; if (spec->dig_out_type) info->pcm_type = spec->dig_out_type; else info->pcm_type = HDA_PCM_TYPE_SPDIF; if (spec->multiout.dig_out_nid) { p = spec->stream_digital_playback; if (!p) p = &pcm_digital_playback; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = *p; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->multiout.dig_out_nid; } if (spec->dig_in_nid) { p = spec->stream_digital_capture; if (!p) p = &pcm_digital_capture; info->stream[SNDRV_PCM_STREAM_CAPTURE] = *p; 
info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in_nid; } } if (spec->no_analog) return 0; /* If the use of more than one ADC is requested for the current * model, configure a second analog capture-only PCM. */ have_multi_adcs = (spec->num_adc_nids > 1) && !spec->dyn_adc_switch && !spec->auto_mic; /* Additional Analaog capture for index #2 */ if (spec->alt_dac_nid || have_multi_adcs) { fill_pcm_stream_name(spec->stream_name_alt_analog, sizeof(spec->stream_name_alt_analog), " Alt Analog", codec->chip_name); codec->num_pcms = 3; info = spec->pcm_rec + 2; info->name = spec->stream_name_alt_analog; if (spec->alt_dac_nid) { p = spec->stream_analog_alt_playback; if (!p) p = &pcm_analog_alt_playback; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = *p; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->alt_dac_nid; } else { info->stream[SNDRV_PCM_STREAM_PLAYBACK] = pcm_null_stream; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = 0; } if (have_multi_adcs) { p = spec->stream_analog_alt_capture; if (!p) p = &pcm_analog_alt_capture; info->stream[SNDRV_PCM_STREAM_CAPTURE] = *p; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adc_nids[1]; info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = spec->num_adc_nids - 1; } else { info->stream[SNDRV_PCM_STREAM_CAPTURE] = pcm_null_stream; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = 0; } } return 0; } EXPORT_SYMBOL_HDA(snd_hda_gen_build_pcms); /* * Standard auto-parser initializations */ /* configure the given path as a proper output */ static void set_output_and_unmute(struct hda_codec *codec, int path_idx) { struct nid_path *path; hda_nid_t pin; path = snd_hda_get_path_from_idx(codec, path_idx); if (!path || !path->depth) return; pin = path->path[path->depth - 1]; restore_pin_ctl(codec, pin); snd_hda_activate_path(codec, path, path->active, aamix_default(codec->spec)); set_pin_eapd(codec, pin, path->active); } /* initialize primary output paths */ static void init_multi_out(struct hda_codec *codec) { struct hda_gen_spec *spec = 
codec->spec; int i; for (i = 0; i < spec->autocfg.line_outs; i++) set_output_and_unmute(codec, spec->out_paths[i]); } static void __init_extra_out(struct hda_codec *codec, int num_outs, int *paths) { int i; for (i = 0; i < num_outs; i++) set_output_and_unmute(codec, paths[i]); } /* initialize hp and speaker paths */ static void init_extra_out(struct hda_codec *codec) { struct hda_gen_spec *spec = codec->spec; if (spec->autocfg.line_out_type != AUTO_PIN_HP_OUT) __init_extra_out(codec, spec->autocfg.hp_outs, spec->hp_paths); if (spec->autocfg.line_out_type != AUTO_PIN_SPEAKER_OUT) __init_extra_out(codec, spec->autocfg.speaker_outs, spec->speaker_paths); } /* initialize multi-io paths */ static void init_multi_io(struct hda_codec *codec) { struct hda_gen_spec *spec = codec->spec; int i; for (i = 0; i < spec->multi_ios; i++) { hda_nid_t pin = spec->multi_io[i].pin; struct nid_path *path; path = get_multiio_path(codec, i); if (!path) continue; if (!spec->multi_io[i].ctl_in) spec->multi_io[i].ctl_in = snd_hda_codec_get_pin_target(codec, pin); snd_hda_activate_path(codec, path, path->active, aamix_default(spec)); } } /* set up input pins and loopback paths */ static void init_analog_input(struct hda_codec *codec) { struct hda_gen_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; int i; for (i = 0; i < cfg->num_inputs; i++) { hda_nid_t nid = cfg->inputs[i].pin; if (is_input_pin(codec, nid)) restore_pin_ctl(codec, nid); /* init loopback inputs */ if (spec->mixer_nid) { resume_path_from_idx(codec, spec->loopback_paths[i]); resume_path_from_idx(codec, spec->loopback_merge_path); } } } /* initialize ADC paths */ static void init_input_src(struct hda_codec *codec) { struct hda_gen_spec *spec = codec->spec; struct hda_input_mux *imux = &spec->input_mux; struct nid_path *path; int i, c, nums; if (spec->dyn_adc_switch) nums = 1; else nums = spec->num_adc_nids; for (c = 0; c < nums; c++) { for (i = 0; i < imux->num_items; i++) { path = get_input_path(codec, c, 
i); if (path) { bool active = path->active; if (i == spec->cur_mux[c]) active = true; snd_hda_activate_path(codec, path, active, false); } } if (spec->hp_mic) update_hp_mic(codec, c, true); } if (spec->cap_sync_hook) spec->cap_sync_hook(codec, NULL); } /* set right pin controls for digital I/O */ static void init_digital(struct hda_codec *codec) { struct hda_gen_spec *spec = codec->spec; int i; hda_nid_t pin; for (i = 0; i < spec->autocfg.dig_outs; i++) set_output_and_unmute(codec, spec->digout_paths[i]); pin = spec->autocfg.dig_in_pin; if (pin) { restore_pin_ctl(codec, pin); resume_path_from_idx(codec, spec->digin_path); } } /* clear unsol-event tags on unused pins; Conexant codecs seem to leave * invalid unsol tags by some reason */ static void clear_unsol_on_unused_pins(struct hda_codec *codec) { int i; for (i = 0; i < codec->init_pins.used; i++) { struct hda_pincfg *pin = snd_array_elem(&codec->init_pins, i); hda_nid_t nid = pin->nid; if (is_jack_detectable(codec, nid) && !snd_hda_jack_tbl_get(codec, nid)) snd_hda_codec_update_cache(codec, nid, 0, AC_VERB_SET_UNSOLICITED_ENABLE, 0); } } /* * initialize the generic spec; * this can be put as patch_ops.init function */ int snd_hda_gen_init(struct hda_codec *codec) { struct hda_gen_spec *spec = codec->spec; if (spec->init_hook) spec->init_hook(codec); snd_hda_apply_verbs(codec); codec->cached_write = 1; init_multi_out(codec); init_extra_out(codec); init_multi_io(codec); init_analog_input(codec); init_input_src(codec); init_digital(codec); clear_unsol_on_unused_pins(codec); /* call init functions of standard auto-mute helpers */ update_automute_all(codec); snd_hda_codec_flush_cache(codec); if (spec->vmaster_mute.sw_kctl && spec->vmaster_mute.hook) snd_hda_sync_vmaster_hook(&spec->vmaster_mute); hda_call_check_power_status(codec, 0x01); return 0; } EXPORT_SYMBOL_HDA(snd_hda_gen_init); /* * free the generic spec; * this can be put as patch_ops.free function */ void snd_hda_gen_free(struct hda_codec *codec) { 
snd_hda_detach_beep_device(codec); snd_hda_gen_spec_free(codec->spec); kfree(codec->spec); codec->spec = NULL; } EXPORT_SYMBOL_HDA(snd_hda_gen_free); #ifdef CONFIG_PM /* * check the loopback power save state; * this can be put as patch_ops.check_power_status function */ int snd_hda_gen_check_power_status(struct hda_codec *codec, hda_nid_t nid) { struct hda_gen_spec *spec = codec->spec; return snd_hda_check_amp_list_power(codec, &spec->loopback, nid); } EXPORT_SYMBOL_HDA(snd_hda_gen_check_power_status); #endif /* * the generic codec support */ static const struct hda_codec_ops generic_patch_ops = { .build_controls = snd_hda_gen_build_controls, .build_pcms = snd_hda_gen_build_pcms, .init = snd_hda_gen_init, .free = snd_hda_gen_free, .unsol_event = snd_hda_jack_unsol_event, #ifdef CONFIG_PM .check_power_status = snd_hda_gen_check_power_status, #endif }; int snd_hda_parse_generic_codec(struct hda_codec *codec) { struct hda_gen_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; snd_hda_gen_spec_init(spec); codec->spec = spec; err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0); if (err < 0) return err; err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg); if (err < 0) goto error; codec->patch_ops = generic_patch_ops; return 0; error: snd_hda_gen_free(codec); return err; } EXPORT_SYMBOL_HDA(snd_hda_parse_generic_codec);
gpl-2.0
KangDroid/android_kernel_moto_shamu
arch/arm/mach-msm/ocmem_sched.c
142
55994
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/rbtree.h> #include <linux/idr.h> #include <linux/genalloc.h> #include <linux/of.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <mach/ocmem_priv.h> enum request_states { R_FREE = 0x0, /* request is not allocated */ R_PENDING, /* request has a pending operation */ R_ALLOCATED, /* request has been allocated */ R_ENQUEUED, /* request has been enqueued for future retry */ R_MUST_GROW, /* request must grow as a part of pending operation */ R_MUST_SHRINK, /* request must shrink */ R_WF_SHRINK, /* shrink must be ack'ed by a client */ R_SHRUNK, /* request was shrunk */ R_MUST_MAP, /* request must be mapped before being used */ R_MUST_UNMAP, /* request must be unmapped when not being used */ R_MAPPED, /* request is mapped and actively used by client */ R_UNMAPPED, /* request is not mapped, so it's not in active use */ R_EVICTED, /* request is evicted and must be restored */ }; #define SET_STATE(x, val) (set_bit((val), &(x)->state)) #define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state)) #define TEST_STATE(x, val) (test_bit((val), &(x)->state)) enum op_res { OP_COMPLETE = 0x0, OP_RESCHED, OP_PARTIAL, OP_EVICT, OP_FAIL = ~0x0, }; /* Represents various client priorities */ /* Note: More than one client can share a priority level */ enum client_prio { MIN_PRIO = 0x0, NO_PRIO 
= MIN_PRIO, PRIO_SENSORS = 0x1, PRIO_OTHER_OS = 0x1, PRIO_LP_AUDIO = 0x1, PRIO_HP_AUDIO = 0x2, PRIO_VOICE = 0x3, PRIO_GFX_GROWTH = 0x4, PRIO_VIDEO = 0x5, PRIO_GFX = 0x6, PRIO_OCMEM = 0x7, MAX_OCMEM_PRIO = PRIO_OCMEM + 1, }; static void __iomem *ocmem_vaddr; static struct list_head sched_queue[MAX_OCMEM_PRIO]; static struct mutex sched_queue_mutex; /* The duration in msecs before a pending operation is scheduled * This allows an idle window between use case boundaries where various * hardware state changes can occur. The value will be tweaked on actual * hardware. */ /* Delay in ms for switching to low power mode for OCMEM */ #define SCHED_DELAY 5000 static struct list_head rdm_queue; static struct mutex rdm_mutex; static struct workqueue_struct *ocmem_rdm_wq; static struct workqueue_struct *ocmem_eviction_wq; static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX]; struct ocmem_rdm_work { int id; struct ocmem_map_list *list; struct ocmem_handle *handle; int direction; struct work_struct work; }; /* OCMEM Operational modes */ enum ocmem_client_modes { OCMEM_PERFORMANCE = 1, OCMEM_PASSIVE, OCMEM_LOW_POWER, OCMEM_MODE_MAX = OCMEM_LOW_POWER }; /* OCMEM Addressing modes */ enum ocmem_interconnects { OCMEM_BLOCKED = 0, OCMEM_PORT = 1, OCMEM_OCMEMNOC = 2, OCMEM_SYSNOC = 3, }; enum ocmem_tz_client { TZ_UNUSED = 0x0, TZ_GRAPHICS, TZ_VIDEO, TZ_LP_AUDIO, TZ_SENSORS, TZ_OTHER_OS, TZ_DEBUG, }; /** * Primary OCMEM Arbitration Table **/ struct ocmem_table { int client_id; int priority; int mode; int hw_interconnect; int tz_id; } ocmem_client_table[OCMEM_CLIENT_MAX] = { {OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT, TZ_GRAPHICS}, {OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC, TZ_VIDEO}, {OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC, TZ_UNUSED}, {OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED, TZ_UNUSED}, {OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED, TZ_UNUSED}, {OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, 
OCMEM_SYSNOC, TZ_LP_AUDIO}, {OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC, TZ_SENSORS}, {OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC, TZ_OTHER_OS}, }; static struct rb_root sched_tree; static struct mutex sched_mutex; static struct mutex allocation_mutex; static struct mutex free_mutex; /* A region represents a continuous interval in OCMEM address space */ struct ocmem_region { /* Chain in Interval Tree */ struct rb_node region_rb; /* Hash map of requests */ struct idr region_idr; /* Chain in eviction list */ struct list_head eviction_list; unsigned long r_start; unsigned long r_end; unsigned long r_sz; /* Highest priority of all requests served by this region */ int max_prio; }; /* Is OCMEM tightly coupled to the client ?*/ static inline int is_tcm(int id) { if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT || ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC) return 1; else return 0; } static inline int is_iface_access(int id) { return ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC ? 1 : 0; } static inline int is_remapped_access(int id) { return ocmem_client_table[id].hw_interconnect == OCMEM_SYSNOC ? 1 : 0; } static inline int is_blocked(int id) { return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 
1 : 0; } inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle) { if (handle) return &handle->buffer; else return NULL; } inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer) { if (buffer) return container_of(buffer, struct ocmem_handle, buffer); else return NULL; } inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle) { if (handle) return handle->req; else return NULL; } inline struct ocmem_handle *req_to_handle(struct ocmem_req *req) { if (req && req->buffer) return container_of(req->buffer, struct ocmem_handle, buffer); else return NULL; } /* Simple wrappers which will have debug features added later */ inline int ocmem_read(void *at) { return readl_relaxed(at); } inline int ocmem_write(unsigned long val, void *at) { writel_relaxed(val, at); return 0; } inline int get_mode(int id) { if (!check_id(id)) return MODE_DEFAULT; else return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ? WIDE_MODE : THIN_MODE; } inline int get_tz_id(int id) { if (!check_id(id)) return TZ_UNUSED; else return ocmem_client_table[id].tz_id; } /* Returns the address that can be used by a device core to access OCMEM */ static unsigned long device_address(int id, unsigned long addr) { int hw_interconnect = ocmem_client_table[id].hw_interconnect; unsigned long ret_addr = 0x0; switch (hw_interconnect) { case OCMEM_PORT: case OCMEM_OCMEMNOC: ret_addr = phys_to_offset(addr); break; case OCMEM_SYSNOC: ret_addr = addr; break; case OCMEM_BLOCKED: ret_addr = 0x0; break; } return ret_addr; } /* Returns the address as viewed by the core */ static unsigned long core_address(int id, unsigned long addr) { int hw_interconnect = ocmem_client_table[id].hw_interconnect; unsigned long ret_addr = 0x0; switch (hw_interconnect) { case OCMEM_PORT: case OCMEM_OCMEMNOC: ret_addr = offset_to_phys(addr); break; case OCMEM_SYSNOC: ret_addr = addr; break; case OCMEM_BLOCKED: ret_addr = 0x0; break; } return ret_addr; } static inline struct ocmem_zone *zone_of(struct 
ocmem_req *req) { int owner; if (!req) return NULL; owner = req->owner; return get_zone(owner); } static int insert_region(struct ocmem_region *region) { struct rb_root *root = &sched_tree; struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct ocmem_region *tmp = NULL; unsigned long addr = region->r_start; while (*p) { parent = *p; tmp = rb_entry(parent, struct ocmem_region, region_rb); if (tmp->r_end > addr) { if (tmp->r_start <= addr) break; p = &(*p)->rb_left; } else if (tmp->r_end <= addr) p = &(*p)->rb_right; } rb_link_node(&region->region_rb, parent, p); rb_insert_color(&region->region_rb, root); return 0; } static int remove_region(struct ocmem_region *region) { struct rb_root *root = &sched_tree; rb_erase(&region->region_rb, root); return 0; } static struct ocmem_req *ocmem_create_req(void) { struct ocmem_req *p = NULL; p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL); if (!p) return NULL; INIT_LIST_HEAD(&p->zone_list); INIT_LIST_HEAD(&p->sched_list); init_rwsem(&p->rw_sem); SET_STATE(p, R_FREE); pr_debug("request %p created\n", p); return p; } static int ocmem_destroy_req(struct ocmem_req *req) { kfree(req); return 0; } static struct ocmem_region *create_region(void) { struct ocmem_region *p = NULL; p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL); if (!p) return NULL; idr_init(&p->region_idr); INIT_LIST_HEAD(&p->eviction_list); p->r_start = p->r_end = p->r_sz = 0x0; p->max_prio = NO_PRIO; return p; } static int destroy_region(struct ocmem_region *region) { idr_destroy(&region->region_idr); kfree(region); return 0; } static int attach_req(struct ocmem_region *region, struct ocmem_req *req) { int id; id = idr_alloc(&region->region_idr, req, 1, 0, GFP_KERNEL); if (id < 0) return id; req->req_id = id; pr_debug("ocmem: request %p(id:%d) attached to region %p\n", req, id, region); return 0; } static int detach_req(struct ocmem_region *region, struct ocmem_req *req) { idr_remove(&region->region_idr, req->req_id); return 0; } static int 
populate_region(struct ocmem_region *region, struct ocmem_req *req) { region->r_start = req->req_start; region->r_end = req->req_end; region->r_sz = req->req_end - req->req_start + 1; return 0; } static int region_req_count(int id, void *ptr, void *data) { int *count = data; *count = *count + 1; return 0; } static int req_count(struct ocmem_region *region) { int count = 0; idr_for_each(&region->region_idr, region_req_count, &count); return count; } static int compute_max_prio(int id, void *ptr, void *data) { int *max = data; struct ocmem_req *req = ptr; if (req->prio > *max) *max = req->prio; return 0; } static int update_region_prio(struct ocmem_region *region) { int max_prio; if (req_count(region) != 0) { idr_for_each(&region->region_idr, compute_max_prio, &max_prio); region->max_prio = max_prio; } else { region->max_prio = NO_PRIO; } pr_debug("ocmem: Updating prio of region %p as %d\n", region, max_prio); return 0; } static struct ocmem_region *find_region(unsigned long addr) { struct ocmem_region *region = NULL; struct rb_node *rb_node = NULL; rb_node = sched_tree.rb_node; while (rb_node) { struct ocmem_region *tmp_region = NULL; tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb); if (tmp_region->r_end > addr) { region = tmp_region; if (tmp_region->r_start <= addr) break; rb_node = rb_node->rb_left; } else { rb_node = rb_node->rb_right; } } return region; } static struct ocmem_region *find_region_intersection(unsigned long start, unsigned long end) { struct ocmem_region *region = NULL; region = find_region(start); if (region && end <= region->r_start) region = NULL; return region; } static struct ocmem_region *find_region_match(unsigned long start, unsigned long end) { struct ocmem_region *region = NULL; region = find_region(start); if (region && start == region->r_start && end == region->r_end) return region; return NULL; } static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region) { struct ocmem_req *req = NULL; if (!region) 
return NULL;	/* NOTE(review): tail of a lookup helper whose head is in the previous chunk */
	req = idr_find(&region->region_idr, owner);
	return req;
}

/* Must be called with req->sem held */
/* True when the request's OCMEM interval is currently mapped. */
static inline int is_mapped(struct ocmem_req *req)
{
	return TEST_STATE(req, R_MAPPED);
}

/* True when a shrink has been requested or is awaiting the client. */
static inline int is_pending_shrink(struct ocmem_req *req)
{
	return TEST_STATE(req, R_MUST_SHRINK) ||
		TEST_STATE(req, R_WF_SHRINK);
}

/* Must be called with sched_mutex held */
/*
 * Transition a mapped request to the unmapped state after verifying that
 * the interval tree still holds this exact request as the backing entry.
 */
static int __sched_unmap(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	if (!TEST_STATE(req, R_MAPPED))
		goto invalid_op_error;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	if (!is_mapped(req)) {
		pr_err("Request is not currently mapped");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MAPPED);
	SET_STATE(req, R_MUST_MAP);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
/* Counterpart of __sched_unmap: mark a verified backing request as mapped. */
static int __sched_map(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MUST_MAP);
	SET_STATE(req, R_MAPPED);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

/* Locking wrapper: run __sched_map under req->rw_sem and sched_mutex. */
static int do_map(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_map(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

/* Locking wrapper: run __sched_unmap under req->rw_sem and sched_mutex. */
static int do_unmap(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_unmap(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

/*
 * Secure-lock the request's OCMEM range with the security firmware and
 * mark it mapped.  On map failure the lock is released before returning.
 * `start`/`end` are currently unused; the range comes from req itself.
 */
static int process_map(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = ocmem_restore_sec_program(OCMEM_SECURE_DEV_ID);

	if (rc < 0) {
		pr_err("ocmem: Failed to restore security programming\n");
		goto lock_failed;
	}

	rc = ocmem_lock(req->owner, phys_to_offset(req->req_start),
			req->req_sz, get_mode(req->owner));

	if (rc < 0) {
		pr_err("ocmem: Failed to secure request %p for %d\n", req,
				req->owner);
		goto lock_failed;
	}

	rc = do_map(req);

	if (rc < 0) {
		pr_err("ocmem: Failed to map request %p for %d\n",
							req, req->owner);
		goto process_map_fail;
	}
	pr_debug("ocmem: Mapped request %p\n", req);
	return 0;

process_map_fail:
	ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz);
lock_failed:
	pr_err("ocmem: Failed to map ocmem request\n");
	return rc;
}

/* Reverse of process_map: unmap the request, then drop the secure lock. */
static int process_unmap(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = do_unmap(req);

	if (rc < 0)
		goto process_unmap_fail;

	rc = ocmem_unlock(req->owner, phys_to_offset(req->req_start),
				req->req_sz);

	if (rc < 0) {
		pr_err("ocmem: Failed to un-secure request %p for %d\n", req,
				req->owner);
		goto unlock_failed;
	}

	pr_debug("ocmem: Unmapped request %p\n", req);
	return 0;

unlock_failed:
process_unmap_fail:
	pr_err("ocmem: Failed to unmap ocmem request\n");
	return rc;
}

/*
 * Must be called with sched_mutex held.
 * Try to grow `req` toward req_max.  If the extra span conflicts with a
 * higher-priority region the growth is retried in `step`-sized decrements
 * or rescheduled; on success the zone allocation is redone at the larger
 * size and the request is re-attached to a (possibly new) region.
 */
static int __sched_grow(struct ocmem_req *req, bool can_block)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long curr_sz = 0;
	unsigned long growth_sz = 0;
	unsigned long curr_start = 0;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;
	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;
	int rc = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	curr_sz = matched_req->req_sz;
	curr_start = matched_req->req_start;
	growth_sz = matched_req->req_max - matched_req->req_sz;

	pr_debug("Attempting to grow req %p from %lx to %lx\n",
			req, matched_req->req_sz, matched_req->req_max);

	retry = false;

	pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head,
				zone->z_head + growth_sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		zone->z_ops->free(zone, curr_start, curr_sz);
		rc = zone->z_ops->allocate(zone, curr_sz + growth_sz,
							&alloc_addr);

		if (rc) {
			pr_err("ocmem: zone allocation operation failed\n");
			goto internal_error;
		}

		curr_sz += growth_sz;
		/* Detach the region from the interval tree */
		/* This is to guarantee that any change in size
		 * causes the tree to be rebalanced if required */

		detach_req(matched_region, req);
		if (req_count(matched_region) == 0) {
			/* Last request: reuse the emptied region object. */
			remove_region(matched_region);
			region = matched_region;
		} else {
			region = create_region();
			if (!region) {
				pr_err("ocmem: Unable to create region\n");
				/* NOTE(review): region is NULL here, yet
				 * region_error detaches/updates it — confirm
				 * those helpers tolerate NULL. */
				goto region_error;
			}
		}

		/* update the request */
		req->req_start = alloc_addr;
		/* increment the size to reflect new length */
		req->req_sz = curr_sz;
		req->req_end = alloc_addr + req->req_sz - 1;

		/* update request state */
		CLEAR_STATE(req, R_MUST_GROW);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_MAP;

		/* update the region with new req */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			goto region_error;
		}

		if (retry) {
			/* Grew by a partial step: keep the request queued
			 * so the remainder can be attempted later. */
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			/* Growth cannot be triggered unless a previous
			 * client of lower priority was evicted */
			pr_err("ocmem: Invalid growth scheduled\n");
			/* This is serious enough to fail */
			BUG();
			return OP_FAIL;
		} else if (overlap_r->max_prio > prio) {
			if (min == max) {
				/* Cannot grow at this time, try later */
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				/* Try to grow in steps */
				growth_sz -= step;
				/* We are OOM at this point so need to retry */
				if (growth_sz <= curr_sz) {
					SET_STATE(req, R_PENDING);
					SET_STATE(req, R_MUST_GROW);
					return OP_RESCHED;
				}
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						growth_sz);
				goto retry_next_step;
			}
		} else {
			pr_err("ocmem: grow: New Region %p Existing %p\n",
				matched_region, overlap_r);
			pr_err("ocmem: Undetermined behavior\n");
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL) {
		goto err_not_supported;
	}

	return OP_COMPLETE;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;

region_error:
	zone->z_ops->free(zone, alloc_addr, curr_sz);
	detach_req(region, req);
	update_region_prio(region);
	/* req is going to be destroyed by the caller anyways */
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}

/*
 * Must be called with sched_mutex held.
 * Release the zone allocation backing `req`, detach it from its region
 * (destroying the region if it becomes empty) and mark the request free.
 */
static int __sched_free(struct ocmem_req *req)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0)
		goto err_op_fail;

	detach_req(matched_region, matched_req);
	update_region_prio(matched_region);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* Update the request */
	req->req_start = 0x0;
	req->req_sz = 0x0;
	req->req_end = 0x0;
	SET_STATE(req, R_FREE);
	return OP_COMPLETE;

invalid_op_error:
	pr_err("ocmem: free: Failed to find matching region\n");
err_op_fail:
	pr_err("ocmem: free: Failed\n");
	return OP_FAIL;
}

/*
 * Must be called with sched_mutex held.
 * Shrink `req` to new_sz by freeing the old zone allocation and making a
 * fresh one, then reinsert the request into the interval tree and flag it
 * for re-mapping (and later regrowth).
 */
static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;
	struct ocmem_region *region = NULL;
	unsigned long alloc_addr = 0x0;
	int rc = 0;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	/* The shrink should not be called for zero size */
	BUG_ON(new_sz == 0);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0) {
		pr_err("Zone Allocation operation failed\n");
		goto internal_error;
	}

	rc = zone->z_ops->allocate(zone, new_sz, &alloc_addr);

	if (rc) {
		pr_err("Zone Allocation operation failed\n");
		goto internal_error;
	}

	/* Detach the region from the interval tree */
	/* This is to guarantee that the change in size
	 * causes the tree to be rebalanced if required */

	detach_req(matched_region, req);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		region = matched_region;
	} else {
		region = create_region();
		if (!region) {
			pr_err("ocmem: Unable to create region\n");
			goto internal_error;
		}
	}
	/* update the request */
	req->req_start = alloc_addr;
	req->req_sz = new_sz;
	/* NOTE(review): every other site computes req_end as
	 * start + sz - 1 (see __sched_grow/__sched_allocate); the missing
	 * "- 1" here looks like an off-by-one — confirm against upstream. */
	req->req_end = alloc_addr + req->req_sz;

	/* NOTE(review): when region == matched_region (emptied above) this
	 * destroys the very object that is attached and inserted just
	 * below — looks like a use-after-free hazard; verify. */
	if (req_count(region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* update request state */
	SET_STATE(req, R_MUST_GROW);
	SET_STATE(req, R_MUST_MAP);
	req->op = SCHED_MAP;

	/* attach the request to the region */
	attach_req(region, req);
	populate_region(region, req);
	update_region_prio(region);

	/* update the tree with new region */
	if (insert_region(region)) {
		pr_err("ocmem: Failed to insert the region\n");
		zone->z_ops->free(zone, alloc_addr, new_sz);
		detach_req(region, req);
		update_region_prio(region);
		/* req will be destroyed by the caller */
		goto region_error;
	}
	return OP_COMPLETE;

region_error:
	destroy_region(region);
internal_error:
	pr_err("ocmem: shrink: Failed\n");
	return OP_FAIL;
invalid_op_error:
	pr_err("ocmem: shrink: Failed to find matching region\n");
	return OP_FAIL;
}

/*
 * Must be called with sched_mutex held.
 * Core allocation path: try to place `req` at the zone head at size max,
 * backing off in `step` decrements toward min on conflict.  Lower-priority
 * conflicts trigger eviction (OP_EVICT); higher-priority conflicts either
 * reschedule (can_block) or keep stepping down.
 */
static int __sched_allocate(struct ocmem_req *req, bool can_block,
				bool can_wait)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long sz = max;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;
	int rc = 0;

	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	BUG_ON(!zone);

	if (min > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested minimum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (max > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested maximum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (min > zone->z_free) {
		pr_err("ocmem: out of memory for zone %d\n", owner);
		goto invalid_op_error;
	}

	retry = false;

	pr_debug("ocmem: do_allocate: %s request %p size %lx\n",
						get_name(owner), req, sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);

	if (overlap_r == NULL) {

		region = create_region();

		if (!region) {
			pr_err("ocmem: Unable to create region\n");
			goto invalid_op_error;
		}

		/* no conflicting regions, schedule this region */
		rc = zone->z_ops->allocate(zone, sz, &alloc_addr);

		if (rc) {
			pr_err("Zone Allocation operation failed\n");
			goto internal_error;
		}

		/* update the request */
		req->req_start = alloc_addr;
		req->req_end = alloc_addr + sz - 1;
		req->req_sz = sz;
		req->zone = zone;

		/* update request state */
		CLEAR_STATE(req, R_FREE);
		CLEAR_STATE(req, R_PENDING);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_NOP;

		/* attach the request to the region */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			zone->z_ops->free(zone, alloc_addr, sz);
			detach_req(region, req);
			update_region_prio(region);
			/* req will be destroyed by the caller */
			goto internal_error;
		}

		if (retry) {
			/* Allocated a reduced size: stay queued to grow
			 * back toward req_max later. */
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			if (min == max) {
				/* Fixed-size request beats the conflicting
				 * region: ask the caller to evict it. */
				req->req_start = zone->z_head;
				req->req_end = zone->z_head + sz - 1;
				req->req_sz = 0x0;
				req->edata = NULL;
				goto trigger_eviction;
			} else {
				/* Try to allocate atleast >= 'min' immediately */
				sz -= step;
				if (sz < min)
					goto err_out_of_mem;
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
									sz);
				goto retry_next_step;
			}
		} else if (overlap_r->max_prio > prio) {
			if (can_block == true) {
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				if (min == max) {
					pr_err("Cannot allocate %lx synchronously\n",
									sz);
					goto err_out_of_mem;
				} else {
					sz -= step;
					if (sz < min)
						goto err_out_of_mem;
					retry = true;
					pr_debug("ocmem: Attempting reduced size %lx\n",
									sz);
					goto retry_next_step;
				}
			}
		} else {
			pr_err("ocmem: Undetermined behavior\n");
			pr_err("ocmem: New Region %p Existing %p\n", region,
					overlap_r);
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL)
		goto err_not_supported;

	return OP_COMPLETE;

trigger_eviction:
	pr_debug("Trigger eviction of region %p\n", overlap_r);
	return OP_EVICT;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;

err_out_of_mem:
	pr_err("ocmem: Out of memory during allocation\n");
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}

/* Remove the request from eviction lists */
static void cancel_restore(struct ocmem_req *req)
{
	struct ocmem_eviction_data *edata;

	if (!req)
		return;

	edata = req->eviction_info;

	if (!edata)
		return;

	if (list_empty(&edata->req_list))
		return;

	list_del_init(&req->eviction_list);
	req->eviction_info = NULL;

	return;
}

/* Queue `priv` on its owner's pending-scheduling list. */
static int sched_enqueue(struct ocmem_req *priv)
{
	struct ocmem_req *next = NULL;
	mutex_lock(&sched_queue_mutex);
	SET_STATE(priv, R_ENQUEUED);
	list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
	pr_debug("enqueued req %p\n", priv);
	list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
		pr_debug("pending request %p for client %s\n", next,
				get_name(next->owner));
	}
	mutex_unlock(&sched_queue_mutex);
	return 0;
}

/* Cancel a specific pending request from its owner's scheduling queue. */
static void sched_dequeue(struct ocmem_req *victim_req)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	int id;

	if (!victim_req)
		return;

	id = victim_req->owner;

	mutex_lock(&sched_queue_mutex);

	if (list_empty(&sched_queue[id]))
		goto dequeue_done;

	list_for_each_entry_safe(req, next, &sched_queue[id], sched_list) {
		if (req == victim_req) {
			pr_debug("ocmem: Cancelling pending request %p for %s\n",
					req, get_name(req->owner));
			list_del_init(&victim_req->sched_list);
			CLEAR_STATE(victim_req, R_ENQUEUED);
			break;
		}
	}

dequeue_done:
	mutex_unlock(&sched_queue_mutex);
	return;
}

/*
 * Pop one pending request, scanning priorities from MIN_PRIO upward.
 * NOTE(review): after dequeuing from one priority list the outer loop
 * keeps scanning; if a later list is also non-empty its head is dequeued
 * too and the earlier request is silently dropped (deleted from its
 * queue but never returned/processed) — confirm whether a break from the
 * outer loop is missing.
 */
static struct ocmem_req *ocmem_fetch_req(void)
{
	int i;
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;

	mutex_lock(&sched_queue_mutex);
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (list_empty(&sched_queue[i]))
			continue;
		list_for_each_entry_safe(req, next, &sched_queue[i],
							sched_list) {
			if (req) {
				pr_debug("ocmem: Fetched pending request %p\n",
									req);
				list_del(&req->sched_list);
				CLEAR_STATE(req, R_ENQUEUED);
				break;
			}
		}
	}
	mutex_unlock(&sched_queue_mutex);
	return req;
}

/* Report the byte quota of client `id`'s zone, or 0 when unavailable. */
unsigned long process_quota(int id)
{
	struct ocmem_zone *zone = NULL;

	if (is_blocked(id))
		return 0;

	zone = get_zone(id);

	if (zone && zone->z_pool)
		return zone->z_end - zone->z_start;
	else
		return 0;
}

/* Run __sched_grow under the proper locks and publish the new buffer. */
static int do_grow(struct ocmem_req *req)
{
	struct ocmem_buf *buffer = NULL;
	bool can_block = true;
	int rc = 0;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_grow(req, can_block);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;

	if (rc == OP_RESCHED) {
		pr_debug("ocmem: Enqueue this allocation");
		sched_enqueue(req);
	} else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

/*
 * Full grow sequence: grow the zone allocation, power up clocks, re-map
 * and re-enable the memory macros, then notify the client of the growth.
 */
static int process_grow(struct ocmem_req *req)
{
	int rc = 0;
	unsigned long offset = 0;

	/* Attempt to grow the region */
	rc = do_grow(req);

	if (rc < 0)
		return -EINVAL;

	rc = ocmem_enable_core_clock();

	if (rc < 0)
		goto core_clock_fail;

	if (is_iface_access(req->owner)) {
		rc = ocmem_enable_iface_clock();

		if (rc < 0)
			goto iface_clock_fail;
	}

	rc = process_map(req, req->req_start, req->req_end);
	if (rc < 0)
		goto map_error;

	offset = phys_to_offset(req->req_start);

	rc = ocmem_memory_on(req->owner, offset, req->req_sz);

	if (rc < 0) {
		pr_err("Failed to switch ON memory macros\n");
		goto power_ctl_error;
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;

power_ctl_error:
map_error:
	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
iface_clock_fail:
	ocmem_disable_core_clock();
core_clock_fail:
	return -EINVAL;
}

/* Run __sched_shrink under the proper locks and publish the new buffer. */
static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
{
	int rc = 0;
	struct ocmem_buf *buffer = NULL;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_shrink(req, shrink_size);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;
	else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static void ocmem_sched_wk_func(struct work_struct *work);
DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);

/* Kick the delayed scheduler worker if any priority queue is non-empty. */
static int ocmem_schedule_pending(void)
{
	bool need_sched = false;
	int i = 0;

	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (!list_empty(&sched_queue[i])) {
			need_sched = true;
			break;
		}
	}

	if (need_sched == true) {
		cancel_delayed_work(&ocmem_sched_thread);
		schedule_delayed_work(&ocmem_sched_thread,
					msecs_to_jiffies(SCHED_DELAY));
		pr_debug("ocmem: Scheduled delayed work\n");
	}
	return 0;
}

/* Free an (already unmapped) request's backing allocation. */
static int do_free(struct ocmem_req *req)
{
	int rc = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	if (is_mapped(req)) {
		pr_err("ocmem: Buffer needs to be unmapped before free\n");
		goto err_free_fail;
	}

	pr_debug("ocmem: do_free: client %s req %p\n", get_name(req->owner),
					req);
	/* Grab the sched mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_free(req);
	mutex_unlock(&sched_mutex);

	switch (rc) {
	case OP_COMPLETE:
		buffer->addr = 0x0;
		buffer->len = 0x0;
		break;
	case OP_FAIL:
	default:
		goto err_free_fail;
		break;
	}

	up_write(&req->rw_sem);
	return 0;
err_free_fail:
	up_write(&req->rw_sem);
	pr_err("ocmem: freeing req %p failed\n", req);
	return -EINVAL;
}

/*
 * Public free entry point: validate the handle, drain pending state
 * (restore lists, scheduler queue), unmap + power down if needed, free
 * the allocation and destroy the request.
 * Returns 0, -EINVAL on failure, -EAGAIN while a shrink is outstanding.
 */
int process_free(int id, struct ocmem_handle *handle)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	unsigned long offset = 0;
	int rc = 0;

	mutex_lock(&free_mutex);

	if (is_blocked(id)) {
		pr_err("Client %d cannot request free\n", id);
		goto free_invalid;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req) {
		pr_err("ocmem: No valid request to free\n");
		goto free_invalid;
	}

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for free\n");
		goto free_invalid;
	}

	if (req->edata != NULL) {
		pr_err("ocmem: Request %p(%2lx) yet to process eviction %p\n",
					req, req->state, req->edata);
		goto free_invalid;
	}

	if (is_pending_shrink(req)) {
		pr_err("ocmem: Request %p(%2lx) yet to process eviction\n",
					req, req->state);
		goto pending_shrink;
	}

	/* Remove the request from any restore lists */
	if (req->eviction_info)
		cancel_restore(req);

	/* Remove the request from any pending opreations */
	if (TEST_STATE(req, R_ENQUEUED)) {
		mutex_lock(&sched_mutex);
		sched_dequeue(req);
		mutex_unlock(&sched_mutex);
	}

	if (!TEST_STATE(req, R_FREE)) {

		if (TEST_STATE(req, R_MAPPED)) {
			/* unmap the interval and clear the memory */
			rc = process_unmap(req, req->req_start, req->req_end);

			if (rc < 0) {
				pr_err("ocmem: Failed to unmap %p\n", req);
				goto free_fail;
			}

			/* Turn off the memory */
			if (req->req_sz != 0) {

				offset = phys_to_offset(req->req_start);
				rc = ocmem_memory_off(req->owner, offset,
					req->req_sz);

				if (rc < 0) {
					pr_err("Failed to switch OFF memory macros\n");
					goto free_fail;
				}
			}

			if (is_iface_access(req->owner))
				ocmem_disable_iface_clock();
			ocmem_disable_core_clock();

			rc = do_free(req);
			if (rc < 0) {
				pr_err("ocmem: Failed to free %p\n", req);
				goto free_fail;
			}
		} else
			pr_debug("request %p was already shrunk to 0\n", req);
	}

	/* Second pass: the request may still hold an unmapped allocation
	 * (e.g. after a shrink-to-zero) that must be powered off and
	 * released. */
	if (!TEST_STATE(req, R_FREE)) {
		/* Turn off the memory */
		if (req->req_sz != 0) {

			offset = phys_to_offset(req->req_start);
			rc = ocmem_memory_off(req->owner, offset, req->req_sz);

			if (rc < 0) {
				pr_err("Failed to switch OFF memory macros\n");
				goto free_fail;
			}
			if (is_iface_access(req->owner))
				ocmem_disable_iface_clock();
			ocmem_disable_core_clock();
		}

		/* free the allocation */
		rc = do_free(req);
		if (rc < 0)
			/* NOTE(review): returns with free_mutex still held,
			 * unlike every other error path here — looks like a
			 * mutex leak; should this be "goto free_fail"? */
			return -EINVAL;
	}

	inc_ocmem_stat(zone_of(req), NR_FREES);

	ocmem_destroy_req(req);
	handle->req = NULL;

	ocmem_schedule_pending();
	mutex_unlock(&free_mutex);
	return 0;
free_fail:
free_invalid:
	mutex_unlock(&free_mutex);
	return -EINVAL;
pending_shrink:
	mutex_unlock(&free_mutex);
	return -EAGAIN;
}

/*
 * Workqueue handler for RDM (DMA) transfers queued by queue_transfer():
 * perform the transfer, free the work item and notify the client with a
 * MAP/UNMAP done-or-fail event depending on direction and result.
 */
static void ocmem_rdm_worker(struct work_struct *work)
{
	int offset = 0;
	int rc = 0;
	int event;
	struct ocmem_rdm_work *work_data =
		container_of(work, struct ocmem_rdm_work, work);
	int id = work_data->id;
	struct ocmem_map_list *list = work_data->list;
	int direction = work_data->direction;
	struct ocmem_handle *handle = work_data->handle;
	struct ocmem_req *req = handle_to_req(handle);
	struct ocmem_buf *buffer = handle_to_buffer(handle);

	down_write(&req->rw_sem);
	offset = phys_to_offset(req->req_start);
	rc = ocmem_rdm_transfer(id, list, offset, direction);
	if (work_data->direction == TO_OCMEM)
		event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
	else
		event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
	up_write(&req->rw_sem);
	kfree(work_data);
	dispatch_notification(id, event, buffer);
}

/* Package a transfer description and hand it to the RDM workqueue. */
int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
			struct ocmem_map_list *list, int direction)
{
	struct ocmem_rdm_work *work_data = NULL;

	down_write(&req->rw_sem);

	work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
	if (!work_data)
		BUG();

	work_data->handle = handle;
	work_data->list = list;
	work_data->id = req->owner;
	work_data->direction = direction;
	INIT_WORK(&work_data->work, ocmem_rdm_worker);
	up_write(&req->rw_sem);
	queue_work(ocmem_rdm_wq, &work_data->work);
	return 0;
}

/* Drop a mapped buffer without transferring its contents back to DDR. */
int process_drop(int id, struct ocmem_handle *handle,
				 struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	int rc = 0;

	if (is_blocked(id)) {
		pr_err("Client %d cannot request drop\n", id);
		return -EINVAL;
	}

	if (is_tcm(id))
		pr_err("Client %d cannot request drop\n", id);

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req)
		return -EINVAL;

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for drop\n");
		return -EINVAL;
	}

	if (TEST_STATE(req, R_MAPPED)) {
		rc = process_unmap(req, req->req_start, req->req_end);
		if (rc < 0)
			return -EINVAL;
		if (is_iface_access(req->owner))
			ocmem_disable_iface_clock();
		ocmem_disable_core_clock();
	} else
		return -EINVAL;

	return 0;
}

/* Queue an asynchronous OCMEM -> DDR transfer for a mapped buffer. */
int process_xfer_out(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not currently mapped\n");
		goto transfer_out_error;
	}

	rc = queue_transfer(req, handle, list, TO_DDR);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to DDR\n");
		inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
		goto transfer_out_error;
	}

	inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR);
	return 0;

transfer_out_error:
	return -EINVAL;
}

/* Queue an asynchronous DDR -> OCMEM transfer for a mapped buffer. */
int process_xfer_in(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not already mapped for transfer\n");
		goto transfer_in_error;
	}

	inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM);

	rc = queue_transfer(req, handle, list, TO_OCMEM);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to OCMEM\n");
		inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
		goto transfer_in_error;
	}

	return 0;
transfer_in_error:
	return -EINVAL;
}

/*
 * Handle a client's response to an eviction-triggered shrink: release
 * (size == 0) or shrink the buffer, then decrement the eviction's pending
 * count, completing the waiter when all conflicting requests are done.
 */
int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	if (is_blocked(id)) {
		pr_err("Client %d cannot request free\n", id);
		return -EINVAL;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req)
		return -EINVAL;

	mutex_lock(&free_mutex);

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for shrink\n");
		goto shrink_fail;
	}

	edata = req->eviction_info;

	if (!edata) {
		pr_err("Unable to find eviction data\n");
		goto shrink_fail;
	}

	pr_debug("Found edata %p in request %p\n", edata, req);

	inc_ocmem_stat(zone_of(req), NR_SHRINKS);

	if (size == 0) {
		pr_debug("req %p being shrunk to zero\n", req);
		if (is_mapped(req)) {
			rc = process_unmap(req, req->req_start, req->req_end);
			if (rc < 0)
				goto shrink_fail;
			if (is_iface_access(req->owner))
				ocmem_disable_iface_clock();
			ocmem_disable_core_clock();
		}
		rc = do_free(req);
		if (rc < 0)
			goto shrink_fail;
		SET_STATE(req, R_FREE);
	} else {
		rc = do_shrink(req, size);
		if (rc < 0)
			goto shrink_fail;
	}

	CLEAR_STATE(req, R_ALLOCATED);
	CLEAR_STATE(req, R_WF_SHRINK);
	SET_STATE(req, R_SHRUNK);

	if (atomic_dec_and_test(&edata->pending)) {
		pr_debug("ocmem: All conflicting allocations were shrunk\n");
		complete(&edata->completion);
	}

	mutex_unlock(&free_mutex);
	return 0;
shrink_fail:
	pr_err("ocmem: Failed to shrink request %p of %s\n",
			req, get_name(req->owner));
	mutex_unlock(&free_mutex);
	return -EINVAL;
}

/* Dispatch a transfer request by direction; TCM clients may not map. */
int process_xfer(int id, struct ocmem_handle *handle,
		struct ocmem_map_list *list, int direction)
{
	int rc = 0;

	if (is_tcm(id)) {
		WARN(1, "Mapping operation is invalid for client\n");
		return -EINVAL;
	}

	if (direction == TO_DDR)
		rc = process_xfer_out(id, handle, list);
	else if (direction == TO_OCMEM)
		rc = process_xfer_in(id, handle, list);
	return rc;
}

/* Allocate and initialize eviction bookkeeping for client `id`. */
static struct ocmem_eviction_data *init_eviction(int id)
{
	struct ocmem_eviction_data *edata = NULL;
	int prio = ocmem_client_table[id].priority;

	edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);

	if (!edata) {
		pr_err("ocmem: Could not allocate eviction data\n");
		return NULL;
	}

	INIT_LIST_HEAD(&edata->victim_list);
	INIT_LIST_HEAD(&edata->req_list);
	edata->prio = prio;
	atomic_set(&edata->pending, 0);
	return edata;
}

/* Release eviction bookkeeping; warns if victims are still listed. */
static void free_eviction(struct ocmem_eviction_data *edata)
{
	if (!edata)
		return;

	if (!list_empty(&edata->req_list))
		pr_err("ocmem: Eviction data %p not empty\n", edata);

	kfree(edata);
	edata = NULL;
}

/* True when the [req_start, req_end] intervals of the two requests overlap. */
static bool is_overlapping(struct ocmem_req *new, struct ocmem_req *old)
{
	if (!new || !old)
		return false;

	pr_debug("check overlap [%lx -- %lx] on [%lx -- %lx]\n",
			new->req_start, new->req_end,
			old->req_start, old->req_end);

	if ((new->req_start < old->req_start &&
		new->req_end >= old->req_start) ||
		(new->req_start >= old->req_start &&
		 new->req_start <= old->req_end &&
		 new->req_end >= old->req_end)) {
		pr_debug("request %p overlaps with existing req %p\n",
						new, old);
		return true;
	}
	return false;
}

/*
 * Walk the region tree and collect lower-priority victims onto
 * edata->req_list.  In passive mode (edata->passive) every lower-priority
 * request in a lower-priority region is selected; otherwise only those
 * overlapping `req`.  Returns the number of victims collected.
 */
static int __evict_common(struct ocmem_eviction_data *edata,
						struct ocmem_req *req)
{
	struct rb_node *rb_node = NULL;
	struct ocmem_req *e_req = NULL;
	bool needs_eviction = false;
	int j = 0;

	for (rb_node = rb_first(&sched_tree); rb_node;
				rb_node = rb_next(rb_node)) {

		struct ocmem_region *tmp_region = NULL;

		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);

		if (tmp_region->max_prio < edata->prio) {
			for (j = edata->prio - 1; j > NO_PRIO; j--) {
				needs_eviction = false;
				e_req = find_req_match(j, tmp_region);
				if (!e_req)
					continue;
				if (edata->passive == true) {
					needs_eviction = true;
				} else {
					needs_eviction = is_overlapping(req,
								e_req);
				}
				if (needs_eviction) {
					pr_debug("adding %p in region %p to eviction list\n",
							e_req, tmp_region);
					SET_STATE(e_req, R_MUST_SHRINK);
					list_add_tail(
							&e_req->eviction_list,
							&edata->req_list);
					atomic_inc(&edata->pending);
					e_req->eviction_info = edata;
				}
			}
		} else {
			pr_debug("Skipped region %p\n", tmp_region);
		}
	}

	pr_debug("%d requests will be evicted\n", atomic_read(&edata->pending));

	return atomic_read(&edata->pending);
}

/* Notify every collected victim to shrink to zero and mark it waiting. */
static void trigger_eviction(struct ocmem_eviction_data *edata)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	struct ocmem_buf buffer;

	if (!edata)
		return;

	BUG_ON(atomic_read(&edata->pending) == 0);

	init_completion(&edata->completion);

	list_for_each_entry_safe(req, next, &edata->req_list, eviction_list) {
		if (req) {
			pr_debug("ocmem: Evicting request %p\n", req);
			buffer.addr = req->req_start;
			/* len 0 tells the client to shrink to nothing */
			buffer.len = 0x0;
			CLEAR_STATE(req, R_MUST_SHRINK);
			dispatch_notification(req->owner, OCMEM_ALLOC_SHRINK,
								&buffer);
			SET_STATE(req, R_WF_SHRINK);
		}
	}
	return;
}

/*
 * Passive (client-initiated) eviction of everything below client `id`'s
 * priority; blocks until all victims have processed their shrink.
 */
int process_evict(int id)
{
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	edata = init_eviction(id);

	if (!edata)
		return -EINVAL;

	edata->passive = true;

	mutex_lock(&sched_mutex);

	rc = __evict_common(edata, NULL);

	if (rc == 0)
		goto skip_eviction;

	trigger_eviction(edata);

	evictions[id] = edata;

	mutex_unlock(&sched_mutex);

	wait_for_completion(&edata->completion);

	return 0;

skip_eviction:
	evictions[id] = NULL;
	mutex_unlock(&sched_mutex);
	return 0;
}

/*
 * Targeted eviction used by the allocation path: evict only requests
 * overlapping `req`, attach the eviction record to it, and wait for the
 * victims to shrink.  Returns 0 in both the evicted and nothing-to-evict
 * cases.
 */
static int run_evict(struct ocmem_req *req)
{
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	if (!req)
		return -EINVAL;

	edata = init_eviction(req->owner);

	if (!edata)
		return -EINVAL;

	edata->passive = false;

	mutex_lock(&free_mutex);
	rc = __evict_common(edata, req);

	if (rc == 0)
		goto skip_eviction;

	trigger_eviction(edata);

	pr_debug("ocmem: attaching eviction %p to request %p", edata, req);
	req->edata = edata;

	mutex_unlock(&free_mutex);

	wait_for_completion(&edata->completion);

	pr_debug("ocmem: eviction completed successfully\n");
	return 0;

skip_eviction:
	pr_err("ocmem: Unable to run eviction\n");
	free_eviction(edata);
	req->edata = NULL;
	mutex_unlock(&free_mutex);
	return 0;
}

/* Re-queue every evicted request on `edata` for delayed re-allocation. */
static int __restore_common(struct ocmem_eviction_data *edata)
{
	struct ocmem_req *req = NULL;

	if (!edata)
		return -EINVAL;

	while (!list_empty(&edata->req_list)) {
		req = list_first_entry(&edata->req_list, struct ocmem_req,
						eviction_list);
		list_del_init(&req->eviction_list);
		pr_debug("ocmem: restoring evicted request %p\n", req);
		req->edata = NULL;
		req->eviction_info = NULL;
		req->op = SCHED_ALLOCATE;
		inc_ocmem_stat(zone_of(req), NR_RESTORES);
		sched_enqueue(req);
	}

	pr_debug("Scheduled all evicted regions\n");

	return 0;
}

/* Restore the victims attached to `req` after its allocation completed. */
static int sched_restore(struct ocmem_req *req)
{
	int rc = 0;

	if (!req)
		return -EINVAL;

	if (!req->edata)
		return 0;

	mutex_lock(&free_mutex);
	rc = __restore_common(req->edata);
	mutex_unlock(&free_mutex);

	if (rc < 0)
		return -EINVAL;

	free_eviction(req->edata);
	req->edata = NULL;
	return 0;
}

/* Client entry point: restore the victims of a prior process_evict(). */
int process_restore(int id)
{
	struct ocmem_eviction_data *edata = evictions[id];
	int rc = 0;

	if (!edata) {
		pr_err("Client %s invoked restore without any eviction\n",
					get_name(id));
		return -EINVAL;
	}

	mutex_lock(&free_mutex);
	rc = __restore_common(edata);
	mutex_unlock(&free_mutex);

	if (rc < 0) {
		pr_err("Failed to restore evicted requests\n");
		return -EINVAL;
	}

	free_eviction(edata);
	evictions[id] = NULL;
	ocmem_schedule_pending();
	return 0;
}

/*
 * Allocation driver around __sched_allocate: on OP_EVICT it runs the
 * eviction cycle and retries; otherwise it publishes the buffer or
 * enqueues the request depending on the scheduler's verdict.
 */
static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
{
	int rc = 0;
	int ret = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	mutex_lock(&allocation_mutex);
retry_allocate:

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_allocate(req, can_block, can_wait);
	mutex_unlock(&sched_mutex);

	if (rc == OP_EVICT) {

		ret = run_evict(req);

		if (ret == 0) {
			rc = sched_restore(req);
			if (rc < 0) {
				pr_err("Failed to restore for req %p\n", req);
				goto err_allocate_fail;
			}
			req->edata = NULL;

			pr_debug("Attempting to re-allocate req %p\n", req);
			req->req_start = 0x0;
			req->req_end = 0x0;
			goto retry_allocate;
		} else {
			goto err_allocate_fail;
		}
	}

	mutex_unlock(&allocation_mutex);

	if (rc == OP_FAIL) {
		inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS);
		/* NOTE(review): allocation_mutex was already released just
		 * above, and err_allocate_fail unlocks it again — looks like
		 * a double unlock on the OP_FAIL path; verify. */
		goto err_allocate_fail;
	}

	if (rc == OP_RESCHED) {
		buffer->addr = 0x0;
		buffer->len = 0x0;
		pr_debug("ocmem: Enqueuing req %p\n", req);
		sched_enqueue(req);
	} else if (rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
		inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS);
		pr_debug("ocmem: Enqueuing req %p\n", req);
		sched_enqueue(req);
	} else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_allocate_fail:
	mutex_unlock(&allocation_mutex);
	up_write(&req->rw_sem);
	return -EINVAL;
}

/*
 * Copy a request's OCMEM contents to the DDR buffer at `addr` for crash
 * dumps, temporarily opening the range via ocmem_enable_dump().
 */
static int do_dump(struct ocmem_req *req, unsigned long addr)
{

	void __iomem *req_vaddr;
	unsigned long offset = 0x0;
	int rc = 0;

	down_write(&req->rw_sem);

	offset = phys_to_offset(req->req_start);

	req_vaddr = ocmem_vaddr + offset;

	/* NOTE(review): this NULL check only fires when ocmem_vaddr is NULL
	 * and offset is 0 — pointer arithmetic above makes it mostly
	 * ineffective; confirm intent. */
	if (!req_vaddr)
		goto err_do_dump;

	rc = ocmem_enable_dump(req->owner, offset, req->req_sz);

	if (rc < 0)
		goto err_do_dump;

	pr_debug("Dumping client %s buffer ocmem p: %lx (v: %p) to ddr %lx\n",
				get_name(req->owner), req->req_start,
				req_vaddr, addr);

	memcpy((void *)addr, req_vaddr, req->req_sz);

	rc = ocmem_disable_dump(req->owner, offset, req->req_sz);

	if (rc < 0)
		pr_err("Failed to secure request %p of %s after dump\n",
				req, get_name(req->owner));

	up_write(&req->rw_sem);
	return 0;
err_do_dump:
	up_write(&req->rw_sem);
	return -EINVAL;
}

/*
 * Synchronous allocation entry point: build a request, allocate, then (if
 * memory was actually granted) power clocks, map and switch the macros on.
 * Error paths unwind in reverse order of acquisition.
 */
int process_allocate(int id, struct ocmem_handle *handle,
			unsigned long min, unsigned long max,
			unsigned long step, bool can_block, bool can_wait)
{

	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	int rc = 0;
	unsigned long offset = 0;

	/* sanity checks */
	if (is_blocked(id)) {
		pr_err("Client %d cannot request allocation\n", id);
		return -EINVAL;
	}

	if (handle->req != NULL) {
		pr_err("Invalid handle passed in\n");
		return -EINVAL;
	}

	buffer = handle_to_buffer(handle);
	BUG_ON(buffer == NULL);

	/* prepare a request structure to represent this transaction */
	req = ocmem_create_req();
	if (!req)
		return -ENOMEM;

	req->owner = id;
	req->req_min = min;
	req->req_max = max;
	req->req_step = step;
	req->prio = ocmem_client_table[id].priority;
	req->op = SCHED_ALLOCATE;
	req->buffer = buffer;

	inc_ocmem_stat(zone_of(req), NR_REQUESTS);

	rc = do_allocate(req, can_block, can_wait);

	if (rc < 0)
		goto do_allocate_error;

	inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS);

	handle->req = req;

	if (req->req_sz != 0) {

		rc = ocmem_enable_core_clock();

		if (rc < 0)
			goto core_clock_fail;

		if (is_iface_access(req->owner)) {
			rc = ocmem_enable_iface_clock();

			if (rc < 0)
				goto iface_clock_fail;
		}

		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			goto map_error;

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_on(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch ON memory macros\n");
			goto power_ctl_error;
		}
	}

	return 0;

power_ctl_error:
	process_unmap(req, req->req_start, req->req_end);
map_error:
	handle->req = NULL;
	do_free(req);
	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
iface_clock_fail:
	ocmem_disable_core_clock();
core_clock_fail:
do_allocate_error:
	ocmem_destroy_req(req);
	return -EINVAL;
}

/*
 * Deferred allocation path used by the scheduler worker for requests that
 * were previously rescheduled; mirrors process_allocate() and additionally
 * notifies the client via OCMEM_ALLOC_GROW on success.
 */
int process_delayed_allocate(struct ocmem_req *req)
{

	struct ocmem_handle *handle = NULL;
	int rc = 0;
	int id = req->owner;
	unsigned long offset = 0;

	handle = req_to_handle(req);
	BUG_ON(handle == NULL);

	rc = do_allocate(req, true, false);

	if (rc < 0)
		goto do_allocate_error;

	/* The request can still be pending */
	if (TEST_STATE(req, R_PENDING))
		return 0;

	inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS);

	if (req->req_sz != 0) {

		rc = ocmem_enable_core_clock();

		if (rc < 0)
			goto core_clock_fail;

		if (is_iface_access(req->owner)) {
			rc = ocmem_enable_iface_clock();

			if (rc < 0)
				goto iface_clock_fail;
		}

		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			goto map_error;

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_on(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch ON memory macros\n");
			goto power_ctl_error;
		}
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;

power_ctl_error:
	process_unmap(req, req->req_start, req->req_end);
map_error:
	handle->req = NULL;
	do_free(req);
	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
iface_clock_fail:
	ocmem_disable_core_clock();
core_clock_fail:
do_allocate_error:
	ocmem_destroy_req(req);
	return -EINVAL;
}

/* Dump a mapped client buffer to DDR at `addr`, with stats accounting. */
int process_dump(int id, struct ocmem_handle *handle, unsigned long addr)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not mapped\n");
		goto dump_error;
	}

	inc_ocmem_stat(zone_of(req), NR_DUMP_REQUESTS);

	mutex_lock(&sched_mutex);
	rc = do_dump(req, addr);
	mutex_unlock(&sched_mutex);

	if (rc < 0)
		goto dump_error;

	inc_ocmem_stat(zone_of(req), NR_DUMP_COMPLETE);
	return 0;

dump_error:
	pr_err("Dumping OCMEM memory failed for client %d\n", id);
	return -EINVAL;
}

/* Delayed-work handler: fetch one pending request and dispatch its op. */
static void ocmem_sched_wk_func(struct work_struct *work)
{

	struct ocmem_buf *buffer = NULL;
	struct ocmem_handle *handle = NULL;
	struct ocmem_req *req = ocmem_fetch_req();

	if (!req) {
		pr_debug("No Pending Requests found\n");
		return;
	}

	pr_debug("ocmem: sched_wk pending req %p\n", req);
	handle = req_to_handle(req);
	buffer = handle_to_buffer(handle);
	BUG_ON(req->op == SCHED_NOP);

	switch (req->op) {
	case SCHED_GROW:
		process_grow(req);
		break;
	case SCHED_ALLOCATE:
		process_delayed_allocate(req);
		break;
	default:
		pr_err("ocmem: Unknown operation encountered\n");
		break;
	}
	return;
}

/* debugfs: print every request in every region of the scheduler tree. */
static int ocmem_allocations_show(struct seq_file *f, void *dummy)
{
	struct rb_node *rb_node = NULL;
	struct ocmem_req *req = NULL;
	unsigned j;
	mutex_lock(&sched_mutex);
	for (rb_node = rb_first(&sched_tree); rb_node;
				rb_node = rb_next(rb_node)) {
		struct ocmem_region *tmp_region = NULL;
		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
		for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) {
			req = find_req_match(j, tmp_region);
			if (req) {
				seq_printf(f,
					"owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n",
					get_name(req->owner),
					req->req_start, req->req_end,
					req->req_sz, req->state);
			}
		}
	}
	mutex_unlock(&sched_mutex);
	return 0;
}

static int ocmem_allocations_open(struct inode *inode, struct file *file)
{
	return single_open(file, ocmem_allocations_show, inode->i_private);
}

static const struct file_operations allocations_show_fops = {
	.open = ocmem_allocations_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/*
 * One-time scheduler initialization from the platform driver: locks,
 * per-priority queues, RDM/eviction workqueues and the debugfs node.
 */
int ocmem_sched_init(struct platform_device *pdev)
{
	int i = 0;
	struct ocmem_plat_data *pdata = NULL;
	struct device *dev = &pdev->dev;

	sched_tree = RB_ROOT;
	pdata = platform_get_drvdata(pdev);
	mutex_init(&allocation_mutex);
	mutex_init(&free_mutex);
	mutex_init(&sched_mutex);
	mutex_init(&sched_queue_mutex);
	ocmem_vaddr = pdata->vbase;
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
		INIT_LIST_HEAD(&sched_queue[i]);

	mutex_init(&rdm_mutex);
	INIT_LIST_HEAD(&rdm_queue);
	ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
	if (!ocmem_rdm_wq)
		return -ENOMEM;
	ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
	if (!ocmem_eviction_wq)
		return -ENOMEM;
	if (!debugfs_create_file("allocations", S_IRUGO,
					pdata->debug_node, NULL,
					&allocations_show_fops)) {
		dev_err(dev, "Unable to create debugfs node for scheduler\n");
		return -EBUSY;
	}
	return 0;
}
gpl-2.0
MyAOSP/kernel_asus_tf300t
arch/mips/sibyte/sb1250/irq.c
654
9619
/* * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/linkage.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/mm.h> #include <linux/kernel_stat.h> #include <asm/errno.h> #include <asm/signal.h> #include <asm/system.h> #include <asm/time.h> #include <asm/io.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_int.h> #include <asm/sibyte/sb1250_uart.h> #include <asm/sibyte/sb1250_scd.h> #include <asm/sibyte/sb1250.h> /* * These are the routines that handle all the low level interrupt stuff. 
* Actions handled here are: initialization of the interrupt map, requesting of * interrupt lines by handlers, dispatching if interrupts to handlers, probing * for interrupt lines */ #ifdef CONFIG_SIBYTE_HAS_LDT extern unsigned long ldt_eoi_space; #endif /* Store the CPU id (not the logical number) */ int sb1250_irq_owner[SB1250_NR_IRQS]; static DEFINE_RAW_SPINLOCK(sb1250_imr_lock); void sb1250_mask_irq(int cpu, int irq) { unsigned long flags; u64 cur_ints; raw_spin_lock_irqsave(&sb1250_imr_lock, flags); cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) + R_IMR_INTERRUPT_MASK)); cur_ints |= (((u64) 1) << irq); ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) + R_IMR_INTERRUPT_MASK)); raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags); } void sb1250_unmask_irq(int cpu, int irq) { unsigned long flags; u64 cur_ints; raw_spin_lock_irqsave(&sb1250_imr_lock, flags); cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) + R_IMR_INTERRUPT_MASK)); cur_ints &= ~(((u64) 1) << irq); ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) + R_IMR_INTERRUPT_MASK)); raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags); } #ifdef CONFIG_SMP static int sb1250_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) { int i = 0, old_cpu, cpu, int_on; unsigned int irq = d->irq; u64 cur_ints; unsigned long flags; i = cpumask_first(mask); /* Convert logical CPU to physical CPU */ cpu = cpu_logical_map(i); /* Protect against other affinity changers and IMR manipulation */ raw_spin_lock_irqsave(&sb1250_imr_lock, flags); /* Swizzle each CPU's IMR (but leave the IP selection alone) */ old_cpu = sb1250_irq_owner[irq]; cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(old_cpu) + R_IMR_INTERRUPT_MASK)); int_on = !(cur_ints & (((u64) 1) << irq)); if (int_on) { /* If it was on, mask it */ cur_ints |= (((u64) 1) << irq); ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(old_cpu) + R_IMR_INTERRUPT_MASK)); } sb1250_irq_owner[irq] = cpu; if (int_on) { /* unmask for the new CPU */ cur_ints = 
____raw_readq(IOADDR(A_IMR_MAPPER(cpu) + R_IMR_INTERRUPT_MASK)); cur_ints &= ~(((u64) 1) << irq); ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) + R_IMR_INTERRUPT_MASK)); } raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags); return 0; } #endif static void disable_sb1250_irq(struct irq_data *d) { unsigned int irq = d->irq; sb1250_mask_irq(sb1250_irq_owner[irq], irq); } static void enable_sb1250_irq(struct irq_data *d) { unsigned int irq = d->irq; sb1250_unmask_irq(sb1250_irq_owner[irq], irq); } static void ack_sb1250_irq(struct irq_data *d) { unsigned int irq = d->irq; #ifdef CONFIG_SIBYTE_HAS_LDT u64 pending; /* * If the interrupt was an HT interrupt, now is the time to * clear it. NOTE: we assume the HT bridge was set up to * deliver the interrupts to all CPUs (which makes affinity * changing easier for us) */ pending = __raw_readq(IOADDR(A_IMR_REGISTER(sb1250_irq_owner[irq], R_IMR_LDT_INTERRUPT))); pending &= ((u64)1 << (irq)); if (pending) { int i; for (i=0; i<NR_CPUS; i++) { int cpu; #ifdef CONFIG_SMP cpu = cpu_logical_map(i); #else cpu = i; #endif /* * Clear for all CPUs so an affinity switch * doesn't find an old status */ __raw_writeq(pending, IOADDR(A_IMR_REGISTER(cpu, R_IMR_LDT_INTERRUPT_CLR))); } /* * Generate EOI. For Pass 1 parts, EOI is a nop. For * Pass 2, the LDT world may be edge-triggered, but * this EOI shouldn't hurt. If they are * level-sensitive, the EOI is required. 
*/ *(uint32_t *)(ldt_eoi_space+(irq<<16)+(7<<2)) = 0; } #endif sb1250_mask_irq(sb1250_irq_owner[irq], irq); } static struct irq_chip sb1250_irq_type = { .name = "SB1250-IMR", .irq_mask_ack = ack_sb1250_irq, .irq_unmask = enable_sb1250_irq, .irq_mask = disable_sb1250_irq, #ifdef CONFIG_SMP .irq_set_affinity = sb1250_set_affinity #endif }; void __init init_sb1250_irqs(void) { int i; for (i = 0; i < SB1250_NR_IRQS; i++) { irq_set_chip_and_handler(i, &sb1250_irq_type, handle_level_irq); sb1250_irq_owner[i] = 0; } } /* * arch_init_irq is called early in the boot sequence from init/main.c via * init_IRQ. It is responsible for setting up the interrupt mapper and * installing the handler that will be responsible for dispatching interrupts * to the "right" place. */ /* * For now, map all interrupts to IP[2]. We could save * some cycles by parceling out system interrupts to different * IP lines, but keep it simple for bringup. We'll also direct * all interrupts to a single CPU; we should probably route * PCI and LDT to one cpu and everything else to the other * to balance the load a bit. * * On the second cpu, everything is set to IP5, which is * ignored, EXCEPT the mailbox interrupt. That one is * set to IP[2] so it is handled. 
This is needed so we * can do cross-cpu function calls, as required by SMP */ #define IMR_IP2_VAL K_INT_MAP_I0 #define IMR_IP3_VAL K_INT_MAP_I1 #define IMR_IP4_VAL K_INT_MAP_I2 #define IMR_IP5_VAL K_INT_MAP_I3 #define IMR_IP6_VAL K_INT_MAP_I4 void __init arch_init_irq(void) { unsigned int i; u64 tmp; unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 | STATUSF_IP1 | STATUSF_IP0; /* Default everything to IP2 */ for (i = 0; i < SB1250_NR_IRQS; i++) { /* was I0 */ __raw_writeq(IMR_IP2_VAL, IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) + (i << 3))); __raw_writeq(IMR_IP2_VAL, IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MAP_BASE) + (i << 3))); } init_sb1250_irqs(); /* * Map the high 16 bits of the mailbox registers to IP[3], for * inter-cpu messages */ /* Was I1 */ __raw_writeq(IMR_IP3_VAL, IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) + (K_INT_MBOX_0 << 3))); __raw_writeq(IMR_IP3_VAL, IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MAP_BASE) + (K_INT_MBOX_0 << 3))); /* Clear the mailboxes. The firmware may leave them dirty */ __raw_writeq(0xffffffffffffffffULL, IOADDR(A_IMR_REGISTER(0, R_IMR_MAILBOX_CLR_CPU))); __raw_writeq(0xffffffffffffffffULL, IOADDR(A_IMR_REGISTER(1, R_IMR_MAILBOX_CLR_CPU))); /* Mask everything except the mailbox registers for both cpus */ tmp = ~((u64) 0) ^ (((u64) 1) << K_INT_MBOX_0); __raw_writeq(tmp, IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MASK))); __raw_writeq(tmp, IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MASK))); /* * Note that the timer interrupts are also mapped, but this is * done in sb1250_time_init(). Also, the profiling driver * does its own management of IP7. */ /* Enable necessary IPs, disable the rest */ change_c0_status(ST0_IM, imask); } extern void sb1250_mailbox_interrupt(void); static inline void dispatch_ip2(void) { unsigned int cpu = smp_processor_id(); unsigned long long mask; /* * Default...we've hit an IP[2] interrupt, which means we've got to * check the 1250 interrupt registers to figure out what to do. 
Need * to detect which CPU we're on, now that smp_affinity is supported. */ mask = __raw_readq(IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_STATUS_BASE))); if (mask) do_IRQ(fls64(mask) - 1); } asmlinkage void plat_irq_dispatch(void) { unsigned int cpu = smp_processor_id(); unsigned int pending; /* * What a pain. We have to be really careful saving the upper 32 bits * of any * register across function calls if we don't want them * trashed--since were running in -o32, the calling routing never saves * the full 64 bits of a register across a function call. Being the * interrupt handler, we're guaranteed that interrupts are disabled * during this code so we don't have to worry about random interrupts * blasting the high 32 bits. */ pending = read_c0_cause() & read_c0_status() & ST0_IM; if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */ do_IRQ(MIPS_CPU_IRQ_BASE + 7); else if (pending & CAUSEF_IP4) do_IRQ(K_INT_TIMER_0 + cpu); /* sb1250_timer_interrupt() */ #ifdef CONFIG_SMP else if (pending & CAUSEF_IP3) sb1250_mailbox_interrupt(); #endif else if (pending & CAUSEF_IP2) dispatch_ip2(); else spurious_interrupt(); }
gpl-2.0
gengzh0016/kernel_BBxM
drivers/gpu/drm/nouveau/nvc0_vm.c
654
3825
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_vm.h" void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index, struct nouveau_gpuobj *pgt[2]) { u32 pde[2] = { 0, 0 }; if (pgt[0]) pde[1] = 0x00000001 | (pgt[0]->vinst >> 8); if (pgt[1]) pde[0] = 0x00000001 | (pgt[1]->vinst >> 8); nv_wo32(pgd, (index * 8) + 0, pde[0]); nv_wo32(pgd, (index * 8) + 4, pde[1]); } static inline u64 nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) { phys >>= 8; phys |= 0x00000001; /* present */ if (vma->access & NV_MEM_ACCESS_SYS) phys |= 0x00000002; phys |= ((u64)target << 32); phys |= ((u64)memtype << 36); return phys; } void nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) { u32 next = 1 << (vma->node->type - 8); phys = nvc0_vm_addr(vma, phys, mem->memtype, 0); pte <<= 3; while (cnt--) { nv_wo32(pgt, pte + 0, lower_32_bits(phys)); nv_wo32(pgt, pte + 4, upper_32_bits(phys)); phys += next; pte += 8; } } void nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) { pte <<= 3; while (cnt--) { u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5); nv_wo32(pgt, pte + 0, lower_32_bits(phys)); nv_wo32(pgt, pte + 4, upper_32_bits(phys)); pte += 8; } } void nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) { pte <<= 3; while (cnt--) { nv_wo32(pgt, pte + 0, 0x00000000); nv_wo32(pgt, pte + 4, 0x00000000); pte += 8; } } void nvc0_vm_flush(struct nouveau_vm *vm) { struct drm_nouveau_private *dev_priv = vm->dev->dev_private; struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; struct drm_device *dev = vm->dev; struct nouveau_vm_pgd *vpgd; unsigned long flags; u32 engine; engine = 1; if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) engine |= 4; pinstmem->flush(vm->dev); spin_lock_irqsave(&dev_priv->vm_lock, flags); list_for_each_entry(vpgd, 
&vm->pgd_list, head) { /* looks like maybe a "free flush slots" counter, the * faster you write to 0x100cbc to more it decreases */ if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) { NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n", nv_rd32(dev, 0x100c80), engine); } nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8); nv_wr32(dev, 0x100cbc, 0x80000000 | engine); /* wait for flush to be queued? */ if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) { NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n", nv_rd32(dev, 0x100c80), engine); } } spin_unlock_irqrestore(&dev_priv->vm_lock, flags); }
gpl-2.0
dancefire/hd806-kernel-android
drivers/ata/pata_ninja32.c
654
5499
/* * pata_ninja32.c - Ninja32 PATA for new ATA layer * (C) 2007 Red Hat Inc * * Note: The controller like many controllers has shared timings for * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back * in the dma_stop function. Thus we actually don't need a set_dmamode * method as the PIO method is always called and will set the right PIO * timing parameters. * * The Ninja32 Cardbus is not a generic SFF controller. Instead it is * laid out as follows off BAR 0. This is based upon Mark Lord's delkin * driver and the extensive analysis done by the BSD developers, notably * ITOH Yasufumi. * * Base + 0x00 IRQ Status * Base + 0x01 IRQ control * Base + 0x02 Chipset control * Base + 0x03 Unknown * Base + 0x04 VDMA and reset control + wait bits * Base + 0x08 BMIMBA * Base + 0x0C DMA Length * Base + 0x10 Taskfile * Base + 0x18 BMDMA Status ? * Base + 0x1C * Base + 0x1D Bus master control * bit 0 = enable * bit 1 = 0 write/1 read * bit 2 = 1 sgtable * bit 3 = go * bit 4-6 wait bits * bit 7 = done * Base + 0x1E AltStatus * Base + 0x1F timing register */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_ninja32" #define DRV_VERSION "0.1.5" /** * ninja32_set_piomode - set initial PIO mode data * @ap: ATA interface * @adev: ATA device * * Called to do the PIO mode setup. Our timing registers are shared * but we want to set the PIO timing by default. 
*/ static void ninja32_set_piomode(struct ata_port *ap, struct ata_device *adev) { static u16 pio_timing[5] = { 0xd6, 0x85, 0x44, 0x33, 0x13 }; iowrite8(pio_timing[adev->pio_mode - XFER_PIO_0], ap->ioaddr.bmdma_addr + 0x1f); ap->private_data = adev; } static void ninja32_dev_select(struct ata_port *ap, unsigned int device) { struct ata_device *adev = &ap->link.device[device]; if (ap->private_data != adev) { iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f); ata_sff_dev_select(ap, device); ninja32_set_piomode(ap, adev); } } static struct scsi_host_template ninja32_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations ninja32_port_ops = { .inherits = &ata_bmdma_port_ops, .sff_dev_select = ninja32_dev_select, .cable_detect = ata_cable_40wire, .set_piomode = ninja32_set_piomode, .sff_data_xfer = ata_sff_data_xfer32 }; static void ninja32_program(void __iomem *base) { iowrite8(0x05, base + 0x01); /* Enable interrupt lines */ iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */ iowrite8(0x01, base + 0x03); /* Unknown */ iowrite8(0x20, base + 0x04); /* WAIT0 */ iowrite8(0x8f, base + 0x05); /* Unknown */ iowrite8(0xa4, base + 0x1c); /* Unknown */ iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */ } static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) { struct ata_host *host; struct ata_port *ap; void __iomem *base; int rc; host = ata_host_alloc(&dev->dev, 1); if (!host) return -ENOMEM; ap = host->ports[0]; /* Set up the PCI device */ rc = pcim_enable_device(dev); if (rc) return rc; rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(dev); if (rc) return rc; host->iomap = pcim_iomap_table(dev); rc = pci_set_dma_mask(dev, ATA_DMA_MASK); if (rc) return rc; rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK); if (rc) return rc; pci_set_master(dev); /* Set up the register mappings. 
We use the I/O mapping as only the older chips also have MMIO on BAR 1 */ base = host->iomap[0]; if (!base) return -ENOMEM; ap->ops = &ninja32_port_ops; ap->pio_mask = ATA_PIO4; ap->flags |= ATA_FLAG_SLAVE_POSS; ap->ioaddr.cmd_addr = base + 0x10; ap->ioaddr.ctl_addr = base + 0x1E; ap->ioaddr.altstatus_addr = base + 0x1E; ap->ioaddr.bmdma_addr = base; ata_sff_std_ports(&ap->ioaddr); ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; ninja32_program(base); /* FIXME: Should we disable them at remove ? */ return ata_host_activate(host, dev->irq, ata_sff_interrupt, IRQF_SHARED, &ninja32_sht); } #ifdef CONFIG_PM static int ninja32_reinit_one(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; ninja32_program(host->iomap[0]); ata_host_resume(host); return 0; } #endif static const struct pci_device_id ninja32[] = { { 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { }, }; static struct pci_driver ninja32_pci_driver = { .name = DRV_NAME, .id_table = ninja32, .probe = ninja32_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ninja32_reinit_one, #endif }; static int __init ninja32_init(void) { return pci_register_driver(&ninja32_pci_driver); } static void __exit ninja32_exit(void) { pci_unregister_driver(&ninja32_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Ninja32 ATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ninja32); MODULE_VERSION(DRV_VERSION); module_init(ninja32_init); module_exit(ninja32_exit);
gpl-2.0
messi2050/android_kernel_huawei_msm8610
arch/arm/mach-msm/qdsp6v2/audio_amrwbplus.c
910
6456
/* amr-wbplus audio output device * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/msm_audio_amrwbplus.h> #include "audio_utils_aio.h" #ifdef CONFIG_DEBUG_FS static const struct file_operations audio_amrwbplus_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, }; static void config_debug_fs(struct q6audio_aio *audio) { if (audio != NULL) { char name[sizeof("msm_amrwbplus_") + 5]; snprintf(name, sizeof(name), "msm_amrwbplus_%04x", audio->ac->session); audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, NULL, (void *)audio, &audio_amrwbplus_debug_fops); if (IS_ERR(audio->dentry)) pr_debug("debugfs_create_file failed\n"); } } #else static void config_debug_fs(struct q6audio_aio *audio) { } #endif static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct asm_amrwbplus_cfg q6_amrwbplus_cfg; struct msm_audio_amrwbplus_config_v2 *amrwbplus_drv_config; struct q6audio_aio *audio = file->private_data; int rc = 0; switch (cmd) { case AUDIO_START: { pr_err("%s[%p]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* Configure PCM output block */ rc = q6asm_enc_cfg_blk_pcm(audio->ac, audio->pcm_cfg.sample_rate, audio->pcm_cfg.channel_count); if (rc < 0) { pr_err("pcm output block config failed\n"); break; } } amrwbplus_drv_config = (struct msm_audio_amrwbplus_config_v2 *)audio->codec_cfg; 
q6_amrwbplus_cfg.size_bytes = amrwbplus_drv_config->size_bytes; q6_amrwbplus_cfg.version = amrwbplus_drv_config->version; q6_amrwbplus_cfg.num_channels = amrwbplus_drv_config->num_channels; q6_amrwbplus_cfg.amr_band_mode = amrwbplus_drv_config->amr_band_mode; q6_amrwbplus_cfg.amr_dtx_mode = amrwbplus_drv_config->amr_dtx_mode; q6_amrwbplus_cfg.amr_frame_fmt = amrwbplus_drv_config->amr_frame_fmt; q6_amrwbplus_cfg.amr_lsf_idx = amrwbplus_drv_config->amr_lsf_idx; rc = q6asm_media_format_block_amrwbplus(audio->ac, &q6_amrwbplus_cfg); if (rc < 0) { pr_err("q6asm_media_format_block_amrwb+ failed...\n"); break; } rc = audio_aio_enable(audio); audio->eos_rsp = 0; audio->eos_flag = 0; if (!rc) { audio->enabled = 1; } else { audio->enabled = 0; pr_err("Audio Start procedure failed rc=%d\n", rc); break; } pr_debug("%s:AUDIO_START sessionid[%d]enable[%d]\n", __func__, audio->ac->session, audio->enabled); if (audio->stopped == 1) audio->stopped = 0; break; } case AUDIO_GET_AMRWBPLUS_CONFIG_V2: { if ((audio) && (arg) && (audio->codec_cfg)) { if (copy_to_user((void *)arg, audio->codec_cfg, sizeof(struct msm_audio_amrwbplus_config_v2))) { rc = -EFAULT; pr_err("wb+ config get copy_to_user failed"); break; } } else { pr_err("wb+ config v2 invalid parameters.."); rc = -EFAULT; break; } break; } case AUDIO_SET_AMRWBPLUS_CONFIG_V2: { if ((audio) && (arg) && (audio->codec_cfg)) { if (copy_from_user(audio->codec_cfg, (void *)arg, sizeof(struct msm_audio_amrwbplus_config_v2))) { rc = -EFAULT; pr_err("wb+ config set copy_to_user_failed"); break; } } else { pr_err("wb+ config invalid parameters.."); rc = -EFAULT; break; } break; } default: pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); } return rc; } static int audio_open(struct inode *inode, struct file *file) { struct q6audio_aio *audio = NULL; int rc = 0; audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); if (audio == NULL) { pr_err("kzalloc failed for amrwb+ decode driver\n"); 
return -ENOMEM; } audio->codec_cfg = kzalloc(sizeof(struct msm_audio_amrwbplus_config_v2), GFP_KERNEL); if (audio->codec_cfg == NULL) { pr_err("%s:failed kzalloc for amrwb+ config structure", __func__); kfree(audio); return -ENOMEM; } audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, (void *)audio); if (!audio->ac) { pr_err("Could not allocate memory for audio client\n"); kfree(audio->codec_cfg); kfree(audio); return -ENOMEM; } /* open in T/NT mode */ if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, FORMAT_AMR_WB_PLUS); if (rc < 0) { pr_err("amrwbplus NT mode Open failed rc=%d\n", rc); rc = -ENODEV; goto fail; } audio->feedback = NON_TUNNEL_MODE; audio->buf_cfg.frames_per_buf = 0x01; audio->buf_cfg.meta_info_enable = 0x01; } else if ((file->f_mode & FMODE_WRITE) && !(file->f_mode & FMODE_READ)) { rc = q6asm_open_write(audio->ac, FORMAT_AMR_WB_PLUS); if (rc < 0) { pr_err("wb+ T mode Open failed rc=%d\n", rc); rc = -ENODEV; goto fail; } audio->feedback = TUNNEL_MODE; audio->buf_cfg.meta_info_enable = 0x00; } else { pr_err("audio_amrwbplus Not supported mode\n"); rc = -EACCES; goto fail; } rc = audio_aio_open(audio, file); if (rc < 0) { pr_err("audio_aio_open rc=%d\n", rc); goto fail; } config_debug_fs(audio); pr_debug("%s: AMRWBPLUS dec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); return 0; fail: q6asm_audio_client_free(audio->ac); kfree(audio->codec_cfg); kfree(audio); return rc; } static const struct file_operations audio_amrwbplus_fops = { .owner = THIS_MODULE, .open = audio_open, .release = audio_aio_release, .unlocked_ioctl = audio_ioctl, .fsync = audio_aio_fsync, }; struct miscdevice audio_amrwbplus_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_amrwbplus", .fops = &audio_amrwbplus_fops, }; static int __init audio_amrwbplus_init(void) { return misc_register(&audio_amrwbplus_misc); } 
device_initcall(audio_amrwbplus_init);
gpl-2.0
Lloir/pc-kernel
arch/arm/mach-omap1/board-h2.c
910
10310
/* * linux/arch/arm/mach-omap1/board-h2.c * * Board specific inits for OMAP-1610 H2 * * Copyright (C) 2001 RidgeRun, Inc. * Author: Greg Lonnon <glonnon@ridgerun.com> * * Copyright (C) 2002 MontaVista Software, Inc. * * Separated FPGA interrupts from innovator1510.c and cleaned up for 2.6 * Copyright (C) 2004 Nokia Corporation by Tony Lindrgen <tony@atomide.com> * * H2 specific changes and cleanup * Copyright (C) 2004 Nokia Corporation by Imre Deak <imre.deak@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/input.h> #include <linux/i2c/tps65010.h> #include <linux/smc91x.h> #include <linux/omapfb.h> #include <linux/platform_data/gpio-omap.h> #include <linux/leds.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/mux.h> #include <linux/omap-dma.h> #include <mach/tc.h> #include <linux/platform_data/keypad-omap.h> #include <mach/flash.h> #include <mach/hardware.h> #include <mach/usb.h> #include "common.h" #include "board-h2.h" /* At OMAP1610 Innovator the Ethernet is directly connected to CS1 */ #define OMAP1610_ETHR_START 0x04000300 static const unsigned int h2_keymap[] = { KEY(0, 0, KEY_LEFT), KEY(1, 0, KEY_RIGHT), KEY(2, 0, KEY_3), KEY(3, 0, KEY_F10), KEY(4, 0, KEY_F5), KEY(5, 0, KEY_9), KEY(0, 1, KEY_DOWN), KEY(1, 1, KEY_UP), KEY(2, 1, KEY_2), KEY(3, 1, KEY_F9), KEY(4, 1, KEY_F7), KEY(5, 1, KEY_0), KEY(0, 2, KEY_ENTER), KEY(1, 2, KEY_6), KEY(2, 2, KEY_1), KEY(3, 2, KEY_F2), KEY(4, 2, KEY_F6), KEY(5, 2, KEY_HOME), KEY(0, 3, KEY_8), KEY(1, 3, KEY_5), KEY(2, 3, KEY_F12), KEY(3, 3, KEY_F3), KEY(4, 3, KEY_F8), 
KEY(5, 3, KEY_END), KEY(0, 4, KEY_7), KEY(1, 4, KEY_4), KEY(2, 4, KEY_F11), KEY(3, 4, KEY_F1), KEY(4, 4, KEY_F4), KEY(5, 4, KEY_ESC), KEY(0, 5, KEY_F13), KEY(1, 5, KEY_F14), KEY(2, 5, KEY_F15), KEY(3, 5, KEY_F16), KEY(4, 5, KEY_SLEEP), }; static struct mtd_partition h2_nor_partitions[] = { /* bootloader (U-Boot, etc) in first sector */ { .name = "bootloader", .offset = 0, .size = SZ_128K, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* bootloader params in the next sector */ { .name = "params", .offset = MTDPART_OFS_APPEND, .size = SZ_128K, .mask_flags = 0, }, /* kernel */ { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = SZ_2M, .mask_flags = 0 }, /* file system */ { .name = "filesystem", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, .mask_flags = 0 } }; static struct physmap_flash_data h2_nor_data = { .width = 2, .set_vpp = omap1_set_vpp, .parts = h2_nor_partitions, .nr_parts = ARRAY_SIZE(h2_nor_partitions), }; static struct resource h2_nor_resource = { /* This is on CS3, wherever it's mapped */ .flags = IORESOURCE_MEM, }; static struct platform_device h2_nor_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &h2_nor_data, }, .num_resources = 1, .resource = &h2_nor_resource, }; static struct mtd_partition h2_nand_partitions[] = { #if 0 /* REVISIT: enable these partitions if you make NAND BOOT * work on your H2 (rev C or newer); published versions of * x-load only support P2 and H3. 
*/ { .name = "xloader", .offset = 0, .size = 64 * 1024, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "bootloader", .offset = MTDPART_OFS_APPEND, .size = 256 * 1024, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "params", .offset = MTDPART_OFS_APPEND, .size = 192 * 1024, }, { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = 2 * SZ_1M, }, #endif { .name = "filesystem", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, }, }; #define H2_NAND_RB_GPIO_PIN 62 static int h2_nand_dev_ready(struct mtd_info *mtd) { return gpio_get_value(H2_NAND_RB_GPIO_PIN); } static struct platform_nand_data h2_nand_platdata = { .chip = { .nr_chips = 1, .chip_offset = 0, .nr_partitions = ARRAY_SIZE(h2_nand_partitions), .partitions = h2_nand_partitions, .options = NAND_SAMSUNG_LP_OPTIONS, }, .ctrl = { .cmd_ctrl = omap1_nand_cmd_ctl, .dev_ready = h2_nand_dev_ready, }, }; static struct resource h2_nand_resource = { .flags = IORESOURCE_MEM, }; static struct platform_device h2_nand_device = { .name = "gen_nand", .id = 0, .dev = { .platform_data = &h2_nand_platdata, }, .num_resources = 1, .resource = &h2_nand_resource, }; static struct smc91x_platdata h2_smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource h2_smc91x_resources[] = { [0] = { .start = OMAP1610_ETHR_START, /* Physical */ .end = OMAP1610_ETHR_START + 0xf, .flags = IORESOURCE_MEM, }, [1] = { .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, }, }; static struct platform_device h2_smc91x_device = { .name = "smc91x", .id = 0, .dev = { .platform_data = &h2_smc91x_info, }, .num_resources = ARRAY_SIZE(h2_smc91x_resources), .resource = h2_smc91x_resources, }; static struct resource h2_kp_resources[] = { [0] = { .start = INT_KEYBOARD, .end = INT_KEYBOARD, .flags = IORESOURCE_IRQ, }, }; static const struct matrix_keymap_data h2_keymap_data = { .keymap = h2_keymap, .keymap_size = ARRAY_SIZE(h2_keymap), }; static struct 
omap_kp_platform_data h2_kp_data = { .rows = 8, .cols = 8, .keymap_data = &h2_keymap_data, .rep = true, .delay = 9, .dbounce = true, }; static struct platform_device h2_kp_device = { .name = "omap-keypad", .id = -1, .dev = { .platform_data = &h2_kp_data, }, .num_resources = ARRAY_SIZE(h2_kp_resources), .resource = h2_kp_resources, }; static struct gpio_led h2_gpio_led_pins[] = { { .name = "h2:red", .default_trigger = "heartbeat", .gpio = 3, }, { .name = "h2:green", .default_trigger = "cpu0", .gpio = OMAP_MPUIO(4), }, }; static struct gpio_led_platform_data h2_gpio_led_data = { .leds = h2_gpio_led_pins, .num_leds = ARRAY_SIZE(h2_gpio_led_pins), }; static struct platform_device h2_gpio_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &h2_gpio_led_data, }, }; static struct platform_device *h2_devices[] __initdata = { &h2_nor_device, &h2_nand_device, &h2_smc91x_device, &h2_kp_device, &h2_gpio_leds, }; static void __init h2_init_smc91x(void) { if (gpio_request(0, "SMC91x irq") < 0) { printk("Error requesting gpio 0 for smc91x irq\n"); return; } } static int tps_setup(struct i2c_client *client, void *context) { if (!IS_BUILTIN(CONFIG_TPS65010)) return -ENOSYS; tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V | TPS_LDO1_ENABLE | TPS_VLDO1_3_0V); return 0; } static struct tps65010_board tps_board = { .base = H2_TPS_GPIO_BASE, .outmask = 0x0f, .setup = tps_setup, }; static struct i2c_board_info __initdata h2_i2c_board_info[] = { { I2C_BOARD_INFO("tps65010", 0x48), .platform_data = &tps_board, }, { I2C_BOARD_INFO("isp1301_omap", 0x2d), }, }; static struct omap_usb_config h2_usb_config __initdata = { /* usb1 has a Mini-AB port and external isp1301 transceiver */ .otg = 2, #if IS_ENABLED(CONFIG_USB_OMAP) .hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */ /* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */ #elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) /* needs OTG cable, or NONSTANDARD (B-to-MiniB) */ 
.hmc_mode = 20, /* 1:dev|otg(off) 1:host 2:disabled */ #endif .pins[1] = 3, }; static struct omap_lcd_config h2_lcd_config __initdata = { .ctrl_name = "internal", }; static void __init h2_init(void) { h2_init_smc91x(); /* Here we assume the NOR boot config: NOR on CS3 (possibly swapped * to address 0 by a dip switch), NAND on CS2B. The NAND driver will * notice whether a NAND chip is enabled at probe time. * * FIXME revC boards (and H3) support NAND-boot, with a dip switch to * put NOR on CS2B and NAND (which on H2 may be 16bit) on CS3. Try * detecting that in code here, to avoid probing every possible flash * configuration... */ h2_nor_resource.end = h2_nor_resource.start = omap_cs3_phys(); h2_nor_resource.end += SZ_32M - 1; h2_nand_resource.end = h2_nand_resource.start = OMAP_CS2B_PHYS; h2_nand_resource.end += SZ_4K - 1; BUG_ON(gpio_request(H2_NAND_RB_GPIO_PIN, "NAND ready") < 0); gpio_direction_input(H2_NAND_RB_GPIO_PIN); omap_cfg_reg(L3_1610_FLASH_CS2B_OE); omap_cfg_reg(M8_1610_FLASH_CS2B_WE); /* MMC: card detect and WP */ /* omap_cfg_reg(U19_ARMIO1); */ /* CD */ omap_cfg_reg(BALLOUT_V8_ARMIO3); /* WP */ /* Mux pins for keypad */ omap_cfg_reg(F18_1610_KBC0); omap_cfg_reg(D20_1610_KBC1); omap_cfg_reg(D19_1610_KBC2); omap_cfg_reg(E18_1610_KBC3); omap_cfg_reg(C21_1610_KBC4); omap_cfg_reg(G18_1610_KBR0); omap_cfg_reg(F19_1610_KBR1); omap_cfg_reg(H14_1610_KBR2); omap_cfg_reg(E20_1610_KBR3); omap_cfg_reg(E19_1610_KBR4); omap_cfg_reg(N19_1610_KBR5); /* GPIO based LEDs */ omap_cfg_reg(P18_1610_GPIO3); omap_cfg_reg(MPUIO4); h2_smc91x_resources[1].start = gpio_to_irq(0); h2_smc91x_resources[1].end = gpio_to_irq(0); platform_add_devices(h2_devices, ARRAY_SIZE(h2_devices)); omap_serial_init(); h2_i2c_board_info[0].irq = gpio_to_irq(58); h2_i2c_board_info[1].irq = gpio_to_irq(2); omap_register_i2c_bus(1, 100, h2_i2c_board_info, ARRAY_SIZE(h2_i2c_board_info)); omap1_usb_init(&h2_usb_config); h2_mmc_init(); omapfb_set_lcd_config(&h2_lcd_config); } MACHINE_START(OMAP_H2, 
"TI-H2") /* Maintainer: Imre Deak <imre.deak@nokia.com> */ .atag_offset = 0x100, .map_io = omap16xx_map_io, .init_early = omap1_init_early, .init_irq = omap1_init_irq, .init_machine = h2_init, .init_late = omap1_init_late, .init_time = omap1_timer_init, .restart = omap1_restart, MACHINE_END
gpl-2.0
SharkBa1t/cse524
drivers/input/ff-memless.c
1166
14744
/* * Force feedback support for memoryless devices * * Copyright (c) 2006 Anssi Hannula <anssi.hannula@gmail.com> * Copyright (c) 2006 Dmitry Torokhov <dtor@mail.ru> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define DEBUG */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <linux/input.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/jiffies.h> #include <linux/fixp-arith.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Anssi Hannula <anssi.hannula@gmail.com>"); MODULE_DESCRIPTION("Force feedback support for memoryless devices"); /* Number of effects handled with memoryless devices */ #define FF_MEMLESS_EFFECTS 16 /* Envelope update interval in ms */ #define FF_ENVELOPE_INTERVAL 50 #define FF_EFFECT_STARTED 0 #define FF_EFFECT_PLAYING 1 #define FF_EFFECT_ABORTING 2 struct ml_effect_state { struct ff_effect *effect; unsigned long flags; /* effect state (STARTED, PLAYING, etc) */ int count; /* loop count of the effect */ unsigned long play_at; /* start time */ unsigned long stop_at; /* stop time */ unsigned long adj_at; /* last time the effect was sent */ }; struct ml_device { void *private; struct ml_effect_state states[FF_MEMLESS_EFFECTS]; int gain; struct timer_list timer; struct input_dev *dev; int (*play_effect)(struct input_dev *dev, void 
*data, struct ff_effect *effect); }; static const struct ff_envelope *get_envelope(const struct ff_effect *effect) { static const struct ff_envelope empty_envelope; switch (effect->type) { case FF_PERIODIC: return &effect->u.periodic.envelope; case FF_CONSTANT: return &effect->u.constant.envelope; default: return &empty_envelope; } } /* * Check for the next time envelope requires an update on memoryless devices */ static unsigned long calculate_next_time(struct ml_effect_state *state) { const struct ff_envelope *envelope = get_envelope(state->effect); unsigned long attack_stop, fade_start, next_fade; if (envelope->attack_length) { attack_stop = state->play_at + msecs_to_jiffies(envelope->attack_length); if (time_before(state->adj_at, attack_stop)) return state->adj_at + msecs_to_jiffies(FF_ENVELOPE_INTERVAL); } if (state->effect->replay.length) { if (envelope->fade_length) { /* check when fading should start */ fade_start = state->stop_at - msecs_to_jiffies(envelope->fade_length); if (time_before(state->adj_at, fade_start)) return fade_start; /* already fading, advance to next checkpoint */ next_fade = state->adj_at + msecs_to_jiffies(FF_ENVELOPE_INTERVAL); if (time_before(next_fade, state->stop_at)) return next_fade; } return state->stop_at; } return state->play_at; } static void ml_schedule_timer(struct ml_device *ml) { struct ml_effect_state *state; unsigned long now = jiffies; unsigned long earliest = 0; unsigned long next_at; int events = 0; int i; pr_debug("calculating next timer\n"); for (i = 0; i < FF_MEMLESS_EFFECTS; i++) { state = &ml->states[i]; if (!test_bit(FF_EFFECT_STARTED, &state->flags)) continue; if (test_bit(FF_EFFECT_PLAYING, &state->flags)) next_at = calculate_next_time(state); else next_at = state->play_at; if (time_before_eq(now, next_at) && (++events == 1 || time_before(next_at, earliest))) earliest = next_at; } if (!events) { pr_debug("no actions\n"); del_timer(&ml->timer); } else { pr_debug("timer set\n"); mod_timer(&ml->timer, earliest); 
} } /* * Apply an envelope to a value */ static int apply_envelope(struct ml_effect_state *state, int value, struct ff_envelope *envelope) { struct ff_effect *effect = state->effect; unsigned long now = jiffies; int time_from_level; int time_of_envelope; int envelope_level; int difference; if (envelope->attack_length && time_before(now, state->play_at + msecs_to_jiffies(envelope->attack_length))) { pr_debug("value = 0x%x, attack_level = 0x%x\n", value, envelope->attack_level); time_from_level = jiffies_to_msecs(now - state->play_at); time_of_envelope = envelope->attack_length; envelope_level = min_t(u16, envelope->attack_level, 0x7fff); } else if (envelope->fade_length && effect->replay.length && time_after(now, state->stop_at - msecs_to_jiffies(envelope->fade_length)) && time_before(now, state->stop_at)) { time_from_level = jiffies_to_msecs(state->stop_at - now); time_of_envelope = envelope->fade_length; envelope_level = min_t(u16, envelope->fade_level, 0x7fff); } else return value; difference = abs(value) - envelope_level; pr_debug("difference = %d\n", difference); pr_debug("time_from_level = 0x%x\n", time_from_level); pr_debug("time_of_envelope = 0x%x\n", time_of_envelope); difference = difference * time_from_level / time_of_envelope; pr_debug("difference = %d\n", difference); return value < 0 ? -(difference + envelope_level) : (difference + envelope_level); } /* * Return the type the effect has to be converted into (memless devices) */ static int get_compatible_type(struct ff_device *ff, int effect_type) { if (test_bit(effect_type, ff->ffbit)) return effect_type; if (effect_type == FF_PERIODIC && test_bit(FF_RUMBLE, ff->ffbit)) return FF_RUMBLE; pr_err("invalid type in get_compatible_type()\n"); return 0; } /* * Only left/right direction should be used (under/over 0x8000) for * forward/reverse motor direction (to keep calculation fast & simple). 
*/ static u16 ml_calculate_direction(u16 direction, u16 force, u16 new_direction, u16 new_force) { if (!force) return new_direction; if (!new_force) return direction; return (((u32)(direction >> 1) * force + (new_direction >> 1) * new_force) / (force + new_force)) << 1; } #define FRAC_N 8 static inline s16 fixp_new16(s16 a) { return ((s32)a) >> (16 - FRAC_N); } static inline s16 fixp_mult(s16 a, s16 b) { a = ((s32)a * 0x100) / 0x7fff; return ((s32)(a * b)) >> FRAC_N; } /* * Combine two effects and apply gain. */ static void ml_combine_effects(struct ff_effect *effect, struct ml_effect_state *state, int gain) { struct ff_effect *new = state->effect; unsigned int strong, weak, i; int x, y; s16 level; switch (new->type) { case FF_CONSTANT: i = new->direction * 360 / 0xffff; level = fixp_new16(apply_envelope(state, new->u.constant.level, &new->u.constant.envelope)); x = fixp_mult(fixp_sin16(i), level) * gain / 0xffff; y = fixp_mult(-fixp_cos16(i), level) * gain / 0xffff; /* * here we abuse ff_ramp to hold x and y of constant force * If in future any driver wants something else than x and y * in s8, this should be changed to something more generic */ effect->u.ramp.start_level = clamp_val(effect->u.ramp.start_level + x, -0x80, 0x7f); effect->u.ramp.end_level = clamp_val(effect->u.ramp.end_level + y, -0x80, 0x7f); break; case FF_RUMBLE: strong = (u32)new->u.rumble.strong_magnitude * gain / 0xffff; weak = (u32)new->u.rumble.weak_magnitude * gain / 0xffff; if (effect->u.rumble.strong_magnitude + strong) effect->direction = ml_calculate_direction( effect->direction, effect->u.rumble.strong_magnitude, new->direction, strong); else if (effect->u.rumble.weak_magnitude + weak) effect->direction = ml_calculate_direction( effect->direction, effect->u.rumble.weak_magnitude, new->direction, weak); else effect->direction = 0; effect->u.rumble.strong_magnitude = min(strong + effect->u.rumble.strong_magnitude, 0xffffU); effect->u.rumble.weak_magnitude = min(weak + 
effect->u.rumble.weak_magnitude, 0xffffU); break; case FF_PERIODIC: i = apply_envelope(state, abs(new->u.periodic.magnitude), &new->u.periodic.envelope); /* here we also scale it 0x7fff => 0xffff */ i = i * gain / 0x7fff; if (effect->u.rumble.strong_magnitude + i) effect->direction = ml_calculate_direction( effect->direction, effect->u.rumble.strong_magnitude, new->direction, i); else effect->direction = 0; effect->u.rumble.strong_magnitude = min(i + effect->u.rumble.strong_magnitude, 0xffffU); effect->u.rumble.weak_magnitude = min(i + effect->u.rumble.weak_magnitude, 0xffffU); break; default: pr_err("invalid type in ml_combine_effects()\n"); break; } } /* * Because memoryless devices have only one effect per effect type active * at one time we have to combine multiple effects into one */ static int ml_get_combo_effect(struct ml_device *ml, unsigned long *effect_handled, struct ff_effect *combo_effect) { struct ff_effect *effect; struct ml_effect_state *state; int effect_type; int i; memset(combo_effect, 0, sizeof(struct ff_effect)); for (i = 0; i < FF_MEMLESS_EFFECTS; i++) { if (__test_and_set_bit(i, effect_handled)) continue; state = &ml->states[i]; effect = state->effect; if (!test_bit(FF_EFFECT_STARTED, &state->flags)) continue; if (time_before(jiffies, state->play_at)) continue; /* * here we have started effects that are either * currently playing (and may need be aborted) * or need to start playing. 
*/ effect_type = get_compatible_type(ml->dev->ff, effect->type); if (combo_effect->type != effect_type) { if (combo_effect->type != 0) { __clear_bit(i, effect_handled); continue; } combo_effect->type = effect_type; } if (__test_and_clear_bit(FF_EFFECT_ABORTING, &state->flags)) { __clear_bit(FF_EFFECT_PLAYING, &state->flags); __clear_bit(FF_EFFECT_STARTED, &state->flags); } else if (effect->replay.length && time_after_eq(jiffies, state->stop_at)) { __clear_bit(FF_EFFECT_PLAYING, &state->flags); if (--state->count <= 0) { __clear_bit(FF_EFFECT_STARTED, &state->flags); } else { state->play_at = jiffies + msecs_to_jiffies(effect->replay.delay); state->stop_at = state->play_at + msecs_to_jiffies(effect->replay.length); } } else { __set_bit(FF_EFFECT_PLAYING, &state->flags); state->adj_at = jiffies; ml_combine_effects(combo_effect, state, ml->gain); } } return combo_effect->type != 0; } static void ml_play_effects(struct ml_device *ml) { struct ff_effect effect; DECLARE_BITMAP(handled_bm, FF_MEMLESS_EFFECTS); memset(handled_bm, 0, sizeof(handled_bm)); while (ml_get_combo_effect(ml, handled_bm, &effect)) ml->play_effect(ml->dev, ml->private, &effect); ml_schedule_timer(ml); } static void ml_effect_timer(unsigned long timer_data) { struct input_dev *dev = (struct input_dev *)timer_data; struct ml_device *ml = dev->ff->private; unsigned long flags; pr_debug("timer: updating effects\n"); spin_lock_irqsave(&dev->event_lock, flags); ml_play_effects(ml); spin_unlock_irqrestore(&dev->event_lock, flags); } /* * Sets requested gain for FF effects. Called with dev->event_lock held. */ static void ml_ff_set_gain(struct input_dev *dev, u16 gain) { struct ml_device *ml = dev->ff->private; int i; ml->gain = gain; for (i = 0; i < FF_MEMLESS_EFFECTS; i++) __clear_bit(FF_EFFECT_PLAYING, &ml->states[i].flags); ml_play_effects(ml); } /* * Start/stop specified FF effect. Called with dev->event_lock held. 
*/ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value) { struct ml_device *ml = dev->ff->private; struct ml_effect_state *state = &ml->states[effect_id]; if (value > 0) { pr_debug("initiated play\n"); __set_bit(FF_EFFECT_STARTED, &state->flags); state->count = value; state->play_at = jiffies + msecs_to_jiffies(state->effect->replay.delay); state->stop_at = state->play_at + msecs_to_jiffies(state->effect->replay.length); state->adj_at = state->play_at; } else { pr_debug("initiated stop\n"); if (test_bit(FF_EFFECT_PLAYING, &state->flags)) __set_bit(FF_EFFECT_ABORTING, &state->flags); else __clear_bit(FF_EFFECT_STARTED, &state->flags); } ml_play_effects(ml); return 0; } static int ml_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old) { struct ml_device *ml = dev->ff->private; struct ml_effect_state *state = &ml->states[effect->id]; spin_lock_irq(&dev->event_lock); if (test_bit(FF_EFFECT_STARTED, &state->flags)) { __clear_bit(FF_EFFECT_PLAYING, &state->flags); state->play_at = jiffies + msecs_to_jiffies(state->effect->replay.delay); state->stop_at = state->play_at + msecs_to_jiffies(state->effect->replay.length); state->adj_at = state->play_at; ml_schedule_timer(ml); } spin_unlock_irq(&dev->event_lock); return 0; } static void ml_ff_destroy(struct ff_device *ff) { struct ml_device *ml = ff->private; kfree(ml->private); } /** * input_ff_create_memless() - create memoryless force-feedback device * @dev: input device supporting force-feedback * @data: driver-specific data to be passed into @play_effect * @play_effect: driver-specific method for playing FF effect */ int input_ff_create_memless(struct input_dev *dev, void *data, int (*play_effect)(struct input_dev *, void *, struct ff_effect *)) { struct ml_device *ml; struct ff_device *ff; int error; int i; ml = kzalloc(sizeof(struct ml_device), GFP_KERNEL); if (!ml) return -ENOMEM; ml->dev = dev; ml->private = data; ml->play_effect = play_effect; ml->gain = 0xffff; 
setup_timer(&ml->timer, ml_effect_timer, (unsigned long)dev); set_bit(FF_GAIN, dev->ffbit); error = input_ff_create(dev, FF_MEMLESS_EFFECTS); if (error) { kfree(ml); return error; } ff = dev->ff; ff->private = ml; ff->upload = ml_ff_upload; ff->playback = ml_ff_playback; ff->set_gain = ml_ff_set_gain; ff->destroy = ml_ff_destroy; /* we can emulate periodic effects with RUMBLE */ if (test_bit(FF_RUMBLE, ff->ffbit)) { set_bit(FF_PERIODIC, dev->ffbit); set_bit(FF_SINE, dev->ffbit); set_bit(FF_TRIANGLE, dev->ffbit); set_bit(FF_SQUARE, dev->ffbit); } for (i = 0; i < FF_MEMLESS_EFFECTS; i++) ml->states[i].effect = &ff->effects[i]; return 0; } EXPORT_SYMBOL_GPL(input_ff_create_memless);
gpl-2.0
CyanogenMod/android_kernel_motorola_omap4-kexec-common
arch/sh/kernel/cpu/sh4a/setup-sh7724.c
2190
36791
/* * SH7724 Setup * * Copyright (C) 2009 Renesas Solutions Corp. * * Kuninori Morimoto <morimoto.kuninori@renesas.com> * * Based on SH7723 Setup * Copyright (C) 2008 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/mm.h> #include <linux/serial_sci.h> #include <linux/uio_driver.h> #include <linux/sh_dma.h> #include <linux/sh_timer.h> #include <linux/io.h> #include <linux/notifier.h> #include <asm/suspend.h> #include <asm/clock.h> #include <asm/mmzone.h> #include <cpu/dma-register.h> #include <cpu/sh7724.h> /* DMA */ static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = { { .slave_id = SHDMA_SLAVE_SCIF0_TX, .addr = 0xffe0000c, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x21, }, { .slave_id = SHDMA_SLAVE_SCIF0_RX, .addr = 0xffe00014, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x22, }, { .slave_id = SHDMA_SLAVE_SCIF1_TX, .addr = 0xffe1000c, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x25, }, { .slave_id = SHDMA_SLAVE_SCIF1_RX, .addr = 0xffe10014, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x26, }, { .slave_id = SHDMA_SLAVE_SCIF2_TX, .addr = 0xffe2000c, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x29, }, { .slave_id = SHDMA_SLAVE_SCIF2_RX, .addr = 0xffe20014, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x2a, }, { .slave_id = SHDMA_SLAVE_SCIF3_TX, .addr = 0xa4e30020, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x2d, }, { .slave_id = SHDMA_SLAVE_SCIF3_RX, .addr = 0xa4e30024, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x2e, }, { .slave_id = SHDMA_SLAVE_SCIF4_TX, .addr = 0xa4e40020, .chcr = DM_FIX 
| SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x31, }, { .slave_id = SHDMA_SLAVE_SCIF4_RX, .addr = 0xa4e40024, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x32, }, { .slave_id = SHDMA_SLAVE_SCIF5_TX, .addr = 0xa4e50020, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x35, }, { .slave_id = SHDMA_SLAVE_SCIF5_RX, .addr = 0xa4e50024, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x36, }, { .slave_id = SHDMA_SLAVE_USB0D0_TX, .addr = 0xA4D80100, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0x73, }, { .slave_id = SHDMA_SLAVE_USB0D0_RX, .addr = 0xA4D80100, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0x73, }, { .slave_id = SHDMA_SLAVE_USB0D1_TX, .addr = 0xA4D80120, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0x77, }, { .slave_id = SHDMA_SLAVE_USB0D1_RX, .addr = 0xA4D80120, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0x77, }, { .slave_id = SHDMA_SLAVE_USB1D0_TX, .addr = 0xA4D90100, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0xab, }, { .slave_id = SHDMA_SLAVE_USB1D0_RX, .addr = 0xA4D90100, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0xab, }, { .slave_id = SHDMA_SLAVE_USB1D1_TX, .addr = 0xA4D90120, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0xaf, }, { .slave_id = SHDMA_SLAVE_USB1D1_RX, .addr = 0xA4D90120, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0xaf, }, { .slave_id = SHDMA_SLAVE_SDHI0_TX, .addr = 0x04ce0030, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), .mid_rid = 0xc1, }, { .slave_id = SHDMA_SLAVE_SDHI0_RX, .addr = 0x04ce0030, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), .mid_rid = 0xc2, }, { .slave_id = SHDMA_SLAVE_SDHI1_TX, .addr = 0x04cf0030, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), 
.mid_rid = 0xc9, }, { .slave_id = SHDMA_SLAVE_SDHI1_RX, .addr = 0x04cf0030, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), .mid_rid = 0xca, }, }; static const struct sh_dmae_channel sh7724_dmae_channels[] = { { .offset = 0, .dmars = 0, .dmars_bit = 0, }, { .offset = 0x10, .dmars = 0, .dmars_bit = 8, }, { .offset = 0x20, .dmars = 4, .dmars_bit = 0, }, { .offset = 0x30, .dmars = 4, .dmars_bit = 8, }, { .offset = 0x50, .dmars = 8, .dmars_bit = 0, }, { .offset = 0x60, .dmars = 8, .dmars_bit = 8, } }; static const unsigned int ts_shift[] = TS_SHIFT; static struct sh_dmae_pdata dma_platform_data = { .slave = sh7724_dmae_slaves, .slave_num = ARRAY_SIZE(sh7724_dmae_slaves), .channel = sh7724_dmae_channels, .channel_num = ARRAY_SIZE(sh7724_dmae_channels), .ts_low_shift = CHCR_TS_LOW_SHIFT, .ts_low_mask = CHCR_TS_LOW_MASK, .ts_high_shift = CHCR_TS_HIGH_SHIFT, .ts_high_mask = CHCR_TS_HIGH_MASK, .ts_shift = ts_shift, .ts_shift_num = ARRAY_SIZE(ts_shift), .dmaor_init = DMAOR_INIT, }; /* Resource order important! */ static struct resource sh7724_dmae0_resources[] = { { /* Channel registers and DMAOR */ .start = 0xfe008020, .end = 0xfe00808f, .flags = IORESOURCE_MEM, }, { /* DMARSx */ .start = 0xfe009000, .end = 0xfe00900b, .flags = IORESOURCE_MEM, }, { /* DMA error IRQ */ .start = 78, .end = 78, .flags = IORESOURCE_IRQ, }, { /* IRQ for channels 0-3 */ .start = 48, .end = 51, .flags = IORESOURCE_IRQ, }, { /* IRQ for channels 4-5 */ .start = 76, .end = 77, .flags = IORESOURCE_IRQ, }, }; /* Resource order important! 
*/ static struct resource sh7724_dmae1_resources[] = { { /* Channel registers and DMAOR */ .start = 0xfdc08020, .end = 0xfdc0808f, .flags = IORESOURCE_MEM, }, { /* DMARSx */ .start = 0xfdc09000, .end = 0xfdc0900b, .flags = IORESOURCE_MEM, }, { /* DMA error IRQ */ .start = 74, .end = 74, .flags = IORESOURCE_IRQ, }, { /* IRQ for channels 0-3 */ .start = 40, .end = 43, .flags = IORESOURCE_IRQ, }, { /* IRQ for channels 4-5 */ .start = 72, .end = 73, .flags = IORESOURCE_IRQ, }, }; static struct platform_device dma0_device = { .name = "sh-dma-engine", .id = 0, .resource = sh7724_dmae0_resources, .num_resources = ARRAY_SIZE(sh7724_dmae0_resources), .dev = { .platform_data = &dma_platform_data, }, .archdata = { .hwblk_id = HWBLK_DMAC0, }, }; static struct platform_device dma1_device = { .name = "sh-dma-engine", .id = 1, .resource = sh7724_dmae1_resources, .num_resources = ARRAY_SIZE(sh7724_dmae1_resources), .dev = { .platform_data = &dma_platform_data, }, .archdata = { .hwblk_id = HWBLK_DMAC1, }, }; /* Serial */ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 80, 80, 80, 80 }, }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .dev = { .platform_data = &scif0_platform_data, }, }; static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe10000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 81, 81, 81, 81 }, }; static struct platform_device scif1_device = { .name = "sh-sci", .id = 1, .dev = { .platform_data = &scif1_platform_data, }, }; static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffe20000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 82, 82, 82, 82 }, }; static struct platform_device scif2_device 
= { .name = "sh-sci", .id = 2, .dev = { .platform_data = &scif2_platform_data, }, }; static struct plat_sci_port scif3_platform_data = { .mapbase = 0xa4e30000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, .irqs = { 56, 56, 56, 56 }, }; static struct platform_device scif3_device = { .name = "sh-sci", .id = 3, .dev = { .platform_data = &scif3_platform_data, }, }; static struct plat_sci_port scif4_platform_data = { .mapbase = 0xa4e40000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, .irqs = { 88, 88, 88, 88 }, }; static struct platform_device scif4_device = { .name = "sh-sci", .id = 4, .dev = { .platform_data = &scif4_platform_data, }, }; static struct plat_sci_port scif5_platform_data = { .mapbase = 0xa4e50000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, .irqs = { 109, 109, 109, 109 }, }; static struct platform_device scif5_device = { .name = "sh-sci", .id = 5, .dev = { .platform_data = &scif5_platform_data, }, }; /* RTC */ static struct resource rtc_resources[] = { [0] = { .start = 0xa465fec0, .end = 0xa465fec0 + 0x58 - 1, .flags = IORESOURCE_IO, }, [1] = { /* Period IRQ */ .start = 69, .flags = IORESOURCE_IRQ, }, [2] = { /* Carry IRQ */ .start = 70, .flags = IORESOURCE_IRQ, }, [3] = { /* Alarm IRQ */ .start = 68, .flags = IORESOURCE_IRQ, }, }; static struct platform_device rtc_device = { .name = "sh-rtc", .id = -1, .num_resources = ARRAY_SIZE(rtc_resources), .resource = rtc_resources, .archdata = { .hwblk_id = HWBLK_RTC, }, }; /* I2C0 */ static struct resource iic0_resources[] = { [0] = { .name = "IIC0", .start = 0x04470000, .end = 0x04470018 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = 96, .end = 99, .flags = IORESOURCE_IRQ, }, }; static struct platform_device iic0_device = { .name = "i2c-sh_mobile", .id = 0, /* "i2c0" clock */ .num_resources = 
ARRAY_SIZE(iic0_resources), .resource = iic0_resources, .archdata = { .hwblk_id = HWBLK_IIC0, }, }; /* I2C1 */ static struct resource iic1_resources[] = { [0] = { .name = "IIC1", .start = 0x04750000, .end = 0x04750018 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = 92, .end = 95, .flags = IORESOURCE_IRQ, }, }; static struct platform_device iic1_device = { .name = "i2c-sh_mobile", .id = 1, /* "i2c1" clock */ .num_resources = ARRAY_SIZE(iic1_resources), .resource = iic1_resources, .archdata = { .hwblk_id = HWBLK_IIC1, }, }; /* VPU */ static struct uio_info vpu_platform_data = { .name = "VPU5F", .version = "0", .irq = 60, }; static struct resource vpu_resources[] = { [0] = { .name = "VPU", .start = 0xfe900000, .end = 0xfe902807, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device vpu_device = { .name = "uio_pdrv_genirq", .id = 0, .dev = { .platform_data = &vpu_platform_data, }, .resource = vpu_resources, .num_resources = ARRAY_SIZE(vpu_resources), .archdata = { .hwblk_id = HWBLK_VPU, }, }; /* VEU0 */ static struct uio_info veu0_platform_data = { .name = "VEU3F0", .version = "0", .irq = 83, }; static struct resource veu0_resources[] = { [0] = { .name = "VEU3F0", .start = 0xfe920000, .end = 0xfe9200cb, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device veu0_device = { .name = "uio_pdrv_genirq", .id = 1, .dev = { .platform_data = &veu0_platform_data, }, .resource = veu0_resources, .num_resources = ARRAY_SIZE(veu0_resources), .archdata = { .hwblk_id = HWBLK_VEU0, }, }; /* VEU1 */ static struct uio_info veu1_platform_data = { .name = "VEU3F1", .version = "0", .irq = 54, }; static struct resource veu1_resources[] = { [0] = { .name = "VEU3F1", .start = 0xfe924000, .end = 0xfe9240cb, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device veu1_device = { .name = "uio_pdrv_genirq", .id = 2, .dev = 
{ .platform_data = &veu1_platform_data, }, .resource = veu1_resources, .num_resources = ARRAY_SIZE(veu1_resources), .archdata = { .hwblk_id = HWBLK_VEU1, }, }; /* BEU0 */ static struct uio_info beu0_platform_data = { .name = "BEU0", .version = "0", .irq = evt2irq(0x8A0), }; static struct resource beu0_resources[] = { [0] = { .name = "BEU0", .start = 0xfe930000, .end = 0xfe933400, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device beu0_device = { .name = "uio_pdrv_genirq", .id = 6, .dev = { .platform_data = &beu0_platform_data, }, .resource = beu0_resources, .num_resources = ARRAY_SIZE(beu0_resources), .archdata = { .hwblk_id = HWBLK_BEU0, }, }; /* BEU1 */ static struct uio_info beu1_platform_data = { .name = "BEU1", .version = "0", .irq = evt2irq(0xA00), }; static struct resource beu1_resources[] = { [0] = { .name = "BEU1", .start = 0xfe940000, .end = 0xfe943400, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device beu1_device = { .name = "uio_pdrv_genirq", .id = 7, .dev = { .platform_data = &beu1_platform_data, }, .resource = beu1_resources, .num_resources = ARRAY_SIZE(beu1_resources), .archdata = { .hwblk_id = HWBLK_BEU1, }, }; static struct sh_timer_config cmt_platform_data = { .channel_offset = 0x60, .timer_bit = 5, .clockevent_rating = 125, .clocksource_rating = 200, }; static struct resource cmt_resources[] = { [0] = { .start = 0x044a0060, .end = 0x044a006b, .flags = IORESOURCE_MEM, }, [1] = { .start = 104, .flags = IORESOURCE_IRQ, }, }; static struct platform_device cmt_device = { .name = "sh_cmt", .id = 0, .dev = { .platform_data = &cmt_platform_data, }, .resource = cmt_resources, .num_resources = ARRAY_SIZE(cmt_resources), .archdata = { .hwblk_id = HWBLK_CMT, }, }; static struct sh_timer_config tmu0_platform_data = { .channel_offset = 0x04, .timer_bit = 0, .clockevent_rating = 200, }; static struct resource tmu0_resources[] = { [0] = { 
.start = 0xffd80008,
		.end = 0xffd80013,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = 16,	/* TMU0 channel 0 interrupt */
		.flags = IORESOURCE_IRQ,
	},
};

/* TMU0 channel 0: used as the system clockevent device */
static struct platform_device tmu0_device = {
	.name = "sh_tmu",
	.id = 0,
	.dev = {
		.platform_data = &tmu0_platform_data,
	},
	.resource = tmu0_resources,
	.num_resources = ARRAY_SIZE(tmu0_resources),
	.archdata = {
		.hwblk_id = HWBLK_TMU0,	/* channels 0-2 share the TMU0 hw block */
	},
};

/* TMU0 channel 1: clocksource (rating 200) */
static struct sh_timer_config tmu1_platform_data = {
	.channel_offset = 0x10,
	.timer_bit = 1,
	.clocksource_rating = 200,
};

static struct resource tmu1_resources[] = {
	[0] = {
		.start = 0xffd80014,
		.end = 0xffd8001f,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = 17,	/* TMU0 channel 1 interrupt */
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device tmu1_device = {
	.name = "sh_tmu",
	.id = 1,
	.dev = {
		.platform_data = &tmu1_platform_data,
	},
	.resource = tmu1_resources,
	.num_resources = ARRAY_SIZE(tmu1_resources),
	.archdata = {
		.hwblk_id = HWBLK_TMU0,
	},
};

/* TMU0 channel 2: no clockevent/clocksource rating, plain timer */
static struct sh_timer_config tmu2_platform_data = {
	.channel_offset = 0x1c,
	.timer_bit = 2,
};

static struct resource tmu2_resources[] = {
	[0] = {
		.start = 0xffd80020,
		.end = 0xffd8002b,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = 18,	/* TMU0 channel 2 interrupt */
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device tmu2_device = {
	.name = "sh_tmu",
	.id = 2,
	.dev = {
		.platform_data = &tmu2_platform_data,
	},
	.resource = tmu2_resources,
	.num_resources = ARRAY_SIZE(tmu2_resources),
	.archdata = {
		.hwblk_id = HWBLK_TMU0,
	},
};

/* TMU1 channel 0 (second TMU unit at 0xffd9xxxx) */
static struct sh_timer_config tmu3_platform_data = {
	.channel_offset = 0x04,
	.timer_bit = 0,
};

static struct resource tmu3_resources[] = {
	[0] = {
		.start = 0xffd90008,
		.end = 0xffd90013,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = 57,	/* TMU1 channel 0 interrupt */
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device tmu3_device = {
	.name = "sh_tmu",
	.id = 3,
	.dev = {
		.platform_data = &tmu3_platform_data,
	},
	.resource = tmu3_resources,
	.num_resources = ARRAY_SIZE(tmu3_resources),
	.archdata = {
		.hwblk_id = HWBLK_TMU1,	/* channels 3-5 share the TMU1 hw block */
	},
};

/* TMU1 channel 1 */
static struct sh_timer_config tmu4_platform_data = {
	.channel_offset = 0x10,
	.timer_bit = 1,
};

static struct resource tmu4_resources[] = {
	[0] = {
		.start = 0xffd90014,
		.end = 0xffd9001f,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = 58,	/* TMU1 channel 1 interrupt */
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device tmu4_device = {
	.name = "sh_tmu",
	.id = 4,
	.dev = {
		.platform_data = &tmu4_platform_data,
	},
	.resource = tmu4_resources,
	.num_resources = ARRAY_SIZE(tmu4_resources),
	.archdata = {
		.hwblk_id = HWBLK_TMU1,
	},
};

/* TMU1 channel 2 */
static struct sh_timer_config tmu5_platform_data = {
	.channel_offset = 0x1c,
	.timer_bit = 2,
};

static struct resource tmu5_resources[] = {
	[0] = {
		.start = 0xffd90020,
		.end = 0xffd9002b,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		/*
		 * NOTE(review): IRQ 57 duplicates tmu3's interrupt; tmu4 uses
		 * 58 so 59 would be the expected value here -- confirm against
		 * the SH7724 hardware manual before changing.
		 */
		.start = 57,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device tmu5_device = {
	.name = "sh_tmu",
	.id = 5,
	.dev = {
		.platform_data = &tmu5_platform_data,
	},
	.resource = tmu5_resources,
	.num_resources = ARRAY_SIZE(tmu5_resources),
	.archdata = {
		.hwblk_id = HWBLK_TMU1,
	},
};

/* JPU */
/* JPEG processing unit, exported to userspace via UIO */
static struct uio_info jpu_platform_data = {
	.name = "JPU",
	.version = "0",
	.irq = 27,
};

static struct resource jpu_resources[] = {
	[0] = {
		.name	= "JPU",
		.start	= 0xfe980000,
		.end	= 0xfe9902d3,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		/* place holder for contiguous memory */
	},
};

static struct platform_device jpu_device = {
	.name		= "uio_pdrv_genirq",
	.id		= 3,
	.dev = {
		.platform_data	= &jpu_platform_data,
	},
	.resource	= jpu_resources,
	.num_resources	= ARRAY_SIZE(jpu_resources),
	.archdata = {
		.hwblk_id = HWBLK_JPU,
	},
};

/* SPU2DSP0 */
/* Sound processing DSP core 0, also exported via UIO */
static struct uio_info spu0_platform_data = {
	.name = "SPU2DSP0",
	.version = "0",
	.irq = 86,
};

static struct resource spu0_resources[] = {
	[0] = {
		.name	= "SPU2DSP0",
		.start	= 0xFE200000,
		.end	= 0xFE2FFFFF,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		/* place holder for contiguous memory */
	},
};

static struct platform_device spu0_device = {
	.name		= "uio_pdrv_genirq",
	.id		= 4,
	.dev = {
		.platform_data	= &spu0_platform_data,
	},
	.resource	= spu0_resources,
	.num_resources	= ARRAY_SIZE(spu0_resources),
	.archdata = {
		.hwblk_id = HWBLK_SPU,
	},
};

/* SPU2DSP1 */
/* Sound processing DSP core 1 */
static struct uio_info spu1_platform_data = {
	.name = "SPU2DSP1",
	.version = "0",
	.irq = 87,
};

static struct resource spu1_resources[] = {
	[0] = {
		.name	= "SPU2DSP1",
		.start	= 0xFE300000,
		.end	= 0xFE3FFFFF,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		/* place holder for contiguous memory */
	},
};

static struct platform_device spu1_device = {
	.name		= "uio_pdrv_genirq",
	.id		= 5,
	.dev = {
		.platform_data	= &spu1_platform_data,
	},
	.resource	= spu1_resources,
	.num_resources	= ARRAY_SIZE(spu1_resources),
	.archdata = {
		.hwblk_id = HWBLK_SPU,
	},
};

/* All on-chip peripherals registered at arch_initcall time */
static struct platform_device *sh7724_devices[] __initdata = {
	&scif0_device,
	&scif1_device,
	&scif2_device,
	&scif3_device,
	&scif4_device,
	&scif5_device,
	&cmt_device,
	&tmu0_device,
	&tmu1_device,
	&tmu2_device,
	&tmu3_device,
	&tmu4_device,
	&tmu5_device,
	&dma0_device,
	&dma1_device,
	&rtc_device,
	&iic0_device,
	&iic1_device,
	&vpu_device,
	&veu0_device,
	&veu1_device,
	&beu0_device,
	&beu1_device,
	&jpu_device,
	&spu0_device,
	&spu1_device,
};

/*
 * Reserve 2 MiB of contiguous memory for each media block that needs a
 * userspace-visible buffer, then register every platform device above.
 */
static int __init sh7724_devices_setup(void)
{
	platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
	platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
	platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
	platform_resource_setup_memory(&jpu_device,  "jpu",  2 << 20);
	platform_resource_setup_memory(&spu0_device, "spu0", 2 << 20);
	platform_resource_setup_memory(&spu1_device, "spu1", 2 << 20);
	return platform_add_devices(sh7724_devices, ARRAY_SIZE(sh7724_devices));
}
arch_initcall(sh7724_devices_setup);

/* Devices needed before the regular initcalls run (console + timers) */
static struct platform_device *sh7724_early_devices[] __initdata = {
	&scif0_device,
	&scif1_device,
	&scif2_device,
	&scif3_device,
	&scif4_device,
	&scif5_device,
	&cmt_device,
	&tmu0_device,
	&tmu1_device,
	&tmu2_device,
	&tmu3_device,
	&tmu4_device,
	&tmu5_device,
};

void __init plat_early_device_setup(void)
{
	early_platform_add_devices(sh7724_early_devices,
				   ARRAY_SIZE(sh7724_early_devices));
}

/* RAMCR bits controlling the on-chip L2 */
#define RAMCR_CACHE_L2FC	0x0002
#define RAMCR_CACHE_L2E		0x0001
#define L2_CACHE_ENABLE		(RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)

void l2_cache_init(void)
{
	/* Enable L2 cache */
	__raw_writel(L2_CACHE_ENABLE, RAMCR);
}

/*
 * Interrupt controller tables. The first part of the enum lists the
 * individual interrupt sources, the tail lists the priority groups.
 */
enum {
	UNUSED = 0,
	ENABLED,
	DISABLED,

	/* interrupt sources */
	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
	HUDI,
	DMAC1A_DEI0, DMAC1A_DEI1, DMAC1A_DEI2, DMAC1A_DEI3,
	_2DG_TRI, _2DG_INI, _2DG_CEI,
	DMAC0A_DEI0, DMAC0A_DEI1, DMAC0A_DEI2, DMAC0A_DEI3,
	VIO_CEU0, VIO_BEU0, VIO_VEU1, VIO_VOU,
	SCIFA3,
	VPU,
	TPU,
	CEU1,
	BEU1,
	USB0, USB1,
	ATAPI,
	RTC_ATI, RTC_PRI, RTC_CUI,
	DMAC1B_DEI4, DMAC1B_DEI5, DMAC1B_DADERR,
	DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR,
	KEYSC,
	SCIF_SCIF0, SCIF_SCIF1, SCIF_SCIF2,
	VEU0,
	MSIOF_MSIOFI0, MSIOF_MSIOFI1,
	SPU_SPUI0, SPU_SPUI1,
	SCIFA4,
	ICB,
	ETHI,
	I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
	I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
	CMT,
	TSIF,
	FSI,
	SCIFA5,
	TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
	IRDA,
	JPU,
	_2DDMAC,
	MMC_MMC2I, MMC_MMC3I,
	LCDC,
	TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2,

	/* interrupt groups */
	DMAC1A, _2DG, DMAC0A, VIO, USB, RTC,
	DMAC1B, DMAC0B, I2C0, I2C1, SDHI0, SDHI1, SPU, MMCIF,
};

/* Exception vector address for each interrupt source */
static struct intc_vect vectors[] __initdata = {
	INTC_VECT(IRQ0, 0x600),
	INTC_VECT(IRQ1, 0x620),
	INTC_VECT(IRQ2, 0x640),
	INTC_VECT(IRQ3, 0x660),
	INTC_VECT(IRQ4, 0x680),
	INTC_VECT(IRQ5, 0x6a0),
	INTC_VECT(IRQ6, 0x6c0),
	INTC_VECT(IRQ7, 0x6e0),
	INTC_VECT(DMAC1A_DEI0, 0x700),
	INTC_VECT(DMAC1A_DEI1, 0x720),
	INTC_VECT(DMAC1A_DEI2, 0x740),
	INTC_VECT(DMAC1A_DEI3, 0x760),
	INTC_VECT(_2DG_TRI, 0x780),
	INTC_VECT(_2DG_INI, 0x7A0),
	INTC_VECT(_2DG_CEI, 0x7C0),
	INTC_VECT(DMAC0A_DEI0, 0x800),
	INTC_VECT(DMAC0A_DEI1, 0x820),
	INTC_VECT(DMAC0A_DEI2, 0x840),
	INTC_VECT(DMAC0A_DEI3, 0x860),
	INTC_VECT(VIO_CEU0, 0x880),
	INTC_VECT(VIO_BEU0, 0x8A0),
	INTC_VECT(VIO_VEU1, 0x8C0),
	INTC_VECT(VIO_VOU, 0x8E0),
	INTC_VECT(SCIFA3, 0x900),
	INTC_VECT(VPU, 0x980),
	INTC_VECT(TPU, 0x9A0),
	INTC_VECT(CEU1, 0x9E0),
	INTC_VECT(BEU1, 0xA00),
	INTC_VECT(USB0, 0xA20),
	INTC_VECT(USB1, 0xA40),
	INTC_VECT(ATAPI, 0xA60),
	INTC_VECT(RTC_ATI, 0xA80),
	INTC_VECT(RTC_PRI, 0xAA0),
INTC_VECT(RTC_CUI, 0xAC0),
	INTC_VECT(DMAC1B_DEI4, 0xB00),
	INTC_VECT(DMAC1B_DEI5, 0xB20),
	INTC_VECT(DMAC1B_DADERR, 0xB40),
	INTC_VECT(DMAC0B_DEI4, 0xB80),
	INTC_VECT(DMAC0B_DEI5, 0xBA0),
	INTC_VECT(DMAC0B_DADERR, 0xBC0),
	INTC_VECT(KEYSC, 0xBE0),
	INTC_VECT(SCIF_SCIF0, 0xC00),
	INTC_VECT(SCIF_SCIF1, 0xC20),
	INTC_VECT(SCIF_SCIF2, 0xC40),
	INTC_VECT(VEU0, 0xC60),
	INTC_VECT(MSIOF_MSIOFI0, 0xC80),
	INTC_VECT(MSIOF_MSIOFI1, 0xCA0),
	INTC_VECT(SPU_SPUI0, 0xCC0),
	INTC_VECT(SPU_SPUI1, 0xCE0),
	INTC_VECT(SCIFA4, 0xD00),
	INTC_VECT(ICB, 0xD20),
	INTC_VECT(ETHI, 0xD60),
	INTC_VECT(I2C1_ALI, 0xD80),
	INTC_VECT(I2C1_TACKI, 0xDA0),
	INTC_VECT(I2C1_WAITI, 0xDC0),
	INTC_VECT(I2C1_DTEI, 0xDE0),
	INTC_VECT(I2C0_ALI, 0xE00),
	INTC_VECT(I2C0_TACKI, 0xE20),
	INTC_VECT(I2C0_WAITI, 0xE40),
	INTC_VECT(I2C0_DTEI, 0xE60),
	/* SDHI0 raises four distinct vectors that map to one logical source */
	INTC_VECT(SDHI0, 0xE80),
	INTC_VECT(SDHI0, 0xEA0),
	INTC_VECT(SDHI0, 0xEC0),
	INTC_VECT(SDHI0, 0xEE0),
	INTC_VECT(CMT, 0xF00),
	INTC_VECT(TSIF, 0xF20),
	INTC_VECT(FSI, 0xF80),
	INTC_VECT(SCIFA5, 0xFA0),
	INTC_VECT(TMU0_TUNI0, 0x400),
	INTC_VECT(TMU0_TUNI1, 0x420),
	INTC_VECT(TMU0_TUNI2, 0x440),
	INTC_VECT(IRDA, 0x480),
	/* SDHI1 likewise uses three vectors */
	INTC_VECT(SDHI1, 0x4E0),
	INTC_VECT(SDHI1, 0x500),
	INTC_VECT(SDHI1, 0x520),
	INTC_VECT(JPU, 0x560),
	INTC_VECT(_2DDMAC, 0x4A0),
	INTC_VECT(MMC_MMC2I, 0x5A0),
	INTC_VECT(MMC_MMC3I, 0x5C0),
	INTC_VECT(LCDC, 0xF40),
	INTC_VECT(TMU1_TUNI0, 0x920),
	INTC_VECT(TMU1_TUNI1, 0x940),
	INTC_VECT(TMU1_TUNI2, 0x960),
};

/* Sources that share one priority/mask group */
static struct intc_group groups[] __initdata = {
	INTC_GROUP(DMAC1A, DMAC1A_DEI0, DMAC1A_DEI1, DMAC1A_DEI2, DMAC1A_DEI3),
	INTC_GROUP(_2DG, _2DG_TRI, _2DG_INI, _2DG_CEI),
	INTC_GROUP(DMAC0A, DMAC0A_DEI0, DMAC0A_DEI1, DMAC0A_DEI2, DMAC0A_DEI3),
	INTC_GROUP(VIO, VIO_CEU0, VIO_BEU0, VIO_VEU1, VIO_VOU),
	INTC_GROUP(USB, USB0, USB1),
	INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
	INTC_GROUP(DMAC1B, DMAC1B_DEI4, DMAC1B_DEI5, DMAC1B_DADERR),
	INTC_GROUP(DMAC0B, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR),
	INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
	INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
	INTC_GROUP(SPU, SPU_SPUI0, SPU_SPUI1),
	INTC_GROUP(MMCIF, MMC_MMC2I, MMC_MMC3I),
};

/*
 * Mask (IMR) / mask-clear (IMCR) register pairs, one bit per source,
 * listed MSB first within each 8-bit register.
 */
static struct intc_mask_reg mask_registers[] __initdata = {
	{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
	  { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
	    0, ENABLED, ENABLED, ENABLED } },
	{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
	  { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0,
	    DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } },
	{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
	  { 0, 0, 0, VPU, ATAPI, ETHI, 0, SCIFA3 } },
	{ 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
	  { DMAC1A_DEI3, DMAC1A_DEI2, DMAC1A_DEI1, DMAC1A_DEI0,
	    SPU_SPUI1, SPU_SPUI0, BEU1, IRDA } },
	{ 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
	  { 0, TMU0_TUNI2, TMU0_TUNI1, TMU0_TUNI0,
	    JPU, 0, 0, LCDC } },
	{ 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
	  { KEYSC, DMAC0B_DADERR, DMAC0B_DEI5, DMAC0B_DEI4,
	    VEU0, SCIF_SCIF2, SCIF_SCIF1, SCIF_SCIF0 } },
	{ 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
	  { 0, 0, ICB, SCIFA4,
	    CEU1, 0, MSIOF_MSIOFI1, MSIOF_MSIOFI0 } },
	{ 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
	  { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
	    I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } },
	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
	  { DISABLED, ENABLED, ENABLED, ENABLED,
	    0, 0, SCIFA5, FSI } },
	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
	  { 0, 0, 0, CMT, 0, USB1, USB0, 0 } },
	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
	  { 0, DMAC1B_DADERR, DMAC1B_DEI5, DMAC1B_DEI4,
	    0, RTC_CUI, RTC_PRI, RTC_ATI } },
	{ 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
	  { 0, _2DG_CEI, _2DG_INI, _2DG_TRI,
	    0, TPU, 0, TSIF } },
	{ 0xa40800b0, 0xa40800f0, 8, /* IMR12 / IMCR12 */
	  { 0, 0, MMC_MMC3I, MMC_MMC2I, 0, 0, 0, _2DDMAC } },
	{ 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};

/* 4-bit priority fields packed into 16-bit IPR registers */
static struct intc_prio_reg prio_registers[] __initdata = {
	{ 0xa4080000, 0, 16, 4, /* IPRA */
	  { TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, IRDA } },
	{ 0xa4080004, 0, 16, 4, /* IPRB */
	  { JPU, LCDC, DMAC1A, BEU1 } },
	{ 0xa4080008, 0, 16, 4, /* IPRC */
	  { TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2, SPU } },
	{ 0xa408000c, 0, 16, 4, /* IPRD */
	  { 0, MMCIF, 0, ATAPI } },
	{ 0xa4080010, 0, 16, 4, /* IPRE */
	  { DMAC0A, VIO, SCIFA3, VPU } },
	{ 0xa4080014, 0, 16, 4, /* IPRF */
	  { KEYSC, DMAC0B, USB, CMT } },
	{ 0xa4080018, 0, 16, 4, /* IPRG */
	  { SCIF_SCIF0, SCIF_SCIF1, SCIF_SCIF2, VEU0 } },
	{ 0xa408001c, 0, 16, 4, /* IPRH */
	  { MSIOF_MSIOFI0, MSIOF_MSIOFI1, I2C1, I2C0 } },
	{ 0xa4080020, 0, 16, 4, /* IPRI */
	  { SCIFA4, ICB, TSIF, _2DG } },
	{ 0xa4080024, 0, 16, 4, /* IPRJ */
	  { CEU1, ETHI, FSI, SDHI1 } },
	{ 0xa4080028, 0, 16, 4, /* IPRK */
	  { RTC, DMAC1B, 0, SDHI0 } },
	{ 0xa408002c, 0, 16, 4, /* IPRL */
	  { SCIFA5, 0, TPU, _2DDMAC } },
	{ 0xa4140010, 0, 32, 4, /* INTPRI00 */
	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};

/* Edge/level sense configuration for the external IRQ pins */
static struct intc_sense_reg sense_registers[] __initdata = {
	{ 0xa414001c, 16, 2, /* ICR1 */
	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};

/* Interrupt-request acknowledge register for the external IRQ pins */
static struct intc_mask_reg ack_registers[] __initdata = {
	{ 0xa4140024, 0, 8, /* INTREQ00 */
	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};

static struct intc_desc intc_desc __initdata = {
	.name = "sh7724",
	.force_enable = ENABLED,
	.force_disable = DISABLED,
	.hw = INTC_HW_DESC(vectors, groups, mask_registers,
			   prio_registers, sense_registers, ack_registers),
};

void __init plat_irq_setup(void)
{
	register_intc_controller(&intc_desc);
}

/*
 * Registers that lose their contents in R-standby; saved by the
 * pre-sleep notifier below and restored by the post-sleep one.
 */
static struct {
	/* BSC */
	unsigned long mmselr;
	unsigned long cs0bcr;
	unsigned long cs4bcr;
	unsigned long cs5abcr;
	unsigned long cs5bbcr;
	unsigned long cs6abcr;
	unsigned long cs6bbcr;
	unsigned long cs4wcr;
	unsigned long cs5awcr;
	unsigned long cs5bwcr;
	unsigned long cs6awcr;
	unsigned long cs6bwcr;
	/* INTC */
	unsigned short ipra;
	unsigned short iprb;
	unsigned short iprc;
	unsigned short iprd;
	unsigned short ipre;
	unsigned short iprf;
	unsigned short iprg;
	unsigned short iprh;
	unsigned short ipri;
	unsigned short iprj;
	unsigned short iprk;
	unsigned short iprl;
unsigned char imr0;
	unsigned char imr1;
	unsigned char imr2;
	unsigned char imr3;
	unsigned char imr4;
	unsigned char imr5;
	unsigned char imr6;
	unsigned char imr7;
	unsigned char imr8;
	unsigned char imr9;
	unsigned char imr10;
	unsigned char imr11;
	unsigned char imr12;
	/* RWDT */
	unsigned short rwtcnt;
	unsigned short rwtcsr;
	/* CPG */
	unsigned long irdaclk;
	unsigned long spuclk;
} sh7724_rstandby_state;

/*
 * Pre-sleep notifier: snapshot all R-standby-volatile registers.
 * Only runs for SUSP_SH_RSTANDBY transitions; the high halves OR'ed
 * into mmselr/rwtcnt/rwtcsr are the write-enable passwords the
 * hardware requires when the values are written back on resume.
 */
static int sh7724_pre_sleep_notifier_call(struct notifier_block *nb,
					  unsigned long flags, void *unused)
{
	if (!(flags & SUSP_SH_RSTANDBY))
		return NOTIFY_DONE;

	/* BCR */
	sh7724_rstandby_state.mmselr = __raw_readl(0xff800020); /* MMSELR */
	sh7724_rstandby_state.mmselr |= 0xa5a50000;
	sh7724_rstandby_state.cs0bcr = __raw_readl(0xfec10004); /* CS0BCR */
	sh7724_rstandby_state.cs4bcr = __raw_readl(0xfec10010); /* CS4BCR */
	sh7724_rstandby_state.cs5abcr = __raw_readl(0xfec10014); /* CS5ABCR */
	sh7724_rstandby_state.cs5bbcr = __raw_readl(0xfec10018); /* CS5BBCR */
	sh7724_rstandby_state.cs6abcr = __raw_readl(0xfec1001c); /* CS6ABCR */
	sh7724_rstandby_state.cs6bbcr = __raw_readl(0xfec10020); /* CS6BBCR */
	sh7724_rstandby_state.cs4wcr = __raw_readl(0xfec10030); /* CS4WCR */
	sh7724_rstandby_state.cs5awcr = __raw_readl(0xfec10034); /* CS5AWCR */
	sh7724_rstandby_state.cs5bwcr = __raw_readl(0xfec10038); /* CS5BWCR */
	sh7724_rstandby_state.cs6awcr = __raw_readl(0xfec1003c); /* CS6AWCR */
	sh7724_rstandby_state.cs6bwcr = __raw_readl(0xfec10040); /* CS6BWCR */

	/* INTC */
	sh7724_rstandby_state.ipra = __raw_readw(0xa4080000); /* IPRA */
	sh7724_rstandby_state.iprb = __raw_readw(0xa4080004); /* IPRB */
	sh7724_rstandby_state.iprc = __raw_readw(0xa4080008); /* IPRC */
	sh7724_rstandby_state.iprd = __raw_readw(0xa408000c); /* IPRD */
	sh7724_rstandby_state.ipre = __raw_readw(0xa4080010); /* IPRE */
	sh7724_rstandby_state.iprf = __raw_readw(0xa4080014); /* IPRF */
	sh7724_rstandby_state.iprg = __raw_readw(0xa4080018); /* IPRG */
	sh7724_rstandby_state.iprh = __raw_readw(0xa408001c); /* IPRH */
	sh7724_rstandby_state.ipri = __raw_readw(0xa4080020); /* IPRI */
	sh7724_rstandby_state.iprj = __raw_readw(0xa4080024); /* IPRJ */
	sh7724_rstandby_state.iprk = __raw_readw(0xa4080028); /* IPRK */
	sh7724_rstandby_state.iprl = __raw_readw(0xa408002c); /* IPRL */
	sh7724_rstandby_state.imr0 = __raw_readb(0xa4080080); /* IMR0 */
	sh7724_rstandby_state.imr1 = __raw_readb(0xa4080084); /* IMR1 */
	sh7724_rstandby_state.imr2 = __raw_readb(0xa4080088); /* IMR2 */
	sh7724_rstandby_state.imr3 = __raw_readb(0xa408008c); /* IMR3 */
	sh7724_rstandby_state.imr4 = __raw_readb(0xa4080090); /* IMR4 */
	sh7724_rstandby_state.imr5 = __raw_readb(0xa4080094); /* IMR5 */
	sh7724_rstandby_state.imr6 = __raw_readb(0xa4080098); /* IMR6 */
	sh7724_rstandby_state.imr7 = __raw_readb(0xa408009c); /* IMR7 */
	sh7724_rstandby_state.imr8 = __raw_readb(0xa40800a0); /* IMR8 */
	sh7724_rstandby_state.imr9 = __raw_readb(0xa40800a4); /* IMR9 */
	sh7724_rstandby_state.imr10 = __raw_readb(0xa40800a8); /* IMR10 */
	sh7724_rstandby_state.imr11 = __raw_readb(0xa40800ac); /* IMR11 */
	sh7724_rstandby_state.imr12 = __raw_readb(0xa40800b0); /* IMR12 */

	/* RWDT */
	sh7724_rstandby_state.rwtcnt = __raw_readb(0xa4520000); /* RWTCNT */
	sh7724_rstandby_state.rwtcnt |= 0x5a00;	/* RWTCNT write password */
	sh7724_rstandby_state.rwtcsr = __raw_readb(0xa4520004); /* RWTCSR */
	sh7724_rstandby_state.rwtcsr |= 0xa500;	/* RWTCSR write password */
	/* stop the watchdog before entering R-standby */
	__raw_writew(sh7724_rstandby_state.rwtcsr & 0x07, 0xa4520004);

	/* CPG */
	sh7724_rstandby_state.irdaclk = __raw_readl(0xa4150018); /* IRDACLKCR */
	sh7724_rstandby_state.spuclk = __raw_readl(0xa415003c); /* SPUCLKCR */

	return NOTIFY_DONE;
}

/*
 * Post-sleep notifier: write back everything the pre-sleep notifier
 * saved, in the same BSC -> INTC -> RWDT -> CPG order.
 */
static int sh7724_post_sleep_notifier_call(struct notifier_block *nb,
					   unsigned long flags, void *unused)
{
	if (!(flags & SUSP_SH_RSTANDBY))
		return NOTIFY_DONE;

	/* BCR */
	__raw_writel(sh7724_rstandby_state.mmselr, 0xff800020); /* MMSELR */
	__raw_writel(sh7724_rstandby_state.cs0bcr, 0xfec10004); /* CS0BCR */
	__raw_writel(sh7724_rstandby_state.cs4bcr, 0xfec10010); /* CS4BCR */
	__raw_writel(sh7724_rstandby_state.cs5abcr, 0xfec10014); /* CS5ABCR */
	__raw_writel(sh7724_rstandby_state.cs5bbcr, 0xfec10018); /* CS5BBCR */
	__raw_writel(sh7724_rstandby_state.cs6abcr, 0xfec1001c); /* CS6ABCR */
	__raw_writel(sh7724_rstandby_state.cs6bbcr, 0xfec10020); /* CS6BBCR */
	__raw_writel(sh7724_rstandby_state.cs4wcr, 0xfec10030); /* CS4WCR */
	__raw_writel(sh7724_rstandby_state.cs5awcr, 0xfec10034); /* CS5AWCR */
	__raw_writel(sh7724_rstandby_state.cs5bwcr, 0xfec10038); /* CS5BWCR */
	__raw_writel(sh7724_rstandby_state.cs6awcr, 0xfec1003c); /* CS6AWCR */
	__raw_writel(sh7724_rstandby_state.cs6bwcr, 0xfec10040); /* CS6BWCR */

	/* INTC */
	__raw_writew(sh7724_rstandby_state.ipra, 0xa4080000); /* IPRA */
	__raw_writew(sh7724_rstandby_state.iprb, 0xa4080004); /* IPRB */
	__raw_writew(sh7724_rstandby_state.iprc, 0xa4080008); /* IPRC */
	__raw_writew(sh7724_rstandby_state.iprd, 0xa408000c); /* IPRD */
	__raw_writew(sh7724_rstandby_state.ipre, 0xa4080010); /* IPRE */
	__raw_writew(sh7724_rstandby_state.iprf, 0xa4080014); /* IPRF */
	__raw_writew(sh7724_rstandby_state.iprg, 0xa4080018); /* IPRG */
	__raw_writew(sh7724_rstandby_state.iprh, 0xa408001c); /* IPRH */
	__raw_writew(sh7724_rstandby_state.ipri, 0xa4080020); /* IPRI */
	__raw_writew(sh7724_rstandby_state.iprj, 0xa4080024); /* IPRJ */
	__raw_writew(sh7724_rstandby_state.iprk, 0xa4080028); /* IPRK */
	__raw_writew(sh7724_rstandby_state.iprl, 0xa408002c); /* IPRL */
	__raw_writeb(sh7724_rstandby_state.imr0, 0xa4080080); /* IMR0 */
	__raw_writeb(sh7724_rstandby_state.imr1, 0xa4080084); /* IMR1 */
	__raw_writeb(sh7724_rstandby_state.imr2, 0xa4080088); /* IMR2 */
	__raw_writeb(sh7724_rstandby_state.imr3, 0xa408008c); /* IMR3 */
	__raw_writeb(sh7724_rstandby_state.imr4, 0xa4080090); /* IMR4 */
	__raw_writeb(sh7724_rstandby_state.imr5, 0xa4080094); /* IMR5 */
	__raw_writeb(sh7724_rstandby_state.imr6, 0xa4080098); /* IMR6 */
	__raw_writeb(sh7724_rstandby_state.imr7, 0xa408009c); /* IMR7 */
	__raw_writeb(sh7724_rstandby_state.imr8, 0xa40800a0); /* IMR8 */
	__raw_writeb(sh7724_rstandby_state.imr9, 0xa40800a4); /* IMR9 */
	__raw_writeb(sh7724_rstandby_state.imr10, 0xa40800a8); /* IMR10 */
	__raw_writeb(sh7724_rstandby_state.imr11, 0xa40800ac); /* IMR11 */
	__raw_writeb(sh7724_rstandby_state.imr12, 0xa40800b0); /* IMR12 */

	/* RWDT */
	__raw_writew(sh7724_rstandby_state.rwtcnt, 0xa4520000); /* RWTCNT */
	__raw_writew(sh7724_rstandby_state.rwtcsr, 0xa4520004); /* RWTCSR */

	/* CPG */
	__raw_writel(sh7724_rstandby_state.irdaclk, 0xa4150018); /* IRDACLKCR */
	__raw_writel(sh7724_rstandby_state.spuclk, 0xa415003c); /* SPUCLKCR */

	return NOTIFY_DONE;
}

static struct notifier_block sh7724_pre_sleep_notifier = {
	.notifier_call = sh7724_pre_sleep_notifier_call,
	.priority = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU),
};

static struct notifier_block sh7724_post_sleep_notifier = {
	.notifier_call = sh7724_post_sleep_notifier_call,
	.priority = SH_MOBILE_POST(SH_MOBILE_SLEEP_CPU),
};

/* Hook the save/restore notifiers into the sh-mobile sleep chains */
static int __init sh7724_sleep_setup(void)
{
	atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
				       &sh7724_pre_sleep_notifier);
	atomic_notifier_chain_register(&sh_mobile_post_sleep_notifier_list,
				       &sh7724_post_sleep_notifier);
	return 0;
}
arch_initcall(sh7724_sleep_setup);
gpl-2.0
varunchitre15/android_kernel_sony_tianchi
drivers/video/msm/mdp4_overlay_dsi_video.c
2190
28216
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/fb.h>
#include <linux/msm_mdp.h>
#include <linux/ktime.h>
#include <linux/wakelock.h>
#include <linux/time.h>	/* NOTE(review): duplicate of the <linux/time.h> above */
#include <asm/system.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include "mdp.h"
#include "msm_fb.h"
#include "mdp4.h"
#include "mipi_dsi.h"
#include <mach/iommu_domains.h>

/* Offset of the DSI-video timing generator block within the MDP */
#define DSI_VIDEO_BASE	0xE0000

static int first_pixel_start_x;
static int first_pixel_start_y;
static int dsi_video_enabled;	/* timing generator currently running */

#define MAX_CONTROLLER	1

/* Per-controller vsync state: kickoff bookkeeping, waiters and BLT mode */
static struct vsycn_ctrl {
	struct device *dev;
	int inited;			/* one-time init done */
	int update_ndx;			/* which vlist[] entry collects updates */
	int ov_koff;			/* overlay kickoffs issued */
	int ov_done;			/* overlay kickoffs completed */
	atomic_t suspend;
	atomic_t vsync_resume;
	int wait_vsync_cnt;		/* threads blocked on vsync_comp */
	int blt_change;
	int blt_free;			/* countdown before freeing BLT buffer */
	u32 blt_ctrl;
	u32 blt_mode;
	int sysfs_created;
	struct mutex update_lock;	/* protects vlist/update_ndx/blt_free */
	struct completion ov_comp;
	struct completion dmap_comp;
	struct completion vsync_comp;
	spinlock_t spin_lock;		/* protects kickoff counters, shared with ISR */
	struct msm_fb_data_type *mfd;
	struct mdp4_overlay_pipe *base_pipe;
	struct vsync_update vlist[2];	/* double-buffered pending updates */
	int vsync_irq_enabled;
	ktime_t vsync_time;
} vsync_ctrl_db[MAX_CONTROLLER];

/* Unmask @intr in the MDP interrupt mask and enable the IRQ term */
static void vsync_irq_enable(int intr, int term)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	outp32(MDP_INTR_CLEAR, intr);	/* drop any stale pending status */
	mdp_intr_mask |= intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_enable_irq(term);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	pr_debug("%s: IRQ-en done, term=%x\n", __func__, term);
}

/* Mask @intr and release the IRQ term (nosync: may run in IRQ context) */
static void vsync_irq_disable(int intr, int term)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	outp32(MDP_INTR_CLEAR, intr);
	mdp_intr_mask &= ~intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_disable_irq_nosync(term);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	pr_debug("%s: IRQ-dis done, term=%x\n", __func__, term);
}

/* Power the overlay block and start the DSI-video timing generator once */
static void mdp4_overlay_dsi_video_start(void)
{
	if (!dsi_video_enabled) {
		/* enable DSI block */
		mdp4_iommu_attach();
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
		dsi_video_enabled = 1;
	}
}

/*
 * mdp4_dsi_video_pipe_queue:
 * called from thread context
 */
/* Clone @pipe into the pending-update list; applied at next pipe_commit */
void mdp4_dsi_video_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
{
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pp;
	int undx;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	if (atomic_read(&vctrl->suspend) > 0)
		return;

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];

	pp = &vp->plist[pipe->pipe_ndx - 1];	/* ndx start form 1 */

	pr_debug("%s: vndx=%d pipe=%x ndx=%d num=%d pid=%d\n",
		 __func__, undx, (int)pipe, pipe->pipe_ndx, pipe->pipe_num,
		 current->pid);

	*pp = *pipe;	/* clone it */
	vp->update_cnt++;
	mutex_unlock(&vctrl->update_lock);
	mdp4_stat.overlay_play[pipe->mixer_num]++;
}

static void mdp4_dsi_video_blt_ov_update(struct mdp4_overlay_pipe *pipe);
static void mdp4_dsi_video_wait4dmap(int cndx);
static void mdp4_dsi_video_wait4ov(int cndx);

/*
 * Flush all queued pipe updates to hardware and kick off the frame.
 * Returns the number of pipes committed; 0 if nothing was pending or
 * the previous overlay kickoff has not completed (frame drop).
 */
int mdp4_dsi_video_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];
	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;
	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return cnt;
	}

	/* flip to the other update list so new queues go there */
	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		/* previous overlay kickoff still in flight: drop this frame */
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
		       vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0;	/* clear */
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_dsi_video_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		/* BLT (writeback) mode: kick the overlay engine explicitly */
		mdp4_dsi_video_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();	/* make sure setup hits hw before the kickoff write */
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update  at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if
(pipe->ov_blt_addr)
			mdp4_dsi_video_wait4ov(cndx);
		else
			mdp4_dsi_video_wait4dmap(cndx);
	}

	return cnt;
}

/* Enable/disable delivery of primary-vsync interrupts for this panel */
void mdp4_dsi_video_vsync_ctrl(struct fb_info *info, int enable)
{
	struct vsycn_ctrl *vctrl;
	int cndx = 0;

	vctrl = &vsync_ctrl_db[cndx];

	if (vctrl->vsync_irq_enabled == enable)
		return;

	pr_debug("%s: vsync enable=%d\n", __func__, enable);

	vctrl->vsync_irq_enabled = enable;

	if (enable)
		vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
	else
		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);

	if (vctrl->vsync_irq_enabled &&
	    atomic_read(&vctrl->suspend) == 0)
		atomic_set(&vctrl->vsync_resume, 1);
}

/* Block until the next vsync; returns its timestamp (ns) in *vtime, -1 if suspended */
void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (atomic_read(&vctrl->suspend) > 0) {
		*vtime = -1;
		return;
	}

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_dsi_video_start();

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->wait_vsync_cnt == 0)
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	wait_for_completion(&vctrl->vsync_comp);
	mdp4_stat.wait4vsync0++;

	*vtime = ktime_to_ns(vctrl->vsync_time);
}

/* Wait for the DMA_P done completion (no-op while suspended) */
static void mdp4_dsi_video_wait4dmap(int cndx)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	if (atomic_read(&vctrl->suspend) > 0)
		return;

	wait_for_completion(&vctrl->dmap_comp);
}

/* Arm the DMA_P done interrupt, then wait for it to fire */
static void mdp4_dsi_video_wait4dmap_done(int cndx)
{
	unsigned long flags;
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}
	vctrl = &vsync_ctrl_db[cndx];

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	INIT_COMPLETION(vctrl->dmap_comp);
	vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
	mdp4_dsi_video_wait4dmap(cndx);
}

/* Wait for the overlay-done completion (no-op while suspended) */
static void mdp4_dsi_video_wait4ov(int cndx)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	if (atomic_read(&vctrl->suspend) > 0)
		return;

	wait_for_completion(&vctrl->ov_comp);
}

/*
 * sysfs show handler: blocks (interruptibly, up to 4 vsync periods) for
 * the next vsync and reports its timestamp as "VSYNC=<ns>". On timeout
 * or signal it reports the current time instead of failing.
 */
ssize_t mdp4_dsi_video_show_event(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int cndx;
	struct vsycn_ctrl *vctrl;
	ssize_t ret = 0;
	unsigned long flags;
	u64 vsync_tick;

	cndx = 0;
	vctrl = &vsync_ctrl_db[0];

	if (atomic_read(&vctrl->suspend) > 0 ||
	    atomic_read(&vctrl->vsync_resume) == 0)
		return 0;

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->wait_vsync_cnt == 0)
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
		msecs_to_jiffies(VSYNC_PERIOD * 4));
	if (ret <= 0) {
		vctrl->wait_vsync_cnt = 0;
		vsync_tick = ktime_to_ns(ktime_get());
		ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
		/*
		 * NOTE(review): snprintf already NUL-terminated buf; this
		 * writes one byte past that terminator -- looks like an
		 * off-by-one, confirm intent.
		 */
		buf[strlen(buf) + 1] = '\0';
		return ret;
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vsync_tick = ktime_to_ns(vctrl->vsync_time);
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
	buf[strlen(buf) + 1] = '\0';	/* NOTE(review): same off-by-one as above */
	return ret;
}

/* One-time init of the per-controller vsync state (starts suspended) */
void mdp4_dsi_vsync_init(int cndx)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	pr_debug("%s: ndx=%d\n", __func__, cndx);

	vctrl = &vsync_ctrl_db[cndx];
	if (vctrl->inited)
		return;

	vctrl->inited = 1;
	vctrl->update_ndx = 0;
	mutex_init(&vctrl->update_lock);
	init_completion(&vctrl->vsync_comp);
	init_completion(&vctrl->dmap_comp);
	init_completion(&vctrl->ov_comp);
	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 1);
	spin_lock_init(&vctrl->spin_lock);
}

/* Replace the controller's base-layer pipe */
void mdp4_dsi_video_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	vctrl->base_pipe = pipe;
}

/*
 * Panel-on entry point: allocate (or reuse) the base pipe, program the
 * framebuffer source, the mixer, and the DSI-video timing generator.
 * Returns 0 on success or a negative errno.
 */
int mdp4_dsi_video_on(struct platform_device *pdev)
{
	int dsi_width;
	int dsi_height;
	int dsi_bpp;
	int dsi_border_clr;
	int dsi_underflow_clr;
	int dsi_hsync_skew;
	int hsync_period;
	int hsync_ctrl;
	int vsync_period;
	int display_hctl;
	int display_v_start;
	int display_v_end;
	int active_hctl;
	int active_h_start;
	int active_h_end;
	int active_v_start;
	int active_v_end;
	int ctrl_polarity;
	int h_back_porch;
	int h_front_porch;
	int v_back_porch;
	int v_front_porch;
	int hsync_pulse_width;
	int vsync_pulse_width;
	int hsync_polarity;
	int vsync_polarity;
	int data_en_polarity;
	int hsync_start_x;
	int hsync_end_x;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp, ptype;
	struct fb_info *fbi;
	struct fb_var_screeninfo *var;
	struct msm_fb_data_type *mfd;
	struct mdp4_overlay_pipe *pipe;
	int ret = 0;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct msm_panel_info *pinfo;

	vctrl = &vsync_ctrl_db[cndx];
	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	vctrl->mfd = mfd;
	vctrl->dev = mfd->fbi->dev;
	pinfo = &mfd->panel_info;
	vctrl->blt_ctrl = pinfo->lcd.blt_ctrl;
	vctrl->blt_mode = pinfo->lcd.blt_mode;

	/* mdp clock on */
	mdp_clk_ctrl(1);

	fbi = mfd->fbi;
	var = &fbi->var;

	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf_offset = calc_fb_offset(mfd, fbi, bpp);

	if (vctrl->base_pipe == NULL) {
		/* first on: allocate the base RGB pipe for mixer 0 */
		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
		if (ptype < 0)
			printk(KERN_INFO "%s: format2type failed\n", __func__);
		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
		if (pipe == NULL) {
			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
			return -EBUSY;
		}
		pipe->pipe_used++;
		pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
		pipe->mixer_num = MDP4_MIXER0;
		pipe->src_format = mfd->fb_imgType;
mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_VIDEO);
		ret = mdp4_overlay_format2pipe(pipe);
		if (ret < 0)
			printk(KERN_INFO "%s: format2type failed\n", __func__);

		pipe->ov_blt_addr = 0;
		pipe->dma_blt_addr = 0;

		vctrl->base_pipe = pipe;	/* keep it */

		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
	} else {
		pipe = vctrl->base_pipe;
	}

	if (!(mfd->cont_splash_done)) {
		/* hand over from the bootloader splash: stop its scanout */
		mfd->cont_splash_done = 1;
		mdp4_dsi_video_wait4dmap_done(0);
		MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
		dsi_video_enabled = 0;
		mipi_dsi_controller_cfg(0);
		/* Clks are enabled in probe.
		   Disabling clocks now */
		mdp_clk_ctrl(0);
	}

	/* point the base pipe at the framebuffer */
	pipe->src_height = fbi->var.yres;
	pipe->src_width = fbi->var.xres;
	pipe->src_h = fbi->var.yres;
	pipe->src_w = fbi->var.xres;
	pipe->src_y = 0;
	pipe->src_x = 0;
	pipe->dst_h = fbi->var.yres;
	pipe->dst_w = fbi->var.xres;
	pipe->srcp0_ystride = fbi->fix.line_length;
	pipe->bpp = bpp;

	if (mfd->display_iova)
		pipe->srcp0_addr = mfd->display_iova + buf_offset;
	else
		pipe->srcp0_addr = (uint32)(buf + buf_offset);

	pipe->dst_h = fbi->var.yres;
	pipe->dst_w = fbi->var.xres;

	mdp4_overlay_mdp_pipe_req(pipe, mfd);
	mdp4_calc_blt_mdp_bw(mfd, pipe);

	atomic_set(&vctrl->suspend, 0);

	mdp4_overlay_dmap_xy(pipe);	/* dma_p */
	mdp4_overlay_dmap_cfg(mfd, 1);
	mdp4_overlay_rgb_setup(pipe);
	mdp4_overlayproc_cfg(pipe);

	mdp4_overlay_reg_flush(pipe, 1);
	mdp4_mixer_stage_up(pipe, 0);
	mdp4_mixer_stage_commit(pipe->mixer_num);

	/*
	 * DSI timing setting
	 */
	h_back_porch = var->left_margin;
	h_front_porch = var->right_margin;
	v_back_porch = var->upper_margin;
	v_front_porch = var->lower_margin;
	hsync_pulse_width = var->hsync_len;
	vsync_pulse_width = var->vsync_len;
	dsi_border_clr = mfd->panel_info.lcdc.border_clr;
	dsi_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
	dsi_hsync_skew = mfd->panel_info.lcdc.hsync_skew;
	dsi_width = mfd->panel_info.xres +
		mfd->panel_info.lcdc.xres_pad;
	dsi_height = mfd->panel_info.yres +
		mfd->panel_info.lcdc.yres_pad;
	dsi_bpp = mfd->panel_info.bpp;

	hsync_period = hsync_pulse_width + h_back_porch + dsi_width
				+ h_front_porch;
	hsync_ctrl = (hsync_period << 16) | hsync_pulse_width;
	hsync_start_x = h_back_porch + hsync_pulse_width;
	hsync_end_x = hsync_period - h_front_porch - 1;
	display_hctl = (hsync_end_x << 16) | hsync_start_x;

	vsync_period = (vsync_pulse_width + v_back_porch + dsi_height
				+ v_front_porch);
	display_v_start = ((vsync_pulse_width + v_back_porch) * hsync_period)
				+ dsi_hsync_skew;
	display_v_end = ((vsync_period - v_front_porch) * hsync_period)
				+ dsi_hsync_skew - 1;

	if (dsi_width != var->xres) {
		/* active area narrower than total width: program active window */
		active_h_start = hsync_start_x + first_pixel_start_x;
		active_h_end = active_h_start + var->xres - 1;
		active_hctl = ACTIVE_START_X_EN |
				(active_h_end << 16) | active_h_start;
	} else {
		active_hctl = 0;
	}

	if (dsi_height != var->yres) {
		active_v_start = display_v_start +
				first_pixel_start_y * hsync_period;
		active_v_end = active_v_start + (var->yres) * hsync_period - 1;
		active_v_start |= ACTIVE_START_Y_EN;
	} else {
		active_v_start = 0;
		active_v_end = 0;
	}

	dsi_underflow_clr |= 0x80000000;	/* enable recovery */
	hsync_polarity = 0;
	vsync_polarity = 0;
	data_en_polarity = 0;

	ctrl_polarity =	(data_en_polarity << 2) |
		(vsync_polarity << 1) | (hsync_polarity);

	/* program the DSI-video timing generator registers */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x4, hsync_ctrl);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x8, vsync_period * hsync_period);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0xc,
				vsync_pulse_width * hsync_period);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x10, display_hctl);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x14, display_v_start);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x18, display_v_end);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x1c, active_hctl);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x20, active_v_start);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x24, active_v_end);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x28, dsi_border_clr);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x2c, dsi_underflow_clr);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x30, dsi_hsync_skew);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x38, ctrl_polarity);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	mdp_histogram_ctrl_all(TRUE);

	return ret;
}

/*
 * Panel-off entry point: mark suspended, drain in-flight kickoffs,
 * stop the timing generator, tear down the base pipe (or just stage it
 * down if the fb is still referenced) and drop the MDP clock.
 */
int mdp4_dsi_video_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	unsigned long flags;
	int undx, need_wait = 0;

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 0);

	msleep(20);	/* >= 17 ms */

	complete_all(&vctrl->vsync_comp);	/* release any vsync waiters */

	/*
	 * NOTE(review): pipe is dereferenced here, but the later cleanup
	 * guards with "if (pipe)" -- if base_pipe can legitimately be NULL
	 * (e.g. off before on completed) this deref crashes; confirm.
	 */
	if (pipe->ov_blt_addr) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		if (vctrl->ov_koff != vctrl->ov_done)
			need_wait = 1;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		if (need_wait)
			mdp4_dsi_video_wait4ov(0);
	}

	mdp_histogram_ctrl_all(FALSE);

	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);	/* stop timing generator */
	dsi_video_enabled = 0;

	if (vctrl->vsync_irq_enabled) {
		vctrl->vsync_irq_enabled = 0;
		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0;	/* empty queue */
	}

	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mdp4_overlay_unset_mixer(pipe->mixer_num);
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);

			/* base pipe may change after borderfill_stage_down */
			pipe = vctrl->base_pipe;
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_pipe_free(pipe);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(vctrl->base_pipe, 1);
			mdp4_overlay_iommu_pipe_free(
				vctrl->base_pipe->pipe_ndx, 1);
		}
	}

	/* mdp clock off */
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	return ret;
}

static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int
bpp) { /* * The adreno GPU hardware requires that the pitch be aligned to * 32 pixels for color buffers, so for the cases where the GPU * is writing directly to fb0, the framebuffer pitch * also needs to be 32 pixel aligned */ if (fb_index == 0) return ALIGN(xres, 32) * bpp; else return xres * bpp; } /* 3D side by side */ void mdp4_dsi_video_3d_sbys(struct msm_fb_data_type *mfd, struct msmfb_overlay_3d *r3d) { struct fb_info *fbi; unsigned int buf_offset; int bpp; uint8 *buf = NULL; int cndx = 0; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; if (vctrl->base_pipe == NULL) return; pipe = vctrl->base_pipe; pipe->is_3d = r3d->is_3d; pipe->src_height_3d = r3d->height; pipe->src_width_3d = r3d->width; if (pipe->is_3d) mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_SIDE_BY_SIDE); else mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_NONE); fbi = mfd->fbi; bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; buf_offset = calc_fb_offset(mfd, fbi, bpp); if (pipe->is_3d) { pipe->src_height = pipe->src_height_3d; pipe->src_width = pipe->src_width_3d; pipe->src_h = pipe->src_height_3d; pipe->src_w = pipe->src_width_3d; pipe->dst_h = pipe->src_height_3d; pipe->dst_w = pipe->src_width_3d; pipe->srcp0_ystride = msm_fb_line_length(0, pipe->src_width, bpp); } else { /* 2D */ pipe->src_height = fbi->var.yres; pipe->src_width = fbi->var.xres; pipe->src_h = fbi->var.yres; pipe->src_w = fbi->var.xres; pipe->dst_h = fbi->var.yres; pipe->dst_w = fbi->var.xres; pipe->srcp0_ystride = fbi->fix.line_length; } pipe->src_y = 0; pipe->src_x = 0; pipe->dst_y = 0; pipe->dst_x = 0; if (mfd->display_iova) pipe->srcp0_addr = mfd->display_iova + buf_offset; else pipe->srcp0_addr = (uint32)(buf + buf_offset); mdp4_overlay_rgb_setup(pipe); mdp4_overlayproc_cfg(pipe); mdp4_overlay_dmap_xy(pipe); mdp4_overlay_dmap_cfg(mfd, 1); mdp4_overlay_reg_flush(pipe, 1); mdp4_mixer_stage_up(pipe, 0); 
mdp4_mixer_stage_commit(pipe->mixer_num); mb(); } static void mdp4_dsi_video_blt_ov_update(struct mdp4_overlay_pipe *pipe) { uint32 off, addr; int bpp; char *overlay_base; if (pipe->ov_blt_addr == 0) return; #ifdef BLT_RGB565 bpp = 2; /* overlay ouput is RGB565 */ #else bpp = 3; /* overlay ouput is RGB888 */ #endif off = 0; if (pipe->ov_cnt & 0x01) off = pipe->src_height * pipe->src_width * bpp; addr = pipe->ov_blt_addr + off; /* overlay 0 */ overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */ outpdw(overlay_base + 0x000c, addr); outpdw(overlay_base + 0x001c, addr); } static void mdp4_dsi_video_blt_dmap_update(struct mdp4_overlay_pipe *pipe) { uint32 off, addr; int bpp; if (pipe->ov_blt_addr == 0) return; #ifdef BLT_RGB565 bpp = 2; /* overlay ouput is RGB565 */ #else bpp = 3; /* overlay ouput is RGB888 */ #endif off = 0; if (pipe->dmap_cnt & 0x01) off = pipe->src_height * pipe->src_width * bpp; addr = pipe->dma_blt_addr + off; /* dmap */ MDP_OUTP(MDP_BASE + 0x90008, addr); } /* * mdp4_primary_vsync_dsi_video: called from isr */ void mdp4_primary_vsync_dsi_video(void) { int cndx; struct vsycn_ctrl *vctrl; cndx = 0; vctrl = &vsync_ctrl_db[cndx]; pr_debug("%s: cpu=%d\n", __func__, smp_processor_id()); spin_lock(&vctrl->spin_lock); vctrl->vsync_time = ktime_get(); if (vctrl->wait_vsync_cnt) { complete_all(&vctrl->vsync_comp); vctrl->wait_vsync_cnt = 0; } spin_unlock(&vctrl->spin_lock); } /* * mdp4_dmap_done_dsi_video: called from isr */ void mdp4_dmap_done_dsi_video(int cndx) { struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; if (pipe == NULL) return; spin_lock(&vctrl->spin_lock); vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM); if (vctrl->blt_change) { mdp4_overlayproc_cfg(pipe); mdp4_overlay_dmap_xy(pipe); vctrl->blt_change = 0; } complete_all(&vctrl->dmap_comp); mdp4_overlay_dma_commit(cndx); 
spin_unlock(&vctrl->spin_lock); } /* * mdp4_overlay0_done_dsi: called from isr */ void mdp4_overlay0_done_dsi_video(int cndx) { struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; if (pipe == NULL) return; spin_lock(&vctrl->spin_lock); vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM); vctrl->ov_done++; complete_all(&vctrl->ov_comp); if (pipe->ov_blt_addr == 0) { spin_unlock(&vctrl->spin_lock); return; } mdp4_dsi_video_blt_dmap_update(pipe); pipe->dmap_cnt++; spin_unlock(&vctrl->spin_lock); } /* * make sure the MIPI_DSI_WRITEBACK_SIZE defined at boardfile * has enough space h * w * 3 * 2 */ static void mdp4_dsi_video_do_blt(struct msm_fb_data_type *mfd, int enable) { unsigned long flag; int cndx = 0; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; long long vtime; u32 mode, ctrl; vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; mode = (dbg_force_ov0_blt & 0x0f) ? (dbg_force_ov0_blt & 0x0f) : vctrl->blt_mode; ctrl = (dbg_force_ov0_blt >> 4) ? 
(dbg_force_ov0_blt >> 4) : vctrl->blt_ctrl; pr_debug("%s: mode=%d, enable=%d ov_blt_addr=%x\n", __func__, mode, enable, (int)pipe->ov_blt_addr); if ((mode == MDP4_OVERLAY_MODE_BLT_ALWAYS_OFF) && !pipe->ov_blt_addr) return; else if ((mode == MDP4_OVERLAY_MODE_BLT_ALWAYS_ON) && pipe->ov_blt_addr) return; else if (enable && pipe->ov_blt_addr) return; else if (!enable && !pipe->ov_blt_addr) return; if (pipe->ov_blt_addr == 0) { mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0); if (mfd->ov0_wb_buf->write_addr == 0) { pr_warning("%s: no blt_base assigned\n", __func__); return; } } pr_debug("%s: mode=%d, enable=%d ov_blt_addr=%x\n", __func__, mode, enable, (int)pipe->ov_blt_addr); spin_lock_irqsave(&vctrl->spin_lock, flag); if (pipe->ov_blt_addr == 0) { pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr; pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr; pipe->ov_cnt = 0; pipe->dmap_cnt = 0; vctrl->ov_koff = 0; vctrl->ov_done = 0; vctrl->blt_free = 0; mdp4_stat.blt_dsi_video++; } else { pipe->ov_blt_addr = 0; pipe->dma_blt_addr = 0; vctrl->blt_free = 4; /* 4 commits to free wb buf */ } spin_unlock_irqrestore(&vctrl->spin_lock, flag); if (ctrl == MDP4_OVERLAY_BLT_SWITCH_TG_ON) { spin_lock_irqsave(&vctrl->spin_lock, flag); if (!dsi_video_enabled) { pr_debug("%s: blt switched not in ISR dsi_video_enabled=%d\n", __func__, dsi_video_enabled); mdp4_overlayproc_cfg(pipe); mdp4_overlay_dmap_xy(pipe); } else { pr_debug("%s: blt switched in ISR dsi_video_enabled=%d\n", __func__, dsi_video_enabled); vctrl->blt_change++; } spin_unlock_irqrestore(&vctrl->spin_lock, flag); if (dsi_video_enabled) mdp4_dsi_video_wait4dmap_done(0); } else if (ctrl == MDP4_OVERLAY_BLT_SWITCH_TG_OFF) { pr_debug("%s: blt switched by turning TG off\n", __func__); if (dsi_video_enabled) { mdp4_dsi_video_wait4vsync(0, &vtime); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0); mdp4_dsi_video_wait4dmap_done(0); } mdp4_overlayproc_cfg(pipe); mdp4_overlay_dmap_xy(pipe); if (dsi_video_enabled) { /* * need wait for more than 1 ms to * 
make sure dsi lanes' fifo is empty and * lanes in stop state befroe reset * controller */ usleep(2000); mipi_dsi_sw_reset(); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1); } } else if (ctrl == MDP4_OVERLAY_BLT_SWITCH_POLL) { pr_debug("%s: blt switched by polling mdp status\n", __func__); if (dsi_video_enabled) while (inpdw(MDP_BASE + 0x0018) & 0x05) cpu_relax(); mdp4_overlayproc_cfg(pipe); mdp4_overlay_dmap_xy(pipe); } else pr_err("%s: ctrl=%d is not supported\n", __func__, ctrl); } void mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd, struct msmfb_overlay_blt *req) { mdp4_dsi_video_do_blt(mfd, req->enable); } void mdp4_dsi_video_blt_start(struct msm_fb_data_type *mfd) { mdp4_dsi_video_do_blt(mfd, 1); } void mdp4_dsi_video_blt_stop(struct msm_fb_data_type *mfd) { mdp4_dsi_video_do_blt(mfd, 0); } void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd) { struct fb_info *fbi = mfd->fbi; uint8 *buf; unsigned int buf_offset; int bpp; int cnt, cndx = 0; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; mutex_lock(&mfd->dma->ov_mutex); vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; if (!pipe || !mfd->panel_power_on) { mutex_unlock(&mfd->dma->ov_mutex); return; } pr_debug("%s: cpu=%d pid=%d\n", __func__, smp_processor_id(), current->pid); if (pipe->pipe_type == OVERLAY_TYPE_RGB) { bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; buf_offset = calc_fb_offset(mfd, fbi, bpp); if (mfd->display_iova) pipe->srcp0_addr = mfd->display_iova + buf_offset; else pipe->srcp0_addr = (uint32)(buf + buf_offset); mdp4_dsi_video_pipe_queue(0, pipe); } mdp4_overlay_mdp_perf_upd(mfd, 1); cnt = mdp4_dsi_video_pipe_commit(cndx, 0); if (cnt) { if (pipe->ov_blt_addr) mdp4_dsi_video_wait4ov(cndx); else mdp4_dsi_video_wait4dmap(cndx); } mdp4_overlay_mdp_perf_upd(mfd, 0); mutex_unlock(&mfd->dma->ov_mutex); }
gpl-2.0
geduino-foundation/kernel-unico
drivers/net/wireless/mwifiex/11n_aggr.c
2446
9434
/* * Marvell Wireless LAN device driver: 802.11n Aggregation * * Copyright (C) 2011, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #include "decl.h" #include "ioctl.h" #include "util.h" #include "fw.h" #include "main.h" #include "wmm.h" #include "11n.h" #include "11n_aggr.h" /* * Creates an AMSDU subframe for aggregation into one AMSDU packet. * * The resultant AMSDU subframe format is - * * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+ * | DA | SA | Length | SNAP header | MSDU | * | data[0..5] | data[6..11] | | | data[14..] | * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+ * <--6-bytes--> <--6-bytes--> <--2-bytes--><--8-bytes--> <--n-bytes--> * * This function also computes the amount of padding required to make the * buffer length multiple of 4 bytes. * * Data => |DA|SA|SNAP-TYPE|........ .| * MSDU => |DA|SA|Length|SNAP|...... 
..| */ static int mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr, struct sk_buff *skb_src, int *pad) { int dt_offset; struct rfc_1042_hdr snap = { 0xaa, /* LLC DSAP */ 0xaa, /* LLC SSAP */ 0x03, /* LLC CTRL */ {0x00, 0x00, 0x00}, /* SNAP OUI */ 0x0000 /* SNAP type */ /* * This field will be overwritten * later with ethertype */ }; struct tx_packet_hdr *tx_header; skb_put(skb_aggr, sizeof(*tx_header)); tx_header = (struct tx_packet_hdr *) skb_aggr->data; /* Copy DA and SA */ dt_offset = 2 * ETH_ALEN; memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset); /* Copy SNAP header */ snap.snap_type = *(u16 *) ((u8 *)skb_src->data + dt_offset); dt_offset += sizeof(u16); memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr)); skb_pull(skb_src, dt_offset); /* Update Length field */ tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN); /* Add payload */ skb_put(skb_aggr, skb_src->len); memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data, skb_src->len); *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len + LLC_SNAP_LEN)) & 3)) : 0; skb_put(skb_aggr, *pad); return skb_aggr->len + *pad; } /* * Adds TxPD to AMSDU header. * * Each AMSDU packet will contain one TxPD at the beginning, * followed by multiple AMSDU subframes. 
*/ static void mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv, struct sk_buff *skb) { struct txpd *local_tx_pd; skb_push(skb, sizeof(*local_tx_pd)); local_tx_pd = (struct txpd *) skb->data; memset(local_tx_pd, 0, sizeof(struct txpd)); /* Original priority has been overwritten */ local_tx_pd->priority = (u8) skb->priority; local_tx_pd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb); local_tx_pd->bss_num = priv->bss_num; local_tx_pd->bss_type = priv->bss_type; /* Always zero as the data is followed by struct txpd */ local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd)); local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU); local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len - sizeof(*local_tx_pd)); if (local_tx_pd->tx_control == 0) /* TxCtrl set by user or default */ local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl); if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && (priv->adapter->pps_uapsd_mode)) { if (true == mwifiex_check_last_packet_indication(priv)) { priv->adapter->tx_lock_flag = true; local_tx_pd->flags = MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET; } } } /* * Create aggregated packet. * * This function creates an aggregated MSDU packet, by combining buffers * from the RA list. Each individual buffer is encapsulated as an AMSDU * subframe and all such subframes are concatenated together to form the * AMSDU packet. * * A TxPD is also added to the front of the resultant AMSDU packets for * transmission. The resultant packets format is - * * +---- ~ ----+------ ~ ------+------ ~ ------+-..-+------ ~ ------+ * | TxPD |AMSDU sub-frame|AMSDU sub-frame| .. |AMSDU sub-frame| * | | 1 | 2 | .. | n | * +---- ~ ----+------ ~ ------+------ ~ ------+ .. 
+------ ~ ------+ */ int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, struct mwifiex_ra_list_tbl *pra_list, int headroom, int ptrindex, unsigned long ra_list_flags) __releases(&priv->wmm.ra_list_spinlock) { struct mwifiex_adapter *adapter = priv->adapter; struct sk_buff *skb_aggr, *skb_src; struct mwifiex_txinfo *tx_info_aggr, *tx_info_src; int pad = 0, ret; struct mwifiex_tx_param tx_param; struct txpd *ptx_pd = NULL; if (skb_queue_empty(&pra_list->skb_head)) { spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); return 0; } skb_src = skb_peek(&pra_list->skb_head); tx_info_src = MWIFIEX_SKB_TXCB(skb_src); skb_aggr = dev_alloc_skb(adapter->tx_buf_size); if (!skb_aggr) { dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); return -1; } skb_reserve(skb_aggr, headroom + sizeof(struct txpd)); tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr); tx_info_aggr->bss_index = tx_info_src->bss_index; skb_aggr->priority = skb_src->priority; while (skb_src && ((skb_headroom(skb_aggr) + skb_src->len + LLC_SNAP_LEN) <= adapter->tx_buf_size)) { if (!skb_queue_empty(&pra_list->skb_head)) skb_src = skb_dequeue(&pra_list->skb_head); else skb_src = NULL; if (skb_src) pra_list->total_pkts_size -= skb_src->len; atomic_dec(&priv->wmm.tx_pkts_queued); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad); mwifiex_write_data_complete(adapter, skb_src, 0); spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) { spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); return -1; } if (!skb_queue_empty(&pra_list->skb_head)) skb_src = skb_peek(&pra_list->skb_head); else skb_src = NULL; } spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); /* Last AMSDU packet does not need padding */ skb_trim(skb_aggr, skb_aggr->len - pad); /* Form AMSDU */ 
mwifiex_11n_form_amsdu_txpd(priv, skb_aggr); if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) ptx_pd = (struct txpd *)skb_aggr->data; skb_push(skb_aggr, headroom); tx_param.next_pkt_len = ((pra_list->total_pkts_size) ? (((pra_list->total_pkts_size) > adapter->tx_buf_size) ? adapter-> tx_buf_size : pra_list->total_pkts_size + LLC_SNAP_LEN + sizeof(struct txpd)) : 0); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, skb_aggr->data, skb_aggr->len, &tx_param); switch (ret) { case -EBUSY: spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) { spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); mwifiex_write_data_complete(adapter, skb_aggr, -1); return -1; } if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && (adapter->pps_uapsd_mode) && (adapter->tx_lock_flag)) { priv->adapter->tx_lock_flag = false; if (ptx_pd) ptx_pd->flags = 0; } skb_queue_tail(&pra_list->skb_head, skb_aggr); pra_list->total_pkts_size += skb_aggr->len; atomic_inc(&priv->wmm.tx_pkts_queued); tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); break; case -1: adapter->data_sent = false; dev_err(adapter->dev, "%s: host_to_card failed: %#x\n", __func__, ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb_aggr, ret); return 0; case -EINPROGRESS: adapter->data_sent = false; break; case 0: mwifiex_write_data_complete(adapter, skb_aggr, ret); break; default: break; } if (ret != -EBUSY) { spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); if (mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) { priv->wmm.packets_out[ptrindex]++; priv->wmm.tid_tbl_ptr[ptrindex].ra_list_curr = pra_list; } /* Now bss_prio_cur pointer points to next node */ adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur = list_first_entry( 
&adapter->bss_prio_tbl[priv->bss_priority] .bss_prio_cur->list, struct mwifiex_bss_prio_node, list); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); } return 0; }
gpl-2.0
Luquidtester/DirtyKernel-3x-ION
drivers/net/wireless/mwifiex/11n_aggr.c
2446
9434
/* * Marvell Wireless LAN device driver: 802.11n Aggregation * * Copyright (C) 2011, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #include "decl.h" #include "ioctl.h" #include "util.h" #include "fw.h" #include "main.h" #include "wmm.h" #include "11n.h" #include "11n_aggr.h" /* * Creates an AMSDU subframe for aggregation into one AMSDU packet. * * The resultant AMSDU subframe format is - * * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+ * | DA | SA | Length | SNAP header | MSDU | * | data[0..5] | data[6..11] | | | data[14..] | * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+ * <--6-bytes--> <--6-bytes--> <--2-bytes--><--8-bytes--> <--n-bytes--> * * This function also computes the amount of padding required to make the * buffer length multiple of 4 bytes. * * Data => |DA|SA|SNAP-TYPE|........ .| * MSDU => |DA|SA|Length|SNAP|...... 
..| */ static int mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr, struct sk_buff *skb_src, int *pad) { int dt_offset; struct rfc_1042_hdr snap = { 0xaa, /* LLC DSAP */ 0xaa, /* LLC SSAP */ 0x03, /* LLC CTRL */ {0x00, 0x00, 0x00}, /* SNAP OUI */ 0x0000 /* SNAP type */ /* * This field will be overwritten * later with ethertype */ }; struct tx_packet_hdr *tx_header; skb_put(skb_aggr, sizeof(*tx_header)); tx_header = (struct tx_packet_hdr *) skb_aggr->data; /* Copy DA and SA */ dt_offset = 2 * ETH_ALEN; memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset); /* Copy SNAP header */ snap.snap_type = *(u16 *) ((u8 *)skb_src->data + dt_offset); dt_offset += sizeof(u16); memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr)); skb_pull(skb_src, dt_offset); /* Update Length field */ tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN); /* Add payload */ skb_put(skb_aggr, skb_src->len); memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data, skb_src->len); *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len + LLC_SNAP_LEN)) & 3)) : 0; skb_put(skb_aggr, *pad); return skb_aggr->len + *pad; } /* * Adds TxPD to AMSDU header. * * Each AMSDU packet will contain one TxPD at the beginning, * followed by multiple AMSDU subframes. 
*/ static void mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv, struct sk_buff *skb) { struct txpd *local_tx_pd; skb_push(skb, sizeof(*local_tx_pd)); local_tx_pd = (struct txpd *) skb->data; memset(local_tx_pd, 0, sizeof(struct txpd)); /* Original priority has been overwritten */ local_tx_pd->priority = (u8) skb->priority; local_tx_pd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb); local_tx_pd->bss_num = priv->bss_num; local_tx_pd->bss_type = priv->bss_type; /* Always zero as the data is followed by struct txpd */ local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd)); local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU); local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len - sizeof(*local_tx_pd)); if (local_tx_pd->tx_control == 0) /* TxCtrl set by user or default */ local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl); if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && (priv->adapter->pps_uapsd_mode)) { if (true == mwifiex_check_last_packet_indication(priv)) { priv->adapter->tx_lock_flag = true; local_tx_pd->flags = MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET; } } } /* * Create aggregated packet. * * This function creates an aggregated MSDU packet, by combining buffers * from the RA list. Each individual buffer is encapsulated as an AMSDU * subframe and all such subframes are concatenated together to form the * AMSDU packet. * * A TxPD is also added to the front of the resultant AMSDU packets for * transmission. The resultant packets format is - * * +---- ~ ----+------ ~ ------+------ ~ ------+-..-+------ ~ ------+ * | TxPD |AMSDU sub-frame|AMSDU sub-frame| .. |AMSDU sub-frame| * | | 1 | 2 | .. | n | * +---- ~ ----+------ ~ ------+------ ~ ------+ .. 
+------ ~ ------+ */ int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, struct mwifiex_ra_list_tbl *pra_list, int headroom, int ptrindex, unsigned long ra_list_flags) __releases(&priv->wmm.ra_list_spinlock) { struct mwifiex_adapter *adapter = priv->adapter; struct sk_buff *skb_aggr, *skb_src; struct mwifiex_txinfo *tx_info_aggr, *tx_info_src; int pad = 0, ret; struct mwifiex_tx_param tx_param; struct txpd *ptx_pd = NULL; if (skb_queue_empty(&pra_list->skb_head)) { spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); return 0; } skb_src = skb_peek(&pra_list->skb_head); tx_info_src = MWIFIEX_SKB_TXCB(skb_src); skb_aggr = dev_alloc_skb(adapter->tx_buf_size); if (!skb_aggr) { dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); return -1; } skb_reserve(skb_aggr, headroom + sizeof(struct txpd)); tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr); tx_info_aggr->bss_index = tx_info_src->bss_index; skb_aggr->priority = skb_src->priority; while (skb_src && ((skb_headroom(skb_aggr) + skb_src->len + LLC_SNAP_LEN) <= adapter->tx_buf_size)) { if (!skb_queue_empty(&pra_list->skb_head)) skb_src = skb_dequeue(&pra_list->skb_head); else skb_src = NULL; if (skb_src) pra_list->total_pkts_size -= skb_src->len; atomic_dec(&priv->wmm.tx_pkts_queued); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad); mwifiex_write_data_complete(adapter, skb_src, 0); spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) { spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); return -1; } if (!skb_queue_empty(&pra_list->skb_head)) skb_src = skb_peek(&pra_list->skb_head); else skb_src = NULL; } spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); /* Last AMSDU packet does not need padding */ skb_trim(skb_aggr, skb_aggr->len - pad); /* Form AMSDU */ 
mwifiex_11n_form_amsdu_txpd(priv, skb_aggr); if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) ptx_pd = (struct txpd *)skb_aggr->data; skb_push(skb_aggr, headroom); tx_param.next_pkt_len = ((pra_list->total_pkts_size) ? (((pra_list->total_pkts_size) > adapter->tx_buf_size) ? adapter-> tx_buf_size : pra_list->total_pkts_size + LLC_SNAP_LEN + sizeof(struct txpd)) : 0); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, skb_aggr->data, skb_aggr->len, &tx_param); switch (ret) { case -EBUSY: spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) { spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); mwifiex_write_data_complete(adapter, skb_aggr, -1); return -1; } if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && (adapter->pps_uapsd_mode) && (adapter->tx_lock_flag)) { priv->adapter->tx_lock_flag = false; if (ptx_pd) ptx_pd->flags = 0; } skb_queue_tail(&pra_list->skb_head, skb_aggr); pra_list->total_pkts_size += skb_aggr->len; atomic_inc(&priv->wmm.tx_pkts_queued); tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); break; case -1: adapter->data_sent = false; dev_err(adapter->dev, "%s: host_to_card failed: %#x\n", __func__, ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb_aggr, ret); return 0; case -EINPROGRESS: adapter->data_sent = false; break; case 0: mwifiex_write_data_complete(adapter, skb_aggr, ret); break; default: break; } if (ret != -EBUSY) { spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); if (mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) { priv->wmm.packets_out[ptrindex]++; priv->wmm.tid_tbl_ptr[ptrindex].ra_list_curr = pra_list; } /* Now bss_prio_cur pointer points to next node */ adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur = list_first_entry( 
&adapter->bss_prio_tbl[priv->bss_priority] .bss_prio_cur->list, struct mwifiex_bss_prio_node, list); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); } return 0; }
gpl-2.0
ShevT/android_kernel_d1_p1
drivers/staging/iio/adc/max1363_ring.c
2702
5281
/* * Copyright (C) 2008 Jonathan Cameron * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * max1363_ring.c */ #include <linux/interrupt.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/sysfs.h> #include <linux/list.h> #include <linux/i2c.h> #include <linux/bitops.h> #include "../iio.h" #include "../ring_generic.h" #include "../ring_sw.h" #include "../trigger.h" #include "../sysfs.h" #include "max1363.h" int max1363_single_channel_from_ring(long mask, struct max1363_state *st) { struct iio_ring_buffer *ring = iio_priv_to_dev(st)->ring; int count = 0, ret; u8 *ring_data; if (!(st->current_mode->modemask & mask)) { ret = -EBUSY; goto error_ret; } ring_data = kmalloc(ring->access->get_bytes_per_datum(ring), GFP_KERNEL); if (ring_data == NULL) { ret = -ENOMEM; goto error_ret; } ret = ring->access->read_last(ring, ring_data); if (ret) goto error_free_ring_data; /* Need a count of channels prior to this one */ mask >>= 1; while (mask) { if (mask & st->current_mode->modemask) count++; mask >>= 1; } if (st->chip_info->bits != 8) ret = ((int)(ring_data[count*2 + 0] & 0x0F) << 8) + (int)(ring_data[count*2 + 1]); else ret = ring_data[count]; error_free_ring_data: kfree(ring_data); error_ret: return ret; } /** * max1363_ring_preenable() - setup the parameters of the ring before enabling * * The complex nature of the setting of the nuber of bytes per datum is due * to this driver currently ensuring that the timestamp is stored at an 8 * byte boundary. 
**/ static int max1363_ring_preenable(struct iio_dev *indio_dev) { struct max1363_state *st = iio_priv(indio_dev); struct iio_ring_buffer *ring = indio_dev->ring; size_t d_size = 0; unsigned long numvals; /* * Need to figure out the current mode based upon the requested * scan mask in iio_dev */ st->current_mode = max1363_match_mode(ring->scan_mask, st->chip_info); if (!st->current_mode) return -EINVAL; max1363_set_scan_mode(st); numvals = hweight_long(st->current_mode->modemask); if (ring->access->set_bytes_per_datum) { if (ring->scan_timestamp) d_size += sizeof(s64); if (st->chip_info->bits != 8) d_size += numvals*2; else d_size += numvals; if (ring->scan_timestamp && (d_size % 8)) d_size += 8 - (d_size % 8); ring->access->set_bytes_per_datum(ring, d_size); } return 0; } static irqreturn_t max1363_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->private_data; struct max1363_state *st = iio_priv(indio_dev); s64 time_ns; __u8 *rxbuf; int b_sent; size_t d_size; unsigned long numvals = hweight_long(st->current_mode->modemask); /* Ensure the timestamp is 8 byte aligned */ if (st->chip_info->bits != 8) d_size = numvals*2 + sizeof(s64); else d_size = numvals + sizeof(s64); if (d_size % sizeof(s64)) d_size += sizeof(s64) - (d_size % sizeof(s64)); /* Monitor mode prevents reading. Whilst not currently implemented * might as well have this test in here in the meantime as it does * no harm. 
*/ if (numvals == 0) return IRQ_HANDLED; rxbuf = kmalloc(d_size, GFP_KERNEL); if (rxbuf == NULL) return -ENOMEM; if (st->chip_info->bits != 8) b_sent = i2c_master_recv(st->client, rxbuf, numvals*2); else b_sent = i2c_master_recv(st->client, rxbuf, numvals); if (b_sent < 0) goto done; time_ns = iio_get_time_ns(); memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns)); indio_dev->ring->access->store_to(indio_dev->ring, rxbuf, time_ns); done: iio_trigger_notify_done(indio_dev->trig); kfree(rxbuf); return IRQ_HANDLED; } static const struct iio_ring_setup_ops max1363_ring_setup_ops = { .postenable = &iio_triggered_ring_postenable, .preenable = &max1363_ring_preenable, .predisable = &iio_triggered_ring_predisable, }; int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev) { struct max1363_state *st = iio_priv(indio_dev); int ret = 0; indio_dev->ring = iio_sw_rb_allocate(indio_dev); if (!indio_dev->ring) { ret = -ENOMEM; goto error_ret; } indio_dev->pollfunc = iio_alloc_pollfunc(NULL, &max1363_trigger_handler, IRQF_ONESHOT, indio_dev, "%s_consumer%d", st->client->name, indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_deallocate_sw_rb; } /* Effectively select the ring buffer implementation */ indio_dev->ring->access = &ring_sw_access_funcs; /* Ring buffer functions - here trigger setup related */ indio_dev->ring->setup_ops = &max1363_ring_setup_ops; /* Flag that polled ring buffering is possible */ indio_dev->modes |= INDIO_RING_TRIGGERED; return 0; error_deallocate_sw_rb: iio_sw_rb_free(indio_dev->ring); error_ret: return ret; } void max1363_ring_cleanup(struct iio_dev *indio_dev) { /* ensure that the trigger has been detached */ if (indio_dev->trig) { iio_put_trigger(indio_dev->trig); iio_trigger_dettach_poll_func(indio_dev->trig, indio_dev->pollfunc); } iio_dealloc_pollfunc(indio_dev->pollfunc); iio_sw_rb_free(indio_dev->ring); }
gpl-2.0
webore/lenovo
drivers/media/dvb/frontends/ix2505v.c
2958
7896
/** * Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner * * Copyright (C) 2010 Malcolm Priestley * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License Version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/module.h> #include <linux/dvb/frontend.h> #include <linux/slab.h> #include <linux/types.h> #include "ix2505v.h" static int ix2505v_debug; #define dprintk(level, args...) do { \ if (ix2505v_debug & level) \ printk(KERN_DEBUG "ix2505v: " args); \ } while (0) #define deb_info(args...) dprintk(0x01, args) #define deb_i2c(args...) dprintk(0x02, args) struct ix2505v_state { struct i2c_adapter *i2c; const struct ix2505v_config *config; u32 frequency; }; /** * Data read format of the Sharp IX2505V B0017 * * byte1: 1 | 1 | 0 | 0 | 0 | MA1 | MA0 | 1 * byte2: POR | FL | RD2 | RD1 | RD0 | X | X | X * * byte1 = address * byte2; * POR = Power on Reset (VCC H=<2.2v L=>2.2v) * FL = Phase Lock (H=lock L=unlock) * RD0-2 = Reserved internal operations * * Only POR can be used to check the tuner is present * * Caution: after byte2 the I2C reverts to write mode continuing to read * may corrupt tuning data. * */ static int ix2505v_read_status_reg(struct ix2505v_state *state) { u8 addr = state->config->tuner_address; u8 b2[] = {0}; int ret; struct i2c_msg msg[1] = { { .addr = addr, .flags = I2C_M_RD, .buf = b2, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 1); deb_i2c("Read %s ", __func__); return (ret == 1) ? 
(int) b2[0] : -1; } static int ix2505v_write(struct ix2505v_state *state, u8 buf[], u8 count) { struct i2c_msg msg[1] = { { .addr = state->config->tuner_address, .flags = 0, .buf = buf, .len = count }, }; int ret; ret = i2c_transfer(state->i2c, msg, 1); if (ret != 1) { deb_i2c("%s: i2c error, ret=%d\n", __func__, ret); return -EIO; } return 0; } static int ix2505v_release(struct dvb_frontend *fe) { struct ix2505v_state *state = fe->tuner_priv; fe->tuner_priv = NULL; kfree(state); return 0; } /** * Data write format of the Sharp IX2505V B0017 * * byte1: 1 | 1 | 0 | 0 | 0 | 0(MA1)| 0(MA0)| 0 * byte2: 0 | BG1 | BG2 | N8 | N7 | N6 | N5 | N4 * byte3: N3 | N2 | N1 | A5 | A4 | A3 | A2 | A1 * byte4: 1 | 1(C1) | 1(C0) | PD5 | PD4 | TM | 0(RTS)| 1(REF) * byte5: BA2 | BA1 | BA0 | PSC | PD3 |PD2/TS2|DIV/TS1|PD0/TS0 * * byte1 = address * * Write order * 1) byte1 -> byte2 -> byte3 -> byte4 -> byte5 * 2) byte1 -> byte4 -> byte5 -> byte2 -> byte3 * 3) byte1 -> byte2 -> byte3 -> byte4 * 4) byte1 -> byte4 -> byte5 -> byte2 * 5) byte1 -> byte2 -> byte3 * 6) byte1 -> byte4 -> byte5 * 7) byte1 -> byte2 * 8) byte1 -> byte4 * * Recommended Setup * 1 -> 8 -> 6 */ static int ix2505v_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) { struct ix2505v_state *state = fe->tuner_priv; u32 frequency = params->frequency; u32 b_w = (params->u.qpsk.symbol_rate * 27) / 32000; u32 div_factor, N , A, x; int ret = 0, len; u8 gain, cc, ref, psc, local_osc, lpf; u8 data[4] = {0}; if ((frequency < fe->ops.info.frequency_min) || (frequency > fe->ops.info.frequency_max)) return -EINVAL; if (state->config->tuner_gain) gain = (state->config->tuner_gain < 4) ? 
state->config->tuner_gain : 0; else gain = 0x0; if (state->config->tuner_chargepump) cc = state->config->tuner_chargepump; else cc = 0x3; ref = 8; /* REF =1 */ psc = 32; /* PSC = 0 */ div_factor = (frequency * ref) / 40; /* local osc = 4Mhz */ x = div_factor / psc; N = x/100; A = ((x - (N * 100)) * psc) / 100; data[0] = ((gain & 0x3) << 5) | (N >> 3); data[1] = (N << 5) | (A & 0x1f); data[2] = 0x81 | ((cc & 0x3) << 5) ; /*PD5,PD4 & TM = 0|C1,C0|REF=1*/ deb_info("Frq=%d x=%d N=%d A=%d\n", frequency, x, N, A); if (frequency <= 1065000) local_osc = (6 << 5) | 2; else if (frequency <= 1170000) local_osc = (7 << 5) | 2; else if (frequency <= 1300000) local_osc = (1 << 5); else if (frequency <= 1445000) local_osc = (2 << 5); else if (frequency <= 1607000) local_osc = (3 << 5); else if (frequency <= 1778000) local_osc = (4 << 5); else if (frequency <= 1942000) local_osc = (5 << 5); else /*frequency up to 2150000*/ local_osc = (6 << 5); data[3] = local_osc; /* all other bits set 0 */ if (b_w <= 10000) lpf = 0xc; else if (b_w <= 12000) lpf = 0x2; else if (b_w <= 14000) lpf = 0xa; else if (b_w <= 16000) lpf = 0x6; else if (b_w <= 18000) lpf = 0xe; else if (b_w <= 20000) lpf = 0x1; else if (b_w <= 22000) lpf = 0x9; else if (b_w <= 24000) lpf = 0x5; else if (b_w <= 26000) lpf = 0xd; else if (b_w <= 28000) lpf = 0x3; else lpf = 0xb; deb_info("Osc=%x b_w=%x lpf=%x\n", local_osc, b_w, lpf); deb_info("Data 0=[%x%x%x%x]\n", data[0], data[1], data[2], data[3]); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); len = sizeof(data); ret |= ix2505v_write(state, data, len); data[2] |= 0x4; /* set TM = 1 other bits same */ if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); len = 1; ret |= ix2505v_write(state, &data[2], len); /* write byte 4 only */ msleep(10); data[2] |= ((lpf >> 2) & 0x3) << 3; /* lpf */ data[3] |= (lpf & 0x3) << 2; deb_info("Data 2=[%x%x]\n", data[2], data[3]); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); len = 2; ret |= ix2505v_write(state, 
&data[2], len); /* write byte 4 & 5 */ if (state->config->min_delay_ms) msleep(state->config->min_delay_ms); state->frequency = frequency; return ret; } static int ix2505v_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct ix2505v_state *state = fe->tuner_priv; *frequency = state->frequency; return 0; } static struct dvb_tuner_ops ix2505v_tuner_ops = { .info = { .name = "Sharp IX2505V (B0017)", .frequency_min = 950000, .frequency_max = 2175000 }, .release = ix2505v_release, .set_params = ix2505v_set_params, .get_frequency = ix2505v_get_frequency, }; struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe, const struct ix2505v_config *config, struct i2c_adapter *i2c) { struct ix2505v_state *state = NULL; int ret; if (NULL == config) { deb_i2c("%s: no config ", __func__); goto error; } state = kzalloc(sizeof(struct ix2505v_state), GFP_KERNEL); if (NULL == state) return NULL; state->config = config; state->i2c = i2c; if (state->config->tuner_write_only) { if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); ret = ix2505v_read_status_reg(state); if (ret & 0x80) { deb_i2c("%s: No IX2505V found\n", __func__); goto error; } if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } fe->tuner_priv = state; memcpy(&fe->ops.tuner_ops, &ix2505v_tuner_ops, sizeof(struct dvb_tuner_ops)); deb_i2c("%s: initialization (%s addr=0x%02x) ok\n", __func__, fe->ops.tuner_ops.info.name, config->tuner_address); return fe; error: kfree(state); return NULL; } EXPORT_SYMBOL(ix2505v_attach); module_param_named(debug, ix2505v_debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("DVB IX2505V tuner driver"); MODULE_AUTHOR("Malcolm Priestley"); MODULE_LICENSE("GPL");
gpl-2.0
Divaksh/Speedy-Kernel-u8500
lib/percpu_counter.c
2958
5166
/* * Fast batching percpu counters. */ #include <linux/percpu_counter.h> #include <linux/notifier.h> #include <linux/mutex.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/debugobjects.h> static LIST_HEAD(percpu_counters); static DEFINE_MUTEX(percpu_counters_lock); #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER static struct debug_obj_descr percpu_counter_debug_descr; static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state) { struct percpu_counter *fbc = addr; switch (state) { case ODEBUG_STATE_ACTIVE: percpu_counter_destroy(fbc); debug_object_free(fbc, &percpu_counter_debug_descr); return 1; default: return 0; } } static struct debug_obj_descr percpu_counter_debug_descr = { .name = "percpu_counter", .fixup_free = percpu_counter_fixup_free, }; static inline void debug_percpu_counter_activate(struct percpu_counter *fbc) { debug_object_init(fbc, &percpu_counter_debug_descr); debug_object_activate(fbc, &percpu_counter_debug_descr); } static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc) { debug_object_deactivate(fbc, &percpu_counter_debug_descr); debug_object_free(fbc, &percpu_counter_debug_descr); } #else /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */ static inline void debug_percpu_counter_activate(struct percpu_counter *fbc) { } static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc) { } #endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */ void percpu_counter_set(struct percpu_counter *fbc, s64 amount) { int cpu; spin_lock(&fbc->lock); for_each_possible_cpu(cpu) { s32 *pcount = per_cpu_ptr(fbc->counters, cpu); *pcount = 0; } fbc->count = amount; spin_unlock(&fbc->lock); } EXPORT_SYMBOL(percpu_counter_set); void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) { s64 count; preempt_disable(); count = __this_cpu_read(*fbc->counters) + amount; if (count >= batch || count <= -batch) { spin_lock(&fbc->lock); fbc->count += count; 
__this_cpu_write(*fbc->counters, 0); spin_unlock(&fbc->lock); } else { __this_cpu_write(*fbc->counters, count); } preempt_enable(); } EXPORT_SYMBOL(__percpu_counter_add); /* * Add up all the per-cpu counts, return the result. This is a more accurate * but much slower version of percpu_counter_read_positive() */ s64 __percpu_counter_sum(struct percpu_counter *fbc) { s64 ret; int cpu; spin_lock(&fbc->lock); ret = fbc->count; for_each_online_cpu(cpu) { s32 *pcount = per_cpu_ptr(fbc->counters, cpu); ret += *pcount; } spin_unlock(&fbc->lock); return ret; } EXPORT_SYMBOL(__percpu_counter_sum); int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, struct lock_class_key *key) { spin_lock_init(&fbc->lock); lockdep_set_class(&fbc->lock, key); fbc->count = amount; fbc->counters = alloc_percpu(s32); if (!fbc->counters) return -ENOMEM; debug_percpu_counter_activate(fbc); #ifdef CONFIG_HOTPLUG_CPU INIT_LIST_HEAD(&fbc->list); mutex_lock(&percpu_counters_lock); list_add(&fbc->list, &percpu_counters); mutex_unlock(&percpu_counters_lock); #endif return 0; } EXPORT_SYMBOL(__percpu_counter_init); void percpu_counter_destroy(struct percpu_counter *fbc) { if (!fbc->counters) return; debug_percpu_counter_deactivate(fbc); #ifdef CONFIG_HOTPLUG_CPU mutex_lock(&percpu_counters_lock); list_del(&fbc->list); mutex_unlock(&percpu_counters_lock); #endif free_percpu(fbc->counters); fbc->counters = NULL; } EXPORT_SYMBOL(percpu_counter_destroy); int percpu_counter_batch __read_mostly = 32; EXPORT_SYMBOL(percpu_counter_batch); static void compute_batch_value(void) { int nr = num_online_cpus(); percpu_counter_batch = max(32, nr*2); } static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { #ifdef CONFIG_HOTPLUG_CPU unsigned int cpu; struct percpu_counter *fbc; compute_batch_value(); if (action != CPU_DEAD) return NOTIFY_OK; cpu = (unsigned long)hcpu; mutex_lock(&percpu_counters_lock); list_for_each_entry(fbc, &percpu_counters, 
list) { s32 *pcount; unsigned long flags; spin_lock_irqsave(&fbc->lock, flags); pcount = per_cpu_ptr(fbc->counters, cpu); fbc->count += *pcount; *pcount = 0; spin_unlock_irqrestore(&fbc->lock, flags); } mutex_unlock(&percpu_counters_lock); #endif return NOTIFY_OK; } /* * Compare counter against given value. * Return 1 if greater, 0 if equal and -1 if less */ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) { s64 count; count = percpu_counter_read(fbc); /* Check to see if rough count will be sufficient for comparison */ if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) { if (count > rhs) return 1; else return -1; } /* Need to use precise count */ count = percpu_counter_sum(fbc); if (count > rhs) return 1; else if (count < rhs) return -1; else return 0; } EXPORT_SYMBOL(percpu_counter_compare); static int __init percpu_counter_startup(void) { compute_batch_value(); hotcpu_notifier(percpu_counter_hotcpu_callback, 0); return 0; } module_init(percpu_counter_startup);
gpl-2.0
TeamWin/android_kernel_htc_msm8660
lib/percpu_counter.c
2958
5166
/* * Fast batching percpu counters. */ #include <linux/percpu_counter.h> #include <linux/notifier.h> #include <linux/mutex.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/debugobjects.h> static LIST_HEAD(percpu_counters); static DEFINE_MUTEX(percpu_counters_lock); #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER static struct debug_obj_descr percpu_counter_debug_descr; static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state) { struct percpu_counter *fbc = addr; switch (state) { case ODEBUG_STATE_ACTIVE: percpu_counter_destroy(fbc); debug_object_free(fbc, &percpu_counter_debug_descr); return 1; default: return 0; } } static struct debug_obj_descr percpu_counter_debug_descr = { .name = "percpu_counter", .fixup_free = percpu_counter_fixup_free, }; static inline void debug_percpu_counter_activate(struct percpu_counter *fbc) { debug_object_init(fbc, &percpu_counter_debug_descr); debug_object_activate(fbc, &percpu_counter_debug_descr); } static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc) { debug_object_deactivate(fbc, &percpu_counter_debug_descr); debug_object_free(fbc, &percpu_counter_debug_descr); } #else /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */ static inline void debug_percpu_counter_activate(struct percpu_counter *fbc) { } static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc) { } #endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */ void percpu_counter_set(struct percpu_counter *fbc, s64 amount) { int cpu; spin_lock(&fbc->lock); for_each_possible_cpu(cpu) { s32 *pcount = per_cpu_ptr(fbc->counters, cpu); *pcount = 0; } fbc->count = amount; spin_unlock(&fbc->lock); } EXPORT_SYMBOL(percpu_counter_set); void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) { s64 count; preempt_disable(); count = __this_cpu_read(*fbc->counters) + amount; if (count >= batch || count <= -batch) { spin_lock(&fbc->lock); fbc->count += count; 
__this_cpu_write(*fbc->counters, 0); spin_unlock(&fbc->lock); } else { __this_cpu_write(*fbc->counters, count); } preempt_enable(); } EXPORT_SYMBOL(__percpu_counter_add); /* * Add up all the per-cpu counts, return the result. This is a more accurate * but much slower version of percpu_counter_read_positive() */ s64 __percpu_counter_sum(struct percpu_counter *fbc) { s64 ret; int cpu; spin_lock(&fbc->lock); ret = fbc->count; for_each_online_cpu(cpu) { s32 *pcount = per_cpu_ptr(fbc->counters, cpu); ret += *pcount; } spin_unlock(&fbc->lock); return ret; } EXPORT_SYMBOL(__percpu_counter_sum); int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, struct lock_class_key *key) { spin_lock_init(&fbc->lock); lockdep_set_class(&fbc->lock, key); fbc->count = amount; fbc->counters = alloc_percpu(s32); if (!fbc->counters) return -ENOMEM; debug_percpu_counter_activate(fbc); #ifdef CONFIG_HOTPLUG_CPU INIT_LIST_HEAD(&fbc->list); mutex_lock(&percpu_counters_lock); list_add(&fbc->list, &percpu_counters); mutex_unlock(&percpu_counters_lock); #endif return 0; } EXPORT_SYMBOL(__percpu_counter_init); void percpu_counter_destroy(struct percpu_counter *fbc) { if (!fbc->counters) return; debug_percpu_counter_deactivate(fbc); #ifdef CONFIG_HOTPLUG_CPU mutex_lock(&percpu_counters_lock); list_del(&fbc->list); mutex_unlock(&percpu_counters_lock); #endif free_percpu(fbc->counters); fbc->counters = NULL; } EXPORT_SYMBOL(percpu_counter_destroy); int percpu_counter_batch __read_mostly = 32; EXPORT_SYMBOL(percpu_counter_batch); static void compute_batch_value(void) { int nr = num_online_cpus(); percpu_counter_batch = max(32, nr*2); } static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { #ifdef CONFIG_HOTPLUG_CPU unsigned int cpu; struct percpu_counter *fbc; compute_batch_value(); if (action != CPU_DEAD) return NOTIFY_OK; cpu = (unsigned long)hcpu; mutex_lock(&percpu_counters_lock); list_for_each_entry(fbc, &percpu_counters, 
list) { s32 *pcount; unsigned long flags; spin_lock_irqsave(&fbc->lock, flags); pcount = per_cpu_ptr(fbc->counters, cpu); fbc->count += *pcount; *pcount = 0; spin_unlock_irqrestore(&fbc->lock, flags); } mutex_unlock(&percpu_counters_lock); #endif return NOTIFY_OK; } /* * Compare counter against given value. * Return 1 if greater, 0 if equal and -1 if less */ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) { s64 count; count = percpu_counter_read(fbc); /* Check to see if rough count will be sufficient for comparison */ if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) { if (count > rhs) return 1; else return -1; } /* Need to use precise count */ count = percpu_counter_sum(fbc); if (count > rhs) return 1; else if (count < rhs) return -1; else return 0; } EXPORT_SYMBOL(percpu_counter_compare); static int __init percpu_counter_startup(void) { compute_batch_value(); hotcpu_notifier(percpu_counter_hotcpu_callback, 0); return 0; } module_init(percpu_counter_startup);
gpl-2.0
OwnROM-Devices/OwnKernel-sprout
arch/x86/kernel/vm86_32.c
3214
22256
/* * Copyright (C) 1994 Linus Torvalds * * 29 dec 2001 - Fixed oopses caused by unchecked access to the vm86 * stack - Manfred Spraul <manfred@colorfullife.com> * * 22 mar 2002 - Manfred detected the stackfaults, but didn't handle * them correctly. Now the emulation will be in a * consistent state after stackfaults - Kasper Dupont * <kasperd@daimi.au.dk> * * 22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont * <kasperd@daimi.au.dk> * * ?? ??? 2002 - Fixed premature returns from handle_vm86_fault * caused by Kasper Dupont's changes - Stas Sergeev * * 4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes. * Kasper Dupont <kasperd@daimi.au.dk> * * 9 apr 2002 - Changed syntax of macros in handle_vm86_fault. * Kasper Dupont <kasperd@daimi.au.dk> * * 9 apr 2002 - Changed stack access macros to jump to a label * instead of returning to userspace. This simplifies * do_int, and is needed by handle_vm6_fault. Kasper * Dupont <kasperd@daimi.au.dk> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/capability.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/syscalls.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/highmem.h> #include <linux/ptrace.h> #include <linux/audit.h> #include <linux/stddef.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/tlbflush.h> #include <asm/irq.h> /* * Known problems: * * Interrupt handling is not guaranteed: * - a real x86 will disable all interrupts for one instruction * after a "mov ss,xx" to make stack handling atomic even without * the 'lss' instruction. We can't guarantee this in v86 mode, * as the next instruction might result in a page fault or similar. * - a real x86 will have interrupts disabled for one instruction * past the 'sti' that enables them. We don't bother with all the * details yet. 
* * Let's hope these problems do not actually matter for anything. */ #define KVM86 ((struct kernel_vm86_struct *)regs) #define VMPI KVM86->vm86plus /* * 8- and 16-bit register defines.. */ #define AL(regs) (((unsigned char *)&((regs)->pt.ax))[0]) #define AH(regs) (((unsigned char *)&((regs)->pt.ax))[1]) #define IP(regs) (*(unsigned short *)&((regs)->pt.ip)) #define SP(regs) (*(unsigned short *)&((regs)->pt.sp)) /* * virtual flags (16 and 32-bit versions) */ #define VFLAGS (*(unsigned short *)&(current->thread.v86flags)) #define VEFLAGS (current->thread.v86flags) #define set_flags(X, new, mask) \ ((X) = ((X) & ~(mask)) | ((new) & (mask))) #define SAFE_MASK (0xDD5) #define RETURN_MASK (0xDFF) /* convert kernel_vm86_regs to vm86_regs */ static int copy_vm86_regs_to_user(struct vm86_regs __user *user, const struct kernel_vm86_regs *regs) { int ret = 0; /* * kernel_vm86_regs is missing gs, so copy everything up to * (but not including) orig_eax, and then rest including orig_eax. */ ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax)); ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax, sizeof(struct kernel_vm86_regs) - offsetof(struct kernel_vm86_regs, pt.orig_ax)); return ret; } /* convert vm86_regs to kernel_vm86_regs */ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs, const struct vm86_regs __user *user, unsigned extra) { int ret = 0; /* copy ax-fs inclusive */ ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax)); /* copy orig_ax-__gsh+extra */ ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax, sizeof(struct kernel_vm86_regs) - offsetof(struct kernel_vm86_regs, pt.orig_ax) + extra); return ret; } struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) { struct tss_struct *tss; struct pt_regs *ret; unsigned long tmp; /* * This gets called from entry.S with interrupts disabled, but * from process context. Enable interrupts here, before trying * to access user space. 
*/ local_irq_enable(); if (!current->thread.vm86_info) { pr_alert("no vm86_info: BAD\n"); do_exit(SIGSEGV); } set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask); tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs); tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap); if (tmp) { pr_alert("could not access userspace vm86_info\n"); do_exit(SIGSEGV); } tss = &per_cpu(init_tss, get_cpu()); current->thread.sp0 = current->thread.saved_sp0; current->thread.sysenter_cs = __KERNEL_CS; load_sp0(tss, &current->thread); current->thread.saved_sp0 = 0; put_cpu(); ret = KVM86->regs32; ret->fs = current->thread.saved_fs; set_user_gs(ret, current->thread.saved_gs); return ret; } static void mark_screen_rdonly(struct mm_struct *mm) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; int i; down_write(&mm->mmap_sem); pgd = pgd_offset(mm, 0xA0000); if (pgd_none_or_clear_bad(pgd)) goto out; pud = pud_offset(pgd, 0xA0000); if (pud_none_or_clear_bad(pud)) goto out; pmd = pmd_offset(pud, 0xA0000); split_huge_page_pmd_mm(mm, 0xA0000, pmd); if (pmd_none_or_clear_bad(pmd)) goto out; pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl); for (i = 0; i < 32; i++) { if (pte_present(*pte)) set_pte(pte, pte_wrprotect(*pte)); pte++; } pte_unmap_unlock(pte, ptl); out: up_write(&mm->mmap_sem); flush_tlb(); } static int do_vm86_irq_handling(int subfunction, int irqnumber); static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk); SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86) { struct kernel_vm86_struct info; /* declare this _on top_, * this avoids wasting of stack space. * This remains on the stack until we * return to 32 bit user space. 
*/ struct task_struct *tsk = current; int tmp; if (tsk->thread.saved_sp0) return -EPERM; tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, offsetof(struct kernel_vm86_struct, vm86plus) - sizeof(info.regs)); if (tmp) return -EFAULT; memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus); info.regs32 = current_pt_regs(); tsk->thread.vm86_info = v86; do_sys_vm86(&info, tsk); return 0; /* we never return here */ } SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) { struct kernel_vm86_struct info; /* declare this _on top_, * this avoids wasting of stack space. * This remains on the stack until we * return to 32 bit user space. */ struct task_struct *tsk; int tmp; struct vm86plus_struct __user *v86; tsk = current; switch (cmd) { case VM86_REQUEST_IRQ: case VM86_FREE_IRQ: case VM86_GET_IRQ_BITS: case VM86_GET_AND_RESET_IRQ: return do_vm86_irq_handling(cmd, (int)arg); case VM86_PLUS_INSTALL_CHECK: /* * NOTE: on old vm86 stuff this will return the error * from access_ok(), because the subfunction is * interpreted as (invalid) address to vm86_struct. * So the installation check works. 
*/ return 0; } /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ if (tsk->thread.saved_sp0) return -EPERM; v86 = (struct vm86plus_struct __user *)arg; tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, offsetof(struct kernel_vm86_struct, regs32) - sizeof(info.regs)); if (tmp) return -EFAULT; info.regs32 = current_pt_regs(); info.vm86plus.is_vm86pus = 1; tsk->thread.vm86_info = (struct vm86_struct __user *)v86; do_sys_vm86(&info, tsk); return 0; /* we never return here */ } static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk) { struct tss_struct *tss; /* * make sure the vm86() system call doesn't try to do anything silly */ info->regs.pt.ds = 0; info->regs.pt.es = 0; info->regs.pt.fs = 0; #ifndef CONFIG_X86_32_LAZY_GS info->regs.pt.gs = 0; #endif /* * The flags register is also special: we cannot trust that the user * has set it up safely, so this makes sure interrupt etc flags are * inherited from protected mode. */ VEFLAGS = info->regs.pt.flags; info->regs.pt.flags &= SAFE_MASK; info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK; info->regs.pt.flags |= X86_VM_MASK; switch (info->cpu_type) { case CPU_286: tsk->thread.v86mask = 0; break; case CPU_386: tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; case CPU_486: tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; default: tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; } /* * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL) */ info->regs32->ax = VM86_SIGNAL; tsk->thread.saved_sp0 = tsk->thread.sp0; tsk->thread.saved_fs = info->regs32->fs; tsk->thread.saved_gs = get_user_gs(info->regs32); tss = &per_cpu(init_tss, get_cpu()); tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; if (cpu_has_sep) tsk->thread.sysenter_cs = 0; load_sp0(tss, &tsk->thread); put_cpu(); tsk->thread.screen_bitmap = info->screen_bitmap; if (info->flags & VM86_SCREEN_BITMAP) 
mark_screen_rdonly(tsk->mm); /*call __audit_syscall_exit since we do not exit via the normal paths */ #ifdef CONFIG_AUDITSYSCALL if (unlikely(current->audit_context)) __audit_syscall_exit(1, 0); #endif __asm__ __volatile__( "movl %0,%%esp\n\t" "movl %1,%%ebp\n\t" #ifdef CONFIG_X86_32_LAZY_GS "mov %2, %%gs\n\t" #endif "jmp resume_userspace" : /* no outputs */ :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0)); /* we never return here */ } static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval) { struct pt_regs *regs32; regs32 = save_v86_state(regs16); regs32->ax = retval; __asm__ __volatile__("movl %0,%%esp\n\t" "movl %1,%%ebp\n\t" "jmp resume_userspace" : : "r" (regs32), "r" (current_thread_info())); } static inline void set_IF(struct kernel_vm86_regs *regs) { VEFLAGS |= X86_EFLAGS_VIF; if (VEFLAGS & X86_EFLAGS_VIP) return_to_32bit(regs, VM86_STI); } static inline void clear_IF(struct kernel_vm86_regs *regs) { VEFLAGS &= ~X86_EFLAGS_VIF; } static inline void clear_TF(struct kernel_vm86_regs *regs) { regs->pt.flags &= ~X86_EFLAGS_TF; } static inline void clear_AC(struct kernel_vm86_regs *regs) { regs->pt.flags &= ~X86_EFLAGS_AC; } /* * It is correct to call set_IF(regs) from the set_vflags_* * functions. However someone forgot to call clear_IF(regs) * in the opposite case. * After the command sequence CLI PUSHF STI POPF you should * end up with interrupts disabled, but you ended up with * interrupts enabled. * ( I was testing my own changes, but the only bug I * could find was in a function I had not changed. 
) * [KD] */ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs) { set_flags(VEFLAGS, flags, current->thread.v86mask); set_flags(regs->pt.flags, flags, SAFE_MASK); if (flags & X86_EFLAGS_IF) set_IF(regs); else clear_IF(regs); } static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs) { set_flags(VFLAGS, flags, current->thread.v86mask); set_flags(regs->pt.flags, flags, SAFE_MASK); if (flags & X86_EFLAGS_IF) set_IF(regs); else clear_IF(regs); } static inline unsigned long get_vflags(struct kernel_vm86_regs *regs) { unsigned long flags = regs->pt.flags & RETURN_MASK; if (VEFLAGS & X86_EFLAGS_VIF) flags |= X86_EFLAGS_IF; flags |= X86_EFLAGS_IOPL; return flags | (VEFLAGS & current->thread.v86mask); } static inline int is_revectored(int nr, struct revectored_struct *bitmap) { __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" :"=r" (nr) :"m" (*bitmap), "r" (nr)); return nr; } #define val_byte(val, n) (((__u8 *)&val)[n]) #define pushb(base, ptr, val, err_label) \ do { \ __u8 __val = val; \ ptr--; \ if (put_user(__val, base + ptr) < 0) \ goto err_label; \ } while (0) #define pushw(base, ptr, val, err_label) \ do { \ __u16 __val = val; \ ptr--; \ if (put_user(val_byte(__val, 1), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 0), base + ptr) < 0) \ goto err_label; \ } while (0) #define pushl(base, ptr, val, err_label) \ do { \ __u32 __val = val; \ ptr--; \ if (put_user(val_byte(__val, 3), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 2), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 1), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 0), base + ptr) < 0) \ goto err_label; \ } while (0) #define popb(base, ptr, err_label) \ ({ \ __u8 __res; \ if (get_user(__res, base + ptr) < 0) \ goto err_label; \ ptr++; \ __res; \ }) #define popw(base, ptr, err_label) \ ({ \ __u16 __res; \ if (get_user(val_byte(__res, 
0), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 1), base + ptr) < 0) \ goto err_label; \ ptr++; \ __res; \ }) #define popl(base, ptr, err_label) \ ({ \ __u32 __res; \ if (get_user(val_byte(__res, 0), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 1), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 2), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 3), base + ptr) < 0) \ goto err_label; \ ptr++; \ __res; \ }) /* There are so many possible reasons for this function to return * VM86_INTx, so adding another doesn't bother me. We can expect * userspace programs to be able to handle it. (Getting a problem * in userspace is always better than an Oops anyway.) [KD] */ static void do_int(struct kernel_vm86_regs *regs, int i, unsigned char __user *ssp, unsigned short sp) { unsigned long __user *intr_ptr; unsigned long segoffs; if (regs->pt.cs == BIOSSEG) goto cannot_handle; if (is_revectored(i, &KVM86->int_revectored)) goto cannot_handle; if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) goto cannot_handle; intr_ptr = (unsigned long __user *) (i << 2); if (get_user(segoffs, intr_ptr)) goto cannot_handle; if ((segoffs >> 16) == BIOSSEG) goto cannot_handle; pushw(ssp, sp, get_vflags(regs), cannot_handle); pushw(ssp, sp, regs->pt.cs, cannot_handle); pushw(ssp, sp, IP(regs), cannot_handle); regs->pt.cs = segoffs >> 16; SP(regs) -= 6; IP(regs) = segoffs & 0xffff; clear_TF(regs); clear_IF(regs); clear_AC(regs); return; cannot_handle: return_to_32bit(regs, VM86_INTx + (i << 8)); } int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) { if (VMPI.is_vm86pus) { if ((trapno == 3) || (trapno == 1)) { KVM86->regs32->ax = VM86_TRAP + (trapno << 8); /* setting this flag forces the code in entry_32.S to the path where we call save_v86_state() and change the stack pointer to KVM86->regs32 */ set_thread_flag(TIF_NOTIFY_RESUME); return 
0; } do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); return 0; } if (trapno != 1) return 1; /* we let this handle by the calling routine */ current->thread.trap_nr = trapno; current->thread.error_code = error_code; force_sig(SIGTRAP, current); return 0; } void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) { unsigned char opcode; unsigned char __user *csp; unsigned char __user *ssp; unsigned short ip, sp, orig_flags; int data32, pref_done; #define CHECK_IF_IN_TRAP \ if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \ newflags |= X86_EFLAGS_TF #define VM86_FAULT_RETURN do { \ if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \ return_to_32bit(regs, VM86_PICRETURN); \ if (orig_flags & X86_EFLAGS_TF) \ handle_vm86_trap(regs, 0, 1); \ return; } while (0) orig_flags = *(unsigned short *)&regs->pt.flags; csp = (unsigned char __user *) (regs->pt.cs << 4); ssp = (unsigned char __user *) (regs->pt.ss << 4); sp = SP(regs); ip = IP(regs); data32 = 0; pref_done = 0; do { switch (opcode = popb(csp, ip, simulate_sigsegv)) { case 0x66: /* 32-bit data */ data32 = 1; break; case 0x67: /* 32-bit address */ break; case 0x2e: /* CS */ break; case 0x3e: /* DS */ break; case 0x26: /* ES */ break; case 0x36: /* SS */ break; case 0x65: /* GS */ break; case 0x64: /* FS */ break; case 0xf2: /* repnz */ break; case 0xf3: /* rep */ break; default: pref_done = 1; } } while (!pref_done); switch (opcode) { /* pushf */ case 0x9c: if (data32) { pushl(ssp, sp, get_vflags(regs), simulate_sigsegv); SP(regs) -= 4; } else { pushw(ssp, sp, get_vflags(regs), simulate_sigsegv); SP(regs) -= 2; } IP(regs) = ip; VM86_FAULT_RETURN; /* popf */ case 0x9d: { unsigned long newflags; if (data32) { newflags = popl(ssp, sp, simulate_sigsegv); SP(regs) += 4; } else { newflags = popw(ssp, sp, simulate_sigsegv); SP(regs) += 2; } IP(regs) = ip; CHECK_IF_IN_TRAP; if (data32) set_vflags_long(newflags, regs); else set_vflags_short(newflags, regs); 
VM86_FAULT_RETURN; } /* int xx */ case 0xcd: { int intno = popb(csp, ip, simulate_sigsegv); IP(regs) = ip; if (VMPI.vm86dbg_active) { if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3]) return_to_32bit(regs, VM86_INTx + (intno << 8)); } do_int(regs, intno, ssp, sp); return; } /* iret */ case 0xcf: { unsigned long newip; unsigned long newcs; unsigned long newflags; if (data32) { newip = popl(ssp, sp, simulate_sigsegv); newcs = popl(ssp, sp, simulate_sigsegv); newflags = popl(ssp, sp, simulate_sigsegv); SP(regs) += 12; } else { newip = popw(ssp, sp, simulate_sigsegv); newcs = popw(ssp, sp, simulate_sigsegv); newflags = popw(ssp, sp, simulate_sigsegv); SP(regs) += 6; } IP(regs) = newip; regs->pt.cs = newcs; CHECK_IF_IN_TRAP; if (data32) { set_vflags_long(newflags, regs); } else { set_vflags_short(newflags, regs); } VM86_FAULT_RETURN; } /* cli */ case 0xfa: IP(regs) = ip; clear_IF(regs); VM86_FAULT_RETURN; /* sti */ /* * Damn. This is incorrect: the 'sti' instruction should actually * enable interrupts after the /next/ instruction. Not good. * * Probably needs some horsing around with the TF flag. Aiee.. */ case 0xfb: IP(regs) = ip; set_IF(regs); VM86_FAULT_RETURN; default: return_to_32bit(regs, VM86_UNKNOWN); } return; simulate_sigsegv: /* FIXME: After a long discussion with Stas we finally * agreed, that this is wrong. Here we should * really send a SIGSEGV to the user program. * But how do we create the correct context? We * are inside a general protection fault handler * and has just returned from a page fault handler. * The correct context for the signal handler * should be a mixture of the two, but how do we * get the information? 
[KD] */ return_to_32bit(regs, VM86_UNKNOWN); } /* ---------------- vm86 special IRQ passing stuff ----------------- */ #define VM86_IRQNAME "vm86irq" static struct vm86_irqs { struct task_struct *tsk; int sig; } vm86_irqs[16]; static DEFINE_SPINLOCK(irqbits_lock); static int irqbits; #define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \ | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \ | (1 << SIGUNUSED)) static irqreturn_t irq_handler(int intno, void *dev_id) { int irq_bit; unsigned long flags; spin_lock_irqsave(&irqbits_lock, flags); irq_bit = 1 << intno; if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk) goto out; irqbits |= irq_bit; if (vm86_irqs[intno].sig) send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1); /* * IRQ will be re-enabled when user asks for the irq (whether * polling or as a result of the signal) */ disable_irq_nosync(intno); spin_unlock_irqrestore(&irqbits_lock, flags); return IRQ_HANDLED; out: spin_unlock_irqrestore(&irqbits_lock, flags); return IRQ_NONE; } static inline void free_vm86_irq(int irqnumber) { unsigned long flags; free_irq(irqnumber, NULL); vm86_irqs[irqnumber].tsk = NULL; spin_lock_irqsave(&irqbits_lock, flags); irqbits &= ~(1 << irqnumber); spin_unlock_irqrestore(&irqbits_lock, flags); } void release_vm86_irqs(struct task_struct *task) { int i; for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++) if (vm86_irqs[i].tsk == task) free_vm86_irq(i); } static inline int get_and_reset_irq(int irqnumber) { int bit; unsigned long flags; int ret = 0; if (invalid_vm86_irq(irqnumber)) return 0; if (vm86_irqs[irqnumber].tsk != current) return 0; spin_lock_irqsave(&irqbits_lock, flags); bit = irqbits & (1 << irqnumber); irqbits &= ~bit; if (bit) { enable_irq(irqnumber); ret = 1; } spin_unlock_irqrestore(&irqbits_lock, flags); return ret; } static int do_vm86_irq_handling(int subfunction, int irqnumber) { int ret; switch (subfunction) { case VM86_GET_AND_RESET_IRQ: { return get_and_reset_irq(irqnumber); } case 
VM86_GET_IRQ_BITS: { return irqbits; } case VM86_REQUEST_IRQ: { int sig = irqnumber >> 8; int irq = irqnumber & 255; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM; if (invalid_vm86_irq(irq)) return -EPERM; if (vm86_irqs[irq].tsk) return -EPERM; ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL); if (ret) return ret; vm86_irqs[irq].sig = sig; vm86_irqs[irq].tsk = current; return irq; } case VM86_FREE_IRQ: { if (invalid_vm86_irq(irqnumber)) return -EPERM; if (!vm86_irqs[irqnumber].tsk) return 0; if (vm86_irqs[irqnumber].tsk != current) return -EPERM; free_vm86_irq(irqnumber); return 0; } } return -EINVAL; }
gpl-2.0
jdkoreclipse/incrediblec_2.6.38
drivers/media/dvb/frontends/lgs8gxx.c
3214
24207
/* * Support for Legend Silicon GB20600 (a.k.a DMB-TH) demodulator * LGS8913, LGS8GL5, LGS8G75 * experimental support LGS8G42, LGS8G52 * * Copyright (C) 2007-2009 David T.L. Wong <davidtlwong@gmail.com> * Copyright (C) 2008 Sirius International (Hong Kong) Limited * Timothy Lee <timothy.lee@siriushk.com> (for initial work on LGS8GL5) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <asm/div64.h> #include <linux/firmware.h> #include "dvb_frontend.h" #include "lgs8gxx.h" #include "lgs8gxx_priv.h" #define dprintk(args...) \ do { \ if (debug) \ printk(KERN_DEBUG "lgs8gxx: " args); \ } while (0) static int debug; static int fake_signal_str = 1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); module_param(fake_signal_str, int, 0644); MODULE_PARM_DESC(fake_signal_str, "fake signal strength for LGS8913." 
"Signal strength calculation is slow.(default:on)."); /* LGS8GXX internal helper functions */ static int lgs8gxx_write_reg(struct lgs8gxx_state *priv, u8 reg, u8 data) { int ret; u8 buf[] = { reg, data }; struct i2c_msg msg = { .flags = 0, .buf = buf, .len = 2 }; msg.addr = priv->config->demod_address; if (priv->config->prod != LGS8GXX_PROD_LGS8G75 && reg >= 0xC0) msg.addr += 0x02; if (debug >= 2) dprintk("%s: reg=0x%02X, data=0x%02X\n", __func__, reg, data); ret = i2c_transfer(priv->i2c, &msg, 1); if (ret != 1) dprintk("%s: error reg=0x%x, data=0x%x, ret=%i\n", __func__, reg, data, ret); return (ret != 1) ? -1 : 0; } static int lgs8gxx_read_reg(struct lgs8gxx_state *priv, u8 reg, u8 *p_data) { int ret; u8 dev_addr; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { { .flags = 0, .buf = b0, .len = 1 }, { .flags = I2C_M_RD, .buf = b1, .len = 1 }, }; dev_addr = priv->config->demod_address; if (priv->config->prod != LGS8GXX_PROD_LGS8G75 && reg >= 0xC0) dev_addr += 0x02; msg[1].addr = msg[0].addr = dev_addr; ret = i2c_transfer(priv->i2c, msg, 2); if (ret != 2) { dprintk("%s: error reg=0x%x, ret=%i\n", __func__, reg, ret); return -1; } *p_data = b1[0]; if (debug >= 2) dprintk("%s: reg=0x%02X, data=0x%02X\n", __func__, reg, b1[0]); return 0; } static int lgs8gxx_soft_reset(struct lgs8gxx_state *priv) { lgs8gxx_write_reg(priv, 0x02, 0x00); msleep(1); lgs8gxx_write_reg(priv, 0x02, 0x01); msleep(100); return 0; } static int wait_reg_mask(struct lgs8gxx_state *priv, u8 reg, u8 mask, u8 val, u8 delay, u8 tries) { u8 t; int i; for (i = 0; i < tries; i++) { lgs8gxx_read_reg(priv, reg, &t); if ((t & mask) == val) return 0; msleep(delay); } return 1; } static int lgs8gxx_set_ad_mode(struct lgs8gxx_state *priv) { const struct lgs8gxx_config *config = priv->config; u8 if_conf; if_conf = 0x10; /* AGC output on, RF_AGC output off; */ if_conf |= ((config->ext_adc) ? 0x80 : 0x00) | ((config->if_neg_center) ? 0x04 : 0x00) | ((config->if_freq == 0) ? 
0x08 : 0x00) | /* Baseband */ ((config->adc_signed) ? 0x02 : 0x00) | ((config->if_neg_edge) ? 0x01 : 0x00); if (config->ext_adc && (config->prod == LGS8GXX_PROD_LGS8G52)) { lgs8gxx_write_reg(priv, 0xBA, 0x40); } lgs8gxx_write_reg(priv, 0x07, if_conf); return 0; } static int lgs8gxx_set_if_freq(struct lgs8gxx_state *priv, u32 freq /*in kHz*/) { u64 val; u32 v32; u32 if_clk; if_clk = priv->config->if_clk_freq; val = freq; if (freq != 0) { val <<= 32; if (if_clk != 0) do_div(val, if_clk); v32 = val & 0xFFFFFFFF; dprintk("Set IF Freq to %dkHz\n", freq); } else { v32 = 0; dprintk("Set IF Freq to baseband\n"); } dprintk("AFC_INIT_FREQ = 0x%08X\n", v32); if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { lgs8gxx_write_reg(priv, 0x08, 0xFF & (v32)); lgs8gxx_write_reg(priv, 0x09, 0xFF & (v32 >> 8)); lgs8gxx_write_reg(priv, 0x0A, 0xFF & (v32 >> 16)); lgs8gxx_write_reg(priv, 0x0B, 0xFF & (v32 >> 24)); } else { lgs8gxx_write_reg(priv, 0x09, 0xFF & (v32)); lgs8gxx_write_reg(priv, 0x0A, 0xFF & (v32 >> 8)); lgs8gxx_write_reg(priv, 0x0B, 0xFF & (v32 >> 16)); lgs8gxx_write_reg(priv, 0x0C, 0xFF & (v32 >> 24)); } return 0; } static int lgs8gxx_get_afc_phase(struct lgs8gxx_state *priv) { u64 val; u32 v32 = 0; u8 reg_addr, t; int i; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) reg_addr = 0x23; else reg_addr = 0x48; for (i = 0; i < 4; i++) { lgs8gxx_read_reg(priv, reg_addr, &t); v32 <<= 8; v32 |= t; reg_addr--; } val = v32; val *= priv->config->if_clk_freq; val >>= 32; dprintk("AFC = %u kHz\n", (u32)val); return 0; } static int lgs8gxx_set_mode_auto(struct lgs8gxx_state *priv) { u8 t; u8 prod = priv->config->prod; if (prod == LGS8GXX_PROD_LGS8913) lgs8gxx_write_reg(priv, 0xC6, 0x01); if (prod == LGS8GXX_PROD_LGS8G75) { lgs8gxx_read_reg(priv, 0x0C, &t); t &= (~0x04); lgs8gxx_write_reg(priv, 0x0C, t | 0x80); lgs8gxx_write_reg(priv, 0x39, 0x00); lgs8gxx_write_reg(priv, 0x3D, 0x04); } else if (prod == LGS8GXX_PROD_LGS8913 || prod == LGS8GXX_PROD_LGS8GL5 || prod == LGS8GXX_PROD_LGS8G42 || 
prod == LGS8GXX_PROD_LGS8G52 || prod == LGS8GXX_PROD_LGS8G54) { lgs8gxx_read_reg(priv, 0x7E, &t); lgs8gxx_write_reg(priv, 0x7E, t | 0x01); /* clear FEC self reset */ lgs8gxx_read_reg(priv, 0xC5, &t); lgs8gxx_write_reg(priv, 0xC5, t & 0xE0); } if (prod == LGS8GXX_PROD_LGS8913) { /* FEC auto detect */ lgs8gxx_write_reg(priv, 0xC1, 0x03); lgs8gxx_read_reg(priv, 0x7C, &t); t = (t & 0x8C) | 0x03; lgs8gxx_write_reg(priv, 0x7C, t); /* BER test mode */ lgs8gxx_read_reg(priv, 0xC3, &t); t = (t & 0xEF) | 0x10; lgs8gxx_write_reg(priv, 0xC3, t); } if (priv->config->prod == LGS8GXX_PROD_LGS8G52) lgs8gxx_write_reg(priv, 0xD9, 0x40); return 0; } static int lgs8gxx_set_mode_manual(struct lgs8gxx_state *priv) { int ret = 0; u8 t; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { u8 t2; lgs8gxx_read_reg(priv, 0x0C, &t); t &= (~0x80); lgs8gxx_write_reg(priv, 0x0C, t); lgs8gxx_read_reg(priv, 0x0C, &t); lgs8gxx_read_reg(priv, 0x19, &t2); if (((t&0x03) == 0x01) && (t2&0x01)) { lgs8gxx_write_reg(priv, 0x6E, 0x05); lgs8gxx_write_reg(priv, 0x39, 0x02); lgs8gxx_write_reg(priv, 0x39, 0x03); lgs8gxx_write_reg(priv, 0x3D, 0x05); lgs8gxx_write_reg(priv, 0x3E, 0x28); lgs8gxx_write_reg(priv, 0x53, 0x80); } else { lgs8gxx_write_reg(priv, 0x6E, 0x3F); lgs8gxx_write_reg(priv, 0x39, 0x00); lgs8gxx_write_reg(priv, 0x3D, 0x04); } lgs8gxx_soft_reset(priv); return 0; } /* turn off auto-detect; manual settings */ lgs8gxx_write_reg(priv, 0x7E, 0); if (priv->config->prod == LGS8GXX_PROD_LGS8913) lgs8gxx_write_reg(priv, 0xC1, 0); ret = lgs8gxx_read_reg(priv, 0xC5, &t); t = (t & 0xE0) | 0x06; lgs8gxx_write_reg(priv, 0xC5, t); lgs8gxx_soft_reset(priv); return 0; } static int lgs8gxx_is_locked(struct lgs8gxx_state *priv, u8 *locked) { int ret = 0; u8 t; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) ret = lgs8gxx_read_reg(priv, 0x13, &t); else ret = lgs8gxx_read_reg(priv, 0x4B, &t); if (ret != 0) return ret; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) *locked = ((t & 0x80) == 0x80) ? 
1 : 0; else *locked = ((t & 0xC0) == 0xC0) ? 1 : 0; return 0; } /* Wait for Code Acquisition Lock */ static int lgs8gxx_wait_ca_lock(struct lgs8gxx_state *priv, u8 *locked) { int ret = 0; u8 reg, mask, val; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { reg = 0x13; mask = 0x80; val = 0x80; } else { reg = 0x4B; mask = 0xC0; val = 0xC0; } ret = wait_reg_mask(priv, reg, mask, val, 50, 40); *locked = (ret == 0) ? 1 : 0; return 0; } static int lgs8gxx_is_autodetect_finished(struct lgs8gxx_state *priv, u8 *finished) { int ret = 0; u8 reg, mask, val; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { reg = 0x1f; mask = 0xC0; val = 0x80; } else { reg = 0xA4; mask = 0x03; val = 0x01; } ret = wait_reg_mask(priv, reg, mask, val, 10, 20); *finished = (ret == 0) ? 1 : 0; return 0; } static int lgs8gxx_autolock_gi(struct lgs8gxx_state *priv, u8 gi, u8 cpn, u8 *locked) { int err = 0; u8 ad_fini = 0; u8 t1, t2; if (gi == GI_945) dprintk("try GI 945\n"); else if (gi == GI_595) dprintk("try GI 595\n"); else if (gi == GI_420) dprintk("try GI 420\n"); if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { lgs8gxx_read_reg(priv, 0x0C, &t1); lgs8gxx_read_reg(priv, 0x18, &t2); t1 &= ~(GI_MASK); t1 |= gi; t2 &= 0xFE; t2 |= cpn ? 
0x01 : 0x00; lgs8gxx_write_reg(priv, 0x0C, t1); lgs8gxx_write_reg(priv, 0x18, t2); } else { lgs8gxx_write_reg(priv, 0x04, gi); } lgs8gxx_soft_reset(priv); err = lgs8gxx_wait_ca_lock(priv, locked); if (err || !(*locked)) return err; err = lgs8gxx_is_autodetect_finished(priv, &ad_fini); if (err != 0) return err; if (ad_fini) { dprintk("auto detect finished\n"); } else *locked = 0; return 0; } static int lgs8gxx_auto_detect(struct lgs8gxx_state *priv, u8 *detected_param, u8 *gi) { int i, j; int err = 0; u8 locked = 0, tmp_gi; dprintk("%s\n", __func__); lgs8gxx_set_mode_auto(priv); if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { lgs8gxx_write_reg(priv, 0x67, 0xAA); lgs8gxx_write_reg(priv, 0x6E, 0x3F); } else { /* Guard Interval */ lgs8gxx_write_reg(priv, 0x03, 00); } for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { tmp_gi = GI_945; err = lgs8gxx_autolock_gi(priv, GI_945, j, &locked); if (err) goto out; if (locked) goto locked; } for (j = 0; j < 2; j++) { tmp_gi = GI_420; err = lgs8gxx_autolock_gi(priv, GI_420, j, &locked); if (err) goto out; if (locked) goto locked; } tmp_gi = GI_595; err = lgs8gxx_autolock_gi(priv, GI_595, 1, &locked); if (err) goto out; if (locked) goto locked; } locked: if ((err == 0) && (locked == 1)) { u8 t; if (priv->config->prod != LGS8GXX_PROD_LGS8G75) { lgs8gxx_read_reg(priv, 0xA2, &t); *detected_param = t; } else { lgs8gxx_read_reg(priv, 0x1F, &t); *detected_param = t & 0x3F; } if (tmp_gi == GI_945) dprintk("GI 945 locked\n"); else if (tmp_gi == GI_595) dprintk("GI 595 locked\n"); else if (tmp_gi == GI_420) dprintk("GI 420 locked\n"); *gi = tmp_gi; } if (!locked) err = -1; out: return err; } static void lgs8gxx_auto_lock(struct lgs8gxx_state *priv) { s8 err; u8 gi = 0x2; u8 detected_param = 0; err = lgs8gxx_auto_detect(priv, &detected_param, &gi); if (err != 0) { dprintk("lgs8gxx_auto_detect failed\n"); } else dprintk("detected param = 0x%02X\n", detected_param); /* Apply detected parameters */ if (priv->config->prod == 
LGS8GXX_PROD_LGS8913) { u8 inter_leave_len = detected_param & TIM_MASK ; /* Fix 8913 time interleaver detection bug */ inter_leave_len = (inter_leave_len == TIM_MIDDLE) ? 0x60 : 0x40; detected_param &= CF_MASK | SC_MASK | LGS_FEC_MASK; detected_param |= inter_leave_len; } if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { u8 t; lgs8gxx_read_reg(priv, 0x19, &t); t &= 0x81; t |= detected_param << 1; lgs8gxx_write_reg(priv, 0x19, t); } else { lgs8gxx_write_reg(priv, 0x7D, detected_param); if (priv->config->prod == LGS8GXX_PROD_LGS8913) lgs8gxx_write_reg(priv, 0xC0, detected_param); } /* lgs8gxx_soft_reset(priv); */ /* Enter manual mode */ lgs8gxx_set_mode_manual(priv); switch (gi) { case GI_945: priv->curr_gi = 945; break; case GI_595: priv->curr_gi = 595; break; case GI_420: priv->curr_gi = 420; break; default: priv->curr_gi = 945; break; } } static int lgs8gxx_set_mpeg_mode(struct lgs8gxx_state *priv, u8 serial, u8 clk_pol, u8 clk_gated) { int ret = 0; u8 t, reg_addr; reg_addr = (priv->config->prod == LGS8GXX_PROD_LGS8G75) ? 0x30 : 0xC2; ret = lgs8gxx_read_reg(priv, reg_addr, &t); if (ret != 0) return ret; t &= 0xF8; t |= serial ? TS_SERIAL : TS_PARALLEL; t |= clk_pol ? TS_CLK_INVERTED : TS_CLK_NORMAL; t |= clk_gated ? 
TS_CLK_GATED : TS_CLK_FREERUN; ret = lgs8gxx_write_reg(priv, reg_addr, t); if (ret != 0) return ret; return 0; } /* A/D input peak-to-peak voltage range */ static int lgs8g75_set_adc_vpp(struct lgs8gxx_state *priv, u8 sel) { u8 r26 = 0x73, r27 = 0x90; if (priv->config->prod != LGS8GXX_PROD_LGS8G75) return 0; r26 |= (sel & 0x01) << 7; r27 |= (sel & 0x02) >> 1; lgs8gxx_write_reg(priv, 0x26, r26); lgs8gxx_write_reg(priv, 0x27, r27); return 0; } /* LGS8913 demod frontend functions */ static int lgs8913_init(struct lgs8gxx_state *priv) { u8 t; /* LGS8913 specific */ lgs8gxx_write_reg(priv, 0xc1, 0x3); lgs8gxx_read_reg(priv, 0x7c, &t); lgs8gxx_write_reg(priv, 0x7c, (t&0x8c) | 0x3); /* LGS8913 specific */ lgs8gxx_read_reg(priv, 0xc3, &t); lgs8gxx_write_reg(priv, 0xc3, t&0x10); return 0; } static int lgs8g75_init_data(struct lgs8gxx_state *priv) { const struct firmware *fw; int rc; int i; rc = request_firmware(&fw, "lgs8g75.fw", &priv->i2c->dev); if (rc) return rc; lgs8gxx_write_reg(priv, 0xC6, 0x40); lgs8gxx_write_reg(priv, 0x3D, 0x04); lgs8gxx_write_reg(priv, 0x39, 0x00); lgs8gxx_write_reg(priv, 0x3A, 0x00); lgs8gxx_write_reg(priv, 0x38, 0x00); lgs8gxx_write_reg(priv, 0x3B, 0x00); lgs8gxx_write_reg(priv, 0x38, 0x00); for (i = 0; i < fw->size; i++) { lgs8gxx_write_reg(priv, 0x38, 0x00); lgs8gxx_write_reg(priv, 0x3A, (u8)(i&0xff)); lgs8gxx_write_reg(priv, 0x3B, (u8)(i>>8)); lgs8gxx_write_reg(priv, 0x3C, fw->data[i]); } lgs8gxx_write_reg(priv, 0x38, 0x00); release_firmware(fw); return 0; } static int lgs8gxx_init(struct dvb_frontend *fe) { struct lgs8gxx_state *priv = (struct lgs8gxx_state *)fe->demodulator_priv; const struct lgs8gxx_config *config = priv->config; u8 data = 0; s8 err; dprintk("%s\n", __func__); lgs8gxx_read_reg(priv, 0, &data); dprintk("reg 0 = 0x%02X\n", data); if (config->prod == LGS8GXX_PROD_LGS8G75) lgs8g75_set_adc_vpp(priv, config->adc_vpp); /* Setup MPEG output format */ err = lgs8gxx_set_mpeg_mode(priv, config->serial_ts, config->ts_clk_pol, 
config->ts_clk_gated); if (err != 0) return -EIO; if (config->prod == LGS8GXX_PROD_LGS8913) lgs8913_init(priv); lgs8gxx_set_if_freq(priv, priv->config->if_freq); lgs8gxx_set_ad_mode(priv); return 0; } static void lgs8gxx_release(struct dvb_frontend *fe) { struct lgs8gxx_state *state = fe->demodulator_priv; dprintk("%s\n", __func__); kfree(state); } static int lgs8gxx_write(struct dvb_frontend *fe, const u8 buf[], int len) { struct lgs8gxx_state *priv = fe->demodulator_priv; if (len != 2) return -EINVAL; return lgs8gxx_write_reg(priv, buf[0], buf[1]); } static int lgs8gxx_set_fe(struct dvb_frontend *fe, struct dvb_frontend_parameters *fe_params) { struct lgs8gxx_state *priv = fe->demodulator_priv; dprintk("%s\n", __func__); /* set frequency */ if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe, fe_params); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } /* start auto lock */ lgs8gxx_auto_lock(priv); msleep(10); return 0; } static int lgs8gxx_get_fe(struct dvb_frontend *fe, struct dvb_frontend_parameters *fe_params) { dprintk("%s\n", __func__); /* TODO: get real readings from device */ /* inversion status */ fe_params->inversion = INVERSION_OFF; /* bandwidth */ fe_params->u.ofdm.bandwidth = BANDWIDTH_8_MHZ; fe_params->u.ofdm.code_rate_HP = FEC_AUTO; fe_params->u.ofdm.code_rate_LP = FEC_AUTO; fe_params->u.ofdm.constellation = QAM_AUTO; /* transmission mode */ fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_AUTO; /* guard interval */ fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_AUTO; /* hierarchy */ fe_params->u.ofdm.hierarchy_information = HIERARCHY_NONE; return 0; } static int lgs8gxx_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *fesettings) { /* FIXME: copy from tda1004x.c */ fesettings->min_delay_ms = 800; fesettings->step_size = 0; fesettings->max_drift = 0; return 0; } static int lgs8gxx_read_status(struct dvb_frontend *fe, fe_status_t *fe_status) { struct lgs8gxx_state *priv = 
fe->demodulator_priv; s8 ret; u8 t, locked = 0; dprintk("%s\n", __func__); *fe_status = 0; lgs8gxx_get_afc_phase(priv); lgs8gxx_is_locked(priv, &locked); if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { if (locked) *fe_status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; return 0; } ret = lgs8gxx_read_reg(priv, 0x4B, &t); if (ret != 0) return -EIO; dprintk("Reg 0x4B: 0x%02X\n", t); *fe_status = 0; if (priv->config->prod == LGS8GXX_PROD_LGS8913) { if ((t & 0x40) == 0x40) *fe_status |= FE_HAS_SIGNAL | FE_HAS_CARRIER; if ((t & 0x80) == 0x80) *fe_status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; } else { if ((t & 0x80) == 0x80) *fe_status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; } /* success */ dprintk("%s: fe_status=0x%x\n", __func__, *fe_status); return 0; } static int lgs8gxx_read_signal_agc(struct lgs8gxx_state *priv, u16 *signal) { u16 v; u8 agc_lvl[2], cat; dprintk("%s()\n", __func__); lgs8gxx_read_reg(priv, 0x3F, &agc_lvl[0]); lgs8gxx_read_reg(priv, 0x3E, &agc_lvl[1]); v = agc_lvl[0]; v <<= 8; v |= agc_lvl[1]; dprintk("agc_lvl: 0x%04X\n", v); if (v < 0x100) cat = 0; else if (v < 0x190) cat = 5; else if (v < 0x2A8) cat = 4; else if (v < 0x381) cat = 3; else if (v < 0x400) cat = 2; else if (v == 0x400) cat = 1; else cat = 0; *signal = cat * 65535 / 5; return 0; } static int lgs8913_read_signal_strength(struct lgs8gxx_state *priv, u16 *signal) { u8 t; s8 ret; s16 max_strength = 0; u8 str; u16 i, gi = priv->curr_gi; dprintk("%s\n", __func__); ret = lgs8gxx_read_reg(priv, 0x4B, &t); if (ret != 0) return -EIO; if (fake_signal_str) { if ((t & 0xC0) == 0xC0) { dprintk("Fake signal strength\n"); *signal = 0x7FFF; } else *signal = 0; return 0; } dprintk("gi = %d\n", gi); for (i = 0; i < gi; i++) { if ((i & 0xFF) == 0) lgs8gxx_write_reg(priv, 0x84, 0x03 & (i >> 8)); lgs8gxx_write_reg(priv, 0x83, i & 0xFF); lgs8gxx_read_reg(priv, 0x94, &str); if (max_strength < str) max_strength = str; } *signal = 
max_strength; dprintk("%s: signal=0x%02X\n", __func__, *signal); lgs8gxx_read_reg(priv, 0x95, &t); dprintk("%s: AVG Noise=0x%02X\n", __func__, t); return 0; } static int lgs8g75_read_signal_strength(struct lgs8gxx_state *priv, u16 *signal) { u8 t; s16 v = 0; dprintk("%s\n", __func__); lgs8gxx_read_reg(priv, 0xB1, &t); v |= t; v <<= 8; lgs8gxx_read_reg(priv, 0xB0, &t); v |= t; *signal = v; dprintk("%s: signal=0x%02X\n", __func__, *signal); return 0; } static int lgs8gxx_read_signal_strength(struct dvb_frontend *fe, u16 *signal) { struct lgs8gxx_state *priv = fe->demodulator_priv; if (priv->config->prod == LGS8GXX_PROD_LGS8913) return lgs8913_read_signal_strength(priv, signal); else if (priv->config->prod == LGS8GXX_PROD_LGS8G75) return lgs8g75_read_signal_strength(priv, signal); else return lgs8gxx_read_signal_agc(priv, signal); } static int lgs8gxx_read_snr(struct dvb_frontend *fe, u16 *snr) { struct lgs8gxx_state *priv = fe->demodulator_priv; u8 t; *snr = 0; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) lgs8gxx_read_reg(priv, 0x34, &t); else lgs8gxx_read_reg(priv, 0x95, &t); dprintk("AVG Noise=0x%02X\n", t); *snr = 256 - t; *snr <<= 8; dprintk("snr=0x%x\n", *snr); return 0; } static int lgs8gxx_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { *ucblocks = 0; dprintk("%s: ucblocks=0x%x\n", __func__, *ucblocks); return 0; } static void packet_counter_start(struct lgs8gxx_state *priv) { u8 orig, t; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { lgs8gxx_read_reg(priv, 0x30, &orig); orig &= 0xE7; t = orig | 0x10; lgs8gxx_write_reg(priv, 0x30, t); t = orig | 0x18; lgs8gxx_write_reg(priv, 0x30, t); t = orig | 0x10; lgs8gxx_write_reg(priv, 0x30, t); } else { lgs8gxx_write_reg(priv, 0xC6, 0x01); lgs8gxx_write_reg(priv, 0xC6, 0x41); lgs8gxx_write_reg(priv, 0xC6, 0x01); } } static void packet_counter_stop(struct lgs8gxx_state *priv) { u8 t; if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { lgs8gxx_read_reg(priv, 0x30, &t); t &= 0xE7; lgs8gxx_write_reg(priv, 0x30, 
t); } else { lgs8gxx_write_reg(priv, 0xC6, 0x81); } } static int lgs8gxx_read_ber(struct dvb_frontend *fe, u32 *ber) { struct lgs8gxx_state *priv = fe->demodulator_priv; u8 reg_err, reg_total, t; u32 total_cnt = 0, err_cnt = 0; int i; dprintk("%s\n", __func__); packet_counter_start(priv); msleep(200); packet_counter_stop(priv); if (priv->config->prod == LGS8GXX_PROD_LGS8G75) { reg_total = 0x28; reg_err = 0x2C; } else { reg_total = 0xD0; reg_err = 0xD4; } for (i = 0; i < 4; i++) { total_cnt <<= 8; lgs8gxx_read_reg(priv, reg_total+3-i, &t); total_cnt |= t; } for (i = 0; i < 4; i++) { err_cnt <<= 8; lgs8gxx_read_reg(priv, reg_err+3-i, &t); err_cnt |= t; } dprintk("error=%d total=%d\n", err_cnt, total_cnt); if (total_cnt == 0) *ber = 0; else *ber = err_cnt * 100 / total_cnt; dprintk("%s: ber=0x%x\n", __func__, *ber); return 0; } static int lgs8gxx_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct lgs8gxx_state *priv = fe->demodulator_priv; if (priv->config->tuner_address == 0) return 0; if (enable) { u8 v = 0x80 | priv->config->tuner_address; return lgs8gxx_write_reg(priv, 0x01, v); } return lgs8gxx_write_reg(priv, 0x01, 0); } static struct dvb_frontend_ops lgs8gxx_ops = { .info = { .name = "Legend Silicon LGS8913/LGS8GXX DMB-TH", .type = FE_OFDM, .frequency_min = 474000000, .frequency_max = 858000000, .frequency_stepsize = 10000, .caps = FE_CAN_FEC_AUTO | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO }, .release = lgs8gxx_release, .init = lgs8gxx_init, .write = lgs8gxx_write, .i2c_gate_ctrl = lgs8gxx_i2c_gate_ctrl, .set_frontend = lgs8gxx_set_fe, .get_frontend = lgs8gxx_get_fe, .get_tune_settings = lgs8gxx_get_tune_settings, .read_status = lgs8gxx_read_status, .read_ber = lgs8gxx_read_ber, .read_signal_strength = lgs8gxx_read_signal_strength, .read_snr = lgs8gxx_read_snr, .read_ucblocks = lgs8gxx_read_ucblocks, }; struct dvb_frontend *lgs8gxx_attach(const struct lgs8gxx_config *config, struct i2c_adapter *i2c) { struct 
lgs8gxx_state *priv = NULL; u8 data = 0; dprintk("%s()\n", __func__); if (config == NULL || i2c == NULL) return NULL; priv = kzalloc(sizeof(struct lgs8gxx_state), GFP_KERNEL); if (priv == NULL) goto error_out; priv->config = config; priv->i2c = i2c; /* check if the demod is there */ if (lgs8gxx_read_reg(priv, 0, &data) != 0) { dprintk("%s lgs8gxx not found at i2c addr 0x%02X\n", __func__, priv->config->demod_address); goto error_out; } lgs8gxx_read_reg(priv, 1, &data); memcpy(&priv->frontend.ops, &lgs8gxx_ops, sizeof(struct dvb_frontend_ops)); priv->frontend.demodulator_priv = priv; if (config->prod == LGS8GXX_PROD_LGS8G75) lgs8g75_init_data(priv); return &priv->frontend; error_out: dprintk("%s() error_out\n", __func__); kfree(priv); return NULL; } EXPORT_SYMBOL(lgs8gxx_attach); MODULE_DESCRIPTION("Legend Silicon LGS8913/LGS8GXX DMB-TH demodulator driver"); MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>"); MODULE_LICENSE("GPL");
gpl-2.0
NEKTech-Labs/NEKTech-Linux
net/ax25/ax25_ds_subr.c
4238
5241
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/spinlock.h> #include <linux/net.h> #include <linux/gfp.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> void ax25_ds_nr_error_recovery(ax25_cb *ax25) { ax25_ds_establish_data_link(ax25); } /* * dl1bke 960114: transmit I frames on DAMA poll */ void ax25_ds_enquiry_response(ax25_cb *ax25) { ax25_cb *ax25o; struct hlist_node *node; /* Please note that neither DK4EG's nor DG2FEF's * DAMA spec mention the following behaviour as seen * with TheFirmware: * * DB0ACH->DL1BKE <RR C P R0> [DAMA] * DL1BKE->DB0ACH <I NR=0 NS=0> * DL1BKE-7->DB0PRA-6 DB0ACH <I C S3 R5> * DL1BKE->DB0ACH <RR R F R0> * * The Flexnet DAMA Master implementation apparently * insists on the "proper" AX.25 behaviour: * * DB0ACH->DL1BKE <RR C P R0> [DAMA] * DL1BKE->DB0ACH <RR R F R0> * DL1BKE->DB0ACH <I NR=0 NS=0> * DL1BKE-7->DB0PRA-6 DB0ACH <I C S3 R5> * * Flexnet refuses to send us *any* I frame if we send * a REJ in case AX25_COND_REJECT is set. It is superfluous in * this mode anyway (a RR or RNR invokes the retransmission). * Is this a Flexnet bug? 
*/ ax25_std_enquiry_response(ax25); if (!(ax25->condition & AX25_COND_PEER_RX_BUSY)) { ax25_requeue_frames(ax25); ax25_kick(ax25); } if (ax25->state == AX25_STATE_1 || ax25->state == AX25_STATE_2 || skb_peek(&ax25->ack_queue) != NULL) ax25_ds_t1_timeout(ax25); else ax25->n2count = 0; ax25_start_t3timer(ax25); ax25_ds_set_timer(ax25->ax25_dev); spin_lock(&ax25_list_lock); ax25_for_each(ax25o, node, &ax25_list) { if (ax25o == ax25) continue; if (ax25o->ax25_dev != ax25->ax25_dev) continue; if (ax25o->state == AX25_STATE_1 || ax25o->state == AX25_STATE_2) { ax25_ds_t1_timeout(ax25o); continue; } if (!(ax25o->condition & AX25_COND_PEER_RX_BUSY) && ax25o->state == AX25_STATE_3) { ax25_requeue_frames(ax25o); ax25_kick(ax25o); } if (ax25o->state == AX25_STATE_1 || ax25o->state == AX25_STATE_2 || skb_peek(&ax25o->ack_queue) != NULL) ax25_ds_t1_timeout(ax25o); /* do not start T3 for listening sockets (tnx DD8NE) */ if (ax25o->state != AX25_STATE_0) ax25_start_t3timer(ax25o); } spin_unlock(&ax25_list_lock); } void ax25_ds_establish_data_link(ax25_cb *ax25) { ax25->condition &= AX25_COND_DAMA_MODE; ax25->n2count = 0; ax25_calculate_t1(ax25); ax25_start_t1timer(ax25); ax25_stop_t2timer(ax25); ax25_start_t3timer(ax25); } /* * :::FIXME::: * This is a kludge. Not all drivers recognize kiss commands. * We need a driver level request to switch duplex mode, that does * either SCC changing, PI config or KISS as required. Currently * this request isn't reliable. 
*/ static void ax25_kiss_cmd(ax25_dev *ax25_dev, unsigned char cmd, unsigned char param) { struct sk_buff *skb; unsigned char *p; if (ax25_dev->dev == NULL) return; if ((skb = alloc_skb(2, GFP_ATOMIC)) == NULL) return; skb_reset_network_header(skb); p = skb_put(skb, 2); *p++ = cmd; *p++ = param; skb->protocol = ax25_type_trans(skb, ax25_dev->dev); dev_queue_xmit(skb); } /* * A nasty problem arises if we count the number of DAMA connections * wrong, especially when connections on the device already existed * and our network node (or the sysop) decides to turn on DAMA Master * mode. We thus flag the 'real' slave connections with * ax25->dama_slave=1 and look on every disconnect if still slave * connections exist. */ static int ax25_check_dama_slave(ax25_dev *ax25_dev) { ax25_cb *ax25; int res = 0; struct hlist_node *node; spin_lock(&ax25_list_lock); ax25_for_each(ax25, node, &ax25_list) if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) { res = 1; break; } spin_unlock(&ax25_list_lock); return res; } static void ax25_dev_dama_on(ax25_dev *ax25_dev) { if (ax25_dev == NULL) return; if (ax25_dev->dama.slave == 0) ax25_kiss_cmd(ax25_dev, 5, 1); ax25_dev->dama.slave = 1; ax25_ds_set_timer(ax25_dev); } void ax25_dev_dama_off(ax25_dev *ax25_dev) { if (ax25_dev == NULL) return; if (ax25_dev->dama.slave && !ax25_check_dama_slave(ax25_dev)) { ax25_kiss_cmd(ax25_dev, 5, 0); ax25_dev->dama.slave = 0; ax25_ds_del_timer(ax25_dev); } } void ax25_dama_on(ax25_cb *ax25) { ax25_dev_dama_on(ax25->ax25_dev); ax25->condition |= AX25_COND_DAMA_MODE; } void ax25_dama_off(ax25_cb *ax25) { ax25->condition &= ~AX25_COND_DAMA_MODE; ax25_dev_dama_off(ax25->ax25_dev); }
gpl-2.0
javelinanddart/kernel_samsung_msm8660-common-1
net/ax25/ax25_ds_subr.c
4238
5241
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/spinlock.h> #include <linux/net.h> #include <linux/gfp.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> void ax25_ds_nr_error_recovery(ax25_cb *ax25) { ax25_ds_establish_data_link(ax25); } /* * dl1bke 960114: transmit I frames on DAMA poll */ void ax25_ds_enquiry_response(ax25_cb *ax25) { ax25_cb *ax25o; struct hlist_node *node; /* Please note that neither DK4EG's nor DG2FEF's * DAMA spec mention the following behaviour as seen * with TheFirmware: * * DB0ACH->DL1BKE <RR C P R0> [DAMA] * DL1BKE->DB0ACH <I NR=0 NS=0> * DL1BKE-7->DB0PRA-6 DB0ACH <I C S3 R5> * DL1BKE->DB0ACH <RR R F R0> * * The Flexnet DAMA Master implementation apparently * insists on the "proper" AX.25 behaviour: * * DB0ACH->DL1BKE <RR C P R0> [DAMA] * DL1BKE->DB0ACH <RR R F R0> * DL1BKE->DB0ACH <I NR=0 NS=0> * DL1BKE-7->DB0PRA-6 DB0ACH <I C S3 R5> * * Flexnet refuses to send us *any* I frame if we send * a REJ in case AX25_COND_REJECT is set. It is superfluous in * this mode anyway (a RR or RNR invokes the retransmission). * Is this a Flexnet bug? 
*/ ax25_std_enquiry_response(ax25); if (!(ax25->condition & AX25_COND_PEER_RX_BUSY)) { ax25_requeue_frames(ax25); ax25_kick(ax25); } if (ax25->state == AX25_STATE_1 || ax25->state == AX25_STATE_2 || skb_peek(&ax25->ack_queue) != NULL) ax25_ds_t1_timeout(ax25); else ax25->n2count = 0; ax25_start_t3timer(ax25); ax25_ds_set_timer(ax25->ax25_dev); spin_lock(&ax25_list_lock); ax25_for_each(ax25o, node, &ax25_list) { if (ax25o == ax25) continue; if (ax25o->ax25_dev != ax25->ax25_dev) continue; if (ax25o->state == AX25_STATE_1 || ax25o->state == AX25_STATE_2) { ax25_ds_t1_timeout(ax25o); continue; } if (!(ax25o->condition & AX25_COND_PEER_RX_BUSY) && ax25o->state == AX25_STATE_3) { ax25_requeue_frames(ax25o); ax25_kick(ax25o); } if (ax25o->state == AX25_STATE_1 || ax25o->state == AX25_STATE_2 || skb_peek(&ax25o->ack_queue) != NULL) ax25_ds_t1_timeout(ax25o); /* do not start T3 for listening sockets (tnx DD8NE) */ if (ax25o->state != AX25_STATE_0) ax25_start_t3timer(ax25o); } spin_unlock(&ax25_list_lock); } void ax25_ds_establish_data_link(ax25_cb *ax25) { ax25->condition &= AX25_COND_DAMA_MODE; ax25->n2count = 0; ax25_calculate_t1(ax25); ax25_start_t1timer(ax25); ax25_stop_t2timer(ax25); ax25_start_t3timer(ax25); } /* * :::FIXME::: * This is a kludge. Not all drivers recognize kiss commands. * We need a driver level request to switch duplex mode, that does * either SCC changing, PI config or KISS as required. Currently * this request isn't reliable. 
*/ static void ax25_kiss_cmd(ax25_dev *ax25_dev, unsigned char cmd, unsigned char param) { struct sk_buff *skb; unsigned char *p; if (ax25_dev->dev == NULL) return; if ((skb = alloc_skb(2, GFP_ATOMIC)) == NULL) return; skb_reset_network_header(skb); p = skb_put(skb, 2); *p++ = cmd; *p++ = param; skb->protocol = ax25_type_trans(skb, ax25_dev->dev); dev_queue_xmit(skb); } /* * A nasty problem arises if we count the number of DAMA connections * wrong, especially when connections on the device already existed * and our network node (or the sysop) decides to turn on DAMA Master * mode. We thus flag the 'real' slave connections with * ax25->dama_slave=1 and look on every disconnect if still slave * connections exist. */ static int ax25_check_dama_slave(ax25_dev *ax25_dev) { ax25_cb *ax25; int res = 0; struct hlist_node *node; spin_lock(&ax25_list_lock); ax25_for_each(ax25, node, &ax25_list) if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) { res = 1; break; } spin_unlock(&ax25_list_lock); return res; } static void ax25_dev_dama_on(ax25_dev *ax25_dev) { if (ax25_dev == NULL) return; if (ax25_dev->dama.slave == 0) ax25_kiss_cmd(ax25_dev, 5, 1); ax25_dev->dama.slave = 1; ax25_ds_set_timer(ax25_dev); } void ax25_dev_dama_off(ax25_dev *ax25_dev) { if (ax25_dev == NULL) return; if (ax25_dev->dama.slave && !ax25_check_dama_slave(ax25_dev)) { ax25_kiss_cmd(ax25_dev, 5, 0); ax25_dev->dama.slave = 0; ax25_ds_del_timer(ax25_dev); } } void ax25_dama_on(ax25_cb *ax25) { ax25_dev_dama_on(ax25->ax25_dev); ax25->condition |= AX25_COND_DAMA_MODE; } void ax25_dama_off(ax25_cb *ax25) { ax25->condition &= ~AX25_COND_DAMA_MODE; ax25_dev_dama_off(ax25->ax25_dev); }
gpl-2.0
cnexus/kernel_d2spr_tw
arch/cris/arch-v10/kernel/kgdb.c
4494
50519
/*!************************************************************************** *! *! FILE NAME : kgdb.c *! *! DESCRIPTION: Implementation of the gdb stub with respect to ETRAX 100. *! It is a mix of arch/m68k/kernel/kgdb.c and cris_stub.c. *! *!--------------------------------------------------------------------------- *! HISTORY *! *! DATE NAME CHANGES *! ---- ---- ------- *! Apr 26 1999 Hendrik Ruijter Initial version. *! May 6 1999 Hendrik Ruijter Removed call to strlen in libc and removed *! struct assignment as it generates calls to *! memcpy in libc. *! Jun 17 1999 Hendrik Ruijter Added gdb 4.18 support. 'X', 'qC' and 'qL'. *! Jul 21 1999 Bjorn Wesen eLinux port *! *!--------------------------------------------------------------------------- *! *! (C) Copyright 1999, Axis Communications AB, LUND, SWEDEN *! *!**************************************************************************/ /* @(#) cris_stub.c 1.3 06/17/99 */ /* * kgdb usage notes: * ----------------- * * If you select CONFIG_ETRAX_KGDB in the configuration, the kernel will be * built with different gcc flags: "-g" is added to get debug infos, and * "-fomit-frame-pointer" is omitted to make debugging easier. Since the * resulting kernel will be quite big (approx. > 7 MB), it will be stripped * before compresion. Such a kernel will behave just as usually, except if * given a "debug=<device>" command line option. (Only serial devices are * allowed for <device>, i.e. no printers or the like; possible values are * machine depedend and are the same as for the usual debug device, the one * for logging kernel messages.) If that option is given and the device can be * initialized, the kernel will connect to the remote gdb in trap_init(). The * serial parameters are fixed to 8N1 and 115200 bps, for easyness of * implementation. * * To start a debugging session, start that gdb with the debugging kernel * image (the one with the symbols, vmlinux.debug) named on the command line. 
* This file will be used by gdb to get symbol and debugging infos about the * kernel. Next, select remote debug mode by * target remote <device> * where <device> is the name of the serial device over which the debugged * machine is connected. Maybe you have to adjust the baud rate by * set remotebaud <rate> * or also other parameters with stty: * shell stty ... </dev/... * If the kernel to debug has already booted, it waited for gdb and now * connects, and you'll see a breakpoint being reported. If the kernel isn't * running yet, start it now. The order of gdb and the kernel doesn't matter. * Another thing worth knowing about in the getting-started phase is how to * debug the remote protocol itself. This is activated with * set remotedebug 1 * gdb will then print out each packet sent or received. You'll also get some * messages about the gdb stub on the console of the debugged machine. * * If all that works, you can use lots of the usual debugging techniques on * the kernel, e.g. inspecting and changing variables/memory, setting * breakpoints, single stepping and so on. It's also possible to interrupt the * debugged kernel by pressing C-c in gdb. Have fun! :-) * * The gdb stub is entered (and thus the remote gdb gets control) in the * following situations: * * - If breakpoint() is called. This is just after kgdb initialization, or if * a breakpoint() call has been put somewhere into the kernel source. * (Breakpoints can of course also be set the usual way in gdb.) * In eLinux, we call breakpoint() in init/main.c after IRQ initialization. * * - If there is a kernel exception, i.e. bad_super_trap() or die_if_kernel() * are entered. All the CPU exceptions are mapped to (more or less..., see * the hard_trap_info array below) appropriate signal, which are reported * to gdb. die_if_kernel() is usually called after some kind of access * error and thus is reported as SIGSEGV. * * - When panic() is called. This is reported as SIGABRT. 
* * - If C-c is received over the serial line, which is treated as * SIGINT. * * Of course, all these signals are just faked for gdb, since there is no * signal concept as such for the kernel. It also isn't possible --obviously-- * to set signal handlers from inside gdb, or restart the kernel with a * signal. * * Current limitations: * * - While the kernel is stopped, interrupts are disabled for safety reasons * (i.e., variables not changing magically or the like). But this also * means that the clock isn't running anymore, and that interrupts from the * hardware may get lost/not be served in time. This can cause some device * errors... * * - When single-stepping, only one instruction of the current thread is * executed, but interrupts are allowed for that time and will be serviced * if pending. Be prepared for that. * * - All debugging happens in kernel virtual address space. There's no way to * access physical memory not mapped in kernel space, or to access user * space. A way to work around this is using get_user_long & Co. in gdb * expressions, but only for the current process. * * - Interrupting the kernel only works if interrupts are currently allowed, * and the interrupt of the serial line isn't blocked by some other means * (IPL too high, disabled, ...) * * - The gdb stub is currently not reentrant, i.e. errors that happen therein * (e.g. accessing invalid memory) may not be caught correctly. This could * be removed in future by introducing a stack of struct registers. * */ /* * To enable debugger support, two things need to happen. One, a * call to kgdb_init() is necessary in order to allow any breakpoints * or error conditions to be properly intercepted and reported to gdb. * Two, a breakpoint needs to be generated to begin communication. This * is most easily accomplished by a call to breakpoint(). 
* * The following gdb commands are supported: * * command function Return value * * g return the value of the CPU registers hex data or ENN * G set the value of the CPU registers OK or ENN * * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN * * c Resume at current address SNN ( signal NN) * cAA..AA Continue at address AA..AA SNN * * s Step one instruction SNN * sAA..AA Step one instruction from AA..AA SNN * * k kill * * ? What was the last sigval ? SNN (signal NN) * * bBB..BB Set baud rate to BB..BB OK or BNN, then sets * baud rate * * All commands and responses are sent with a packet which includes a * checksum. A packet consists of * * $<packet info>#<checksum>. * * where * <packet info> :: <characters representing the command or response> * <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>> * * When a packet is received, it is first acknowledged with either '+' or '-'. * '+' indicates a successful transfer. '-' indicates a failed transfer. * * Example: * * Host: Reply: * $m0,10#2a +$00010203040506070809101112131415#42 * */ #include <linux/string.h> #include <linux/signal.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/linkage.h> #include <linux/reboot.h> #include <asm/setup.h> #include <asm/ptrace.h> #include <arch/svinto.h> #include <asm/irq.h> static int kgdb_started = 0; /********************************* Register image ****************************/ /* Use the order of registers as defined in "AXIS ETRAX CRIS Programmer's Reference", p. 1-1, with the additional register definitions of the ETRAX 100LX in cris-opc.h. There are 16 general 32-bit registers, R0-R15, where R14 is the stack pointer, SP, and R15 is the program counter, PC. There are 16 special registers, P0-P15, where three of the unimplemented registers, P0, P4 and P8, are reserved as zero-registers. A read from any of these registers returns zero and a write has no effect. 
*/ typedef struct register_image { /* Offset */ unsigned int r0; /* 0x00 */ unsigned int r1; /* 0x04 */ unsigned int r2; /* 0x08 */ unsigned int r3; /* 0x0C */ unsigned int r4; /* 0x10 */ unsigned int r5; /* 0x14 */ unsigned int r6; /* 0x18 */ unsigned int r7; /* 0x1C */ unsigned int r8; /* 0x20 Frame pointer */ unsigned int r9; /* 0x24 */ unsigned int r10; /* 0x28 */ unsigned int r11; /* 0x2C */ unsigned int r12; /* 0x30 */ unsigned int r13; /* 0x34 */ unsigned int sp; /* 0x38 Stack pointer */ unsigned int pc; /* 0x3C Program counter */ unsigned char p0; /* 0x40 8-bit zero-register */ unsigned char vr; /* 0x41 Version register */ unsigned short p4; /* 0x42 16-bit zero-register */ unsigned short ccr; /* 0x44 Condition code register */ unsigned int mof; /* 0x46 Multiply overflow register */ unsigned int p8; /* 0x4A 32-bit zero-register */ unsigned int ibr; /* 0x4E Interrupt base register */ unsigned int irp; /* 0x52 Interrupt return pointer */ unsigned int srp; /* 0x56 Subroutine return pointer */ unsigned int bar; /* 0x5A Breakpoint address register */ unsigned int dccr; /* 0x5E Double condition code register */ unsigned int brp; /* 0x62 Breakpoint return pointer (pc in caller) */ unsigned int usp; /* 0x66 User mode stack pointer */ } registers; /************** Prototypes for local library functions ***********************/ /* Copy of strcpy from libc. */ static char *gdb_cris_strcpy (char *s1, const char *s2); /* Copy of strlen from libc. */ static int gdb_cris_strlen (const char *s); /* Copy of memchr from libc. */ static void *gdb_cris_memchr (const void *s, int c, int n); /* Copy of strtol from libc. Does only support base 16. */ static int gdb_cris_strtol (const char *s, char **endptr, int base); /********************** Prototypes for local functions. **********************/ /* Copy the content of a register image into another. The size n is the size of the register image. Due to struct assignment generation of memcpy in libc. 
*/ static void copy_registers (registers *dptr, registers *sptr, int n); /* Copy the stored registers from the stack. Put the register contents of thread thread_id in the struct reg. */ static void copy_registers_from_stack (int thread_id, registers *reg); /* Copy the registers to the stack. Put the register contents of thread thread_id from struct reg to the stack. */ static void copy_registers_to_stack (int thread_id, registers *reg); /* Write a value to a specified register regno in the register image of the current thread. */ static int write_register (int regno, char *val); /* Write a value to a specified register in the stack of a thread other than the current thread. */ static write_stack_register (int thread_id, int regno, char *valptr); /* Read a value from a specified register in the register image. Returns the status of the read operation. The register value is returned in valptr. */ static int read_register (char regno, unsigned int *valptr); /* Serial port, reads one character. ETRAX 100 specific. from debugport.c */ int getDebugChar (void); /* Serial port, writes one character. ETRAX 100 specific. from debugport.c */ void putDebugChar (int val); void enableDebugIRQ (void); /* Returns the integer equivalent of a hexadecimal character. */ static int hex (char ch); /* Convert the memory, pointed to by mem into hexadecimal representation. Put the result in buf, and return a pointer to the last character in buf (null). */ static char *mem2hex (char *buf, unsigned char *mem, int count); /* Convert the array, in hexadecimal representation, pointed to by buf into binary representation. Put the result in mem, and return a pointer to the character after the last byte written. */ static unsigned char *hex2mem (unsigned char *mem, char *buf, int count); /* Put the content of the array, in binary representation, pointed to by buf into memory pointed to by mem, and return a pointer to the character after the last byte written. 
*/ static unsigned char *bin2mem (unsigned char *mem, unsigned char *buf, int count); /* Await the sequence $<data>#<checksum> and store <data> in the array buffer returned. */ static void getpacket (char *buffer); /* Send $<data>#<checksum> from the <data> in the array buffer. */ static void putpacket (char *buffer); /* Build and send a response packet in order to inform the host the stub is stopped. */ static void stub_is_stopped (int sigval); /* All expected commands are sent from remote.c. Send a response according to the description in remote.c. */ static void handle_exception (int sigval); /* Performs a complete re-start from scratch. ETRAX specific. */ static void kill_restart (void); /******************** Prototypes for global functions. ***********************/ /* The string str is prepended with the GDB printout token and sent. */ void putDebugString (const unsigned char *str, int length); /* used by etrax100ser.c */ /* The hook for both static (compiled) and dynamic breakpoints set by GDB. ETRAX 100 specific. */ void handle_breakpoint (void); /* used by irq.c */ /* The hook for an interrupt generated by GDB. ETRAX 100 specific. */ void handle_interrupt (void); /* used by irq.c */ /* A static breakpoint to be used at startup. */ void breakpoint (void); /* called by init/main.c */ /* From osys_int.c, executing_task contains the number of the current executing task in osys. Does not know of object-oriented threads. */ extern unsigned char executing_task; /* The number of characters used for a 64 bit thread identifier. */ #define HEXCHARS_IN_THREAD_ID 16 /* Avoid warning as the internal_stack is not used in the C-code. */ #define USEDVAR(name) { if (name) { ; } } #define USEDFUN(name) { void (*pf)(void) = (void *)name; USEDVAR(pf) } /********************************** Packet I/O ******************************/ /* BUFMAX defines the maximum number of characters in inbound/outbound buffers */ #define BUFMAX 512 /* Run-length encoding maximum length. 
Send 64 at most. */ #define RUNLENMAX 64 /* The inbound/outbound buffers used in packet I/O */ static char remcomInBuffer[BUFMAX]; static char remcomOutBuffer[BUFMAX]; /* Error and warning messages. */ enum error_type { SUCCESS, E01, E02, E03, E04, E05, E06, E07 }; static char *error_message[] = { "", "E01 Set current or general thread - H[c,g] - internal error.", "E02 Change register content - P - cannot change read-only register.", "E03 Thread is not alive.", /* T, not used. */ "E04 The command is not supported - [s,C,S,!,R,d,r] - internal error.", "E05 Change register content - P - the register is not implemented..", "E06 Change memory content - M - internal error.", "E07 Change register content - P - the register is not stored on the stack" }; /********************************* Register image ****************************/ /* Use the order of registers as defined in "AXIS ETRAX CRIS Programmer's Reference", p. 1-1, with the additional register definitions of the ETRAX 100LX in cris-opc.h. There are 16 general 32-bit registers, R0-R15, where R14 is the stack pointer, SP, and R15 is the program counter, PC. There are 16 special registers, P0-P15, where three of the unimplemented registers, P0, P4 and P8, are reserved as zero-registers. A read from any of these registers returns zero and a write has no effect. */ enum register_name { R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, R13, SP, PC, P0, VR, P2, P3, P4, CCR, P6, MOF, P8, IBR, IRP, SRP, BAR, DCCR, BRP, USP }; /* The register sizes of the registers in register_name. An unimplemented register is designated by size 0 in this array. */ static int register_size[] = { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 1, 1, 0, 0, 2, 2, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4 }; /* Contains the register image of the executing thread in the assembler part of the code in order to avoid horrible addressing modes. */ static registers reg; /* FIXME: Should this be used? Delete otherwise. 
*/ /* Contains the assumed consistency state of the register image. Uses the enum error_type for state information. */ static int consistency_status = SUCCESS; /********************************** Handle exceptions ************************/ /* The variable reg contains the register image associated with the current_thread_c variable. It is a complete register image created at entry. The reg_g contains a register image of a task where the general registers are taken from the stack and all special registers are taken from the executing task. It is associated with current_thread_g and used in order to provide access mainly for 'g', 'G' and 'P'. */ /* Need two task id pointers in order to handle Hct and Hgt commands. */ static int current_thread_c = 0; static int current_thread_g = 0; /* Need two register images in order to handle Hct and Hgt commands. The variable reg_g is in addition to reg above. */ static registers reg_g; /********************************** Breakpoint *******************************/ /* Use an internal stack in the breakpoint and interrupt response routines */ #define INTERNAL_STACK_SIZE 1024 static char internal_stack[INTERNAL_STACK_SIZE]; /* Due to the breakpoint return pointer, a state variable is needed to keep track of whether it is a static (compiled) or dynamic (gdb-invoked) breakpoint to be handled. A static breakpoint uses the content of register BRP as it is whereas a dynamic breakpoint requires subtraction with 2 in order to execute the instruction. The first breakpoint is static. */ static unsigned char is_dyn_brkp = 0; /********************************* String library ****************************/ /* Single-step over library functions creates trap loops. */ /* Copy char s2[] to s1[]. */ static char* gdb_cris_strcpy (char *s1, const char *s2) { char *s = s1; for (s = s1; (*s++ = *s2++) != '\0'; ) ; return (s1); } /* Find length of s[]. 
*/ static int gdb_cris_strlen (const char *s) { const char *sc; for (sc = s; *sc != '\0'; sc++) ; return (sc - s); } /* Find first occurrence of c in s[n]. */ static void* gdb_cris_memchr (const void *s, int c, int n) { const unsigned char uc = c; const unsigned char *su; for (su = s; 0 < n; ++su, --n) if (*su == uc) return ((void *)su); return (NULL); } /******************************* Standard library ****************************/ /* Single-step over library functions creates trap loops. */ /* Convert string to long. */ static int gdb_cris_strtol (const char *s, char **endptr, int base) { char *s1; char *sd; int x = 0; for (s1 = (char*)s; (sd = gdb_cris_memchr(hex_asc, *s1, base)) != NULL; ++s1) x = x * base + (sd - hex_asc); if (endptr) { /* Unconverted suffix is stored in endptr unless endptr is NULL. */ *endptr = s1; } return x; } /********************************* Register image ****************************/ /* Copy the content of a register image into another. The size n is the size of the register image. Due to struct assignment generation of memcpy in libc. */ static void copy_registers (registers *dptr, registers *sptr, int n) { unsigned char *dreg; unsigned char *sreg; for (dreg = (unsigned char*)dptr, sreg = (unsigned char*)sptr; n > 0; n--) *dreg++ = *sreg++; } #ifdef PROCESS_SUPPORT /* Copy the stored registers from the stack. Put the register contents of thread thread_id in the struct reg. */ static void copy_registers_from_stack (int thread_id, registers *regptr) { int j; stack_registers *s = (stack_registers *)stack_list[thread_id]; unsigned int *d = (unsigned int *)regptr; for (j = 13; j >= 0; j--) *d++ = s->r[j]; regptr->sp = (unsigned int)stack_list[thread_id]; regptr->pc = s->pc; regptr->dccr = s->dccr; regptr->srp = s->srp; } /* Copy the registers to the stack. Put the register contents of thread thread_id from struct reg to the stack. 
*/ static void copy_registers_to_stack (int thread_id, registers *regptr) { int i; stack_registers *d = (stack_registers *)stack_list[thread_id]; unsigned int *s = (unsigned int *)regptr; for (i = 0; i < 14; i++) { d->r[i] = *s++; } d->pc = regptr->pc; d->dccr = regptr->dccr; d->srp = regptr->srp; } #endif /* Write a value to a specified register in the register image of the current thread. Returns status code SUCCESS, E02 or E05. */ static int write_register (int regno, char *val) { int status = SUCCESS; registers *current_reg = &reg; if (regno >= R0 && regno <= PC) { /* 32-bit register with simple offset. */ hex2mem ((unsigned char *)current_reg + regno * sizeof(unsigned int), val, sizeof(unsigned int)); } else if (regno == P0 || regno == VR || regno == P4 || regno == P8) { /* Do not support read-only registers. */ status = E02; } else if (regno == CCR) { /* 16 bit register with complex offset. (P4 is read-only, P6 is not implemented, and P7 (MOF) is 32 bits in ETRAX 100LX. */ hex2mem ((unsigned char *)&(current_reg->ccr) + (regno-CCR) * sizeof(unsigned short), val, sizeof(unsigned short)); } else if (regno >= MOF && regno <= USP) { /* 32 bit register with complex offset. (P8 has been taken care of.) */ hex2mem ((unsigned char *)&(current_reg->ibr) + (regno-IBR) * sizeof(unsigned int), val, sizeof(unsigned int)); } else { /* Do not support nonexisting or unimplemented registers (P2, P3, and P6). */ status = E05; } return status; } #ifdef PROCESS_SUPPORT /* Write a value to a specified register in the stack of a thread other than the current thread. Returns status code SUCCESS or E07. 
*/ static int write_stack_register (int thread_id, int regno, char *valptr) { int status = SUCCESS; stack_registers *d = (stack_registers *)stack_list[thread_id]; unsigned int val; hex2mem ((unsigned char *)&val, valptr, sizeof(unsigned int)); if (regno >= R0 && regno < SP) { d->r[regno] = val; } else if (regno == SP) { stack_list[thread_id] = val; } else if (regno == PC) { d->pc = val; } else if (regno == SRP) { d->srp = val; } else if (regno == DCCR) { d->dccr = val; } else { /* Do not support registers in the current thread. */ status = E07; } return status; } #endif /* Read a value from a specified register in the register image. Returns the value in the register or -1 for non-implemented registers. Should check consistency_status after a call which may be E05 after changes in the implementation. */ static int read_register (char regno, unsigned int *valptr) { registers *current_reg = &reg; if (regno >= R0 && regno <= PC) { /* 32-bit register with simple offset. */ *valptr = *(unsigned int *)((char *)current_reg + regno * sizeof(unsigned int)); return SUCCESS; } else if (regno == P0 || regno == VR) { /* 8 bit register with complex offset. */ *valptr = (unsigned int)(*(unsigned char *) ((char *)&(current_reg->p0) + (regno-P0) * sizeof(char))); return SUCCESS; } else if (regno == P4 || regno == CCR) { /* 16 bit register with complex offset. */ *valptr = (unsigned int)(*(unsigned short *) ((char *)&(current_reg->p4) + (regno-P4) * sizeof(unsigned short))); return SUCCESS; } else if (regno >= MOF && regno <= USP) { /* 32 bit register with complex offset. */ *valptr = *(unsigned int *)((char *)&(current_reg->p8) + (regno-P8) * sizeof(unsigned int)); return SUCCESS; } else { /* Do not support nonexisting or unimplemented registers (P2, P3, and P6). */ consistency_status = E05; return E05; } } /********************************** Packet I/O ******************************/ /* Returns the integer equivalent of a hexadecimal character. 
*/ static int hex (char ch) { if ((ch >= 'a') && (ch <= 'f')) return (ch - 'a' + 10); if ((ch >= '0') && (ch <= '9')) return (ch - '0'); if ((ch >= 'A') && (ch <= 'F')) return (ch - 'A' + 10); return (-1); } /* Convert the memory, pointed to by mem into hexadecimal representation. Put the result in buf, and return a pointer to the last character in buf (null). */ static int do_printk = 0; static char * mem2hex(char *buf, unsigned char *mem, int count) { int i; int ch; if (mem == NULL) { /* Bogus read from m0. FIXME: What constitutes a valid address? */ for (i = 0; i < count; i++) { *buf++ = '0'; *buf++ = '0'; } } else { /* Valid mem address. */ for (i = 0; i < count; i++) { ch = *mem++; buf = pack_hex_byte(buf, ch); } } /* Terminate properly. */ *buf = '\0'; return (buf); } /* Convert the array, in hexadecimal representation, pointed to by buf into binary representation. Put the result in mem, and return a pointer to the character after the last byte written. */ static unsigned char* hex2mem (unsigned char *mem, char *buf, int count) { int i; unsigned char ch; for (i = 0; i < count; i++) { ch = hex (*buf++) << 4; ch = ch + hex (*buf++); *mem++ = ch; } return (mem); } /* Put the content of the array, in binary representation, pointed to by buf into memory pointed to by mem, and return a pointer to the character after the last byte written. Gdb will escape $, #, and the escape char (0x7d). */ static unsigned char* bin2mem (unsigned char *mem, unsigned char *buf, int count) { int i; unsigned char *next; for (i = 0; i < count; i++) { /* Check for any escaped characters. Be paranoid and only unescape chars that should be escaped. */ if (*buf == 0x7d) { next = buf + 1; if (*next == 0x3 || *next == 0x4 || *next == 0x5D) /* #, $, ESC */ { buf++; *buf += 0x20; } } *mem++ = *buf++; } return (mem); } /* Await the sequence $<data>#<checksum> and store <data> in the array buffer returned. 
*/ static void getpacket (char *buffer) { unsigned char checksum; unsigned char xmitcsum; int i; int count; char ch; do { while ((ch = getDebugChar ()) != '$') /* Wait for the start character $ and ignore all other characters */; checksum = 0; xmitcsum = -1; count = 0; /* Read until a # or the end of the buffer is reached */ while (count < BUFMAX) { ch = getDebugChar (); if (ch == '#') break; checksum = checksum + ch; buffer[count] = ch; count = count + 1; } buffer[count] = '\0'; if (ch == '#') { xmitcsum = hex (getDebugChar ()) << 4; xmitcsum += hex (getDebugChar ()); if (checksum != xmitcsum) { /* Wrong checksum */ putDebugChar ('-'); } else { /* Correct checksum */ putDebugChar ('+'); /* If sequence characters are received, reply with them */ if (buffer[2] == ':') { putDebugChar (buffer[0]); putDebugChar (buffer[1]); /* Remove the sequence characters from the buffer */ count = gdb_cris_strlen (buffer); for (i = 3; i <= count; i++) buffer[i - 3] = buffer[i]; } } } } while (checksum != xmitcsum); } /* Send $<data>#<checksum> from the <data> in the array buffer. */ static void putpacket(char *buffer) { int checksum; int runlen; int encode; do { char *src = buffer; putDebugChar ('$'); checksum = 0; while (*src) { /* Do run length encoding */ putDebugChar (*src); checksum += *src; runlen = 0; while (runlen < RUNLENMAX && *src == src[runlen]) { runlen++; } if (runlen > 3) { /* Got a useful amount */ putDebugChar ('*'); checksum += '*'; encode = runlen + ' ' - 4; putDebugChar (encode); checksum += encode; src += runlen; } else { src++; } } putDebugChar('#'); putDebugChar(hex_asc_hi(checksum)); putDebugChar(hex_asc_lo(checksum)); } while(kgdb_started && (getDebugChar() != '+')); } /* The string str is prepended with the GDB printout token and sent. Required in traditional implementations. 
*/ void putDebugString (const unsigned char *str, int length) { remcomOutBuffer[0] = 'O'; mem2hex(&remcomOutBuffer[1], (unsigned char *)str, length); putpacket(remcomOutBuffer); } /********************************** Handle exceptions ************************/ /* Build and send a response packet in order to inform the host the stub is stopped. TAAn...:r...;n...:r...;n...:r...; AA = signal number n... = register number (hex) r... = register contents n... = `thread' r... = thread process ID. This is a hex integer. n... = other string not starting with valid hex digit. gdb should ignore this n,r pair and go on to the next. This way we can extend the protocol. */ static void stub_is_stopped(int sigval) { char *ptr = remcomOutBuffer; int regno; unsigned int reg_cont; int status; /* Send trap type (converted to signal) */ *ptr++ = 'T'; ptr = pack_hex_byte(ptr, sigval); /* Send register contents. We probably only need to send the * PC, frame pointer and stack pointer here. Other registers will be * explicitly asked for. But for now, send all. */ for (regno = R0; regno <= USP; regno++) { /* Store n...:r...; for the registers in the buffer. */ status = read_register (regno, &reg_cont); if (status == SUCCESS) { ptr = pack_hex_byte(ptr, regno); *ptr++ = ':'; ptr = mem2hex(ptr, (unsigned char *)&reg_cont, register_size[regno]); *ptr++ = ';'; } } #ifdef PROCESS_SUPPORT /* Store the registers of the executing thread. Assume that both step, continue, and register content requests are with respect to this thread. The executing task is from the operating system scheduler. */ current_thread_c = executing_task; current_thread_g = executing_task; /* A struct assignment translates into a libc memcpy call. Avoid all libc functions in order to prevent recursive break points. */ copy_registers (&reg_g, &reg, sizeof(registers)); /* Store thread:r...; with the executing task TID. 
*/ gdb_cris_strcpy (&remcomOutBuffer[pos], "thread:"); pos += gdb_cris_strlen ("thread:"); remcomOutBuffer[pos++] = hex_asc_hi(executing_task); remcomOutBuffer[pos++] = hex_asc_lo(executing_task); gdb_cris_strcpy (&remcomOutBuffer[pos], ";"); #endif /* null-terminate and send it off */ *ptr = 0; putpacket (remcomOutBuffer); } /* All expected commands are sent from remote.c. Send a response according to the description in remote.c. */ static void handle_exception (int sigval) { /* Avoid warning of not used. */ USEDFUN(handle_exception); USEDVAR(internal_stack[0]); /* Send response. */ stub_is_stopped (sigval); for (;;) { remcomOutBuffer[0] = '\0'; getpacket (remcomInBuffer); switch (remcomInBuffer[0]) { case 'g': /* Read registers: g Success: Each byte of register data is described by two hex digits. Registers are in the internal order for GDB, and the bytes in a register are in the same order the machine uses. Failure: void. */ { #ifdef PROCESS_SUPPORT /* Use the special register content in the executing thread. */ copy_registers (&reg_g, &reg, sizeof(registers)); /* Replace the content available on the stack. */ if (current_thread_g != executing_task) { copy_registers_from_stack (current_thread_g, &reg_g); } mem2hex ((unsigned char *)remcomOutBuffer, (unsigned char *)&reg_g, sizeof(registers)); #else mem2hex(remcomOutBuffer, (char *)&reg, sizeof(registers)); #endif } break; case 'G': /* Write registers. GXX..XX Each byte of register data is described by two hex digits. Success: OK Failure: void. */ #ifdef PROCESS_SUPPORT hex2mem ((unsigned char *)&reg_g, &remcomInBuffer[1], sizeof(registers)); if (current_thread_g == executing_task) { copy_registers (&reg, &reg_g, sizeof(registers)); } else { copy_registers_to_stack(current_thread_g, &reg_g); } #else hex2mem((char *)&reg, &remcomInBuffer[1], sizeof(registers)); #endif gdb_cris_strcpy (remcomOutBuffer, "OK"); break; case 'P': /* Write register. Pn...=r... 
Write register n..., hex value without 0x, with value r..., which contains a hex value without 0x and two hex digits for each byte in the register (target byte order). P1f=11223344 means set register 31 to 44332211. Success: OK Failure: E02, E05 */ { char *suffix; int regno = gdb_cris_strtol (&remcomInBuffer[1], &suffix, 16); int status; #ifdef PROCESS_SUPPORT if (current_thread_g != executing_task) status = write_stack_register (current_thread_g, regno, suffix+1); else #endif status = write_register (regno, suffix+1); switch (status) { case E02: /* Do not support read-only registers. */ gdb_cris_strcpy (remcomOutBuffer, error_message[E02]); break; case E05: /* Do not support non-existing registers. */ gdb_cris_strcpy (remcomOutBuffer, error_message[E05]); break; case E07: /* Do not support non-existing registers on the stack. */ gdb_cris_strcpy (remcomOutBuffer, error_message[E07]); break; default: /* Valid register number. */ gdb_cris_strcpy (remcomOutBuffer, "OK"); break; } } break; case 'm': /* Read from memory. mAA..AA,LLLL AA..AA is the address and LLLL is the length. Success: XX..XX is the memory content. Can be fewer bytes than requested if only part of the data may be read. m6000120a,6c means retrieve 108 byte from base address 6000120a. Failure: void. */ { char *suffix; unsigned char *addr = (unsigned char *)gdb_cris_strtol(&remcomInBuffer[1], &suffix, 16); int length = gdb_cris_strtol(suffix+1, 0, 16); mem2hex(remcomOutBuffer, addr, length); } break; case 'X': /* Write to memory. XAA..AA,LLLL:XX..XX AA..AA is the start address, LLLL is the number of bytes, and XX..XX is the binary data. Success: OK Failure: void. */ case 'M': /* Write to memory. MAA..AA,LLLL:XX..XX AA..AA is the start address, LLLL is the number of bytes, and XX..XX is the hexadecimal data. Success: OK Failure: void. 
*/ { char *lenptr; char *dataptr; unsigned char *addr = (unsigned char *)gdb_cris_strtol(&remcomInBuffer[1], &lenptr, 16); int length = gdb_cris_strtol(lenptr+1, &dataptr, 16); if (*lenptr == ',' && *dataptr == ':') { if (remcomInBuffer[0] == 'M') { hex2mem(addr, dataptr + 1, length); } else /* X */ { bin2mem(addr, dataptr + 1, length); } gdb_cris_strcpy (remcomOutBuffer, "OK"); } else { gdb_cris_strcpy (remcomOutBuffer, error_message[E06]); } } break; case 'c': /* Continue execution. cAA..AA AA..AA is the address where execution is resumed. If AA..AA is omitted, resume at the present address. Success: return to the executing thread. Failure: will never know. */ if (remcomInBuffer[1] != '\0') { reg.pc = gdb_cris_strtol (&remcomInBuffer[1], 0, 16); } enableDebugIRQ(); return; case 's': /* Step. sAA..AA AA..AA is the address where execution is resumed. If AA..AA is omitted, resume at the present address. Success: return to the executing thread. Failure: will never know. Should never be invoked. The single-step is implemented on the host side. If ever invoked, it is an internal error E04. */ gdb_cris_strcpy (remcomOutBuffer, error_message[E04]); putpacket (remcomOutBuffer); return; case '?': /* The last signal which caused a stop. ? Success: SAA, where AA is the signal number. Failure: void. */ remcomOutBuffer[0] = 'S'; remcomOutBuffer[1] = hex_asc_hi(sigval); remcomOutBuffer[2] = hex_asc_lo(sigval); remcomOutBuffer[3] = 0; break; case 'D': /* Detach from host. D Success: OK, and return to the executing thread. Failure: will never know */ putpacket ("OK"); return; case 'k': case 'r': /* kill request or reset request. Success: restart of target. Failure: will never know. */ kill_restart (); break; case 'C': case 'S': case '!': case 'R': case 'd': /* Continue with signal sig. Csig;AA..AA Step with signal sig. Ssig;AA..AA Use the extended remote protocol. ! Restart the target system. R0 Toggle debug flag. d Search backwards. 
tAA:PP,MM Not supported: E04 */ gdb_cris_strcpy (remcomOutBuffer, error_message[E04]); break; #ifdef PROCESS_SUPPORT case 'T': /* Thread alive. TXX Is thread XX alive? Success: OK, thread XX is alive. Failure: E03, thread XX is dead. */ { int thread_id = (int)gdb_cris_strtol (&remcomInBuffer[1], 0, 16); /* Cannot tell whether it is alive or not. */ if (thread_id >= 0 && thread_id < number_of_tasks) gdb_cris_strcpy (remcomOutBuffer, "OK"); } break; case 'H': /* Set thread for subsequent operations: Hct c = 'c' for thread used in step and continue; t can be -1 for all threads. c = 'g' for thread used in other operations. t = 0 means pick any thread. Success: OK Failure: E01 */ { int thread_id = gdb_cris_strtol (&remcomInBuffer[2], 0, 16); if (remcomInBuffer[1] == 'c') { /* c = 'c' for thread used in step and continue */ /* Do not change current_thread_c here. It would create a mess in the scheduler. */ gdb_cris_strcpy (remcomOutBuffer, "OK"); } else if (remcomInBuffer[1] == 'g') { /* c = 'g' for thread used in other operations. t = 0 means pick any thread. Impossible since the scheduler does not allow that. */ if (thread_id >= 0 && thread_id < number_of_tasks) { current_thread_g = thread_id; gdb_cris_strcpy (remcomOutBuffer, "OK"); } else { /* Not expected - send an error message. */ gdb_cris_strcpy (remcomOutBuffer, error_message[E01]); } } else { /* Not expected - send an error message. */ gdb_cris_strcpy (remcomOutBuffer, error_message[E01]); } } break; case 'q': case 'Q': /* Query of general interest. qXXXX Set general value XXXX. QXXXX=yyyy */ { int pos; int nextpos; int thread_id; switch (remcomInBuffer[1]) { case 'C': /* Identify the remote current thread. */ gdb_cris_strcpy (&remcomOutBuffer[0], "QC"); remcomOutBuffer[2] = hex_asc_hi(current_thread_c); remcomOutBuffer[3] = hex_asc_lo(current_thread_c); remcomOutBuffer[4] = '\0'; break; case 'L': gdb_cris_strcpy (&remcomOutBuffer[0], "QM"); /* Reply with number of threads. 
*/ if (os_is_started()) { remcomOutBuffer[2] = hex_asc_hi(number_of_tasks); remcomOutBuffer[3] = hex_asc_lo(number_of_tasks); } else { remcomOutBuffer[2] = hex_asc_hi(0); remcomOutBuffer[3] = hex_asc_lo(1); } /* Done with the reply. */ remcomOutBuffer[4] = hex_asc_lo(1); pos = 5; /* Expects the argument thread id. */ for (; pos < (5 + HEXCHARS_IN_THREAD_ID); pos++) remcomOutBuffer[pos] = remcomInBuffer[pos]; /* Reply with the thread identifiers. */ if (os_is_started()) { /* Store the thread identifiers of all tasks. */ for (thread_id = 0; thread_id < number_of_tasks; thread_id++) { nextpos = pos + HEXCHARS_IN_THREAD_ID - 1; for (; pos < nextpos; pos ++) remcomOutBuffer[pos] = hex_asc_lo(0); remcomOutBuffer[pos++] = hex_asc_lo(thread_id); } } else { /* Store the thread identifier of the boot task. */ nextpos = pos + HEXCHARS_IN_THREAD_ID - 1; for (; pos < nextpos; pos ++) remcomOutBuffer[pos] = hex_asc_lo(0); remcomOutBuffer[pos++] = hex_asc_lo(current_thread_c); } remcomOutBuffer[pos] = '\0'; break; default: /* Not supported: "" */ /* Request information about section offsets: qOffsets. */ remcomOutBuffer[0] = 0; break; } } break; #endif /* PROCESS_SUPPORT */ default: /* The stub should ignore other request and send an empty response ($#<checksum>). This way we can extend the protocol and GDB can tell whether the stub it is talking to uses the old or the new. */ remcomOutBuffer[0] = 0; break; } putpacket(remcomOutBuffer); } } /* Performs a complete re-start from scratch. */ static void kill_restart () { machine_restart(""); } /********************************** Breakpoint *******************************/ /* The hook for both a static (compiled) and a dynamic breakpoint set by GDB. An internal stack is used by the stub. The register image of the caller is stored in the structure register_image. Interactive communication with the host is handled by handle_exception and finally the register image is restored. 
*/ void kgdb_handle_breakpoint(void); asm (" .global kgdb_handle_breakpoint kgdb_handle_breakpoint: ;; ;; Response to the break-instruction ;; ;; Create a register image of the caller ;; move $dccr,[reg+0x5E] ; Save the flags in DCCR before disable interrupts di ; Disable interrupts move.d $r0,[reg] ; Save R0 move.d $r1,[reg+0x04] ; Save R1 move.d $r2,[reg+0x08] ; Save R2 move.d $r3,[reg+0x0C] ; Save R3 move.d $r4,[reg+0x10] ; Save R4 move.d $r5,[reg+0x14] ; Save R5 move.d $r6,[reg+0x18] ; Save R6 move.d $r7,[reg+0x1C] ; Save R7 move.d $r8,[reg+0x20] ; Save R8 move.d $r9,[reg+0x24] ; Save R9 move.d $r10,[reg+0x28] ; Save R10 move.d $r11,[reg+0x2C] ; Save R11 move.d $r12,[reg+0x30] ; Save R12 move.d $r13,[reg+0x34] ; Save R13 move.d $sp,[reg+0x38] ; Save SP (R14) ;; Due to the old assembler-versions BRP might not be recognized .word 0xE670 ; move brp,$r0 subq 2,$r0 ; Set to address of previous instruction. move.d $r0,[reg+0x3c] ; Save the address in PC (R15) clear.b [reg+0x40] ; Clear P0 move $vr,[reg+0x41] ; Save special register P1 clear.w [reg+0x42] ; Clear P4 move $ccr,[reg+0x44] ; Save special register CCR move $mof,[reg+0x46] ; P7 clear.d [reg+0x4A] ; Clear P8 move $ibr,[reg+0x4E] ; P9, move $irp,[reg+0x52] ; P10, move $srp,[reg+0x56] ; P11, move $dtp0,[reg+0x5A] ; P12, register BAR, assembler might not know BAR ; P13, register DCCR already saved ;; Due to the old assembler-versions BRP might not be recognized .word 0xE670 ; move brp,r0 ;; Static (compiled) breakpoints must return to the next instruction in order ;; to avoid infinite loops. Dynamic (gdb-invoked) must restore the instruction ;; in order to execute it when execution is continued. test.b [is_dyn_brkp] ; Is this a dynamic breakpoint? 
beq is_static ; No, a static breakpoint nop subq 2,$r0 ; rerun the instruction the break replaced is_static: moveq 1,$r1 move.b $r1,[is_dyn_brkp] ; Set the state variable to dynamic breakpoint move.d $r0,[reg+0x62] ; Save the return address in BRP move $usp,[reg+0x66] ; USP ;; ;; Handle the communication ;; move.d internal_stack+1020,$sp ; Use the internal stack which grows upward moveq 5,$r10 ; SIGTRAP jsr handle_exception ; Interactive routine ;; ;; Return to the caller ;; move.d [reg],$r0 ; Restore R0 move.d [reg+0x04],$r1 ; Restore R1 move.d [reg+0x08],$r2 ; Restore R2 move.d [reg+0x0C],$r3 ; Restore R3 move.d [reg+0x10],$r4 ; Restore R4 move.d [reg+0x14],$r5 ; Restore R5 move.d [reg+0x18],$r6 ; Restore R6 move.d [reg+0x1C],$r7 ; Restore R7 move.d [reg+0x20],$r8 ; Restore R8 move.d [reg+0x24],$r9 ; Restore R9 move.d [reg+0x28],$r10 ; Restore R10 move.d [reg+0x2C],$r11 ; Restore R11 move.d [reg+0x30],$r12 ; Restore R12 move.d [reg+0x34],$r13 ; Restore R13 ;; ;; FIXME: Which registers should be restored? ;; move.d [reg+0x38],$sp ; Restore SP (R14) move [reg+0x56],$srp ; Restore the subroutine return pointer. move [reg+0x5E],$dccr ; Restore DCCR move [reg+0x66],$usp ; Restore USP jump [reg+0x62] ; A jump to the content in register BRP works. nop ; "); /* The hook for an interrupt generated by GDB. An internal stack is used by the stub. The register image of the caller is stored in the structure register_image. Interactive communication with the host is handled by handle_exception and finally the register image is restored. Due to the old assembler which does not recognise the break instruction and the breakpoint return pointer hex-code is used. 
*/ void kgdb_handle_serial(void); asm (" .global kgdb_handle_serial kgdb_handle_serial: ;; ;; Response to a serial interrupt ;; move $dccr,[reg+0x5E] ; Save the flags in DCCR di ; Disable interrupts move.d $r0,[reg] ; Save R0 move.d $r1,[reg+0x04] ; Save R1 move.d $r2,[reg+0x08] ; Save R2 move.d $r3,[reg+0x0C] ; Save R3 move.d $r4,[reg+0x10] ; Save R4 move.d $r5,[reg+0x14] ; Save R5 move.d $r6,[reg+0x18] ; Save R6 move.d $r7,[reg+0x1C] ; Save R7 move.d $r8,[reg+0x20] ; Save R8 move.d $r9,[reg+0x24] ; Save R9 move.d $r10,[reg+0x28] ; Save R10 move.d $r11,[reg+0x2C] ; Save R11 move.d $r12,[reg+0x30] ; Save R12 move.d $r13,[reg+0x34] ; Save R13 move.d $sp,[reg+0x38] ; Save SP (R14) move $irp,[reg+0x3c] ; Save the address in PC (R15) clear.b [reg+0x40] ; Clear P0 move $vr,[reg+0x41] ; Save special register P1, clear.w [reg+0x42] ; Clear P4 move $ccr,[reg+0x44] ; Save special register CCR move $mof,[reg+0x46] ; P7 clear.d [reg+0x4A] ; Clear P8 move $ibr,[reg+0x4E] ; P9, move $irp,[reg+0x52] ; P10, move $srp,[reg+0x56] ; P11, move $dtp0,[reg+0x5A] ; P12, register BAR, assembler might not know BAR ; P13, register DCCR already saved ;; Due to the old assembler-versions BRP might not be recognized .word 0xE670 ; move brp,r0 move.d $r0,[reg+0x62] ; Save the return address in BRP move $usp,[reg+0x66] ; USP ;; get the serial character (from debugport.c) and check if it is a ctrl-c jsr getDebugChar cmp.b 3, $r10 bne goback nop move.d [reg+0x5E], $r10 ; Get DCCR btstq 8, $r10 ; Test the U-flag. 
bmi goback nop ;; ;; Handle the communication ;; move.d internal_stack+1020,$sp ; Use the internal stack moveq 2,$r10 ; SIGINT jsr handle_exception ; Interactive routine goback: ;; ;; Return to the caller ;; move.d [reg],$r0 ; Restore R0 move.d [reg+0x04],$r1 ; Restore R1 move.d [reg+0x08],$r2 ; Restore R2 move.d [reg+0x0C],$r3 ; Restore R3 move.d [reg+0x10],$r4 ; Restore R4 move.d [reg+0x14],$r5 ; Restore R5 move.d [reg+0x18],$r6 ; Restore R6 move.d [reg+0x1C],$r7 ; Restore R7 move.d [reg+0x20],$r8 ; Restore R8 move.d [reg+0x24],$r9 ; Restore R9 move.d [reg+0x28],$r10 ; Restore R10 move.d [reg+0x2C],$r11 ; Restore R11 move.d [reg+0x30],$r12 ; Restore R12 move.d [reg+0x34],$r13 ; Restore R13 ;; ;; FIXME: Which registers should be restored? ;; move.d [reg+0x38],$sp ; Restore SP (R14) move [reg+0x56],$srp ; Restore the subroutine return pointer. move [reg+0x5E],$dccr ; Restore DCCR move [reg+0x66],$usp ; Restore USP reti ; Return from the interrupt routine nop "); /* Use this static breakpoint in the start-up only. */ void breakpoint(void) { kgdb_started = 1; is_dyn_brkp = 0; /* This is a static, not a dynamic breakpoint. */ __asm__ volatile ("break 8"); /* Jump to handle_breakpoint. */ } /* initialize kgdb. doesn't break into the debugger, but sets up irq and ports */ void kgdb_init(void) { /* could initialize debug port as well but it's done in head.S already... */ /* breakpoint handler is now set in irq.c */ set_int_vector(8, kgdb_handle_serial); enableDebugIRQ(); } /****************************** End of file **********************************/
gpl-2.0
Tommy-Geenexus/android_kernel_sony_apq8064_yuga_5.x
arch/arm/mach-orion5x/ls_hgl-setup.c
5006
7204
/* * arch/arm/mach-orion5x/ls_hgl-setup.c * * Maintainer: Zhu Qingsen <zhuqs@cn.fujitsu.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/leds.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/ata_platform.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" /***************************************************************************** * Linkstation LS-HGL Info ****************************************************************************/ /* * 256K NOR flash Device bus boot chip select */ #define LS_HGL_NOR_BOOT_BASE 0xf4000000 #define LS_HGL_NOR_BOOT_SIZE SZ_256K /***************************************************************************** * 256KB NOR Flash on BOOT Device ****************************************************************************/ static struct physmap_flash_data ls_hgl_nor_flash_data = { .width = 1, }; static struct resource ls_hgl_nor_flash_resource = { .flags = IORESOURCE_MEM, .start = LS_HGL_NOR_BOOT_BASE, .end = LS_HGL_NOR_BOOT_BASE + LS_HGL_NOR_BOOT_SIZE - 1, }; static struct platform_device ls_hgl_nor_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &ls_hgl_nor_flash_data, }, .num_resources = 1, .resource = &ls_hgl_nor_flash_resource, }; /***************************************************************************** * Ethernet ****************************************************************************/ static struct mv643xx_eth_platform_data ls_hgl_eth_data = { .phy_addr = 8, }; /***************************************************************************** * RTC 5C372a on I2C bus 
****************************************************************************/ static struct i2c_board_info __initdata ls_hgl_i2c_rtc = { I2C_BOARD_INFO("rs5c372a", 0x32), }; /***************************************************************************** * LEDs attached to GPIO ****************************************************************************/ #define LS_HGL_GPIO_LED_ALARM 2 #define LS_HGL_GPIO_LED_INFO 3 #define LS_HGL_GPIO_LED_FUNC 17 #define LS_HGL_GPIO_LED_PWR 0 static struct gpio_led ls_hgl_led_pins[] = { { .name = "alarm:red", .gpio = LS_HGL_GPIO_LED_ALARM, .active_low = 1, }, { .name = "info:amber", .gpio = LS_HGL_GPIO_LED_INFO, .active_low = 1, }, { .name = "func:blue:top", .gpio = LS_HGL_GPIO_LED_FUNC, .active_low = 1, }, { .name = "power:blue:bottom", .gpio = LS_HGL_GPIO_LED_PWR, }, }; static struct gpio_led_platform_data ls_hgl_led_data = { .leds = ls_hgl_led_pins, .num_leds = ARRAY_SIZE(ls_hgl_led_pins), }; static struct platform_device ls_hgl_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &ls_hgl_led_data, }, }; /**************************************************************************** * GPIO Attached Keys ****************************************************************************/ #define LS_HGL_GPIO_KEY_FUNC 15 #define LS_HGL_GPIO_KEY_POWER 8 #define LS_HGL_GPIO_KEY_AUTOPOWER 10 #define LS_HGL_SW_POWER 0x00 #define LS_HGL_SW_AUTOPOWER 0x01 static struct gpio_keys_button ls_hgl_buttons[] = { { .code = KEY_OPTION, .gpio = LS_HGL_GPIO_KEY_FUNC, .desc = "Function Button", .active_low = 1, }, { .type = EV_SW, .code = LS_HGL_SW_POWER, .gpio = LS_HGL_GPIO_KEY_POWER, .desc = "Power-on Switch", .active_low = 1, }, { .type = EV_SW, .code = LS_HGL_SW_AUTOPOWER, .gpio = LS_HGL_GPIO_KEY_AUTOPOWER, .desc = "Power-auto Switch", .active_low = 1, }, }; static struct gpio_keys_platform_data ls_hgl_button_data = { .buttons = ls_hgl_buttons, .nbuttons = ARRAY_SIZE(ls_hgl_buttons), }; static struct platform_device ls_hgl_button_device = { 
.name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &ls_hgl_button_data, }, }; /***************************************************************************** * SATA ****************************************************************************/ static struct mv_sata_platform_data ls_hgl_sata_data = { .n_ports = 2, }; /***************************************************************************** * Linkstation LS-HGL specific power off method: reboot ****************************************************************************/ /* * On the Linkstation LS-HGL, the shutdown process is following: * - Userland monitors key events until the power switch goes to off position * - The board reboots * - U-boot starts and goes into an idle mode waiting for the user * to move the switch to ON position */ static void ls_hgl_power_off(void) { orion5x_restart('h', NULL); } /***************************************************************************** * General Setup ****************************************************************************/ #define LS_HGL_GPIO_USB_POWER 9 #define LS_HGL_GPIO_AUTO_POWER 10 #define LS_HGL_GPIO_POWER 8 #define LS_HGL_GPIO_HDD_POWER 1 static unsigned int ls_hgl_mpp_modes[] __initdata = { MPP0_GPIO, /* LED_PWR */ MPP1_GPIO, /* HDD_PWR */ MPP2_GPIO, /* LED_ALARM */ MPP3_GPIO, /* LED_INFO */ MPP4_UNUSED, MPP5_UNUSED, MPP6_GPIO, /* FAN_LCK */ MPP7_GPIO, /* INIT */ MPP8_GPIO, /* POWER */ MPP9_GPIO, /* USB_PWR */ MPP10_GPIO, /* AUTO_POWER */ MPP11_UNUSED, /* LED_ETH (dummy) */ MPP12_UNUSED, MPP13_UNUSED, MPP14_UNUSED, MPP15_GPIO, /* FUNC */ MPP16_UNUSED, MPP17_GPIO, /* LED_FUNC */ MPP18_UNUSED, MPP19_UNUSED, 0, }; static void __init ls_hgl_init(void) { /* * Setup basic Orion functions. Need to be called early. */ orion5x_init(); orion5x_mpp_conf(ls_hgl_mpp_modes); /* * Configure peripherals. 
*/ orion5x_ehci0_init(); orion5x_ehci1_init(); orion5x_eth_init(&ls_hgl_eth_data); orion5x_i2c_init(); orion5x_sata_init(&ls_hgl_sata_data); orion5x_uart0_init(); orion5x_xor_init(); orion5x_setup_dev_boot_win(LS_HGL_NOR_BOOT_BASE, LS_HGL_NOR_BOOT_SIZE); platform_device_register(&ls_hgl_nor_flash); platform_device_register(&ls_hgl_button_device); platform_device_register(&ls_hgl_leds); i2c_register_board_info(0, &ls_hgl_i2c_rtc, 1); /* enable USB power */ gpio_set_value(LS_HGL_GPIO_USB_POWER, 1); /* register power-off method */ pm_power_off = ls_hgl_power_off; pr_info("%s: finished\n", __func__); } MACHINE_START(LINKSTATION_LS_HGL, "Buffalo Linkstation LS-HGL") /* Maintainer: Zhu Qingsen <zhuqs@cn.fujistu.com> */ .atag_offset = 0x100, .init_machine = ls_hgl_init, .map_io = orion5x_map_io, .init_early = orion5x_init_early, .init_irq = orion5x_init_irq, .timer = &orion5x_timer, .fixup = tag_fixup_mem32, .restart = orion5x_restart, MACHINE_END
gpl-2.0
GustavoRD78/78Kernel-ZL-230
arch/mips/sibyte/bcm1480/smp.c
6542
5471
/* * Copyright (C) 2001,2002,2004 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/init.h> #include <linux/delay.h> #include <linux/smp.h> #include <linux/kernel_stat.h> #include <linux/sched.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/fw/cfe/cfe_api.h> #include <asm/sibyte/sb1250.h> #include <asm/sibyte/bcm1480_regs.h> #include <asm/sibyte/bcm1480_int.h> extern void smp_call_function_interrupt(void); /* * These are routines for dealing with the bcm1480 smp capabilities * independent of board/firmware */ static void *mailbox_0_set_regs[] = { IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU), IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU), IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU), IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU), }; static void *mailbox_0_clear_regs[] = { IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU), IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU), IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU), IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU), }; static void *mailbox_0_regs[] = { IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_CPU), IOADDR(A_BCM1480_IMR_CPU1_BASE + 
R_BCM1480_IMR_MAILBOX_0_CPU), IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_CPU), IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_CPU), }; /* * SMP init and finish on secondary CPUs */ void __cpuinit bcm1480_smp_init(void) { unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 | STATUSF_IP1 | STATUSF_IP0; /* Set interrupt mask, but don't enable */ change_c0_status(ST0_IM, imask); } /* * These are routines for dealing with the sb1250 smp capabilities * independent of board/firmware */ /* * Simple enough; everything is set up, so just poke the appropriate mailbox * register, and we should be set */ static void bcm1480_send_ipi_single(int cpu, unsigned int action) { __raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]); } static void bcm1480_send_ipi_mask(const struct cpumask *mask, unsigned int action) { unsigned int i; for_each_cpu(i, mask) bcm1480_send_ipi_single(i, action); } /* * Code to run on secondary just after probing the CPU */ static void __cpuinit bcm1480_init_secondary(void) { extern void bcm1480_smp_init(void); bcm1480_smp_init(); } /* * Do any tidying up before marking online and running the idle * loop */ static void __cpuinit bcm1480_smp_finish(void) { extern void sb1480_clockevent_init(void); sb1480_clockevent_init(); local_irq_enable(); } /* * Final cleanup after all secondaries booted */ static void bcm1480_cpus_done(void) { } /* * Setup the PC, SP, and GP of a secondary processor and start it * running! */ static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle) { int retval; retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap, __KSTK_TOS(idle), (unsigned long)task_thread_info(idle), 0); if (retval != 0) printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval); } /* * Use CFE to find out how many CPUs are available, setting up * cpu_possible_mask and the logical/physical mappings. * XXXKW will the boot CPU ever not be physical 0? 
* * Common setup before any secondaries are started */ static void __init bcm1480_smp_setup(void) { int i, num; init_cpu_possible(cpumask_of(0)); __cpu_number_map[0] = 0; __cpu_logical_map[0] = 0; for (i = 1, num = 0; i < NR_CPUS; i++) { if (cfe_cpu_stop(i) == 0) { set_cpu_possible(i, true); __cpu_number_map[i] = ++num; __cpu_logical_map[num] = i; } } printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); } static void __init bcm1480_prepare_cpus(unsigned int max_cpus) { } struct plat_smp_ops bcm1480_smp_ops = { .send_ipi_single = bcm1480_send_ipi_single, .send_ipi_mask = bcm1480_send_ipi_mask, .init_secondary = bcm1480_init_secondary, .smp_finish = bcm1480_smp_finish, .cpus_done = bcm1480_cpus_done, .boot_secondary = bcm1480_boot_secondary, .smp_setup = bcm1480_smp_setup, .prepare_cpus = bcm1480_prepare_cpus, }; void bcm1480_mailbox_interrupt(void) { int cpu = smp_processor_id(); int irq = K_BCM1480_INT_MBOX_0_0; unsigned int action; kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); /* Load the mailbox register to figure out what we're supposed to do */ action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff; /* Clear the mailbox to clear the interrupt */ __raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]); if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); if (action & SMP_CALL_FUNCTION) smp_call_function_interrupt(); }
gpl-2.0
CyanogenMod/android_kernel_htc_msm8974
sound/pci/cs46xx/dsp_spos_scb_lib.c
9102
49369
/* * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* * 2002-07 Benny Sjostrand benny@hostmobility.com */ #include <asm/io.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/control.h> #include <sound/info.h> #include <sound/cs46xx.h> #include "cs46xx_lib.h" #include "dsp_spos.h" struct proc_scb_info { struct dsp_scb_descriptor * scb_desc; struct snd_cs46xx *chip; }; static void remove_symbol (struct snd_cs46xx * chip, struct dsp_symbol_entry * symbol) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; int symbol_index = (int)(symbol - ins->symbol_table.symbols); if (snd_BUG_ON(ins->symbol_table.nsymbols <= 0)) return; if (snd_BUG_ON(symbol_index < 0 || symbol_index >= ins->symbol_table.nsymbols)) return; ins->symbol_table.symbols[symbol_index].deleted = 1; if (symbol_index < ins->symbol_table.highest_frag_index) { ins->symbol_table.highest_frag_index = symbol_index; } if (symbol_index == ins->symbol_table.nsymbols - 1) ins->symbol_table.nsymbols --; if (ins->symbol_table.highest_frag_index > ins->symbol_table.nsymbols) { ins->symbol_table.highest_frag_index = ins->symbol_table.nsymbols; } } #ifdef CONFIG_PROC_FS static void cs46xx_dsp_proc_scb_info_read (struct snd_info_entry *entry, struct 
snd_info_buffer *buffer) { struct proc_scb_info * scb_info = entry->private_data; struct dsp_scb_descriptor * scb = scb_info->scb_desc; struct dsp_spos_instance * ins; struct snd_cs46xx *chip = scb_info->chip; int j,col; void __iomem *dst = chip->region.idx[1].remap_addr + DSP_PARAMETER_BYTE_OFFSET; ins = chip->dsp_spos_instance; mutex_lock(&chip->spos_mutex); snd_iprintf(buffer,"%04x %s:\n",scb->address,scb->scb_name); for (col = 0,j = 0;j < 0x10; j++,col++) { if (col == 4) { snd_iprintf(buffer,"\n"); col = 0; } snd_iprintf(buffer,"%08x ",readl(dst + (scb->address + j) * sizeof(u32))); } snd_iprintf(buffer,"\n"); if (scb->parent_scb_ptr != NULL) { snd_iprintf(buffer,"parent [%s:%04x] ", scb->parent_scb_ptr->scb_name, scb->parent_scb_ptr->address); } else snd_iprintf(buffer,"parent [none] "); snd_iprintf(buffer,"sub_list_ptr [%s:%04x]\nnext_scb_ptr [%s:%04x] task_entry [%s:%04x]\n", scb->sub_list_ptr->scb_name, scb->sub_list_ptr->address, scb->next_scb_ptr->scb_name, scb->next_scb_ptr->address, scb->task_entry->symbol_name, scb->task_entry->address); snd_iprintf(buffer,"index [%d] ref_count [%d]\n",scb->index,scb->ref_count); mutex_unlock(&chip->spos_mutex); } #endif static void _dsp_unlink_scb (struct snd_cs46xx *chip, struct dsp_scb_descriptor * scb) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; if ( scb->parent_scb_ptr ) { /* unlink parent SCB */ if (snd_BUG_ON(scb->parent_scb_ptr->sub_list_ptr != scb && scb->parent_scb_ptr->next_scb_ptr != scb)) return; if (scb->parent_scb_ptr->sub_list_ptr == scb) { if (scb->next_scb_ptr == ins->the_null_scb) { /* last and only node in parent sublist */ scb->parent_scb_ptr->sub_list_ptr = scb->sub_list_ptr; if (scb->sub_list_ptr != ins->the_null_scb) { scb->sub_list_ptr->parent_scb_ptr = scb->parent_scb_ptr; } scb->sub_list_ptr = ins->the_null_scb; } else { /* first node in parent sublist */ scb->parent_scb_ptr->sub_list_ptr = scb->next_scb_ptr; if (scb->next_scb_ptr != ins->the_null_scb) { /* update next node 
parent ptr. */ scb->next_scb_ptr->parent_scb_ptr = scb->parent_scb_ptr; } scb->next_scb_ptr = ins->the_null_scb; } } else { scb->parent_scb_ptr->next_scb_ptr = scb->next_scb_ptr; if (scb->next_scb_ptr != ins->the_null_scb) { /* update next node parent ptr. */ scb->next_scb_ptr->parent_scb_ptr = scb->parent_scb_ptr; } scb->next_scb_ptr = ins->the_null_scb; } /* update parent first entry in DSP RAM */ cs46xx_dsp_spos_update_scb(chip,scb->parent_scb_ptr); /* then update entry in DSP RAM */ cs46xx_dsp_spos_update_scb(chip,scb); scb->parent_scb_ptr = NULL; } } static void _dsp_clear_sample_buffer (struct snd_cs46xx *chip, u32 sample_buffer_addr, int dword_count) { void __iomem *dst = chip->region.idx[2].remap_addr + sample_buffer_addr; int i; for (i = 0; i < dword_count ; ++i ) { writel(0, dst); dst += 4; } } void cs46xx_dsp_remove_scb (struct snd_cs46xx *chip, struct dsp_scb_descriptor * scb) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; unsigned long flags; /* check integrety */ if (snd_BUG_ON(scb->index < 0 || scb->index >= ins->nscb || (ins->scbs + scb->index) != scb)) return; #if 0 /* can't remove a SCB with childs before removing childs first */ if (snd_BUG_ON(scb->sub_list_ptr != ins->the_null_scb || scb->next_scb_ptr != ins->the_null_scb)) goto _end; #endif spin_lock_irqsave(&chip->reg_lock, flags); _dsp_unlink_scb (chip,scb); spin_unlock_irqrestore(&chip->reg_lock, flags); cs46xx_dsp_proc_free_scb_desc(scb); if (snd_BUG_ON(!scb->scb_symbol)) return; remove_symbol (chip,scb->scb_symbol); ins->scbs[scb->index].deleted = 1; #ifdef CONFIG_PM kfree(ins->scbs[scb->index].data); ins->scbs[scb->index].data = NULL; #endif if (scb->index < ins->scb_highest_frag_index) ins->scb_highest_frag_index = scb->index; if (scb->index == ins->nscb - 1) { ins->nscb --; } if (ins->scb_highest_frag_index > ins->nscb) { ins->scb_highest_frag_index = ins->nscb; } #if 0 /* !!!! THIS IS A PIECE OF SHIT MADE BY ME !!! 
*/ for(i = scb->index + 1;i < ins->nscb; ++i) { ins->scbs[i - 1].index = i - 1; } #endif } #ifdef CONFIG_PROC_FS void cs46xx_dsp_proc_free_scb_desc (struct dsp_scb_descriptor * scb) { if (scb->proc_info) { struct proc_scb_info * scb_info = scb->proc_info->private_data; snd_printdd("cs46xx_dsp_proc_free_scb_desc: freeing %s\n",scb->scb_name); snd_info_free_entry(scb->proc_info); scb->proc_info = NULL; kfree (scb_info); } } void cs46xx_dsp_proc_register_scb_desc (struct snd_cs46xx *chip, struct dsp_scb_descriptor * scb) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct snd_info_entry * entry; struct proc_scb_info * scb_info; /* register to proc */ if (ins->snd_card != NULL && ins->proc_dsp_dir != NULL && scb->proc_info == NULL) { if ((entry = snd_info_create_card_entry(ins->snd_card, scb->scb_name, ins->proc_dsp_dir)) != NULL) { scb_info = kmalloc(sizeof(struct proc_scb_info), GFP_KERNEL); if (!scb_info) { snd_info_free_entry(entry); entry = NULL; goto out; } scb_info->chip = chip; scb_info->scb_desc = scb; entry->content = SNDRV_INFO_CONTENT_TEXT; entry->private_data = scb_info; entry->mode = S_IFREG | S_IRUGO | S_IWUSR; entry->c.text.read = cs46xx_dsp_proc_scb_info_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); kfree (scb_info); entry = NULL; } } out: scb->proc_info = entry; } } #endif /* CONFIG_PROC_FS */ static struct dsp_scb_descriptor * _dsp_create_generic_scb (struct snd_cs46xx *chip, char * name, u32 * scb_data, u32 dest, struct dsp_symbol_entry * task_entry, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_scb_descriptor * scb; unsigned long flags; if (snd_BUG_ON(!ins->the_null_scb)) return NULL; /* fill the data that will be wroten to DSP */ scb_data[SCBsubListPtr] = (ins->the_null_scb->address << 0x10) | ins->the_null_scb->address; scb_data[SCBfuncEntryPtr] &= 0xFFFF0000; scb_data[SCBfuncEntryPtr] |= task_entry->address; 
snd_printdd("dsp_spos: creating SCB <%s>\n",name); scb = cs46xx_dsp_create_scb(chip,name,scb_data,dest); scb->sub_list_ptr = ins->the_null_scb; scb->next_scb_ptr = ins->the_null_scb; scb->parent_scb_ptr = parent_scb; scb->task_entry = task_entry; /* update parent SCB */ if (scb->parent_scb_ptr) { #if 0 printk ("scb->parent_scb_ptr = %s\n",scb->parent_scb_ptr->scb_name); printk ("scb->parent_scb_ptr->next_scb_ptr = %s\n",scb->parent_scb_ptr->next_scb_ptr->scb_name); printk ("scb->parent_scb_ptr->sub_list_ptr = %s\n",scb->parent_scb_ptr->sub_list_ptr->scb_name); #endif /* link to parent SCB */ if (scb_child_type == SCB_ON_PARENT_NEXT_SCB) { if (snd_BUG_ON(scb->parent_scb_ptr->next_scb_ptr != ins->the_null_scb)) return NULL; scb->parent_scb_ptr->next_scb_ptr = scb; } else if (scb_child_type == SCB_ON_PARENT_SUBLIST_SCB) { if (snd_BUG_ON(scb->parent_scb_ptr->sub_list_ptr != ins->the_null_scb)) return NULL; scb->parent_scb_ptr->sub_list_ptr = scb; } else { snd_BUG(); } spin_lock_irqsave(&chip->reg_lock, flags); /* update entry in DSP RAM */ cs46xx_dsp_spos_update_scb(chip,scb->parent_scb_ptr); spin_unlock_irqrestore(&chip->reg_lock, flags); } cs46xx_dsp_proc_register_scb_desc (chip,scb); return scb; } static struct dsp_scb_descriptor * cs46xx_dsp_create_generic_scb (struct snd_cs46xx *chip, char * name, u32 * scb_data, u32 dest, char * task_entry_name, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_symbol_entry * task_entry; task_entry = cs46xx_dsp_lookup_symbol (chip,task_entry_name, SYMBOL_CODE); if (task_entry == NULL) { snd_printk (KERN_ERR "dsp_spos: symbol %s not found\n",task_entry_name); return NULL; } return _dsp_create_generic_scb (chip,name,scb_data,dest,task_entry, parent_scb,scb_child_type); } struct dsp_scb_descriptor * cs46xx_dsp_create_timing_master_scb (struct snd_cs46xx *chip) { struct dsp_scb_descriptor * scb; struct dsp_timing_master_scb timing_master_scb = { { 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, 0,0, 0,NULL_SCB_ADDR, 0,0, /* 
extraSampleAccum:TMreserved */ 0,0, /* codecFIFOptr:codecFIFOsyncd */ 0x0001,0x8000, /* fracSampAccumQm1:TMfrmsLeftInGroup */ 0x0001,0x0000, /* fracSampCorrectionQm1:TMfrmGroupLength */ 0x00060000 /* nSampPerFrmQ15 */ }; scb = cs46xx_dsp_create_generic_scb(chip,"TimingMasterSCBInst",(u32 *)&timing_master_scb, TIMINGMASTER_SCB_ADDR, "TIMINGMASTER",NULL,SCB_NO_PARENT); return scb; } struct dsp_scb_descriptor * cs46xx_dsp_create_codec_out_scb(struct snd_cs46xx * chip, char * codec_name, u16 channel_disp, u16 fifo_addr, u16 child_scb_addr, u32 dest, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_scb_descriptor * scb; struct dsp_codec_output_scb codec_out_scb = { { 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, 0,0, 0,NULL_SCB_ADDR, 0, /* COstrmRsConfig */ 0, /* COstrmBufPtr */ channel_disp,fifo_addr, /* leftChanBaseIOaddr:rightChanIOdisp */ 0x0000,0x0080, /* (!AC97!) COexpVolChangeRate:COscaleShiftCount */ 0,child_scb_addr /* COreserved - need child scb to work with rom code */ }; scb = cs46xx_dsp_create_generic_scb(chip,codec_name,(u32 *)&codec_out_scb, dest,"S16_CODECOUTPUTTASK",parent_scb, scb_child_type); return scb; } struct dsp_scb_descriptor * cs46xx_dsp_create_codec_in_scb(struct snd_cs46xx * chip, char * codec_name, u16 channel_disp, u16 fifo_addr, u16 sample_buffer_addr, u32 dest, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_scb_descriptor * scb; struct dsp_codec_input_scb codec_input_scb = { { 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, #if 0 /* cs4620 */ SyncIOSCB,NULL_SCB_ADDR #else 0 , 0, #endif 0,0, RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_64, /* strmRsConfig */ sample_buffer_addr << 0x10, /* strmBufPtr; defined as a dword ptr, used as a byte ptr */ channel_disp,fifo_addr, /* (!AC97!) leftChanBaseINaddr=AC97primary link input slot 3 :rightChanINdisp=""slot 4 */ 0x0000,0x0000, /* (!AC97!) 
????:scaleShiftCount; no shift needed because AC97 is already 20 bits */ 0x80008000 /* ??clw cwcgame.scb has 0 */ }; scb = cs46xx_dsp_create_generic_scb(chip,codec_name,(u32 *)&codec_input_scb, dest,"S16_CODECINPUTTASK",parent_scb, scb_child_type); return scb; } static struct dsp_scb_descriptor * cs46xx_dsp_create_pcm_reader_scb(struct snd_cs46xx * chip, char * scb_name, u16 sample_buffer_addr, u32 dest, int virtual_channel, u32 playback_hw_addr, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_scb_descriptor * scb; struct dsp_generic_scb pcm_reader_scb = { /* Play DMA Task xfers data from host buffer to SP buffer init/runtime variables: PlayAC: Play Audio Data Conversion - SCB loc: 2nd dword, mask: 0x0000F000L DATA_FMT_16BIT_ST_LTLEND(0x00000000L) from 16-bit stereo, little-endian DATA_FMT_8_BIT_ST_SIGNED(0x00001000L) from 8-bit stereo, signed DATA_FMT_16BIT_MN_LTLEND(0x00002000L) from 16-bit mono, little-endian DATA_FMT_8_BIT_MN_SIGNED(0x00003000L) from 8-bit mono, signed DATA_FMT_16BIT_ST_BIGEND(0x00004000L) from 16-bit stereo, big-endian DATA_FMT_16BIT_MN_BIGEND(0x00006000L) from 16-bit mono, big-endian DATA_FMT_8_BIT_ST_UNSIGNED(0x00009000L) from 8-bit stereo, unsigned DATA_FMT_8_BIT_MN_UNSIGNED(0x0000b000L) from 8-bit mono, unsigned ? 
Other combinations possible from: DMA_RQ_C2_AUDIO_CONVERT_MASK 0x0000F000L DMA_RQ_C2_AC_NONE 0x00000000L DMA_RQ_C2_AC_8_TO_16_BIT 0x00001000L DMA_RQ_C2_AC_MONO_TO_STEREO 0x00002000L DMA_RQ_C2_AC_ENDIAN_CONVERT 0x00004000L DMA_RQ_C2_AC_SIGNED_CONVERT 0x00008000L HostBuffAddr: Host Buffer Physical Byte Address - SCB loc:3rd dword, Mask: 0xFFFFFFFFL aligned to dword boundary */ /* Basic (non scatter/gather) DMA requestor (4 ints) */ { DMA_RQ_C1_SOURCE_ON_HOST + /* source buffer is on the host */ DMA_RQ_C1_SOURCE_MOD1024 + /* source buffer is 1024 dwords (4096 bytes) */ DMA_RQ_C1_DEST_MOD32 + /* dest buffer(PCMreaderBuf) is 32 dwords*/ DMA_RQ_C1_WRITEBACK_SRC_FLAG + /* ?? */ DMA_RQ_C1_WRITEBACK_DEST_FLAG + /* ?? */ 15, /* DwordCount-1: picked 16 for DwordCount because Jim */ /* Barnette said that is what we should use since */ /* we are not running in optimized mode? */ DMA_RQ_C2_AC_NONE + DMA_RQ_C2_SIGNAL_SOURCE_PINGPONG + /* set play interrupt (bit0) in HISR when source */ /* buffer (on host) crosses half-way point */ virtual_channel, /* Play DMA channel arbitrarily set to 0 */ playback_hw_addr, /* HostBuffAddr (source) */ DMA_RQ_SD_SP_SAMPLE_ADDR + /* destination buffer is in SP Sample Memory */ sample_buffer_addr /* SP Buffer Address (destination) */ }, /* Scatter/gather DMA requestor extension (5 ints) */ { 0, 0, 0, 0, 0 }, /* Sublist pointer & next stream control block (SCB) link. */ NULL_SCB_ADDR,NULL_SCB_ADDR, /* Pointer to this tasks parameter block & stream function pointer */ 0,NULL_SCB_ADDR, /* rsConfig register for stream buffer (rsDMA reg. 
is loaded from basicReq.daw */ /* for incoming streams, or basicReq.saw, for outgoing streams) */ RSCONFIG_DMA_ENABLE + /* enable DMA */ (19 << RSCONFIG_MAX_DMA_SIZE_SHIFT) + /* MAX_DMA_SIZE picked to be 19 since SPUD */ /* uses it for some reason */ ((dest >> 4) << RSCONFIG_STREAM_NUM_SHIFT) + /* stream number = SCBaddr/16 */ RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_32, /* dest buffer(PCMreaderBuf) is 32 dwords (256 bytes) */ /* Stream sample pointer & MAC-unit mode for this stream */ (sample_buffer_addr << 0x10), /* Fractional increment per output sample in the input sample buffer */ 0, { /* Standard stereo volume control default muted */ 0xffff,0xffff, 0xffff,0xffff } }; if (ins->null_algorithm == NULL) { ins->null_algorithm = cs46xx_dsp_lookup_symbol (chip,"NULLALGORITHM", SYMBOL_CODE); if (ins->null_algorithm == NULL) { snd_printk (KERN_ERR "dsp_spos: symbol NULLALGORITHM not found\n"); return NULL; } } scb = _dsp_create_generic_scb(chip,scb_name,(u32 *)&pcm_reader_scb, dest,ins->null_algorithm,parent_scb, scb_child_type); return scb; } #define GOF_PER_SEC 200 struct dsp_scb_descriptor * cs46xx_dsp_create_src_task_scb(struct snd_cs46xx * chip, char * scb_name, int rate, u16 src_buffer_addr, u16 src_delay_buffer_addr, u32 dest, struct dsp_scb_descriptor * parent_scb, int scb_child_type, int pass_through) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_scb_descriptor * scb; unsigned int tmp1, tmp2; unsigned int phiIncr; unsigned int correctionPerGOF, correctionPerSec; snd_printdd( "dsp_spos: setting %s rate to %u\n",scb_name,rate); /* * Compute the values used to drive the actual sample rate conversion. 
* The following formulas are being computed, using inline assembly * since we need to use 64 bit arithmetic to compute the values: * * phiIncr = floor((Fs,in * 2^26) / Fs,out) * correctionPerGOF = floor((Fs,in * 2^26 - Fs,out * phiIncr) / * GOF_PER_SEC) * ulCorrectionPerSec = Fs,in * 2^26 - Fs,out * phiIncr -M * GOF_PER_SEC * correctionPerGOF * * i.e. * * phiIncr:other = dividend:remainder((Fs,in * 2^26) / Fs,out) * correctionPerGOF:correctionPerSec = * dividend:remainder(ulOther / GOF_PER_SEC) */ tmp1 = rate << 16; phiIncr = tmp1 / 48000; tmp1 -= phiIncr * 48000; tmp1 <<= 10; phiIncr <<= 10; tmp2 = tmp1 / 48000; phiIncr += tmp2; tmp1 -= tmp2 * 48000; correctionPerGOF = tmp1 / GOF_PER_SEC; tmp1 -= correctionPerGOF * GOF_PER_SEC; correctionPerSec = tmp1; { struct dsp_src_task_scb src_task_scb = { 0x0028,0x00c8, 0x5555,0x0000, 0x0000,0x0000, src_buffer_addr,1, correctionPerGOF,correctionPerSec, RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_32, 0x0000,src_delay_buffer_addr, 0x0, 0x080,(src_delay_buffer_addr + (24 * 4)), 0,0, /* next_scb, sub_list_ptr */ 0,0, /* entry, this_spb */ RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_8, src_buffer_addr << 0x10, phiIncr, { 0xffff - ins->dac_volume_right,0xffff - ins->dac_volume_left, 0xffff - ins->dac_volume_right,0xffff - ins->dac_volume_left } }; if (ins->s16_up == NULL) { ins->s16_up = cs46xx_dsp_lookup_symbol (chip,"S16_UPSRC", SYMBOL_CODE); if (ins->s16_up == NULL) { snd_printk (KERN_ERR "dsp_spos: symbol S16_UPSRC not found\n"); return NULL; } } /* clear buffers */ _dsp_clear_sample_buffer (chip,src_buffer_addr,8); _dsp_clear_sample_buffer (chip,src_delay_buffer_addr,32); if (pass_through) { /* wont work with any other rate than the native DSP rate */ snd_BUG_ON(rate != 48000); scb = cs46xx_dsp_create_generic_scb(chip,scb_name,(u32 *)&src_task_scb, dest,"DMAREADER",parent_scb, scb_child_type); } else { scb = _dsp_create_generic_scb(chip,scb_name,(u32 *)&src_task_scb, dest,ins->s16_up,parent_scb, scb_child_type); } } return scb; 
} #if 0 /* not used */ struct dsp_scb_descriptor * cs46xx_dsp_create_filter_scb(struct snd_cs46xx * chip, char * scb_name, u16 buffer_addr, u32 dest, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_scb_descriptor * scb; struct dsp_filter_scb filter_scb = { .a0_right = 0x41a9, .a0_left = 0x41a9, .a1_right = 0xb8e4, .a1_left = 0xb8e4, .a2_right = 0x3e55, .a2_left = 0x3e55, .filter_unused3 = 0x0000, .filter_unused2 = 0x0000, .output_buf_ptr = buffer_addr, .init = 0x000, .prev_sample_output1 = 0x00000000, .prev_sample_output2 = 0x00000000, .prev_sample_input1 = 0x00000000, .prev_sample_input2 = 0x00000000, .next_scb_ptr = 0x0000, .sub_list_ptr = 0x0000, .entry_point = 0x0000, .spb_ptr = 0x0000, .b0_right = 0x0e38, .b0_left = 0x0e38, .b1_right = 0x1c71, .b1_left = 0x1c71, .b2_right = 0x0e38, .b2_left = 0x0e38, }; scb = cs46xx_dsp_create_generic_scb(chip,scb_name,(u32 *)&filter_scb, dest,"FILTERTASK",parent_scb, scb_child_type); return scb; } #endif /* not used */ struct dsp_scb_descriptor * cs46xx_dsp_create_mix_only_scb(struct snd_cs46xx * chip, char * scb_name, u16 mix_buffer_addr, u32 dest, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_scb_descriptor * scb; struct dsp_mix_only_scb master_mix_scb = { /* 0 */ { 0, /* 1 */ 0, /* 2 */ mix_buffer_addr, /* 3 */ 0 /* */ }, { /* 4 */ 0, /* 5 */ 0, /* 6 */ 0, /* 7 */ 0, /* 8 */ 0x00000080 }, /* 9 */ 0,0, /* A */ 0,0, /* B */ RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_32, /* C */ (mix_buffer_addr + (16 * 4)) << 0x10, /* D */ 0, { /* E */ 0x8000,0x8000, /* F */ 0x8000,0x8000 } }; scb = cs46xx_dsp_create_generic_scb(chip,scb_name,(u32 *)&master_mix_scb, dest,"S16_MIX",parent_scb, scb_child_type); return scb; } struct dsp_scb_descriptor * cs46xx_dsp_create_mix_to_ostream_scb(struct snd_cs46xx * chip, char * scb_name, u16 mix_buffer_addr, u16 writeback_spb, u32 dest, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_scb_descriptor * scb; struct 
dsp_mix2_ostream_scb mix2_ostream_scb = { /* Basic (non scatter/gather) DMA requestor (4 ints) */ { DMA_RQ_C1_SOURCE_MOD64 + DMA_RQ_C1_DEST_ON_HOST + DMA_RQ_C1_DEST_MOD1024 + DMA_RQ_C1_WRITEBACK_SRC_FLAG + DMA_RQ_C1_WRITEBACK_DEST_FLAG + 15, DMA_RQ_C2_AC_NONE + DMA_RQ_C2_SIGNAL_DEST_PINGPONG + CS46XX_DSP_CAPTURE_CHANNEL, DMA_RQ_SD_SP_SAMPLE_ADDR + mix_buffer_addr, 0x0 }, { 0, 0, 0, 0, 0, }, 0,0, 0,writeback_spb, RSCONFIG_DMA_ENABLE + (19 << RSCONFIG_MAX_DMA_SIZE_SHIFT) + ((dest >> 4) << RSCONFIG_STREAM_NUM_SHIFT) + RSCONFIG_DMA_TO_HOST + RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_64, (mix_buffer_addr + (32 * 4)) << 0x10, 1,0, 0x0001,0x0080, 0xFFFF,0 }; scb = cs46xx_dsp_create_generic_scb(chip,scb_name,(u32 *)&mix2_ostream_scb, dest,"S16_MIX_TO_OSTREAM",parent_scb, scb_child_type); return scb; } struct dsp_scb_descriptor * cs46xx_dsp_create_vari_decimate_scb(struct snd_cs46xx * chip,char * scb_name, u16 vari_buffer_addr0, u16 vari_buffer_addr1, u32 dest, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_scb_descriptor * scb; struct dsp_vari_decimate_scb vari_decimate_scb = { 0x0028,0x00c8, 0x5555,0x0000, 0x0000,0x0000, vari_buffer_addr0,vari_buffer_addr1, 0x0028,0x00c8, RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_256, 0xFF800000, 0, 0x0080,vari_buffer_addr1 + (25 * 4), 0,0, 0,0, RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_8, vari_buffer_addr0 << 0x10, 0x04000000, { 0x8000,0x8000, 0xFFFF,0xFFFF } }; scb = cs46xx_dsp_create_generic_scb(chip,scb_name,(u32 *)&vari_decimate_scb, dest,"VARIDECIMATE",parent_scb, scb_child_type); return scb; } static struct dsp_scb_descriptor * cs46xx_dsp_create_pcm_serial_input_scb(struct snd_cs46xx * chip, char * scb_name, u32 dest, struct dsp_scb_descriptor * input_scb, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_scb_descriptor * scb; struct dsp_pcm_serial_input_scb pcm_serial_input_scb = { { 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, 0,0, 0,0, RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_16, 0, /* 
0xD */ 0,input_scb->address, { /* 0xE */ 0x8000,0x8000, /* 0xF */ 0x8000,0x8000 } }; scb = cs46xx_dsp_create_generic_scb(chip,scb_name,(u32 *)&pcm_serial_input_scb, dest,"PCMSERIALINPUTTASK",parent_scb, scb_child_type); return scb; } static struct dsp_scb_descriptor * cs46xx_dsp_create_asynch_fg_tx_scb(struct snd_cs46xx * chip, char * scb_name, u32 dest, u16 hfg_scb_address, u16 asynch_buffer_address, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_scb_descriptor * scb; struct dsp_asynch_fg_tx_scb asynch_fg_tx_scb = { 0xfc00,0x03ff, /* Prototype sample buffer size of 256 dwords */ 0x0058,0x0028, /* Min Delta 7 dwords == 28 bytes */ /* : Max delta 25 dwords == 100 bytes */ 0,hfg_scb_address, /* Point to HFG task SCB */ 0,0, /* Initialize current Delta and Consumer ptr adjustment count */ 0, /* Initialize accumulated Phi to 0 */ 0,0x2aab, /* Const 1/3 */ { 0, /* Define the unused elements */ 0, 0 }, 0,0, 0,dest + AFGTxAccumPhi, RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_256, /* Stereo, 256 dword */ (asynch_buffer_address) << 0x10, /* This should be automagically synchronized to the producer pointer */ /* There is no correct initial value, it will depend upon the detected rate etc */ 0x18000000, /* Phi increment for approx 32k operation */ 0x8000,0x8000, /* Volume controls are unused at this time */ 0x8000,0x8000 }; scb = cs46xx_dsp_create_generic_scb(chip,scb_name,(u32 *)&asynch_fg_tx_scb, dest,"ASYNCHFGTXCODE",parent_scb, scb_child_type); return scb; } struct dsp_scb_descriptor * cs46xx_dsp_create_asynch_fg_rx_scb(struct snd_cs46xx * chip, char * scb_name, u32 dest, u16 hfg_scb_address, u16 asynch_buffer_address, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_scb_descriptor * scb; struct dsp_asynch_fg_rx_scb asynch_fg_rx_scb = { 0xfe00,0x01ff, /* Prototype sample buffer size of 128 dwords */ 0x0064,0x001c, /* Min Delta 7 dwords == 28 bytes */ /* : Max delta 
25 dwords == 100 bytes */ 0,hfg_scb_address, /* Point to HFG task SCB */ 0,0, /* Initialize current Delta and Consumer ptr adjustment count */ { 0, /* Define the unused elements */ 0, 0, 0, 0 }, 0,0, 0,dest, RSCONFIG_MODULO_128 | RSCONFIG_SAMPLE_16STEREO, /* Stereo, 128 dword */ ( (asynch_buffer_address + (16 * 4)) << 0x10), /* This should be automagically synchrinized to the producer pointer */ /* There is no correct initial value, it will depend upon the detected rate etc */ 0x18000000, /* Set IEC958 input volume */ 0xffff - ins->spdif_input_volume_right,0xffff - ins->spdif_input_volume_left, 0xffff - ins->spdif_input_volume_right,0xffff - ins->spdif_input_volume_left, }; scb = cs46xx_dsp_create_generic_scb(chip,scb_name,(u32 *)&asynch_fg_rx_scb, dest,"ASYNCHFGRXCODE",parent_scb, scb_child_type); return scb; } #if 0 /* not used */ struct dsp_scb_descriptor * cs46xx_dsp_create_output_snoop_scb(struct snd_cs46xx * chip, char * scb_name, u32 dest, u16 snoop_buffer_address, struct dsp_scb_descriptor * snoop_scb, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_scb_descriptor * scb; struct dsp_output_snoop_scb output_snoop_scb = { { 0, /* not used. Zero */ 0, 0, 0, }, { 0, /* not used. 
Zero */ 0, 0, 0, 0 }, 0,0, 0,0, RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_64, snoop_buffer_address << 0x10, 0,0, 0, 0,snoop_scb->address }; scb = cs46xx_dsp_create_generic_scb(chip,scb_name,(u32 *)&output_snoop_scb, dest,"OUTPUTSNOOP",parent_scb, scb_child_type); return scb; } #endif /* not used */ struct dsp_scb_descriptor * cs46xx_dsp_create_spio_write_scb(struct snd_cs46xx * chip, char * scb_name, u32 dest, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_scb_descriptor * scb; struct dsp_spio_write_scb spio_write_scb = { 0,0, /* SPIOWAddress2:SPIOWAddress1; */ 0, /* SPIOWData1; */ 0, /* SPIOWData2; */ 0,0, /* SPIOWAddress4:SPIOWAddress3; */ 0, /* SPIOWData3; */ 0, /* SPIOWData4; */ 0,0, /* SPIOWDataPtr:Unused1; */ { 0,0 }, /* Unused2[2]; */ 0,0, /* SPIOWChildPtr:SPIOWSiblingPtr; */ 0,0, /* SPIOWThisPtr:SPIOWEntryPoint; */ { 0, 0, 0, 0, 0 /* Unused3[5]; */ } }; scb = cs46xx_dsp_create_generic_scb(chip,scb_name,(u32 *)&spio_write_scb, dest,"SPIOWRITE",parent_scb, scb_child_type); return scb; } struct dsp_scb_descriptor * cs46xx_dsp_create_magic_snoop_scb(struct snd_cs46xx * chip, char * scb_name, u32 dest, u16 snoop_buffer_address, struct dsp_scb_descriptor * snoop_scb, struct dsp_scb_descriptor * parent_scb, int scb_child_type) { struct dsp_scb_descriptor * scb; struct dsp_magic_snoop_task magic_snoop_scb = { /* 0 */ 0, /* i0 */ /* 1 */ 0, /* i1 */ /* 2 */ snoop_buffer_address << 0x10, /* 3 */ 0,snoop_scb->address, /* 4 */ 0, /* i3 */ /* 5 */ 0, /* i4 */ /* 6 */ 0, /* i5 */ /* 7 */ 0, /* i6 */ /* 8 */ 0, /* i7 */ /* 9 */ 0,0, /* next_scb, sub_list_ptr */ /* A */ 0,0, /* entry_point, this_ptr */ /* B */ RSCONFIG_SAMPLE_16STEREO + RSCONFIG_MODULO_64, /* C */ snoop_buffer_address << 0x10, /* D */ 0, /* E */ { 0x8000,0x8000, /* F */ 0xffff,0xffff } }; scb = cs46xx_dsp_create_generic_scb(chip,scb_name,(u32 *)&magic_snoop_scb, dest,"MAGICSNOOPTASK",parent_scb, scb_child_type); return scb; } static struct dsp_scb_descriptor * find_next_free_scb 
(struct snd_cs46xx * chip, struct dsp_scb_descriptor * from) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_scb_descriptor * scb = from; while (scb->next_scb_ptr != ins->the_null_scb) { if (snd_BUG_ON(!scb->next_scb_ptr)) return NULL; scb = scb->next_scb_ptr; } return scb; } static u32 pcm_reader_buffer_addr[DSP_MAX_PCM_CHANNELS] = { 0x0600, /* 1 */ 0x1500, /* 2 */ 0x1580, /* 3 */ 0x1600, /* 4 */ 0x1680, /* 5 */ 0x1700, /* 6 */ 0x1780, /* 7 */ 0x1800, /* 8 */ 0x1880, /* 9 */ 0x1900, /* 10 */ 0x1980, /* 11 */ 0x1A00, /* 12 */ 0x1A80, /* 13 */ 0x1B00, /* 14 */ 0x1B80, /* 15 */ 0x1C00, /* 16 */ 0x1C80, /* 17 */ 0x1D00, /* 18 */ 0x1D80, /* 19 */ 0x1E00, /* 20 */ 0x1E80, /* 21 */ 0x1F00, /* 22 */ 0x1F80, /* 23 */ 0x2000, /* 24 */ 0x2080, /* 25 */ 0x2100, /* 26 */ 0x2180, /* 27 */ 0x2200, /* 28 */ 0x2280, /* 29 */ 0x2300, /* 30 */ 0x2380, /* 31 */ 0x2400, /* 32 */ }; static u32 src_output_buffer_addr[DSP_MAX_SRC_NR] = { 0x2B80, 0x2BA0, 0x2BC0, 0x2BE0, 0x2D00, 0x2D20, 0x2D40, 0x2D60, 0x2D80, 0x2DA0, 0x2DC0, 0x2DE0, 0x2E00, 0x2E20 }; static u32 src_delay_buffer_addr[DSP_MAX_SRC_NR] = { 0x2480, 0x2500, 0x2580, 0x2600, 0x2680, 0x2700, 0x2780, 0x2800, 0x2880, 0x2900, 0x2980, 0x2A00, 0x2A80, 0x2B00 }; struct dsp_pcm_channel_descriptor * cs46xx_dsp_create_pcm_channel (struct snd_cs46xx * chip, u32 sample_rate, void * private_data, u32 hw_dma_addr, int pcm_channel_id) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_scb_descriptor * src_scb = NULL, * pcm_scb, * mixer_scb = NULL; struct dsp_scb_descriptor * src_parent_scb = NULL; /* struct dsp_scb_descriptor * pcm_parent_scb; */ char scb_name[DSP_MAX_SCB_NAME]; int i, pcm_index = -1, insert_point, src_index = -1, pass_through = 0; unsigned long flags; switch (pcm_channel_id) { case DSP_PCM_MAIN_CHANNEL: mixer_scb = ins->master_mix_scb; break; case DSP_PCM_REAR_CHANNEL: mixer_scb = ins->rear_mix_scb; break; case DSP_PCM_CENTER_LFE_CHANNEL: mixer_scb = ins->center_lfe_mix_scb; break; 
case DSP_PCM_S71_CHANNEL: /* TODO */ snd_BUG(); break; case DSP_IEC958_CHANNEL: if (snd_BUG_ON(!ins->asynch_tx_scb)) return NULL; mixer_scb = ins->asynch_tx_scb; /* if sample rate is set to 48khz we pass the Sample Rate Converted (which could alter the raw data stream ...) */ if (sample_rate == 48000) { snd_printdd ("IEC958 pass through\n"); /* Hack to bypass creating a new SRC */ pass_through = 1; } break; default: snd_BUG(); return NULL; } /* default sample rate is 44100 */ if (!sample_rate) sample_rate = 44100; /* search for a already created SRC SCB with the same sample rate */ for (i = 0; i < DSP_MAX_PCM_CHANNELS && (pcm_index == -1 || src_scb == NULL); ++i) { /* virtual channel reserved for capture */ if (i == CS46XX_DSP_CAPTURE_CHANNEL) continue; if (ins->pcm_channels[i].active) { if (!src_scb && ins->pcm_channels[i].sample_rate == sample_rate && ins->pcm_channels[i].mixer_scb == mixer_scb) { src_scb = ins->pcm_channels[i].src_scb; ins->pcm_channels[i].src_scb->ref_count ++; src_index = ins->pcm_channels[i].src_slot; } } else if (pcm_index == -1) { pcm_index = i; } } if (pcm_index == -1) { snd_printk (KERN_ERR "dsp_spos: no free PCM channel\n"); return NULL; } if (src_scb == NULL) { if (ins->nsrc_scb >= DSP_MAX_SRC_NR) { snd_printk(KERN_ERR "dsp_spos: to many SRC instances\n!"); return NULL; } /* find a free slot */ for (i = 0; i < DSP_MAX_SRC_NR; ++i) { if (ins->src_scb_slots[i] == 0) { src_index = i; ins->src_scb_slots[i] = 1; break; } } if (snd_BUG_ON(src_index == -1)) return NULL; /* we need to create a new SRC SCB */ if (mixer_scb->sub_list_ptr == ins->the_null_scb) { src_parent_scb = mixer_scb; insert_point = SCB_ON_PARENT_SUBLIST_SCB; } else { src_parent_scb = find_next_free_scb(chip,mixer_scb->sub_list_ptr); insert_point = SCB_ON_PARENT_NEXT_SCB; } snprintf (scb_name,DSP_MAX_SCB_NAME,"SrcTask_SCB%d",src_index); snd_printdd( "dsp_spos: creating SRC \"%s\"\n",scb_name); src_scb = cs46xx_dsp_create_src_task_scb(chip,scb_name, sample_rate, 
src_output_buffer_addr[src_index], src_delay_buffer_addr[src_index], /* 0x400 - 0x600 source SCBs */ 0x400 + (src_index * 0x10) , src_parent_scb, insert_point, pass_through); if (!src_scb) { snd_printk (KERN_ERR "dsp_spos: failed to create SRCtaskSCB\n"); return NULL; } /* cs46xx_dsp_set_src_sample_rate(chip,src_scb,sample_rate); */ ins->nsrc_scb ++; } snprintf (scb_name,DSP_MAX_SCB_NAME,"PCMReader_SCB%d",pcm_index); snd_printdd( "dsp_spos: creating PCM \"%s\" (%d)\n",scb_name, pcm_channel_id); pcm_scb = cs46xx_dsp_create_pcm_reader_scb(chip,scb_name, pcm_reader_buffer_addr[pcm_index], /* 0x200 - 400 PCMreader SCBs */ (pcm_index * 0x10) + 0x200, pcm_index, /* virtual channel 0-31 */ hw_dma_addr, /* pcm hw addr */ NULL, /* parent SCB ptr */ 0 /* insert point */ ); if (!pcm_scb) { snd_printk (KERN_ERR "dsp_spos: failed to create PCMreaderSCB\n"); return NULL; } spin_lock_irqsave(&chip->reg_lock, flags); ins->pcm_channels[pcm_index].sample_rate = sample_rate; ins->pcm_channels[pcm_index].pcm_reader_scb = pcm_scb; ins->pcm_channels[pcm_index].src_scb = src_scb; ins->pcm_channels[pcm_index].unlinked = 1; ins->pcm_channels[pcm_index].private_data = private_data; ins->pcm_channels[pcm_index].src_slot = src_index; ins->pcm_channels[pcm_index].active = 1; ins->pcm_channels[pcm_index].pcm_slot = pcm_index; ins->pcm_channels[pcm_index].mixer_scb = mixer_scb; ins->npcm_channels ++; spin_unlock_irqrestore(&chip->reg_lock, flags); return (ins->pcm_channels + pcm_index); } int cs46xx_dsp_pcm_channel_set_period (struct snd_cs46xx * chip, struct dsp_pcm_channel_descriptor * pcm_channel, int period_size) { u32 temp = snd_cs46xx_peek (chip,pcm_channel->pcm_reader_scb->address << 2); temp &= ~DMA_RQ_C1_SOURCE_SIZE_MASK; switch (period_size) { case 2048: temp |= DMA_RQ_C1_SOURCE_MOD1024; break; case 1024: temp |= DMA_RQ_C1_SOURCE_MOD512; break; case 512: temp |= DMA_RQ_C1_SOURCE_MOD256; break; case 256: temp |= DMA_RQ_C1_SOURCE_MOD128; break; case 128: temp |= DMA_RQ_C1_SOURCE_MOD64; 
break; case 64: temp |= DMA_RQ_C1_SOURCE_MOD32; break; case 32: temp |= DMA_RQ_C1_SOURCE_MOD16; break; default: snd_printdd ("period size (%d) not supported by HW\n", period_size); return -EINVAL; } snd_cs46xx_poke (chip,pcm_channel->pcm_reader_scb->address << 2,temp); return 0; } int cs46xx_dsp_pcm_ostream_set_period (struct snd_cs46xx * chip, int period_size) { u32 temp = snd_cs46xx_peek (chip,WRITEBACK_SCB_ADDR << 2); temp &= ~DMA_RQ_C1_DEST_SIZE_MASK; switch (period_size) { case 2048: temp |= DMA_RQ_C1_DEST_MOD1024; break; case 1024: temp |= DMA_RQ_C1_DEST_MOD512; break; case 512: temp |= DMA_RQ_C1_DEST_MOD256; break; case 256: temp |= DMA_RQ_C1_DEST_MOD128; break; case 128: temp |= DMA_RQ_C1_DEST_MOD64; break; case 64: temp |= DMA_RQ_C1_DEST_MOD32; break; case 32: temp |= DMA_RQ_C1_DEST_MOD16; break; default: snd_printdd ("period size (%d) not supported by HW\n", period_size); return -EINVAL; } snd_cs46xx_poke (chip,WRITEBACK_SCB_ADDR << 2,temp); return 0; } void cs46xx_dsp_destroy_pcm_channel (struct snd_cs46xx * chip, struct dsp_pcm_channel_descriptor * pcm_channel) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; unsigned long flags; if (snd_BUG_ON(!pcm_channel->active || ins->npcm_channels <= 0 || pcm_channel->src_scb->ref_count <= 0)) return; spin_lock_irqsave(&chip->reg_lock, flags); pcm_channel->unlinked = 1; pcm_channel->active = 0; pcm_channel->private_data = NULL; pcm_channel->src_scb->ref_count --; ins->npcm_channels --; spin_unlock_irqrestore(&chip->reg_lock, flags); cs46xx_dsp_remove_scb(chip,pcm_channel->pcm_reader_scb); if (!pcm_channel->src_scb->ref_count) { cs46xx_dsp_remove_scb(chip,pcm_channel->src_scb); if (snd_BUG_ON(pcm_channel->src_slot < 0 || pcm_channel->src_slot >= DSP_MAX_SRC_NR)) return; ins->src_scb_slots[pcm_channel->src_slot] = 0; ins->nsrc_scb --; } } int cs46xx_dsp_pcm_unlink (struct snd_cs46xx * chip, struct dsp_pcm_channel_descriptor * pcm_channel) { unsigned long flags; if (snd_BUG_ON(!pcm_channel->active || 
chip->dsp_spos_instance->npcm_channels <= 0)) return -EIO; spin_lock_irqsave(&chip->reg_lock, flags); if (pcm_channel->unlinked) { spin_unlock_irqrestore(&chip->reg_lock, flags); return -EIO; } pcm_channel->unlinked = 1; _dsp_unlink_scb (chip,pcm_channel->pcm_reader_scb); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } int cs46xx_dsp_pcm_link (struct snd_cs46xx * chip, struct dsp_pcm_channel_descriptor * pcm_channel) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_scb_descriptor * parent_scb; struct dsp_scb_descriptor * src_scb = pcm_channel->src_scb; unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); if (pcm_channel->unlinked == 0) { spin_unlock_irqrestore(&chip->reg_lock, flags); return -EIO; } parent_scb = src_scb; if (src_scb->sub_list_ptr != ins->the_null_scb) { src_scb->sub_list_ptr->parent_scb_ptr = pcm_channel->pcm_reader_scb; pcm_channel->pcm_reader_scb->next_scb_ptr = src_scb->sub_list_ptr; } src_scb->sub_list_ptr = pcm_channel->pcm_reader_scb; snd_BUG_ON(pcm_channel->pcm_reader_scb->parent_scb_ptr); pcm_channel->pcm_reader_scb->parent_scb_ptr = parent_scb; /* update SCB entry in DSP RAM */ cs46xx_dsp_spos_update_scb(chip,pcm_channel->pcm_reader_scb); /* update parent SCB entry */ cs46xx_dsp_spos_update_scb(chip,parent_scb); pcm_channel->unlinked = 0; spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } struct dsp_scb_descriptor * cs46xx_add_record_source (struct snd_cs46xx *chip, struct dsp_scb_descriptor * source, u16 addr, char * scb_name) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_scb_descriptor * parent; struct dsp_scb_descriptor * pcm_input; int insert_point; if (snd_BUG_ON(!ins->record_mixer_scb)) return NULL; if (ins->record_mixer_scb->sub_list_ptr != ins->the_null_scb) { parent = find_next_free_scb (chip,ins->record_mixer_scb->sub_list_ptr); insert_point = SCB_ON_PARENT_NEXT_SCB; } else { parent = ins->record_mixer_scb; insert_point = SCB_ON_PARENT_SUBLIST_SCB; } 
pcm_input = cs46xx_dsp_create_pcm_serial_input_scb(chip,scb_name,addr, source, parent, insert_point); return pcm_input; } int cs46xx_src_unlink(struct snd_cs46xx *chip, struct dsp_scb_descriptor * src) { unsigned long flags; if (snd_BUG_ON(!src->parent_scb_ptr)) return -EINVAL; /* mute SCB */ cs46xx_dsp_scb_set_volume (chip,src,0,0); spin_lock_irqsave(&chip->reg_lock, flags); _dsp_unlink_scb (chip,src); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } int cs46xx_src_link(struct snd_cs46xx *chip, struct dsp_scb_descriptor * src) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; struct dsp_scb_descriptor * parent_scb; if (snd_BUG_ON(src->parent_scb_ptr)) return -EINVAL; if (snd_BUG_ON(!ins->master_mix_scb)) return -EINVAL; if (ins->master_mix_scb->sub_list_ptr != ins->the_null_scb) { parent_scb = find_next_free_scb (chip,ins->master_mix_scb->sub_list_ptr); parent_scb->next_scb_ptr = src; } else { parent_scb = ins->master_mix_scb; parent_scb->sub_list_ptr = src; } src->parent_scb_ptr = parent_scb; /* update entry in DSP RAM */ cs46xx_dsp_spos_update_scb(chip,parent_scb); return 0; } int cs46xx_dsp_enable_spdif_out (struct snd_cs46xx *chip) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; if ( ! (ins->spdif_status_out & DSP_SPDIF_STATUS_HW_ENABLED) ) { cs46xx_dsp_enable_spdif_hw (chip); } /* dont touch anything if SPDIF is open */ if ( ins->spdif_status_out & DSP_SPDIF_STATUS_PLAYBACK_OPEN) { /* when cs46xx_iec958_post_close(...) is called it will call this function if necessary depending on this bit */ ins->spdif_status_out |= DSP_SPDIF_STATUS_OUTPUT_ENABLED; return -EBUSY; } if (snd_BUG_ON(ins->asynch_tx_scb)) return -EINVAL; if (snd_BUG_ON(ins->master_mix_scb->next_scb_ptr != ins->the_null_scb)) return -EINVAL; /* reset output snooper sample buffer pointer */ snd_cs46xx_poke (chip, (ins->ref_snoop_scb->address + 2) << 2, (OUTPUT_SNOOP_BUFFER + 0x10) << 0x10 ); /* The asynch. 
transfer task */ ins->asynch_tx_scb = cs46xx_dsp_create_asynch_fg_tx_scb(chip,"AsynchFGTxSCB",ASYNCTX_SCB_ADDR, SPDIFO_SCB_INST, SPDIFO_IP_OUTPUT_BUFFER1, ins->master_mix_scb, SCB_ON_PARENT_NEXT_SCB); if (!ins->asynch_tx_scb) return -ENOMEM; ins->spdif_pcm_input_scb = cs46xx_dsp_create_pcm_serial_input_scb(chip,"PCMSerialInput_II", PCMSERIALINII_SCB_ADDR, ins->ref_snoop_scb, ins->asynch_tx_scb, SCB_ON_PARENT_SUBLIST_SCB); if (!ins->spdif_pcm_input_scb) return -ENOMEM; /* monitor state */ ins->spdif_status_out |= DSP_SPDIF_STATUS_OUTPUT_ENABLED; return 0; } int cs46xx_dsp_disable_spdif_out (struct snd_cs46xx *chip) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; /* dont touch anything if SPDIF is open */ if ( ins->spdif_status_out & DSP_SPDIF_STATUS_PLAYBACK_OPEN) { ins->spdif_status_out &= ~DSP_SPDIF_STATUS_OUTPUT_ENABLED; return -EBUSY; } /* check integrety */ if (snd_BUG_ON(!ins->asynch_tx_scb)) return -EINVAL; if (snd_BUG_ON(!ins->spdif_pcm_input_scb)) return -EINVAL; if (snd_BUG_ON(ins->master_mix_scb->next_scb_ptr != ins->asynch_tx_scb)) return -EINVAL; if (snd_BUG_ON(ins->asynch_tx_scb->parent_scb_ptr != ins->master_mix_scb)) return -EINVAL; cs46xx_dsp_remove_scb (chip,ins->spdif_pcm_input_scb); cs46xx_dsp_remove_scb (chip,ins->asynch_tx_scb); ins->spdif_pcm_input_scb = NULL; ins->asynch_tx_scb = NULL; /* clear buffer to prevent any undesired noise */ _dsp_clear_sample_buffer(chip,SPDIFO_IP_OUTPUT_BUFFER1,256); /* monitor state */ ins->spdif_status_out &= ~DSP_SPDIF_STATUS_OUTPUT_ENABLED; return 0; } int cs46xx_iec958_pre_open (struct snd_cs46xx *chip) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; if ( ins->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED ) { /* remove AsynchFGTxSCB and and PCMSerialInput_II */ cs46xx_dsp_disable_spdif_out (chip); /* save state */ ins->spdif_status_out |= DSP_SPDIF_STATUS_OUTPUT_ENABLED; } /* if not enabled already */ if ( !(ins->spdif_status_out & DSP_SPDIF_STATUS_HW_ENABLED) ) { 
cs46xx_dsp_enable_spdif_hw (chip); } /* Create the asynch. transfer task for playback */ ins->asynch_tx_scb = cs46xx_dsp_create_asynch_fg_tx_scb(chip,"AsynchFGTxSCB",ASYNCTX_SCB_ADDR, SPDIFO_SCB_INST, SPDIFO_IP_OUTPUT_BUFFER1, ins->master_mix_scb, SCB_ON_PARENT_NEXT_SCB); /* set spdif channel status value for streaming */ cs46xx_poke_via_dsp (chip,SP_SPDOUT_CSUV, ins->spdif_csuv_stream); ins->spdif_status_out |= DSP_SPDIF_STATUS_PLAYBACK_OPEN; return 0; } int cs46xx_iec958_post_close (struct snd_cs46xx *chip) { struct dsp_spos_instance * ins = chip->dsp_spos_instance; if (snd_BUG_ON(!ins->asynch_tx_scb)) return -EINVAL; ins->spdif_status_out &= ~DSP_SPDIF_STATUS_PLAYBACK_OPEN; /* restore settings */ cs46xx_poke_via_dsp (chip,SP_SPDOUT_CSUV, ins->spdif_csuv_default); /* deallocate stuff */ if (ins->spdif_pcm_input_scb != NULL) { cs46xx_dsp_remove_scb (chip,ins->spdif_pcm_input_scb); ins->spdif_pcm_input_scb = NULL; } cs46xx_dsp_remove_scb (chip,ins->asynch_tx_scb); ins->asynch_tx_scb = NULL; /* clear buffer to prevent any undesired noise */ _dsp_clear_sample_buffer(chip,SPDIFO_IP_OUTPUT_BUFFER1,256); /* restore state */ if ( ins->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED ) { cs46xx_dsp_enable_spdif_out (chip); } return 0; }
gpl-2.0
gukai/kernel
sound/core/seq/seq_fifo.c
12686
6036
/*
 * ALSA sequencer FIFO
 * Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/core.h>
#include <linux/slab.h>
#include "seq_fifo.h"
#include "seq_lock.h"


/* FIFO */

/*
 * Allocate and initialize a sequencer FIFO whose backing cell pool
 * holds 'poolsize' events.  Returns the new FIFO, or NULL when the
 * FIFO, the pool, or the pool initialization fails.
 */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
	struct snd_seq_fifo *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (f == NULL) {
		snd_printd("malloc failed for snd_seq_fifo_new() \n");
		return NULL;
	}

	f->pool = snd_seq_pool_new(poolsize);
	if (f->pool == NULL)
		goto free_fifo;
	if (snd_seq_pool_init(f->pool) < 0)
		goto free_pool;

	/* locking, the reader sleep queue, and the empty-queue state */
	spin_lock_init(&f->lock);
	snd_use_lock_init(&f->use_lock);
	init_waitqueue_head(&f->input_sleep);
	atomic_set(&f->overflow, 0);

	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;

	return f;

free_pool:
	snd_seq_pool_delete(&f->pool);
free_fifo:
	kfree(f);
	return NULL;
}

/*
 * Destroy a FIFO: clear '*fifo', drain all queued cells, wake any
 * sleeping readers, release the backing pool, and free the FIFO.
 */
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
	struct snd_seq_fifo *f;

	if (snd_BUG_ON(!fifo))
		return;
	f = *fifo;
	if (snd_BUG_ON(!f))
		return;
	*fifo = NULL;

	snd_seq_fifo_clear(f);

	/* wake up clients if any */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	/* release the event pool, if one was attached */
	if (f->pool) {
		snd_seq_pool_done(f->pool);
		snd_seq_pool_delete(&f->pool);
	}

	kfree(f);
}

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);
void snd_seq_fifo_clear(struct snd_seq_fifo *f) { struct snd_seq_event_cell *cell; unsigned long flags; /* clear overflow flag */ atomic_set(&f->overflow, 0); snd_use_lock_sync(&f->use_lock); spin_lock_irqsave(&f->lock, flags); /* drain the fifo */ while ((cell = fifo_cell_out(f)) != NULL) { snd_seq_cell_free(cell); } spin_unlock_irqrestore(&f->lock, flags); } /* enqueue event to fifo */ int snd_seq_fifo_event_in(struct snd_seq_fifo *f, struct snd_seq_event *event) { struct snd_seq_event_cell *cell; unsigned long flags; int err; if (snd_BUG_ON(!f)) return -EINVAL; snd_use_lock_use(&f->use_lock); err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */ if (err < 0) { if (err == -ENOMEM) atomic_inc(&f->overflow); snd_use_lock_free(&f->use_lock); return err; } /* append new cells to fifo */ spin_lock_irqsave(&f->lock, flags); if (f->tail != NULL) f->tail->next = cell; f->tail = cell; if (f->head == NULL) f->head = cell; f->cells++; spin_unlock_irqrestore(&f->lock, flags); /* wakeup client */ if (waitqueue_active(&f->input_sleep)) wake_up(&f->input_sleep); snd_use_lock_free(&f->use_lock); return 0; /* success */ } /* dequeue cell from fifo */ static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f) { struct snd_seq_event_cell *cell; if ((cell = f->head) != NULL) { f->head = cell->next; /* reset tail if this was the last element */ if (f->tail == cell) f->tail = NULL; cell->next = NULL; f->cells--; } return cell; } /* dequeue cell from fifo and copy on user space */ int snd_seq_fifo_cell_out(struct snd_seq_fifo *f, struct snd_seq_event_cell **cellp, int nonblock) { struct snd_seq_event_cell *cell; unsigned long flags; wait_queue_t wait; if (snd_BUG_ON(!f)) return -EINVAL; *cellp = NULL; init_waitqueue_entry(&wait, current); spin_lock_irqsave(&f->lock, flags); while ((cell = fifo_cell_out(f)) == NULL) { if (nonblock) { /* non-blocking - return immediately */ spin_unlock_irqrestore(&f->lock, flags); return -EAGAIN; } 
set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&f->input_sleep, &wait); spin_unlock_irq(&f->lock); schedule(); spin_lock_irq(&f->lock); remove_wait_queue(&f->input_sleep, &wait); if (signal_pending(current)) { spin_unlock_irqrestore(&f->lock, flags); return -ERESTARTSYS; } } spin_unlock_irqrestore(&f->lock, flags); *cellp = cell; return 0; } void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f, struct snd_seq_event_cell *cell) { unsigned long flags; if (cell) { spin_lock_irqsave(&f->lock, flags); cell->next = f->head; f->head = cell; f->cells++; spin_unlock_irqrestore(&f->lock, flags); } } /* polling; return non-zero if queue is available */ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table *wait) { poll_wait(file, &f->input_sleep, wait); return (f->cells > 0); } /* change the size of pool; all old events are removed */ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize) { unsigned long flags; struct snd_seq_pool *newpool, *oldpool; struct snd_seq_event_cell *cell, *next, *oldhead; if (snd_BUG_ON(!f || !f->pool)) return -EINVAL; /* allocate new pool */ newpool = snd_seq_pool_new(poolsize); if (newpool == NULL) return -ENOMEM; if (snd_seq_pool_init(newpool) < 0) { snd_seq_pool_delete(&newpool); return -ENOMEM; } spin_lock_irqsave(&f->lock, flags); /* remember old pool */ oldpool = f->pool; oldhead = f->head; /* exchange pools */ f->pool = newpool; f->head = NULL; f->tail = NULL; f->cells = 0; /* NOTE: overflow flag is not cleared */ spin_unlock_irqrestore(&f->lock, flags); /* release cells in old pool */ for (cell = oldhead; cell; cell = next) { next = cell->next; snd_seq_cell_free(cell); } snd_seq_pool_delete(&oldpool); return 0; }
gpl-2.0
zlatinski/omap-android-drm-kms
drivers/infiniband/core/mad_rmpp.c
12942
27619
/* * Copyright (c) 2005 Intel Inc. All rights reserved. * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/slab.h> #include "mad_priv.h" #include "mad_rmpp.h" enum rmpp_state { RMPP_STATE_ACTIVE, RMPP_STATE_TIMEOUT, RMPP_STATE_COMPLETE, RMPP_STATE_CANCELING }; struct mad_rmpp_recv { struct ib_mad_agent_private *agent; struct list_head list; struct delayed_work timeout_work; struct delayed_work cleanup_work; struct completion comp; enum rmpp_state state; spinlock_t lock; atomic_t refcount; struct ib_ah *ah; struct ib_mad_recv_wc *rmpp_wc; struct ib_mad_recv_buf *cur_seg_buf; int last_ack; int seg_num; int newwin; int repwin; __be64 tid; u32 src_qp; u16 slid; u8 mgmt_class; u8 class_version; u8 method; }; static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) { if (atomic_dec_and_test(&rmpp_recv->refcount)) complete(&rmpp_recv->comp); } static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) { deref_rmpp_recv(rmpp_recv); wait_for_completion(&rmpp_recv->comp); ib_destroy_ah(rmpp_recv->ah); kfree(rmpp_recv); } void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent) { struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { if (rmpp_recv->state != RMPP_STATE_COMPLETE) ib_free_recv_mad(rmpp_recv->rmpp_wc); rmpp_recv->state = RMPP_STATE_CANCELING; } spin_unlock_irqrestore(&agent->lock, flags); list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { cancel_delayed_work(&rmpp_recv->timeout_work); cancel_delayed_work(&rmpp_recv->cleanup_work); } flush_workqueue(agent->qp_info->port_priv->wq); list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv, &agent->rmpp_list, list) { list_del(&rmpp_recv->list); destroy_rmpp_recv(rmpp_recv); } } static void format_ack(struct ib_mad_send_buf *msg, struct ib_rmpp_mad *data, struct mad_rmpp_recv *rmpp_recv) { struct ib_rmpp_mad *ack = msg->mad; unsigned long flags; memcpy(ack, &data->mad_hdr, msg->hdr_len); ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP; ack->rmpp_hdr.rmpp_type = 
IB_MGMT_RMPP_TYPE_ACK; ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); spin_lock_irqsave(&rmpp_recv->lock, flags); rmpp_recv->last_ack = rmpp_recv->seg_num; ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num); ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin); spin_unlock_irqrestore(&rmpp_recv->lock, flags); } static void ack_recv(struct mad_rmpp_recv *rmpp_recv, struct ib_mad_recv_wc *recv_wc) { struct ib_mad_send_buf *msg; int ret, hdr_len; hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, recv_wc->wc->pkey_index, 1, hdr_len, 0, GFP_KERNEL); if (IS_ERR(msg)) return; format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv); msg->ah = rmpp_recv->ah; ret = ib_post_send_mad(msg, NULL); if (ret) ib_free_send_mad(msg); } static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent, struct ib_mad_recv_wc *recv_wc) { struct ib_mad_send_buf *msg; struct ib_ah *ah; int hdr_len; ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, recv_wc->recv_buf.grh, agent->port_num); if (IS_ERR(ah)) return (void *) ah; hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(agent, recv_wc->wc->src_qp, recv_wc->wc->pkey_index, 1, hdr_len, 0, GFP_KERNEL); if (IS_ERR(msg)) ib_destroy_ah(ah); else { msg->ah = ah; msg->context[0] = ah; } return msg; } static void ack_ds_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *recv_wc) { struct ib_mad_send_buf *msg; struct ib_rmpp_mad *rmpp_mad; int ret; msg = alloc_response_msg(&agent->agent, recv_wc); if (IS_ERR(msg)) return; rmpp_mad = msg->mad; memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len); rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); rmpp_mad->rmpp_hdr.seg_num = 0; rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1); ret = ib_post_send_mad(msg, NULL); if 
(ret) { ib_destroy_ah(msg->ah); ib_free_send_mad(msg); } } void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc) { if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah) ib_destroy_ah(mad_send_wc->send_buf->ah); ib_free_send_mad(mad_send_wc->send_buf); } static void nack_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *recv_wc, u8 rmpp_status) { struct ib_mad_send_buf *msg; struct ib_rmpp_mad *rmpp_mad; int ret; msg = alloc_response_msg(&agent->agent, recv_wc); if (IS_ERR(msg)) return; rmpp_mad = msg->mad; memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len); rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP; rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION; rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status; rmpp_mad->rmpp_hdr.seg_num = 0; rmpp_mad->rmpp_hdr.paylen_newwin = 0; ret = ib_post_send_mad(msg, NULL); if (ret) { ib_destroy_ah(msg->ah); ib_free_send_mad(msg); } } static void recv_timeout_handler(struct work_struct *work) { struct mad_rmpp_recv *rmpp_recv = container_of(work, struct mad_rmpp_recv, timeout_work.work); struct ib_mad_recv_wc *rmpp_wc; unsigned long flags; spin_lock_irqsave(&rmpp_recv->agent->lock, flags); if (rmpp_recv->state != RMPP_STATE_ACTIVE) { spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); return; } rmpp_recv->state = RMPP_STATE_TIMEOUT; list_del(&rmpp_recv->list); spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); rmpp_wc = rmpp_recv->rmpp_wc; nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L); destroy_rmpp_recv(rmpp_recv); ib_free_recv_mad(rmpp_wc); } static void recv_cleanup_handler(struct work_struct *work) { struct mad_rmpp_recv *rmpp_recv = container_of(work, struct mad_rmpp_recv, cleanup_work.work); unsigned long flags; spin_lock_irqsave(&rmpp_recv->agent->lock, flags); if (rmpp_recv->state == RMPP_STATE_CANCELING) { 
spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); return; } list_del(&rmpp_recv->list); spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); destroy_rmpp_recv(rmpp_recv); } static struct mad_rmpp_recv * create_rmpp_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; struct ib_mad_hdr *mad_hdr; rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL); if (!rmpp_recv) return NULL; rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd, mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, agent->agent.port_num); if (IS_ERR(rmpp_recv->ah)) goto error; rmpp_recv->agent = agent; init_completion(&rmpp_recv->comp); INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler); INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler); spin_lock_init(&rmpp_recv->lock); rmpp_recv->state = RMPP_STATE_ACTIVE; atomic_set(&rmpp_recv->refcount, 1); rmpp_recv->rmpp_wc = mad_recv_wc; rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf; rmpp_recv->newwin = 1; rmpp_recv->seg_num = 1; rmpp_recv->last_ack = 0; rmpp_recv->repwin = 1; mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr; rmpp_recv->tid = mad_hdr->tid; rmpp_recv->src_qp = mad_recv_wc->wc->src_qp; rmpp_recv->slid = mad_recv_wc->wc->slid; rmpp_recv->mgmt_class = mad_hdr->mgmt_class; rmpp_recv->class_version = mad_hdr->class_version; rmpp_recv->method = mad_hdr->method; return rmpp_recv; error: kfree(rmpp_recv); return NULL; } static struct mad_rmpp_recv * find_rmpp_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr; list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { if (rmpp_recv->tid == mad_hdr->tid && rmpp_recv->src_qp == mad_recv_wc->wc->src_qp && rmpp_recv->slid == mad_recv_wc->wc->slid && rmpp_recv->mgmt_class == mad_hdr->mgmt_class && rmpp_recv->class_version == mad_hdr->class_version && rmpp_recv->method == mad_hdr->method) return 
rmpp_recv; } return NULL; } static struct mad_rmpp_recv * acquire_rmpp_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); rmpp_recv = find_rmpp_recv(agent, mad_recv_wc); if (rmpp_recv) atomic_inc(&rmpp_recv->refcount); spin_unlock_irqrestore(&agent->lock, flags); return rmpp_recv; } static struct mad_rmpp_recv * insert_rmpp_recv(struct ib_mad_agent_private *agent, struct mad_rmpp_recv *rmpp_recv) { struct mad_rmpp_recv *cur_rmpp_recv; cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc); if (!cur_rmpp_recv) list_add_tail(&rmpp_recv->list, &agent->rmpp_list); return cur_rmpp_recv; } static inline int get_last_flag(struct ib_mad_recv_buf *seg) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *) seg->mad; return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST; } static inline int get_seg_num(struct ib_mad_recv_buf *seg) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *) seg->mad; return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); } static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list, struct ib_mad_recv_buf *seg) { if (seg->list.next == rmpp_list) return NULL; return container_of(seg->list.next, struct ib_mad_recv_buf, list); } static inline int window_size(struct ib_mad_agent_private *agent) { return max(agent->qp_info->recv_queue.max_active >> 3, 1); } static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list, int seg_num) { struct ib_mad_recv_buf *seg_buf; int cur_seg_num; list_for_each_entry_reverse(seg_buf, rmpp_list, list) { cur_seg_num = get_seg_num(seg_buf); if (seg_num > cur_seg_num) return seg_buf; if (seg_num == cur_seg_num) break; } return NULL; } static void update_seg_num(struct mad_rmpp_recv *rmpp_recv, struct ib_mad_recv_buf *new_buf) { struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list; while (new_buf && (get_seg_num(new_buf) == 
rmpp_recv->seg_num + 1)) { rmpp_recv->cur_seg_buf = new_buf; rmpp_recv->seg_num++; new_buf = get_next_seg(rmpp_list, new_buf); } } static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv) { struct ib_rmpp_mad *rmpp_mad; int hdr_size, data_size, pad; rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad; hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); data_size = sizeof(struct ib_rmpp_mad) - hdr_size; pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); if (pad > IB_MGMT_RMPP_DATA || pad < 0) pad = 0; return hdr_size + rmpp_recv->seg_num * data_size - pad; } static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv) { struct ib_mad_recv_wc *rmpp_wc; ack_recv(rmpp_recv, rmpp_recv->rmpp_wc); if (rmpp_recv->seg_num > 1) cancel_delayed_work(&rmpp_recv->timeout_work); rmpp_wc = rmpp_recv->rmpp_wc; rmpp_wc->mad_len = get_mad_len(rmpp_recv); /* 10 seconds until we can find the packet lifetime */ queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq, &rmpp_recv->cleanup_work, msecs_to_jiffies(10000)); return rmpp_wc; } static struct ib_mad_recv_wc * continue_rmpp(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; struct ib_mad_recv_buf *prev_buf; struct ib_mad_recv_wc *done_wc; int seg_num; unsigned long flags; rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc); if (!rmpp_recv) goto drop1; seg_num = get_seg_num(&mad_recv_wc->recv_buf); spin_lock_irqsave(&rmpp_recv->lock, flags); if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) || (seg_num > rmpp_recv->newwin)) goto drop3; if ((seg_num <= rmpp_recv->last_ack) || (rmpp_recv->state == RMPP_STATE_COMPLETE)) { spin_unlock_irqrestore(&rmpp_recv->lock, flags); ack_recv(rmpp_recv, mad_recv_wc); goto drop2; } prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num); if (!prev_buf) goto drop3; done_wc = NULL; list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list); if (rmpp_recv->cur_seg_buf == 
prev_buf) { update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf); if (get_last_flag(rmpp_recv->cur_seg_buf)) { rmpp_recv->state = RMPP_STATE_COMPLETE; spin_unlock_irqrestore(&rmpp_recv->lock, flags); done_wc = complete_rmpp(rmpp_recv); goto out; } else if (rmpp_recv->seg_num == rmpp_recv->newwin) { rmpp_recv->newwin += window_size(agent); spin_unlock_irqrestore(&rmpp_recv->lock, flags); ack_recv(rmpp_recv, mad_recv_wc); goto out; } } spin_unlock_irqrestore(&rmpp_recv->lock, flags); out: deref_rmpp_recv(rmpp_recv); return done_wc; drop3: spin_unlock_irqrestore(&rmpp_recv->lock, flags); drop2: deref_rmpp_recv(rmpp_recv); drop1: ib_free_recv_mad(mad_recv_wc); return NULL; } static struct ib_mad_recv_wc * start_rmpp(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; unsigned long flags; rmpp_recv = create_rmpp_recv(agent, mad_recv_wc); if (!rmpp_recv) { ib_free_recv_mad(mad_recv_wc); return NULL; } spin_lock_irqsave(&agent->lock, flags); if (insert_rmpp_recv(agent, rmpp_recv)) { spin_unlock_irqrestore(&agent->lock, flags); /* duplicate first MAD */ destroy_rmpp_recv(rmpp_recv); return continue_rmpp(agent, mad_recv_wc); } atomic_inc(&rmpp_recv->refcount); if (get_last_flag(&mad_recv_wc->recv_buf)) { rmpp_recv->state = RMPP_STATE_COMPLETE; spin_unlock_irqrestore(&agent->lock, flags); complete_rmpp(rmpp_recv); } else { spin_unlock_irqrestore(&agent->lock, flags); /* 40 seconds until we can find the packet lifetimes */ queue_delayed_work(agent->qp_info->port_priv->wq, &rmpp_recv->timeout_work, msecs_to_jiffies(40000)); rmpp_recv->newwin += window_size(agent); ack_recv(rmpp_recv, mad_recv_wc); mad_recv_wc = NULL; } deref_rmpp_recv(rmpp_recv); return mad_recv_wc; } static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; int timeout; u32 paylen = 0; rmpp_mad = mad_send_wr->send_buf.mad; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); rmpp_mad->rmpp_hdr.seg_num = 
cpu_to_be32(++mad_send_wr->seg_num); if (mad_send_wr->seg_num == 1) { rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST; paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA - mad_send_wr->pad; } if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) { rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST; paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad; } rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen); /* 2 seconds for an ACK until we can find the packet lifetime */ timeout = mad_send_wr->send_buf.timeout_ms; if (!timeout || timeout > 2000) mad_send_wr->timeout = msecs_to_jiffies(2000); return ib_send_mad(mad_send_wr); } static void abort_send(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc wc; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); if (!mad_send_wr) goto out; /* Unmatched send */ if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) || (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) goto out; /* Send is already done */ ib_mark_mad_done(mad_send_wr); spin_unlock_irqrestore(&agent->lock, flags); wc.status = IB_WC_REM_ABORT_ERR; wc.vendor_err = rmpp_status; wc.send_buf = &mad_send_wr->send_buf; ib_mad_complete_send_wr(mad_send_wr, &wc); return; out: spin_unlock_irqrestore(&agent->lock, flags); } static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr, int seg_num) { struct list_head *list; wr->last_ack = seg_num; list = &wr->last_ack_seg->list; list_for_each_entry(wr->last_ack_seg, list, list) if (wr->last_ack_seg->num == seg_num) break; } static void process_ds_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc, int newwin) { struct mad_rmpp_recv *rmpp_recv; rmpp_recv = find_rmpp_recv(agent, mad_recv_wc); if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE) rmpp_recv->repwin = 
newwin; } static void process_rmpp_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_rmpp_mad *rmpp_mad; unsigned long flags; int seg_num, newwin, ret; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); return; } seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); if (newwin < seg_num) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); return; } spin_lock_irqsave(&agent->lock, flags); mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); if (!mad_send_wr) { if (!seg_num) process_ds_ack(agent, mad_recv_wc, newwin); goto out; /* Unmatched or DS RMPP ACK */ } if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) && (mad_send_wr->timeout)) { spin_unlock_irqrestore(&agent->lock, flags); ack_ds_ack(agent, mad_recv_wc); return; /* Repeated ACK for DS RMPP transaction */ } if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) || (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) goto out; /* Send is already done */ if (seg_num > mad_send_wr->send_buf.seg_count || seg_num > mad_send_wr->newwin) { spin_unlock_irqrestore(&agent->lock, flags); abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); return; } if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack) goto out; /* Old ACK */ if (seg_num > mad_send_wr->last_ack) { adjust_last_ack(mad_send_wr, seg_num); mad_send_wr->retries_left = mad_send_wr->max_retries; } mad_send_wr->newwin = newwin; if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) { /* If no response is expected, the ACK completes the send */ if (!mad_send_wr->send_buf.timeout_ms) 
{ struct ib_mad_send_wc wc; ib_mark_mad_done(mad_send_wr); spin_unlock_irqrestore(&agent->lock, flags); wc.status = IB_WC_SUCCESS; wc.vendor_err = 0; wc.send_buf = &mad_send_wr->send_buf; ib_mad_complete_send_wr(mad_send_wr, &wc); return; } if (mad_send_wr->refcount == 1) ib_reset_mad_timeout(mad_send_wr, mad_send_wr->send_buf.timeout_ms); spin_unlock_irqrestore(&agent->lock, flags); ack_ds_ack(agent, mad_recv_wc); return; } else if (mad_send_wr->refcount == 1 && mad_send_wr->seg_num < mad_send_wr->newwin && mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) { /* Send failure will just result in a timeout/retry */ ret = send_next_seg(mad_send_wr); if (ret) goto out; mad_send_wr->refcount++; list_move_tail(&mad_send_wr->agent_list, &mad_send_wr->mad_agent_priv->send_list); } out: spin_unlock_irqrestore(&agent->lock, flags); } static struct ib_mad_recv_wc * process_rmpp_data(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_hdr *rmpp_hdr; u8 rmpp_status; rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr; if (rmpp_hdr->rmpp_status) { rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS; goto bad; } if (rmpp_hdr->seg_num == cpu_to_be32(1)) { if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) { rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; goto bad; } return start_rmpp(agent, mad_recv_wc); } else { if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) { rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; goto bad; } return continue_rmpp(agent, mad_recv_wc); } bad: nack_recv(agent, mad_recv_wc, rmpp_status); ib_free_recv_mad(mad_recv_wc); return NULL; } static void process_rmpp_stop(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, 
IB_MGMT_RMPP_STATUS_BAD_STATUS); } else abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status); } static void process_rmpp_abort(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN || rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); } else abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status); } struct ib_mad_recv_wc * ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE)) return mad_recv_wc; if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); goto out; } switch (rmpp_mad->rmpp_hdr.rmpp_type) { case IB_MGMT_RMPP_TYPE_DATA: return process_rmpp_data(agent, mad_recv_wc); case IB_MGMT_RMPP_TYPE_ACK: process_rmpp_ack(agent, mad_recv_wc); break; case IB_MGMT_RMPP_TYPE_STOP: process_rmpp_stop(agent, mad_recv_wc); break; case IB_MGMT_RMPP_TYPE_ABORT: process_rmpp_abort(agent, mad_recv_wc); break; default: abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); break; } out: ib_free_recv_mad(mad_recv_wc); return NULL; } static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv; struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad; struct mad_rmpp_recv *rmpp_recv; struct ib_ah_attr ah_attr; unsigned long flags; int newwin = 1; if (!(mad_hdr->method & IB_MGMT_METHOD_RESP)) goto out; 
spin_lock_irqsave(&agent->lock, flags); list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { if (rmpp_recv->tid != mad_hdr->tid || rmpp_recv->mgmt_class != mad_hdr->mgmt_class || rmpp_recv->class_version != mad_hdr->class_version || (rmpp_recv->method & IB_MGMT_METHOD_RESP)) continue; if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr)) continue; if (rmpp_recv->slid == ah_attr.dlid) { newwin = rmpp_recv->repwin; break; } } spin_unlock_irqrestore(&agent->lock, flags); out: return newwin; } int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; int ret; rmpp_mad = mad_send_wr->send_buf.mad; if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return IB_RMPP_RESULT_UNHANDLED; if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) { mad_send_wr->seg_num = 1; return IB_RMPP_RESULT_INTERNAL; } mad_send_wr->newwin = init_newwin(mad_send_wr); /* We need to wait for the final ACK even if there isn't a response */ mad_send_wr->refcount += (mad_send_wr->timeout == 0); ret = send_next_seg(mad_send_wr); if (!ret) return IB_RMPP_RESULT_CONSUMED; return ret; } int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_send_wc *mad_send_wc) { struct ib_rmpp_mad *rmpp_mad; int ret; rmpp_mad = mad_send_wr->send_buf.mad; if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */ if (mad_send_wc->status != IB_WC_SUCCESS || mad_send_wr->status != IB_WC_SUCCESS) return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */ if (!mad_send_wr->timeout) return IB_RMPP_RESULT_PROCESSED; /* Response received */ if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) { mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); return IB_RMPP_RESULT_PROCESSED; /* Send done */ } if 
(mad_send_wr->seg_num == mad_send_wr->newwin || mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */ ret = send_next_seg(mad_send_wr); if (ret) { mad_send_wc->status = IB_WC_GENERAL_ERR; return IB_RMPP_RESULT_PROCESSED; } return IB_RMPP_RESULT_CONSUMED; } int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; int ret; rmpp_mad = mad_send_wr->send_buf.mad; if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) return IB_RMPP_RESULT_PROCESSED; mad_send_wr->seg_num = mad_send_wr->last_ack; mad_send_wr->cur_seg = mad_send_wr->last_ack_seg; ret = send_next_seg(mad_send_wr); if (ret) return IB_RMPP_RESULT_PROCESSED; return IB_RMPP_RESULT_CONSUMED; }
gpl-2.0
ezterry/kernel-biff-testing
drivers/serial/mpc52xx_uart.c
143
39870
/* * Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs. * * FIXME According to the usermanual the status bits in the status register * are only updated when the peripherals access the FIFO and not when the * CPU access them. So since we use this bits to know when we stop writing * and reading, they may not be updated in-time and a race condition may * exists. But I haven't be able to prove this and I don't care. But if * any problem arises, it might worth checking. The TX/RX FIFO Stats * registers should be used in addition. * Update: Actually, they seem updated ... At least the bits we use. * * * Maintainer : Sylvain Munaut <tnt@246tNt.com> * * Some of the code has been inspired/copied from the 2.4 code written * by Dale Farnsworth <dfarnsworth@mvista.com>. * * Copyright (C) 2008 Freescale Semiconductor Inc. * John Rigby <jrigby@gmail.com> * Added support for MPC5121 * Copyright (C) 2006 Secret Lab Technologies Ltd. * Grant Likely <grant.likely@secretlab.ca> * Copyright (C) 2004-2006 Sylvain Munaut <tnt@246tNt.com> * Copyright (C) 2003 MontaVista, Software, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ #undef DEBUG #include <linux/device.h> #include <linux/module.h> #include <linux/tty.h> #include <linux/serial.h> #include <linux/sysrq.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/clk.h> #include <asm/mpc52xx.h> #include <asm/mpc52xx_psc.h> #if defined(CONFIG_SERIAL_MPC52xx_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/serial_core.h> /* We've been assigned a range on the "Low-density serial ports" major */ #define SERIAL_PSC_MAJOR 204 #define SERIAL_PSC_MINOR 148 #define ISR_PASS_LIMIT 256 /* Max number of iteration in the interrupt */ static struct uart_port mpc52xx_uart_ports[MPC52xx_PSC_MAXNUM]; /* Rem: - We use the read_status_mask as a shadow of * psc->mpc52xx_psc_imr * - It's important that is array is all zero on start as we * use it to know if it's initialized or not ! If it's not sure * it's cleared, then a memset(...,0,...) should be added to * the console_init */ /* lookup table for matching device nodes to index numbers */ static struct device_node *mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM]; static void mpc52xx_uart_of_enumerate(void); #define PSC(port) ((struct mpc52xx_psc __iomem *)((port)->membase)) /* Forward declaration of the interruption handling routine */ static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id); static irqreturn_t mpc5xxx_uart_process_int(struct uart_port *port); /* Simple macro to test if a port is console or not. This one is taken * for serial_core.c and maybe should be moved to serial_core.h ? 
*/ #ifdef CONFIG_SERIAL_CORE_CONSOLE #define uart_console(port) \ ((port)->cons && (port)->cons->index == (port)->line) #else #define uart_console(port) (0) #endif /* ======================================================================== */ /* PSC fifo operations for isolating differences between 52xx and 512x */ /* ======================================================================== */ struct psc_ops { void (*fifo_init)(struct uart_port *port); int (*raw_rx_rdy)(struct uart_port *port); int (*raw_tx_rdy)(struct uart_port *port); int (*rx_rdy)(struct uart_port *port); int (*tx_rdy)(struct uart_port *port); int (*tx_empty)(struct uart_port *port); void (*stop_rx)(struct uart_port *port); void (*start_tx)(struct uart_port *port); void (*stop_tx)(struct uart_port *port); void (*rx_clr_irq)(struct uart_port *port); void (*tx_clr_irq)(struct uart_port *port); void (*write_char)(struct uart_port *port, unsigned char c); unsigned char (*read_char)(struct uart_port *port); void (*cw_disable_ints)(struct uart_port *port); void (*cw_restore_ints)(struct uart_port *port); unsigned int (*set_baudrate)(struct uart_port *port, struct ktermios *new, struct ktermios *old); int (*clock)(struct uart_port *port, int enable); int (*fifoc_init)(void); void (*fifoc_uninit)(void); void (*get_irq)(struct uart_port *, struct device_node *); irqreturn_t (*handle_irq)(struct uart_port *port); }; /* setting the prescaler and divisor reg is common for all chips */ static inline void mpc52xx_set_divisor(struct mpc52xx_psc __iomem *psc, u16 prescaler, unsigned int divisor) { /* select prescaler */ out_be16(&psc->mpc52xx_psc_clock_select, prescaler); out_8(&psc->ctur, divisor >> 8); out_8(&psc->ctlr, divisor & 0xff); } #ifdef CONFIG_PPC_MPC52xx #define FIFO_52xx(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1)) static void mpc52xx_psc_fifo_init(struct uart_port *port) { struct mpc52xx_psc __iomem *psc = PSC(port); struct mpc52xx_psc_fifo __iomem *fifo = FIFO_52xx(port); 
out_8(&fifo->rfcntl, 0x00); out_be16(&fifo->rfalarm, 0x1ff); out_8(&fifo->tfcntl, 0x07); out_be16(&fifo->tfalarm, 0x80); port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY; out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask); } static int mpc52xx_psc_raw_rx_rdy(struct uart_port *port) { return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY; } static int mpc52xx_psc_raw_tx_rdy(struct uart_port *port) { return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXRDY; } static int mpc52xx_psc_rx_rdy(struct uart_port *port) { return in_be16(&PSC(port)->mpc52xx_psc_isr) & port->read_status_mask & MPC52xx_PSC_IMR_RXRDY; } static int mpc52xx_psc_tx_rdy(struct uart_port *port) { return in_be16(&PSC(port)->mpc52xx_psc_isr) & port->read_status_mask & MPC52xx_PSC_IMR_TXRDY; } static int mpc52xx_psc_tx_empty(struct uart_port *port) { return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP; } static void mpc52xx_psc_start_tx(struct uart_port *port) { port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY; out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask); } static void mpc52xx_psc_stop_tx(struct uart_port *port) { port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY; out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask); } static void mpc52xx_psc_stop_rx(struct uart_port *port) { port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY; out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask); } static void mpc52xx_psc_rx_clr_irq(struct uart_port *port) { } static void mpc52xx_psc_tx_clr_irq(struct uart_port *port) { } static void mpc52xx_psc_write_char(struct uart_port *port, unsigned char c) { out_8(&PSC(port)->mpc52xx_psc_buffer_8, c); } static unsigned char mpc52xx_psc_read_char(struct uart_port *port) { return in_8(&PSC(port)->mpc52xx_psc_buffer_8); } static void mpc52xx_psc_cw_disable_ints(struct uart_port *port) { out_be16(&PSC(port)->mpc52xx_psc_imr, 0); } static void mpc52xx_psc_cw_restore_ints(struct 
uart_port *port) { out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask); } static unsigned int mpc5200_psc_set_baudrate(struct uart_port *port, struct ktermios *new, struct ktermios *old) { unsigned int baud; unsigned int divisor; /* The 5200 has a fixed /32 prescaler, uartclk contains the ipb freq */ baud = uart_get_baud_rate(port, new, old, port->uartclk / (32 * 0xffff) + 1, port->uartclk / 32); divisor = (port->uartclk + 16 * baud) / (32 * baud); /* enable the /32 prescaler and set the divisor */ mpc52xx_set_divisor(PSC(port), 0xdd00, divisor); return baud; } static unsigned int mpc5200b_psc_set_baudrate(struct uart_port *port, struct ktermios *new, struct ktermios *old) { unsigned int baud; unsigned int divisor; u16 prescaler; /* The 5200B has a selectable /4 or /32 prescaler, uartclk contains the * ipb freq */ baud = uart_get_baud_rate(port, new, old, port->uartclk / (32 * 0xffff) + 1, port->uartclk / 4); divisor = (port->uartclk + 2 * baud) / (4 * baud); /* select the proper prescaler and set the divisor */ if (divisor > 0xffff) { divisor = (divisor + 4) / 8; prescaler = 0xdd00; /* /32 */ } else prescaler = 0xff00; /* /4 */ mpc52xx_set_divisor(PSC(port), prescaler, divisor); return baud; } static void mpc52xx_psc_get_irq(struct uart_port *port, struct device_node *np) { port->irqflags = IRQF_DISABLED; port->irq = irq_of_parse_and_map(np, 0); } /* 52xx specific interrupt handler. 
The caller holds the port lock */ static irqreturn_t mpc52xx_psc_handle_irq(struct uart_port *port) { return mpc5xxx_uart_process_int(port); } static struct psc_ops mpc52xx_psc_ops = { .fifo_init = mpc52xx_psc_fifo_init, .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy, .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy, .rx_rdy = mpc52xx_psc_rx_rdy, .tx_rdy = mpc52xx_psc_tx_rdy, .tx_empty = mpc52xx_psc_tx_empty, .stop_rx = mpc52xx_psc_stop_rx, .start_tx = mpc52xx_psc_start_tx, .stop_tx = mpc52xx_psc_stop_tx, .rx_clr_irq = mpc52xx_psc_rx_clr_irq, .tx_clr_irq = mpc52xx_psc_tx_clr_irq, .write_char = mpc52xx_psc_write_char, .read_char = mpc52xx_psc_read_char, .cw_disable_ints = mpc52xx_psc_cw_disable_ints, .cw_restore_ints = mpc52xx_psc_cw_restore_ints, .set_baudrate = mpc5200_psc_set_baudrate, .get_irq = mpc52xx_psc_get_irq, .handle_irq = mpc52xx_psc_handle_irq, }; static struct psc_ops mpc5200b_psc_ops = { .fifo_init = mpc52xx_psc_fifo_init, .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy, .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy, .rx_rdy = mpc52xx_psc_rx_rdy, .tx_rdy = mpc52xx_psc_tx_rdy, .tx_empty = mpc52xx_psc_tx_empty, .stop_rx = mpc52xx_psc_stop_rx, .start_tx = mpc52xx_psc_start_tx, .stop_tx = mpc52xx_psc_stop_tx, .rx_clr_irq = mpc52xx_psc_rx_clr_irq, .tx_clr_irq = mpc52xx_psc_tx_clr_irq, .write_char = mpc52xx_psc_write_char, .read_char = mpc52xx_psc_read_char, .cw_disable_ints = mpc52xx_psc_cw_disable_ints, .cw_restore_ints = mpc52xx_psc_cw_restore_ints, .set_baudrate = mpc5200b_psc_set_baudrate, .get_irq = mpc52xx_psc_get_irq, .handle_irq = mpc52xx_psc_handle_irq, }; #endif /* CONFIG_MPC52xx */ #ifdef CONFIG_PPC_MPC512x #define FIFO_512x(port) ((struct mpc512x_psc_fifo __iomem *)(PSC(port)+1)) /* PSC FIFO Controller for mpc512x */ struct psc_fifoc { u32 fifoc_cmd; u32 fifoc_int; u32 fifoc_dma; u32 fifoc_axe; u32 fifoc_debug; }; static struct psc_fifoc __iomem *psc_fifoc; static unsigned int psc_fifoc_irq; static void mpc512x_psc_fifo_init(struct uart_port *port) { /* /32 prescaler */ 
out_be16(&PSC(port)->mpc52xx_psc_clock_select, 0xdd00); out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_RESET_SLICE); out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE); out_be32(&FIFO_512x(port)->txalarm, 1); out_be32(&FIFO_512x(port)->tximr, 0); out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_RESET_SLICE); out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_ENABLE_SLICE); out_be32(&FIFO_512x(port)->rxalarm, 1); out_be32(&FIFO_512x(port)->rximr, 0); out_be32(&FIFO_512x(port)->tximr, MPC512x_PSC_FIFO_ALARM); out_be32(&FIFO_512x(port)->rximr, MPC512x_PSC_FIFO_ALARM); } static int mpc512x_psc_raw_rx_rdy(struct uart_port *port) { return !(in_be32(&FIFO_512x(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY); } static int mpc512x_psc_raw_tx_rdy(struct uart_port *port) { return !(in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_FULL); } static int mpc512x_psc_rx_rdy(struct uart_port *port) { return in_be32(&FIFO_512x(port)->rxsr) & in_be32(&FIFO_512x(port)->rximr) & MPC512x_PSC_FIFO_ALARM; } static int mpc512x_psc_tx_rdy(struct uart_port *port) { return in_be32(&FIFO_512x(port)->txsr) & in_be32(&FIFO_512x(port)->tximr) & MPC512x_PSC_FIFO_ALARM; } static int mpc512x_psc_tx_empty(struct uart_port *port) { return in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_EMPTY; } static void mpc512x_psc_stop_rx(struct uart_port *port) { unsigned long rx_fifo_imr; rx_fifo_imr = in_be32(&FIFO_512x(port)->rximr); rx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM; out_be32(&FIFO_512x(port)->rximr, rx_fifo_imr); } static void mpc512x_psc_start_tx(struct uart_port *port) { unsigned long tx_fifo_imr; tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr); tx_fifo_imr |= MPC512x_PSC_FIFO_ALARM; out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr); } static void mpc512x_psc_stop_tx(struct uart_port *port) { unsigned long tx_fifo_imr; tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr); tx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM; out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr); } static void 
mpc512x_psc_rx_clr_irq(struct uart_port *port) { out_be32(&FIFO_512x(port)->rxisr, in_be32(&FIFO_512x(port)->rxisr)); } static void mpc512x_psc_tx_clr_irq(struct uart_port *port) { out_be32(&FIFO_512x(port)->txisr, in_be32(&FIFO_512x(port)->txisr)); } static void mpc512x_psc_write_char(struct uart_port *port, unsigned char c) { out_8(&FIFO_512x(port)->txdata_8, c); } static unsigned char mpc512x_psc_read_char(struct uart_port *port) { return in_8(&FIFO_512x(port)->rxdata_8); } static void mpc512x_psc_cw_disable_ints(struct uart_port *port) { port->read_status_mask = in_be32(&FIFO_512x(port)->tximr) << 16 | in_be32(&FIFO_512x(port)->rximr); out_be32(&FIFO_512x(port)->tximr, 0); out_be32(&FIFO_512x(port)->rximr, 0); } static void mpc512x_psc_cw_restore_ints(struct uart_port *port) { out_be32(&FIFO_512x(port)->tximr, (port->read_status_mask >> 16) & 0x7f); out_be32(&FIFO_512x(port)->rximr, port->read_status_mask & 0x7f); } static unsigned int mpc512x_psc_set_baudrate(struct uart_port *port, struct ktermios *new, struct ktermios *old) { unsigned int baud; unsigned int divisor; /* * The "MPC5121e Microcontroller Reference Manual, Rev. 3" says on * pg. 30-10 that the chip supports a /32 and a /10 prescaler. * Furthermore, it states that "After reset, the prescaler by 10 * for the UART mode is selected", but the reset register value is * 0x0000 which means a /32 prescaler. This is wrong. * * In reality using /32 prescaler doesn't work, as it is not supported! * Use /16 or /10 prescaler, see "MPC5121e Hardware Design Guide", * Chapter 4.1 PSC in UART Mode. * Calculate with a /16 prescaler here. 
*/ /* uartclk contains the ips freq */ baud = uart_get_baud_rate(port, new, old, port->uartclk / (16 * 0xffff) + 1, port->uartclk / 16); divisor = (port->uartclk + 8 * baud) / (16 * baud); /* enable the /16 prescaler and set the divisor */ mpc52xx_set_divisor(PSC(port), 0xdd00, divisor); return baud; } /* Init PSC FIFO Controller */ static int __init mpc512x_psc_fifoc_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-psc-fifo"); if (!np) { pr_err("%s: Can't find FIFOC node\n", __func__); return -ENODEV; } psc_fifoc = of_iomap(np, 0); if (!psc_fifoc) { pr_err("%s: Can't map FIFOC\n", __func__); of_node_put(np); return -ENODEV; } psc_fifoc_irq = irq_of_parse_and_map(np, 0); of_node_put(np); if (psc_fifoc_irq == NO_IRQ) { pr_err("%s: Can't get FIFOC irq\n", __func__); iounmap(psc_fifoc); return -ENODEV; } return 0; } static void __exit mpc512x_psc_fifoc_uninit(void) { iounmap(psc_fifoc); } /* 512x specific interrupt handler. The caller holds the port lock */ static irqreturn_t mpc512x_psc_handle_irq(struct uart_port *port) { unsigned long fifoc_int; int psc_num; /* Read pending PSC FIFOC interrupts */ fifoc_int = in_be32(&psc_fifoc->fifoc_int); /* Check if it is an interrupt for this port */ psc_num = (port->mapbase & 0xf00) >> 8; if (test_bit(psc_num, &fifoc_int) || test_bit(psc_num + 16, &fifoc_int)) return mpc5xxx_uart_process_int(port); return IRQ_NONE; } static int mpc512x_psc_clock(struct uart_port *port, int enable) { struct clk *psc_clk; int psc_num; char clk_name[10]; if (uart_console(port)) return 0; psc_num = (port->mapbase & 0xf00) >> 8; snprintf(clk_name, sizeof(clk_name), "psc%d_clk", psc_num); psc_clk = clk_get(port->dev, clk_name); if (IS_ERR(psc_clk)) { dev_err(port->dev, "Failed to get PSC clock entry!\n"); return -ENODEV; } dev_dbg(port->dev, "%s %sable\n", clk_name, enable ? 
"en" : "dis"); if (enable) clk_enable(psc_clk); else clk_disable(psc_clk); return 0; } static void mpc512x_psc_get_irq(struct uart_port *port, struct device_node *np) { port->irqflags = IRQF_SHARED; port->irq = psc_fifoc_irq; } static struct psc_ops mpc512x_psc_ops = { .fifo_init = mpc512x_psc_fifo_init, .raw_rx_rdy = mpc512x_psc_raw_rx_rdy, .raw_tx_rdy = mpc512x_psc_raw_tx_rdy, .rx_rdy = mpc512x_psc_rx_rdy, .tx_rdy = mpc512x_psc_tx_rdy, .tx_empty = mpc512x_psc_tx_empty, .stop_rx = mpc512x_psc_stop_rx, .start_tx = mpc512x_psc_start_tx, .stop_tx = mpc512x_psc_stop_tx, .rx_clr_irq = mpc512x_psc_rx_clr_irq, .tx_clr_irq = mpc512x_psc_tx_clr_irq, .write_char = mpc512x_psc_write_char, .read_char = mpc512x_psc_read_char, .cw_disable_ints = mpc512x_psc_cw_disable_ints, .cw_restore_ints = mpc512x_psc_cw_restore_ints, .set_baudrate = mpc512x_psc_set_baudrate, .clock = mpc512x_psc_clock, .fifoc_init = mpc512x_psc_fifoc_init, .fifoc_uninit = mpc512x_psc_fifoc_uninit, .get_irq = mpc512x_psc_get_irq, .handle_irq = mpc512x_psc_handle_irq, }; #endif static struct psc_ops *psc_ops; /* ======================================================================== */ /* UART operations */ /* ======================================================================== */ static unsigned int mpc52xx_uart_tx_empty(struct uart_port *port) { return psc_ops->tx_empty(port) ? 
TIOCSER_TEMT : 0; } static void mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) { if (mctrl & TIOCM_RTS) out_8(&PSC(port)->op1, MPC52xx_PSC_OP_RTS); else out_8(&PSC(port)->op0, MPC52xx_PSC_OP_RTS); } static unsigned int mpc52xx_uart_get_mctrl(struct uart_port *port) { unsigned int ret = TIOCM_DSR; u8 status = in_8(&PSC(port)->mpc52xx_psc_ipcr); if (!(status & MPC52xx_PSC_CTS)) ret |= TIOCM_CTS; if (!(status & MPC52xx_PSC_DCD)) ret |= TIOCM_CAR; return ret; } static void mpc52xx_uart_stop_tx(struct uart_port *port) { /* port->lock taken by caller */ psc_ops->stop_tx(port); } static void mpc52xx_uart_start_tx(struct uart_port *port) { /* port->lock taken by caller */ psc_ops->start_tx(port); } static void mpc52xx_uart_send_xchar(struct uart_port *port, char ch) { unsigned long flags; spin_lock_irqsave(&port->lock, flags); port->x_char = ch; if (ch) { /* Make sure tx interrupts are on */ /* Truly necessary ??? They should be anyway */ psc_ops->start_tx(port); } spin_unlock_irqrestore(&port->lock, flags); } static void mpc52xx_uart_stop_rx(struct uart_port *port) { /* port->lock taken by caller */ psc_ops->stop_rx(port); } static void mpc52xx_uart_enable_ms(struct uart_port *port) { struct mpc52xx_psc __iomem *psc = PSC(port); /* clear D_*-bits by reading them */ in_8(&psc->mpc52xx_psc_ipcr); /* enable CTS and DCD as IPC interrupts */ out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD); port->read_status_mask |= MPC52xx_PSC_IMR_IPC; out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask); } static void mpc52xx_uart_break_ctl(struct uart_port *port, int ctl) { unsigned long flags; spin_lock_irqsave(&port->lock, flags); if (ctl == -1) out_8(&PSC(port)->command, MPC52xx_PSC_START_BRK); else out_8(&PSC(port)->command, MPC52xx_PSC_STOP_BRK); spin_unlock_irqrestore(&port->lock, flags); } static int mpc52xx_uart_startup(struct uart_port *port) { struct mpc52xx_psc __iomem *psc = PSC(port); int ret; if (psc_ops->clock) { ret = 
psc_ops->clock(port, 1); if (ret) return ret; } /* Request IRQ */ ret = request_irq(port->irq, mpc52xx_uart_int, port->irqflags, "mpc52xx_psc_uart", port); if (ret) return ret; /* Reset/activate the port, clear and enable interrupts */ out_8(&psc->command, MPC52xx_PSC_RST_RX); out_8(&psc->command, MPC52xx_PSC_RST_TX); out_be32(&psc->sicr, 0); /* UART mode DCD ignored */ psc_ops->fifo_init(port); out_8(&psc->command, MPC52xx_PSC_TX_ENABLE); out_8(&psc->command, MPC52xx_PSC_RX_ENABLE); return 0; } static void mpc52xx_uart_shutdown(struct uart_port *port) { struct mpc52xx_psc __iomem *psc = PSC(port); /* Shut down the port. Leave TX active if on a console port */ out_8(&psc->command, MPC52xx_PSC_RST_RX); if (!uart_console(port)) out_8(&psc->command, MPC52xx_PSC_RST_TX); port->read_status_mask = 0; out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask); if (psc_ops->clock) psc_ops->clock(port, 0); /* Release interrupt */ free_irq(port->irq, port); } static void mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new, struct ktermios *old) { struct mpc52xx_psc __iomem *psc = PSC(port); unsigned long flags; unsigned char mr1, mr2; unsigned int j; unsigned int baud; /* Prepare what we're gonna write */ mr1 = 0; switch (new->c_cflag & CSIZE) { case CS5: mr1 |= MPC52xx_PSC_MODE_5_BITS; break; case CS6: mr1 |= MPC52xx_PSC_MODE_6_BITS; break; case CS7: mr1 |= MPC52xx_PSC_MODE_7_BITS; break; case CS8: default: mr1 |= MPC52xx_PSC_MODE_8_BITS; } if (new->c_cflag & PARENB) { mr1 |= (new->c_cflag & PARODD) ? MPC52xx_PSC_MODE_PARODD : MPC52xx_PSC_MODE_PAREVEN; } else mr1 |= MPC52xx_PSC_MODE_PARNONE; mr2 = 0; if (new->c_cflag & CSTOPB) mr2 |= MPC52xx_PSC_MODE_TWO_STOP; else mr2 |= ((new->c_cflag & CSIZE) == CS5) ? 
MPC52xx_PSC_MODE_ONE_STOP_5_BITS : MPC52xx_PSC_MODE_ONE_STOP; if (new->c_cflag & CRTSCTS) { mr1 |= MPC52xx_PSC_MODE_RXRTS; mr2 |= MPC52xx_PSC_MODE_TXCTS; } /* Get the lock */ spin_lock_irqsave(&port->lock, flags); /* Do our best to flush TX & RX, so we don't lose anything */ /* But we don't wait indefinitely ! */ j = 5000000; /* Maximum wait */ /* FIXME Can't receive chars since set_termios might be called at early * boot for the console, all stuff is not yet ready to receive at that * time and that just makes the kernel oops */ /* while (j-- && mpc52xx_uart_int_rx_chars(port)); */ while (!mpc52xx_uart_tx_empty(port) && --j) udelay(1); if (!j) printk(KERN_ERR "mpc52xx_uart.c: " "Unable to flush RX & TX fifos in-time in set_termios." "Some chars may have been lost.\n"); /* Reset the TX & RX */ out_8(&psc->command, MPC52xx_PSC_RST_RX); out_8(&psc->command, MPC52xx_PSC_RST_TX); /* Send new mode settings */ out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); out_8(&psc->mode, mr1); out_8(&psc->mode, mr2); baud = psc_ops->set_baudrate(port, new, old); /* Update the per-port timeout */ uart_update_timeout(port, new->c_cflag, baud); if (UART_ENABLE_MS(port, new->c_cflag)) mpc52xx_uart_enable_ms(port); /* Reenable TX & RX */ out_8(&psc->command, MPC52xx_PSC_TX_ENABLE); out_8(&psc->command, MPC52xx_PSC_RX_ENABLE); /* We're all set, release the lock */ spin_unlock_irqrestore(&port->lock, flags); } static const char * mpc52xx_uart_type(struct uart_port *port) { return port->type == PORT_MPC52xx ? "MPC52xx PSC" : NULL; } static void mpc52xx_uart_release_port(struct uart_port *port) { /* remapped by us ? */ if (port->flags & UPF_IOREMAP) { iounmap(port->membase); port->membase = NULL; } release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc)); } static int mpc52xx_uart_request_port(struct uart_port *port) { int err; if (port->flags & UPF_IOREMAP) /* Need to remap ? 
*/ port->membase = ioremap(port->mapbase, sizeof(struct mpc52xx_psc)); if (!port->membase) return -EINVAL; err = request_mem_region(port->mapbase, sizeof(struct mpc52xx_psc), "mpc52xx_psc_uart") != NULL ? 0 : -EBUSY; if (err && (port->flags & UPF_IOREMAP)) { iounmap(port->membase); port->membase = NULL; } return err; } static void mpc52xx_uart_config_port(struct uart_port *port, int flags) { if ((flags & UART_CONFIG_TYPE) && (mpc52xx_uart_request_port(port) == 0)) port->type = PORT_MPC52xx; } static int mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser) { if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPC52xx) return -EINVAL; if ((ser->irq != port->irq) || (ser->io_type != UPIO_MEM) || (ser->baud_base != port->uartclk) || (ser->iomem_base != (void *)port->mapbase) || (ser->hub6 != 0)) return -EINVAL; return 0; } static struct uart_ops mpc52xx_uart_ops = { .tx_empty = mpc52xx_uart_tx_empty, .set_mctrl = mpc52xx_uart_set_mctrl, .get_mctrl = mpc52xx_uart_get_mctrl, .stop_tx = mpc52xx_uart_stop_tx, .start_tx = mpc52xx_uart_start_tx, .send_xchar = mpc52xx_uart_send_xchar, .stop_rx = mpc52xx_uart_stop_rx, .enable_ms = mpc52xx_uart_enable_ms, .break_ctl = mpc52xx_uart_break_ctl, .startup = mpc52xx_uart_startup, .shutdown = mpc52xx_uart_shutdown, .set_termios = mpc52xx_uart_set_termios, /* .pm = mpc52xx_uart_pm, Not supported yet */ /* .set_wake = mpc52xx_uart_set_wake, Not supported yet */ .type = mpc52xx_uart_type, .release_port = mpc52xx_uart_release_port, .request_port = mpc52xx_uart_request_port, .config_port = mpc52xx_uart_config_port, .verify_port = mpc52xx_uart_verify_port }; /* ======================================================================== */ /* Interrupt handling */ /* ======================================================================== */ static inline int mpc52xx_uart_int_rx_chars(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; unsigned char ch, flag; unsigned short status; /* While we can 
read, do so ! */ while (psc_ops->raw_rx_rdy(port)) { /* Get the char */ ch = psc_ops->read_char(port); /* Handle sysreq char */ #ifdef SUPPORT_SYSRQ if (uart_handle_sysrq_char(port, ch)) { port->sysrq = 0; continue; } #endif /* Store it */ flag = TTY_NORMAL; port->icount.rx++; status = in_be16(&PSC(port)->mpc52xx_psc_status); if (status & (MPC52xx_PSC_SR_PE | MPC52xx_PSC_SR_FE | MPC52xx_PSC_SR_RB)) { if (status & MPC52xx_PSC_SR_RB) { flag = TTY_BREAK; uart_handle_break(port); port->icount.brk++; } else if (status & MPC52xx_PSC_SR_PE) { flag = TTY_PARITY; port->icount.parity++; } else if (status & MPC52xx_PSC_SR_FE) { flag = TTY_FRAME; port->icount.frame++; } /* Clear error condition */ out_8(&PSC(port)->command, MPC52xx_PSC_RST_ERR_STAT); } tty_insert_flip_char(tty, ch, flag); if (status & MPC52xx_PSC_SR_OE) { /* * Overrun is special, since it's * reported immediately, and doesn't * affect the current character */ tty_insert_flip_char(tty, 0, TTY_OVERRUN); port->icount.overrun++; } } spin_unlock(&port->lock); tty_flip_buffer_push(tty); spin_lock(&port->lock); return psc_ops->raw_rx_rdy(port); } static inline int mpc52xx_uart_int_tx_chars(struct uart_port *port) { struct circ_buf *xmit = &port->state->xmit; /* Process out of band chars */ if (port->x_char) { psc_ops->write_char(port, port->x_char); port->icount.tx++; port->x_char = 0; return 1; } /* Nothing to do ? 
*/ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { mpc52xx_uart_stop_tx(port); return 0; } /* Send chars */ while (psc_ops->raw_tx_rdy(port)) { psc_ops->write_char(port, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; if (uart_circ_empty(xmit)) break; } /* Wake up */ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); /* Maybe we're done after all */ if (uart_circ_empty(xmit)) { mpc52xx_uart_stop_tx(port); return 0; } return 1; } static irqreturn_t mpc5xxx_uart_process_int(struct uart_port *port) { unsigned long pass = ISR_PASS_LIMIT; unsigned int keepgoing; u8 status; /* While we have stuff to do, we continue */ do { /* If we don't find anything to do, we stop */ keepgoing = 0; psc_ops->rx_clr_irq(port); if (psc_ops->rx_rdy(port)) keepgoing |= mpc52xx_uart_int_rx_chars(port); psc_ops->tx_clr_irq(port); if (psc_ops->tx_rdy(port)) keepgoing |= mpc52xx_uart_int_tx_chars(port); status = in_8(&PSC(port)->mpc52xx_psc_ipcr); if (status & MPC52xx_PSC_D_DCD) uart_handle_dcd_change(port, !(status & MPC52xx_PSC_DCD)); if (status & MPC52xx_PSC_D_CTS) uart_handle_cts_change(port, !(status & MPC52xx_PSC_CTS)); /* Limit number of iteration */ if (!(--pass)) keepgoing = 0; } while (keepgoing); return IRQ_HANDLED; } static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id) { struct uart_port *port = dev_id; irqreturn_t ret; spin_lock(&port->lock); ret = psc_ops->handle_irq(port); spin_unlock(&port->lock); return ret; } /* ======================================================================== */ /* Console ( if applicable ) */ /* ======================================================================== */ #ifdef CONFIG_SERIAL_MPC52xx_CONSOLE static void __init mpc52xx_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits, int *flow) { struct mpc52xx_psc __iomem *psc = PSC(port); unsigned char mr1; pr_debug("mpc52xx_console_get_options(port=%p)\n", port); /* Read the mode 
registers */ out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); mr1 = in_8(&psc->mode); /* CT{U,L}R are write-only ! */ *baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD; /* Parse them */ switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) { case MPC52xx_PSC_MODE_5_BITS: *bits = 5; break; case MPC52xx_PSC_MODE_6_BITS: *bits = 6; break; case MPC52xx_PSC_MODE_7_BITS: *bits = 7; break; case MPC52xx_PSC_MODE_8_BITS: default: *bits = 8; } if (mr1 & MPC52xx_PSC_MODE_PARNONE) *parity = 'n'; else *parity = mr1 & MPC52xx_PSC_MODE_PARODD ? 'o' : 'e'; } static void mpc52xx_console_write(struct console *co, const char *s, unsigned int count) { struct uart_port *port = &mpc52xx_uart_ports[co->index]; unsigned int i, j; /* Disable interrupts */ psc_ops->cw_disable_ints(port); /* Wait the TX buffer to be empty */ j = 5000000; /* Maximum wait */ while (!mpc52xx_uart_tx_empty(port) && --j) udelay(1); /* Write all the chars */ for (i = 0; i < count; i++, s++) { /* Line return handling */ if (*s == '\n') psc_ops->write_char(port, '\r'); /* Send the char */ psc_ops->write_char(port, *s); /* Wait the TX buffer to be empty */ j = 20000; /* Maximum wait */ while (!mpc52xx_uart_tx_empty(port) && --j) udelay(1); } /* Restore interrupt state */ psc_ops->cw_restore_ints(port); } static int __init mpc52xx_console_setup(struct console *co, char *options) { struct uart_port *port = &mpc52xx_uart_ports[co->index]; struct device_node *np = mpc52xx_uart_nodes[co->index]; unsigned int uartclk; struct resource res; int ret; int baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD; int bits = 8; int parity = 'n'; int flow = 'n'; pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n", co, co->index, options); if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) { pr_debug("PSC%x out of range\n", co->index); return -EINVAL; } if (!np) { pr_debug("PSC%x not found in device tree\n", co->index); return -EINVAL; } pr_debug("Console on ttyPSC%x is %s\n", co->index, mpc52xx_uart_nodes[co->index]->full_name); /* Fetch 
register locations */ ret = of_address_to_resource(np, 0, &res); if (ret) { pr_debug("Could not get resources for PSC%x\n", co->index); return ret; } uartclk = mpc5xxx_get_bus_frequency(np); if (uartclk == 0) { pr_debug("Could not find uart clock frequency!\n"); return -EINVAL; } /* Basic port init. Needed since we use some uart_??? func before * real init for early access */ spin_lock_init(&port->lock); port->uartclk = uartclk; port->ops = &mpc52xx_uart_ops; port->mapbase = res.start; port->membase = ioremap(res.start, sizeof(struct mpc52xx_psc)); port->irq = irq_of_parse_and_map(np, 0); if (port->membase == NULL) return -EINVAL; pr_debug("mpc52xx-psc uart at %p, mapped to %p, irq=%x, freq=%i\n", (void *)port->mapbase, port->membase, port->irq, port->uartclk); /* Setup the port parameters accoding to options */ if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else mpc52xx_console_get_options(port, &baud, &parity, &bits, &flow); pr_debug("Setting console parameters: %i %i%c1 flow=%c\n", baud, bits, parity, flow); return uart_set_options(port, co, baud, parity, bits, flow); } static struct uart_driver mpc52xx_uart_driver; static struct console mpc52xx_console = { .name = "ttyPSC", .write = mpc52xx_console_write, .device = uart_console_device, .setup = mpc52xx_console_setup, .flags = CON_PRINTBUFFER, .index = -1, /* Specified on the cmdline (e.g. 
console=ttyPSC0) */ .data = &mpc52xx_uart_driver, }; static int __init mpc52xx_console_init(void) { mpc52xx_uart_of_enumerate(); register_console(&mpc52xx_console); return 0; } console_initcall(mpc52xx_console_init); #define MPC52xx_PSC_CONSOLE &mpc52xx_console #else #define MPC52xx_PSC_CONSOLE NULL #endif /* ======================================================================== */ /* UART Driver */ /* ======================================================================== */ static struct uart_driver mpc52xx_uart_driver = { .driver_name = "mpc52xx_psc_uart", .dev_name = "ttyPSC", .major = SERIAL_PSC_MAJOR, .minor = SERIAL_PSC_MINOR, .nr = MPC52xx_PSC_MAXNUM, .cons = MPC52xx_PSC_CONSOLE, }; /* ======================================================================== */ /* OF Platform Driver */ /* ======================================================================== */ static struct of_device_id mpc52xx_uart_of_match[] = { #ifdef CONFIG_PPC_MPC52xx { .compatible = "fsl,mpc5200b-psc-uart", .data = &mpc5200b_psc_ops, }, { .compatible = "fsl,mpc5200-psc-uart", .data = &mpc52xx_psc_ops, }, /* binding used by old lite5200 device trees: */ { .compatible = "mpc5200-psc-uart", .data = &mpc52xx_psc_ops, }, /* binding used by efika: */ { .compatible = "mpc5200-serial", .data = &mpc52xx_psc_ops, }, #endif #ifdef CONFIG_PPC_MPC512x { .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, }, #endif {}, }; static int __devinit mpc52xx_uart_of_probe(struct platform_device *op, const struct of_device_id *match) { int idx = -1; unsigned int uartclk; struct uart_port *port = NULL; struct resource res; int ret; dev_dbg(&op->dev, "mpc52xx_uart_probe(op=%p, match=%p)\n", op, match); /* Check validity & presence */ for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++) if (mpc52xx_uart_nodes[idx] == op->dev.of_node) break; if (idx >= MPC52xx_PSC_MAXNUM) return -EINVAL; pr_debug("Found %s assigned to ttyPSC%x\n", mpc52xx_uart_nodes[idx]->full_name, idx); /* set the uart clock to the 
input clock of the psc, the different * prescalers are taken into account in the set_baudrate() methods * of the respective chip */ uartclk = mpc5xxx_get_bus_frequency(op->dev.of_node); if (uartclk == 0) { dev_dbg(&op->dev, "Could not find uart clock frequency!\n"); return -EINVAL; } /* Init the port structure */ port = &mpc52xx_uart_ports[idx]; spin_lock_init(&port->lock); port->uartclk = uartclk; port->fifosize = 512; port->iotype = UPIO_MEM; port->flags = UPF_BOOT_AUTOCONF | (uart_console(port) ? 0 : UPF_IOREMAP); port->line = idx; port->ops = &mpc52xx_uart_ops; port->dev = &op->dev; /* Search for IRQ and mapbase */ ret = of_address_to_resource(op->dev.of_node, 0, &res); if (ret) return ret; port->mapbase = res.start; if (!port->mapbase) { dev_dbg(&op->dev, "Could not allocate resources for PSC\n"); return -EINVAL; } psc_ops->get_irq(port, op->dev.of_node); if (port->irq == NO_IRQ) { dev_dbg(&op->dev, "Could not get irq\n"); return -EINVAL; } dev_dbg(&op->dev, "mpc52xx-psc uart at %p, irq=%x, freq=%i\n", (void *)port->mapbase, port->irq, port->uartclk); /* Add the port to the uart sub-system */ ret = uart_add_one_port(&mpc52xx_uart_driver, port); if (ret) return ret; dev_set_drvdata(&op->dev, (void *)port); return 0; } static int mpc52xx_uart_of_remove(struct platform_device *op) { struct uart_port *port = dev_get_drvdata(&op->dev); dev_set_drvdata(&op->dev, NULL); if (port) uart_remove_one_port(&mpc52xx_uart_driver, port); return 0; } #ifdef CONFIG_PM static int mpc52xx_uart_of_suspend(struct platform_device *op, pm_message_t state) { struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev); if (port) uart_suspend_port(&mpc52xx_uart_driver, port); return 0; } static int mpc52xx_uart_of_resume(struct platform_device *op) { struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev); if (port) uart_resume_port(&mpc52xx_uart_driver, port); return 0; } #endif static void mpc52xx_uart_of_assign(struct device_node *np) { int i; /* Find the 
first free PSC number */ for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) { if (mpc52xx_uart_nodes[i] == NULL) { of_node_get(np); mpc52xx_uart_nodes[i] = np; return; } } } static void mpc52xx_uart_of_enumerate(void) { static int enum_done; struct device_node *np; const struct of_device_id *match; int i; if (enum_done) return; /* Assign index to each PSC in device tree */ for_each_matching_node(np, mpc52xx_uart_of_match) { match = of_match_node(mpc52xx_uart_of_match, np); psc_ops = match->data; mpc52xx_uart_of_assign(np); } enum_done = 1; for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) { if (mpc52xx_uart_nodes[i]) pr_debug("%s assigned to ttyPSC%x\n", mpc52xx_uart_nodes[i]->full_name, i); } } MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match); static struct of_platform_driver mpc52xx_uart_of_driver = { .probe = mpc52xx_uart_of_probe, .remove = mpc52xx_uart_of_remove, #ifdef CONFIG_PM .suspend = mpc52xx_uart_of_suspend, .resume = mpc52xx_uart_of_resume, #endif .driver = { .name = "mpc52xx-psc-uart", .owner = THIS_MODULE, .of_match_table = mpc52xx_uart_of_match, }, }; /* ======================================================================== */ /* Module */ /* ======================================================================== */ static int __init mpc52xx_uart_init(void) { int ret; printk(KERN_INFO "Serial: MPC52xx PSC UART driver\n"); ret = uart_register_driver(&mpc52xx_uart_driver); if (ret) { printk(KERN_ERR "%s: uart_register_driver failed (%i)\n", __FILE__, ret); return ret; } mpc52xx_uart_of_enumerate(); /* * Map the PSC FIFO Controller and init if on MPC512x. 
*/ if (psc_ops && psc_ops->fifoc_init) { ret = psc_ops->fifoc_init(); if (ret) return ret; } ret = of_register_platform_driver(&mpc52xx_uart_of_driver); if (ret) { printk(KERN_ERR "%s: of_register_platform_driver failed (%i)\n", __FILE__, ret); uart_unregister_driver(&mpc52xx_uart_driver); return ret; } return 0; } static void __exit mpc52xx_uart_exit(void) { if (psc_ops->fifoc_uninit) psc_ops->fifoc_uninit(); of_unregister_platform_driver(&mpc52xx_uart_of_driver); uart_unregister_driver(&mpc52xx_uart_driver); } module_init(mpc52xx_uart_init); module_exit(mpc52xx_uart_exit); MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>"); MODULE_DESCRIPTION("Freescale MPC52xx PSC UART"); MODULE_LICENSE("GPL");
gpl-2.0
dh-harald/i5700-kernel
drivers/usb/core/urb.c
143
27056
#include <linux/module.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/log2.h> #include <linux/usb.h> #include <linux/wait.h> #include "hcd.h" #define to_urb(d) container_of(d, struct urb, kref) static void urb_destroy(struct kref *kref) { struct urb *urb = to_urb(kref); if (urb->transfer_flags & URB_FREE_BUFFER) kfree(urb->transfer_buffer); kfree(urb); } /** * usb_init_urb - initializes a urb so that it can be used by a USB driver * @urb: pointer to the urb to initialize * * Initializes a urb so that the USB subsystem can use it properly. * * If a urb is created with a call to usb_alloc_urb() it is not * necessary to call this function. Only use this if you allocate the * space for a struct urb on your own. If you call this function, be * careful when freeing the memory for your urb that it is no longer in * use by the USB core. * * Only use this function if you _really_ understand what you are doing. */ void usb_init_urb(struct urb *urb) { if (urb) { memset(urb, 0, sizeof(*urb)); kref_init(&urb->kref); INIT_LIST_HEAD(&urb->anchor_list); } } EXPORT_SYMBOL_GPL(usb_init_urb); /** * usb_alloc_urb - creates a new urb for a USB driver to use * @iso_packets: number of iso packets for this urb * @mem_flags: the type of memory to allocate, see kmalloc() for a list of * valid options for this. * * Creates an urb for the USB driver to use, initializes a few internal * structures, incrementes the usage counter, and returns a pointer to it. * * If no memory is available, NULL is returned. * * If the driver want to use this urb for interrupt, control, or bulk * endpoints, pass '0' as the number of iso packets. * * The driver must call usb_free_urb() when it is finished with the urb. 
*/ struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags) { struct urb *urb; urb = kmalloc(sizeof(struct urb) + iso_packets * sizeof(struct usb_iso_packet_descriptor), mem_flags); if (!urb) { printk(KERN_ERR "alloc_urb: kmalloc failed\n"); return NULL; } usb_init_urb(urb); return urb; } EXPORT_SYMBOL_GPL(usb_alloc_urb); /** * usb_free_urb - frees the memory used by a urb when all users of it are finished * @urb: pointer to the urb to free, may be NULL * * Must be called when a user of a urb is finished with it. When the last user * of the urb calls this function, the memory of the urb is freed. * * Note: The transfer buffer associated with the urb is not freed unless the * URB_FREE_BUFFER transfer flag is set. */ void usb_free_urb(struct urb *urb) { if (urb) kref_put(&urb->kref, urb_destroy); } EXPORT_SYMBOL_GPL(usb_free_urb); /** * usb_get_urb - increments the reference count of the urb * @urb: pointer to the urb to modify, may be NULL * * This must be called whenever a urb is transferred from a device driver to a * host controller driver. This allows proper reference counting to happen * for urbs. * * A pointer to the urb with the incremented reference counter is returned. 
*/ struct urb *usb_get_urb(struct urb *urb) { if (urb) kref_get(&urb->kref); return urb; } EXPORT_SYMBOL_GPL(usb_get_urb); /** * usb_anchor_urb - anchors an URB while it is processed * @urb: pointer to the urb to anchor * @anchor: pointer to the anchor * * This can be called to have access to URBs which are to be executed * without bothering to track them */ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor) { unsigned long flags; spin_lock_irqsave(&anchor->lock, flags); usb_get_urb(urb); list_add_tail(&urb->anchor_list, &anchor->urb_list); urb->anchor = anchor; if (unlikely(anchor->poisoned)) { atomic_inc(&urb->reject); } spin_unlock_irqrestore(&anchor->lock, flags); } EXPORT_SYMBOL_GPL(usb_anchor_urb); /** * usb_unanchor_urb - unanchors an URB * @urb: pointer to the urb to anchor * * Call this to stop the system keeping track of this URB */ void usb_unanchor_urb(struct urb *urb) { unsigned long flags; struct usb_anchor *anchor; if (!urb) return; anchor = urb->anchor; if (!anchor) return; spin_lock_irqsave(&anchor->lock, flags); if (unlikely(anchor != urb->anchor)) { /* we've lost the race to another thread */ spin_unlock_irqrestore(&anchor->lock, flags); return; } urb->anchor = NULL; list_del(&urb->anchor_list); spin_unlock_irqrestore(&anchor->lock, flags); usb_put_urb(urb); if (list_empty(&anchor->urb_list)) wake_up(&anchor->wait); } EXPORT_SYMBOL_GPL(usb_unanchor_urb); /*-------------------------------------------------------------------*/ /** * usb_submit_urb - issue an asynchronous transfer request for an endpoint * @urb: pointer to the urb describing the request * @mem_flags: the type of memory to allocate, see kmalloc() for a list * of valid options for this. * * This submits a transfer request, and transfers control of the URB * describing that request to the USB subsystem. Request completion will * be indicated later, asynchronously, by calling the completion handler. 
* The three types of completion are success, error, and unlink * (a software-induced fault, also called "request cancellation"). * * URBs may be submitted in interrupt context. * * The caller must have correctly initialized the URB before submitting * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are * available to ensure that most fields are correctly initialized, for * the particular kind of transfer, although they will not initialize * any transfer flags. * * Successful submissions return 0; otherwise this routine returns a * negative error number. If the submission is successful, the complete() * callback from the URB will be called exactly once, when the USB core and * Host Controller Driver (HCD) are finished with the URB. When the completion * function is called, control of the URB is returned to the device * driver which issued the request. The completion handler may then * immediately free or reuse that URB. * * With few exceptions, USB device drivers should never access URB fields * provided by usbcore or the HCD until its complete() is called. * The exceptions relate to periodic transfer scheduling. For both * interrupt and isochronous urbs, as part of successful URB submission * urb->interval is modified to reflect the actual transfer period used * (normally some power of two units). And for isochronous urbs, * urb->start_frame is modified to reflect when the URB's transfers were * scheduled to start. Not all isochronous transfer scheduling policies * will work, but most host controller drivers should easily handle ISO * queues going from now until 10-200 msec into the future. * * For control endpoints, the synchronous usb_control_msg() call is * often used (in non-interrupt context) instead of this call. * That is often used through convenience wrappers, for the requests * that are standardized in the USB 2.0 specification. For bulk * endpoints, a synchronous usb_bulk_msg() call is available. 
* * Request Queuing: * * URBs may be submitted to endpoints before previous ones complete, to * minimize the impact of interrupt latencies and system overhead on data * throughput. With that queuing policy, an endpoint's queue would never * be empty. This is required for continuous isochronous data streams, * and may also be required for some kinds of interrupt transfers. Such * queuing also maximizes bandwidth utilization by letting USB controllers * start work on later requests before driver software has finished the * completion processing for earlier (successful) requests. * * As of Linux 2.6, all USB endpoint transfer queues support depths greater * than one. This was previously a HCD-specific behavior, except for ISO * transfers. Non-isochronous endpoint queues are inactive during cleanup * after faults (transfer errors or cancellation). * * Reserved Bandwidth Transfers: * * Periodic transfers (interrupt or isochronous) are performed repeatedly, * using the interval specified in the urb. Submitting the first urb to * the endpoint reserves the bandwidth necessary to make those transfers. * If the USB subsystem can't allocate sufficient bandwidth to perform * the periodic request, submitting such a periodic request should fail. * * Device drivers must explicitly request that repetition, by ensuring that * some URB is always on the endpoint's queue (except possibly for short * periods during completion callacks). When there is no longer an urb * queued, the endpoint's bandwidth reservation is canceled. This means * drivers can use their completion handlers to ensure they keep bandwidth * they need, by reinitializing and resubmitting the just-completed urb * until the driver longer needs that periodic bandwidth. * * Memory Flags: * * The general rules for how to decide which mem_flags to use * are the same as for kmalloc. There are four * different possible values; GFP_KERNEL, GFP_NOFS, GFP_NOIO and * GFP_ATOMIC. 
* * GFP_NOFS is not ever used, as it has not been implemented yet. * * GFP_ATOMIC is used when * (a) you are inside a completion handler, an interrupt, bottom half, * tasklet or timer, or * (b) you are holding a spinlock or rwlock (does not apply to * semaphores), or * (c) current->state != TASK_RUNNING, this is the case only after * you've changed it. * * GFP_NOIO is used in the block io path and error handling of storage * devices. * * All other situations use GFP_KERNEL. * * Some more specific rules for mem_flags can be inferred, such as * (1) start_xmit, timeout, and receive methods of network drivers must * use GFP_ATOMIC (they are called with a spinlock held); * (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also * called with a spinlock held); * (3) If you use a kernel thread with a network driver you must use * GFP_NOIO, unless (b) or (c) apply; * (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c) * apply or your are in a storage driver's block io path; * (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and * (6) changing firmware on a running storage or net device uses * GFP_NOIO, unless b) or c) apply * */ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) { int xfertype, max; struct usb_device *dev; struct usb_host_endpoint *ep; int is_out; if (!urb || urb->hcpriv || !urb->complete) return -EINVAL; dev = urb->dev; if ((!dev) || (dev->state < USB_STATE_DEFAULT)) return -ENODEV; /* For now, get the endpoint from the pipe. Eventually drivers * will be required to set urb->ep directly and we will eliminate * urb->pipe. */ ep = (usb_pipein(urb->pipe) ? 
dev->ep_in : dev->ep_out) [usb_pipeendpoint(urb->pipe)]; if (!ep) return -ENOENT; urb->ep = ep; urb->status = -EINPROGRESS; urb->actual_length = 0; /* Lots of sanity checks, so HCDs can rely on clean data * and don't need to duplicate tests */ xfertype = usb_endpoint_type(&ep->desc); if (xfertype == USB_ENDPOINT_XFER_CONTROL) { struct usb_ctrlrequest *setup = (struct usb_ctrlrequest *) urb->setup_packet; if (!setup) return -ENOEXEC; is_out = !(setup->bRequestType & USB_DIR_IN) || !setup->wLength; } else { is_out = usb_endpoint_dir_out(&ep->desc); } /* Cache the direction for later use */ urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) | (is_out ? URB_DIR_OUT : URB_DIR_IN); if (xfertype != USB_ENDPOINT_XFER_CONTROL && dev->state < USB_STATE_CONFIGURED) return -ENODEV; max = le16_to_cpu(ep->desc.wMaxPacketSize); if (max <= 0) { dev_dbg(&dev->dev, "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n", usb_endpoint_num(&ep->desc), is_out ? "out" : "in", __func__, max); return -EMSGSIZE; } /* periodic transfers limit size per frame/uframe, * but drivers only control those sizes for ISO. * while we're checking, initialize return status. */ if (xfertype == USB_ENDPOINT_XFER_ISOC) { int n, len; /* "high bandwidth" mode, 1-3 packets/uframe? */ if (dev->speed == USB_SPEED_HIGH) { int mult = 1 + ((max >> 11) & 0x03); max &= 0x07ff; max *= mult; } if (urb->number_of_packets <= 0) return -EINVAL; for (n = 0; n < urb->number_of_packets; n++) { len = urb->iso_frame_desc[n].length; if (len < 0 || len > max) return -EMSGSIZE; urb->iso_frame_desc[n].status = -EXDEV; urb->iso_frame_desc[n].actual_length = 0; } } /* the I/O buffer must be mapped/unmapped, except when length=0 */ if (urb->transfer_buffer_length < 0) return -EMSGSIZE; #ifdef DEBUG /* stuff that drivers shouldn't do, but which shouldn't * cause problems in HCDs if they get it wrong. 
*/ { unsigned int orig_flags = urb->transfer_flags; unsigned int allowed; /* enforce simple/standard policy */ allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK | URB_FREE_BUFFER); switch (xfertype) { case USB_ENDPOINT_XFER_BULK: if (is_out) allowed |= URB_ZERO_PACKET; /* FALLTHROUGH */ case USB_ENDPOINT_XFER_CONTROL: allowed |= URB_NO_FSBR; /* only affects UHCI */ /* FALLTHROUGH */ default: /* all non-iso endpoints */ if (!is_out) allowed |= URB_SHORT_NOT_OK; break; case USB_ENDPOINT_XFER_ISOC: allowed |= URB_ISO_ASAP; break; } urb->transfer_flags &= allowed; /* fail if submitter gave bogus flags */ if (urb->transfer_flags != orig_flags) { dev_err(&dev->dev, "BOGUS urb flags, %x --> %x\n", orig_flags, urb->transfer_flags); return -EINVAL; } } #endif /* * Force periodic transfer intervals to be legal values that are * a power of two (so HCDs don't need to). * * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC * supports different values... this uses EHCI/UHCI defaults (and * EHCI can use smaller non-default values). */ switch (xfertype) { case USB_ENDPOINT_XFER_ISOC: case USB_ENDPOINT_XFER_INT: /* too small? */ if (urb->interval <= 0) return -EINVAL; /* too big? 
*/ switch (dev->speed) { case USB_SPEED_HIGH: /* units are microframes */ /* NOTE usb handles 2^15 */ if (urb->interval > (1024 * 8)) urb->interval = 1024 * 8; max = 1024 * 8; break; case USB_SPEED_FULL: /* units are frames/msec */ case USB_SPEED_LOW: if (xfertype == USB_ENDPOINT_XFER_INT) { if (urb->interval > 255) return -EINVAL; /* NOTE ohci only handles up to 32 */ max = 128; } else { if (urb->interval > 1024) urb->interval = 1024; /* NOTE usb and ohci handle up to 2^15 */ max = 1024; } break; default: return -EINVAL; } /* Round down to a power of 2, no more than max */ urb->interval = min(max, 1 << ilog2(urb->interval)); } return usb_hcd_submit_urb(urb, mem_flags); } EXPORT_SYMBOL_GPL(usb_submit_urb); /*-------------------------------------------------------------------*/ /** * usb_unlink_urb - abort/cancel a transfer request for an endpoint * @urb: pointer to urb describing a previously submitted request, * may be NULL * * This routine cancels an in-progress request. URBs complete only once * per submission, and may be canceled only once per submission. * Successful cancellation means termination of @urb will be expedited * and the completion handler will be called with a status code * indicating that the request has been canceled (rather than any other * code). * * Drivers should not call this routine or related routines, such as * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect * method has returned. The disconnect function should synchronize with * a driver's I/O routines to insure that all URB-related activity has * completed before it returns. * * This request is always asynchronous. Success is indicated by * returning -EINPROGRESS, at which time the URB will probably not yet * have been given back to the device driver. When it is eventually * called, the completion function will see @urb->status == -ECONNRESET. * Failure is indicated by usb_unlink_urb() returning any other value. 
* Unlinking will fail when @urb is not currently "linked" (i.e., it was * never submitted, or it was unlinked before, or the hardware is already * finished with it), even if the completion handler has not yet run. * * Unlinking and Endpoint Queues: * * [The behaviors and guarantees described below do not apply to virtual * root hubs but only to endpoint queues for physical USB devices.] * * Host Controller Drivers (HCDs) place all the URBs for a particular * endpoint in a queue. Normally the queue advances as the controller * hardware processes each request. But when an URB terminates with an * error its queue generally stops (see below), at least until that URB's * completion routine returns. It is guaranteed that a stopped queue * will not restart until all its unlinked URBs have been fully retired, * with their completion routines run, even if that's not until some time * after the original completion handler returns. The same behavior and * guarantee apply when an URB terminates because it was unlinked. * * Bulk and interrupt endpoint queues are guaranteed to stop whenever an * URB terminates with any sort of error, including -ECONNRESET, -ENOENT, * and -EREMOTEIO. Control endpoint queues behave the same way except * that they are not guaranteed to stop for -EREMOTEIO errors. Queues * for isochronous endpoints are treated differently, because they must * advance at fixed rates. Such queues do not stop when an URB * encounters an error or is unlinked. An unlinked isochronous URB may * leave a gap in the stream of packets; it is undefined whether such * gaps can be filled in. * * Note that early termination of an URB because a short packet was * received will generate a -EREMOTEIO error if and only if the * URB_SHORT_NOT_OK flag is set. By setting this flag, USB device * drivers can build deep queues for large or complex bulk transfers * and clean them up reliably after any sort of aborted transfer by * unlinking all pending URBs at the first fault. 
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	/* Validate before handing off to the HCD core; each failure mode
	 * gets a distinct errno so callers can tell them apart. */
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	/* -ECONNRESET is the status an asynchronously unlinked URB reports */
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse.  These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function.  If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	/* Raising ->reject makes resubmission fail with -EPERM for as long
	 * as we are waiting, so the URB cannot bounce back into flight. */
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	/* Sleep until the HCD has fully given the URB back. */
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	/* Unlike usb_poison_urb(), the URB is usable again afterwards. */
	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused.  These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	/* ->reject stays elevated until usb_unpoison_urb(); that is what
	 * makes poisoning "sticky" compared to usb_kill_urb(). */
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	/* Drop the reference taken by usb_poison_urb(), re-enabling submit. */
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be killed starting
 * from the back of the queue
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		/* usb_kill_urb() sleeps, so the spinlock must be dropped;
		 * the list is re-checked from scratch after reacquiring. */
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_kill_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);

/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be poisoned starting
 * from the back of the queue. Newly added URBs will also be
 * poisoned
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	/* mark the anchor first, so URBs anchored from now on are
	 * poisoned as well */
	anchor->poisoned = 1;
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs
 * the anchor can be used normally after it returns
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);

/**
 *
usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be unlinked starting
 * from the back of the queue. This function is asynchronous.
 * The unlinking is just triggered. It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* hold a reference across the unlocked window so the URB
		 * cannot be freed under us */
		usb_get_urb(victim);
		spin_unlock_irqrestore(&anchor->lock, flags);
		/* this will unanchor the URB */
		usb_unlink_urb(victim);
		usb_put_urb(victim);
		spin_lock_irqsave(&anchor->lock, flags);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all an anchor's
 * URBs have finished
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * this will take the oldest urb from an anchor,
 * unanchor and return it
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		/* .next is the oldest entry; new URBs anchor at the tail */
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		spin_unlock_irqrestore(&anchor->lock, flags);
		usb_unanchor_urb(victim);
	} else {
		spin_unlock_irqrestore(&anchor->lock, flags);
		victim = NULL;
	}

	return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * use this to get rid of all an anchor's urbs
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		spin_unlock_irqrestore(&anchor->lock, flags);
		/* this may free the URB */
		usb_unanchor_urb(victim);
		usb_put_urb(victim);
		spin_lock_irqsave(&anchor->lock, flags);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * returns 1 if the anchor has no urbs associated with it
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}
EXPORT_SYMBOL_GPL(usb_anchor_empty);
gpl-2.0
BrateloSlava/test-sez-cm-kernel
drivers/staging/iio/magnetometer/inv_compass/inv_yas53x_ring.c
399
4045
/*
 * Copyright (C) 2012 Invensense, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/**
 *  @addtogroup  DRIVERS
 *  @brief       Hardware drivers.
 *
 *  @{
 *      @file    inv_yas53x_ring.c
 *      @brief   Invensense implementation for yas530/yas532/yas533.
 *      @details This driver currently works for the yas530/yas532/yas533.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/poll.h>
#include <linux/miscdevice.h>

#include "iio.h"
#include "kfifo_buf.h"
#include "trigger_consumer.h"
#include "sysfs.h"

#include "inv_yas53x_iio.h"

/* Return the current monotonic time in nanoseconds, used to timestamp
 * samples pushed into the IIO ring buffer. */
static s64 get_time_ns(void)
{
	struct timespec ts;
	ktime_get_ts(&ts);

	return timespec_to_ns(&ts);
}

/* Pack the enabled magnetometer channels from @s (three 16-bit readings)
 * into the scan byte buffer @d, honoring the ring's scan mask starting at
 * channel @scan_index.  Returns the number of bytes written to @d. */
static int put_scan_to_buf(struct iio_dev *indio_dev, unsigned char *d,
				short *s, int scan_index)
{
	struct iio_buffer *ring = indio_dev->buffer;
	int st;
	int i, d_ind;

	d_ind = 0;
	for (i = 0; i < 3; i++) {
		/* copy only channels enabled in the scan mask */
		st = iio_scan_mask_query(indio_dev, ring, scan_index + i);
		if (st) {
			memcpy(&d[d_ind], &s[i], sizeof(s[i]));
			d_ind += sizeof(s[i]);
		}
	}

	return d_ind;
}

/**
 *  inv_read_yas53x_fifo() - Transfer data from FIFO to ring buffer.
*/ void inv_read_yas53x_fifo(struct iio_dev *indio_dev) { struct inv_compass_state *st = iio_priv(indio_dev); struct iio_buffer *ring = indio_dev->buffer; int d_ind; s32 overunderflow; s8 *tmp; s64 tmp_buf[2]; if (!yas53x_read(st, st->compass_data, &overunderflow)) { tmp = (u8 *)tmp_buf; d_ind = put_scan_to_buf(indio_dev, tmp, st->compass_data, INV_YAS53X_SCAN_MAGN_X); if (ring->scan_timestamp) tmp_buf[(d_ind + 7) / 8] = get_time_ns(); ring->access->store_to(indio_dev->buffer, tmp, 0); if (overunderflow) { yas53x_resume(st); if (!st->overunderflow) st->overunderflow = 1; } } } void inv_yas53x_unconfigure_ring(struct iio_dev *indio_dev) { iio_kfifo_free(indio_dev->buffer); }; static int inv_yas53x_postenable(struct iio_dev *indio_dev) { struct inv_compass_state *st = iio_priv(indio_dev); struct iio_buffer *ring = indio_dev->buffer; /* when all the outputs are disabled, even though buffer/enable is on, do nothing */ if (!(iio_scan_mask_query(indio_dev, ring, INV_YAS53X_SCAN_MAGN_X) || iio_scan_mask_query(indio_dev, ring, INV_YAS53X_SCAN_MAGN_Y) || iio_scan_mask_query(indio_dev, ring, INV_YAS53X_SCAN_MAGN_Z))) return 0; set_yas53x_enable(indio_dev, true); schedule_delayed_work(&st->work, msecs_to_jiffies(st->delay)); return 0; } static int inv_yas53x_predisable(struct iio_dev *indio_dev) { struct inv_compass_state *st = iio_priv(indio_dev); struct iio_buffer *ring = indio_dev->buffer; cancel_delayed_work_sync(&st->work); clear_bit(INV_YAS53X_SCAN_MAGN_X, ring->scan_mask); clear_bit(INV_YAS53X_SCAN_MAGN_Y, ring->scan_mask); clear_bit(INV_YAS53X_SCAN_MAGN_Z, ring->scan_mask); return 0; } static const struct iio_buffer_setup_ops inv_yas53x_ring_setup_ops = { .preenable = &iio_sw_buffer_preenable, .postenable = &inv_yas53x_postenable, .predisable = &inv_yas53x_predisable, }; int inv_yas53x_configure_ring(struct iio_dev *indio_dev) { int ret = 0; struct iio_buffer *ring; ring = iio_kfifo_allocate(indio_dev); if (!ring) { ret = -ENOMEM; return ret; } indio_dev->buffer = 
ring; /* setup ring buffer */ ring->scan_timestamp = true; indio_dev->setup_ops = &inv_yas53x_ring_setup_ops; indio_dev->modes |= INDIO_BUFFER_TRIGGERED; return 0; } /** * @} */
gpl-2.0
blueskycoco/linux-2.6.35-s3c2440
drivers/spi/spi_bitbang.c
655
13550
/* * spi_bitbang.c - polling/bitbanging SPI master controller driver utilities * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> /*----------------------------------------------------------------------*/ /* * FIRST PART (OPTIONAL): word-at-a-time spi_transfer support. * Use this for GPIO or shift-register level hardware APIs. * * spi_bitbang_cs is in spi_device->controller_state, which is unavailable * to glue code. These bitbang setup() and cleanup() routines are always * used, though maybe they're called from controller-aware code. * * chipselect() and friends may use use spi_device->controller_data and * controller registers as appropriate. * * * NOTE: SPI controller pins can often be used as GPIO pins instead, * which means you could use a bitbang driver either to get hardware * working quickly, or testing for differences that aren't speed related. 
 */

struct spi_bitbang_cs {
	unsigned	nsecs;	/* (clock cycle time)/2 */
	u32		(*txrx_word)(struct spi_device *spi, unsigned nsecs,
					u32 word, u8 bits);
	unsigned	(*txrx_bufs)(struct spi_device *,
			u32 (*txrx_word)(
					struct spi_device *spi,
					unsigned nsecs,
					u32 word, u8 bits),
			unsigned, struct spi_transfer *);
};

/* Shift a transfer through the per-word routine one byte at a time;
 * returns the number of bytes actually transferred. */
static unsigned bitbang_txrx_8(
	struct spi_device	*spi,
	u32			(*txrx_word)(struct spi_device *spi,
					unsigned nsecs,
					u32 word, u8 bits),
	unsigned		ns,
	struct spi_transfer	*t
) {
	unsigned		bits = spi->bits_per_word;
	unsigned		count = t->len;
	const u8		*tx = t->tx_buf;
	u8			*rx = t->rx_buf;

	while (likely(count > 0)) {
		u8		word = 0;

		if (tx)
			word = *tx++;
		word = txrx_word(spi, ns, word, bits);
		if (rx)
			*rx++ = word;
		count -= 1;
	}
	return t->len - count;
}

/* As bitbang_txrx_8(), but for 9..16 bit words stored in u16 buffers. */
static unsigned bitbang_txrx_16(
	struct spi_device	*spi,
	u32			(*txrx_word)(struct spi_device *spi,
					unsigned nsecs,
					u32 word, u8 bits),
	unsigned		ns,
	struct spi_transfer	*t
) {
	unsigned		bits = spi->bits_per_word;
	unsigned		count = t->len;
	const u16		*tx = t->tx_buf;
	u16			*rx = t->rx_buf;

	while (likely(count > 1)) {
		u16		word = 0;

		if (tx)
			word = *tx++;
		word = txrx_word(spi, ns, word, bits);
		if (rx)
			*rx++ = word;
		count -= 2;
	}
	return t->len - count;
}

/* As bitbang_txrx_8(), but for 17..32 bit words stored in u32 buffers. */
static unsigned bitbang_txrx_32(
	struct spi_device	*spi,
	u32			(*txrx_word)(struct spi_device *spi,
					unsigned nsecs,
					u32 word, u8 bits),
	unsigned		ns,
	struct spi_transfer	*t
) {
	unsigned		bits = spi->bits_per_word;
	unsigned		count = t->len;
	const u32		*tx = t->tx_buf;
	u32			*rx = t->rx_buf;

	while (likely(count > 3)) {
		u32		word = 0;

		if (tx)
			word = *tx++;
		word = txrx_word(spi, ns, word, bits);
		if (rx)
			*rx++ = word;
		count -= 4;
	}
	return t->len - count;
}

/* Pick the per-word buffer loop and half-period delay for @t (or for the
 * device defaults when @t is NULL).  Returns 0 or -EINVAL. */
int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct spi_bitbang_cs	*cs = spi->controller_state;
	u8			bits_per_word;
	u32			hz;

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	} else {
		bits_per_word = 0;
		hz = 0;
	}

	/* spi_transfer level calls that work per-word */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;
	if (bits_per_word <= 8)
		cs->txrx_bufs = bitbang_txrx_8;
	else if (bits_per_word <= 16)
		cs->txrx_bufs = bitbang_txrx_16;
	else if (bits_per_word <= 32)
		cs->txrx_bufs = bitbang_txrx_32;
	else
		return -EINVAL;

	/* nsecs = (clock period)/2 */
	if (!hz)
		hz = spi->max_speed_hz;
	if (hz) {
		cs->nsecs = (1000000000/2) / hz;
		if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000))
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer);

/**
 * spi_bitbang_setup - default setup for per-word I/O loops
 */
int spi_bitbang_setup(struct spi_device *spi)
{
	struct spi_bitbang_cs	*cs = spi->controller_state;
	struct spi_bitbang	*bitbang;
	int			retval;
	unsigned long		flags;

	bitbang = spi_master_get_devdata(spi->master);

	/* allocate controller state lazily on first setup() */
	if (!cs) {
		cs = kzalloc(sizeof *cs, GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi->controller_state = cs;
	}

	/* per-word shift register access, in hardware or bitbanging */
	cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
	if (!cs->txrx_word)
		return -EINVAL;

	retval = bitbang->setup_transfer(spi, NULL);
	if (retval < 0)
		return retval;

	dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);

	/* NOTE we _need_ to call chipselect() early, ideally with adapter
	 * setup, unless the hardware defaults cooperate to avoid confusion
	 * between normal (active low) and inverted chipselects.
	 */

	/* deselect chip (low or high) */
	spin_lock_irqsave(&bitbang->lock, flags);
	if (!bitbang->busy) {
		bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
		ndelay(cs->nsecs);
	}
	spin_unlock_irqrestore(&bitbang->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup);

/**
 * spi_bitbang_cleanup - default cleanup for per-word I/O loops
 */
void spi_bitbang_cleanup(struct spi_device *spi)
{
	kfree(spi->controller_state);
}
EXPORT_SYMBOL_GPL(spi_bitbang_cleanup);

/* Default txrx_bufs: run the per-word loop chosen by setup_transfer(). */
static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct spi_bitbang_cs	*cs = spi->controller_state;
	unsigned		nsecs = cs->nsecs;

	return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t);
}

/*----------------------------------------------------------------------*/

/*
 * SECOND PART ... simple transfer queue runner.
 *
 * This costs a task context per controller, running the queue by
 * performing each transfer in sequence.  Smarter hardware can queue
 * several DMA transfers at once, and process several controller queues
 * in parallel; this driver doesn't match such hardware very well.
 *
 * Drivers can provide word-at-a-time i/o primitives, or provide
 * transfer-at-a-time ones to leverage dma or fifo hardware.
 */

/* Workqueue handler: drains bitbang->queue, running each message's
 * transfers in order.  The spinlock is held only around queue
 * manipulation; it is dropped while I/O is in progress. */
static void bitbang_work(struct work_struct *work)
{
	struct spi_bitbang	*bitbang =
		container_of(work, struct spi_bitbang, work);
	unsigned long		flags;
	int			do_setup = -1;
	int			(*setup_transfer)(struct spi_device *,
					struct spi_transfer *);

	setup_transfer = bitbang->setup_transfer;

	spin_lock_irqsave(&bitbang->lock, flags);
	bitbang->busy = 1;
	while (!list_empty(&bitbang->queue)) {
		struct spi_message	*m;
		struct spi_device	*spi;
		unsigned		nsecs;
		struct spi_transfer	*t = NULL;
		unsigned		tmp;
		unsigned		cs_change;
		int			status;

		m = container_of(bitbang->queue.next, struct spi_message,
				queue);
		list_del_init(&m->queue);
		spin_unlock_irqrestore(&bitbang->lock, flags);

		/* FIXME this is made-up ... the correct value is known to
		 * word-at-a-time bitbang code, and presumably chipselect()
		 * should enforce these requirements too?
		 */
		nsecs = 100;

		spi = m->spi;
		tmp = 0;
		cs_change = 1;
		status = 0;

		list_for_each_entry (t, &m->transfers, transfer_list) {

			/* override speed or wordsize? */
			if (t->speed_hz || t->bits_per_word)
				do_setup = 1;

			/* init (-1) or override (1) transfer params */
			if (do_setup != 0) {
				if (!setup_transfer) {
					status = -ENOPROTOOPT;
					break;
				}
				status = setup_transfer(spi, t);
				if (status < 0)
					break;
			}

			/* set up default clock polarity, and activate chip;
			 * this implicitly updates clock and spi modes as
			 * previously recorded for this device via setup().
			 * (and also deselects any other chip that might be
			 * selected ...)
			 */
			if (cs_change) {
				bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
				ndelay(nsecs);
			}
			cs_change = t->cs_change;
			if (!t->tx_buf && !t->rx_buf && t->len) {
				status = -EINVAL;
				break;
			}

			/* transfer data.  the lower level code handles any
			 * new dma mappings it needs. our caller always gave
			 * us dma-safe buffers.
			 */
			if (t->len) {
				/* REVISIT dma API still needs a designated
				 * DMA_ADDR_INVALID; ~0 might be better.
				 */
				if (!m->is_dma_mapped)
					t->rx_dma = t->tx_dma = 0;
				status = bitbang->txrx_bufs(spi, t);
			}
			if (status > 0)
				m->actual_length += status;
			if (status != t->len) {
				/* always report some kind of error */
				if (status >= 0)
					status = -EREMOTEIO;
				break;
			}
			status = 0;

			/* protocol tweaks before next transfer */
			if (t->delay_usecs)
				udelay(t->delay_usecs);

			if (!cs_change)
				continue;
			if (t->transfer_list.next == &m->transfers)
				break;

			/* sometimes a short mid-message deselect of the chip
			 * may be needed to terminate a mode or command
			 */
			ndelay(nsecs);
			bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
			ndelay(nsecs);
		}

		m->status = status;
		m->complete(m->context);

		/* restore speed and wordsize if it was overridden */
		if (do_setup == 1)
			setup_transfer(spi, NULL);
		do_setup = 0;

		/* normally deactivate chipselect ... unless no error and
		 * cs_change has hinted that the next message will probably
		 * be for this chip too.
		 */
		if (!(status == 0 && cs_change)) {
			ndelay(nsecs);
			bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
			ndelay(nsecs);
		}

		spin_lock_irqsave(&bitbang->lock, flags);
	}
	bitbang->busy = 0;
	spin_unlock_irqrestore(&bitbang->lock, flags);
}

/**
 * spi_bitbang_transfer - default submit to transfer queue
 */
int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m)
{
	struct spi_bitbang	*bitbang;
	unsigned long		flags;
	int			status = 0;

	m->actual_length = 0;
	m->status = -EINPROGRESS;

	bitbang = spi_master_get_devdata(spi->master);

	spin_lock_irqsave(&bitbang->lock, flags);
	if (!spi->max_speed_hz)
		status = -ENETDOWN;
	else {
		list_add_tail(&m->queue, &bitbang->queue);
		queue_work(bitbang->workqueue, &bitbang->work);
	}
	spin_unlock_irqrestore(&bitbang->lock, flags);

	return status;
}
EXPORT_SYMBOL_GPL(spi_bitbang_transfer);

/*----------------------------------------------------------------------*/

/**
 * spi_bitbang_start - start up a polled/bitbanging SPI master driver
 * @bitbang: driver handle
 *
 * Caller should have zero-initialized all parts of the structure, and then
 * provided callbacks for chip selection and I/O loops.  If the master has
 * a transfer method, its final step should call spi_bitbang_transfer; or,
 * that's the default if the transfer routine is not initialized.  It should
 * also set up the bus number and number of chipselects.
 *
 * For i/o loops, provide callbacks either per-word (for bitbanging, or for
 * hardware that basically exposes a shift register) or per-spi_transfer
 * (which takes better advantage of hardware like fifos or DMA engines).
 *
 * Drivers using per-word I/O loops should use (or call) spi_bitbang_setup,
 * spi_bitbang_cleanup and spi_bitbang_setup_transfer to handle those spi
 * master methods.  Those methods are the defaults if the bitbang->txrx_bufs
 * routine isn't initialized.
 *
 * This routine registers the spi_master, which will process requests in a
 * dedicated task, keeping IRQs unblocked most of the time.  To stop
 * processing those requests, call spi_bitbang_stop().
 */
int spi_bitbang_start(struct spi_bitbang *bitbang)
{
	int	status;

	if (!bitbang->master || !bitbang->chipselect)
		return -EINVAL;

	INIT_WORK(&bitbang->work, bitbang_work);
	spin_lock_init(&bitbang->lock);
	INIT_LIST_HEAD(&bitbang->queue);

	if (!bitbang->master->mode_bits)
		bitbang->master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;

	/* fill in defaults for any method the driver didn't supply */
	if (!bitbang->master->transfer)
		bitbang->master->transfer = spi_bitbang_transfer;
	if (!bitbang->txrx_bufs) {
		bitbang->use_dma = 0;
		bitbang->txrx_bufs = spi_bitbang_bufs;
		if (!bitbang->master->setup) {
			if (!bitbang->setup_transfer)
				bitbang->setup_transfer =
					 spi_bitbang_setup_transfer;
			bitbang->master->setup = spi_bitbang_setup;
			bitbang->master->cleanup = spi_bitbang_cleanup;
		}
	} else if (!bitbang->master->setup)
		return -EINVAL;

	/* this task is the only thing to touch the SPI bits */
	bitbang->busy = 0;
	bitbang->workqueue = create_singlethread_workqueue(
			dev_name(bitbang->master->dev.parent));
	if (bitbang->workqueue == NULL) {
		status = -EBUSY;
		goto err1;
	}

	/* driver may get busy before register() returns, especially
	 * if someone registered boardinfo for devices
	 */
	status = spi_register_master(bitbang->master);
	if (status < 0)
		goto err2;
	return status;

err2:
	destroy_workqueue(bitbang->workqueue);
err1:
	return status;
}
EXPORT_SYMBOL_GPL(spi_bitbang_start);

/**
 * spi_bitbang_stop - stops the task providing spi communication
 */
int spi_bitbang_stop(struct spi_bitbang *bitbang)
{
	spi_unregister_master(bitbang->master);

	/* no new messages can arrive after unregistering; the queue
	 * should already have been drained by the worker */
	WARN_ON(!list_empty(&bitbang->queue));

	destroy_workqueue(bitbang->workqueue);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_stop);

MODULE_LICENSE("GPL");
gpl-2.0
zeeshanhussain/inazuma-msm8916
drivers/net/ethernet/msm/msm_rmnet_smux.c
1167
22951
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * RMNET SMUX Module. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/wakelock.h> #include <linux/if_arp.h> #include <linux/msm_rmnet.h> #include <linux/platform_device.h> #include <linux/smux.h> #include <linux/ip.h> #ifdef CONFIG_HAS_EARLYSUSPEND #include <linux/earlysuspend.h> #endif /* Debug message support */ static int msm_rmnet_smux_debug_mask; module_param_named(debug_enable, msm_rmnet_smux_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); #define DEBUG_MASK_LVL0 (1U << 0) #define DEBUG_MASK_LVL1 (1U << 1) #define DEBUG_MASK_LVL2 (1U << 2) #define DBG(m, x...) do { \ if (msm_rmnet_smux_debug_mask & m) \ pr_info(x); \ } while (0) #define DBG0(x...) DBG(DEBUG_MASK_LVL0, x) #define DBG1(x...) DBG(DEBUG_MASK_LVL1, x) #define DBG2(x...) 
DBG(DEBUG_MASK_LVL2, x) /* Configure device instances */ #define RMNET_SMUX_DEVICE_COUNT (2) /* allow larger frames */ #define RMNET_DATA_LEN 2000 #define DEVICE_ID_INVALID -1 #define DEVICE_INACTIVE 0x00 #define DEVICE_ACTIVE 0x01 #define HEADROOM_FOR_SMUX 8 /* for mux header */ #define HEADROOM_FOR_QOS 8 #define TAILROOM 8 /* for padding by mux layer */ struct rmnet_private { struct net_device_stats stats; uint32_t ch_id; #ifdef CONFIG_MSM_RMNET_DEBUG ktime_t last_packet; unsigned long wakeups_xmit; unsigned long wakeups_rcv; unsigned long timeout_us; #endif spinlock_t lock; spinlock_t tx_queue_lock; struct tasklet_struct tsklt; /* IOCTL specified mode (protocol, QoS header) */ u32 operation_mode; uint8_t device_state; uint8_t in_reset; }; static struct net_device *netdevs[RMNET_SMUX_DEVICE_COUNT]; #ifdef CONFIG_MSM_RMNET_DEBUG static unsigned long timeout_us; #ifdef CONFIG_HAS_EARLYSUSPEND /* * If early suspend is enabled then we specify two timeout values, * screen on (default), and screen is off. */ static unsigned long timeout_suspend_us; static struct device *rmnet0; /* Set timeout in us when the screen is off. 
 */
static ssize_t timeout_suspend_store(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t n)
{
	/* NOTE(review): strict_strtoul() conventionally takes
	 * (cp, base, unsigned long *res) and returns an error code;
	 * assigning its return value directly looks wrong — confirm
	 * against this tree's definition. */
	timeout_suspend_us = strict_strtoul(buf, NULL, 10);
	return n;
}

static ssize_t timeout_suspend_show(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%lu\n",
			(unsigned long) timeout_suspend_us);
}

static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
		   timeout_suspend_store);

/* Screen off: switch rmnet0's wakeup-accounting timeout to the
 * suspend value. */
static void rmnet_early_suspend(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_suspend_us;
	}
}

/* Screen on: restore the normal timeout. */
static void rmnet_late_resume(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_us;
	}
}

static struct early_suspend rmnet_power_suspend = {
	.suspend = rmnet_early_suspend,
	.resume = rmnet_late_resume,
};

static int __init rmnet_late_init(void)
{
	register_early_suspend(&rmnet_power_suspend);
	return 0;
}

late_initcall(rmnet_late_init);
#endif /* CONFIG_HAS_EARLYSUSPEND */

/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
	int ret = 0;
	ktime_t now;
	if (p->timeout_us == 0) /* Check if disabled */
		return 0;

	/* Use real (wall) time. */
	now = ktime_get_real();

	/* a packet counts as a wakeup if the link was idle longer than
	 * the configured timeout */
	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
		ret = 1;

	p->last_packet = now;
	return ret;
}

static ssize_t wakeups_xmit_show(struct device *d,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return snprintf(buf, PAGE_SIZE, "%lu\n", p->wakeups_xmit);
}

DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

static ssize_t wakeups_rcv_show(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return snprintf(buf, PAGE_SIZE, "%lu\n", p->wakeups_rcv);
}

DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us.
 */
static ssize_t timeout_store(struct device *d,
			     struct device_attribute *attr,
			     const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	/* NOTE(review): same strict_strtoul() usage concern as
	 * timeout_suspend_store() above. */
	p->timeout_us = timeout_us = strict_strtoul(buf, NULL, 10);
#else
/* If using early suspend/resume hooks do not write the value on store. */
	timeout_us = strict_strtoul(buf, NULL, 10);
#endif /* CONFIG_HAS_EARLYSUSPEND */
	return n;
}

static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	/* NOTE(review): redundant re-assignment, and p is otherwise unused
	 * since the global timeout_us is printed — candidate for cleanup. */
	p = netdev_priv(to_net_dev(d));
	return snprintf(buf, PAGE_SIZE, "%lu\n", timeout_us);
}

DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
#endif /* CONFIG_MSM_RMNET_DEBUG */

/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);

/* ARP frames are excluded from the rx/tx packet statistics. */
static int count_this_packet(void *_hdr, int len)
{
	struct ethhdr *hdr = _hdr;

	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
		return 0;

	return 1;
}

/* Classify a raw IP frame by its version nibble (IP-mode links carry no
 * ethernet header, so eth_type_trans() cannot be used). */
static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	__be16 protocol = 0;

	skb->dev = dev;

	/* Determine L3 protocol */
	switch (skb->data[0] & 0xf0) {
	case 0x40:
		protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
		       dev->name, skb->data[0] & 0xf0);
		/* skb will be dropped in upper layer for unknown protocol */
	}
	return protocol;
}

/* SMUX read-completion callback: validate the skb handed back by the mux
 * layer, then push it into the network stack. */
static void smux_read_done(void *rcv_dev, const void *meta_data)
{
	struct rmnet_private *p;
	struct net_device *dev = rcv_dev;
	u32 opmode;
	unsigned long flags;
	struct sk_buff *skb = NULL;
	const struct smux_meta_read *read_meta_info = meta_data;

	if (!dev || !read_meta_info) {
		DBG1("%s:invalid read_done callback recieved", __func__);
		return;
	}

	p = netdev_priv(dev);

	skb = (struct sk_buff *) read_meta_info->pkt_priv;
	if (!skb || skb->dev != dev) {
		DBG1("%s: ERR:skb pointer NULL in READ_DONE CALLBACK",
		     __func__);
		return;
	}

	/*
Handle Rx frame format */ spin_lock_irqsave(&p->lock, flags); opmode = p->operation_mode; spin_unlock_irqrestore(&p->lock, flags); if (RMNET_IS_MODE_IP(opmode)) { /* Driver in IP mode */ skb->protocol = rmnet_ip_type_trans(skb, dev); } else { /* Driver in Ethernet mode */ skb->protocol = eth_type_trans(skb, dev); } if (RMNET_IS_MODE_IP(opmode) || count_this_packet(skb->data, skb->len)) { #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_rcv += rmnet_cause_wakeup(p); #endif p->stats.rx_packets++; p->stats.rx_bytes += skb->len; } DBG2("[%s] Rx packet #%lu len=%d\n", dev->name, p->stats.rx_packets, skb->len); /* Deliver to network stack */ netif_rx(skb); return; } static void smux_write_done(void *dev, const void *meta_data) { struct rmnet_private *p = netdev_priv(dev); u32 opmode; struct sk_buff *skb = NULL; const struct smux_meta_write *write_meta_info = meta_data; unsigned long flags; if (!dev || !write_meta_info) { DBG1("%s: ERR:invalid WRITE_DONE callback recieved", __func__); return; } skb = (struct sk_buff *) write_meta_info->pkt_priv; if (!skb) { DBG1("%s: ERR:skb pointer NULL in WRITE_DONE" " CALLBACK", __func__); return; } spin_lock_irqsave(&p->lock, flags); opmode = p->operation_mode; spin_unlock_irqrestore(&p->lock, flags); DBG1("%s: write complete\n", __func__); if (RMNET_IS_MODE_IP(opmode) || count_this_packet(skb->data, skb->len)) { p->stats.tx_packets++; p->stats.tx_bytes += skb->len; #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_xmit += rmnet_cause_wakeup(p); #endif } DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n", ((struct net_device *)(dev))->name, p->stats.tx_packets, skb->len, skb->mark); dev_kfree_skb_any(skb); spin_lock_irqsave(&p->tx_queue_lock, flags); if (netif_queue_stopped(dev) && msm_smux_is_ch_low(p->ch_id)) { DBG0("%s: Low WM hit, waking queue=%p\n", __func__, skb); netif_wake_queue(dev); } spin_unlock_irqrestore(&p->tx_queue_lock, flags); } void rmnet_smux_notify(void *priv, int event_type, const void *metadata) { struct rmnet_private *p; struct 
net_device *dev; unsigned long flags; struct sk_buff *skb = NULL; u32 opmode; const struct smux_meta_disconnected *ssr_info; const struct smux_meta_read *read_meta_info; const struct smux_meta_write *write_meta_info = metadata; if (!priv) DBG0("%s: priv(cookie) NULL, ignoring notification:" " %d\n", __func__, event_type); switch (event_type) { case SMUX_CONNECTED: p = netdev_priv(priv); dev = priv; DBG0("[%s] SMUX_CONNECTED event dev:%s\n", __func__, dev->name); netif_carrier_on(dev); netif_start_queue(dev); spin_lock_irqsave(&p->lock, flags); p->device_state = DEVICE_ACTIVE; spin_unlock_irqrestore(&p->lock, flags); break; case SMUX_DISCONNECTED: p = netdev_priv(priv); dev = priv; ssr_info = metadata; DBG0("[%s] SMUX_DISCONNECTED event dev:%s\n", __func__, dev->name); if (ssr_info && ssr_info->is_ssr == 1) DBG0("SSR detected on :%s\n", dev->name); netif_carrier_off(dev); netif_stop_queue(dev); spin_lock_irqsave(&p->lock, flags); p->device_state = DEVICE_INACTIVE; spin_unlock_irqrestore(&p->lock, flags); break; case SMUX_READ_DONE: smux_read_done(priv, metadata); break; case SMUX_READ_FAIL: p = netdev_priv(priv); dev = priv; read_meta_info = metadata; if (!dev || !read_meta_info) { DBG1("%s: ERR:invalid read failed callback" " recieved", __func__); return; } skb = (struct sk_buff *) read_meta_info->pkt_priv; if (!skb) { DBG1("%s: ERR:skb pointer NULL in read fail" " CALLBACK", __func__); return; } DBG0("%s: read failed\n", __func__); opmode = p->operation_mode; if (RMNET_IS_MODE_IP(opmode) || count_this_packet(skb->data, skb->len)) p->stats.rx_dropped++; dev_kfree_skb_any(skb); break; case SMUX_WRITE_DONE: smux_write_done(priv, metadata); break; case SMUX_WRITE_FAIL: p = netdev_priv(priv); dev = priv; write_meta_info = metadata; if (!dev || !write_meta_info) { DBG1("%s: ERR:invalid WRITE_DONE" "callback recieved", __func__); return; } skb = (struct sk_buff *) write_meta_info->pkt_priv; if (!skb) { DBG1("%s: ERR:skb pointer NULL in" " WRITE_DONE CALLBACK", __func__); 
return; } DBG0("%s: write failed\n", __func__); opmode = p->operation_mode; if (RMNET_IS_MODE_IP(opmode) || count_this_packet(skb->data, skb->len)) { p->stats.tx_dropped++; } dev_kfree_skb_any(skb); break; case SMUX_LOW_WM_HIT: dev = priv; p = netdev_priv(priv); DBG0("[%s] Low WM hit dev:%s\n", __func__, dev->name); spin_lock_irqsave(&p->tx_queue_lock, flags); netif_wake_queue(dev); spin_unlock_irqrestore(&p->tx_queue_lock, flags); break; case SMUX_HIGH_WM_HIT: dev = priv; p = netdev_priv(priv); DBG0("[%s] High WM hit dev:%s\n", __func__, dev->name); spin_lock_irqsave(&p->tx_queue_lock, flags); netif_stop_queue(dev); spin_unlock_irqrestore(&p->tx_queue_lock, flags); break; default: dev = priv; DBG0("[%s] Invalid event:%d received on" " dev: %s\n", __func__, event_type, dev->name); break; } return; } int get_rx_buffers(void *priv, void **pkt_priv, void **buffer, int size) { struct net_device *dev = (struct net_device *) priv; struct sk_buff *skb = NULL; void *ptr = NULL; DBG0("[%s] dev:%s\n", __func__, dev->name); skb = __dev_alloc_skb(size, GFP_ATOMIC); if (skb == NULL) { DBG0("%s: unable to alloc skb\n", __func__); return -ENOMEM; } /* TODO skb_reserve(skb, NET_IP_ALIGN); for ethernet mode */ /* Populate some params now. */ skb->dev = dev; ptr = skb_put(skb, size); skb_set_network_header(skb, 0); /* done with skb setup, return the buffer pointer. 
*/ *pkt_priv = skb; *buffer = ptr; return 0; } static int __rmnet_open(struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); DBG0("[%s] __rmnet_open()\n", dev->name); if (p->device_state == DEVICE_ACTIVE) { return 0; } else { DBG0("[%s] Platform inactive\n", dev->name); return -ENODEV; } } static int rmnet_open(struct net_device *dev) { int rc = 0; DBG0("[%s] rmnet_open()\n", dev->name); rc = __rmnet_open(dev); if (rc == 0) netif_start_queue(dev); return rc; } static int rmnet_stop(struct net_device *dev) { DBG0("[%s] rmnet_stop()\n", dev->name); netif_stop_queue(dev); return 0; } static int rmnet_change_mtu(struct net_device *dev, int new_mtu) { if (0 > new_mtu || RMNET_DATA_LEN < new_mtu) return -EINVAL; DBG0("[%s] MTU change: old=%d new=%d\n", dev->name, dev->mtu, new_mtu); dev->mtu = new_mtu; return 0; } static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); struct QMI_QOS_HDR_S *qmih; u32 opmode; unsigned long flags; /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */ spin_lock_irqsave(&p->lock, flags); opmode = p->operation_mode; spin_unlock_irqrestore(&p->lock, flags); if (RMNET_IS_MODE_QOS(opmode)) { qmih = (struct QMI_QOS_HDR_S *) skb_push(skb, sizeof(struct QMI_QOS_HDR_S)); qmih->version = 1; qmih->flags = 0; qmih->flow_id = skb->mark; } dev->trans_start = jiffies; return msm_smux_write(p->ch_id, skb, skb->data, skb->len); } static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); unsigned long flags; int ret = 0; if (netif_queue_stopped(dev) || (p->device_state == DEVICE_INACTIVE)) { pr_err("[%s]fatal: rmnet_xmit called when " "netif_queue is stopped", dev->name); return 0; } spin_lock_irqsave(&p->tx_queue_lock, flags); ret = _rmnet_xmit(skb, dev); if (ret == -EAGAIN) { /* * EAGAIN means we attempted to overflow the high watermark * Clearly the queue is not stopped like it should be, so * stop it and 
return BUSY to the TCP/IP framework. It will * retry this packet with the queue is restarted which happens * low watermark is called. */ netif_stop_queue(dev); ret = NETDEV_TX_BUSY; } spin_unlock_irqrestore(&p->tx_queue_lock, flags); return ret; } static struct net_device_stats *rmnet_get_stats(struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); return &p->stats; } static void rmnet_tx_timeout(struct net_device *dev) { pr_warning("[%s] rmnet_tx_timeout()\n", dev->name); } static const struct net_device_ops rmnet_ops_ether = { .ndo_open = rmnet_open, .ndo_stop = rmnet_stop, .ndo_start_xmit = rmnet_xmit, .ndo_get_stats = rmnet_get_stats, .ndo_tx_timeout = rmnet_tx_timeout, .ndo_do_ioctl = rmnet_ioctl, .ndo_change_mtu = rmnet_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static const struct net_device_ops rmnet_ops_ip = { .ndo_open = rmnet_open, .ndo_stop = rmnet_stop, .ndo_start_xmit = rmnet_xmit, .ndo_get_stats = rmnet_get_stats, .ndo_tx_timeout = rmnet_tx_timeout, .ndo_do_ioctl = rmnet_ioctl, .ndo_change_mtu = rmnet_change_mtu, .ndo_set_mac_address = 0, .ndo_validate_addr = 0, }; static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct rmnet_private *p = netdev_priv(dev); u32 old_opmode = p->operation_mode; unsigned long flags; int prev_mtu = dev->mtu; int rc = 0; struct rmnet_ioctl_data_s ioctl_data; /* Process IOCTL command */ switch (cmd) { case RMNET_IOCTL_SET_LLP_ETHERNET: /* Set Ethernet protocol */ /* Perform Ethernet config only if in IP mode currently*/ if (p->operation_mode & RMNET_MODE_LLP_IP) { ether_setup(dev); random_ether_addr(dev->dev_addr); dev->mtu = prev_mtu; dev->netdev_ops = &rmnet_ops_ether; spin_lock_irqsave(&p->lock, flags); p->operation_mode &= ~RMNET_MODE_LLP_IP; p->operation_mode |= RMNET_MODE_LLP_ETH; spin_unlock_irqrestore(&p->lock, flags); DBG0("[%s] rmnet_ioctl(): " "set Ethernet protocol mode\n", dev->name); } break; case 
RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP protocol */ /* Perform IP config only if in Ethernet mode currently*/ if (p->operation_mode & RMNET_MODE_LLP_ETH) { /* Undo config done in ether_setup() */ dev->header_ops = 0; /* No header */ dev->type = ARPHRD_RAWIP; dev->hard_header_len = 0; dev->mtu = prev_mtu; dev->addr_len = 0; dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); dev->needed_headroom = HEADROOM_FOR_SMUX + HEADROOM_FOR_QOS; dev->needed_tailroom = TAILROOM; dev->netdev_ops = &rmnet_ops_ip; spin_lock_irqsave(&p->lock, flags); p->operation_mode &= ~RMNET_MODE_LLP_ETH; p->operation_mode |= RMNET_MODE_LLP_IP; spin_unlock_irqrestore(&p->lock, flags); DBG0("[%s] rmnet_ioctl(): " "set IP protocol mode\n", dev->name); } break; case RMNET_IOCTL_GET_LLP: /* Get link protocol state */ ioctl_data.u.operation_mode = (p->operation_mode & (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP)); if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, sizeof(struct rmnet_ioctl_data_s))) rc = -EFAULT; break; case RMNET_IOCTL_SET_QOS_ENABLE: /* Set QoS header enabled */ spin_lock_irqsave(&p->lock, flags); p->operation_mode |= RMNET_MODE_QOS; spin_unlock_irqrestore(&p->lock, flags); DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n", dev->name); break; case RMNET_IOCTL_SET_QOS_DISABLE: /* Set QoS header disabled */ spin_lock_irqsave(&p->lock, flags); p->operation_mode &= ~RMNET_MODE_QOS; spin_unlock_irqrestore(&p->lock, flags); DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n", dev->name); break; case RMNET_IOCTL_GET_QOS: /* Get QoS header state */ ioctl_data.u.operation_mode = (p->operation_mode & RMNET_MODE_QOS); if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, sizeof(struct rmnet_ioctl_data_s))) rc = -EFAULT; break; case RMNET_IOCTL_GET_OPMODE: /* Get operation mode */ ioctl_data.u.operation_mode = p->operation_mode; if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, sizeof(struct rmnet_ioctl_data_s))) rc = -EFAULT; break; case RMNET_IOCTL_OPEN: /* Open transport port */ rc = 
__rmnet_open(dev); DBG0("[%s] rmnet_ioctl(): open transport port\n", dev->name); break; case RMNET_IOCTL_CLOSE: /* Close transport port */ DBG0("[%s] rmnet_ioctl(): close transport port\n", dev->name); break; default: pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]", dev->name, cmd); return -EINVAL; } DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n", dev->name, __func__, cmd, old_opmode, p->operation_mode); return rc; } static void __init rmnet_setup(struct net_device *dev) { /* Using Ethernet mode by default */ dev->netdev_ops = &rmnet_ops_ether; ether_setup(dev); /* set this after calling ether_setup */ dev->mtu = RMNET_DATA_LEN; dev->needed_headroom = HEADROOM_FOR_SMUX + HEADROOM_FOR_QOS ; dev->needed_tailroom = TAILROOM; random_ether_addr(dev->dev_addr); dev->watchdog_timeo = 1000; /* 10 seconds? */ } static int smux_rmnet_probe(struct platform_device *pdev) { int i; int r; struct rmnet_private *p; for (i = 0; i < RMNET_SMUX_DEVICE_COUNT; i++) { p = netdev_priv(netdevs[i]); if (p != NULL) { r = msm_smux_open(p->ch_id, netdevs[i], rmnet_smux_notify, get_rx_buffers); if (r < 0) { DBG0("%s: ch=%d open failed with rc %d\n", __func__, p->ch_id, r); } } } return 0; } static int smux_rmnet_remove(struct platform_device *pdev) { int i; int r; struct rmnet_private *p; for (i = 0; i < RMNET_SMUX_DEVICE_COUNT; i++) { p = netdev_priv(netdevs[i]); if (p != NULL) { r = msm_smux_close(p->ch_id); if (r < 0) { DBG0("%s: ch=%d close failed with rc %d\n", __func__, p->ch_id, r); continue; } netif_carrier_off(netdevs[i]); netif_stop_queue(netdevs[i]); } } return 0; } static struct platform_driver smux_rmnet_driver = { .probe = smux_rmnet_probe, .remove = smux_rmnet_remove, .driver = { .name = "SMUX_RMNET", .owner = THIS_MODULE, }, }; static int __init rmnet_init(void) { int ret; struct device *d; struct net_device *dev; struct rmnet_private *p; unsigned n; #ifdef CONFIG_MSM_RMNET_DEBUG timeout_us = 0; #ifdef CONFIG_HAS_EARLYSUSPEND timeout_suspend_us = 0; 
#endif /* CONFIG_HAS_EARLYSUSPEND */ #endif /* CONFIG_MSM_RMNET_DEBUG */ for (n = 0; n < RMNET_SMUX_DEVICE_COUNT; n++) { dev = alloc_netdev(sizeof(struct rmnet_private), "rmnet_smux%d", rmnet_setup); if (!dev) { pr_err("%s: no memory for netdev %d\n", __func__, n); return -ENOMEM; } netdevs[n] = dev; d = &(dev->dev); p = netdev_priv(dev); /* Initial config uses Ethernet */ p->operation_mode = RMNET_MODE_LLP_ETH; p->ch_id = n; p->in_reset = 0; spin_lock_init(&p->lock); spin_lock_init(&p->tx_queue_lock); #ifdef CONFIG_MSM_RMNET_DEBUG p->timeout_us = timeout_us; p->wakeups_xmit = p->wakeups_rcv = 0; #endif ret = register_netdev(dev); if (ret) { pr_err("%s: unable to register netdev" " %d rc=%d\n", __func__, n, ret); free_netdev(dev); return ret; } #ifdef CONFIG_MSM_RMNET_DEBUG if (device_create_file(d, &dev_attr_timeout)) continue; if (device_create_file(d, &dev_attr_wakeups_xmit)) continue; if (device_create_file(d, &dev_attr_wakeups_rcv)) continue; #ifdef CONFIG_HAS_EARLYSUSPEND if (device_create_file(d, &dev_attr_timeout_suspend)) continue; /* Only care about rmnet0 for suspend/resume tiemout hooks. */ if (n == 0) rmnet0 = d; #endif /* CONFIG_HAS_EARLYSUSPEND */ #endif /* CONFIG_MSM_RMNET_DEBUG */ } ret = platform_driver_register(&smux_rmnet_driver); if (ret) { pr_err("%s: registration failed n=%d rc=%d\n", __func__, n, ret); return ret; } return 0; } module_init(rmnet_init); MODULE_DESCRIPTION("MSM RMNET SMUX TRANSPORT"); MODULE_LICENSE("GPL v2");
gpl-2.0
binkybear/nexus7_2012-5
arch/mips/kernel/crash_dump.c
1935
1887
#include <linux/highmem.h> #include <linux/bootmem.h> #include <linux/crash_dump.h> #include <asm/uaccess.h> #include <linux/slab.h> static int __init parse_savemaxmem(char *p) { if (p) saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; return 1; } __setup("savemaxmem=", parse_savemaxmem); static void *kdump_buf_page; /** * copy_oldmem_page - copy one page from "oldmem" * @pfn: page frame number to be copied * @buf: target memory address for the copy; this can be in kernel address * space or user address space (see @userbuf) * @csize: number of bytes to copy * @offset: offset in bytes into the page (based on pfn) to begin the copy * @userbuf: if set, @buf is in user address space, use copy_to_user(), * otherwise @buf is in kernel address space, use memcpy(). * * Copy a page from "oldmem". For this page, there is no pte mapped * in the current kernel. * * Calling copy_to_user() in atomic context is not desirable. Hence first * copying the data to a pre-allocated kernel page and then copying to user * space in non-atomic context. */ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, unsigned long offset, int userbuf) { void *vaddr; if (!csize) return 0; vaddr = kmap_atomic_pfn(pfn); if (!userbuf) { memcpy(buf, (vaddr + offset), csize); kunmap_atomic(vaddr); } else { if (!kdump_buf_page) { pr_warning("Kdump: Kdump buffer page not allocated\n"); return -EFAULT; } copy_page(kdump_buf_page, vaddr); kunmap_atomic(vaddr); if (copy_to_user(buf, (kdump_buf_page + offset), csize)) return -EFAULT; } return csize; } static int __init kdump_buf_page_init(void) { int ret = 0; kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!kdump_buf_page) { pr_warning("Kdump: Failed to allocate kdump buffer page\n"); ret = -ENOMEM; } return ret; } arch_initcall(kdump_buf_page_init);
gpl-2.0
TeamWin/android_kernel_samsung_zerofltespr
arch/powerpc/platforms/pseries/lpar.c
1935
16213
/* * pSeries_lpar.c * Copyright (C) 2001 Todd Inglett, IBM Corporation * * pSeries LPAR support. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Enables debugging of low-level hash table routines - careful! */ #undef DEBUG #include <linux/kernel.h> #include <linux/dma-mapping.h> #include <linux/console.h> #include <linux/export.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/machdep.h> #include <asm/mmu_context.h> #include <asm/iommu.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/prom.h> #include <asm/cputable.h> #include <asm/udbg.h> #include <asm/smp.h> #include <asm/trace.h> #include <asm/firmware.h> #include "plpar_wrappers.h" #include "pseries.h" /* in hvCall.S */ EXPORT_SYMBOL(plpar_hcall); EXPORT_SYMBOL(plpar_hcall9); EXPORT_SYMBOL(plpar_hcall_norets); extern void pSeries_find_serial_port(void); void vpa_init(int cpu) { int hwcpu = get_hard_smp_processor_id(cpu); unsigned long addr; long ret; struct paca_struct *pp; struct dtl_entry *dtl; if (cpu_has_feature(CPU_FTR_ALTIVEC)) lppaca_of(cpu).vmxregs_in_use = 1; addr = __pa(&lppaca_of(cpu)); ret = register_vpa(hwcpu, addr); if (ret) { pr_err("WARNING: VPA registration for cpu %d (hw %d) of area " "%lx failed with %ld\n", cpu, hwcpu, addr, ret); return; } /* * PAPR says this feature is 
SLB-Buffer but firmware never * reports that. All SPLPAR support SLB shadow buffer. */ addr = __pa(&slb_shadow[cpu]); if (firmware_has_feature(FW_FEATURE_SPLPAR)) { ret = register_slb_shadow(hwcpu, addr); if (ret) pr_err("WARNING: SLB shadow buffer registration for " "cpu %d (hw %d) of area %lx failed with %ld\n", cpu, hwcpu, addr, ret); } /* * Register dispatch trace log, if one has been allocated. */ pp = &paca[cpu]; dtl = pp->dispatch_log; if (dtl) { pp->dtl_ridx = 0; pp->dtl_curr = dtl; lppaca_of(cpu).dtl_idx = 0; /* hypervisor reads buffer length from this field */ dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; ret = register_dtl(hwcpu, __pa(dtl)); if (ret) pr_err("WARNING: DTL registration of cpu %d (hw %d) " "failed with %ld\n", smp_processor_id(), hwcpu, ret); lppaca_of(cpu).dtl_enable_mask = 2; } } static long pSeries_lpar_hpte_insert(unsigned long hpte_group, unsigned long vpn, unsigned long pa, unsigned long rflags, unsigned long vflags, int psize, int apsize, int ssize) { unsigned long lpar_rc; unsigned long flags; unsigned long slot; unsigned long hpte_v, hpte_r; if (!(vflags & HPTE_V_BOLTED)) pr_devel("hpte_insert(group=%lx, vpn=%016lx, " "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n", hpte_group, vpn, pa, rflags, vflags, psize); hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID; hpte_r = hpte_encode_r(pa, psize, apsize) | rflags; if (!(vflags & HPTE_V_BOLTED)) pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); /* Now fill in the actual HPTE */ /* Set CEC cookie to 0 */ /* Zero page = 0 */ /* I-cache Invalidate = 0 */ /* I-cache synchronize = 0 */ /* Exact = 0 */ flags = 0; /* Make pHyp happy */ if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU)) hpte_r &= ~_PAGE_COHERENT; if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) flags |= H_COALESCE_CAND; lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); if (unlikely(lpar_rc == H_PTEG_FULL)) { if (!(vflags & HPTE_V_BOLTED)) 
pr_devel(" full\n"); return -1; } /* * Since we try and ioremap PHBs we don't own, the pte insert * will fail. However we must catch the failure in hash_page * or we will loop forever, so return -2 in this case. */ if (unlikely(lpar_rc != H_SUCCESS)) { if (!(vflags & HPTE_V_BOLTED)) pr_devel(" lpar err %ld\n", lpar_rc); return -2; } if (!(vflags & HPTE_V_BOLTED)) pr_devel(" -> slot: %lu\n", slot & 7); /* Because of iSeries, we have to pass down the secondary * bucket bit here as well */ return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3); } static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock); static long pSeries_lpar_hpte_remove(unsigned long hpte_group) { unsigned long slot_offset; unsigned long lpar_rc; int i; unsigned long dummy1, dummy2; /* pick a random slot to start at */ slot_offset = mftb() & 0x7; for (i = 0; i < HPTES_PER_GROUP; i++) { /* don't remove a bolted entry */ lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset, (0x1UL << 4), &dummy1, &dummy2); if (lpar_rc == H_SUCCESS) return i; /* * The test for adjunct partition is performed before the * ANDCOND test. H_RESOURCE may be returned, so we need to * check for that as well. 
*/ BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); slot_offset++; slot_offset &= 0x7; } return -1; } static void pSeries_lpar_hptab_clear(void) { unsigned long size_bytes = 1UL << ppc64_pft_size; unsigned long hpte_count = size_bytes >> 4; struct { unsigned long pteh; unsigned long ptel; } ptes[4]; long lpar_rc; unsigned long i, j; /* Read in batches of 4, * invalidate only valid entries not in the VRMA * hpte_count will be a multiple of 4 */ for (i = 0; i < hpte_count; i += 4) { lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes); if (lpar_rc != H_SUCCESS) continue; for (j = 0; j < 4; j++){ if ((ptes[j].pteh & HPTE_V_VRMA_MASK) == HPTE_V_VRMA_MASK) continue; if (ptes[j].pteh & HPTE_V_VALID) plpar_pte_remove_raw(0, i + j, 0, &(ptes[j].pteh), &(ptes[j].ptel)); } } } /* * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and * the low 3 bits of flags happen to line up. So no transform is needed. * We can probably optimize here and assume the high bits of newpp are * already zero. For now I am paranoid. 
*/ static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp, unsigned long vpn, int psize, int ssize, int local) { unsigned long lpar_rc; unsigned long flags = (newpp & 7) | H_AVPN; unsigned long want_v; want_v = hpte_encode_avpn(vpn, psize, ssize); pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", want_v, slot, flags, psize); lpar_rc = plpar_pte_protect(flags, slot, want_v); if (lpar_rc == H_NOT_FOUND) { pr_devel("not found !\n"); return -1; } pr_devel("ok\n"); BUG_ON(lpar_rc != H_SUCCESS); return 0; } static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot) { unsigned long dword0; unsigned long lpar_rc; unsigned long dummy_word1; unsigned long flags; /* Read 1 pte at a time */ /* Do not need RPN to logical page translation */ /* No cross CEC PFT access */ flags = 0; lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1); BUG_ON(lpar_rc != H_SUCCESS); return dword0; } static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize) { unsigned long hash; unsigned long i; long slot; unsigned long want_v, hpte_v; hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); want_v = hpte_encode_avpn(vpn, psize, ssize); /* Bolted entries are always in the primary group */ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; for (i = 0; i < HPTES_PER_GROUP; i++) { hpte_v = pSeries_lpar_hpte_getword0(slot); if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) /* HPTE matches */ return slot; ++slot; } return -1; } static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, int psize, int ssize) { unsigned long vpn; unsigned long lpar_rc, slot, vsid, flags; vsid = get_kernel_vsid(ea, ssize); vpn = hpt_vpn(ea, vsid, ssize); slot = pSeries_lpar_hpte_find(vpn, psize, ssize); BUG_ON(slot == -1); flags = newpp & 7; lpar_rc = plpar_pte_protect(flags, slot, 0); BUG_ON(lpar_rc != H_SUCCESS); } static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, int psize, 
int ssize, int local) { unsigned long want_v; unsigned long lpar_rc; unsigned long dummy1, dummy2; pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n", slot, vpn, psize, local); want_v = hpte_encode_avpn(vpn, psize, ssize); lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); if (lpar_rc == H_NOT_FOUND) return; BUG_ON(lpar_rc != H_SUCCESS); } static void pSeries_lpar_hpte_removebolted(unsigned long ea, int psize, int ssize) { unsigned long vpn; unsigned long slot, vsid; vsid = get_kernel_vsid(ea, ssize); vpn = hpt_vpn(ea, vsid, ssize); slot = pSeries_lpar_hpte_find(vpn, psize, ssize); BUG_ON(slot == -1); pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0); } /* Flag bits for H_BULK_REMOVE */ #define HBR_REQUEST 0x4000000000000000UL #define HBR_RESPONSE 0x8000000000000000UL #define HBR_END 0xc000000000000000UL #define HBR_AVPN 0x0200000000000000UL #define HBR_ANDCOND 0x0100000000000000UL /* * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie * lock. 
*/ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) { unsigned long vpn; unsigned long i, pix, rc; unsigned long flags = 0; struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); unsigned long param[9]; unsigned long hash, index, shift, hidx, slot; real_pte_t pte; int psize, ssize; if (lock_tlbie) spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); psize = batch->psize; ssize = batch->ssize; pix = 0; for (i = 0; i < number; i++) { vpn = batch->vpn[i]; pte = batch->pte[i]; pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { hash = hpt_hash(vpn, shift, ssize); hidx = __rpte_to_hidx(pte, index); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, local); } else { param[pix] = HBR_REQUEST | HBR_AVPN | slot; param[pix+1] = hpte_encode_avpn(vpn, psize, ssize); pix += 2; if (pix == 8) { rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], param[2], param[3], param[4], param[5], param[6], param[7]); BUG_ON(rc != H_SUCCESS); pix = 0; } } } pte_iterate_hashed_end(); } if (pix) { param[pix] = HBR_END; rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], param[2], param[3], param[4], param[5], param[6], param[7]); BUG_ON(rc != H_SUCCESS); } if (lock_tlbie) spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); } static int __init disable_bulk_remove(char *str) { if (strcmp(str, "off") == 0 && firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { printk(KERN_INFO "Disabling BULK_REMOVE firmware feature"); powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE; } return 1; } __setup("bulk_remove=", disable_bulk_remove); void __init hpte_init_lpar(void) { ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate; ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp; ppc_md.hpte_updateboltedpp = 
pSeries_lpar_hpte_updateboltedpp; ppc_md.hpte_insert = pSeries_lpar_hpte_insert; ppc_md.hpte_remove = pSeries_lpar_hpte_remove; ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted; ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range; ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear; } #ifdef CONFIG_PPC_SMLPAR #define CMO_FREE_HINT_DEFAULT 1 static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT; static int __init cmo_free_hint(char *str) { char *parm; parm = strstrip(str); if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) { printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n"); cmo_free_hint_flag = 0; return 1; } cmo_free_hint_flag = 1; printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n"); if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0) return 1; return 0; } __setup("cmo_free_hint=", cmo_free_hint); static void pSeries_set_page_state(struct page *page, int order, unsigned long state) { int i, j; unsigned long cmo_page_sz, addr; cmo_page_sz = cmo_get_page_size(); addr = __pa((unsigned long)page_address(page)); for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) { for (j = 0; j < PAGE_SIZE; j += cmo_page_sz) plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0); } } void arch_free_page(struct page *page, int order) { if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO)) return; pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED); } EXPORT_SYMBOL(arch_free_page); #endif #ifdef CONFIG_TRACEPOINTS /* * We optimise our hcall path by placing hcall_tracepoint_refcount * directly in the TOC so we can check if the hcall tracepoints are * enabled via a single load. */ /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ extern long hcall_tracepoint_refcount; /* * Since the tracing code might execute hcalls we need to guard against * recursion. One example of this are spinlocks calling H_YIELD on * shared processor partitions. 
*/ static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); void hcall_tracepoint_regfunc(void) { hcall_tracepoint_refcount++; } void hcall_tracepoint_unregfunc(void) { hcall_tracepoint_refcount--; } void __trace_hcall_entry(unsigned long opcode, unsigned long *args) { unsigned long flags; unsigned int *depth; /* * We cannot call tracepoints inside RCU idle regions which * means we must not trace H_CEDE. */ if (opcode == H_CEDE) return; local_irq_save(flags); depth = &__get_cpu_var(hcall_trace_depth); if (*depth) goto out; (*depth)++; preempt_disable(); trace_hcall_entry(opcode, args); (*depth)--; out: local_irq_restore(flags); } void __trace_hcall_exit(long opcode, unsigned long retval, unsigned long *retbuf) { unsigned long flags; unsigned int *depth; if (opcode == H_CEDE) return; local_irq_save(flags); depth = &__get_cpu_var(hcall_trace_depth); if (*depth) goto out; (*depth)++; trace_hcall_exit(opcode, retval, retbuf); preempt_enable(); (*depth)--; out: local_irq_restore(flags); } #endif /** * h_get_mpp * H_GET_MPP hcall returns info in 7 parms */ int h_get_mpp(struct hvcall_mpp_data *mpp_data) { int rc; unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; rc = plpar_hcall9(H_GET_MPP, retbuf); mpp_data->entitled_mem = retbuf[0]; mpp_data->mapped_mem = retbuf[1]; mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff; mpp_data->pool_num = retbuf[2] & 0xffff; mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff; mpp_data->pool_size = retbuf[4]; mpp_data->loan_request = retbuf[5]; mpp_data->backing_mem = retbuf[6]; return rc; } EXPORT_SYMBOL(h_get_mpp); int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data) { int rc; unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 }; rc = plpar_hcall9(H_GET_MPP_X, retbuf); mpp_x_data->coalesced_bytes = retbuf[0]; mpp_x_data->pool_coalesced_bytes = retbuf[1]; mpp_x_data->pool_purr_cycles = retbuf[2]; 
mpp_x_data->pool_spurr_cycles = retbuf[3]; return rc; }
gpl-2.0
bq-vegetalte/stock_vegetalte
drivers/staging/speakup/speakup_ltlk.c
3471
6040
/* * originally written by: Kirk Reiser <kirk@braille.uwo.ca> * this version considerably modified by David Borowski, david575@rogers.com * * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * specificly written as a driver for the speakup screenreview * s not a general device driver. */ #include "speakup.h" #include "spk_priv.h" #include "serialio.h" #include "speakup_dtlk.h" /* local header file for LiteTalk values */ #define DRV_VERSION "2.11" #define PROCSPEECH 0x0d static int synth_probe(struct spk_synth *synth); static struct var_t vars[] = { { CAPS_START, .u.s = {"\x01+35p" } }, { CAPS_STOP, .u.s = {"\x01-35p" } }, { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } }, { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } }, { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } }, { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/ltlk. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute freq_attribute = __ATTR(freq, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute tone_attribute = __ATTR(tone, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &freq_attribute.attr, &pitch_attribute.attr, &punct_attribute.attr, &rate_attribute.attr, &tone_attribute.attr, &voice_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static struct spk_synth synth_ltlk = { .name = "ltlk", .version = DRV_VERSION, .long_name = "LiteTalk", .init = "\01@\x01\x31y\n\0", .procspeech = PROCSPEECH, .clear = SYNTH_CLEAR, .delay = 500, .trigger = 50, .jiffies = 50, .full = 40000, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .probe = synth_probe, .release = spk_serial_release, .synth_immediate = spk_synth_immediate, .catch_up = spk_do_catch_up, .flush = spk_synth_flush, .is_alive = spk_synth_is_alive_restart, .synth_adjust = NULL, .read_buff_add = NULL, .get_index = spk_serial_in_nowait, .indexing = { .command = "\x01%di", .lowindex = 1, .highindex = 5, .currindex = 1, }, .attributes = { .attrs = synth_attrs, .name = "ltlk", }, }; /* interrogate the LiteTalk and print its settings */ static void synth_interrogate(struct spk_synth *synth) { unsigned char *t, i; unsigned char buf[50], rom_v[20]; spk_synth_immediate(synth, "\x18\x01?"); for (i = 0; i < 50; i++) { buf[i] = spk_serial_in(); if (i > 2 && buf[i] == 0x7f) break; } t = buf+2; for (i = 0; *t != '\r'; t++) { rom_v[i] = *t; if (++i >= 19) break; } rom_v[i] = 0; pr_info("%s: ROM version: %s\n", synth->long_name, rom_v); } static int synth_probe(struct spk_synth *synth) { int failed = 0; failed = spk_serial_synth_probe(synth); if (failed == 0) synth_interrogate(synth); synth->alive = !failed; return failed; } module_param_named(ser, synth_ltlk.ser, int, S_IRUGO); module_param_named(start, synth_ltlk.startup, short, S_IRUGO); MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer 
(0-based)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); static int __init ltlk_init(void) { return synth_add(&synth_ltlk); } static void __exit ltlk_exit(void) { synth_remove(&synth_ltlk); } module_init(ltlk_init); module_exit(ltlk_exit); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_AUTHOR("David Borowski"); MODULE_DESCRIPTION("Speakup support for DoubleTalk LT/LiteTalk synthesizers"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
greg17477/kernel_franco_mako
arch/arm/mach-msm/hw3d.c
5007
9972
/* arch/arm/mach-msm/hw3d.c * * Register/Interrupt access for userspace 3D library. * * Copyright (C) 2007 Google, Inc. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/poll.h> #include <linux/time.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/clk.h> #include <linux/android_pmem.h> #include <mach/board.h> static DEFINE_SPINLOCK(hw3d_lock); static DECLARE_WAIT_QUEUE_HEAD(hw3d_queue); static int hw3d_pending; static int hw3d_disabled; static struct clk *grp_clk; static struct clk *imem_clk; DECLARE_MUTEX(hw3d_sem); static unsigned int hw3d_granted; static struct file *hw3d_granted_file; static irqreturn_t hw3d_irq_handler(int irq, void *data) { unsigned long flags; spin_lock_irqsave(&hw3d_lock, flags); if (!hw3d_disabled) { disable_irq(INT_GRAPHICS); hw3d_disabled = 1; } hw3d_pending = 1; spin_unlock_irqrestore(&hw3d_lock, flags); wake_up(&hw3d_queue); return IRQ_HANDLED; } static void hw3d_disable_interrupt(void) { unsigned long flags; spin_lock_irqsave(&hw3d_lock, flags); if (!hw3d_disabled) { disable_irq(INT_GRAPHICS); hw3d_disabled = 1; } spin_unlock_irqrestore(&hw3d_lock, flags); } static long hw3d_wait_for_interrupt(void) { unsigned long flags; int ret; for (;;) { spin_lock_irqsave(&hw3d_lock, flags); if (hw3d_pending) { hw3d_pending = 0; spin_unlock_irqrestore(&hw3d_lock, flags); return 0; } if (hw3d_disabled) { hw3d_disabled = 
0; enable_irq(INT_GRAPHICS); } spin_unlock_irqrestore(&hw3d_lock, flags); ret = wait_event_interruptible(hw3d_queue, hw3d_pending); if (ret < 0) { hw3d_disable_interrupt(); return ret; } } return 0; } #define HW3D_REGS_LEN 0x100000 static long hw3d_wait_for_revoke(struct hw3d_info *info, struct file *filp) { struct hw3d_data *data = filp->private_data; int ret; if (is_master(info, filp)) { pr_err("%s: cannot revoke on master node\n", __func__); return -EPERM; } ret = wait_event_interruptible(info->revoke_wq, info->revoking || data->closing); if (ret == 0 && data->closing) ret = -EPIPE; if (ret < 0) return ret; return 0; } static void locked_hw3d_client_done(struct hw3d_info *info, int had_timer) { if (info->enabled) { pr_debug("hw3d: was enabled\n"); info->enabled = 0; clk_disable(info->grp_clk); clk_disable(info->imem_clk); } info->revoking = 0; /* double check that the irqs are disabled */ locked_hw3d_irq_disable(info); if (had_timer) wake_unlock(&info->wake_lock); wake_up(&info->revoke_done_wq); } static void do_force_revoke(struct hw3d_info *info) { unsigned long flags; /* at this point, the task had a chance to relinquish the gpu, but * it hasn't. So, we kill it */ spin_lock_irqsave(&info->lock, flags); pr_debug("hw3d: forcing revoke\n"); locked_hw3d_irq_disable(info); if (info->client_task) { pr_info("hw3d: force revoke from pid=%d\n", info->client_task->pid); force_sig(SIGKILL, info->client_task); put_task_struct(info->client_task); info->client_task = NULL; } locked_hw3d_client_done(info, 1); pr_debug("hw3d: done forcing revoke\n"); spin_unlock_irqrestore(&info->lock, flags); } #define REVOKE_TIMEOUT (2 * HZ) static void locked_hw3d_revoke(struct hw3d_info *info) { /* force us to wait to suspend until the revoke is done. If the * user doesn't release the gpu, the timer will turn off the gpu, * and force kill the process. 
*/ wake_lock(&info->wake_lock); info->revoking = 1; wake_up(&info->revoke_wq); mod_timer(&info->revoke_timer, jiffies + REVOKE_TIMEOUT); } bool is_msm_hw3d_file(struct file *file) { struct hw3d_info *info = hw3d_info; if (MAJOR(file->f_dentry->d_inode->i_rdev) == MAJOR(info->devno) && (is_master(info, file) || is_client(info, file))) return 1; return 0; } void put_msm_hw3d_file(struct file *file) { if (!is_msm_hw3d_file(file)) return; fput(file); } static long hw3d_revoke_gpu(struct file *file) { int ret = 0; unsigned long user_start, user_len; struct pmem_region region = {.offset = 0x0, .len = HW3D_REGS_LEN}; down(&hw3d_sem); if (!hw3d_granted) goto end; /* revoke the pmem region completely */ if ((ret = pmem_remap(&region, file, PMEM_UNMAP))) goto end; get_pmem_user_addr(file, &user_start, &user_len); /* reset the gpu */ clk_disable(grp_clk); clk_disable(imem_clk); hw3d_granted = 0; end: up(&hw3d_sem); return ret; } static long hw3d_grant_gpu(struct file *file) { int ret = 0; struct pmem_region region = {.offset = 0x0, .len = HW3D_REGS_LEN}; down(&hw3d_sem); if (hw3d_granted) { ret = -1; goto end; } /* map the registers */ if ((ret = pmem_remap(&region, file, PMEM_MAP))) goto end; clk_enable(grp_clk); clk_enable(imem_clk); hw3d_granted = 1; hw3d_granted_file = file; end: up(&hw3d_sem); return ret; } static int hw3d_release(struct inode *inode, struct file *file) { down(&hw3d_sem); /* if the gpu is in use, and its inuse by the file that was released */ if (hw3d_granted && (file == hw3d_granted_file)) { clk_disable(grp_clk); clk_disable(imem_clk); hw3d_granted = 0; hw3d_granted_file = NULL; } up(&hw3d_sem); return 0; } static void hw3d_vma_open(struct vm_area_struct *vma) { /* XXX: should the master be allowed to fork and keep the mappings? */ /* TODO: remap garbage page into here. * * For now, just pull the mapping. The user shouldn't be forking * and using it anyway. 
*/ zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL); } static void hw3d_vma_close(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct hw3d_data *data = file->private_data; int i; pr_debug("hw3d: current %u ppid %u file %p count %ld\n", current->pid, current->parent->pid, file, file_count(file)); BUG_ON(!data); mutex_lock(&data->mutex); for (i = 0; i < HW3D_NUM_REGIONS; ++i) { if (data->vmas[i] == vma) { data->vmas[i] = NULL; goto done; } } pr_warning("%s: vma %p not of ours during vma_close\n", __func__, vma); done: mutex_unlock(&data->mutex); } static int hw3d_mmap(struct file *file, struct vm_area_struct *vma) { struct hw3d_info *info = hw3d_info; struct hw3d_data *data = file->private_data; unsigned long vma_size = vma->vm_end - vma->vm_start; int ret = 0; int region = REGION_PAGE_ID(vma->vm_pgoff); if (region >= HW3D_NUM_REGIONS) { pr_err("%s: Trying to mmap unknown region %d\n", __func__, region); return -EINVAL; } else if (vma_size > info->regions[region].size) { pr_err("%s: VMA size %ld exceeds region %d size %ld\n", __func__, vma_size, region, info->regions[region].size); return -EINVAL; } else if (REGION_PAGE_OFFS(vma->vm_pgoff) != 0 || (vma_size & ~PAGE_MASK)) { pr_err("%s: Can't remap part of the region %d\n", __func__, region); return -EINVAL; } else if (!is_master(info, file) && current->group_leader != info->client_task) { pr_err("%s: current(%d) != client_task(%d)\n", __func__, current->group_leader->pid, info->client_task->pid); return -EPERM; } else if (!is_master(info, file) && (info->revoking || info->suspending)) { pr_err("%s: cannot mmap while revoking(%d) or suspending(%d)\n", __func__, info->revoking, info->suspending); return -EPERM; } mutex_lock(&data->mutex); if (data->vmas[region] != NULL) { pr_err("%s: Region %d already mapped (pid=%d tid=%d)\n", __func__, region, current->group_leader->pid, current->pid); ret = -EBUSY; goto done; } /* our mappings are always noncached */ #ifdef pgprot_noncached 
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); #endif ret = io_remap_pfn_range(vma, vma->vm_start, info->regions[region].pbase >> PAGE_SHIFT, vma_size, vma->vm_page_prot); if (ret) { pr_err("%s: Cannot remap page range for region %d!\n", __func__, region); ret = -EAGAIN; goto done; } /* Prevent a malicious client from stealing another client's data * by forcing a revoke on it and then mmapping the GPU buffers. */ if (region != HW3D_REGS) memset(info->regions[region].vbase, 0, info->regions[region].size); vma->vm_ops = &hw3d_vm_ops; /* mark this region as mapped */ data->vmas[region] = vma; done: mutex_unlock(&data->mutex); return ret; } static long hw3d_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case HW3D_REVOKE_GPU: return hw3d_revoke_gpu(file); break; case HW3D_GRANT_GPU: return hw3d_grant_gpu(file); break; case HW3D_WAIT_FOR_INTERRUPT: return hw3d_wait_for_interrupt(); break; default: return -EINVAL; } return 0; } static struct android_pmem_platform_data pmem_data = { .name = "hw3d", .start = 0xA0000000, .size = 0x100000, .allocator_type = PMEM_ALLOCATORTYPE_ALLORNOTHING, .cached = 0, }; static int __init hw3d_init(void) { int ret; grp_clk = clk_get(NULL, "grp_clk"); if (IS_ERR(grp_clk)) return PTR_ERR(grp_clk); imem_clk = clk_get(NULL, "imem_clk"); if (IS_ERR(imem_clk)) { clk_put(grp_clk); return PTR_ERR(imem_clk); } ret = request_irq(INT_GRAPHICS, hw3d_irq_handler, IRQF_TRIGGER_HIGH, "hw3d", 0); if (ret) { clk_put(grp_clk); clk_put(imem_clk); return ret; } hw3d_disable_interrupt(); hw3d_granted = 0; return pmem_setup(&pmem_data, hw3d_ioctl, hw3d_release); } device_initcall(hw3d_init);
gpl-2.0
Jazz-823/kernel_lge_hammerhead_M
fs/btrfs/dir-item.c
5007
11931
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include "ctree.h" #include "disk-io.h" #include "hash.h" #include "transaction.h" /* * insert a name into a directory, doing overflow properly if there is a hash * collision. data_size indicates how big the item inserted should be. On * success a struct btrfs_dir_item pointer is returned, otherwise it is * an ERR_PTR. * * The name is not copied into the dir item, you have to do that yourself. 
*/ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *cpu_key, u32 data_size, const char *name, int name_len) { int ret; char *ptr; struct btrfs_item *item; struct extent_buffer *leaf; ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); if (ret == -EEXIST) { struct btrfs_dir_item *di; di = btrfs_match_dir_item_name(root, path, name, name_len); if (di) return ERR_PTR(-EEXIST); btrfs_extend_item(trans, root, path, data_size); } else if (ret < 0) return ERR_PTR(ret); WARN_ON(ret > 0); leaf = path->nodes[0]; item = btrfs_item_nr(leaf, path->slots[0]); ptr = btrfs_item_ptr(leaf, path->slots[0], char); BUG_ON(data_size > btrfs_item_size(leaf, item)); ptr += btrfs_item_size(leaf, item) - data_size; return (struct btrfs_dir_item *)ptr; } /* * xattrs work a lot like directories, this inserts an xattr item * into the tree */ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid, const char *name, u16 name_len, const void *data, u16 data_len) { int ret = 0; struct btrfs_dir_item *dir_item; unsigned long name_ptr, data_ptr; struct btrfs_key key, location; struct btrfs_disk_key disk_key; struct extent_buffer *leaf; u32 data_size; BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)); key.objectid = objectid; btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); data_size = sizeof(*dir_item) + name_len + data_len; dir_item = insert_with_overflow(trans, root, path, &key, data_size, name, name_len); if (IS_ERR(dir_item)) return PTR_ERR(dir_item); memset(&location, 0, sizeof(location)); leaf = path->nodes[0]; btrfs_cpu_key_to_disk(&disk_key, &location); btrfs_set_dir_item_key(leaf, dir_item, &disk_key); btrfs_set_dir_type(leaf, dir_item, BTRFS_FT_XATTR); btrfs_set_dir_name_len(leaf, dir_item, name_len); btrfs_set_dir_transid(leaf, dir_item, 
trans->transid); btrfs_set_dir_data_len(leaf, dir_item, data_len); name_ptr = (unsigned long)(dir_item + 1); data_ptr = (unsigned long)((char *)name_ptr + name_len); write_extent_buffer(leaf, name, name_ptr, name_len); write_extent_buffer(leaf, data, data_ptr, data_len); btrfs_mark_buffer_dirty(path->nodes[0]); return ret; } /* * insert a directory item in the tree, doing all the magic for * both indexes. 'dir' indicates which objectid to insert it into, * 'location' is the key to stuff into the directory item, 'type' is the * type of the inode we're pointing to, and 'index' is the sequence number * to use for the second index (if one is created). * Will return 0 or -ENOMEM */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, struct inode *dir, struct btrfs_key *location, u8 type, u64 index) { int ret = 0; int ret2 = 0; struct btrfs_path *path; struct btrfs_dir_item *dir_item; struct extent_buffer *leaf; unsigned long name_ptr; struct btrfs_key key; struct btrfs_disk_key disk_key; u32 data_size; key.objectid = btrfs_ino(dir); btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->leave_spinning = 1; btrfs_cpu_key_to_disk(&disk_key, location); data_size = sizeof(*dir_item) + name_len; dir_item = insert_with_overflow(trans, root, path, &key, data_size, name, name_len); if (IS_ERR(dir_item)) { ret = PTR_ERR(dir_item); if (ret == -EEXIST) goto second_insert; goto out_free; } leaf = path->nodes[0]; btrfs_set_dir_item_key(leaf, dir_item, &disk_key); btrfs_set_dir_type(leaf, dir_item, type); btrfs_set_dir_data_len(leaf, dir_item, 0); btrfs_set_dir_name_len(leaf, dir_item, name_len); btrfs_set_dir_transid(leaf, dir_item, trans->transid); name_ptr = (unsigned long)(dir_item + 1); write_extent_buffer(leaf, name, name_ptr, name_len); btrfs_mark_buffer_dirty(leaf); second_insert: /* FIXME, use some real flag for 
selecting the extra index */ if (root == root->fs_info->tree_root) { ret = 0; goto out_free; } btrfs_release_path(path); ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir, &disk_key, type, index); out_free: btrfs_free_path(path); if (ret) return ret; if (ret2) return ret2; return 0; } /* * lookup a directory item based on name. 'dir' is the objectid * we're searching in, and 'mod' tells us if you plan on deleting the * item (use mod < 0) or changing the options (use mod > 0) */ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, const char *name, int name_len, int mod) { int ret; struct btrfs_key key; int ins_len = mod < 0 ? -1 : 0; int cow = mod != 0; key.objectid = dir; btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); if (ret < 0) return ERR_PTR(ret); if (ret > 0) return NULL; return btrfs_match_dir_item_name(root, path, name, name_len); } /* * lookup a directory item based on index. 'dir' is the objectid * we're searching in, and 'mod' tells us if you plan on deleting the * item (use mod < 0) or changing the options (use mod > 0) * * The name is used to make sure the index really points to the name you were * looking for. */ struct btrfs_dir_item * btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, u64 objectid, const char *name, int name_len, int mod) { int ret; struct btrfs_key key; int ins_len = mod < 0 ? 
-1 : 0; int cow = mod != 0; key.objectid = dir; btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); key.offset = objectid; ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); if (ret < 0) return ERR_PTR(ret); if (ret > 0) return ERR_PTR(-ENOENT); return btrfs_match_dir_item_name(root, path, name, name_len); } struct btrfs_dir_item * btrfs_search_dir_index_item(struct btrfs_root *root, struct btrfs_path *path, u64 dirid, const char *name, int name_len) { struct extent_buffer *leaf; struct btrfs_dir_item *di; struct btrfs_key key; u32 nritems; int ret; key.objectid = dirid; key.type = BTRFS_DIR_INDEX_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) return ERR_PTR(ret); leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); while (1) { if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(root, path); if (ret < 0) return ERR_PTR(ret); if (ret > 0) break; leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); continue; } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != dirid || key.type != BTRFS_DIR_INDEX_KEY) break; di = btrfs_match_dir_item_name(root, path, name, name_len); if (di) return di; path->slots[0]++; } return NULL; } struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, const char *name, u16 name_len, int mod) { int ret; struct btrfs_key key; int ins_len = mod < 0 ? -1 : 0; int cow = mod != 0; key.objectid = dir; btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); if (ret < 0) return ERR_PTR(ret); if (ret > 0) return NULL; return btrfs_match_dir_item_name(root, path, name, name_len); } /* * helper function to look at the directory item pointed to by 'path' * this walks through all the entries in a dir item and finds one * for a specific name. 
*/ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, struct btrfs_path *path, const char *name, int name_len) { struct btrfs_dir_item *dir_item; unsigned long name_ptr; u32 total_len; u32 cur = 0; u32 this_len; struct extent_buffer *leaf; leaf = path->nodes[0]; dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); if (verify_dir_item(root, leaf, dir_item)) return NULL; total_len = btrfs_item_size_nr(leaf, path->slots[0]); while (cur < total_len) { this_len = sizeof(*dir_item) + btrfs_dir_name_len(leaf, dir_item) + btrfs_dir_data_len(leaf, dir_item); name_ptr = (unsigned long)(dir_item + 1); if (btrfs_dir_name_len(leaf, dir_item) == name_len && memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) return dir_item; cur += this_len; dir_item = (struct btrfs_dir_item *)((char *)dir_item + this_len); } return NULL; } /* * given a pointer into a directory item, delete it. This * handles items that have more than one entry in them. */ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_dir_item *di) { struct extent_buffer *leaf; u32 sub_item_len; u32 item_len; int ret = 0; leaf = path->nodes[0]; sub_item_len = sizeof(*di) + btrfs_dir_name_len(leaf, di) + btrfs_dir_data_len(leaf, di); item_len = btrfs_item_size_nr(leaf, path->slots[0]); if (sub_item_len == item_len) { ret = btrfs_del_item(trans, root, path); } else { /* MARKER */ unsigned long ptr = (unsigned long)di; unsigned long start; start = btrfs_item_ptr_offset(leaf, path->slots[0]); memmove_extent_buffer(leaf, ptr, ptr + sub_item_len, item_len - (ptr + sub_item_len - start)); btrfs_truncate_item(trans, root, path, item_len - sub_item_len, 1); } return ret; } int verify_dir_item(struct btrfs_root *root, struct extent_buffer *leaf, struct btrfs_dir_item *dir_item) { u16 namelen = BTRFS_NAME_LEN; u8 type = btrfs_dir_type(leaf, dir_item); if (type >= BTRFS_FT_MAX) { printk(KERN_CRIT "btrfs: invalid 
dir item type: %d\n", (int)type); return 1; } if (type == BTRFS_FT_XATTR) namelen = XATTR_NAME_MAX; if (btrfs_dir_name_len(leaf, dir_item) > namelen) { printk(KERN_CRIT "btrfs: invalid dir item name len: %u\n", (unsigned)btrfs_dir_data_len(leaf, dir_item)); return 1; } /* BTRFS_MAX_XATTR_SIZE is the same for all dir items */ if (btrfs_dir_data_len(leaf, dir_item) > BTRFS_MAX_XATTR_SIZE(root)) { printk(KERN_CRIT "btrfs: invalid dir item data len: %u\n", (unsigned)btrfs_dir_data_len(leaf, dir_item)); return 1; } return 0; }
gpl-2.0
arpitjain9819/FIRE-AND-FLAMES
drivers/gpu/drm/gma500/cdv_intel_lvds.c
5263
20096
/* * Copyright © 2006-2011 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Authors: * Eric Anholt <eric@anholt.net> * Dave Airlie <airlied@linux.ie> * Jesse Barnes <jesse.barnes@intel.com> */ #include <linux/i2c.h> #include <linux/dmi.h> #include <drm/drmP.h> #include "intel_bios.h" #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" #include "power.h" #include <linux/pm_runtime.h> #include "cdv_device.h" /** * LVDS I2C backlight control macros */ #define BRIGHTNESS_MAX_LEVEL 100 #define BRIGHTNESS_MASK 0xFF #define BLC_I2C_TYPE 0x01 #define BLC_PWM_TYPT 0x02 #define BLC_POLARITY_NORMAL 0 #define BLC_POLARITY_INVERSE 1 #define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE) #define PSB_BLC_MIN_PWM_REG_FREQ (0x2) #define PSB_BLC_PWM_PRECISION_FACTOR (10) #define PSB_BACKLIGHT_PWM_CTL_SHIFT (16) #define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE) struct cdv_intel_lvds_priv { /** * Saved LVDO output states */ uint32_t savePP_ON; uint32_t savePP_OFF; uint32_t saveLVDS; uint32_t savePP_CONTROL; uint32_t savePP_CYCLE; uint32_t savePFIT_CONTROL; uint32_t savePFIT_PGM_RATIOS; uint32_t saveBLC_PWM_CTL; }; /* * Returns the maximum level of the backlight duty cycle field. 
*/ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; u32 retval; if (gma_power_begin(dev, false)) { retval = ((REG_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; gma_power_end(dev); } else retval = ((dev_priv->regs.saveBLC_PWM_CTL & BACKLIGHT_MODULATION_FREQ_MASK) >> BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; return retval; } #if 0 /* * Set LVDS backlight level by I2C command */ static int cdv_lvds_i2c_set_brightness(struct drm_device *dev, unsigned int level) { struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus; u8 out_buf[2]; unsigned int blc_i2c_brightness; struct i2c_msg msgs[] = { { .addr = lvds_i2c_bus->slave_addr, .flags = 0, .len = 2, .buf = out_buf, } }; blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level * BRIGHTNESS_MASK / BRIGHTNESS_MAX_LEVEL); if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE) blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness; out_buf[0] = dev_priv->lvds_bl->brightnesscmd; out_buf[1] = (u8)blc_i2c_brightness; if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) return 0; DRM_ERROR("I2C transfer error\n"); return -1; } static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level) { struct drm_psb_private *dev_priv = dev->dev_private; u32 max_pwm_blc; u32 blc_pwm_duty_cycle; max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev); /*BLC_PWM_CTL Should be initiated while backlight device init*/ BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0); blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL; if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE) blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle; blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR; REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (blc_pwm_duty_cycle)); return 0; } /* * Set LVDS backlight level either by I2C or PWM */ 
void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level) { struct drm_psb_private *dev_priv = dev->dev_private; if (!dev_priv->lvds_bl) { DRM_ERROR("NO LVDS Backlight Info\n"); return; } if (dev_priv->lvds_bl->type == BLC_I2C_TYPE) cdv_lvds_i2c_set_brightness(dev, level); else cdv_lvds_pwm_set_brightness(dev, level); } #endif /** * Sets the backlight level. * * level backlight level, from 0 to cdv_intel_lvds_get_max_backlight(). */ static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level) { struct drm_psb_private *dev_priv = dev->dev_private; u32 blc_pwm_ctl; if (gma_power_begin(dev, false)) { blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl | (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); gma_power_end(dev); } else { blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL & ~BACKLIGHT_DUTY_CYCLE_MASK; dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl | (level << BACKLIGHT_DUTY_CYCLE_SHIFT)); } } /** * Sets the power state for the panel. */ static void cdv_intel_lvds_set_power(struct drm_device *dev, struct drm_encoder *encoder, bool on) { struct drm_psb_private *dev_priv = dev->dev_private; u32 pp_status; if (!gma_power_begin(dev, true)) return; if (on) { REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON); do { pp_status = REG_READ(PP_STATUS); } while ((pp_status & PP_ON) == 0); cdv_intel_lvds_set_backlight(dev, dev_priv->mode_dev.backlight_duty_cycle); } else { cdv_intel_lvds_set_backlight(dev, 0); REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON); do { pp_status = REG_READ(PP_STATUS); } while (pp_status & PP_ON); } gma_power_end(dev); } static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; if (mode == DRM_MODE_DPMS_ON) cdv_intel_lvds_set_power(dev, encoder, true); else cdv_intel_lvds_set_power(dev, encoder, false); /* XXX: We never power down the LVDS pairs. 
*/ } static void cdv_intel_lvds_save(struct drm_connector *connector) { } static void cdv_intel_lvds_restore(struct drm_connector *connector) { } static int cdv_intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct drm_display_mode *fixed_mode = dev_priv->mode_dev.panel_fixed_mode; /* just in case */ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; /* just in case */ if (mode->flags & DRM_MODE_FLAG_INTERLACE) return MODE_NO_INTERLACE; if (fixed_mode) { if (mode->hdisplay > fixed_mode->hdisplay) return MODE_PANEL; if (mode->vdisplay > fixed_mode->vdisplay) return MODE_PANEL; } return MODE_OK; } static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; struct drm_encoder *tmp_encoder; struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode; /* Should never happen!! */ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) { if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) { printk(KERN_ERR "Can't enable LVDS and another " "encoder on the same pipe\n"); return false; } } /* * If we have timings from the BIOS for the panel, put them in * to the adjusted mode. The CRTC will be set up for this mode, * with the panel scaling set up to source from the H/VDisplay * of the original mode. 
*/ if (panel_fixed_mode != NULL) { adjusted_mode->hdisplay = panel_fixed_mode->hdisplay; adjusted_mode->hsync_start = panel_fixed_mode->hsync_start; adjusted_mode->hsync_end = panel_fixed_mode->hsync_end; adjusted_mode->htotal = panel_fixed_mode->htotal; adjusted_mode->vdisplay = panel_fixed_mode->vdisplay; adjusted_mode->vsync_start = panel_fixed_mode->vsync_start; adjusted_mode->vsync_end = panel_fixed_mode->vsync_end; adjusted_mode->vtotal = panel_fixed_mode->vtotal; adjusted_mode->clock = panel_fixed_mode->clock; drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); } /* * XXX: It would be nice to support lower refresh rates on the * panels to reduce power consumption, and perhaps match the * user's requested refresh rate. */ return true; } static void cdv_intel_lvds_prepare(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; if (!gma_power_begin(dev, true)) return; mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL & BACKLIGHT_DUTY_CYCLE_MASK); cdv_intel_lvds_set_power(dev, encoder, false); gma_power_end(dev); } static void cdv_intel_lvds_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; if (mode_dev->backlight_duty_cycle == 0) mode_dev->backlight_duty_cycle = cdv_intel_lvds_get_max_backlight(dev); cdv_intel_lvds_set_power(dev, encoder, true); } static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; u32 pfit_control; /* * The LVDS pin pair will already have been turned on in the * cdv_intel_crtc_mode_set since it has a large impact on the DPLL * 
settings. */ /* * Enable automatic panel scaling so that non-native modes fill the * screen. Should be enabled before the pipe is enabled, according to * register description and PRM. */ if (mode->hdisplay != adjusted_mode->hdisplay || mode->vdisplay != adjusted_mode->vdisplay) pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | HORIZ_INTERP_BILINEAR); else pfit_control = 0; if (dev_priv->lvds_dither) pfit_control |= PANEL_8TO6_DITHER_ENABLE; REG_WRITE(PFIT_CONTROL, pfit_control); } /** * Detect the LVDS connection. * * This always returns CONNECTOR_STATUS_CONNECTED. * This connector should only have * been set up if the LVDS was actually connected anyway. */ static enum drm_connector_status cdv_intel_lvds_detect( struct drm_connector *connector, bool force) { return connector_status_connected; } /** * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. */ static int cdv_intel_lvds_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; int ret; ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter); if (ret) return ret; /* Didn't get an EDID, so * Set wide sync ranges so we get all modes * handed to valid_mode for checking */ connector->display_info.min_vfreq = 0; connector->display_info.max_vfreq = 200; connector->display_info.min_hfreq = 0; connector->display_info.max_hfreq = 200; if (mode_dev->panel_fixed_mode != NULL) { struct drm_display_mode *mode = drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); drm_mode_probed_add(connector, mode); return 1; } return 0; } /** * cdv_intel_lvds_destroy - unregister and free LVDS structures * @connector: connector to free * * Unregister the DDC bus for this connector then free the driver private * structure. 
*/ static void cdv_intel_lvds_destroy(struct drm_connector *connector) { struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); if (psb_intel_encoder->i2c_bus) psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); } static int cdv_intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { struct drm_encoder *encoder = connector->encoder; if (!strcmp(property->name, "scaling mode") && encoder) { struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc); uint64_t curValue; if (!crtc) return -1; switch (value) { case DRM_MODE_SCALE_FULLSCREEN: break; case DRM_MODE_SCALE_NO_SCALE: break; case DRM_MODE_SCALE_ASPECT: break; default: return -1; } if (drm_connector_property_get_value(connector, property, &curValue)) return -1; if (curValue == value) return 0; if (drm_connector_property_set_value(connector, property, value)) return -1; if (crtc->saved_mode.hdisplay != 0 && crtc->saved_mode.vdisplay != 0) { if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode, encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb)) return -1; } } else if (!strcmp(property->name, "backlight") && encoder) { if (drm_connector_property_set_value(connector, property, value)) return -1; else { #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE struct drm_psb_private *dev_priv = encoder->dev->dev_private; struct backlight_device *bd = dev_priv->backlight_device; bd->props.brightness = value; backlight_update_status(bd); #endif } } else if (!strcmp(property->name, "DPMS") && encoder) { struct drm_encoder_helper_funcs *helpers = encoder->helper_private; helpers->dpms(encoder, value); } return 0; } static const struct drm_encoder_helper_funcs cdv_intel_lvds_helper_funcs = { .dpms = cdv_intel_lvds_encoder_dpms, .mode_fixup = cdv_intel_lvds_mode_fixup, .prepare = cdv_intel_lvds_prepare, .mode_set = cdv_intel_lvds_mode_set, .commit = 
cdv_intel_lvds_commit, }; static const struct drm_connector_helper_funcs cdv_intel_lvds_connector_helper_funcs = { .get_modes = cdv_intel_lvds_get_modes, .mode_valid = cdv_intel_lvds_mode_valid, .best_encoder = psb_intel_best_encoder, }; static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = { .dpms = drm_helper_connector_dpms, .save = cdv_intel_lvds_save, .restore = cdv_intel_lvds_restore, .detect = cdv_intel_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = cdv_intel_lvds_set_property, .destroy = cdv_intel_lvds_destroy, }; static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); } const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = { .destroy = cdv_intel_lvds_enc_destroy, }; /** * cdv_intel_lvds_init - setup LVDS connectors on this device * @dev: drm device * * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). 
*/ void cdv_intel_lvds_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { struct psb_intel_encoder *psb_intel_encoder; struct psb_intel_connector *psb_intel_connector; struct cdv_intel_lvds_priv *lvds_priv; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_display_mode *scan; struct drm_crtc *crtc; struct drm_psb_private *dev_priv = dev->dev_private; u32 lvds; int pipe; psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); if (!psb_intel_encoder) return; psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); if (!psb_intel_connector) goto failed_connector; lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL); if (!lvds_priv) goto failed_lvds_priv; psb_intel_encoder->dev_priv = lvds_priv; connector = &psb_intel_connector->base; encoder = &psb_intel_encoder->base; drm_connector_init(dev, connector, &cdv_intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); drm_encoder_init(dev, encoder, &cdv_intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_LVDS; drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs); drm_connector_helper_add(connector, &cdv_intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; /*Attach connector properties*/ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL); /** * Set up I2C bus * FIXME: distroy i2c_bus when exit */ psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B"); if (!psb_intel_encoder->i2c_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "I2C bus registration failed.\n"); goto failed_blc_i2c; } psb_intel_encoder->i2c_bus->slave_addr = 
0x2C; dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus; /* * LVDS discovery: * 1) check for EDID on DDC * 2) check for VBT data * 3) check to see if LVDS is already on * if none of the above, no panel * 4) make sure lid is open * if closed, act like it's not there for now */ /* Set up the DDC bus. */ psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C"); if (!psb_intel_encoder->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " "failed.\n"); goto failed_ddc; } /* * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter); list_for_each_entry(scan, &connector->probed_modes, head) { if (scan->type & DRM_MODE_TYPE_PREFERRED) { mode_dev->panel_fixed_mode = drm_mode_duplicate(dev, scan); goto out; /* FIXME: check for quirks */ } } /* Failed to get EDID, what about VBT? do we need this?*/ if (dev_priv->lfp_lvds_vbt_mode) { mode_dev->panel_fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); if (mode_dev->panel_fixed_mode) { mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; goto out; /* FIXME: check for quirks */ } } /* * If we didn't get EDID, try checking if the panel is already turned * on. If so, assume that whatever is currently programmed is the * correct mode. */ lvds = REG_READ(LVDS); pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; crtc = psb_intel_get_crtc_from_pipe(dev, pipe); if (crtc && (lvds & LVDS_PORT_EN)) { mode_dev->panel_fixed_mode = cdv_intel_crtc_mode_get(dev, crtc); if (mode_dev->panel_fixed_mode) { mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; goto out; /* FIXME: check for quirks */ } } /* If we still don't have a mode after all that, give up. 
*/ if (!mode_dev->panel_fixed_mode) { DRM_DEBUG ("Found no modes on the lvds, ignoring the LVDS\n"); goto failed_find; } out: drm_sysfs_connector_add(connector); return; failed_find: printk(KERN_ERR "Failed find\n"); if (psb_intel_encoder->ddc_bus) psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); failed_ddc: printk(KERN_ERR "Failed DDC\n"); if (psb_intel_encoder->i2c_bus) psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); failed_blc_i2c: printk(KERN_ERR "Failed BLC\n"); drm_encoder_cleanup(encoder); drm_connector_cleanup(connector); kfree(lvds_priv); failed_lvds_priv: kfree(psb_intel_connector); failed_connector: kfree(psb_intel_encoder); }
gpl-2.0
zeroblade1984/LG_MSM8226
drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
5263
32010
/* * Copyright © 2010 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * jim liu <jim.liu@intel.com> * Jackie Li<yaodong.li@intel.com> */ #include "mdfld_dsi_dpi.h" #include "mdfld_output.h" #include "mdfld_dsi_pkg_sender.h" #include "psb_drv.h" #include "tc35876x-dsi-lvds.h" static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output, int pipe); static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe) { u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); int timeout = 0; udelay(500); /* This will time out after approximately 2+ seconds */ while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) { udelay(100); timeout++; } if (timeout == 20000) DRM_INFO("MIPI: HS Data FIFO was never cleared!\n"); } static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe) { u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); int timeout = 0; udelay(500); /* This will time out after approximately 2+ seconds */ while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_CTRL_FULL)) { udelay(100); timeout++; } if (timeout == 20000) DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n"); } static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe) { u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); int timeout = 0; udelay(500); /* This will time out after approximately 2+ seconds */ while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) & DPI_FIFO_EMPTY) != DPI_FIFO_EMPTY)) { udelay(100); timeout++; } if (timeout == 20000) DRM_ERROR("MIPI: DPI FIFO was never cleared\n"); } static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe) { u32 intr_stat_reg = MIPI_INTR_STAT_REG(pipe); int timeout = 0; udelay(500); /* This will time out after approximately 2+ seconds */ while ((timeout < 20000) && (!(REG_READ(intr_stat_reg) & DSI_INTR_STATE_SPL_PKG_SENT))) { udelay(100); timeout++; } if (timeout == 20000) DRM_ERROR("MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n"); } /* For TC35876X */ static void 
dsi_set_device_ready_state(struct drm_device *dev, int state, int pipe) { REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), !!state, 0, 0); } static void dsi_set_pipe_plane_enable_state(struct drm_device *dev, int state, int pipe) { struct drm_psb_private *dev_priv = dev->dev_private; u32 pipeconf_reg = PIPEACONF; u32 dspcntr_reg = DSPACNTR; u32 dspcntr = dev_priv->dspcntr[pipe]; u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; if (pipe) { pipeconf_reg = PIPECCONF; dspcntr_reg = DSPCCNTR; } else mipi &= (~0x03); if (state) { /*Set up pipe */ REG_WRITE(pipeconf_reg, BIT(31)); if (REG_BIT_WAIT(pipeconf_reg, 1, 30)) dev_err(&dev->pdev->dev, "%s: Pipe enable timeout\n", __func__); /*Set up display plane */ REG_WRITE(dspcntr_reg, dspcntr); } else { u32 dspbase_reg = pipe ? MDFLD_DSPCBASE : MRST_DSPABASE; /* Put DSI lanes to ULPS to disable pipe */ REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 2, 2, 1); REG_READ(MIPI_DEVICE_READY_REG(pipe)); /* posted write? */ /* LP Hold */ REG_FLD_MOD(MIPI_PORT_CONTROL(pipe), 0, 16, 16); REG_READ(MIPI_PORT_CONTROL(pipe)); /* posted write? */ /* Disable display plane */ REG_FLD_MOD(dspcntr_reg, 0, 31, 31); /* Flush the plane changes ??? posted write? 
*/ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); REG_READ(dspbase_reg); /* Disable PIPE */ REG_FLD_MOD(pipeconf_reg, 0, 31, 31); if (REG_BIT_WAIT(pipeconf_reg, 0, 30)) dev_err(&dev->pdev->dev, "%s: Pipe disable timeout\n", __func__); if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28)) dev_err(&dev->pdev->dev, "%s: FIFO not empty\n", __func__); } } static void mdfld_dsi_configure_down(struct mdfld_dsi_encoder *dsi_encoder, int pipe) { struct mdfld_dsi_dpi_output *dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder); struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder); struct drm_device *dev = dsi_config->dev; struct drm_psb_private *dev_priv = dev->dev_private; if (!dev_priv->dpi_panel_on[pipe]) { dev_err(dev->dev, "DPI panel is already off\n"); return; } tc35876x_toshiba_bridge_panel_off(dev); tc35876x_set_bridge_reset_state(dev, 1); dsi_set_pipe_plane_enable_state(dev, 0, pipe); mdfld_dsi_dpi_shut_down(dpi_output, pipe); dsi_set_device_ready_state(dev, 0, pipe); } static void mdfld_dsi_configure_up(struct mdfld_dsi_encoder *dsi_encoder, int pipe) { struct mdfld_dsi_dpi_output *dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder); struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder); struct drm_device *dev = dsi_config->dev; struct drm_psb_private *dev_priv = dev->dev_private; if (dev_priv->dpi_panel_on[pipe]) { dev_err(dev->dev, "DPI panel is already on\n"); return; } /* For resume path sequence */ mdfld_dsi_dpi_shut_down(dpi_output, pipe); dsi_set_device_ready_state(dev, 0, pipe); dsi_set_device_ready_state(dev, 1, pipe); tc35876x_set_bridge_reset_state(dev, 0); tc35876x_configure_lvds_bridge(dev); mdfld_dsi_dpi_turn_on(dpi_output, pipe); /* Send turn on command */ dsi_set_pipe_plane_enable_state(dev, 1, pipe); } /* End for TC35876X */ /* ************************************************************************* *\ * FUNCTION: mdfld_dsi_tpo_ic_init * * DESCRIPTION: This function is called only by mrst_dsi_mode_set and 
* restore_display_registers. since this function does not * acquire the mutex, it is important that the calling function * does! \* ************************************************************************* */ static void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe) { struct drm_device *dev = dsi_config->dev; u32 dcsChannelNumber = dsi_config->channel_num; u32 gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe); u32 gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe); u32 gen_ctrl_val = GEN_LONG_WRITE; DRM_INFO("Enter mrst init TPO MIPI display.\n"); gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS; /* Flip page order */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x00008036); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS)); /* 0xF0 */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x005a5af0); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); /* Write protection key */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x005a5af1); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); /* 0xFC */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x005a5afc); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); /* 0xB7 */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x770000b7); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x00000044); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS)); /* 0xB6 */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x000a0ab6); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); /* 0xF2 */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x081010f2); 
mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x4a070708); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x000000c5); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS)); /* 0xF8 */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x024003f8); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x01030a04); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x0e020220); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x00000004); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS)); /* 0xE2 */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x398fc3e2); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x0000916f); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS)); /* 0xB0 */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x000000b0); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS)); /* 0xF4 */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x240242f4); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x78ee2002); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x2a071050); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x507fee10); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x10300710); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS)); /* 0xBA */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x19fe07ba); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x101c0a31); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x00000010); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << 
WORD_COUNTS_POS)); /* 0xBB */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x28ff07bb); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x24280a31); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x00000034); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS)); /* 0xFB */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x535d05fb); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x1b1a2130); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x221e180e); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x131d2120); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x535d0508); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x1c1a2131); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x231f160d); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x111b2220); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x535c2008); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x1f1d2433); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x2c251a10); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x2c34372d); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x00000023); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS)); /* 0xFA */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x525c0bfa); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x1c1c232f); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x2623190e); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x18212625); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x545d0d0e); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x1e1d2333); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); 
REG_WRITE(gen_data_reg, 0x26231a10); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x1a222725); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x545d280f); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x21202635); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x31292013); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x31393d33); mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x00000029); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS)); /* Set DM */ mdfld_wait_for_HS_DATA_FIFO(dev, pipe); REG_WRITE(gen_data_reg, 0x000100f7); mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); } static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count, int num_lane, int bpp) { return (u16)((pixel_clock_count * bpp) / (num_lane * 8)); } /* * Calculate the dpi time basing on a given drm mode @mode * return 0 on success. 
* FIXME: I was using proposed mode value for calculation, may need to
* use crtc mode values later
*/

/*
 * Convert the pixel-clock based timings of @mode into DSI byte-clock
 * counts and store them in @dpi_timing.
 *
 * Each count is derived as: bclock_count = pclk_count * bpp / num_lane / 8
 * (the division is done inside mdfld_dsi_dpi_to_byte_clock_count()).
 * Always returns 0.
 */
int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
				struct mdfld_dsi_dpi_timing *dpi_timing,
				int num_lane, int bpp)
{
	int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
	int pclk_vsync, pclk_vfp, pclk_vbp;

	/* horizontal intervals in pixel clocks, from the mode's sync points */
	pclk_hactive = mode->hdisplay;
	pclk_hfp = mode->hsync_start - mode->hdisplay;
	pclk_hsync = mode->hsync_end - mode->hsync_start;
	pclk_hbp = mode->htotal - mode->hsync_end;

	/* vertical intervals, in lines */
	pclk_vfp = mode->vsync_start - mode->vdisplay;
	pclk_vsync = mode->vsync_end - mode->vsync_start;
	pclk_vbp = mode->vtotal - mode->vsync_end;

	/*
	 * byte clock counts were calculated by following formula
	 * bclock_count = pclk_count * bpp / num_lane / 8
	 */
	dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_hsync, num_lane, bpp);
	dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_hbp, num_lane, bpp);
	dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_hfp, num_lane, bpp);
	dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_hactive, num_lane, bpp);
	dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_vsync, num_lane, bpp);
	dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_vbp, num_lane, bpp);
	dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(
				pclk_vfp, num_lane, bpp);

	return 0;
}

/*
 * Program the DSI controller for DPI (video) mode on @pipe: take the
 * device un-ready, write lane/format/timeout/timing registers from
 * @dsi_config, then set it ready again.
 *
 * NOTE(review): several register values (0x46 switch count, 0x7d0 init
 * count, the DPHY parameters) are magic constants inherited from the
 * vendor driver — meanings not derivable from this file.
 */
void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
				int pipe)
{
	struct drm_device *dev = dsi_config->dev;
	int lane_count = dsi_config->lane_count;
	struct mdfld_dsi_dpi_timing dpi_timing;
	struct drm_display_mode *mode = dsi_config->mode;
	u32 val;

	/*un-ready device*/
	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 0, 0, 0);

	/*init dsi adapter before kicking off*/
	REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);

	/*enable all interrupts*/
	REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);

	/*set up func_prg*/
	val = lane_count;
	val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;

	switch (dsi_config->bpp) {
	case 16:
		val |= DSI_DPI_COLOR_FORMAT_RGB565;
		break;
	case 18:
		val |= DSI_DPI_COLOR_FORMAT_RGB666;
		break;
	case 24:
		val |= DSI_DPI_COLOR_FORMAT_RGB888;
		break;
	default:
		/* no format bits set; register is still written below */
		DRM_ERROR("unsupported color format, bpp = %d\n",
						dsi_config->bpp);
	}

	REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), val);

	/* HS TX timeout sized to roughly one frame of byte clocks */
	REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe),
			(mode->vtotal * mode->htotal * dsi_config->bpp /
				(8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
	REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe),
			0xffff & DSI_LP_RX_TIMEOUT_MASK);

	/*max value: 20 clock cycles of txclkesc*/
	REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe),
			0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);

	/*min 21 txclkesc, max: ffffh*/
	REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe),
			0xffff & DSI_RESET_TIMER_MASK);

	REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
			mode->vdisplay << 16 | mode->hdisplay);

	/*set DPI timing registers*/
	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
			dsi_config->lane_count, dsi_config->bpp);

	REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
			dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
			dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
			dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
			dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
			dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
			dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
			dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);

	REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x46);

	/*min: 7d0 max: 4e20*/
	REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0x000007d0);

	/*set up video mode*/
	val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
	REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), val);

	REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);

	REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);

	/*TODO: figure out how to setup these registers*/
	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
	else
		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150c3408);

	REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);

	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
		tc35876x_set_bridge_reset_state(dev, 0);  /*Pull High Reset */

	/*set device ready*/
	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 1, 0, 0);
}

/*
 * Send the DPI "turn on" special packet to the panel on @pipe and wait
 * for its completion interrupt; marks @output as powered on.
 */
void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
{
	struct drm_device *dev = output->dev;

	/* clear special packet sent bit */
	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
					DSI_INTR_STATE_SPL_PKG_SENT);

	/*send turn on package*/
	REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_TURN_ON);

	/*wait for SPL_PKG_SENT interrupt*/
	mdfld_wait_for_SPL_PKG_SENT(dev, pipe);

	/* acknowledge the completion bit so the next wait starts clean */
	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
					DSI_INTR_STATE_SPL_PKG_SENT);

	output->panel_on = 1;

	/* FIXME the following is disabled to WA the X slow start issue
	   for TMD panel
	if (pipe == 2)
		dev_priv->dpi_panel_on2 = true;
	else if (pipe == 0)
		dev_priv->dpi_panel_on = true; */
}

/*
 * Send the DPI "shutdown" special packet on @pipe, after draining the
 * DPI control FIFO; marks @output as powered off.
 */
static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
								int pipe)
{
	struct drm_device *dev = output->dev;

	/* if the panel is already off, or mode setting didn't happen
	 * yet (first boot), there is nothing to shut down */
	if ((!output->panel_on) || output->first_boot) {
		output->first_boot = 0;
		return;
	}

	/* Wait for dpi fifo to empty */
	mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);

	/* Clear the special packet interrupt bit if set */
	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
					DSI_INTR_STATE_SPL_PKG_SENT);

	/* already shut down: skip re-sending the command */
	if (REG_READ(MIPI_DPI_CONTROL_REG(pipe)) == DSI_DPI_CTRL_HS_SHUTDOWN)
		goto shutdown_out;

	REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_SHUTDOWN);

shutdown_out:
	output->panel_on = 0;
	output->first_boot = 0;

	/* FIXME the following is disabled to WA the X slow start issue
	   for TMD panel
	if (pipe == 2)
		dev_priv->dpi_panel_on2 = false;
	else if (pipe == 0)
		dev_priv->dpi_panel_on = false; */
}

/*
 * Power the DPI panel on @pipe up or down, dispatching per panel type
 * (TMD, TC35876X bridge, or the default TPO panel).  Takes/releases the
 * display power island around the register access.
 */
static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
{
	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
	struct mdfld_dsi_dpi_output *dpi_output =
				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
	struct mdfld_dsi_config *dsi_config =
				mdfld_dsi_encoder_get_config(dsi_encoder);
	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
	struct drm_device *dev = dsi_config->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	/*start up display island if it was shutdown*/
	if (!gma_power_begin(dev, true))
		return;

	if (on) {
		if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
			mdfld_dsi_dpi_turn_on(dpi_output, pipe);
		else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
			mdfld_dsi_configure_up(dsi_encoder, pipe);
		else {
			/*enable mipi port*/
			REG_WRITE(MIPI_PORT_CONTROL(pipe),
				REG_READ(MIPI_PORT_CONTROL(pipe)) | BIT(31));
			REG_READ(MIPI_PORT_CONTROL(pipe));

			mdfld_dsi_dpi_turn_on(dpi_output, pipe);
			mdfld_dsi_tpo_ic_init(dsi_config, pipe);
		}
		dev_priv->dpi_panel_on[pipe] = true;
	} else {
		if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
			mdfld_dsi_dpi_shut_down(dpi_output, pipe);
		else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
			mdfld_dsi_configure_down(dsi_encoder, pipe);
		else {
			mdfld_dsi_dpi_shut_down(dpi_output, pipe);

			/*disable mipi port*/
			REG_WRITE(MIPI_PORT_CONTROL(pipe),
				REG_READ(MIPI_PORT_CONTROL(pipe)) & ~BIT(31));
			REG_READ(MIPI_PORT_CONTROL(pipe));
		}
		dev_priv->dpi_panel_on[pipe] = false;
	}
	gma_power_end(dev);
}

/* DRM encoder DPMS hook: only DPMS_ON maps to power-on. */
void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
{
	mdfld_dsi_dpi_set_power(encoder, mode == DRM_MODE_DPMS_ON);
}

/*
 * DRM mode_fixup hook: if the config carries a fixed panel mode, force
 * the adjusted mode's timings/clock to it.  Always returns true.
 */
bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
	struct mdfld_dsi_config *dsi_config =
				mdfld_dsi_encoder_get_config(dsi_encoder);
	struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;

	if (fixed_mode) {
		adjusted_mode->hdisplay = fixed_mode->hdisplay;
		adjusted_mode->hsync_start = fixed_mode->hsync_start;
		adjusted_mode->hsync_end = fixed_mode->hsync_end;
		adjusted_mode->htotal = fixed_mode->htotal;
		adjusted_mode->vdisplay = fixed_mode->vdisplay;
		adjusted_mode->vsync_start = fixed_mode->vsync_start;
		adjusted_mode->vsync_end = fixed_mode->vsync_end;
		adjusted_mode->vtotal = fixed_mode->vtotal;
		adjusted_mode->clock = fixed_mode->clock;
		drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
	}
	return true;
}

/* DRM prepare hook: power the panel down before a mode set. */
void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
{
	mdfld_dsi_dpi_set_power(encoder, false);
}

/* DRM commit hook: power the panel back up after a mode set. */
void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
{
	mdfld_dsi_dpi_set_power(encoder, true);
}

/* For TC35876X */

/* This functionality was implemented in FW in iCDK */
/* But removed in DV0 and later. So need to add here. */
/*
 * Program the baseline MIPI adapter properties (timeouts, counts,
 * bandwidth control) that firmware used to set up on older silicon.
 */
static void mipi_set_properties(struct mdfld_dsi_config *dsi_config, int pipe)
{
	struct drm_device *dev = dsi_config->dev;

	REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
	REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
	REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), 0xffffff);
	REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), 0xffffff);
	REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), 0x14);
	REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), 0xff);
	REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x25);
	REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0xf0);
	REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
	REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
	REG_WRITE(MIPI_DBI_BW_CTRL_REG(pipe), 0x00000820);
	REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
}

/*
 * Write the DPI resolution and byte-clock timing counts for the
 * config's current mode (TC35876X bridge path).
 */
static void mdfld_mipi_set_video_timing(struct mdfld_dsi_config *dsi_config,
					int pipe)
{
	struct drm_device *dev = dsi_config->dev;
	struct mdfld_dsi_dpi_timing dpi_timing;
	struct drm_display_mode *mode = dsi_config->mode;

	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
					dsi_config->lane_count,
					dsi_config->bpp);

	REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
		mode->vdisplay << 16 | mode->hdisplay);
	REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
		dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
		dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
		dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
		dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
		dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
		dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
		dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
}

/*
 * Configure the MIPI port, DPHY and function program for the TC35876X
 * bridge, then program the video timing for the current mode.
 */
static void mdfld_mipi_config(struct mdfld_dsi_config *dsi_config, int pipe)
{
	struct drm_device *dev = dsi_config->dev;
	int lane_count = dsi_config->lane_count;

	if (pipe) {
		REG_WRITE(MIPI_PORT_CONTROL(0), 0x00000002);
		REG_WRITE(MIPI_PORT_CONTROL(2), 0x80000000);
	} else {
		REG_WRITE(MIPI_PORT_CONTROL(0), 0x80010000);
		REG_WRITE(MIPI_PORT_CONTROL(2), 0x00);
	}

	REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150A600F);
	REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), 0x0000000F);

	/* lane_count = 3 */
	REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), 0x00000200 | lane_count);

	mdfld_mipi_set_video_timing(dsi_config, pipe);
}

/*
 * Program pipe A display timing registers from the config's mode.
 * NOTE(review): always writes the _A registers regardless of @pipe —
 * presumably only called for pipe 0 on this bridge; confirm at callers.
 */
static void mdfld_set_pipe_timing(struct mdfld_dsi_config *dsi_config, int pipe)
{
	struct drm_device *dev = dsi_config->dev;
	struct drm_display_mode *mode = dsi_config->mode;

	REG_WRITE(HTOTAL_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
	REG_WRITE(HBLANK_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
	REG_WRITE(HSYNC_A,
		((mode->hsync_end - 1) << 16) | (mode->hsync_start - 1));

	REG_WRITE(VTOTAL_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
	REG_WRITE(VBLANK_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
	REG_WRITE(VSYNC_A,
		((mode->vsync_end - 1) << 16) | (mode->vsync_start - 1));

	REG_WRITE(PIPEASRC,
		((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
}
/* End for TC35876X */

/*
 * DRM mode_set hook: bring up the MIPI port, DSI controller, pipe and
 * display plane for @mode, with per-panel-type sequencing (the TC35876X
 * path additionally resets the bridge and starts the DSI PLL).
 */
void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
	struct mdfld_dsi_dpi_output *dpi_output =
					MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
	struct mdfld_dsi_config *dsi_config =
				mdfld_dsi_encoder_get_config(dsi_encoder);
	struct drm_device *dev = dsi_config->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);

	u32 pipeconf_reg = PIPEACONF;
	u32 dspcntr_reg = DSPACNTR;

	u32 pipeconf = dev_priv->pipeconf[pipe];
	u32 dspcntr = dev_priv->dspcntr[pipe];
	u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;

	if (pipe) {
		pipeconf_reg = PIPECCONF;
		dspcntr_reg = DSPCCNTR;
	} else {
		if (mdfld_get_panel_type(dev, pipe) == TC35876X)
			mipi &= (~0x03); /* Use all four lanes */
		else
			mipi |= 2;
	}

	/*start up display island if it was shutdown*/
	if (!gma_power_begin(dev, true))
		return;

	if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
		/*
		 * The following logic is required to reset the bridge and
		 * configure. This also starts the DSI clock at 200MHz.
		 */
		tc35876x_set_bridge_reset_state(dev, 0);  /*Pull High Reset */
		tc35876x_toshiba_bridge_panel_on(dev);
		udelay(100);
		/* Now start the DSI clock */
		REG_WRITE(MRST_DPLL_A, 0x00);
		REG_WRITE(MRST_FPA0, 0xC1);
		REG_WRITE(MRST_DPLL_A, 0x00800000);
		udelay(500);
		REG_WRITE(MRST_DPLL_A, 0x80800000);

		if (REG_BIT_WAIT(pipeconf_reg, 1, 29))
			dev_err(&dev->pdev->dev, "%s: DSI PLL lock timeout\n",
				__func__);

		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);

		mipi_set_properties(dsi_config, pipe);
		mdfld_mipi_config(dsi_config, pipe);
		mdfld_set_pipe_timing(dsi_config, pipe);

		REG_WRITE(DSPABASE, 0x00);
		REG_WRITE(DSPASTRIDE, (mode->hdisplay * 4));
		REG_WRITE(DSPASIZE,
			((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));

		REG_WRITE(DSPACNTR, 0x98000000);
		REG_WRITE(DSPASURF, 0x00);

		REG_WRITE(VGACNTRL, 0x80000000);
		REG_WRITE(DEVICE_READY_REG, 0x00000001);

		REG_WRITE(MIPI_PORT_CONTROL(pipe), 0x80810000);
	} else {
		/*set up mipi port FIXME: do at init time */
		REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi);
	}
	REG_READ(MIPI_PORT_CONTROL(pipe));

	if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
		/* NOP */
	} else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
		/* set up DSI controller DPI interface */
		mdfld_dsi_dpi_controller_init(dsi_config, pipe);

		/* Configure MIPI Bridge and Panel */
		tc35876x_configure_lvds_bridge(dev);
		dev_priv->dpi_panel_on[pipe] = true;
	} else {
		/*turn on DPI interface*/
		mdfld_dsi_dpi_turn_on(dpi_output, pipe);
	}

	/*set up pipe*/
	REG_WRITE(pipeconf_reg, pipeconf);
	REG_READ(pipeconf_reg);

	/*set up display plane*/
	REG_WRITE(dspcntr_reg, dspcntr);
	REG_READ(dspcntr_reg);

	msleep(20); /* FIXME: this should wait for vblank */

	if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
		/* NOP */
	} else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
		mdfld_dsi_dpi_turn_on(dpi_output, pipe);
	} else {
		/* init driver ic */
		mdfld_dsi_tpo_ic_init(dsi_config, pipe);
		/*init backlight*/
		mdfld_dsi_brightness_init(dsi_config, pipe);
	}

	gma_power_end(dev);
}

/*
 * Init DSI DPI encoder.
* Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector * return pointer of newly allocated DPI encoder, NULL on error */ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, struct mdfld_dsi_connector *dsi_connector, const struct panel_funcs *p_funcs) { struct mdfld_dsi_dpi_output *dpi_output = NULL; struct mdfld_dsi_config *dsi_config; struct drm_connector *connector = NULL; struct drm_encoder *encoder = NULL; int pipe; u32 data; int ret; pipe = dsi_connector->pipe; if (mdfld_get_panel_type(dev, pipe) != TC35876X) { dsi_config = mdfld_dsi_get_config(dsi_connector); /* panel hard-reset */ if (p_funcs->reset) { ret = p_funcs->reset(pipe); if (ret) { DRM_ERROR("Panel %d hard-reset failed\n", pipe); return NULL; } } /* panel drvIC init */ if (p_funcs->drv_ic_init) p_funcs->drv_ic_init(dsi_config, pipe); /* panel power mode detect */ ret = mdfld_dsi_get_power_mode(dsi_config, &data, false); if (ret) { DRM_ERROR("Panel %d get power mode failed\n", pipe); dsi_connector->status = connector_status_disconnected; } else { DRM_INFO("pipe %d power mode 0x%x\n", pipe, data); dsi_connector->status = connector_status_connected; } } dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL); if (!dpi_output) { DRM_ERROR("No memory\n"); return NULL; } if (dsi_connector->pipe) dpi_output->panel_on = 0; else dpi_output->panel_on = 0; dpi_output->dev = dev; if (mdfld_get_panel_type(dev, pipe) != TC35876X) dpi_output->p_funcs = p_funcs; dpi_output->first_boot = 1; /*get fixed mode*/ dsi_config = mdfld_dsi_get_config(dsi_connector); /*create drm encoder object*/ connector = &dsi_connector->base.base; encoder = &dpi_output->base.base.base; drm_encoder_init(dev, encoder, p_funcs->encoder_funcs, DRM_MODE_ENCODER_LVDS); drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); /*attach to given connector*/ drm_mode_connector_attach_encoder(connector, encoder); /*set possible crtcs and clones*/ if (dsi_connector->pipe) { encoder->possible_crtcs 
= (1 << 2); encoder->possible_clones = (1 << 1); } else { encoder->possible_crtcs = (1 << 0); encoder->possible_clones = (1 << 0); } dsi_connector->base.encoder = &dpi_output->base.base; return &dpi_output->base; }
gpl-2.0
vtss/linux-stable
fs/ext4/xattr_trusted.c
8591
1523
/* * linux/fs/ext4/xattr_trusted.c * Handler for trusted extended attributes. * * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org> */ #include <linux/string.h> #include <linux/capability.h> #include <linux/fs.h> #include "ext4_jbd2.h" #include "ext4.h" #include "xattr.h" static size_t ext4_xattr_trusted_list(struct dentry *dentry, char *list, size_t list_size, const char *name, size_t name_len, int type) { const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN; const size_t total_len = prefix_len + name_len + 1; if (!capable(CAP_SYS_ADMIN)) return 0; if (list && total_len <= list_size) { memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len); memcpy(list+prefix_len, name, name_len); list[prefix_len + name_len] = '\0'; } return total_len; } static int ext4_xattr_trusted_get(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { if (strcmp(name, "") == 0) return -EINVAL; return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_TRUSTED, name, buffer, size); } static int ext4_xattr_trusted_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { if (strcmp(name, "") == 0) return -EINVAL; return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_TRUSTED, name, value, size, flags); } const struct xattr_handler ext4_xattr_trusted_handler = { .prefix = XATTR_TRUSTED_PREFIX, .list = ext4_xattr_trusted_list, .get = ext4_xattr_trusted_get, .set = ext4_xattr_trusted_set, };
gpl-2.0
marshallshen/android-os
scripts/dtc/fstree.c
11151
2270
/*
 * (C) Copyright David Gibson <dwg@au1.ibm.com>, IBM Corporation.  2005.
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */

#include "dtc.h"

#include <dirent.h>
#include <sys/stat.h>

/*
 * Recursively build a device-tree node from the directory @dirname:
 * each regular file becomes a property (value = file contents), each
 * subdirectory becomes a child node.  Unreadable files are skipped
 * with a warning; all other errors are fatal (die()).
 *
 * Note: the loop variable was renamed from "tmpnam" to "tmpname" — the
 * old name shadowed tmpnam() from <stdio.h>.  The lstat error message
 * now names the call actually made (lstat, not stat).
 */
static struct node *read_fstree(const char *dirname)
{
	DIR *d;
	struct dirent *de;
	struct stat st;
	struct node *tree;

	d = opendir(dirname);
	if (!d)
		die("Couldn't opendir() \"%s\": %s\n", dirname,
		    strerror(errno));

	tree = build_node(NULL, NULL);

	while ((de = readdir(d)) != NULL) {
		char *tmpname;

		if (streq(de->d_name, ".")
		    || streq(de->d_name, ".."))
			continue;

		tmpname = join_path(dirname, de->d_name);

		/* lstat: do not follow symlinks out of the tree */
		if (lstat(tmpname, &st) < 0)
			die("lstat(%s): %s\n", tmpname, strerror(errno));

		if (S_ISREG(st.st_mode)) {
			struct property *prop;
			FILE *pfile;

			pfile = fopen(tmpname, "r");
			if (!pfile) {
				/* non-fatal: skip unreadable files */
				fprintf(stderr,
					"WARNING: Cannot open %s: %s\n",
					tmpname, strerror(errno));
			} else {
				prop = build_property(xstrdup(de->d_name),
						      data_copy_file(pfile,
								     st.st_size));
				add_property(tree, prop);
				fclose(pfile);
			}
		} else if (S_ISDIR(st.st_mode)) {
			struct node *newchild;

			newchild = read_fstree(tmpname);
			newchild = name_node(newchild, xstrdup(de->d_name));
			add_child(tree, newchild);
		}

		free(tmpname);
	}

	closedir(d);

	return tree;
}

/*
 * Build a complete boot_info from the filesystem hierarchy rooted at
 * @dirname; the root node gets the empty name, no reserve map is
 * attached, and the boot CPU id is guessed from the tree.
 */
struct boot_info *dt_from_fs(const char *dirname)
{
	struct node *tree;

	tree = read_fstree(dirname);
	tree = name_node(tree, "");

	return build_boot_info(NULL, tree, guess_boot_cpuid(tree));
}
gpl-2.0
javandoc/Build-FFmpeg
libavfilter/bbox.c
144
2142
/* * Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "bbox.h" int ff_calculate_bounding_box(FFBoundingBox *bbox, const uint8_t *data, int linesize, int w, int h, int min_val) { int x, y; int start_x; int start_y; int end_x; int end_y; const uint8_t *line; /* left bound */ for (start_x = 0; start_x < w; start_x++) for (y = 0; y < h; y++) if ((data[y * linesize + start_x] > min_val)) goto outl; outl: if (start_x == w) /* no points found */ return 0; /* right bound */ for (end_x = w - 1; end_x >= start_x; end_x--) for (y = 0; y < h; y++) if ((data[y * linesize + end_x] > min_val)) goto outr; outr: /* top bound */ line = data; for (start_y = 0; start_y < h; start_y++) { for (x = 0; x < w; x++) if (line[x] > min_val) goto outt; line += linesize; } outt: /* bottom bound */ line = data + (h-1)*linesize; for (end_y = h - 1; end_y >= start_y; end_y--) { for (x = 0; x < w; x++) if (line[x] > min_val) goto outb; line -= linesize; } outb: bbox->x1 = start_x; bbox->y1 = start_y; bbox->x2 = end_x; bbox->y2 = end_y; return 1; }
gpl-2.0
romracer/atrix-kernel
net/tipc/net.c
656
9981
/* * net/tipc/net.c: TIPC network routing code * * Copyright (c) 1995-2006, Ericsson AB * Copyright (c) 2005, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "core.h" #include "bearer.h" #include "net.h" #include "zone.h" #include "addr.h" #include "name_table.h" #include "name_distr.h" #include "subscr.h" #include "link.h" #include "msg.h" #include "port.h" #include "bcast.h" #include "discover.h" #include "config.h" /* * The TIPC locking policy is designed to ensure a very fine locking * granularity, permitting complete parallel access to individual * port and node/link instances. The code consists of three major * locking domains, each protected with their own disjunct set of locks. * * 1: The routing hierarchy. * Comprises the structures 'zone', 'cluster', 'node', 'link' * and 'bearer'. The whole hierarchy is protected by a big * read/write lock, tipc_net_lock, to enssure that nothing is added * or removed while code is accessing any of these structures. * This layer must not be called from the two others while they * hold any of their own locks. * Neither must it itself do any upcalls to the other two before * it has released tipc_net_lock and other protective locks. * * Within the tipc_net_lock domain there are two sub-domains;'node' and * 'bearer', where local write operations are permitted, * provided that those are protected by individual spin_locks * per instance. Code holding tipc_net_lock(read) and a node spin_lock * is permitted to poke around in both the node itself and its * subordinate links. I.e, it can update link counters and queues, * change link state, send protocol messages, and alter the * "active_links" array in the node; but it can _not_ remove a link * or a node from the overall structure. * Correspondingly, individual bearers may change status within a * tipc_net_lock(read), protected by an individual spin_lock ber bearer * instance, but it needs tipc_net_lock(write) to remove/add any bearers. * * * 2: The transport level of the protocol. 
* This consists of the structures port, (and its user level * representations, such as user_port and tipc_sock), reference and * tipc_user (port.c, reg.c, socket.c). * * This layer has four different locks: * - The tipc_port spin_lock. This is protecting each port instance * from parallel data access and removal. Since we can not place * this lock in the port itself, it has been placed in the * corresponding reference table entry, which has the same life * cycle as the module. This entry is difficult to access from * outside the TIPC core, however, so a pointer to the lock has * been added in the port instance, -to be used for unlocking * only. * - A read/write lock to protect the reference table itself (teg.c). * (Nobody is using read-only access to this, so it can just as * well be changed to a spin_lock) * - A spin lock to protect the registry of kernel/driver users (reg.c) * - A global spin_lock (tipc_port_lock), which only task is to ensure * consistency where more than one port is involved in an operation, * i.e., whe a port is part of a linked list of ports. * There are two such lists; 'port_list', which is used for management, * and 'wait_list', which is used to queue ports during congestion. * * 3: The name table (name_table.c, name_distr.c, subscription.c) * - There is one big read/write-lock (tipc_nametbl_lock) protecting the * overall name table structure. Nothing must be added/removed to * this structure without holding write access to it. * - There is one local spin_lock per sub_sequence, which can be seen * as a sub-domain to the tipc_nametbl_lock domain. It is used only * for translation operations, and is needed because a translation * steps the root of the 'publication' linked list between each lookup. * This is always used within the scope of a tipc_nametbl_lock(read). * - A local spin_lock protecting the queue of subscriber events. 
 */

/* Big routing-hierarchy lock described in the policy comment above. */
DEFINE_RWLOCK(tipc_net_lock);
/* Global network state; zones array allocated in net_init(). */
struct network tipc_net = { NULL };

/* Pick a remote node in @addr's zone, preferring one usable by @ref. */
struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
{
	return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)],
					    addr, ref);
}

/* Pick a router toward @addr within its zone. */
u32 tipc_net_select_router(u32 addr, u32 ref)
{
	return tipc_zone_select_router(tipc_net.zones[tipc_zone(addr)],
				       addr, ref);
}

#if 0
u32 tipc_net_next_node(u32 a)
{
	if (tipc_net.zones[tipc_zone(a)])
		return tipc_zone_next_node(a);
	return 0;
}
#endif

/* Strip @router from the routing tables of every configured zone. */
void tipc_net_remove_as_router(u32 router)
{
	u32 z_num;

	for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
		if (!tipc_net.zones[z_num])
			continue;
		tipc_zone_remove_as_router(tipc_net.zones[z_num], router);
	}
}

/* Publish our external routes to @dest via every configured zone. */
void tipc_net_send_external_routes(u32 dest)
{
	u32 z_num;

	for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
		if (tipc_net.zones[z_num])
			tipc_zone_send_external_routes(tipc_net.zones[z_num],
						       dest);
	}
}

/*
 * Allocate the zones pointer array (slot 0 unused; zones are 1-based).
 * Returns 0 or -ENOMEM.
 */
static int net_init(void)
{
	memset(&tipc_net, 0, sizeof(tipc_net));
	tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *),
				 GFP_ATOMIC);
	if (!tipc_net.zones) {
		return -ENOMEM;
	}
	return 0;
}

/* Tear down all zones and release the zones array; safe to call twice. */
static void net_stop(void)
{
	u32 z_num;

	if (!tipc_net.zones)
		return;

	for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
		tipc_zone_delete(tipc_net.zones[z_num]);
	}
	kfree(tipc_net.zones);
	tipc_net.zones = NULL;
}

/*
 * Resolve a name-addressed message through the name table and re-route
 * it; drops unnamed messages, rejects unresolvable names with
 * TIPC_ERR_NO_NAME.
 */
static void net_route_named_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 dnode;
	u32 dport;

	if (!msg_named(msg)) {
		msg_dbg(msg, "tipc_net->drop_nam:");
		buf_discard(buf);
		return;
	}

	dnode = addr_domain(msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg),
				       &dnode);
	dbg("tipc_net->lookup<%u,%u>-><%u,%x>\n",
	    msg_nametype(msg), msg_nameinst(msg), dport, dnode);
	if (dport) {
		msg_set_destnode(msg, dnode);
		msg_set_destport(msg, dport);
		tipc_net_route_msg(buf);
		return;
	}
	msg_dbg(msg, "tipc_net->rej:NO NAME: ");
	tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}

/*
 * Central message router: deliver locally (data to ports, control
 * messages to their subsystems) or forward toward the destination node.
 * A reroute counter (> 6) breaks potential routing loops by discarding
 * or rejecting the message.
 */
void tipc_net_route_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg;
	u32 dnode;

	if (!buf)
		return;
	msg = buf_msg(buf);

	msg_incr_reroute_cnt(msg);
	if (msg_reroute_cnt(msg) > 6) {
		if (msg_errcode(msg)) {
			msg_dbg(msg, "NET>DISC>:");
			buf_discard(buf);
		} else {
			msg_dbg(msg, "NET>REJ>:");
			tipc_reject_msg(buf, msg_destport(msg) ?
					TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
		}
		return;
	}

	msg_dbg(msg, "tipc_net->rout: ");

	/* Handle message for this node */
	dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
	if (in_scope(dnode, tipc_own_addr)) {
		if (msg_isdata(msg)) {
			if (msg_mcast(msg))
				tipc_port_recv_mcast(buf, NULL);
			else if (msg_destport(msg))
				tipc_port_recv_msg(buf);
			else
				net_route_named_msg(buf);
			return;
		}
		switch (msg_user(msg)) {
		case ROUTE_DISTRIBUTOR:
			tipc_cltr_recv_routing_table(buf);
			break;
		case NAME_DISTRIBUTOR:
			tipc_named_recv(buf);
			break;
		case CONN_MANAGER:
			tipc_port_recv_proto_msg(buf);
			break;
		default:
			msg_dbg(msg,"DROP/NET/<REC<");
			buf_discard(buf);
		}
		return;
	}

	/* Handle message for another node */
	msg_dbg(msg, "NET>SEND>: ");
	tipc_link_send(buf, dnode, msg_link_selector(msg));
}

/*
 * Switch the node from standalone to network mode using @addr as our
 * own address: restart naming/port state, then bring up bearers,
 * routing structures and broadcast link in order.  Returns 0 or a
 * negative errno; -ENOPROTOOPT if not currently in node mode.
 */
int tipc_net_start(u32 addr)
{
	char addr_string[16];
	int res;

	if (tipc_mode != TIPC_NODE_MODE)
		return -ENOPROTOOPT;

	tipc_subscr_stop();
	tipc_cfg_stop();

	tipc_own_addr = addr;
	tipc_mode = TIPC_NET_MODE;
	tipc_named_reinit();
	tipc_port_reinit();

	if ((res = tipc_bearer_init()) ||
	    (res = net_init()) ||
	    (res = tipc_cltr_init()) ||
	    (res = tipc_bclink_init())) {
		return res;
	}

	/* restart subscription/config services asynchronously */
	tipc_k_signal((Handler)tipc_subscr_start, 0);
	tipc_k_signal((Handler)tipc_cfg_init, 0);

	info("Started in network mode\n");
	info("Own node address %s, network identity %u\n",
	     addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
	return 0;
}

/*
 * Leave network mode: under tipc_net_lock(write), stop bearers and the
 * broadcast link, then free all routing structures.  No-op unless
 * currently in network mode.
 */
void tipc_net_stop(void)
{
	if (tipc_mode != TIPC_NET_MODE)
		return;
	write_lock_bh(&tipc_net_lock);
	tipc_bearer_stop();
	tipc_mode = TIPC_NODE_MODE;
	tipc_bclink_stop();
	net_stop();
	write_unlock_bh(&tipc_net_lock);
	info("Left network mode \n");
}
gpl-2.0
morogoku/MoRoKernel-S7-v2
drivers/hwmon/ltc4245.c
1680
15717
/*
 * Driver for Linear Technology LTC4245 I2C Multiple Supply Hot Swap Controller
 *
 * Copyright (C) 2008 Ira W. Snyder <iws@ovro.caltech.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This driver is based on the ds1621 and ina209 drivers.
 *
 * Datasheet:
 * http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1140,P19392,D13517
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/i2c/ltc4245.h>

/* Here are names of the chip's registers (a.k.a. commands) */
enum ltc4245_cmd {
	LTC4245_STATUS			= 0x00, /* readonly */
	LTC4245_ALERT			= 0x01,
	LTC4245_CONTROL			= 0x02,
	LTC4245_ON			= 0x03,
	LTC4245_FAULT1			= 0x04,
	LTC4245_FAULT2			= 0x05,
	LTC4245_GPIO			= 0x06,
	LTC4245_ADCADR			= 0x07,

	LTC4245_12VIN			= 0x10,
	LTC4245_12VSENSE		= 0x11,
	LTC4245_12VOUT			= 0x12,
	LTC4245_5VIN			= 0x13,
	LTC4245_5VSENSE			= 0x14,
	LTC4245_5VOUT			= 0x15,
	LTC4245_3VIN			= 0x16,
	LTC4245_3VSENSE			= 0x17,
	LTC4245_3VOUT			= 0x18,
	LTC4245_VEEIN			= 0x19,
	LTC4245_VEESENSE		= 0x1a,
	LTC4245_VEEOUT			= 0x1b,
	LTC4245_GPIOADC			= 0x1c,
};

/* Per-client driver state; cached register copies refreshed at most 1/sec. */
struct ltc4245_data {
	struct i2c_client *client;

	const struct attribute_group *groups[3];

	struct mutex update_lock;
	bool valid;
	unsigned long last_updated; /* in jiffies */

	/* Control registers */
	u8 cregs[0x08];

	/* Voltage registers */
	u8 vregs[0x0d];

	/* GPIO ADC registers */
	bool use_extra_gpios;
	int gpios[3];
};

/*
 * Update the readings from the GPIO pins. If the driver has been configured to
 * sample all GPIO's as analog voltages, a round-robin sampling method is used.
 * Otherwise, only the configured GPIO pin is sampled.
 *
 * LOCKING: must hold data->update_lock
 */
static void ltc4245_update_gpios(struct device *dev)
{
	struct ltc4245_data *data = dev_get_drvdata(dev);
	struct i2c_client *client = data->client;
	u8 gpio_curr, gpio_next, gpio_reg;
	int i;

	/* no extra gpio support, we're basically done */
	if (!data->use_extra_gpios) {
		data->gpios[0] = data->vregs[LTC4245_GPIOADC - 0x10];
		return;
	}

	/*
	 * If the last reading was too long ago, then we mark all old GPIO
	 * readings as stale by setting them to -EAGAIN
	 */
	if (time_after(jiffies, data->last_updated + 5 * HZ)) {
		for (i = 0; i < ARRAY_SIZE(data->gpios); i++)
			data->gpios[i] = -EAGAIN;
	}

	/*
	 * Get the current GPIO pin
	 *
	 * The datasheet calls these GPIO[1-3], but we'll calculate the zero
	 * based array index instead, and call them GPIO[0-2]. This is much
	 * easier to think about.
	 */
	gpio_curr = (data->cregs[LTC4245_GPIO] & 0xc0) >> 6;
	if (gpio_curr > 0)
		gpio_curr -= 1;

	/* Read the GPIO voltage from the GPIOADC register */
	data->gpios[gpio_curr] = data->vregs[LTC4245_GPIOADC - 0x10];

	/* Find the next GPIO pin to read */
	gpio_next = (gpio_curr + 1) % ARRAY_SIZE(data->gpios);

	/*
	 * Calculate the correct setting for the GPIO register so it will
	 * sample the next GPIO pin
	 */
	gpio_reg = (data->cregs[LTC4245_GPIO] & 0x3f) | ((gpio_next + 1) << 6);

	/* Update the GPIO register */
	i2c_smbus_write_byte_data(client, LTC4245_GPIO, gpio_reg);

	/* Update saved data */
	data->cregs[LTC4245_GPIO] = gpio_reg;
}

/*
 * Refresh the cached control and voltage registers from the chip when
 * the cache is older than 1 second (or invalid).  SMBus read errors
 * leave 0 in the affected cache slot rather than failing the update.
 */
static struct ltc4245_data *ltc4245_update_device(struct device *dev)
{
	struct ltc4245_data *data = dev_get_drvdata(dev);
	struct i2c_client *client = data->client;
	s32 val;
	int i;

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {

		/* Read control registers -- 0x00 to 0x07 */
		for (i = 0; i < ARRAY_SIZE(data->cregs); i++) {
			val = i2c_smbus_read_byte_data(client, i);
			if (unlikely(val < 0))
				data->cregs[i] = 0;
			else
				data->cregs[i] = val;
		}

		/* Read voltage registers -- 0x10 to 0x1c */
		for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
			val = i2c_smbus_read_byte_data(client, i+0x10);
			if (unlikely(val < 0))
				data->vregs[i] = 0;
			else
				data->vregs[i] = val;
		}

		/* Update GPIO readings */
		ltc4245_update_gpios(dev);

		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

	return data;
}

/* Return the voltage from the given register in millivolts */
static int ltc4245_get_voltage(struct device *dev, u8 reg)
{
	struct ltc4245_data *data = ltc4245_update_device(dev);
	const u8 regval = data->vregs[reg - 0x10];
	u32 voltage = 0;

	/* per-rail LSB weights from the datasheet ADC scaling */
	switch (reg) {
	case LTC4245_12VIN:
	case LTC4245_12VOUT:
		voltage = regval * 55;
		break;
	case LTC4245_5VIN:
	case LTC4245_5VOUT:
		voltage = regval * 22;
		break;
	case LTC4245_3VIN:
	case LTC4245_3VOUT:
		voltage = regval * 15;
		break;
	case LTC4245_VEEIN:
	case LTC4245_VEEOUT:
		/* negative rail */
		voltage = regval * -55;
		break;
	case LTC4245_GPIOADC:
		voltage = regval * 10;
		break;
	default:
		/* If we get here, the developer messed up */
		WARN_ON_ONCE(1);
		break;
	}

	return voltage;
}

/* Return the current in the given sense register in milliAmperes */
static unsigned int ltc4245_get_current(struct device *dev, u8 reg)
{
	struct ltc4245_data *data = ltc4245_update_device(dev);
	const u8 regval = data->vregs[reg - 0x10];
	unsigned int voltage;
	unsigned int curr;

	/*
	 * The strange looking conversions that follow are fixed-point
	 * math, since we cannot do floating point in the kernel.
	 *
	 * Step 1: convert sense register to microVolts
	 * Step 2: convert voltage to milliAmperes
	 *
	 * If you play around with the V=IR equation, you come up with
	 * the following: X uV / Y mOhm == Z mA
	 *
	 * With the resistors that are fractions of a milliOhm, we multiply
	 * the voltage and resistance by 10, to shift the decimal point.
	 * Now we can use the normal division operator again.
	 */
	switch (reg) {
	case LTC4245_12VSENSE:
		voltage = regval * 250; /* voltage in uV */
		curr = voltage / 50; /* sense resistor 50 mOhm */
		break;
	case LTC4245_5VSENSE:
		voltage = regval * 125; /* voltage in uV */
		curr = (voltage * 10) / 35; /* sense resistor 3.5 mOhm */
		break;
	case LTC4245_3VSENSE:
		voltage = regval * 125; /* voltage in uV */
		curr = (voltage * 10) / 25; /* sense resistor 2.5 mOhm */
		break;
	case LTC4245_VEESENSE:
		voltage = regval * 250; /* voltage in uV */
		curr = voltage / 100; /* sense resistor 100 mOhm */
		break;
	default:
		/* If we get here, the developer messed up */
		WARN_ON_ONCE(1);
		curr = 0;
		break;
	}

	return curr;
}

/* sysfs show: rail voltage in mV; attr->index selects the register. */
static ssize_t ltc4245_show_voltage(struct device *dev,
				    struct device_attribute *da,
				    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	const int voltage = ltc4245_get_voltage(dev, attr->index);

	return snprintf(buf, PAGE_SIZE, "%d\n", voltage);
}

/* sysfs show: sense current in mA; attr->index selects the register. */
static ssize_t ltc4245_show_current(struct device *dev,
				    struct device_attribute *da,
				    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	const unsigned int curr = ltc4245_get_current(dev, attr->index);

	return snprintf(buf, PAGE_SIZE, "%u\n", curr);
}

/*
 * sysfs show: rail power in uW.  attr->index is the sense register;
 * the matching output-voltage register is assumed to follow it (+1).
 */
static ssize_t ltc4245_show_power(struct device *dev,
				  struct device_attribute *da,
				  char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	const unsigned int curr = ltc4245_get_current(dev, attr->index);
	const int output_voltage = ltc4245_get_voltage(dev, attr->index+1);

	/* current in mA * voltage in mV == power in uW */
	const unsigned int power = abs(output_voltage * curr);

	return snprintf(buf, PAGE_SIZE, "%u\n", power);
}

/* sysfs show: alarm bit; attr->nr is the mask, attr->index the register.
 * (Definition continues beyond this chunk of the file.) */
static ssize_t ltc4245_show_alarm(struct device *dev,
					  struct device_attribute *da,
					  char *buf)
{
	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
	struct ltc4245_data *data = ltc4245_update_device(dev);
	const u8 reg = data->cregs[attr->index];
	const u32 mask = attr->nr;

	return snprintf(buf, PAGE_SIZE, "%u\n", (reg & mask) ?
1 : 0); } static ssize_t ltc4245_show_gpio(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct ltc4245_data *data = ltc4245_update_device(dev); int val = data->gpios[attr->index]; /* handle stale GPIO's */ if (val < 0) return val; /* Convert to millivolts and print */ return snprintf(buf, PAGE_SIZE, "%u\n", val * 10); } /* Construct a sensor_device_attribute structure for each register */ /* Input voltages */ static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ltc4245_show_voltage, NULL, LTC4245_12VIN); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc4245_show_voltage, NULL, LTC4245_5VIN); static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, ltc4245_show_voltage, NULL, LTC4245_3VIN); static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, ltc4245_show_voltage, NULL, LTC4245_VEEIN); /* Input undervoltage alarms */ static SENSOR_DEVICE_ATTR_2(in1_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL, 1 << 0, LTC4245_FAULT1); static SENSOR_DEVICE_ATTR_2(in2_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL, 1 << 1, LTC4245_FAULT1); static SENSOR_DEVICE_ATTR_2(in3_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL, 1 << 2, LTC4245_FAULT1); static SENSOR_DEVICE_ATTR_2(in4_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL, 1 << 3, LTC4245_FAULT1); /* Currents (via sense resistor) */ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4245_show_current, NULL, LTC4245_12VSENSE); static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO, ltc4245_show_current, NULL, LTC4245_5VSENSE); static SENSOR_DEVICE_ATTR(curr3_input, S_IRUGO, ltc4245_show_current, NULL, LTC4245_3VSENSE); static SENSOR_DEVICE_ATTR(curr4_input, S_IRUGO, ltc4245_show_current, NULL, LTC4245_VEESENSE); /* Overcurrent alarms */ static SENSOR_DEVICE_ATTR_2(curr1_max_alarm, S_IRUGO, ltc4245_show_alarm, NULL, 1 << 4, LTC4245_FAULT1); static SENSOR_DEVICE_ATTR_2(curr2_max_alarm, S_IRUGO, ltc4245_show_alarm, NULL, 1 << 5, LTC4245_FAULT1); static SENSOR_DEVICE_ATTR_2(curr3_max_alarm, S_IRUGO, 
ltc4245_show_alarm, NULL, 1 << 6, LTC4245_FAULT1); static SENSOR_DEVICE_ATTR_2(curr4_max_alarm, S_IRUGO, ltc4245_show_alarm, NULL, 1 << 7, LTC4245_FAULT1); /* Output voltages */ static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, ltc4245_show_voltage, NULL, LTC4245_12VOUT); static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, ltc4245_show_voltage, NULL, LTC4245_5VOUT); static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, ltc4245_show_voltage, NULL, LTC4245_3VOUT); static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, ltc4245_show_voltage, NULL, LTC4245_VEEOUT); /* Power Bad alarms */ static SENSOR_DEVICE_ATTR_2(in5_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL, 1 << 0, LTC4245_FAULT2); static SENSOR_DEVICE_ATTR_2(in6_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL, 1 << 1, LTC4245_FAULT2); static SENSOR_DEVICE_ATTR_2(in7_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL, 1 << 2, LTC4245_FAULT2); static SENSOR_DEVICE_ATTR_2(in8_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL, 1 << 3, LTC4245_FAULT2); /* GPIO voltages */ static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, ltc4245_show_gpio, NULL, 0); static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, ltc4245_show_gpio, NULL, 1); static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, ltc4245_show_gpio, NULL, 2); /* Power Consumption (virtual) */ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ltc4245_show_power, NULL, LTC4245_12VSENSE); static SENSOR_DEVICE_ATTR(power2_input, S_IRUGO, ltc4245_show_power, NULL, LTC4245_5VSENSE); static SENSOR_DEVICE_ATTR(power3_input, S_IRUGO, ltc4245_show_power, NULL, LTC4245_3VSENSE); static SENSOR_DEVICE_ATTR(power4_input, S_IRUGO, ltc4245_show_power, NULL, LTC4245_VEESENSE); /* * Finally, construct an array of pointers to members of the above objects, * as required for sysfs_create_group() */ static struct attribute *ltc4245_std_attributes[] = { &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, 
&sensor_dev_attr_in1_min_alarm.dev_attr.attr, &sensor_dev_attr_in2_min_alarm.dev_attr.attr, &sensor_dev_attr_in3_min_alarm.dev_attr.attr, &sensor_dev_attr_in4_min_alarm.dev_attr.attr, &sensor_dev_attr_curr1_input.dev_attr.attr, &sensor_dev_attr_curr2_input.dev_attr.attr, &sensor_dev_attr_curr3_input.dev_attr.attr, &sensor_dev_attr_curr4_input.dev_attr.attr, &sensor_dev_attr_curr1_max_alarm.dev_attr.attr, &sensor_dev_attr_curr2_max_alarm.dev_attr.attr, &sensor_dev_attr_curr3_max_alarm.dev_attr.attr, &sensor_dev_attr_curr4_max_alarm.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, &sensor_dev_attr_in8_input.dev_attr.attr, &sensor_dev_attr_in5_min_alarm.dev_attr.attr, &sensor_dev_attr_in6_min_alarm.dev_attr.attr, &sensor_dev_attr_in7_min_alarm.dev_attr.attr, &sensor_dev_attr_in8_min_alarm.dev_attr.attr, &sensor_dev_attr_in9_input.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, &sensor_dev_attr_power2_input.dev_attr.attr, &sensor_dev_attr_power3_input.dev_attr.attr, &sensor_dev_attr_power4_input.dev_attr.attr, NULL, }; static struct attribute *ltc4245_gpio_attributes[] = { &sensor_dev_attr_in10_input.dev_attr.attr, &sensor_dev_attr_in11_input.dev_attr.attr, NULL, }; static const struct attribute_group ltc4245_std_group = { .attrs = ltc4245_std_attributes, }; static const struct attribute_group ltc4245_gpio_group = { .attrs = ltc4245_gpio_attributes, }; static void ltc4245_sysfs_add_groups(struct ltc4245_data *data) { /* standard sysfs attributes */ data->groups[0] = &ltc4245_std_group; /* if we're using the extra gpio support, register it's attributes */ if (data->use_extra_gpios) data->groups[1] = &ltc4245_gpio_group; } static bool ltc4245_use_extra_gpios(struct i2c_client *client) { struct ltc4245_platform_data *pdata = dev_get_platdata(&client->dev); struct device_node *np = client->dev.of_node; /* prefer platform data */ if (pdata) return pdata->use_extra_gpios; 
/* fallback on OF */ if (of_find_property(np, "ltc4245,use-extra-gpios", NULL)) return true; return false; } static int ltc4245_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = client->adapter; struct ltc4245_data *data; struct device *hwmon_dev; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->client = client; mutex_init(&data->update_lock); data->use_extra_gpios = ltc4245_use_extra_gpios(client); /* Initialize the LTC4245 chip */ i2c_smbus_write_byte_data(client, LTC4245_FAULT1, 0x00); i2c_smbus_write_byte_data(client, LTC4245_FAULT2, 0x00); /* Add sysfs hooks */ ltc4245_sysfs_add_groups(data); hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev, client->name, data, data->groups); return PTR_ERR_OR_ZERO(hwmon_dev); } static const struct i2c_device_id ltc4245_id[] = { { "ltc4245", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ltc4245_id); /* This is the driver that will be inserted */ static struct i2c_driver ltc4245_driver = { .driver = { .name = "ltc4245", }, .probe = ltc4245_probe, .id_table = ltc4245_id, }; module_i2c_driver(ltc4245_driver); MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); MODULE_DESCRIPTION("LTC4245 driver"); MODULE_LICENSE("GPL");
gpl-2.0
winpih/Riderism-2.6.35
arch/arm/kernel/fiq.c
1680
4013
/* * linux/arch/arm/kernel/fiq.c * * Copyright (C) 1998 Russell King * Copyright (C) 1998, 1999 Phil Blundell * * FIQ support written by Philip Blundell <philb@gnu.org>, 1998. * * FIQ support re-written by Russell King to be more generic * * We now properly support a method by which the FIQ handlers can * be stacked onto the vector. We still do not support sharing * the FIQ vector itself. * * Operation is as follows: * 1. Owner A claims FIQ: * - default_fiq relinquishes control. * 2. Owner A: * - inserts code. * - sets any registers, * - enables FIQ. * 3. Owner B claims FIQ: * - if owner A has a relinquish function. * - disable FIQs. * - saves any registers. * - returns zero. * 4. Owner B: * - inserts code. * - sets any registers, * - enables FIQ. * 5. Owner B releases FIQ: * - Owner A is asked to reacquire FIQ: * - inserts code. * - restores saved registers. * - enables FIQ. * 6. Goto 3 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/seq_file.h> #include <asm/cacheflush.h> #include <asm/fiq.h> #include <asm/irq.h> #include <asm/system.h> static unsigned long no_fiq_insn; /* Default reacquire function * - we always relinquish FIQ control * - we always reacquire FIQ control */ static int fiq_def_op(void *ref, int relinquish) { if (!relinquish) set_fiq_handler(&no_fiq_insn, sizeof(no_fiq_insn)); return 0; } static struct fiq_handler default_owner = { .name = "default", .fiq_op = fiq_def_op, }; static struct fiq_handler *current_fiq = &default_owner; int show_fiq_list(struct seq_file *p, void *v) { if (current_fiq != &default_owner) seq_printf(p, "FIQ: %s\n", current_fiq->name); return 0; } void set_fiq_handler(void *start, unsigned int length) { memcpy((void *)0xffff001c, start, length); flush_icache_range(0xffff001c, 0xffff001c + length); if (!vectors_high()) flush_icache_range(0x1c, 0x1c + length); } /* * Taking an interrupt in FIQ mode is death, so both these functions * disable irqs for 
the duration. Note - these functions are almost * entirely coded in assembly. */ void __naked set_fiq_regs(struct pt_regs *regs) { register unsigned long tmp; asm volatile ( "mov ip, sp\n\ stmfd sp!, {fp, ip, lr, pc}\n\ sub fp, ip, #4\n\ mrs %0, cpsr\n\ msr cpsr_c, %2 @ select FIQ mode\n\ mov r0, r0\n\ ldmia %1, {r8 - r14}\n\ msr cpsr_c, %0 @ return to SVC mode\n\ mov r0, r0\n\ ldmfd sp, {fp, sp, pc}" : "=&r" (tmp) : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE)); } void __naked get_fiq_regs(struct pt_regs *regs) { register unsigned long tmp; asm volatile ( "mov ip, sp\n\ stmfd sp!, {fp, ip, lr, pc}\n\ sub fp, ip, #4\n\ mrs %0, cpsr\n\ msr cpsr_c, %2 @ select FIQ mode\n\ mov r0, r0\n\ stmia %1, {r8 - r14}\n\ msr cpsr_c, %0 @ return to SVC mode\n\ mov r0, r0\n\ ldmfd sp, {fp, sp, pc}" : "=&r" (tmp) : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE)); } int claim_fiq(struct fiq_handler *f) { int ret = 0; if (current_fiq) { ret = -EBUSY; if (current_fiq->fiq_op != NULL) ret = current_fiq->fiq_op(current_fiq->dev_id, 1); } if (!ret) { f->next = current_fiq; current_fiq = f; } return ret; } void release_fiq(struct fiq_handler *f) { if (current_fiq != f) { printk(KERN_ERR "%s FIQ trying to release %s FIQ\n", f->name, current_fiq->name); dump_stack(); return; } do current_fiq = current_fiq->next; while (current_fiq->fiq_op(current_fiq->dev_id, 0)); } void enable_fiq(int fiq) { enable_irq(fiq + FIQ_START); } void disable_fiq(int fiq) { disable_irq(fiq + FIQ_START); } EXPORT_SYMBOL(set_fiq_handler); EXPORT_SYMBOL(set_fiq_regs); EXPORT_SYMBOL(get_fiq_regs); EXPORT_SYMBOL(claim_fiq); EXPORT_SYMBOL(release_fiq); EXPORT_SYMBOL(enable_fiq); EXPORT_SYMBOL(disable_fiq); void __init init_FIQ(void) { no_fiq_insn = *(unsigned long *)0xffff001c; }
gpl-2.0
shakalaca/ASUS_ZenFone_ZE551KL
kernel/arch/powerpc/mm/pgtable_64.c
1936
11366
/* * This file contains ioremap and related functions for 64-bit machines. * * Derived from arch/ppc64/mm/init.c * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org) * and Cort Dougan (PReP) (cort@cs.nmt.edu) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Dave Engebretsen <engebret@us.ibm.com> * Rework for PPC64 port. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/export.h> #include <linux/types.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/stddef.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/slab.h> #include <asm/pgalloc.h> #include <asm/page.h> #include <asm/prom.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/mmu.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/tlb.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/sections.h> #include <asm/firmware.h> #include "mmu_decl.h" /* Some sanity checking */ #if TASK_SIZE_USER64 > PGTABLE_RANGE #error TASK_SIZE_USER64 exceeds pagetable range #endif #ifdef CONFIG_PPC_STD_MMU_64 #if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT)) #error TASK_SIZE_USER64 exceeds user VSID range #endif #endif unsigned long ioremap_bot = IOREMAP_BASE; #ifdef CONFIG_PPC_MMU_NOHASH static void *early_alloc_pgtable(unsigned long size) { void *pt; if (init_bootmem_done) pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS)); else pt = 
__va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS))); memset(pt, 0, size); return pt; } #endif /* CONFIG_PPC_MMU_NOHASH */ /* * map_kernel_page currently only called by __ioremap * map_kernel_page adds an entry to the ioremap page table * and adds an entry to the HPT, possibly bolting it */ int map_kernel_page(unsigned long ea, unsigned long pa, int flags) { pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; if (slab_is_available()) { pgdp = pgd_offset_k(ea); pudp = pud_alloc(&init_mm, pgdp, ea); if (!pudp) return -ENOMEM; pmdp = pmd_alloc(&init_mm, pudp, ea); if (!pmdp) return -ENOMEM; ptep = pte_alloc_kernel(pmdp, ea); if (!ptep) return -ENOMEM; set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags))); } else { #ifdef CONFIG_PPC_MMU_NOHASH /* Warning ! This will blow up if bootmem is not initialized * which our ppc64 code is keen to do that, we'll need to * fix it and/or be more careful */ pgdp = pgd_offset_k(ea); #ifdef PUD_TABLE_SIZE if (pgd_none(*pgdp)) { pudp = early_alloc_pgtable(PUD_TABLE_SIZE); BUG_ON(pudp == NULL); pgd_populate(&init_mm, pgdp, pudp); } #endif /* PUD_TABLE_SIZE */ pudp = pud_offset(pgdp, ea); if (pud_none(*pudp)) { pmdp = early_alloc_pgtable(PMD_TABLE_SIZE); BUG_ON(pmdp == NULL); pud_populate(&init_mm, pudp, pmdp); } pmdp = pmd_offset(pudp, ea); if (!pmd_present(*pmdp)) { ptep = early_alloc_pgtable(PAGE_SIZE); BUG_ON(ptep == NULL); pmd_populate_kernel(&init_mm, pmdp, ptep); } ptep = pte_offset_kernel(pmdp, ea); set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags))); #else /* CONFIG_PPC_MMU_NOHASH */ /* * If the mm subsystem is not fully up, we cannot create a * linux page table entry for this mapping. Simply bolt an * entry in the hardware page table. 
* */ if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags, mmu_io_psize, mmu_kernel_ssize)) { printk(KERN_ERR "Failed to do bolted mapping IO " "memory at %016lx !\n", pa); return -ENOMEM; } #endif /* !CONFIG_PPC_MMU_NOHASH */ } return 0; } /** * __ioremap_at - Low level function to establish the page tables * for an IO mapping */ void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size, unsigned long flags) { unsigned long i; /* Make sure we have the base flags */ if ((flags & _PAGE_PRESENT) == 0) flags |= pgprot_val(PAGE_KERNEL); /* Non-cacheable page cannot be coherent */ if (flags & _PAGE_NO_CACHE) flags &= ~_PAGE_COHERENT; /* We don't support the 4K PFN hack with ioremap */ if (flags & _PAGE_4K_PFN) return NULL; WARN_ON(pa & ~PAGE_MASK); WARN_ON(((unsigned long)ea) & ~PAGE_MASK); WARN_ON(size & ~PAGE_MASK); for (i = 0; i < size; i += PAGE_SIZE) if (map_kernel_page((unsigned long)ea+i, pa+i, flags)) return NULL; return (void __iomem *)ea; } /** * __iounmap_from - Low level function to tear down the page tables * for an IO mapping. This is used for mappings that * are manipulated manually, like partial unmapping of * PCI IOs or ISA space. */ void __iounmap_at(void *ea, unsigned long size) { WARN_ON(((unsigned long)ea) & ~PAGE_MASK); WARN_ON(size & ~PAGE_MASK); unmap_kernel_range((unsigned long)ea, size); } void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags, void *caller) { phys_addr_t paligned; void __iomem *ret; /* * Choose an address to map it to. * Once the imalloc system is running, we use it. * Before that, we map using addresses going * up from ioremap_bot. 
imalloc will use * the addresses from ioremap_bot through * IMALLOC_END * */ paligned = addr & PAGE_MASK; size = PAGE_ALIGN(addr + size) - paligned; if ((size == 0) || (paligned == 0)) return NULL; if (mem_init_done) { struct vm_struct *area; area = __get_vm_area_caller(size, VM_IOREMAP, ioremap_bot, IOREMAP_END, caller); if (area == NULL) return NULL; area->phys_addr = paligned; ret = __ioremap_at(paligned, area->addr, size, flags); if (!ret) vunmap(area->addr); } else { ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags); if (ret) ioremap_bot += size; } if (ret) ret += addr & ~PAGE_MASK; return ret; } void __iomem * __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) { return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); } void __iomem * ioremap(phys_addr_t addr, unsigned long size) { unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED; void *caller = __builtin_return_address(0); if (ppc_md.ioremap) return ppc_md.ioremap(addr, size, flags, caller); return __ioremap_caller(addr, size, flags, caller); } void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size) { unsigned long flags = _PAGE_NO_CACHE; void *caller = __builtin_return_address(0); if (ppc_md.ioremap) return ppc_md.ioremap(addr, size, flags, caller); return __ioremap_caller(addr, size, flags, caller); } void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags) { void *caller = __builtin_return_address(0); /* writeable implies dirty for kernel addresses */ if (flags & _PAGE_RW) flags |= _PAGE_DIRTY; /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */ flags &= ~(_PAGE_USER | _PAGE_EXEC); #ifdef _PAGE_BAP_SR /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format * which means that we just cleared supervisor access... 
oops ;-) This * restores it */ flags |= _PAGE_BAP_SR; #endif if (ppc_md.ioremap) return ppc_md.ioremap(addr, size, flags, caller); return __ioremap_caller(addr, size, flags, caller); } /* * Unmap an IO region and remove it from imalloc'd list. * Access to IO memory should be serialized by driver. */ void __iounmap(volatile void __iomem *token) { void *addr; if (!mem_init_done) return; addr = (void *) ((unsigned long __force) PCI_FIX_ADDR(token) & PAGE_MASK); if ((unsigned long)addr < ioremap_bot) { printk(KERN_WARNING "Attempt to iounmap early bolted mapping" " at 0x%p\n", addr); return; } vunmap(addr); } void iounmap(volatile void __iomem *token) { if (ppc_md.iounmap) ppc_md.iounmap(token); else __iounmap(token); } EXPORT_SYMBOL(ioremap); EXPORT_SYMBOL(ioremap_wc); EXPORT_SYMBOL(ioremap_prot); EXPORT_SYMBOL(__ioremap); EXPORT_SYMBOL(__ioremap_at); EXPORT_SYMBOL(iounmap); EXPORT_SYMBOL(__iounmap); EXPORT_SYMBOL(__iounmap_at); #ifdef CONFIG_PPC_64K_PAGES static pte_t *get_from_cache(struct mm_struct *mm) { void *pte_frag, *ret; spin_lock(&mm->page_table_lock); ret = mm->context.pte_frag; if (ret) { pte_frag = ret + PTE_FRAG_SIZE; /* * If we have taken up all the fragments mark PTE page NULL */ if (((unsigned long)pte_frag & ~PAGE_MASK) == 0) pte_frag = NULL; mm->context.pte_frag = pte_frag; } spin_unlock(&mm->page_table_lock); return (pte_t *)ret; } static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel) { void *ret = NULL; struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO); if (!page) return NULL; ret = page_address(page); spin_lock(&mm->page_table_lock); /* * If we find pgtable_page set, we return * the allocated page with single fragement * count. 
*/ if (likely(!mm->context.pte_frag)) { atomic_set(&page->_count, PTE_FRAG_NR); mm->context.pte_frag = ret + PTE_FRAG_SIZE; } spin_unlock(&mm->page_table_lock); if (!kernel) pgtable_page_ctor(page); return (pte_t *)ret; } pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel) { pte_t *pte; pte = get_from_cache(mm); if (pte) return pte; return __alloc_for_cache(mm, kernel); } void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel) { struct page *page = virt_to_page(table); if (put_page_testzero(page)) { if (!kernel) pgtable_page_dtor(page); free_hot_cold_page(page, 0); } } #ifdef CONFIG_SMP static void page_table_free_rcu(void *table) { struct page *page = virt_to_page(table); if (put_page_testzero(page)) { pgtable_page_dtor(page); free_hot_cold_page(page, 0); } } void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) { unsigned long pgf = (unsigned long)table; BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); pgf |= shift; tlb_remove_table(tlb, (void *)pgf); } void __tlb_remove_table(void *_table) { void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE); unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE; if (!shift) /* PTE page needs special handling */ page_table_free_rcu(table); else { BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); kmem_cache_free(PGT_CACHE(shift), table); } } #else void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) { if (!shift) { /* PTE page needs special handling */ struct page *page = virt_to_page(table); if (put_page_testzero(page)) { pgtable_page_dtor(page); free_hot_cold_page(page, 0); } } else { BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); kmem_cache_free(PGT_CACHE(shift), table); } } #endif #endif /* CONFIG_PPC_64K_PAGES */
gpl-2.0
cjdoucette/XIA-for-Linux
arch/mips/pmcs-msp71xx/msp_irq.c
1936
4229
/* * IRQ vector handles * * Copyright (C) 1995, 1996, 1997, 2003 by Ralf Baechle * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/time.h> #include <asm/irq_cpu.h> #include <asm/setup.h> #include <msp_int.h> /* SLP bases systems */ extern void msp_slp_irq_init(void); extern void msp_slp_irq_dispatch(void); /* CIC based systems */ extern void msp_cic_irq_init(void); extern void msp_cic_irq_dispatch(void); /* VSMP support init */ extern void msp_vsmp_int_init(void); /* vectored interrupt implementation */ /* SW0/1 interrupts are used for SMP */ static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); } static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); } static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); } static inline void usb_int_dispatch(void) { do_IRQ(MSP_INT_USB); } static inline void sec_int_dispatch(void) { do_IRQ(MSP_INT_SEC); } /* * The PMC-Sierra MSP interrupts are arranged in a 3 level cascaded * hierarchical system. The first level are the direct MIPS interrupts * and are assigned the interrupt range 0-7. The second level is the SLM * interrupt controller and is assigned the range 8-39. The third level * comprises the Peripherial block, the PCI block, the PCI MSI block and * the SLP. The PCI interrupts and the SLP errors are handled by the * relevant subsystems so the core interrupt code needs only concern * itself with the Peripheral block. These are assigned interrupts in * the range 40-71. */ asmlinkage void plat_irq_dispatch(void) { u32 pending; pending = read_c0_status() & read_c0_cause(); /* * jump to the correct interrupt routine * These are arranged in priority order and the timer * comes first! 
*/ #ifdef CONFIG_IRQ_MSP_CIC /* break out the CIC stuff for now */ if (pending & C_IRQ4) /* do the peripherals first, that's the timer */ msp_cic_irq_dispatch(); else if (pending & C_IRQ0) do_IRQ(MSP_INT_MAC0); else if (pending & C_IRQ1) do_IRQ(MSP_INT_MAC1); else if (pending & C_IRQ2) do_IRQ(MSP_INT_USB); else if (pending & C_IRQ3) do_IRQ(MSP_INT_SAR); else if (pending & C_IRQ5) do_IRQ(MSP_INT_SEC); #else if (pending & C_IRQ5) do_IRQ(MSP_INT_TIMER); else if (pending & C_IRQ0) do_IRQ(MSP_INT_MAC0); else if (pending & C_IRQ1) do_IRQ(MSP_INT_MAC1); else if (pending & C_IRQ3) do_IRQ(MSP_INT_VE); else if (pending & C_IRQ4) msp_slp_irq_dispatch(); #endif else if (pending & C_SW0) /* do software after hardware */ do_IRQ(MSP_INT_SW0); else if (pending & C_SW1) do_IRQ(MSP_INT_SW1); } static struct irqaction cic_cascade_msp = { .handler = no_action, .name = "MSP CIC cascade", .flags = IRQF_NO_THREAD, }; static struct irqaction per_cascade_msp = { .handler = no_action, .name = "MSP PER cascade", .flags = IRQF_NO_THREAD, }; void __init arch_init_irq(void) { /* assume we'll be using vectored interrupt mode except in UP mode*/ #ifdef CONFIG_MIPS_MT BUG_ON(!cpu_has_vint); #endif /* initialize the 1st-level CPU based interrupt controller */ mips_cpu_irq_init(); #ifdef CONFIG_IRQ_MSP_CIC msp_cic_irq_init(); #ifdef CONFIG_MIPS_MT set_vi_handler(MSP_INT_CIC, msp_cic_irq_dispatch); set_vi_handler(MSP_INT_MAC0, mac0_int_dispatch); set_vi_handler(MSP_INT_MAC1, mac1_int_dispatch); set_vi_handler(MSP_INT_SAR, mac2_int_dispatch); set_vi_handler(MSP_INT_USB, usb_int_dispatch); set_vi_handler(MSP_INT_SEC, sec_int_dispatch); #ifdef CONFIG_MIPS_MT_SMP msp_vsmp_int_init(); #endif /* CONFIG_MIPS_MT_SMP */ #endif /* CONFIG_MIPS_MT */ /* setup the cascaded interrupts */ setup_irq(MSP_INT_CIC, &cic_cascade_msp); setup_irq(MSP_INT_PER, &per_cascade_msp); #else /* * Setup the 2nd-level SLP register based interrupt controller. * VSMP support support is not enabled for SLP. 
*/ msp_slp_irq_init(); /* setup the cascaded SLP/PER interrupts */ setup_irq(MSP_INT_SLP, &cic_cascade_msp); setup_irq(MSP_INT_PER, &per_cascade_msp); #endif }
gpl-2.0
bq-rk3066/android_kernel_bq_rk3188_DEPRECATED
arch/sparc/kernel/leon_pci_grpci2.c
2192
24398
/* * leon_pci_grpci2.c: GRPCI2 Host PCI driver * * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom * */ #include <linux/of_device.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/module.h> #include <asm/io.h> #include <asm/leon.h> #include <asm/vaddrs.h> #include <asm/sections.h> #include <asm/leon_pci.h> #include "irq.h" struct grpci2_barcfg { unsigned long pciadr; /* PCI Space Address */ unsigned long ahbadr; /* PCI Base address mapped to this AHB addr */ }; /* Device Node Configuration options: * - barcfgs : Custom Configuration of Host's 6 target BARs * - irq_mask : Limit which PCI interrupts are enabled * - do_reset : Force PCI Reset on startup * * barcfgs * ======= * * Optional custom Target BAR configuration (see struct grpci2_barcfg). All * addresses are physical. Array always contains 6 elements (len=2*4*6 bytes) * * -1 means not configured (let host driver do default setup). * * [i*2+0] = PCI Address of BAR[i] on target interface * [i*2+1] = Accessing PCI address of BAR[i] result in this AMBA address * * * irq_mask * ======== * * Limit which PCI interrupts are enabled. 0=Disable, 1=Enable. By default * all are enabled. Use this when PCI interrupt pins are floating on PCB. * int, len=4. * bit0 = PCI INTA# * bit1 = PCI INTB# * bit2 = PCI INTC# * bit3 = PCI INTD# * * * reset * ===== * * Force PCI reset on startup. 
int, len=4 */ /* Enable Debugging Configuration Space Access */ #undef GRPCI2_DEBUG_CFGACCESS /* * GRPCI2 APB Register MAP */ struct grpci2_regs { unsigned int ctrl; /* 0x00 Control */ unsigned int sts_cap; /* 0x04 Status / Capabilities */ int res1; /* 0x08 */ unsigned int io_map; /* 0x0C I/O Map address */ unsigned int dma_ctrl; /* 0x10 DMA */ unsigned int dma_bdbase; /* 0x14 DMA */ int res2[2]; /* 0x18 */ unsigned int bars[6]; /* 0x20 read-only PCI BARs */ int res3[2]; /* 0x38 */ unsigned int ahbmst_map[16]; /* 0x40 AHB->PCI Map per AHB Master */ /* PCI Trace Buffer Registers (OPTIONAL) */ unsigned int t_ctrl; /* 0x80 */ unsigned int t_cnt; /* 0x84 */ unsigned int t_adpat; /* 0x88 */ unsigned int t_admask; /* 0x8C */ unsigned int t_sigpat; /* 0x90 */ unsigned int t_sigmask; /* 0x94 */ unsigned int t_adstate; /* 0x98 */ unsigned int t_sigstate; /* 0x9C */ }; #define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a)))) #define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a))) #define CTRL_BUS_BIT 16 #define CTRL_RESET (1<<31) #define CTRL_SI (1<<27) #define CTRL_PE (1<<26) #define CTRL_EI (1<<25) #define CTRL_ER (1<<24) #define CTRL_BUS (0xff<<CTRL_BUS_BIT) #define CTRL_HOSTINT 0xf #define STS_HOST_BIT 31 #define STS_MST_BIT 30 #define STS_TAR_BIT 29 #define STS_DMA_BIT 28 #define STS_DI_BIT 27 #define STS_HI_BIT 26 #define STS_IRQMODE_BIT 24 #define STS_TRACE_BIT 23 #define STS_CFGERRVALID_BIT 20 #define STS_CFGERR_BIT 19 #define STS_INTTYPE_BIT 12 #define STS_INTSTS_BIT 8 #define STS_FDEPTH_BIT 2 #define STS_FNUM_BIT 0 #define STS_HOST (1<<STS_HOST_BIT) #define STS_MST (1<<STS_MST_BIT) #define STS_TAR (1<<STS_TAR_BIT) #define STS_DMA (1<<STS_DMA_BIT) #define STS_DI (1<<STS_DI_BIT) #define STS_HI (1<<STS_HI_BIT) #define STS_IRQMODE (0x3<<STS_IRQMODE_BIT) #define STS_TRACE (1<<STS_TRACE_BIT) #define STS_CFGERRVALID (1<<STS_CFGERRVALID_BIT) #define STS_CFGERR (1<<STS_CFGERR_BIT) #define STS_INTTYPE (0x3f<<STS_INTTYPE_BIT) #define STS_INTSTS (0xf<<STS_INTSTS_BIT) #define 
STS_FDEPTH (0x7<<STS_FDEPTH_BIT) #define STS_FNUM (0x3<<STS_FNUM_BIT) #define STS_ISYSERR (1<<17) #define STS_IDMA (1<<16) #define STS_IDMAERR (1<<15) #define STS_IMSTABRT (1<<14) #define STS_ITGTABRT (1<<13) #define STS_IPARERR (1<<12) #define STS_ERR_IRQ (STS_ISYSERR | STS_IMSTABRT | STS_ITGTABRT | STS_IPARERR) struct grpci2_bd_chan { unsigned int ctrl; /* 0x00 DMA Control */ unsigned int nchan; /* 0x04 Next DMA Channel Address */ unsigned int nbd; /* 0x08 Next Data Descriptor in chan */ unsigned int res; /* 0x0C Reserved */ }; #define BD_CHAN_EN 0x80000000 #define BD_CHAN_TYPE 0x00300000 #define BD_CHAN_BDCNT 0x0000ffff #define BD_CHAN_EN_BIT 31 #define BD_CHAN_TYPE_BIT 20 #define BD_CHAN_BDCNT_BIT 0 struct grpci2_bd_data { unsigned int ctrl; /* 0x00 DMA Data Control */ unsigned int pci_adr; /* 0x04 PCI Start Address */ unsigned int ahb_adr; /* 0x08 AHB Start address */ unsigned int next; /* 0x0C Next Data Descriptor in chan */ }; #define BD_DATA_EN 0x80000000 #define BD_DATA_IE 0x40000000 #define BD_DATA_DR 0x20000000 #define BD_DATA_TYPE 0x00300000 #define BD_DATA_ER 0x00080000 #define BD_DATA_LEN 0x0000ffff #define BD_DATA_EN_BIT 31 #define BD_DATA_IE_BIT 30 #define BD_DATA_DR_BIT 29 #define BD_DATA_TYPE_BIT 20 #define BD_DATA_ER_BIT 19 #define BD_DATA_LEN_BIT 0 /* GRPCI2 Capability */ struct grpci2_cap_first { unsigned int ctrl; unsigned int pci2ahb_map[6]; unsigned int ext2ahb_map; unsigned int io_map; unsigned int pcibar_size[6]; }; #define CAP9_CTRL_OFS 0 #define CAP9_BAR_OFS 0x4 #define CAP9_IOMAP_OFS 0x20 #define CAP9_BARSIZE_OFS 0x24 struct grpci2_priv { struct leon_pci_info info; /* must be on top of this structure */ struct grpci2_regs *regs; char irq; char irq_mode; /* IRQ Mode from CAPSTS REG */ char bt_enabled; char do_reset; char irq_mask; u32 pciid; /* PCI ID of Host */ unsigned char irq_map[4]; /* Virtual IRQ numbers */ unsigned int virq_err; unsigned int virq_dma; /* AHB PCI Windows */ unsigned long pci_area; /* MEMORY */ unsigned long 
pci_area_end; unsigned long pci_io; /* I/O */ unsigned long pci_conf; /* CONFIGURATION */ unsigned long pci_conf_end; unsigned long pci_io_va; struct grpci2_barcfg tgtbars[6]; }; DEFINE_SPINLOCK(grpci2_dev_lock); struct grpci2_priv *grpci2priv; int grpci2_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { struct grpci2_priv *priv = dev->bus->sysdata; int irq_group; /* Use default IRQ decoding on PCI BUS0 according slot numbering */ irq_group = slot & 0x3; pin = ((pin - 1) + irq_group) & 0x3; return priv->irq_map[pin]; } static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus, unsigned int devfn, int where, u32 *val) { unsigned int *pci_conf; unsigned long flags; u32 tmp; if (where & 0x3) return -EINVAL; if (bus == 0 && PCI_SLOT(devfn) != 0) devfn += (0x8 * 6); /* Select bus */ spin_lock_irqsave(&grpci2_dev_lock, flags); REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) | (bus << 16)); spin_unlock_irqrestore(&grpci2_dev_lock, flags); /* clear old status */ REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID)); pci_conf = (unsigned int *) (priv->pci_conf | (devfn << 8) | (where & 0xfc)); tmp = LEON3_BYPASS_LOAD_PA(pci_conf); /* Wait until GRPCI2 signals that CFG access is done, it should be * done instantaneously unless a DMA operation is ongoing... 
 */
	while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
		;

	if (REGLOAD(priv->regs->sts_cap) & STS_CFGERR) {
		/* Config cycle ended with an error (e.g. no device at this
		 * devfn): report all-ones, as a real PCI master abort would.
		 */
		*val = 0xffffffff;
	} else {
		/* Bus always little endian (unaffected by byte-swapping) */
		*val = flip_dword(tmp);
	}

	return 0;
}

/* 16-bit configuration-space read, built on grpci2_cfg_r32(): read the
 * containing 32-bit register and extract the requested half-word.
 * 'where' must be 16-bit aligned, otherwise -EINVAL.
 */
static int grpci2_cfg_r16(struct grpci2_priv *priv, unsigned int bus,
				unsigned int devfn, int where, u32 *val)
{
	u32 v;
	int ret;

	if (where & 0x1)
		return -EINVAL;
	ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
	*val = 0xffff & (v >> (8 * (where & 0x3)));
	return ret;
}

/* 8-bit configuration-space read: read the containing 32-bit register and
 * shift out the requested byte. No alignment restriction for byte access.
 */
static int grpci2_cfg_r8(struct grpci2_priv *priv, unsigned int bus,
				unsigned int devfn, int where, u32 *val)
{
	u32 v;
	int ret;

	ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
	*val = 0xff & (v >> (8 * (where & 3)));
	return ret;
}

/* 32-bit configuration-space write. Mirrors grpci2_cfg_r32(): select the
 * destination bus in ctrl, clear stale CFG error status, then store into
 * the PCI configuration window with an MMU-bypass physical-address store.
 * Returns 0 on success, -EINVAL on unaligned 'where'.
 */
static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
				unsigned int devfn, int where, u32 val)
{
	unsigned int *pci_conf;
	unsigned long flags;

	if (where & 0x3)
		return -EINVAL;

	/* NOTE(review): bus 0 accesses to slots other than 0 are offset by
	 * 6 device numbers (0x8 * 6) — presumably to step past the host
	 * bridge's own config area; confirm against the GRPCI2 manual.
	 */
	if (bus == 0 && PCI_SLOT(devfn) != 0)
		devfn += (0x8 * 6);

	/* Select bus */
	spin_lock_irqsave(&grpci2_dev_lock, flags);
	REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
				   (bus << 16));
	spin_unlock_irqrestore(&grpci2_dev_lock, flags);

	/* clear old status */
	REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));

	pci_conf = (unsigned int *) (priv->pci_conf |
						(devfn << 8) | (where & 0xfc));
	LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));

	/* Wait until GRPCI2 signals that CFG access is done, it should be
	 * done instantaneously unless a DMA operation is ongoing...
 */
	while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
		;

	return 0;
}

/* 16-bit configuration-space write: read-modify-write of the containing
 * 32-bit register via grpci2_cfg_r32()/grpci2_cfg_w32().
 * 'where' must be 16-bit aligned, otherwise -EINVAL.
 */
static int grpci2_cfg_w16(struct grpci2_priv *priv, unsigned int bus,
				unsigned int devfn, int where, u32 val)
{
	int ret;
	u32 v;

	if (where & 0x1)
		return -EINVAL;
	ret = grpci2_cfg_r32(priv, bus, devfn, where&~3, &v);
	if (ret)
		return ret;
	/* Merge the new half-word into its byte lane, keep the rest */
	v = (v & ~(0xffff << (8 * (where & 0x3)))) |
	    ((0xffff & val) << (8 * (where & 0x3)));
	return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
}

/* 8-bit configuration-space write: read-modify-write of the containing
 * 32-bit register. No alignment restriction for byte access.
 */
static int grpci2_cfg_w8(struct grpci2_priv *priv, unsigned int bus,
				unsigned int devfn, int where, u32 val)
{
	int ret;
	u32 v;

	ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
	if (ret != 0)
		return ret;
	/* Merge the new byte into its lane, keep the rest */
	v = (v & ~(0xff << (8 * (where & 0x3)))) |
	    ((0xff & val) << (8 * (where & 0x3)));
	return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
}

/* Read from Configuration Space. When entering here the PCI layer has taken
 * the pci_lock spinlock and IRQ is off.
 */
static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	struct grpci2_priv *priv = grpci2priv;
	unsigned int busno = bus->number;
	int ret;

	/* Slots above 15 don't exist on this host; [bus0:slot0] is refused
	 * as well — presumably the host bridge itself (see the devfn offset
	 * in grpci2_cfg_r32/w32). Report all-ones like an empty slot.
	 */
	if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) {
		*val = ~0;
		return 0;
	}

	switch (size) {
	case 1:
		ret = grpci2_cfg_r8(priv, busno, devfn, where, val);
		break;
	case 2:
		ret = grpci2_cfg_r16(priv, busno, devfn, where, val);
		break;
	case 4:
		ret = grpci2_cfg_r32(priv, busno, devfn, where, val);
		break;
	default:
		ret = -EINVAL;
		break;
	}

#ifdef GRPCI2_DEBUG_CFGACCESS
	printk(KERN_INFO "grpci2_read_config: [%02x:%02x:%x] ofs=%d val=%x "
		"size=%d\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where,
		*val, size);
#endif

	return ret;
}

/* Write to Configuration Space. When entering here the PCI layer has taken
 * the pci_lock spinlock and IRQ is off.
 */
static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	struct grpci2_priv *priv = grpci2priv;
	unsigned int busno = bus->number;

	/* Silently drop writes to non-existing slots and to [bus0:slot0]
	 * (same exclusion as grpci2_read_config); returning 0 lets the PCI
	 * core proceed as if the write succeeded.
	 */
	if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0))
		return 0;

#ifdef GRPCI2_DEBUG_CFGACCESS
	printk(KERN_INFO "grpci2_write_config: [%02x:%02x:%x] ofs=%d size=%d "
		"val=%x\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn),
		where, size, val);
#endif

	switch (size) {
	default:
		return -EINVAL;
	case 1:
		return grpci2_cfg_w8(priv, busno, devfn, where, val);
	case 2:
		return grpci2_cfg_w16(priv, busno, devfn, where, val);
	case 4:
		return grpci2_cfg_w32(priv, busno, devfn, where, val);
	}
}

static struct pci_ops grpci2_ops = {
	.read =		grpci2_read_config,
	.write =	grpci2_write_config,
};

/* GENIRQ IRQ chip implementation for GRPCI2 irqmode=0..2. In configuration
 * 3 where all PCI Interrupts has a separate IRQ on the system IRQ controller
 * this is not needed and the standard IRQ controller can be used.
 */

/* Disable one PCI INTA#..INTD# line by clearing its enable bit in ctrl.
 * chip_data carries (interrupt index + 1), set by grpci2_build_device_irq(),
 * so irqidx > 3 means "not a PCI INTX#" and is ignored.
 */
static void grpci2_mask_irq(struct irq_data *data)
{
	unsigned long flags;
	unsigned int irqidx;
	struct grpci2_priv *priv = grpci2priv;

	irqidx = (unsigned int)data->chip_data - 1;
	if (irqidx > 3)	/* only mask PCI interrupts here */
		return;

	spin_lock_irqsave(&grpci2_dev_lock, flags);
	REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) & ~(1 << irqidx));
	spin_unlock_irqrestore(&grpci2_dev_lock, flags);
}

/* Re-enable one PCI INTA#..INTD# line (counterpart of grpci2_mask_irq). */
static void grpci2_unmask_irq(struct irq_data *data)
{
	unsigned long flags;
	unsigned int irqidx;
	struct grpci2_priv *priv = grpci2priv;

	irqidx = (unsigned int)data->chip_data - 1;
	if (irqidx > 3)	/* only unmask PCI interrupts here */
		return;

	spin_lock_irqsave(&grpci2_dev_lock, flags);
	REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) | (1 << irqidx));
	spin_unlock_irqrestore(&grpci2_dev_lock, flags);
}

/* irq_chip startup hook: nothing beyond unmasking is required. */
static unsigned int grpci2_startup_irq(struct irq_data *data)
{
	grpci2_unmask_irq(data);
	return 0;
}

/* irq_chip shutdown hook: mask the line again. */
static void grpci2_shutdown_irq(struct irq_data *data)
{
	grpci2_mask_irq(data);
}

static struct
irq_chip grpci2_irq = { .name = "grpci2", .irq_startup = grpci2_startup_irq, .irq_shutdown = grpci2_shutdown_irq, .irq_mask = grpci2_mask_irq, .irq_unmask = grpci2_unmask_irq, }; /* Handle one or multiple IRQs from the PCI core */ static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc) { struct grpci2_priv *priv = grpci2priv; int i, ack = 0; unsigned int ctrl, sts_cap, pci_ints; ctrl = REGLOAD(priv->regs->ctrl); sts_cap = REGLOAD(priv->regs->sts_cap); /* Error Interrupt? */ if (sts_cap & STS_ERR_IRQ) { generic_handle_irq(priv->virq_err); ack = 1; } /* PCI Interrupt? */ pci_ints = ((~sts_cap) >> STS_INTSTS_BIT) & ctrl & CTRL_HOSTINT; if (pci_ints) { /* Call respective PCI Interrupt handler */ for (i = 0; i < 4; i++) { if (pci_ints & (1 << i)) generic_handle_irq(priv->irq_map[i]); } ack = 1; } /* * Decode DMA Interrupt only when shared with Err and PCI INTX#, when * the DMA is a unique IRQ the DMA interrupts doesn't end up here, they * goes directly to DMA ISR. */ if ((priv->irq_mode == 0) && (sts_cap & (STS_IDMA | STS_IDMAERR))) { generic_handle_irq(priv->virq_dma); ack = 1; } /* * Call "first level" IRQ chip end-of-irq handler. 
It will ACK LEON IRQ * Controller, this must be done after IRQ sources have been handled to * avoid double IRQ generation */ if (ack) desc->irq_data.chip->irq_eoi(&desc->irq_data); } /* Create a virtual IRQ */ static unsigned int grpci2_build_device_irq(unsigned int irq) { unsigned int virq = 0, pil; pil = 1 << 8; virq = irq_alloc(irq, pil); if (virq == 0) goto out; irq_set_chip_and_handler_name(virq, &grpci2_irq, handle_simple_irq, "pcilvl"); irq_set_chip_data(virq, (void *)irq); out: return virq; } void grpci2_hw_init(struct grpci2_priv *priv) { u32 ahbadr, pciadr, bar_sz, capptr, io_map, data; struct grpci2_regs *regs = priv->regs; int i; struct grpci2_barcfg *barcfg = priv->tgtbars; /* Reset any earlier setup */ if (priv->do_reset) { printk(KERN_INFO "GRPCI2: Resetting PCI bus\n"); REGSTORE(regs->ctrl, CTRL_RESET); ssleep(1); /* Wait for boards to settle */ } REGSTORE(regs->ctrl, 0); REGSTORE(regs->sts_cap, ~0); /* Clear Status */ REGSTORE(regs->dma_ctrl, 0); REGSTORE(regs->dma_bdbase, 0); /* Translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */ REGSTORE(regs->io_map, REGLOAD(regs->io_map) & 0x0000ffff); /* set 1:1 mapping between AHB -> PCI memory space, for all Masters * Each AHB master has it's own mapping registers. Max 16 AHB masters. */ for (i = 0; i < 16; i++) REGSTORE(regs->ahbmst_map[i], priv->pci_area); /* Get the GRPCI2 Host PCI ID */ grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid); /* Get address to first (always defined) capability structure */ grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr); /* Enable/Disable Byte twisting */ grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map); io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map); /* Setup the Host's PCI Target BARs for other peripherals to access, * and do DMA to the host's memory. The target BARs can be sized and * enabled individually. 
* * User may set custom target BARs, but default is: * The first BARs is used to map kernel low (DMA is part of normal * region on sparc which is SRMMU_MAXMEM big) main memory 1:1 to the * PCI bus, the other BARs are disabled. We assume that the first BAR * is always available. */ for (i = 0; i < 6; i++) { if (barcfg[i].pciadr != ~0 && barcfg[i].ahbadr != ~0) { /* Target BARs must have the proper alignment */ ahbadr = barcfg[i].ahbadr; pciadr = barcfg[i].pciadr; bar_sz = ((pciadr - 1) & ~pciadr) + 1; } else { if (i == 0) { /* Map main memory */ bar_sz = 0xf0000008; /* 256MB prefetchable */ ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN( (unsigned long) &_end)); pciadr = ahbadr; } else { bar_sz = 0; ahbadr = 0; pciadr = 0; } } grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz); grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", i, pciadr, ahbadr); } /* set as bus master and enable pci memory responses */ grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data); data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data); /* Enable Error respone (CPU-TRAP) on illegal memory access. 
*/ REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); } static irqreturn_t grpci2_jump_interrupt(int irq, void *arg) { printk(KERN_ERR "GRPCI2: Jump IRQ happened\n"); return IRQ_NONE; } /* Handle GRPCI2 Error Interrupt */ static irqreturn_t grpci2_err_interrupt(int irq, void *arg) { struct grpci2_priv *priv = arg; struct grpci2_regs *regs = priv->regs; unsigned int status; status = REGLOAD(regs->sts_cap); if ((status & STS_ERR_IRQ) == 0) return IRQ_NONE; if (status & STS_IPARERR) printk(KERN_ERR "GRPCI2: Parity Error\n"); if (status & STS_ITGTABRT) printk(KERN_ERR "GRPCI2: Target Abort\n"); if (status & STS_IMSTABRT) printk(KERN_ERR "GRPCI2: Master Abort\n"); if (status & STS_ISYSERR) printk(KERN_ERR "GRPCI2: System Error\n"); /* Clear handled INT TYPE IRQs */ REGSTORE(regs->sts_cap, status & STS_ERR_IRQ); return IRQ_HANDLED; } static int __devinit grpci2_of_probe(struct platform_device *ofdev) { struct grpci2_regs *regs; struct grpci2_priv *priv; int err, i, len; const int *tmp; unsigned int capability; if (grpci2priv) { printk(KERN_ERR "GRPCI2: only one GRPCI2 core supported\n"); return -ENODEV; } if (ofdev->num_resources < 3) { printk(KERN_ERR "GRPCI2: not enough APB/AHB resources\n"); return -EIO; } /* Find Device Address */ regs = of_ioremap(&ofdev->resource[0], 0, resource_size(&ofdev->resource[0]), "grlib-grpci2 regs"); if (regs == NULL) { printk(KERN_ERR "GRPCI2: ioremap failed\n"); return -EIO; } /* * Check that we're in Host Slot and that we can act as a Host Bridge * and not only as target. 
*/ capability = REGLOAD(regs->sts_cap); if ((capability & STS_HOST) || !(capability & STS_MST)) { printk(KERN_INFO "GRPCI2: not in host system slot\n"); err = -EIO; goto err1; } priv = grpci2priv = kzalloc(sizeof(struct grpci2_priv), GFP_KERNEL); if (grpci2priv == NULL) { err = -ENOMEM; goto err1; } memset(grpci2priv, 0, sizeof(*grpci2priv)); priv->regs = regs; priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */ priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT; printk(KERN_INFO "GRPCI2: host found at %p, irq%d\n", regs, priv->irq); /* Byte twisting should be made configurable from kernel command line */ priv->bt_enabled = 1; /* Let user do custom Target BAR assignment */ tmp = of_get_property(ofdev->dev.of_node, "barcfg", &len); if (tmp && (len == 2*4*6)) memcpy(priv->tgtbars, tmp, 2*4*6); else memset(priv->tgtbars, -1, 2*4*6); /* Limit IRQ unmasking in irq_mode 2 and 3 */ tmp = of_get_property(ofdev->dev.of_node, "irq_mask", &len); if (tmp && (len == 4)) priv->do_reset = *tmp; else priv->irq_mask = 0xf; /* Optional PCI reset. 
Force PCI reset on startup */ tmp = of_get_property(ofdev->dev.of_node, "reset", &len); if (tmp && (len == 4)) priv->do_reset = *tmp; else priv->do_reset = 0; /* Find PCI Memory, I/O and Configuration Space Windows */ priv->pci_area = ofdev->resource[1].start; priv->pci_area_end = ofdev->resource[1].end+1; priv->pci_io = ofdev->resource[2].start; priv->pci_conf = ofdev->resource[2].start + 0x10000; priv->pci_conf_end = priv->pci_conf + 0x10000; priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000); if (!priv->pci_io_va) { err = -EIO; goto err2; } printk(KERN_INFO "GRPCI2: MEMORY SPACE [0x%08lx - 0x%08lx]\n" " I/O SPACE [0x%08lx - 0x%08lx]\n" " CONFIG SPACE [0x%08lx - 0x%08lx]\n", priv->pci_area, priv->pci_area_end-1, priv->pci_io, priv->pci_conf-1, priv->pci_conf, priv->pci_conf_end-1); /* * I/O Space resources in I/O Window mapped into Virtual Adr Space * We never use low 4KB because some devices seem have problems using * address 0. */ memset(&priv->info.io_space, 0, sizeof(struct resource)); priv->info.io_space.name = "GRPCI2 PCI I/O Space"; priv->info.io_space.start = priv->pci_io_va + 0x1000; priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1; priv->info.io_space.flags = IORESOURCE_IO; /* * GRPCI2 has no prefetchable memory, map everything as * non-prefetchable memory */ memset(&priv->info.mem_space, 0, sizeof(struct resource)); priv->info.mem_space.name = "GRPCI2 PCI MEM Space"; priv->info.mem_space.start = priv->pci_area; priv->info.mem_space.end = priv->pci_area_end - 1; priv->info.mem_space.flags = IORESOURCE_MEM; if (request_resource(&iomem_resource, &priv->info.mem_space) < 0) goto err3; if (request_resource(&ioport_resource, &priv->info.io_space) < 0) goto err4; grpci2_hw_init(priv); /* * Get PCI Interrupt to System IRQ mapping and setup IRQ handling * Error IRQ always on PCI INTA. 
*/ if (priv->irq_mode < 2) { /* All PCI interrupts are shared using the same system IRQ */ leon_update_virq_handling(priv->irq, grpci2_pci_flow_irq, "pcilvl", 0); priv->irq_map[0] = grpci2_build_device_irq(1); priv->irq_map[1] = grpci2_build_device_irq(2); priv->irq_map[2] = grpci2_build_device_irq(3); priv->irq_map[3] = grpci2_build_device_irq(4); priv->virq_err = grpci2_build_device_irq(5); if (priv->irq_mode & 1) priv->virq_dma = ofdev->archdata.irqs[1]; else priv->virq_dma = grpci2_build_device_irq(6); /* Enable IRQs on LEON IRQ controller */ err = request_irq(priv->irq, grpci2_jump_interrupt, 0, "GRPCI2_JUMP", priv); if (err) printk(KERN_ERR "GRPCI2: ERR IRQ request failed\n"); } else { /* All PCI interrupts have an unique IRQ interrupt */ for (i = 0; i < 4; i++) { /* Make LEON IRQ layer handle level IRQ by acking */ leon_update_virq_handling(ofdev->archdata.irqs[i], handle_fasteoi_irq, "pcilvl", 1); priv->irq_map[i] = ofdev->archdata.irqs[i]; } priv->virq_err = priv->irq_map[0]; if (priv->irq_mode & 1) priv->virq_dma = ofdev->archdata.irqs[4]; else priv->virq_dma = priv->irq_map[0]; /* Unmask all PCI interrupts, request_irq will not do that */ REGSTORE(regs->ctrl, REGLOAD(regs->ctrl)|(priv->irq_mask&0xf)); } /* Setup IRQ handler for non-configuration space access errors */ err = request_irq(priv->virq_err, grpci2_err_interrupt, IRQF_SHARED, "GRPCI2_ERR", priv); if (err) { printk(KERN_DEBUG "GRPCI2: ERR VIRQ request failed: %d\n", err); goto err5; } /* * Enable Error Interrupts. 
PCI interrupts are unmasked once request_irq * is called by the PCI Device drivers */ REGSTORE(regs->ctrl, REGLOAD(regs->ctrl) | CTRL_EI | CTRL_SI); /* Init common layer and scan buses */ priv->info.ops = &grpci2_ops; priv->info.map_irq = grpci2_map_irq; leon_pci_init(ofdev, &priv->info); return 0; err5: release_resource(&priv->info.io_space); err4: release_resource(&priv->info.mem_space); err3: err = -ENOMEM; iounmap((void *)priv->pci_io_va); err2: kfree(priv); err1: of_iounmap(&ofdev->resource[0], regs, resource_size(&ofdev->resource[0])); return err; } static struct of_device_id grpci2_of_match[] = { { .name = "GAISLER_GRPCI2", }, { .name = "01_07c", }, {}, }; static struct platform_driver grpci2_of_driver = { .driver = { .name = "grpci2", .owner = THIS_MODULE, .of_match_table = grpci2_of_match, }, .probe = grpci2_of_probe, }; static int __init grpci2_init(void) { return platform_driver_register(&grpci2_of_driver); } subsys_initcall(grpci2_init);
gpl-2.0
high1/android_kernel_htc_golfu_solk
drivers/net/wireless/rtlwifi/ps.c
2448
18153
/****************************************************************************** * * Copyright(c) 2009-2010 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
 *
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 *****************************************************************************/

#include "wifi.h"
#include "base.h"
#include "ps.h"

/* Bring the NIC back up after a power-save shutdown: reset the TX/RX
 * rings (PCI interface only), re-run the HAL hardware init, clear the
 * HALT_NIC power-save level, re-enable interrupts and kick the watchdog
 * timer. Always returns true.
 */
bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	/*<1> reset trx ring */
	if (rtlhal->interface == INTF_PCI)
		rtlpriv->intf_ops->reset_trx_ring(hw);

	if (is_hal_stop(rtlhal))
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 ("Driver is already down!\n"));

	/*<2> Enable Adapter */
	rtlpriv->cfg->ops->hw_init(hw);
	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);

	/*<3> Enable Interrupt */
	rtlpriv->cfg->ops->enable_interrupt(hw);

	/*<enable timer> */
	rtl_watch_dog_timer_callback((unsigned long)hw);

	return true;
}
EXPORT_SYMBOL(rtl_ps_enable_nic);

/* Power the NIC down: cancel deferred work/timers first, then disable
 * interrupts and finally the hardware itself. Always returns true.
 */
bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/*<1> Stop all timer */
	rtl_deinit_deferred_work(hw);

	/*<2> Disable Interrupt */
	rtlpriv->cfg->ops->disable_interrupt(hw);

	/*<3> Disable Adapter */
	rtlpriv->cfg->ops->hw_disable(hw);

	return true;
}
EXPORT_SYMBOL(rtl_ps_disable_nic);

/* Request an RF power-state change (ERFON/ERFOFF/ERFSLEEP).
 *
 * @state_toset:    target RF state
 * @changesource:   bit identifying the requester; accumulated in
 *                  ppsc->rfoff_reason so RF only turns back on when all
 *                  off-reasons are cleared
 * @protect_or_not: when false, the rfchange_inprogress flag (guarded by
 *                  locks.rf_ps_lock) serializes concurrent changers;
 *                  when true the caller skips that protection
 *
 * Returns true when the state change was actually applied to the HW.
 */
bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
			 enum rf_pwrstate state_toset,
			 u32 changesource, bool protect_or_not)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	enum rf_pwrstate rtstate;
	bool actionallowed = false;
	u16 rfwait_cnt = 0;
	unsigned long flag;

	/*protect_or_not = true; */

	if (protect_or_not)
		goto no_protect;

	/*
	 *Only one thread can change
	 *the RF state at one time, and others
	 *should wait to be executed.
	 */
	while (true) {
		spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
		if (ppsc->rfchange_inprogress) {
			spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock,
					       flag);

			RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
				 ("RF Change in progress!"
				  "Wait to set..state_toset(%d).\n",
				  state_toset));

			/* Set RF after the previous action is done.
*/ while (ppsc->rfchange_inprogress) { rfwait_cnt++; mdelay(1); /* *Wait too long, return false to avoid *to be stuck here. */ if (rfwait_cnt > 100) return false; } } else { ppsc->rfchange_inprogress = true; spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); break; } } no_protect: rtstate = ppsc->rfpwr_state; switch (state_toset) { case ERFON: ppsc->rfoff_reason &= (~changesource); if ((changesource == RF_CHANGE_BY_HW) && (ppsc->hwradiooff == true)) { ppsc->hwradiooff = false; } if (!ppsc->rfoff_reason) { ppsc->rfoff_reason = 0; actionallowed = true; } break; case ERFOFF: if ((changesource == RF_CHANGE_BY_HW) && (ppsc->hwradiooff == false)) { ppsc->hwradiooff = true; } ppsc->rfoff_reason |= changesource; actionallowed = true; break; case ERFSLEEP: ppsc->rfoff_reason |= changesource; actionallowed = true; break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case not process\n")); break; } if (actionallowed) rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset); if (!protect_or_not) { spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); ppsc->rfchange_inprogress = false; spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); } return actionallowed; } EXPORT_SYMBOL(rtl_ps_set_rf_state); static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); ppsc->swrf_processing = true; if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) { if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) && rtlhal->interface == INTF_PCI) { rtlpriv->intf_ops->disable_aspm(hw); RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } } rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate, RF_CHANGE_BY_IPS, false); if (ppsc->inactive_pwrstate == ERFOFF && rtlhal->interface == INTF_PCI) { if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { 
rtlpriv->intf_ops->enable_aspm(hw); RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } } ppsc->swrf_processing = false; } void rtl_ips_nic_off_wq_callback(void *data) { struct rtl_works *rtlworks = container_of_dwork_rtl(data, struct rtl_works, ips_nic_off_wq); struct ieee80211_hw *hw = rtlworks->hw; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); enum rf_pwrstate rtstate; if (mac->opmode != NL80211_IFTYPE_STATION) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("not station return\n")); return; } if (mac->link_state > MAC80211_NOLINK) return; if (is_hal_stop(rtlhal)) return; if (rtlpriv->sec.being_setkey) return; if (ppsc->inactiveps) { rtstate = ppsc->rfpwr_state; /* *Do not enter IPS in the following conditions: *(1) RF is already OFF or Sleep *(2) swrf_processing (indicates the IPS is still under going) *(3) Connectted (only disconnected can trigger IPS) *(4) IBSS (send Beacon) *(5) AP mode (send Beacon) *(6) monitor mode (rcv packet) */ if (rtstate == ERFON && !ppsc->swrf_processing && (mac->link_state == MAC80211_NOLINK) && !mac->act_scanning) { RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("IPSEnter(): Turn off RF.\n")); ppsc->inactive_pwrstate = ERFOFF; ppsc->in_powersavemode = true; /*rtl_pci_reset_trx_ring(hw); */ _rtl_ps_inactive_ps(hw); } } } void rtl_ips_nic_off(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); /* *because when link with ap, mac80211 will ask us *to disable nic quickly after scan before linking, *this will cause link failed, so we delay 100ms here */ queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ips_nic_off_wq, MSECS(100)); } void rtl_ips_nic_on(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); enum rf_pwrstate rtstate; unsigned long flags; if (mac->opmode != NL80211_IFTYPE_STATION) 
return; spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags); if (ppsc->inactiveps) { rtstate = ppsc->rfpwr_state; if (rtstate != ERFON && !ppsc->swrf_processing && ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) { ppsc->inactive_pwrstate = ERFON; ppsc->in_powersavemode = false; _rtl_ps_inactive_ps(hw); } } spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags); } /*for FW LPS*/ /* *Determine if we can set Fw into PS mode *in current condition.Return TRUE if it *can enter PS mode. */ static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); u32 ps_timediff; ps_timediff = jiffies_to_msecs(jiffies - ppsc->last_delaylps_stamp_jiffies); if (ps_timediff < 2000) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("Delay enter Fw LPS for DHCP, ARP," " or EAPOL exchanging state.\n")); return false; } if (mac->link_state != MAC80211_LINKED) return false; if (mac->opmode == NL80211_IFTYPE_ADHOC) return false; return true; } /* Change current and default preamble mode.*/ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); u8 rpwm_val, fw_pwrmode; if (mac->opmode == NL80211_IFTYPE_ADHOC) return; if (mac->link_state != MAC80211_LINKED) return; if (ppsc->dot11_psmode == rt_psmode) return; /* Update power save mode configured. */ ppsc->dot11_psmode = rt_psmode; /* *<FW control LPS> *1. Enter PS mode * Set RPWM to Fw to turn RF off and send H2C fw_pwrmode * cmd to set Fw into PS mode. *2. Leave PS mode * Send H2C fw_pwrmode cmd to Fw to set Fw into Active * mode and set RPWM to turn RF on. 
*/ if ((ppsc->fwctrl_lps) && ppsc->report_linked) { bool fw_current_inps; if (ppsc->dot11_psmode == EACTIVE) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, ("FW LPS leave ps_mode:%x\n", FW_PS_ACTIVE_MODE)); rpwm_val = 0x0C; /* RF on */ fw_pwrmode = FW_PS_ACTIVE_MODE; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, (u8 *) (&rpwm_val)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, (u8 *) (&fw_pwrmode)); fw_current_inps = false; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, (u8 *) (&fw_current_inps)); } else { if (rtl_get_fwlps_doze(hw)) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, ("FW LPS enter ps_mode:%x\n", ppsc->fwctrl_psmode)); rpwm_val = 0x02; /* RF off */ fw_current_inps = true; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, (u8 *) (&fw_current_inps)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, (u8 *) (&ppsc->fwctrl_psmode)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, (u8 *) (&rpwm_val)); } else { /* Reset the power save related parameters. */ ppsc->dot11_psmode = EACTIVE; } } } } /*Enter the leisure power save mode.*/ void rtl_lps_enter(struct ieee80211_hw *hw) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); unsigned long flag; if (!ppsc->fwctrl_lps) return; if (rtlpriv->sec.being_setkey) return; if (rtlpriv->link_info.busytraffic) return; /*sleep after linked 10s, to let DHCP and 4-way handshake ok enough!! */ if (mac->cnt_after_linked < 5) return; if (mac->opmode == NL80211_IFTYPE_ADHOC) return; if (mac->link_state != MAC80211_LINKED) return; spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); /* Idle for a while if we connect to AP a while ago. 
*/ if (mac->cnt_after_linked >= 2) { if (ppsc->dot11_psmode == EACTIVE) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("Enter 802.11 power save mode...\n")); rtl_lps_set_psmode(hw, EAUTOPS); } } spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); } /*Leave the leisure power save mode.*/ void rtl_lps_leave(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); unsigned long flag; spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); if (ppsc->fwctrl_lps) { if (ppsc->dot11_psmode != EACTIVE) { /*FIX ME */ rtlpriv->cfg->ops->enable_interrupt(hw); if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM && RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) && rtlhal->interface == INTF_PCI) { rtlpriv->intf_ops->disable_aspm(hw); RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("Busy Traffic,Leave 802.11 power save..\n")); rtl_lps_set_psmode(hw, EACTIVE); } } spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); } /* For sw LPS*/ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct ieee80211_hdr *hdr = (void *) data; struct ieee80211_tim_ie *tim_ie; u8 *tim; u8 tim_len; bool u_buffed; bool m_buffed; if (mac->opmode != NL80211_IFTYPE_STATION) return; if (!rtlpriv->psc.swctrl_lps) return; if (rtlpriv->mac80211.link_state != MAC80211_LINKED) return; if (!rtlpriv->psc.sw_ps_enabled) return; if (rtlpriv->psc.fwctrl_lps) return; if (likely(!(hw->conf.flags & IEEE80211_CONF_PS))) return; /* check if this really is a beacon */ if (!ieee80211_is_beacon(hdr->frame_control)) return; /* min. 
beacon length + FCS_LEN */ if (len <= 40 + FCS_LEN) return; /* and only beacons from the associated BSSID, please */ if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid)) return; rtlpriv->psc.last_beacon = jiffies; tim = rtl_find_ie(data, len - FCS_LEN, WLAN_EID_TIM); if (!tim) return; if (tim[1] < sizeof(*tim_ie)) return; tim_len = tim[1]; tim_ie = (struct ieee80211_tim_ie *) &tim[2]; if (!WARN_ON_ONCE(!hw->conf.ps_dtim_period)) rtlpriv->psc.dtim_counter = tim_ie->dtim_count; /* Check whenever the PHY can be turned off again. */ /* 1. What about buffered unicast traffic for our AID? */ u_buffed = ieee80211_check_tim(tim_ie, tim_len, rtlpriv->mac80211.assoc_id); /* 2. Maybe the AP wants to send multicast/broadcast data? */ m_buffed = tim_ie->bitmap_ctrl & 0x01; rtlpriv->psc.multi_buffered = m_buffed; /* unicast will process by mac80211 through * set ~IEEE80211_CONF_PS, So we just check * multicast frames here */ if (!m_buffed) { /* back to low-power land. and delay is * prevent null power save frame tx fail */ queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_work, MSECS(5)); } else { RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, ("u_bufferd: %x, " "m_buffered: %x\n", u_buffed, m_buffed)); } } void rtl_swlps_rf_awake(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); unsigned long flag; if (!rtlpriv->psc.swctrl_lps) return; if (mac->link_state != MAC80211_LINKED) return; if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM && RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { rtlpriv->intf_ops->disable_aspm(hw); RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS, false); spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); } void rtl_swlps_rfon_wq_callback(void *data) { struct rtl_works *rtlworks = container_of_dwork_rtl(data, struct rtl_works, ps_rfon_wq); struct 
ieee80211_hw *hw = rtlworks->hw; rtl_swlps_rf_awake(hw); } void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); unsigned long flag; u8 sleep_intv; if (!rtlpriv->psc.sw_ps_enabled) return; if ((rtlpriv->sec.being_setkey) || (mac->opmode == NL80211_IFTYPE_ADHOC)) return; /*sleep after linked 10s, to let DHCP and 4-way handshake ok enough!! */ if ((mac->link_state != MAC80211_LINKED) || (mac->cnt_after_linked < 5)) return; if (rtlpriv->link_info.busytraffic) return; spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); if (rtlpriv->psc.rfchange_inprogress) { spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); return; } spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS, false); spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { rtlpriv->intf_ops->enable_aspm(hw); RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } /* here is power save alg, when this beacon is DTIM * we will set sleep time to dtim_period * n; * when this beacon is not DTIM, we will set sleep * time to sleep_intv = rtlpriv->psc.dtim_counter or * MAX_SW_LPS_SLEEP_INTV(default set to 5) */ if (rtlpriv->psc.dtim_counter == 0) { if (hw->conf.ps_dtim_period == 1) sleep_intv = hw->conf.ps_dtim_period * 2; else sleep_intv = hw->conf.ps_dtim_period; } else { sleep_intv = rtlpriv->psc.dtim_counter; } if (sleep_intv > MAX_SW_LPS_SLEEP_INTV) sleep_intv = MAX_SW_LPS_SLEEP_INTV; /* this print should always be dtim_conter = 0 & * sleep = dtim_period, that meaons, we should * awake before every dtim */ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, ("dtim_counter:%x will sleep :%d" " beacon_intv\n", rtlpriv->psc.dtim_counter, sleep_intv)); /* we tested that 40ms is enough for sw & hw sw delay 
*/ queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq, MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40)); } void rtl_swlps_wq_callback(void *data) { struct rtl_works *rtlworks = container_of_dwork_rtl(data, struct rtl_works, ps_work); struct ieee80211_hw *hw = rtlworks->hw; struct rtl_priv *rtlpriv = rtl_priv(hw); bool ps = false; ps = (hw->conf.flags & IEEE80211_CONF_PS); /* we can sleep after ps null send ok */ if (rtlpriv->psc.state_inap) { rtl_swlps_rf_sleep(hw); if (rtlpriv->psc.state && !ps) { rtlpriv->psc.sleep_ms = jiffies_to_msecs(jiffies - rtlpriv->psc.last_action); } if (ps) rtlpriv->psc.last_slept = jiffies; rtlpriv->psc.last_action = jiffies; rtlpriv->psc.state = ps; } }
gpl-2.0
sgp-blackphone/Blackphone-BP1-Kernel
drivers/staging/comedi/drivers/cb_pcidio.c
3984
10125
/* comedi/drivers/cb_pcidio.c A Comedi driver for PCI-DIO24H & PCI-DIO48H of ComputerBoards (currently MeasurementComputing) COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: cb_pcidio Description: ComputerBoards' DIO boards with PCI interface Devices: [Measurement Computing] PCI-DIO24 (cb_pcidio), PCI-DIO24H, PCI-DIO48H Author: Yoshiya Matsuzaka Updated: Mon, 29 Oct 2007 15:40:47 +0000 Status: experimental This driver has been modified from skel.c of comedi-0.7.70. Configuration Options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first available PCI device will be used. Passing a zero for an option is the same as leaving it unspecified. */ /*------------------------------ HEADER FILES ---------------------------------*/ #include "../comedidev.h" #include "comedi_pci.h" #include "8255.h" /*-------------------------- MACROS and DATATYPES -----------------------------*/ #define PCI_VENDOR_ID_CB 0x1307 /* * Board descriptions for two imaginary boards. Describing the * boards in this way is optional, and completely driver-dependent. * Some drivers use arrays such as this, other do not. 
*/ struct pcidio_board { const char *name; /* name of the board */ int dev_id; int n_8255; /* number of 8255 chips on board */ /* indices of base address regions */ int pcicontroler_badrindex; int dioregs_badrindex; }; static const struct pcidio_board pcidio_boards[] = { { .name = "pci-dio24", .dev_id = 0x0028, .n_8255 = 1, .pcicontroler_badrindex = 1, .dioregs_badrindex = 2, }, { .name = "pci-dio24h", .dev_id = 0x0014, .n_8255 = 1, .pcicontroler_badrindex = 1, .dioregs_badrindex = 2, }, { .name = "pci-dio48h", .dev_id = 0x000b, .n_8255 = 2, .pcicontroler_badrindex = 0, .dioregs_badrindex = 1, }, }; /* This is used by modprobe to translate PCI IDs to drivers. Should * only be used for PCI and ISA-PnP devices */ /* Please add your PCI vendor ID to comedidev.h, and it will be forwarded * upstream. */ static DEFINE_PCI_DEVICE_TABLE(pcidio_pci_table) = { { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0028) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0014) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x000b) }, { 0 } }; MODULE_DEVICE_TABLE(pci, pcidio_pci_table); /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct pcidio_board *)dev->board_ptr) /* this structure is for data unique to this hardware driver. If several hardware drivers keep similar information in this structure, feel free to suggest moving the variable to the struct comedi_device struct. */ struct pcidio_private { int data; /* currently unused */ /* would be useful for a PCI device */ struct pci_dev *pci_dev; /* used for DO readback, currently unused */ unsigned int do_readback[4]; /* up to 4 unsigned int suffice to hold 96 bits for PCI-DIO96 */ unsigned long dio_reg_base; /* address of port A of the first 8255 chip on board */ }; /* * most drivers define the following macro to make it easy to * access the private structure. 
*/ #define devpriv ((struct pcidio_private *)dev->private) /* * The struct comedi_driver structure tells the Comedi core module * which functions to call to configure/deconfigure (attach/detach) * the board, and also about the kernel module that contains * the device code. */ static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int pcidio_detach(struct comedi_device *dev); static struct comedi_driver driver_cb_pcidio = { .driver_name = "cb_pcidio", .module = THIS_MODULE, .attach = pcidio_attach, .detach = pcidio_detach, /* It is not necessary to implement the following members if you are * writing a driver for a ISA PnP or PCI card */ /* Most drivers will support multiple types of boards by * having an array of board structures. These were defined * in pcidio_boards[] above. Note that the element 'name' * was first in the structure -- Comedi uses this fact to * extract the name of the board without knowing any details * about the structure except for its length. * When a device is attached (by comedi_config), the name * of the device is given to Comedi, and Comedi tries to * match it by going through the list of board names. If * there is a match, the address of the pointer is put * into dev->board_ptr and driver->attach() is called. * * Note that these are not necessary if you can determine * the type of board in software. ISA PnP, PCI, and PCMCIA * devices are such boards. */ /* The following fields should NOT be initialized if you are dealing * with PCI devices * * .board_name = pcidio_boards, * .offset = sizeof(struct pcidio_board), * .num_names = sizeof(pcidio_boards) / sizeof(structpcidio_board), */ }; /*------------------------------- FUNCTIONS -----------------------------------*/ /* * Attach is called by the Comedi core to configure the driver * for a particular board. If you specified a board_name array * in the driver structure, dev->board_ptr contains that * address. 
*/ static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct pci_dev *pcidev = NULL; int index; int i; /* * Allocate the private structure area. alloc_private() is a * convenient macro defined in comedidev.h. */ if (alloc_private(dev, sizeof(struct pcidio_private)) < 0) return -ENOMEM; /* * If you can probe the device to determine what device in a series * it is, this is the place to do it. Otherwise, dev->board_ptr * should already be initialized. */ /* * Probe the device to determine what device in the series it is. */ for_each_pci_dev(pcidev) { /* is it not a computer boards card? */ if (pcidev->vendor != PCI_VENDOR_ID_CB) continue; /* loop through cards supported by this driver */ for (index = 0; index < ARRAY_SIZE(pcidio_boards); index++) { if (pcidio_boards[index].dev_id != pcidev->device) continue; /* was a particular bus/slot requested? */ if (it->options[0] || it->options[1]) { /* are we on the wrong bus/slot? */ if (pcidev->bus->number != it->options[0] || PCI_SLOT(pcidev->devfn) != it->options[1]) { continue; } } dev->board_ptr = pcidio_boards + index; goto found; } } dev_err(dev->hw_dev, "No supported ComputerBoards/MeasurementComputing card found on requested position\n"); return -EIO; found: /* * Initialize dev->board_name. Note that we can use the "thisboard" * macro now, since we just initialized it in the last line. */ dev->board_name = thisboard->name; devpriv->pci_dev = pcidev; dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n", thisboard->name, devpriv->pci_dev->bus->number, PCI_SLOT(devpriv->pci_dev->devfn)); if (comedi_pci_enable(pcidev, thisboard->name)) return -EIO; devpriv->dio_reg_base = pci_resource_start(devpriv->pci_dev, pcidio_boards[index].dioregs_badrindex); /* * Allocate the subdevice structures. alloc_subdevice() is a * convenient macro defined in comedidev.h. 
*/ if (alloc_subdevices(dev, thisboard->n_8255) < 0) return -ENOMEM; for (i = 0; i < thisboard->n_8255; i++) { subdev_8255_init(dev, dev->subdevices + i, NULL, devpriv->dio_reg_base + i * 4); dev_dbg(dev->hw_dev, "subdev %d: base = 0x%lx\n", i, devpriv->dio_reg_base + i * 4); } return 1; } /* * _detach is called to deconfigure a device. It should deallocate * resources. * This function is also called when _attach() fails, so it should be * careful not to release resources that were not necessarily * allocated by _attach(). dev->private and dev->subdevices are * deallocated automatically by the core. */ static int pcidio_detach(struct comedi_device *dev) { if (devpriv) { if (devpriv->pci_dev) { if (devpriv->dio_reg_base) comedi_pci_disable(devpriv->pci_dev); pci_dev_put(devpriv->pci_dev); } } if (dev->subdevices) { int i; for (i = 0; i < thisboard->n_8255; i++) subdev_8255_cleanup(dev, dev->subdevices + i); } return 0; } /* * A convenient macro that defines init_module() and cleanup_module(), * as necessary. 
*/ static int __devinit driver_cb_pcidio_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_cb_pcidio.driver_name); } static void __devexit driver_cb_pcidio_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_cb_pcidio_pci_driver = { .id_table = pcidio_pci_table, .probe = &driver_cb_pcidio_pci_probe, .remove = __devexit_p(&driver_cb_pcidio_pci_remove) }; static int __init driver_cb_pcidio_init_module(void) { int retval; retval = comedi_driver_register(&driver_cb_pcidio); if (retval < 0) return retval; driver_cb_pcidio_pci_driver.name = (char *)driver_cb_pcidio.driver_name; return pci_register_driver(&driver_cb_pcidio_pci_driver); } static void __exit driver_cb_pcidio_cleanup_module(void) { pci_unregister_driver(&driver_cb_pcidio_pci_driver); comedi_driver_unregister(&driver_cb_pcidio); } module_init(driver_cb_pcidio_init_module); module_exit(driver_cb_pcidio_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
royale1223/omap-kernel
arch/sh/boards/mach-rsk/setup.c
4496
2504
/* * Renesas Technology Europe RSK+ Support. * * Copyright (C) 2008 Paul Mundt * Copyright (C) 2008 Peter Griffin <pgriffin@mpc-data.co.uk> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/types.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #ifdef CONFIG_MTD #include <linux/mtd/map.h> #endif #include <asm/machvec.h> #include <asm/io.h> static struct mtd_partition rsk_partitions[] = { { .name = "Bootloader", .offset = 0x00000000, .size = 0x00040000, .mask_flags = MTD_WRITEABLE, }, { .name = "Kernel", .offset = MTDPART_OFS_NXTBLK, .size = 0x001c0000, }, { .name = "Flash_FS", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, } }; static struct physmap_flash_data flash_data = { .parts = rsk_partitions, .nr_parts = ARRAY_SIZE(rsk_partitions), .width = 2, }; static struct resource flash_resource = { .start = 0x20000000, .end = 0x20400000, .flags = IORESOURCE_MEM, }; static struct platform_device flash_device = { .name = "physmap-flash", .id = -1, .resource = &flash_resource, .num_resources = 1, .dev = { .platform_data = &flash_data, }, }; #ifdef CONFIG_MTD static const char *probes[] = { "cmdlinepart", NULL }; static struct map_info rsk_flash_map = { .name = "RSK+ Flash", .size = 0x400000, .bankwidth = 2, }; static struct mtd_info *flash_mtd; static struct mtd_partition *parsed_partitions; static void __init set_mtd_partitions(void) { int nr_parts = 0; simple_map_init(&rsk_flash_map); flash_mtd = do_map_probe("cfi_probe", &rsk_flash_map); nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_partitions, 0); /* If there is no partition table, used the hard coded table */ if (nr_parts > 0) { flash_data.nr_parts = nr_parts; flash_data.parts = parsed_partitions; } } #else static inline void 
set_mtd_partitions(void) {} #endif static struct platform_device *rsk_devices[] __initdata = { &flash_device, }; static int __init rsk_devices_setup(void) { set_mtd_partitions(); return platform_add_devices(rsk_devices, ARRAY_SIZE(rsk_devices)); } device_initcall(rsk_devices_setup); /* * The Machine Vector */ static struct sh_machine_vector mv_rsk __initmv = { .mv_name = "RSK+", };
gpl-2.0
ouyanghy/nanopi2
arch/powerpc/sysdev/xics/ics-opal.c
5008
5979
/* * ICS backend for OPAL managed interrupts. * * Copyright 2011 IBM Corp. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #undef DEBUG #include <linux/types.h> #include <linux/kernel.h> #include <linux/irq.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/of.h> #include <linux/spinlock.h> #include <linux/msi.h> #include <asm/prom.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/irq.h> #include <asm/errno.h> #include <asm/xics.h> #include <asm/opal.h> #include <asm/firmware.h> static int ics_opal_mangle_server(int server) { /* No link for now */ return server << 2; } static int ics_opal_unmangle_server(int server) { /* No link for now */ return server >> 2; } static void ics_opal_unmask_irq(struct irq_data *d) { unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); int64_t rc; int server; pr_devel("ics-hal: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq); if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) return; server = xics_get_irq_server(d->irq, d->affinity, 0); server = ics_opal_mangle_server(server); rc = opal_set_xive(hw_irq, server, DEFAULT_PRIORITY); if (rc != OPAL_SUCCESS) pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)" " error %lld\n", __func__, d->irq, hw_irq, server, rc); } static unsigned int ics_opal_startup(struct irq_data *d) { #ifdef CONFIG_PCI_MSI /* * The generic MSI code returns with the interrupt disabled on the * card, using the MSI mask bits. Firmware doesn't appear to unmask * at that level, so we do it here by hand. 
*/ if (d->msi_desc) unmask_msi_irq(d); #endif /* unmask it */ ics_opal_unmask_irq(d); return 0; } static void ics_opal_mask_real_irq(unsigned int hw_irq) { int server = ics_opal_mangle_server(xics_default_server); int64_t rc; if (hw_irq == XICS_IPI) return; /* Have to set XIVE to 0xff to be able to remove a slot */ rc = opal_set_xive(hw_irq, server, 0xff); if (rc != OPAL_SUCCESS) pr_err("%s: opal_set_xive(0xff) irq=%u returned %lld\n", __func__, hw_irq, rc); } static void ics_opal_mask_irq(struct irq_data *d) { unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); pr_devel("ics-hal: mask virq %d [hw 0x%x]\n", d->irq, hw_irq); if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) return; ics_opal_mask_real_irq(hw_irq); } static int ics_opal_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); int16_t server; int8_t priority; int64_t rc; int wanted_server; if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) return -1; rc = opal_get_xive(hw_irq, &server, &priority); if (rc != OPAL_SUCCESS) { pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)" " error %lld\n", __func__, d->irq, hw_irq, server, rc); return -1; } wanted_server = xics_get_irq_server(d->irq, cpumask, 1); if (wanted_server < 0) { char cpulist[128]; cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); pr_warning("%s: No online cpus in the mask %s for irq %d\n", __func__, cpulist, d->irq); return -1; } server = ics_opal_mangle_server(wanted_server); pr_devel("ics-hal: set-affinity irq %d [hw 0x%x] server: 0x%x/0x%x\n", d->irq, hw_irq, wanted_server, server); rc = opal_set_xive(hw_irq, server, priority); if (rc != OPAL_SUCCESS) { pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)" " error %lld\n", __func__, d->irq, hw_irq, server, rc); return -1; } return 0; } static struct irq_chip ics_opal_irq_chip = { .name = "OPAL ICS", .irq_startup = ics_opal_startup, .irq_mask = ics_opal_mask_irq, .irq_unmask = ics_opal_unmask_irq, 
.irq_eoi = NULL, /* Patched at init time */ .irq_set_affinity = ics_opal_set_affinity }; static int ics_opal_map(struct ics *ics, unsigned int virq); static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec); static long ics_opal_get_server(struct ics *ics, unsigned long vec); static int ics_opal_host_match(struct ics *ics, struct device_node *node) { return 1; } /* Only one global & state struct ics */ static struct ics ics_hal = { .map = ics_opal_map, .mask_unknown = ics_opal_mask_unknown, .get_server = ics_opal_get_server, .host_match = ics_opal_host_match, }; static int ics_opal_map(struct ics *ics, unsigned int virq) { unsigned int hw_irq = (unsigned int)virq_to_hw(virq); int64_t rc; int16_t server; int8_t priority; if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)) return -EINVAL; /* Check if HAL knows about this interrupt */ rc = opal_get_xive(hw_irq, &server, &priority); if (rc != OPAL_SUCCESS) return -ENXIO; irq_set_chip_and_handler(virq, &ics_opal_irq_chip, handle_fasteoi_irq); irq_set_chip_data(virq, &ics_hal); return 0; } static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec) { int64_t rc; int16_t server; int8_t priority; /* Check if HAL knows about this interrupt */ rc = opal_get_xive(vec, &server, &priority); if (rc != OPAL_SUCCESS) return; ics_opal_mask_real_irq(vec); } static long ics_opal_get_server(struct ics *ics, unsigned long vec) { int64_t rc; int16_t server; int8_t priority; /* Check if HAL knows about this interrupt */ rc = opal_get_xive(vec, &server, &priority); if (rc != OPAL_SUCCESS) return -1; return ics_opal_unmangle_server(server); } int __init ics_opal_init(void) { if (!firmware_has_feature(FW_FEATURE_OPAL)) return -ENODEV; /* We need to patch our irq chip's EOI to point to the * right ICP */ ics_opal_irq_chip.irq_eoi = icp_ops->eoi; /* Register ourselves */ xics_register_ics(&ics_hal); pr_info("ICS OPAL backend registered\n"); return 0; }
gpl-2.0
AmperificSuperKANG/lge-kernel-loki
drivers/rapidio/switches/idt_gen2.c
5520
11584
/* * IDT CPS Gen.2 Serial RapidIO switch family support * * Copyright 2010 Integrated Device Technology, Inc. * Alexandre Bounine <alexandre.bounine@idt.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stat.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/rio_ids.h> #include <linux/delay.h> #include "../rio.h" #define LOCAL_RTE_CONF_DESTID_SEL 0x010070 #define LOCAL_RTE_CONF_DESTID_SEL_PSEL 0x0000001f #define IDT_LT_ERR_REPORT_EN 0x03100c #define IDT_PORT_ERR_REPORT_EN(n) (0x031044 + (n)*0x40) #define IDT_PORT_ERR_REPORT_EN_BC 0x03ff04 #define IDT_PORT_ISERR_REPORT_EN(n) (0x03104C + (n)*0x40) #define IDT_PORT_ISERR_REPORT_EN_BC 0x03ff0c #define IDT_PORT_INIT_TX_ACQUIRED 0x00000020 #define IDT_LANE_ERR_REPORT_EN(n) (0x038010 + (n)*0x100) #define IDT_LANE_ERR_REPORT_EN_BC 0x03ff10 #define IDT_DEV_CTRL_1 0xf2000c #define IDT_DEV_CTRL_1_GENPW 0x02000000 #define IDT_DEV_CTRL_1_PRSTBEH 0x00000001 #define IDT_CFGBLK_ERR_CAPTURE_EN 0x020008 #define IDT_CFGBLK_ERR_REPORT 0xf20014 #define IDT_CFGBLK_ERR_REPORT_GENPW 0x00000002 #define IDT_AUX_PORT_ERR_CAP_EN 0x020000 #define IDT_AUX_ERR_REPORT_EN 0xf20018 #define IDT_AUX_PORT_ERR_LOG_I2C 0x00000002 #define IDT_AUX_PORT_ERR_LOG_JTAG 0x00000001 #define IDT_ISLTL_ADDRESS_CAP 0x021014 #define IDT_RIO_DOMAIN 0xf20020 #define IDT_RIO_DOMAIN_MASK 0x000000ff #define IDT_PW_INFO_CSR 0xf20024 #define IDT_SOFT_RESET 0xf20040 #define IDT_SOFT_RESET_REQ 0x00030097 #define IDT_I2C_MCTRL 0xf20050 #define IDT_I2C_MCTRL_GENPW 0x04000000 #define IDT_JTAG_CTRL 0xf2005c #define IDT_JTAG_CTRL_GENPW 0x00000002 #define IDT_LANE_CTRL(n) (0xff8000 + (n)*0x100) #define IDT_LANE_CTRL_BC 0xffff00 #define IDT_LANE_CTRL_GENPW 0x00200000 #define IDT_LANE_DFE_1_BC 0xffff18 #define IDT_LANE_DFE_2_BC 0xffff1c #define 
IDT_PORT_OPS(n) (0xf40004 + (n)*0x100) #define IDT_PORT_OPS_GENPW 0x08000000 #define IDT_PORT_OPS_PL_ELOG 0x00000040 #define IDT_PORT_OPS_LL_ELOG 0x00000020 #define IDT_PORT_OPS_LT_ELOG 0x00000010 #define IDT_PORT_OPS_BC 0xf4ff04 #define IDT_PORT_ISERR_DET(n) (0xf40008 + (n)*0x100) #define IDT_ERR_CAP 0xfd0000 #define IDT_ERR_CAP_LOG_OVERWR 0x00000004 #define IDT_ERR_RD 0xfd0004 #define IDT_DEFAULT_ROUTE 0xde #define IDT_NO_ROUTE 0xdf static int idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port) { /* * Select routing table to update */ if (table == RIO_GLOBAL_TABLE) table = 0; else table++; if (route_port == RIO_INVALID_ROUTE) route_port = IDT_DEFAULT_ROUTE; rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); /* * Program destination port for the specified destID */ rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, (u32)route_destid); rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, (u32)route_port); udelay(10); return 0; } static int idtg2_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 *route_port) { u32 result; /* * Select routing table to read */ if (table == RIO_GLOBAL_TABLE) table = 0; else table++; rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); rio_mport_read_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); if (IDT_DEFAULT_ROUTE == (u8)result || IDT_NO_ROUTE == (u8)result) *route_port = RIO_INVALID_ROUTE; else *route_port = (u8)result; return 0; } static int idtg2_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table) { u32 i; /* * Select routing table to read */ if (table == RIO_GLOBAL_TABLE) table = 0; else table++; rio_mport_write_config_32(mport, destid, 
hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); for (i = RIO_STD_RTE_CONF_EXTCFGEN; i <= (RIO_STD_RTE_CONF_EXTCFGEN | 0xff);) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, i); rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, (IDT_DEFAULT_ROUTE << 24) | (IDT_DEFAULT_ROUTE << 16) | (IDT_DEFAULT_ROUTE << 8) | IDT_DEFAULT_ROUTE); i += 4; } return 0; } static int idtg2_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount, u8 sw_domain) { /* * Switch domain configuration operates only at global level */ rio_mport_write_config_32(mport, destid, hopcount, IDT_RIO_DOMAIN, (u32)sw_domain); return 0; } static int idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, u8 *sw_domain) { u32 regval; /* * Switch domain configuration operates only at global level */ rio_mport_read_config_32(mport, destid, hopcount, IDT_RIO_DOMAIN, &regval); *sw_domain = (u8)(regval & 0xff); return 0; } static int idtg2_em_init(struct rio_dev *rdev) { u32 regval; int i, tmp; /* * This routine performs device-specific initialization only. * All standard EM configuration should be performed at upper level. */ pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); /* Set Port-Write info CSR: PRIO=3 and CRF=1 */ rio_write_config_32(rdev, IDT_PW_INFO_CSR, 0x0000e000); /* * Configure LT LAYER error reporting. */ /* Enable standard (RIO.p8) error reporting */ rio_write_config_32(rdev, IDT_LT_ERR_REPORT_EN, REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR | REM_LTL_ERR_UNSUPTR); /* Use Port-Writes for LT layer error reporting. * Enable per-port reset */ rio_read_config_32(rdev, IDT_DEV_CTRL_1, &regval); rio_write_config_32(rdev, IDT_DEV_CTRL_1, regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH); /* * Configure PORT error reporting. 
*/ /* Report all RIO.p8 errors supported by device */ rio_write_config_32(rdev, IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037); /* Configure reporting of implementation specific errors/events */ rio_write_config_32(rdev, IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED); /* Use Port-Writes for port error reporting and enable error logging */ tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo); for (i = 0; i < tmp; i++) { rio_read_config_32(rdev, IDT_PORT_OPS(i), &regval); rio_write_config_32(rdev, IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW | IDT_PORT_OPS_PL_ELOG | IDT_PORT_OPS_LL_ELOG | IDT_PORT_OPS_LT_ELOG); } /* Overwrite error log if full */ rio_write_config_32(rdev, IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR); /* * Configure LANE error reporting. */ /* Disable line error reporting */ rio_write_config_32(rdev, IDT_LANE_ERR_REPORT_EN_BC, 0); /* Use Port-Writes for lane error reporting (when enabled) * (do per-lane update because lanes may have different configuration) */ tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16; for (i = 0; i < tmp; i++) { rio_read_config_32(rdev, IDT_LANE_CTRL(i), &regval); rio_write_config_32(rdev, IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW); } /* * Configure AUX error reporting. */ /* Disable JTAG and I2C Error capture */ rio_write_config_32(rdev, IDT_AUX_PORT_ERR_CAP_EN, 0); /* Disable JTAG and I2C Error reporting/logging */ rio_write_config_32(rdev, IDT_AUX_ERR_REPORT_EN, 0); /* Disable Port-Write notification from JTAG */ rio_write_config_32(rdev, IDT_JTAG_CTRL, 0); /* Disable Port-Write notification from I2C */ rio_read_config_32(rdev, IDT_I2C_MCTRL, &regval); rio_write_config_32(rdev, IDT_I2C_MCTRL, regval & ~IDT_I2C_MCTRL_GENPW); /* * Configure CFG_BLK error reporting. 
*/ /* Disable Configuration Block error capture */ rio_write_config_32(rdev, IDT_CFGBLK_ERR_CAPTURE_EN, 0); /* Disable Port-Writes for Configuration Block error reporting */ rio_read_config_32(rdev, IDT_CFGBLK_ERR_REPORT, &regval); rio_write_config_32(rdev, IDT_CFGBLK_ERR_REPORT, regval & ~IDT_CFGBLK_ERR_REPORT_GENPW); /* set TVAL = ~50us */ rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); return 0; } static int idtg2_em_handler(struct rio_dev *rdev, u8 portnum) { u32 regval, em_perrdet, em_ltlerrdet; rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet); if (em_ltlerrdet) { /* Service Logical/Transport Layer Error(s) */ if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) { /* Implementation specific error reported */ rio_read_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, &regval); pr_debug("RIO: %s Implementation Specific LTL errors" \ " 0x%x @(0x%x)\n", rio_name(rdev), em_ltlerrdet, regval); /* Clear implementation specific address capture CSR */ rio_write_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, 0); } } rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet); if (em_perrdet) { /* Service Port-Level Error(s) */ if (em_perrdet & REM_PED_IMPL_SPEC) { /* Implementation Specific port error reported */ /* Get IS errors reported */ rio_read_config_32(rdev, IDT_PORT_ISERR_DET(portnum), &regval); pr_debug("RIO: %s Implementation Specific Port" \ " errors 0x%x\n", rio_name(rdev), regval); /* Clear all implementation specific events */ rio_write_config_32(rdev, IDT_PORT_ISERR_DET(portnum), 0); } } return 0; } static ssize_t idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_dev *rdev = to_rio_dev(dev); ssize_t len = 0; u32 regval; while (!rio_read_config_32(rdev, IDT_ERR_RD, &regval)) { if (!regval) /* 0 = end of log */ break; len += snprintf(buf + len, PAGE_SIZE - len, "%08x\n", regval); if (len >= (PAGE_SIZE - 10)) break; } return len; } static 
DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL); static int idtg2_sysfs(struct rio_dev *rdev, int create) { struct device *dev = &rdev->dev; int err = 0; if (create == RIO_SW_SYSFS_CREATE) { /* Initialize sysfs entries */ err = device_create_file(dev, &dev_attr_errlog); if (err) dev_err(dev, "Unable create sysfs errlog file\n"); } else device_remove_file(dev, &dev_attr_errlog); return err; } static int idtg2_switch_init(struct rio_dev *rdev, int do_enum) { pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); rdev->rswitch->add_entry = idtg2_route_add_entry; rdev->rswitch->get_entry = idtg2_route_get_entry; rdev->rswitch->clr_table = idtg2_route_clr_table; rdev->rswitch->set_domain = idtg2_set_domain; rdev->rswitch->get_domain = idtg2_get_domain; rdev->rswitch->em_init = idtg2_em_init; rdev->rswitch->em_handle = idtg2_em_handler; rdev->rswitch->sw_sysfs = idtg2_sysfs; if (do_enum) { /* Ensure that default routing is disabled on startup */ rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); } return 0; } DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1432, idtg2_switch_init);
gpl-2.0
mingit/mstcpV0.89.4_linux
sound/pci/echoaudio/gina20.c
8080
3015
/*
 * ALSA driver for Echoaudio soundcards.
 * Copyright (C) 2003-2004 Giuliano Pochini <pochini@shiny.it>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Gina20 card description.  This file only sets the capability macros and
 * the card-specific tables, then pulls in the shared echoaudio core at the
 * bottom (the #include of .c files is the established pattern of this
 * driver family).
 */

/* Capability flags consumed by the shared echoaudio code. */
#define ECHOGALS_FAMILY
#define ECHOCARD_GINA20
#define ECHOCARD_NAME "Gina20"
#define ECHOCARD_HAS_MONITOR
#define ECHOCARD_HAS_INPUT_GAIN
#define ECHOCARD_HAS_DIGITAL_IO
#define ECHOCARD_HAS_EXTERNAL_CLOCK
#define ECHOCARD_HAS_ADAT	FALSE

/* Pipe indexes */
#define PX_ANALOG_OUT	0	/* 8 */
#define PX_DIGITAL_OUT	8	/* 2 */
#define PX_ANALOG_IN	10	/* 2 */
#define PX_DIGITAL_IN	12	/* 2 */
#define PX_NUM		14

/* Bus indexes */
#define BX_ANALOG_OUT	0	/* 8 */
#define BX_DIGITAL_OUT	8	/* 2 */
#define BX_ANALOG_IN	10	/* 2 */
#define BX_DIGITAL_IN	12	/* 2 */
#define BX_NUM		14


#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"

MODULE_FIRMWARE("ea/gina20_dsp.fw");

/* Index into card_fw[] below. */
#define FW_GINA20_DSP	0

static const struct firmware card_fw[] = {
	{0, "gina20_dsp.fw"}
};

static DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = {
	{0x1057, 0x1801, 0xECC0, 0x0020, 0, 0, 0},	/* DSP 56301 Gina20 rev.0 */
	{0,}
};

/* PCM capabilities advertised to ALSA; the shared core copies this skeleton. */
static struct snd_pcm_hardware pcm_hardware_skel = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_SYNC_START,
	.formats =	SNDRV_PCM_FMTBIT_U8 |
			SNDRV_PCM_FMTBIT_S16_LE |
			SNDRV_PCM_FMTBIT_S24_3LE |
			SNDRV_PCM_FMTBIT_S32_LE |
			SNDRV_PCM_FMTBIT_S32_BE,
	.rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
	.rate_min = 44100,
	.rate_max = 48000,
	.channels_min = 1,
	.channels_max = 2,
	.buffer_bytes_max = 262144,
	.period_bytes_min = 32,
	.period_bytes_max = 131072,
	.periods_min = 2,
	.periods_max = 220,
	/* One page (4k) contains 512 instructions. I don't know if the hw
	supports lists longer than this. In this case periods_max=220 is a
	safe limit to make sure the list never exceeds 512 instructions. */
};


#include "gina20_dsp.c"
#include "echoaudio_dsp.c"
#include "echoaudio.c"
gpl-2.0
clumsy1991/M8_GPE_Kernel
drivers/connector/cn_queue.c
8080
3930
/* * cn_queue.c * * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/list.h> #include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/suspend.h> #include <linux/connector.h> #include <linux/delay.h> static struct cn_callback_entry * cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) { struct cn_callback_entry *cbq; cbq = kzalloc(sizeof(*cbq), GFP_KERNEL); if (!cbq) { printk(KERN_ERR "Failed to create new callback queue.\n"); return NULL; } atomic_set(&cbq->refcnt, 1); atomic_inc(&dev->refcnt); cbq->pdev = dev; snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name); memcpy(&cbq->id.id, id, sizeof(struct cb_id)); cbq->callback = callback; return cbq; } void cn_queue_release_callback(struct cn_callback_entry *cbq) { if (!atomic_dec_and_test(&cbq->refcnt)) return; atomic_dec(&cbq->pdev->refcnt); kfree(cbq); } int cn_cb_equal(struct cb_id *i1, struct cb_id *i2) { return ((i1->idx == i2->idx) && (i1->val == i2->val)); } int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name, struct cb_id *id, void 
(*callback)(struct cn_msg *, struct netlink_skb_parms *)) { struct cn_callback_entry *cbq, *__cbq; int found = 0; cbq = cn_queue_alloc_callback_entry(dev, name, id, callback); if (!cbq) return -ENOMEM; spin_lock_bh(&dev->queue_lock); list_for_each_entry(__cbq, &dev->queue_list, callback_entry) { if (cn_cb_equal(&__cbq->id.id, id)) { found = 1; break; } } if (!found) list_add_tail(&cbq->callback_entry, &dev->queue_list); spin_unlock_bh(&dev->queue_lock); if (found) { cn_queue_release_callback(cbq); return -EINVAL; } cbq->seq = 0; cbq->group = cbq->id.id.idx; return 0; } void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id) { struct cn_callback_entry *cbq, *n; int found = 0; spin_lock_bh(&dev->queue_lock); list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) { if (cn_cb_equal(&cbq->id.id, id)) { list_del(&cbq->callback_entry); found = 1; break; } } spin_unlock_bh(&dev->queue_lock); if (found) cn_queue_release_callback(cbq); } struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls) { struct cn_queue_dev *dev; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; snprintf(dev->name, sizeof(dev->name), "%s", name); atomic_set(&dev->refcnt, 0); INIT_LIST_HEAD(&dev->queue_list); spin_lock_init(&dev->queue_lock); dev->nls = nls; return dev; } void cn_queue_free_dev(struct cn_queue_dev *dev) { struct cn_callback_entry *cbq, *n; spin_lock_bh(&dev->queue_lock); list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) list_del(&cbq->callback_entry); spin_unlock_bh(&dev->queue_lock); while (atomic_read(&dev->refcnt)) { printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n", dev->name, atomic_read(&dev->refcnt)); msleep(1000); } kfree(dev); dev = NULL; }
gpl-2.0
skulldreamz/bullhead_kernel
sound/pci/echoaudio/layla24.c
8080
3714
/*
 * ALSA driver for Echoaudio soundcards.
 * Copyright (C) 2003-2004 Giuliano Pochini <pochini@shiny.it>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Layla24 card description.  Only capability macros and card tables live
 * here; the shared echoaudio core is pulled in via the .c includes at the
 * bottom (the established pattern of this driver family).
 */

/* Capability flags consumed by the shared echoaudio code. */
#define ECHO24_FAMILY
#define ECHOCARD_LAYLA24
#define ECHOCARD_NAME "Layla24"
#define ECHOCARD_HAS_MONITOR
#define ECHOCARD_HAS_ASIC
#define ECHOCARD_HAS_INPUT_NOMINAL_LEVEL
#define ECHOCARD_HAS_OUTPUT_NOMINAL_LEVEL
#define ECHOCARD_HAS_SUPER_INTERLEAVE
#define ECHOCARD_HAS_DIGITAL_IO
#define ECHOCARD_HAS_DIGITAL_IN_AUTOMUTE
#define ECHOCARD_HAS_DIGITAL_MODE_SWITCH
#define ECHOCARD_HAS_EXTERNAL_CLOCK
#define ECHOCARD_HAS_ADAT	6
#define ECHOCARD_HAS_STEREO_BIG_ENDIAN32
#define ECHOCARD_HAS_MIDI

/* Pipe indexes */
#define PX_ANALOG_OUT	0	/* 8 */
#define PX_DIGITAL_OUT	8	/* 8 */
#define PX_ANALOG_IN	16	/* 8 */
#define PX_DIGITAL_IN	24	/* 8 */
#define PX_NUM		32

/* Bus indexes */
#define BX_ANALOG_OUT	0	/* 8 */
#define BX_DIGITAL_OUT	8	/* 8 */
#define BX_ANALOG_IN	16	/* 8 */
#define BX_DIGITAL_IN	24	/* 8 */
#define BX_NUM		32


#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
#include <sound/rawmidi.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"

MODULE_FIRMWARE("ea/loader_dsp.fw");
MODULE_FIRMWARE("ea/layla24_dsp.fw");
MODULE_FIRMWARE("ea/layla24_1_asic.fw");
MODULE_FIRMWARE("ea/layla24_2A_asic.fw");
MODULE_FIRMWARE("ea/layla24_2S_asic.fw");

/* Indexes into card_fw[] below. */
#define FW_361_LOADER		0
#define FW_LAYLA24_DSP		1
#define FW_LAYLA24_1_ASIC	2
#define FW_LAYLA24_2A_ASIC	3
#define FW_LAYLA24_2S_ASIC	4

static const struct firmware card_fw[] = {
	{0, "loader_dsp.fw"},
	{0, "layla24_dsp.fw"},
	{0, "layla24_1_asic.fw"},
	{0, "layla24_2A_asic.fw"},
	{0, "layla24_2S_asic.fw"}
};

static DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = {
	{0x1057, 0x3410, 0xECC0, 0x0060, 0, 0, 0},	/* DSP 56361 Layla24 rev.0 */
	{0,}
};

/* PCM capabilities advertised to ALSA; the shared core copies this skeleton. */
static struct snd_pcm_hardware pcm_hardware_skel = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_SYNC_START,
	.formats =	SNDRV_PCM_FMTBIT_U8 |
			SNDRV_PCM_FMTBIT_S16_LE |
			SNDRV_PCM_FMTBIT_S24_3LE |
			SNDRV_PCM_FMTBIT_S32_LE |
			SNDRV_PCM_FMTBIT_S32_BE,
	.rates = 	SNDRV_PCM_RATE_8000_96000,
	.rate_min = 8000,
	.rate_max = 100000,
	.channels_min = 1,
	.channels_max = 8,
	.buffer_bytes_max = 262144,
	.period_bytes_min = 32,
	.period_bytes_max = 131072,
	.periods_min = 2,
	.periods_max = 220,
	/* One page (4k) contains 512 instructions. I don't know if the hw
	supports lists longer than this. In this case periods_max=220 is a
	safe limit to make sure the list never exceeds 512 instructions. */
};


#include "layla24_dsp.c"
#include "echoaudio_dsp.c"
#include "echoaudio_gml.c"
#include "echoaudio.c"
#include "midi.c"
gpl-2.0
MikePach/Alucard-Kernel-jfltexx
sound/pci/echoaudio/gina24.c
8080
3972
/*
 * ALSA driver for Echoaudio soundcards.
 * Copyright (C) 2003-2004 Giuliano Pochini <pochini@shiny.it>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Gina24 card description.  Only capability macros and card tables live
 * here; the shared echoaudio core is pulled in via the .c includes at the
 * bottom (the established pattern of this driver family).
 */

/* Capability flags consumed by the shared echoaudio code. */
#define ECHO24_FAMILY
#define ECHOCARD_GINA24
#define ECHOCARD_NAME "Gina24"
#define ECHOCARD_HAS_MONITOR
#define ECHOCARD_HAS_ASIC
#define ECHOCARD_HAS_INPUT_NOMINAL_LEVEL
#define ECHOCARD_HAS_OUTPUT_NOMINAL_LEVEL
#define ECHOCARD_HAS_SUPER_INTERLEAVE
#define ECHOCARD_HAS_DIGITAL_IO
#define ECHOCARD_HAS_DIGITAL_IN_AUTOMUTE
#define ECHOCARD_HAS_DIGITAL_MODE_SWITCH
#define ECHOCARD_HAS_EXTERNAL_CLOCK
#define ECHOCARD_HAS_ADAT	6
#define ECHOCARD_HAS_STEREO_BIG_ENDIAN32

/* Pipe indexes */
#define PX_ANALOG_OUT	0	/* 8 */
#define PX_DIGITAL_OUT	8	/* 8 */
#define PX_ANALOG_IN	16	/* 2 */
#define PX_DIGITAL_IN	18	/* 8 */
#define PX_NUM		26

/* Bus indexes */
#define BX_ANALOG_OUT	0	/* 8 */
#define BX_DIGITAL_OUT	8	/* 8 */
#define BX_ANALOG_IN	16	/* 2 */
#define BX_DIGITAL_IN	18	/* 8 */
#define BX_NUM		26


#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include "echoaudio.h"

MODULE_FIRMWARE("ea/loader_dsp.fw");
MODULE_FIRMWARE("ea/gina24_301_dsp.fw");
MODULE_FIRMWARE("ea/gina24_361_dsp.fw");
MODULE_FIRMWARE("ea/gina24_301_asic.fw");
MODULE_FIRMWARE("ea/gina24_361_asic.fw");

/*
 * Indexes into card_fw[] below.  Gina24 shipped with either a DSP 56301
 * or a DSP 56361, each needing its own DSP/ASIC firmware pair.
 */
#define FW_361_LOADER		0
#define FW_GINA24_301_DSP	1
#define FW_GINA24_361_DSP	2
#define FW_GINA24_301_ASIC	3
#define FW_GINA24_361_ASIC	4

static const struct firmware card_fw[] = {
	{0, "loader_dsp.fw"},
	{0, "gina24_301_dsp.fw"},
	{0, "gina24_361_dsp.fw"},
	{0, "gina24_301_asic.fw"},
	{0, "gina24_361_asic.fw"}
};

static DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = {
	{0x1057, 0x1801, 0xECC0, 0x0050, 0, 0, 0},	/* DSP 56301 Gina24 rev.0 */
	{0x1057, 0x1801, 0xECC0, 0x0051, 0, 0, 0},	/* DSP 56301 Gina24 rev.1 */
	{0x1057, 0x3410, 0xECC0, 0x0050, 0, 0, 0},	/* DSP 56361 Gina24 rev.0 */
	{0x1057, 0x3410, 0xECC0, 0x0051, 0, 0, 0},	/* DSP 56361 Gina24 rev.1 */
	{0,}
};

/* PCM capabilities advertised to ALSA; the shared core copies this skeleton. */
static struct snd_pcm_hardware pcm_hardware_skel = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_SYNC_START,
	.formats =	SNDRV_PCM_FMTBIT_U8 |
			SNDRV_PCM_FMTBIT_S16_LE |
			SNDRV_PCM_FMTBIT_S24_3LE |
			SNDRV_PCM_FMTBIT_S32_LE |
			SNDRV_PCM_FMTBIT_S32_BE,
	.rates = 	SNDRV_PCM_RATE_8000_48000 |
			SNDRV_PCM_RATE_88200 |
			SNDRV_PCM_RATE_96000,
	.rate_min = 8000,
	.rate_max = 96000,
	.channels_min = 1,
	.channels_max = 8,
	.buffer_bytes_max = 262144,
	.period_bytes_min = 32,
	.period_bytes_max = 131072,
	.periods_min = 2,
	.periods_max = 220,
	/* One page (4k) contains 512 instructions. I don't know if the hw
	supports lists longer than this. In this case periods_max=220 is a
	safe limit to make sure the list never exceeds 512 instructions.
	220 ~= (512 - 1 - (BUFFER_BYTES_MAX / PAGE_SIZE)) / 2 */
};


#include "gina24_dsp.c"
#include "echoaudio_dsp.c"
#include "echoaudio_gml.c"
#include "echoaudio.c"
gpl-2.0
czankel/xtensa-linux
security/tomoyo/load_policy.c
9616
2614
/*
 * security/tomoyo/load_policy.c
 *
 * Copyright (C) 2005-2011  NTT DATA CORPORATION
 */

#include "common.h"

#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER

/*
 * Path to the policy loader. (default = CONFIG_SECURITY_TOMOYO_POLICY_LOADER)
 */
static const char *tomoyo_loader;

/**
 * tomoyo_loader_setup - Set policy loader.
 *
 * @str: Program to use as a policy loader (e.g. /sbin/tomoyo-init ).
 *
 * Returns 0.
 */
static int __init tomoyo_loader_setup(char *str)
{
	tomoyo_loader = str;
	return 0;
}

__setup("TOMOYO_loader=", tomoyo_loader_setup);

/**
 * tomoyo_policy_loader_exists - Check whether /sbin/tomoyo-init exists.
 *
 * Returns true if /sbin/tomoyo-init exists, false otherwise.
 */
static bool tomoyo_policy_loader_exists(void)
{
	struct path path;

	/* Fall back to the build-time default when no TOMOYO_loader= given. */
	if (!tomoyo_loader)
		tomoyo_loader = CONFIG_SECURITY_TOMOYO_POLICY_LOADER;
	if (kern_path(tomoyo_loader, LOOKUP_FOLLOW, &path)) {
		printk(KERN_INFO "Not activating Mandatory Access Control "
		       "as %s does not exist.\n", tomoyo_loader);
		return false;
	}
	path_put(&path);
	return true;
}

/*
 * Path to the trigger. (default = CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER)
 */
static const char *tomoyo_trigger;

/**
 * tomoyo_trigger_setup - Set trigger for activation.
 *
 * @str: Program to use as an activation trigger (e.g. /sbin/init ).
 *
 * Returns 0.
 */
static int __init tomoyo_trigger_setup(char *str)
{
	tomoyo_trigger = str;
	return 0;
}

__setup("TOMOYO_trigger=", tomoyo_trigger_setup);

/**
 * tomoyo_load_policy - Run external policy loader to load policy.
 *
 * @filename: The program about to start.
 *
 * This function checks whether @filename is /sbin/init , and if so
 * invoke /sbin/tomoyo-init and wait for the termination of /sbin/tomoyo-init
 * and then continues invocation of /sbin/init.
 * /sbin/tomoyo-init reads policy files in /etc/tomoyo/ directory and
 * writes to /sys/kernel/security/tomoyo/ interfaces.
 *
 * Returns nothing.
 */
void tomoyo_load_policy(const char *filename)
{
	/* One-shot guard: the loader must only ever run once per boot. */
	static bool done;
	char *argv[2];
	char *envp[3];

	if (tomoyo_policy_loaded || done)
		return;
	if (!tomoyo_trigger)
		tomoyo_trigger = CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER;
	if (strcmp(filename, tomoyo_trigger))
		return;
	if (!tomoyo_policy_loader_exists())
		return;
	done = true;
	printk(KERN_INFO "Calling %s to load policy. Please wait.\n",
	       tomoyo_loader);
	argv[0] = (char *) tomoyo_loader;
	argv[1] = NULL;
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;
	/* Synchronous: wait for the loader before letting the trigger run. */
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	tomoyo_check_profile();
}

#endif
gpl-2.0
PaoloW8/kernel_ZOPO
drivers/misc/ibmasm/dot_command.c
9872
4063
/*
 * IBM ASM Service Processor Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2004
 *
 * Author: Max Asböck <amax@us.ibm.com>
 *
 */

#include "ibmasm.h"
#include "dot_command.h"

/**
 * Dispatch an incoming message to the specific handler for the message.
 * Called from interrupt context.
 */
void ibmasm_receive_message(struct service_processor *sp, void *message, int message_size)
{
	u32 size;
	struct dot_command_header *header = (struct dot_command_header *)message;

	if (message_size == 0)
		return;

	/* Trust neither the reported nor the embedded size blindly:
	 * clamp the dot-command size to the bytes actually received. */
	size = get_dot_command_size(message);
	if (size == 0)
		return;
	if (size > message_size)
		size = message_size;

	switch (header->type) {
	case sp_event:
		ibmasm_receive_event(sp, message, size);
		break;
	case sp_command_response:
		ibmasm_receive_command_response(sp, message, size);
		break;
	case sp_heartbeat:
		ibmasm_receive_heartbeat(sp, message, size);
		break;
	default:
		dev_err(sp->dev, "Received unknown message from service processor\n");
	}
}


#define INIT_BUFFER_SIZE 32


/**
 * send the 4.3.5.10 dot command (driver VPD) to the service processor
 */
int ibmasm_send_driver_vpd(struct service_processor *sp)
{
	struct command *command;
	struct dot_command_header *header;
	u8 *vpd_command;
	u8 *vpd_data;
	int result = 0;

	command = ibmasm_new_command(sp, INIT_BUFFER_SIZE);
	if (command == NULL)
		return -ENOMEM;

	/* Buffer layout: header, then 4 command bytes, then 16 data bytes. */
	header = (struct dot_command_header *)command->buffer;
	header->type                = sp_write;
	header->command_size        = 4;
	header->data_size           = 16;
	header->status              = 0;
	header->reserved            = 0;

	/* Dot command id 4.3.5.10, one byte per component. */
	vpd_command = command->buffer + sizeof(struct dot_command_header);
	vpd_command[0] = 0x4;
	vpd_command[1] = 0x3;
	vpd_command[2] = 0x5;
	vpd_command[3] = 0xa;

	/* Driver VPD string follows the command bytes; NUL-padded at the
	 * fixed offsets the firmware expects. */
	vpd_data = vpd_command + header->command_size;
	vpd_data[0] = 0;
	strcat(vpd_data, IBMASM_DRIVER_VPD);
	vpd_data[10] = 0;
	vpd_data[15] = 0;

	ibmasm_exec_command(sp, command);
	ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL);

	if (command->status != IBMASM_CMD_COMPLETE)
		result = -ENODEV;

	command_put(command);
	return result;
}

struct os_state_command {
	struct dot_command_header	header;
	unsigned char			command[3];
	unsigned char			data;
};

/**
 * send the 4.3.6 dot command (os state) to the service processor
 * During driver init this function is called with os state "up".
 * This causes the service processor to start sending heartbeats the
 * driver.
 * During driver exit the function is called with os state "down",
 * causing the service processor to stop the heartbeats.
 */
int ibmasm_send_os_state(struct service_processor *sp, int os_state)
{
	struct command *cmd;
	struct os_state_command *os_state_cmd;
	int result = 0;

	cmd = ibmasm_new_command(sp, sizeof(struct os_state_command));
	if (cmd == NULL)
		return -ENOMEM;

	os_state_cmd = (struct os_state_command *)cmd->buffer;
	os_state_cmd->header.type         = sp_write;
	os_state_cmd->header.command_size = 3;
	os_state_cmd->header.data_size    = 1;
	os_state_cmd->header.status       = 0;
	/* Dot command id 4.3.6. */
	os_state_cmd->command[0]          = 4;
	os_state_cmd->command[1]          = 3;
	os_state_cmd->command[2]          = 6;
	os_state_cmd->data                = os_state;

	ibmasm_exec_command(sp, cmd);
	ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL);

	if (cmd->status != IBMASM_CMD_COMPLETE)
		result = -ENODEV;

	command_put(cmd);
	return result;
}
gpl-2.0
flaming-toast/linux-jeyu
arch/mn10300/unit-asb2303/leds.c
13712
1471
/* ASB2303 peripheral 7-segment LEDs x1 support * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/intctl-regs.h> #include <asm/rtc-regs.h> #include <unit/leds.h> #if 0 static const u8 asb2303_led_hex_tbl[16] = { 0x80, 0xf2, 0x48, 0x60, 0x32, 0x24, 0x04, 0xf0, 0x00, 0x20, 0x10, 0x06, 0x8c, 0x42, 0x0c, 0x1c }; #endif static const u8 asb2303_led_chase_tbl[6] = { ~0x02, /* top - segA */ ~0x04, /* right top - segB */ ~0x08, /* right bottom - segC */ ~0x10, /* bottom - segD */ ~0x20, /* left bottom - segE */ ~0x40, /* left top - segF */ }; static unsigned asb2303_led_chase; void peripheral_leds_display_exception(enum exception_code code) { ASB2303_GPIO0DEF = 0x5555; /* configure as an output port */ ASB2303_7SEGLEDS = 0x6d; /* triple horizontal bar */ } void peripheral_leds_led_chase(void) { ASB2303_GPIO0DEF = 0x5555; /* configure as an output port */ ASB2303_7SEGLEDS = asb2303_led_chase_tbl[asb2303_led_chase]; asb2303_led_chase++; if (asb2303_led_chase >= 6) asb2303_led_chase = 0; }
gpl-2.0
vk2rq/linux-stable
fs/partitions/ibm.c
145
6651
/*
 * File...........: linux/fs/partitions/ibm.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Volker Sameske <sameske@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
 */

#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/slab.h>
#include <asm/dasd.h>
#include <asm/ebcdic.h>
#include <asm/uaccess.h>
#include <asm/vtoc.h>

#include "check.h"
#include "ibm.h"

/*
 * compute the block number from a
 * cyl-cyl-head-head structure
 */
static sector_t cchh2blk(struct vtoc_cchh *ptr, struct hd_geometry *geo)
{
	sector_t cyl;
	__u16 head;

	/* decode cylinder and heads for large volumes */
	/* upper 12 bits of hh extend the 16-bit cc cylinder number */
	cyl = ptr->hh & 0xFFF0;
	cyl <<= 12;
	cyl |= ptr->cc;
	head = ptr->hh & 0x000F;
	return cyl * geo->heads * geo->sectors +
	       head * geo->sectors;
}

/*
 * compute the block number from a
 * cyl-cyl-head-head-block structure
 */
static sector_t cchhb2blk(struct vtoc_cchhb *ptr, struct hd_geometry *geo)
{
	sector_t cyl;
	__u16 head;

	/* decode cylinder and heads for large volumes */
	cyl = ptr->hh & 0xFFF0;
	cyl <<= 12;
	cyl |= ptr->cc;
	head = ptr->hh & 0x000F;
	return	cyl * geo->heads * geo->sectors +
		head * geo->sectors +
		ptr->b;
}

/*
 * Parse the partition layout of an s390 DASD volume (LDL, CDL or CMS
 * formatted) and register the partitions found with @state.
 *
 * Returns 1 on success (label parsed, partitions added), 0 when the device
 * is not a DASD or has no readable size, and -1 on read errors.
 */
int ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
{
	int blocksize, res;
	loff_t i_size, offset, size, fmt_size;
	dasd_information2_t *info;
	struct hd_geometry *geo;
	char type[5] = {0,};
	char name[7] = {0,};
	union label_t {
		struct vtoc_volume_label_cdl vol;
		struct vtoc_volume_label_ldl lnx;
		struct vtoc_cms_label cms;
	} *label;
	unsigned char *data;
	Sector sect;
	sector_t labelsect;

	res = 0;
	blocksize = bdev_logical_block_size(bdev);
	if (blocksize <= 0)
		goto out_exit;
	i_size = i_size_read(bdev->bd_inode);
	if (i_size == 0)
		goto out_exit;

	/* goto-based cleanup: each allocation gets its own unwind label */
	info = kmalloc(sizeof(dasd_information2_t), GFP_KERNEL);
	if (info == NULL)
		goto out_exit;
	geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL);
	if (geo == NULL)
		goto out_nogeo;
	label = kmalloc(sizeof(union label_t), GFP_KERNEL);
	if (label == NULL)
		goto out_nolab;

	/* Not a DASD (or geometry unavailable): bail out with res == 0. */
	if (ioctl_by_bdev(bdev, BIODASDINFO2, (unsigned long)info) != 0 ||
	    ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
		goto out_freeall;

	/*
	 * Special case for FBA disks: label sector does not depend on
	 * blocksize.
	 */
	if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) ||
	    (info->cu_type == 0x3880 && info->dev_type == 0x3370))
		labelsect = info->label_block;
	else
		labelsect = info->label_block * (blocksize >> 9);

	/*
	 * Get volume label, extract name and type.
	 */
	data = read_dev_sector(bdev, labelsect, &sect);
	if (data == NULL)
		goto out_readerr;

	memcpy(label, data, sizeof(union label_t));
	put_dev_sector(sect);

	/* CDL vs LDL layouts keep vollbl/volid at different offsets. */
	if ((!info->FBA_layout) && (!strcmp(info->type, "ECKD"))) {
		strncpy(type, label->vol.vollbl, 4);
		strncpy(name, label->vol.volid, 6);
	} else {
		strncpy(type, label->lnx.vollbl, 4);
		strncpy(name, label->lnx.volid, 6);
	}
	/* Labels are stored in EBCDIC; convert to ASCII for comparison. */
	EBCASC(type, 4);
	EBCASC(name, 6);

	res = 1;

	/*
	 * Three different formats: LDL, CDL and unformated disk
	 *
	 * identified by info->format
	 *
	 * unformated disks we do not have to care about
	 */
	if (info->format == DASD_FORMAT_LDL) {
		if (strncmp(type, "CMS1", 4) == 0) {
			/*
			 * VM style CMS1 labeled disk
			 */
			blocksize = label->cms.block_size;
			if (label->cms.disk_offset != 0) {
				printk("CMS1/%8s(MDSK):", name);
				/* disk is reserved minidisk */
				offset = label->cms.disk_offset;
				size = (label->cms.block_count - 1)
					* (blocksize >> 9);
			} else {
				printk("CMS1/%8s:", name);
				offset = (info->label_block + 1);
				size = label->cms.block_count
					* (blocksize >> 9);
			}
			put_partition(state, 1, offset*(blocksize >> 9),
				      size-offset*(blocksize >> 9));
		} else {
			if (strncmp(type, "LNX1", 4) == 0) {
				printk("LNX1/%8s:", name);
				if (label->lnx.ldl_version == 0xf2) {
					fmt_size = label->lnx.formatted_blocks
						* (blocksize >> 9);
				} else if (!strcmp(info->type, "ECKD")) {
					/* formated w/o large volume support */
					fmt_size = geo->cylinders * geo->heads
						* geo->sectors * (blocksize >> 9);
				} else {
					/* old label and no usable disk geometry
					 * (e.g. DIAG) */
					fmt_size = i_size >> 9;
				}
				/* never claim more than the device holds */
				size = i_size >> 9;
				if (fmt_size < size)
					size = fmt_size;
				offset = (info->label_block + 1);
			} else {
				/* unlabeled disk */
				printk("(nonl)");
				size = i_size >> 9;
				offset = (info->label_block + 1);
			}
			put_partition(state, 1, offset*(blocksize >> 9),
				      size-offset*(blocksize >> 9));
		}
	} else if (info->format == DASD_FORMAT_CDL) {
		/*
		 * New style CDL formatted disk
		 */
		sector_t blk;
		int counter;

		/*
		 * check if VOL1 label is available
		 * if not, something is wrong, skipping partition detection
		 */
		if (strncmp(type, "VOL1",  4) == 0) {
			printk("VOL1/%8s:", name);
			/*
			 * get block number and read then go through format1
			 * labels
			 */
			blk = cchhb2blk(&label->vol.vtoc, geo) + 1;
			counter = 0;
			data = read_dev_sector(bdev, blk * (blocksize/512),
					       &sect);
			while (data != NULL) {
				struct vtoc_format1_label f1;

				memcpy(&f1, data,
				       sizeof(struct vtoc_format1_label));
				put_dev_sector(sect);

				/* skip FMT4 / FMT5 / FMT7 labels */
				if (f1.DS1FMTID == _ascebc['4']
				    || f1.DS1FMTID == _ascebc['5']
				    || f1.DS1FMTID == _ascebc['7']
				    || f1.DS1FMTID == _ascebc['9']) {
					blk++;
					data = read_dev_sector(bdev, blk *
							       (blocksize/512),
								&sect);
					continue;
				}

				/* only FMT1 and 8 labels valid at this point */
				if (f1.DS1FMTID != _ascebc['1'] &&
				    f1.DS1FMTID != _ascebc['8'])
					break;

				/* OK, we got valid partition data */
				offset = cchh2blk(&f1.DS1EXT1.llimit, geo);
				size  = cchh2blk(&f1.DS1EXT1.ulimit, geo) -
					offset + geo->sectors;
				if (counter >= state->limit)
					break;
				put_partition(state, counter + 1,
					      offset * (blocksize >> 9),
					      size * (blocksize >> 9));
				counter++;
				blk++;
				data = read_dev_sector(bdev,
						       blk * (blocksize/512),
						       &sect);
			}

			if (!data)
				/* Are we not supposed to report this ? */
				goto out_readerr;
		} else
			printk(KERN_WARNING "Warning, expected Label VOL1 not "
			       "found, treating as CDL formated Disk");

	}

	printk("\n");
	goto out_freeall;


out_readerr:
	res = -1;
out_freeall:
	kfree(label);
out_nolab:
	kfree(geo);
out_nogeo:
	kfree(info);
out_exit:
	return res;
}
gpl-2.0
notro/linux-staging
fs/exofs/namei.c
401
7430
/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <ooo@electrozaur.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "exofs.h"

/* Link @inode under @dentry; on failure drop the new inode's link count
 * and final reference.  Returns 0 or a negative errno. */
static inline int exofs_add_nondir(struct dentry *dentry, struct inode *inode)
{
	int err = exofs_add_link(dentry, inode);
	if (!err) {
		d_instantiate(dentry, inode);
		return 0;
	}
	inode_dec_link_count(inode);
	iput(inode);
	return err;
}

/* Directory-entry lookup: resolve @dentry's name in @dir to an inode. */
static struct dentry *exofs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	struct inode *inode;
	ino_t ino;

	if (dentry->d_name.len > EXOFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ino = exofs_inode_by_name(dir, dentry);
	/* ino == 0 means "not found": splice a negative dentry. */
	inode = ino ? exofs_iget(dir->i_sb, ino) : NULL;
	return d_splice_alias(inode, dentry);
}

/* Create a regular file in @dir. */
static int exofs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
			 bool excl)
{
	struct inode *inode = exofs_new_inode(dir, mode);
	int err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		inode->i_op = &exofs_file_inode_operations;
		inode->i_fop = &exofs_file_operations;
		inode->i_mapping->a_ops = &exofs_aops;
		mark_inode_dirty(inode);
		err = exofs_add_nondir(dentry, inode);
	}
	return err;
}

/* Create a device/fifo/socket node in @dir. */
static int exofs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
		       dev_t rdev)
{
	struct inode *inode;
	int err;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	inode = exofs_new_inode(dir, mode);
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		init_special_inode(inode, inode->i_mode, rdev);
		mark_inode_dirty(inode);
		err = exofs_add_nondir(dentry, inode);
	}
	return err;
}

/* Create a symlink: short targets are stored inline in the inode ("fast"
 * symlink), long ones go through the page cache ("slow" symlink). */
static int exofs_symlink(struct inode *dir, struct dentry *dentry,
			  const char *symname)
{
	struct super_block *sb = dir->i_sb;
	int err = -ENAMETOOLONG;
	unsigned l = strlen(symname)+1;
	struct inode *inode;
	struct exofs_i_info *oi;

	if (l > sb->s_blocksize)
		goto out;

	inode = exofs_new_inode(dir, S_IFLNK | S_IRWXUGO);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out;

	oi = exofs_i(inode);
	if (l > sizeof(oi->i_data)) {
		/* slow symlink */
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &exofs_aops;
		memset(oi->i_data, 0, sizeof(oi->i_data));

		err = page_symlink(inode, symname, l);
		if (err)
			goto out_fail;
	} else {
		/* fast symlink */
		inode->i_op = &simple_symlink_inode_operations;
		inode->i_link = (char *)oi->i_data;
		memcpy(oi->i_data, symname, l);
		inode->i_size = l-1;
	}
	mark_inode_dirty(inode);

	err = exofs_add_nondir(dentry, inode);
out:
	return err;

out_fail:
	inode_dec_link_count(inode);
	iput(inode);
	goto out;
}

/* Hard link: bump link count and reference before adding the new name. */
static int exofs_link(struct dentry *old_dentry, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode = d_inode(old_dentry);

	inode->i_ctime = CURRENT_TIME;
	inode_inc_link_count(inode);
	ihold(inode);

	return exofs_add_nondir(dentry, inode);
}

/* Create a directory.  @dir's link count is raised first for the new
 * ".." entry; the failure paths unwind every count taken. */
static int exofs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;
	int err;

	inode_inc_link_count(dir);

	inode = exofs_new_inode(dir, S_IFDIR | mode);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_dir;

	inode->i_op = &exofs_dir_inode_operations;
	inode->i_fop = &exofs_dir_operations;
	inode->i_mapping->a_ops = &exofs_aops;

	inode_inc_link_count(inode);

	err = exofs_make_empty(inode, dir);
	if (err)
		goto out_fail;

	err = exofs_add_link(dentry, inode);
	if (err)
		goto out_fail;

	d_instantiate(dentry, inode);
out:
	return err;

out_fail:
	/* two decrements: one for "." and one for the initial reference */
	inode_dec_link_count(inode);
	inode_dec_link_count(inode);
	iput(inode);
out_dir:
	inode_dec_link_count(dir);
	goto out;
}

/* Remove @dentry's name from @dir and drop the inode's link count. */
static int exofs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	struct exofs_dir_entry *de;
	struct page *page;
	int err = -ENOENT;

	de = exofs_find_entry(dir, dentry, &page);
	if (!de)
		goto out;

	err = exofs_delete_entry(de, page);
	if (err)
		goto out;

	inode->i_ctime = dir->i_ctime;
	inode_dec_link_count(inode);
	err = 0;
out:
	return err;
}

/* Remove an empty directory; also drops the counts held for "." and "..". */
static int exofs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int err = -ENOTEMPTY;

	if (exofs_empty_dir(inode)) {
		err = exofs_unlink(dir, dentry);
		if (!err) {
			inode->i_size = 0;
			inode_dec_link_count(inode);
			inode_dec_link_count(dir);
		}
	}
	return err;
}

/* Rename old_dir/old_dentry to new_dir/new_dentry, replacing an existing
 * target (which must be empty if it is a directory) and re-pointing ".."
 * when a directory changes parent. */
static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
		struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *old_inode = d_inode(old_dentry);
	struct inode *new_inode = d_inode(new_dentry);
	struct page *dir_page = NULL;
	struct exofs_dir_entry *dir_de = NULL;
	struct page *old_page;
	struct exofs_dir_entry *old_de;
	int err = -ENOENT;

	old_de = exofs_find_entry(old_dir, old_dentry, &old_page);
	if (!old_de)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		dir_de = exofs_dotdot(old_inode, &dir_page);
		if (!dir_de)
			goto out_old;
	}

	if (new_inode) {
		struct page *new_page;
		struct exofs_dir_entry *new_de;

		err = -ENOTEMPTY;
		if (dir_de && !exofs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_de = exofs_find_entry(new_dir, new_dentry, &new_page);
		if (!new_de)
			goto out_dir;
		err = exofs_set_link(new_dir, new_de, new_page, old_inode);
		new_inode->i_ctime = CURRENT_TIME;
		/* extra drop for the replaced directory's ".." entry */
		if (dir_de)
			drop_nlink(new_inode);
		inode_dec_link_count(new_inode);
		if (err)
			goto out_dir;
	} else {
		err = exofs_add_link(new_dentry, old_inode);
		if (err)
			goto out_dir;
		if (dir_de)
			inode_inc_link_count(new_dir);
	}

	old_inode->i_ctime = CURRENT_TIME;

	exofs_delete_entry(old_de, old_page);
	mark_inode_dirty(old_inode);

	if (dir_de) {
		err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
		inode_dec_link_count(old_dir);
		if (err)
			goto out_dir;
	}
	return 0;


out_dir:
	if (dir_de) {
		kunmap(dir_page);
		page_cache_release(dir_page);
	}
out_old:
	kunmap(old_page);
	page_cache_release(old_page);
out:
	return err;
}

const struct inode_operations exofs_dir_inode_operations = {
	.create 	= exofs_create,
	.lookup 	= exofs_lookup,
	.link   	= exofs_link,
	.unlink 	= exofs_unlink,
	.symlink	= exofs_symlink,
	.mkdir  	= exofs_mkdir,
	.rmdir  	= exofs_rmdir,
	.mknod  	= exofs_mknod,
	.rename 	= exofs_rename,
	.setattr	= exofs_setattr,
};

const struct inode_operations exofs_special_inode_operations = {
	.setattr	= exofs_setattr,
};
gpl-2.0
bigbiff/android_kernel_samsung_trlte
drivers/mtd/nand/nand_ids.c
401
8007
/*
 *  drivers/mtd/nandids.c
 *
 *  Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/sizes.h>

/* Option shorthands used throughout the ID tables below. */
#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)

#define SP_OPTIONS NAND_NEED_READRDY
#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)

/*
 * The chip ID list:
 *    name, device ID, page size, chip size in MiB, eraseblock size, options
 *
 * If page size and eraseblock size are 0, the sizes are taken from the
 * extended chip ID.
 */
struct nand_flash_dev nand_flash_ids[] = {
	/*
	 * Some incompatible NAND chips share device ID's and so must be
	 * listed by full ID. We list them first so that we can easily identify
	 * the most specific match.
	 */
	{"TC58NVG2S0F 4G 3.3V 8-bit",
		{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
		  SZ_4K, SZ_512, SZ_256K, 0, 8, 224},
	{"TC58NVG3S0F 8G 3.3V 8-bit",
		{ .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
		  SZ_4K, SZ_1K, SZ_256K, 0, 8, 232},
	{"TC58NVG5D2 32G 3.3V 8-bit",
		{ .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
		  SZ_8K, SZ_4K, SZ_1M, 0, 8, 640},
	{"TC58NVG6D2 64G 3.3V 8-bit",
		{ .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
		  SZ_8K, SZ_8K, SZ_2M, 0, 8, 640},

	/* Small-page chips: geometry is fully described by the table entry. */
	LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),

	LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit",  0x33, 16, SZ_16K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit",  0x73, 16, SZ_16K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
	LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),

	LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit",  0x35, 32, SZ_16K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit",  0x75, 32, SZ_16K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
	LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),

	LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit",  0x36, 64, SZ_16K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit",  0x76, 64, SZ_16K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
	LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),

	LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit",  0x78, 128, SZ_16K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit",  0x39, 128, SZ_16K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit",  0x79, 128, SZ_16K, SP_OPTIONS),
	LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
	LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
	LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
	LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),

	LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),

	/*
	 * These are the new chips with large page size. Their page size and
	 * eraseblock size are determined from the extended ID bytes.
	 */

	/* 512 Megabit */
	EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit",  0xA2,  64, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit",  0xA0,  64, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xF2,  64, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xD0,  64, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xF0,  64, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2,  64, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0,  64, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2,  64, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0,  64, LP_OPTIONS16),

	/* 1 Gigabit */
	EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit",  0xA1, 128, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit",  0xF1, 128, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit",  0xD1, 128, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),

	/* 2 Gigabit */
	EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit",  0xAA, 256, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit",  0xDA, 256, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),

	/* 4 Gigabit */
	EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit",  0xAC, 512, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit",  0xDC, 512, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),

	/* 8 Gigabit */
	EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit",  0xA3, 1024, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit",  0xD3, 1024, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),

	/* 16 Gigabit */
	EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit",  0xA5, 2048, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit",  0xD5, 2048, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),

	/* 32 Gigabit */
	EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit",  0xA7, 4096, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit",  0xD7, 4096, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),

	/* 64 Gigabit */
	EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit",  0xAE, 8192, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit",  0xDE, 8192, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),

	/* 128 Gigabit */
	EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit",  0x1A, 16384, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit",  0x3A, 16384, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),

	/* 256 Gigabit */
	EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit",  0x1C, 32768, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit",  0x3C, 32768, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),

	/* 512 Gigabit */
	EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit",  0x1E, 65536, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit",  0x3E, 65536, LP_OPTIONS),
	EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
	EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),

	/*
	 * NOTE(review): vendor-added entry. Device ID 0xAC also appears above
	 * as a 512 MiB EXTENDED_ID_NAND entry with different geometry —
	 * presumably this fixed-geometry variant is intended to take
	 * precedence for a specific board; verify against the detection
	 * order in nand_base.c before touching either entry.
	 */
	{"NAND 4GiB 1,8V 8-bit", {{ 0xAC }} , 2048, 4096, 0x20000, 0 },

	{NULL}
};

/* Manufacturer IDs */
struct nand_manufacturers nand_manuf_ids[] = {
	{NAND_MFR_TOSHIBA, "Toshiba"},
	{NAND_MFR_SAMSUNG, "Samsung"},
	{NAND_MFR_FUJITSU, "Fujitsu"},
	{NAND_MFR_NATIONAL, "National"},
	{NAND_MFR_RENESAS, "Renesas"},
	{NAND_MFR_STMICRO, "ST Micro"},
	{NAND_MFR_HYNIX, "Hynix"},
	{NAND_MFR_MICRON, "Micron"},
	{NAND_MFR_AMD, "AMD/Spansion"},
	{NAND_MFR_MACRONIX, "Macronix"},
	{NAND_MFR_EON, "Eon"},
	{NAND_MFR_ESMT, "Elite Semiconductor"},
	{0x0, "Unknown"}
};

EXPORT_SYMBOL(nand_manuf_ids);
EXPORT_SYMBOL(nand_flash_ids);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Nand device & manufacturer IDs");
gpl-2.0
PDWMorpheus/paradox
dep/mysqllite/mysys/my_delete.c
401
3831
/* Copyright (C) 2000 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */

#include "mysys_priv.h"
#include "mysys_err.h"
#include <my_sys.h>

/*
  Delete the file `name'.

  On failure my_errno is set from errno and, when MY_FAE or MY_WME is set
  in MyFlags, an error message is reported.  When MY_SYNC_DIR is set, the
  containing directory is fsync'ed after a successful unlink so the
  deletion survives a crash.

  Returns 0 on success, non-zero on error (the unlink()/sync result).
*/
int my_delete(const char *name, myf MyFlags)
{
  int err;
  DBUG_ENTER("my_delete");
  DBUG_PRINT("my",("name %s MyFlags %d", name, MyFlags));

  if ((err = unlink(name)) == -1)
  {
    my_errno=errno;
    if (MyFlags & (MY_FAE+MY_WME))
      my_error(EE_DELETE,MYF(ME_BELL+ME_WAITTANG+(MyFlags & ME_NOINPUT)),
               name,errno);
  }
  else if ((MyFlags & MY_SYNC_DIR) &&
           my_sync_dir_by_file(name, MyFlags))
    err= -1;
  DBUG_RETURN(err);
} /* my_delete */

#if defined(__WIN__)
/**
  Delete file which is possibly not closed.

  This function is intended to be used exclusively as a temporal solution
  for Win NT in case when it is needed to delete a not closed file (note
  that the file must be opened everywhere with FILE_SHARE_DELETE mode).

  Deleting not-closed files can not be supported on Win 98|ME (and because
  of that is considered harmful).

  The function deletes the file with its preliminary renaming. This is
  because when not-closed share-delete file is deleted it still lives on
  a disk until it will not be closed everwhere. This may conflict with an
  attempt to create a new file with the same name. The deleted file is
  renamed to <name>.<num>.deleted where <name> - the initial name of the
  file, <num> - a hexadecimal number chosen to make the temporal name to
  be unique.

  @param the name of the being deleted file
  @param the flags instructing how to react on an error internally in
         the function

  @note The per-thread @c my_errno holds additional info for a caller to
        decide how critical the error can be.

  @retval 0	ok
  @retval 1	error

  NOTE(review): throughout this function errno is (ab)used to carry
  Windows error codes from GetLastError(), not POSIX errno values; the
  final my_errno translation relies on that convention.
*/
int nt_share_delete(const char *name, myf MyFlags)
{
  char buf[MAX_PATH + 20];
  ulong cnt;
  DBUG_ENTER("nt_share_delete");
  DBUG_PRINT("my",("name %s MyFlags %d", name, MyFlags));

  /* Try tick-count-derived suffixes until one is free for the rename. */
  for (cnt= GetTickCount(); cnt; cnt--)
  {
    errno= 0;
    sprintf(buf, "%s.%08X.deleted", name, cnt);
    if (MoveFile(name, buf))
      break;

    if ((errno= GetLastError()) == ERROR_ALREADY_EXISTS)
      continue;

    /* This happened during tests with MERGE tables. */
    if (errno == ERROR_ACCESS_DENIED)
      continue;

    DBUG_PRINT("warning", ("Failed to rename %s to %s, errno: %d",
                           name, buf, errno));
    break;
  }

  if (errno == ERROR_FILE_NOT_FOUND)
  {
    my_errno= ENOENT;    // marking, that `name' doesn't exist
  }
  else if (errno == 0)
  {
    if (DeleteFile(buf))
      DBUG_RETURN(0);
    /*
      The below is more complicated than necessary. For some reason, the
      assignment to my_errno clears the error number, which is retrieved
      by GetLastError() (VC2005EE). Assigning to errno first, allows to
      retrieve the correct value.
    */
    errno= GetLastError();
    if (errno == 0)
      my_errno= ENOENT; // marking, that `buf' doesn't exist
    else
      my_errno= errno;
  }
  else
    my_errno= errno;

  if (MyFlags & (MY_FAE+MY_WME))
    my_error(EE_DELETE, MYF(ME_BELL + ME_WAITTANG + (MyFlags & ME_NOINPUT)),
             name, my_errno);
  DBUG_RETURN(-1);
}
#endif
gpl-2.0
nullpo-head/linux
drivers/s390/cio/qdio_thinint.c
2193
6869
/*
 * Copyright IBM Corp. 2000, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"

/*
 * Restriction: only 63 iqdio subchannels would have its own indicator,
 * after that, subsequent subchannels share one indicator
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND		63

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);

/* Adapter interrupt definitions */
static void tiqdio_thinint_handler(struct airq_struct *airq);

static struct airq_struct tiqdio_airq = {
	.handler = tiqdio_thinint_handler,
	.isc = QDIO_AIRQ_ISC,
};

static struct indicator_t *q_indicators;

u64 last_ai_time;

/*
 * returns addr for the device state change indicator: the first unused
 * non-shared slot if any remains, otherwise the (refcounted) shared one
 */
static u32 *get_indicator(void)
{
	int i;

	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
		if (!atomic_read(&q_indicators[i].count)) {
			atomic_set(&q_indicators[i].count, 1);
			return &q_indicators[i].ind;
		}

	/* use the shared indicator */
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}

/* release an indicator slot; the slot index is recovered from the address */
static void put_indicator(u32 *addr)
{
	int i;

	if (!addr)
		return;
	i = ((unsigned long)addr - (unsigned long)q_indicators) /
		sizeof(struct indicator_t);
	atomic_dec(&q_indicators[i].count);
}

/* publish the first input queue on the thinint list and mark its dsci */
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	mutex_lock(&tiq_list_lock);
	list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);
	xchg(irq_ptr->dsci, 1 << 7);
}

void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;

	q = irq_ptr->input_qs[0];
	/* if establish triggered an error */
	if (!q || !q->entry.prev || !q->entry.next)
		return;

	mutex_lock(&tiq_list_lock);
	list_del_rcu(&q->entry);
	mutex_unlock(&tiq_list_lock);
	/* wait for concurrent handler walks before the entry goes away */
	synchronize_rcu();
}

static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->nr_input_qs > 1;
}

static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

/* true when this irq's dsci cannot be attributed to a single queue */
static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return references_shared_dsci(irq_ptr) ||
		has_multiple_inq_on_dsci(irq_ptr);
}

void clear_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	if (shared_ind(irq_ptr))
		return;
	xchg(irq_ptr->dsci, 0);
}

/* returns 1 iff this irq owns a private dsci and it is currently set */
int test_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;
	if (shared_ind(irq_ptr))
		return 0;
	if (*irq_ptr->dsci)
		return 1;
	else
		return 0;
}

/* atomically fetch-and-clear the shared indicator (0 when unused) */
static inline u32 clear_shared_ind(void)
{
	if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
		return 0;
	return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}

static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq, q, i) {
		if (!references_shared_dsci(irq) &&
		    has_multiple_inq_on_dsci(irq))
			xchg(q->irq_ptr->dsci, 0);

		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}

			/* avoid dsci clear here, done after processing */
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			if (!shared_ind(q->irq_ptr))
				xchg(q->irq_ptr->dsci, 0);

			/*
			 * Call inbound processing but not directly
			 * since that could starve other thinint queues.
			 */
			tasklet_schedule(&q->tasklet);
		}
	}
}

/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @airq: pointer to the registered adapter interrupt descriptor
 */
static void tiqdio_thinint_handler(struct airq_struct *airq)
{
	u32 si_used = clear_shared_ind();
	struct qdio_q *q;

	last_ai_time = S390_lowcore.int_clock;
	inc_irq_stat(IRQIO_QAI);

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	/* check for work on all inbound thinint queues */
	list_for_each_entry_rcu(q, &tiq_list, entry) {
		struct qdio_irq *irq;

		/* only process queues from changed sets */
		irq = q->irq_ptr;
		if (unlikely(references_shared_dsci(irq))) {
			if (!si_used)
				continue;
		} else if (!*irq->dsci)
			continue;

		tiqdio_call_inq_handlers(irq);

		qperf_inc(q, adapter_int);
	}
	rcu_read_unlock();
}

/* program (or reset, when @reset != 0) the subchannel's indicators via CHSC */
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
	u64 summary_indicator_addr, subchannel_indicator_addr;
	int rc;

	if (reset) {
		summary_indicator_addr = 0;
		subchannel_indicator_addr = 0;
	} else {
		summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
		subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
	}

	rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
		       subchannel_indicator_addr);
	if (rc) {
		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
			  scssc->response.code);
		goto out;
	}

	DBF_EVENT("setscind");
	DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
	DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
out:
	return rc;
}

/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	q_indicators = kzalloc(sizeof(struct indicator_t) *
			       TIQDIO_NR_INDICATORS,
			       GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}

void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}

int __init tiqdio_register_thinints(void)
{
	int rc;

	rc = register_adapter_interrupt(&tiqdio_airq);
	if (rc) {
		DBF_EVENT("RTI:%x", rc);
		return rc;
	}
	return 0;
}

int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;
	return set_subchannel_ind(irq_ptr, 0);
}

void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}

void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	set_subchannel_ind(irq_ptr, 1);
	put_indicator(irq_ptr->dsci);
}

void __exit tiqdio_unregister_thinints(void)
{
	WARN_ON(!list_empty(&tiq_list));
	unregister_adapter_interrupt(&tiqdio_airq);
}
gpl-2.0
Capful/android_kernel_htc_msm8660
drivers/hwmon/ltc4245.c
3473
16437
/*
 * Driver for Linear Technology LTC4245 I2C Multiple Supply Hot Swap Controller
 *
 * Copyright (C) 2008 Ira W. Snyder <iws@ovro.caltech.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This driver is based on the ds1621 and ina209 drivers.
 *
 * Datasheet:
 * http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1140,P19392,D13517
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c/ltc4245.h>

/* Here are names of the chip's registers (a.k.a. commands) */
enum ltc4245_cmd {
	LTC4245_STATUS		= 0x00, /* readonly */
	LTC4245_ALERT		= 0x01,
	LTC4245_CONTROL		= 0x02,
	LTC4245_ON		= 0x03,
	LTC4245_FAULT1		= 0x04,
	LTC4245_FAULT2		= 0x05,
	LTC4245_GPIO		= 0x06,
	LTC4245_ADCADR		= 0x07,

	LTC4245_12VIN		= 0x10,
	LTC4245_12VSENSE	= 0x11,
	LTC4245_12VOUT		= 0x12,
	LTC4245_5VIN		= 0x13,
	LTC4245_5VSENSE		= 0x14,
	LTC4245_5VOUT		= 0x15,
	LTC4245_3VIN		= 0x16,
	LTC4245_3VSENSE		= 0x17,
	LTC4245_3VOUT		= 0x18,
	LTC4245_VEEIN		= 0x19,
	LTC4245_VEESENSE	= 0x1a,
	LTC4245_VEEOUT		= 0x1b,
	LTC4245_GPIOADC		= 0x1c,
};

/* Per-client driver state, cached register shadow included. */
struct ltc4245_data {
	struct device *hwmon_dev;

	struct mutex update_lock;
	bool valid;
	unsigned long last_updated; /* in jiffies */

	/* Control registers */
	u8 cregs[0x08];

	/* Voltage registers */
	u8 vregs[0x0d];

	/* GPIO ADC registers */
	bool use_extra_gpios;
	int gpios[3];
};

/*
 * Update the readings from the GPIO pins. If the driver has been configured to
 * sample all GPIO's as analog voltages, a round-robin sampling method is used.
 * Otherwise, only the configured GPIO pin is sampled.
 *
 * LOCKING: must hold data->update_lock
 */
static void ltc4245_update_gpios(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct ltc4245_data *data = i2c_get_clientdata(client);
	u8 gpio_curr, gpio_next, gpio_reg;
	int i;

	/* no extra gpio support, we're basically done */
	if (!data->use_extra_gpios) {
		data->gpios[0] = data->vregs[LTC4245_GPIOADC - 0x10];
		return;
	}

	/*
	 * If the last reading was too long ago, then we mark all old GPIO
	 * readings as stale by setting them to -EAGAIN
	 */
	if (time_after(jiffies, data->last_updated + 5 * HZ)) {
		dev_dbg(&client->dev, "Marking GPIOs invalid\n");
		for (i = 0; i < ARRAY_SIZE(data->gpios); i++)
			data->gpios[i] = -EAGAIN;
	}

	/*
	 * Get the current GPIO pin
	 *
	 * The datasheet calls these GPIO[1-3], but we'll calculate the zero
	 * based array index instead, and call them GPIO[0-2]. This is much
	 * easier to think about.
	 */
	gpio_curr = (data->cregs[LTC4245_GPIO] & 0xc0) >> 6;
	if (gpio_curr > 0)
		gpio_curr -= 1;

	/* Read the GPIO voltage from the GPIOADC register */
	data->gpios[gpio_curr] = data->vregs[LTC4245_GPIOADC - 0x10];

	/* Find the next GPIO pin to read */
	gpio_next = (gpio_curr + 1) % ARRAY_SIZE(data->gpios);

	/*
	 * Calculate the correct setting for the GPIO register so it will
	 * sample the next GPIO pin
	 */
	gpio_reg = (data->cregs[LTC4245_GPIO] & 0x3f) | ((gpio_next + 1) << 6);

	/* Update the GPIO register */
	i2c_smbus_write_byte_data(client, LTC4245_GPIO, gpio_reg);

	/* Update saved data */
	data->cregs[LTC4245_GPIO] = gpio_reg;
}

/*
 * Refresh the cached register shadow from the chip, at most once per
 * second; failed SMBus reads are cached as 0.
 */
static struct ltc4245_data *ltc4245_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct ltc4245_data *data = i2c_get_clientdata(client);
	s32 val;
	int i;

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {

		dev_dbg(&client->dev, "Starting ltc4245 update\n");

		/* Read control registers -- 0x00 to 0x07 */
		for (i = 0; i < ARRAY_SIZE(data->cregs); i++) {
			val = i2c_smbus_read_byte_data(client, i);
			if (unlikely(val < 0))
				data->cregs[i] = 0;
			else
				data->cregs[i] = val;
		}

		/* Read voltage registers -- 0x10 to 0x1c */
		for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
			val = i2c_smbus_read_byte_data(client, i+0x10);
			if (unlikely(val < 0))
				data->vregs[i] = 0;
			else
				data->vregs[i] = val;
		}

		/* Update GPIO readings */
		ltc4245_update_gpios(dev);

		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

	return data;
}

/*
 * Return the voltage from the given register in millivolts.
 * NOTE(review): for VEEIN/VEEOUT the negative product is stored in a u32
 * before being returned as int — it round-trips on the usual two's
 * complement targets, but a signed local would be cleaner; confirm before
 * changing.
 */
static int ltc4245_get_voltage(struct device *dev, u8 reg)
{
	struct ltc4245_data *data = ltc4245_update_device(dev);
	const u8 regval = data->vregs[reg - 0x10];
	u32 voltage = 0;

	switch (reg) {
	case LTC4245_12VIN:
	case LTC4245_12VOUT:
		voltage = regval * 55;
		break;
	case LTC4245_5VIN:
	case LTC4245_5VOUT:
		voltage = regval * 22;
		break;
	case LTC4245_3VIN:
	case LTC4245_3VOUT:
		voltage = regval * 15;
		break;
	case LTC4245_VEEIN:
	case LTC4245_VEEOUT:
		voltage = regval * -55;
		break;
	case LTC4245_GPIOADC:
		voltage = regval * 10;
		break;
	default:
		/* If we get here, the developer messed up */
		WARN_ON_ONCE(1);
		break;
	}

	return voltage;
}

/* Return the current in the given sense register in milliAmperes */
static unsigned int ltc4245_get_current(struct device *dev, u8 reg)
{
	struct ltc4245_data *data = ltc4245_update_device(dev);
	const u8 regval = data->vregs[reg - 0x10];
	unsigned int voltage;
	unsigned int curr;

	/* The strange looking conversions that follow are fixed-point
	 * math, since we cannot do floating point in the kernel.
	 *
	 * Step 1: convert sense register to microVolts
	 * Step 2: convert voltage to milliAmperes
	 *
	 * If you play around with the V=IR equation, you come up with
	 * the following: X uV / Y mOhm == Z mA
	 *
	 * With the resistors that are fractions of a milliOhm, we multiply
	 * the voltage and resistance by 10, to shift the decimal point.
	 * Now we can use the normal division operator again.
	 */

	switch (reg) {
	case LTC4245_12VSENSE:
		voltage = regval * 250; /* voltage in uV */
		curr = voltage / 50; /* sense resistor 50 mOhm */
		break;
	case LTC4245_5VSENSE:
		voltage = regval * 125; /* voltage in uV */
		curr = (voltage * 10) / 35; /* sense resistor 3.5 mOhm */
		break;
	case LTC4245_3VSENSE:
		voltage = regval * 125; /* voltage in uV */
		curr = (voltage * 10) / 25; /* sense resistor 2.5 mOhm */
		break;
	case LTC4245_VEESENSE:
		voltage = regval * 250; /* voltage in uV */
		curr = voltage / 100; /* sense resistor 100 mOhm */
		break;
	default:
		/* If we get here, the developer messed up */
		WARN_ON_ONCE(1);
		curr = 0;
		break;
	}

	return curr;
}

/* sysfs show: voltage in millivolts */
static ssize_t ltc4245_show_voltage(struct device *dev,
				    struct device_attribute *da,
				    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	const int voltage = ltc4245_get_voltage(dev, attr->index);

	return snprintf(buf, PAGE_SIZE, "%d\n", voltage);
}

/* sysfs show: current in milliamperes */
static ssize_t ltc4245_show_current(struct device *dev,
				    struct device_attribute *da,
				    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	const unsigned int curr = ltc4245_get_current(dev, attr->index);

	return snprintf(buf, PAGE_SIZE, "%u\n", curr);
}

/* sysfs show: power derived from a sense register and its output rail */
static ssize_t ltc4245_show_power(struct device *dev,
				  struct device_attribute *da,
				  char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	const unsigned int curr = ltc4245_get_current(dev, attr->index);
	const int output_voltage = ltc4245_get_voltage(dev, attr->index+1);

	/* current in mA * voltage in mV == power in uW */
	const unsigned int power = abs(output_voltage * curr);

	return snprintf(buf, PAGE_SIZE, "%u\n", power);
}

/* sysfs show: one alarm bit out of a cached fault register */
static ssize_t ltc4245_show_alarm(struct device *dev,
					  struct device_attribute *da,
					  char *buf)
{
	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
	struct ltc4245_data *data = ltc4245_update_device(dev);
	const u8 reg = data->cregs[attr->index];
	const u32 mask = attr->nr;

	return snprintf(buf, PAGE_SIZE, "%u\n", (reg & mask) ? 1 : 0);
}

/* sysfs show: GPIO pin voltage in millivolts (negative errno when stale) */
static ssize_t ltc4245_show_gpio(struct device *dev,
				 struct device_attribute *da,
				 char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct ltc4245_data *data = ltc4245_update_device(dev);
	int val = data->gpios[attr->index];

	/* handle stale GPIO's */
	if (val < 0)
		return val;

	/* Convert to millivolts and print */
	return snprintf(buf, PAGE_SIZE, "%u\n", val * 10);
}

/* These macros are used below in constructing device attribute objects
 * for use with sysfs_create_group() to make a sysfs device file
 * for each register.
 */

#define LTC4245_VOLTAGE(name, ltc4245_cmd_idx) \
	static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
	ltc4245_show_voltage, NULL, ltc4245_cmd_idx)

#define LTC4245_CURRENT(name, ltc4245_cmd_idx) \
	static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
	ltc4245_show_current, NULL, ltc4245_cmd_idx)

#define LTC4245_POWER(name, ltc4245_cmd_idx) \
	static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
	ltc4245_show_power, NULL, ltc4245_cmd_idx)

#define LTC4245_ALARM(name, mask, reg) \
	static SENSOR_DEVICE_ATTR_2(name, S_IRUGO, \
	ltc4245_show_alarm, NULL, (mask), reg)

#define LTC4245_GPIO_VOLTAGE(name, gpio_num) \
	static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
	ltc4245_show_gpio, NULL, gpio_num)

/* Construct a sensor_device_attribute structure for each register */

/* Input voltages */
LTC4245_VOLTAGE(in1_input, LTC4245_12VIN);
LTC4245_VOLTAGE(in2_input, LTC4245_5VIN);
LTC4245_VOLTAGE(in3_input, LTC4245_3VIN);
LTC4245_VOLTAGE(in4_input, LTC4245_VEEIN);

/* Input undervoltage alarms */
LTC4245_ALARM(in1_min_alarm, (1 << 0), LTC4245_FAULT1);
LTC4245_ALARM(in2_min_alarm, (1 << 1), LTC4245_FAULT1);
LTC4245_ALARM(in3_min_alarm, (1 << 2), LTC4245_FAULT1);
LTC4245_ALARM(in4_min_alarm, (1 << 3), LTC4245_FAULT1);

/* Currents (via sense resistor) */
LTC4245_CURRENT(curr1_input, LTC4245_12VSENSE);
LTC4245_CURRENT(curr2_input, LTC4245_5VSENSE);
LTC4245_CURRENT(curr3_input, LTC4245_3VSENSE);
LTC4245_CURRENT(curr4_input, LTC4245_VEESENSE);

/* Overcurrent alarms */
LTC4245_ALARM(curr1_max_alarm, (1 << 4), LTC4245_FAULT1);
LTC4245_ALARM(curr2_max_alarm, (1 << 5), LTC4245_FAULT1);
LTC4245_ALARM(curr3_max_alarm, (1 << 6), LTC4245_FAULT1);
LTC4245_ALARM(curr4_max_alarm, (1 << 7), LTC4245_FAULT1);

/* Output voltages */
LTC4245_VOLTAGE(in5_input, LTC4245_12VOUT);
LTC4245_VOLTAGE(in6_input, LTC4245_5VOUT);
LTC4245_VOLTAGE(in7_input, LTC4245_3VOUT);
LTC4245_VOLTAGE(in8_input, LTC4245_VEEOUT);

/* Power Bad alarms */
LTC4245_ALARM(in5_min_alarm, (1 << 0), LTC4245_FAULT2);
LTC4245_ALARM(in6_min_alarm, (1 << 1), LTC4245_FAULT2);
LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2);
LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2);

/* GPIO voltages */
LTC4245_GPIO_VOLTAGE(in9_input, 0);
LTC4245_GPIO_VOLTAGE(in10_input, 1);
LTC4245_GPIO_VOLTAGE(in11_input, 2);

/* Power Consumption (virtual) */
LTC4245_POWER(power1_input, LTC4245_12VSENSE);
LTC4245_POWER(power2_input, LTC4245_5VSENSE);
LTC4245_POWER(power3_input, LTC4245_3VSENSE);
LTC4245_POWER(power4_input, LTC4245_VEESENSE);

/* Finally, construct an array of pointers to members of the above objects,
 * as required for sysfs_create_group()
 */
static struct attribute *ltc4245_std_attributes[] = {
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in2_input.dev_attr.attr,
	&sensor_dev_attr_in3_input.dev_attr.attr,
	&sensor_dev_attr_in4_input.dev_attr.attr,

	&sensor_dev_attr_in1_min_alarm.dev_attr.attr,
	&sensor_dev_attr_in2_min_alarm.dev_attr.attr,
	&sensor_dev_attr_in3_min_alarm.dev_attr.attr,
	&sensor_dev_attr_in4_min_alarm.dev_attr.attr,

	&sensor_dev_attr_curr1_input.dev_attr.attr,
	&sensor_dev_attr_curr2_input.dev_attr.attr,
	&sensor_dev_attr_curr3_input.dev_attr.attr,
	&sensor_dev_attr_curr4_input.dev_attr.attr,

	&sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
	&sensor_dev_attr_curr2_max_alarm.dev_attr.attr,
	&sensor_dev_attr_curr3_max_alarm.dev_attr.attr,
	&sensor_dev_attr_curr4_max_alarm.dev_attr.attr,

	&sensor_dev_attr_in5_input.dev_attr.attr,
	&sensor_dev_attr_in6_input.dev_attr.attr,
	&sensor_dev_attr_in7_input.dev_attr.attr,
	&sensor_dev_attr_in8_input.dev_attr.attr,

	&sensor_dev_attr_in5_min_alarm.dev_attr.attr,
	&sensor_dev_attr_in6_min_alarm.dev_attr.attr,
	&sensor_dev_attr_in7_min_alarm.dev_attr.attr,
	&sensor_dev_attr_in8_min_alarm.dev_attr.attr,

	&sensor_dev_attr_in9_input.dev_attr.attr,

	&sensor_dev_attr_power1_input.dev_attr.attr,
	&sensor_dev_attr_power2_input.dev_attr.attr,
	&sensor_dev_attr_power3_input.dev_attr.attr,
	&sensor_dev_attr_power4_input.dev_attr.attr,

	NULL,
};

/* in10/in11 only exist when extra-GPIO sampling is configured */
static struct attribute *ltc4245_gpio_attributes[] = {
	&sensor_dev_attr_in10_input.dev_attr.attr,
	&sensor_dev_attr_in11_input.dev_attr.attr,
	NULL,
};

static const struct attribute_group ltc4245_std_group = {
	.attrs = ltc4245_std_attributes,
};

static const struct attribute_group ltc4245_gpio_group = {
	.attrs = ltc4245_gpio_attributes,
};

/* Register the standard group and, when configured, the gpio group. */
static int ltc4245_sysfs_create_groups(struct i2c_client *client)
{
	struct ltc4245_data *data = i2c_get_clientdata(client);
	struct device *dev = &client->dev;
	int ret;

	/* register the standard sysfs attributes */
	ret = sysfs_create_group(&dev->kobj, &ltc4245_std_group);
	if (ret) {
		dev_err(dev, "unable to register standard attributes\n");
		return ret;
	}

	/* if we're using the extra gpio support, register it's attributes */
	if (data->use_extra_gpios) {
		ret = sysfs_create_group(&dev->kobj, &ltc4245_gpio_group);
		if (ret) {
			dev_err(dev, "unable to register gpio attributes\n");
			sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
			return ret;
		}
	}

	return 0;
}

static void ltc4245_sysfs_remove_groups(struct i2c_client *client)
{
	struct ltc4245_data *data = i2c_get_clientdata(client);
	struct device *dev = &client->dev;

	if (data->use_extra_gpios)
		sysfs_remove_group(&dev->kobj, &ltc4245_gpio_group);

	sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
}

/* Decide GPIO mode from platform data first, then the device tree. */
static bool ltc4245_use_extra_gpios(struct i2c_client *client)
{
	struct ltc4245_platform_data *pdata = dev_get_platdata(&client->dev);
#ifdef CONFIG_OF
	struct device_node *np = client->dev.of_node;
#endif

	/* prefer platform data */
	if (pdata)
		return pdata->use_extra_gpios;

#ifdef CONFIG_OF
	/* fallback on OF */
	if (of_find_property(np, "ltc4245,use-extra-gpios", NULL))
		return true;
#endif

	return false;
}

static int ltc4245_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct i2c_adapter *adapter = client->adapter;
	struct ltc4245_data *data;
	int ret;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto out_kzalloc;
	}

	i2c_set_clientdata(client, data);
	mutex_init(&data->update_lock);
	data->use_extra_gpios = ltc4245_use_extra_gpios(client);

	/* Initialize the LTC4245 chip: clear any latched faults */
	i2c_smbus_write_byte_data(client, LTC4245_FAULT1, 0x00);
	i2c_smbus_write_byte_data(client, LTC4245_FAULT2, 0x00);

	/* Register sysfs hooks */
	ret = ltc4245_sysfs_create_groups(client);
	if (ret)
		goto out_sysfs_create_groups;

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		ret = PTR_ERR(data->hwmon_dev);
		goto out_hwmon_device_register;
	}

	return 0;

out_hwmon_device_register:
	ltc4245_sysfs_remove_groups(client);
out_sysfs_create_groups:
	kfree(data);
out_kzalloc:
	return ret;
}

static int ltc4245_remove(struct i2c_client *client)
{
	struct ltc4245_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	ltc4245_sysfs_remove_groups(client);
	kfree(data);

	return 0;
}

static const struct i2c_device_id ltc4245_id[] = {
	{ "ltc4245", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, ltc4245_id);

/* This is the driver that will be inserted */
static struct i2c_driver ltc4245_driver = {
	.driver = {
		.name	= "ltc4245",
	},
	.probe		= ltc4245_probe,
	.remove		= ltc4245_remove,
	.id_table	= ltc4245_id,
};

static int __init ltc4245_init(void)
{
	return i2c_add_driver(&ltc4245_driver);
}

static void __exit ltc4245_exit(void)
{
	i2c_del_driver(&ltc4245_driver);
}

MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("LTC4245 driver");
MODULE_LICENSE("GPL");

module_init(ltc4245_init);
module_exit(ltc4245_exit);
gpl-2.0
skritchz/msm_2.6.38
drivers/pcmcia/sa11xx_base.c
3473
7675
/*====================================================================== Device driver for the PCMCIA control functionality of StrongARM SA-1100 microprocessors. The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is John G. Dorsey <john+@cs.cmu.edu>. Portions created by John G. Dorsey are Copyright (C) 1999 John G. Dorsey. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. ======================================================================*/ #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/slab.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/system.h> #include "soc_common.h" #include "sa11xx_base.h" /* * sa1100_pcmcia_default_mecr_timing * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * * Calculate MECR clock wait states for given CPU clock * speed and command wait state. 
This function can be over- * written by a board specific version. * * The default is to simply calculate the BS values as specified in * the INTEL SA1100 development manual * "Expansion Memory (PCMCIA) Configuration Register (MECR)" * that's section 10.2.5 in _my_ version of the manual ;) */ static unsigned int sa1100_pcmcia_default_mecr_timing(struct soc_pcmcia_socket *skt, unsigned int cpu_speed, unsigned int cmd_time) { return sa1100_pcmcia_mecr_bs(cmd_time, cpu_speed); } /* sa1100_pcmcia_set_mecr() * ^^^^^^^^^^^^^^^^^^^^^^^^ * * set MECR value for socket <sock> based on this sockets * io, mem and attribute space access speed. * Call board specific BS value calculation to allow boards * to tweak the BS values. */ static int sa1100_pcmcia_set_mecr(struct soc_pcmcia_socket *skt, unsigned int cpu_clock) { struct soc_pcmcia_timing timing; u32 mecr, old_mecr; unsigned long flags; unsigned int bs_io, bs_mem, bs_attr; soc_common_pcmcia_get_timing(skt, &timing); bs_io = skt->ops->get_timing(skt, cpu_clock, timing.io); bs_mem = skt->ops->get_timing(skt, cpu_clock, timing.mem); bs_attr = skt->ops->get_timing(skt, cpu_clock, timing.attr); local_irq_save(flags); old_mecr = mecr = MECR; MECR_FAST_SET(mecr, skt->nr, 0); MECR_BSIO_SET(mecr, skt->nr, bs_io); MECR_BSA_SET(mecr, skt->nr, bs_attr); MECR_BSM_SET(mecr, skt->nr, bs_mem); if (old_mecr != mecr) MECR = mecr; local_irq_restore(flags); debug(skt, 2, "FAST %X BSM %X BSA %X BSIO %X\n", MECR_FAST_GET(mecr, skt->nr), MECR_BSM_GET(mecr, skt->nr), MECR_BSA_GET(mecr, skt->nr), MECR_BSIO_GET(mecr, skt->nr)); return 0; } #ifdef CONFIG_CPU_FREQ static int sa1100_pcmcia_frequency_change(struct soc_pcmcia_socket *skt, unsigned long val, struct cpufreq_freqs *freqs) { switch (val) { case CPUFREQ_PRECHANGE: if (freqs->new > freqs->old) sa1100_pcmcia_set_mecr(skt, freqs->new); break; case CPUFREQ_POSTCHANGE: if (freqs->new < freqs->old) sa1100_pcmcia_set_mecr(skt, freqs->new); break; case CPUFREQ_RESUMECHANGE: 
sa1100_pcmcia_set_mecr(skt, freqs->new); break; } return 0; } #endif static int sa1100_pcmcia_set_timing(struct soc_pcmcia_socket *skt) { return sa1100_pcmcia_set_mecr(skt, cpufreq_get(0)); } static int sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf) { struct soc_pcmcia_timing timing; unsigned int clock = cpufreq_get(0); unsigned long mecr = MECR; char *p = buf; soc_common_pcmcia_get_timing(skt, &timing); p+=sprintf(p, "I/O : %u (%u)\n", timing.io, sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr))); p+=sprintf(p, "attribute: %u (%u)\n", timing.attr, sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr))); p+=sprintf(p, "common : %u (%u)\n", timing.mem, sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr))); return p - buf; } static const char *skt_names[] = { "PCMCIA socket 0", "PCMCIA socket 1", }; #define SKT_DEV_INFO_SIZE(n) \ (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket)) int sa11xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt) { skt->res_skt.start = _PCMCIA(skt->nr); skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1; skt->res_skt.name = skt_names[skt->nr]; skt->res_skt.flags = IORESOURCE_MEM; skt->res_io.start = _PCMCIAIO(skt->nr); skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1; skt->res_io.name = "io"; skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY; skt->res_mem.start = _PCMCIAMem(skt->nr); skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1; skt->res_mem.name = "memory"; skt->res_mem.flags = IORESOURCE_MEM; skt->res_attr.start = _PCMCIAAttr(skt->nr); skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1; skt->res_attr.name = "attribute"; skt->res_attr.flags = IORESOURCE_MEM; return soc_pcmcia_add_one(skt); } EXPORT_SYMBOL(sa11xx_drv_pcmcia_add_one); void sa11xx_drv_pcmcia_ops(struct pcmcia_low_level *ops) { /* * set default MECR calculation if the board specific * code did not specify one... 
*/ if (!ops->get_timing) ops->get_timing = sa1100_pcmcia_default_mecr_timing; /* Provide our SA11x0 specific timing routines. */ ops->set_timing = sa1100_pcmcia_set_timing; ops->show_timing = sa1100_pcmcia_show_timing; #ifdef CONFIG_CPU_FREQ ops->frequency_change = sa1100_pcmcia_frequency_change; #endif } EXPORT_SYMBOL(sa11xx_drv_pcmcia_ops); int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr) { struct skt_dev_info *sinfo; struct soc_pcmcia_socket *skt; int i, ret = 0; sa11xx_drv_pcmcia_ops(ops); sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL); if (!sinfo) return -ENOMEM; sinfo->nskt = nr; /* Initialize processor specific parameters */ for (i = 0; i < nr; i++) { skt = &sinfo->skt[i]; skt->nr = first + i; skt->ops = ops; skt->socket.owner = ops->owner; skt->socket.dev.parent = dev; skt->socket.pci_irq = NO_IRQ; ret = sa11xx_drv_pcmcia_add_one(skt); if (ret) break; } if (ret) { while (--i >= 0) soc_pcmcia_remove_one(&sinfo->skt[i]); kfree(sinfo); } else { dev_set_drvdata(dev, sinfo); } return ret; } EXPORT_SYMBOL(sa11xx_drv_pcmcia_probe); static int __init sa11xx_pcmcia_init(void) { return 0; } fs_initcall(sa11xx_pcmcia_init); static void __exit sa11xx_pcmcia_exit(void) {} module_exit(sa11xx_pcmcia_exit); MODULE_AUTHOR("John Dorsey <john+@cs.cmu.edu>"); MODULE_DESCRIPTION("Linux PCMCIA Card Services: SA-11xx core socket driver"); MODULE_LICENSE("Dual MPL/GPL");
gpl-2.0
xInterlopeRx/android_kernel_samsung_lt02ltespr
drivers/tty/hvc/hvc_console.c
3985
23665
/* * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM * Copyright (C) 2004 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. * Copyright (C) 2004 IBM Corporation * * Additional Author(s): * Ryan S. Arnold <rsa@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/console.h> #include <linux/cpumask.h> #include <linux/init.h> #include <linux/kbd_kern.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/module.h> #include <linux/major.h> #include <linux/sysrq.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/freezer.h> #include <linux/slab.h> #include <linux/serial_core.h> #include <asm/uaccess.h> #include "hvc_console.h" #define HVC_MAJOR 229 #define HVC_MINOR 0 /* * Wait this long per iteration while trying to push buffered data to the * hypervisor before allowing the tty to complete a close operation. */ #define HVC_CLOSE_WAIT (HZ/100) /* 1/10 of a second */ /* * These sizes are most efficient for vio, because they are the * native transfer size. We could make them selectable in the * future to better deal with backends that want other buffer sizes. 
*/ #define N_OUTBUF 16 #define N_INBUF 16 #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) static struct tty_driver *hvc_driver; static struct task_struct *hvc_task; /* Picks up late kicks after list walk but before schedule() */ static int hvc_kicked; static int hvc_init(void); #ifdef CONFIG_MAGIC_SYSRQ static int sysrq_pressed; #endif /* dynamic list of hvc_struct instances */ static LIST_HEAD(hvc_structs); /* * Protect the list of hvc_struct instances from inserts and removals during * list traversal. */ static DEFINE_SPINLOCK(hvc_structs_lock); /* * This value is used to assign a tty->index value to a hvc_struct based * upon order of exposure via hvc_probe(), when we can not match it to * a console candidate registered with hvc_instantiate(). */ static int last_hvc = -1; /* * Do not call this function with either the hvc_structs_lock or the hvc_struct * lock held. If successful, this function increments the kref reference * count against the target hvc_struct so it should be released when finished. */ static struct hvc_struct *hvc_get_by_index(int index) { struct hvc_struct *hp; unsigned long flags; spin_lock(&hvc_structs_lock); list_for_each_entry(hp, &hvc_structs, next) { spin_lock_irqsave(&hp->lock, flags); if (hp->index == index) { kref_get(&hp->kref); spin_unlock_irqrestore(&hp->lock, flags); spin_unlock(&hvc_structs_lock); return hp; } spin_unlock_irqrestore(&hp->lock, flags); } hp = NULL; spin_unlock(&hvc_structs_lock); return hp; } /* * Initial console vtermnos for console API usage prior to full console * initialization. Any vty adapter outside this range will not have usable * console interfaces but can still be used as a tty device. This has to be * static because kmalloc will not work during early console init. */ static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES]; static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] = {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1}; /* * Console APIs, NOT TTY. 
These APIs are available immediately when * hvc_console_setup() finds adapters. */ static void hvc_console_print(struct console *co, const char *b, unsigned count) { char c[N_OUTBUF] __ALIGNED__; unsigned i = 0, n = 0; int r, donecr = 0, index = co->index; /* Console access attempt outside of acceptable console range. */ if (index >= MAX_NR_HVC_CONSOLES) return; /* This console adapter was removed so it is not usable. */ if (vtermnos[index] == -1) return; while (count > 0 || i > 0) { if (count > 0 && i < sizeof(c)) { if (b[n] == '\n' && !donecr) { c[i++] = '\r'; donecr = 1; } else { c[i++] = b[n++]; donecr = 0; --count; } } else { r = cons_ops[index]->put_chars(vtermnos[index], c, i); if (r <= 0) { /* throw away characters on error * but spin in case of -EAGAIN */ if (r != -EAGAIN) i = 0; } else if (r > 0) { i -= r; if (i > 0) memmove(c, c+r, i); } } } } static struct tty_driver *hvc_console_device(struct console *c, int *index) { if (vtermnos[c->index] == -1) return NULL; *index = c->index; return hvc_driver; } static int __init hvc_console_setup(struct console *co, char *options) { if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES) return -ENODEV; if (vtermnos[co->index] == -1) return -ENODEV; return 0; } static struct console hvc_console = { .name = "hvc", .write = hvc_console_print, .device = hvc_console_device, .setup = hvc_console_setup, .flags = CON_PRINTBUFFER, .index = -1, }; /* * Early console initialization. Precedes driver initialization. * * (1) we are first, and the user specified another driver * -- index will remain -1 * (2) we are first and the user specified no driver * -- index will be set to 0, then we will fail setup. 
* (3) we are first and the user specified our driver * -- index will be set to user specified driver, and we will fail * (4) we are after driver, and this initcall will register us * -- if the user didn't specify a driver then the console will match * * Note that for cases 2 and 3, we will match later when the io driver * calls hvc_instantiate() and call register again. */ static int __init hvc_console_init(void) { register_console(&hvc_console); return 0; } console_initcall(hvc_console_init); /* callback when the kboject ref count reaches zero. */ static void destroy_hvc_struct(struct kref *kref) { struct hvc_struct *hp = container_of(kref, struct hvc_struct, kref); unsigned long flags; spin_lock(&hvc_structs_lock); spin_lock_irqsave(&hp->lock, flags); list_del(&(hp->next)); spin_unlock_irqrestore(&hp->lock, flags); spin_unlock(&hvc_structs_lock); kfree(hp); } /* * hvc_instantiate() is an early console discovery method which locates * consoles * prior to the vio subsystem discovering them. Hotplugged * vty adapters do NOT get an hvc_instantiate() callback since they * appear after early console init. */ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) { struct hvc_struct *hp; if (index < 0 || index >= MAX_NR_HVC_CONSOLES) return -1; if (vtermnos[index] != -1) return -1; /* make sure no no tty has been registered in this index */ hp = hvc_get_by_index(index); if (hp) { kref_put(&hp->kref, destroy_hvc_struct); return -1; } vtermnos[index] = vtermno; cons_ops[index] = ops; /* reserve all indices up to and including this index */ if (last_hvc < index) last_hvc = index; /* if this index is what the user requested, then register * now (setup won't fail at this point). It's ok to just * call register again if previously .setup failed. 
*/ if (index == hvc_console.index) register_console(&hvc_console); return 0; } EXPORT_SYMBOL_GPL(hvc_instantiate); /* Wake the sleeping khvcd */ void hvc_kick(void) { hvc_kicked = 1; wake_up_process(hvc_task); } EXPORT_SYMBOL_GPL(hvc_kick); static void hvc_unthrottle(struct tty_struct *tty) { hvc_kick(); } /* * The TTY interface won't be used until after the vio layer has exposed the vty * adapter to the kernel. */ static int hvc_open(struct tty_struct *tty, struct file * filp) { struct hvc_struct *hp; unsigned long flags; int rc = 0; /* Auto increments kref reference if found. */ if (!(hp = hvc_get_by_index(tty->index))) return -ENODEV; spin_lock_irqsave(&hp->lock, flags); /* Check and then increment for fast path open. */ if (hp->count++ > 0) { tty_kref_get(tty); spin_unlock_irqrestore(&hp->lock, flags); hvc_kick(); return 0; } /* else count == 0 */ tty->driver_data = hp; hp->tty = tty_kref_get(tty); spin_unlock_irqrestore(&hp->lock, flags); if (hp->ops->notifier_add) rc = hp->ops->notifier_add(hp, hp->data); /* * If the notifier fails we return an error. The tty layer * will call hvc_close() after a failed open but we don't want to clean * up there so we'll clean up here and clear out the previously set * tty fields and return the kref reference. */ if (rc) { spin_lock_irqsave(&hp->lock, flags); hp->tty = NULL; spin_unlock_irqrestore(&hp->lock, flags); tty_kref_put(tty); tty->driver_data = NULL; kref_put(&hp->kref, destroy_hvc_struct); printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc); } /* Force wakeup of the polling thread */ hvc_kick(); return rc; } static void hvc_close(struct tty_struct *tty, struct file * filp) { struct hvc_struct *hp; unsigned long flags; if (tty_hung_up_p(filp)) return; /* * No driver_data means that this close was issued after a failed * hvc_open by the tty layer's release_dev() function and we can just * exit cleanly because the kref reference wasn't made. 
*/ if (!tty->driver_data) return; hp = tty->driver_data; spin_lock_irqsave(&hp->lock, flags); if (--hp->count == 0) { /* We are done with the tty pointer now. */ hp->tty = NULL; spin_unlock_irqrestore(&hp->lock, flags); if (hp->ops->notifier_del) hp->ops->notifier_del(hp, hp->data); /* cancel pending tty resize work */ cancel_work_sync(&hp->tty_resize); /* * Chain calls chars_in_buffer() and returns immediately if * there is no buffered data otherwise sleeps on a wait queue * waking periodically to check chars_in_buffer(). */ tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT); } else { if (hp->count < 0) printk(KERN_ERR "hvc_close %X: oops, count is %d\n", hp->vtermno, hp->count); spin_unlock_irqrestore(&hp->lock, flags); } tty_kref_put(tty); kref_put(&hp->kref, destroy_hvc_struct); } static void hvc_hangup(struct tty_struct *tty) { struct hvc_struct *hp = tty->driver_data; unsigned long flags; int temp_open_count; if (!hp) return; /* cancel pending tty resize work */ cancel_work_sync(&hp->tty_resize); spin_lock_irqsave(&hp->lock, flags); /* * The N_TTY line discipline has problems such that in a close vs * open->hangup case this can be called after the final close so prevent * that from happening for now. */ if (hp->count <= 0) { spin_unlock_irqrestore(&hp->lock, flags); return; } temp_open_count = hp->count; hp->count = 0; hp->n_outbuf = 0; hp->tty = NULL; spin_unlock_irqrestore(&hp->lock, flags); if (hp->ops->notifier_hangup) hp->ops->notifier_hangup(hp, hp->data); while(temp_open_count) { --temp_open_count; tty_kref_put(tty); kref_put(&hp->kref, destroy_hvc_struct); } } /* * Push buffered characters whether they were just recently buffered or waiting * on a blocked hypervisor. Call this function with hp->lock held. 
*/ static int hvc_push(struct hvc_struct *hp) { int n; n = hp->ops->put_chars(hp->vtermno, hp->outbuf, hp->n_outbuf); if (n <= 0) { if (n == 0 || n == -EAGAIN) { hp->do_wakeup = 1; return 0; } /* throw away output on error; this happens when there is no session connected to the vterm. */ hp->n_outbuf = 0; } else hp->n_outbuf -= n; if (hp->n_outbuf > 0) memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf); else hp->do_wakeup = 1; return n; } static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct hvc_struct *hp = tty->driver_data; unsigned long flags; int rsize, written = 0; /* This write was probably executed during a tty close. */ if (!hp) return -EPIPE; if (hp->count <= 0) return -EIO; spin_lock_irqsave(&hp->lock, flags); /* Push pending writes */ if (hp->n_outbuf > 0) hvc_push(hp); while (count > 0 && (rsize = hp->outbuf_size - hp->n_outbuf) > 0) { if (rsize > count) rsize = count; memcpy(hp->outbuf + hp->n_outbuf, buf, rsize); count -= rsize; buf += rsize; hp->n_outbuf += rsize; written += rsize; hvc_push(hp); } spin_unlock_irqrestore(&hp->lock, flags); /* * Racy, but harmless, kick thread if there is still pending data. */ if (hp->n_outbuf) hvc_kick(); return written; } /** * hvc_set_winsz() - Resize the hvc tty terminal window. * @work: work structure. * * The routine shall not be called within an atomic context because it * might sleep. 
* * Locking: hp->lock */ static void hvc_set_winsz(struct work_struct *work) { struct hvc_struct *hp; unsigned long hvc_flags; struct tty_struct *tty; struct winsize ws; hp = container_of(work, struct hvc_struct, tty_resize); spin_lock_irqsave(&hp->lock, hvc_flags); if (!hp->tty) { spin_unlock_irqrestore(&hp->lock, hvc_flags); return; } ws = hp->ws; tty = tty_kref_get(hp->tty); spin_unlock_irqrestore(&hp->lock, hvc_flags); tty_do_resize(tty, &ws); tty_kref_put(tty); } /* * This is actually a contract between the driver and the tty layer outlining * how much write room the driver can guarantee will be sent OR BUFFERED. This * driver MUST honor the return value. */ static int hvc_write_room(struct tty_struct *tty) { struct hvc_struct *hp = tty->driver_data; if (!hp) return -1; return hp->outbuf_size - hp->n_outbuf; } static int hvc_chars_in_buffer(struct tty_struct *tty) { struct hvc_struct *hp = tty->driver_data; if (!hp) return 0; return hp->n_outbuf; } /* * timeout will vary between the MIN and MAX values defined here. By default * and during console activity we will use a default MIN_TIMEOUT of 10. When * the console is idle, we increase the timeout value on each pass through * msleep until we reach the max. This may be noticeable as a brief (average * one second) delay on the console before the console responds to input when * there has been no input for some time. 
*/ #define MIN_TIMEOUT (10) #define MAX_TIMEOUT (2000) static u32 timeout = MIN_TIMEOUT; #define HVC_POLL_READ 0x00000001 #define HVC_POLL_WRITE 0x00000002 int hvc_poll(struct hvc_struct *hp) { struct tty_struct *tty; int i, n, poll_mask = 0; char buf[N_INBUF] __ALIGNED__; unsigned long flags; int read_total = 0; int written_total = 0; spin_lock_irqsave(&hp->lock, flags); /* Push pending writes */ if (hp->n_outbuf > 0) written_total = hvc_push(hp); /* Reschedule us if still some write pending */ if (hp->n_outbuf > 0) { poll_mask |= HVC_POLL_WRITE; /* If hvc_push() was not able to write, sleep a few msecs */ timeout = (written_total) ? 0 : MIN_TIMEOUT; } /* No tty attached, just skip */ tty = tty_kref_get(hp->tty); if (tty == NULL) goto bail; /* Now check if we can get data (are we throttled ?) */ if (test_bit(TTY_THROTTLED, &tty->flags)) goto throttled; /* If we aren't notifier driven and aren't throttled, we always * request a reschedule */ if (!hp->irq_requested) poll_mask |= HVC_POLL_READ; /* Read data if any */ for (;;) { int count = tty_buffer_request_room(tty, N_INBUF); /* If flip is full, just reschedule a later read */ if (count == 0) { poll_mask |= HVC_POLL_READ; break; } n = hp->ops->get_chars(hp->vtermno, buf, count); if (n <= 0) { /* Hangup the tty when disconnected from host */ if (n == -EPIPE) { spin_unlock_irqrestore(&hp->lock, flags); tty_hangup(tty); spin_lock_irqsave(&hp->lock, flags); } else if ( n == -EAGAIN ) { /* * Some back-ends can only ensure a certain min * num of bytes read, which may be > 'count'. * Let the tty clear the flip buff to make room. 
*/ poll_mask |= HVC_POLL_READ; } break; } for (i = 0; i < n; ++i) { #ifdef CONFIG_MAGIC_SYSRQ if (hp->index == hvc_console.index) { /* Handle the SysRq Hack */ /* XXX should support a sequence */ if (buf[i] == '\x0f') { /* ^O */ /* if ^O is pressed again, reset * sysrq_pressed and flip ^O char */ sysrq_pressed = !sysrq_pressed; if (sysrq_pressed) continue; } else if (sysrq_pressed) { handle_sysrq(buf[i]); sysrq_pressed = 0; continue; } } #endif /* CONFIG_MAGIC_SYSRQ */ tty_insert_flip_char(tty, buf[i], 0); } read_total += n; } throttled: /* Wakeup write queue if necessary */ if (hp->do_wakeup) { hp->do_wakeup = 0; tty_wakeup(tty); } bail: spin_unlock_irqrestore(&hp->lock, flags); if (read_total) { /* Activity is occurring, so reset the polling backoff value to a minimum for performance. */ timeout = MIN_TIMEOUT; tty_flip_buffer_push(tty); } if (tty) tty_kref_put(tty); return poll_mask; } EXPORT_SYMBOL_GPL(hvc_poll); /** * __hvc_resize() - Update terminal window size information. * @hp: HVC console pointer * @ws: Terminal window size structure * * Stores the specified window size information in the hvc structure of @hp. * The function schedule the tty resize update. * * Locking: Locking free; the function MUST be called holding hp->lock */ void __hvc_resize(struct hvc_struct *hp, struct winsize ws) { hp->ws = ws; schedule_work(&hp->tty_resize); } EXPORT_SYMBOL_GPL(__hvc_resize); /* * This kthread is either polling or interrupt driven. This is determined by * calling hvc_poll() who determines whether a console adapter support * interrupts. 
*/ static int khvcd(void *unused) { int poll_mask; struct hvc_struct *hp; set_freezable(); do { poll_mask = 0; hvc_kicked = 0; try_to_freeze(); wmb(); if (!cpus_are_in_xmon()) { spin_lock(&hvc_structs_lock); list_for_each_entry(hp, &hvc_structs, next) { poll_mask |= hvc_poll(hp); } spin_unlock(&hvc_structs_lock); } else poll_mask |= HVC_POLL_READ; if (hvc_kicked) continue; set_current_state(TASK_INTERRUPTIBLE); if (!hvc_kicked) { if (poll_mask == 0) schedule(); else { if (timeout < MAX_TIMEOUT) timeout += (timeout >> 6) + 1; msleep_interruptible(timeout); } } __set_current_state(TASK_RUNNING); } while (!kthread_should_stop()); return 0; } static int hvc_tiocmget(struct tty_struct *tty) { struct hvc_struct *hp = tty->driver_data; if (!hp || !hp->ops->tiocmget) return -EINVAL; return hp->ops->tiocmget(hp); } static int hvc_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct hvc_struct *hp = tty->driver_data; if (!hp || !hp->ops->tiocmset) return -EINVAL; return hp->ops->tiocmset(hp, set, clear); } #ifdef CONFIG_CONSOLE_POLL int hvc_poll_init(struct tty_driver *driver, int line, char *options) { return 0; } static int hvc_poll_get_char(struct tty_driver *driver, int line) { struct tty_struct *tty = driver->ttys[0]; struct hvc_struct *hp = tty->driver_data; int n; char ch; n = hp->ops->get_chars(hp->vtermno, &ch, 1); if (n == 0) return NO_POLL_CHAR; return ch; } static void hvc_poll_put_char(struct tty_driver *driver, int line, char ch) { struct tty_struct *tty = driver->ttys[0]; struct hvc_struct *hp = tty->driver_data; int n; do { n = hp->ops->put_chars(hp->vtermno, &ch, 1); } while (n <= 0); } #endif static const struct tty_operations hvc_ops = { .open = hvc_open, .close = hvc_close, .write = hvc_write, .hangup = hvc_hangup, .unthrottle = hvc_unthrottle, .write_room = hvc_write_room, .chars_in_buffer = hvc_chars_in_buffer, .tiocmget = hvc_tiocmget, .tiocmset = hvc_tiocmset, #ifdef CONFIG_CONSOLE_POLL .poll_init = hvc_poll_init, 
.poll_get_char = hvc_poll_get_char, .poll_put_char = hvc_poll_put_char, #endif }; struct hvc_struct *hvc_alloc(uint32_t vtermno, int data, const struct hv_ops *ops, int outbuf_size) { struct hvc_struct *hp; int i; /* We wait until a driver actually comes along */ if (!hvc_driver) { int err = hvc_init(); if (err) return ERR_PTR(err); } hp = kzalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size, GFP_KERNEL); if (!hp) return ERR_PTR(-ENOMEM); hp->vtermno = vtermno; hp->data = data; hp->ops = ops; hp->outbuf_size = outbuf_size; hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))]; kref_init(&hp->kref); INIT_WORK(&hp->tty_resize, hvc_set_winsz); spin_lock_init(&hp->lock); spin_lock(&hvc_structs_lock); /* * find index to use: * see if this vterm id matches one registered for console. */ for (i=0; i < MAX_NR_HVC_CONSOLES; i++) if (vtermnos[i] == hp->vtermno && cons_ops[i] == hp->ops) break; /* no matching slot, just use a counter */ if (i >= MAX_NR_HVC_CONSOLES) i = ++last_hvc; hp->index = i; list_add_tail(&(hp->next), &hvc_structs); spin_unlock(&hvc_structs_lock); return hp; } EXPORT_SYMBOL_GPL(hvc_alloc); int hvc_remove(struct hvc_struct *hp) { unsigned long flags; struct tty_struct *tty; spin_lock_irqsave(&hp->lock, flags); tty = tty_kref_get(hp->tty); if (hp->index < MAX_NR_HVC_CONSOLES) vtermnos[hp->index] = -1; /* Don't whack hp->irq because tty_hangup() will need to free the irq. */ spin_unlock_irqrestore(&hp->lock, flags); /* * We 'put' the instance that was grabbed when the kref instance * was initialized using kref_init(). Let the last holder of this * kref cause it to be removed, which will probably be the tty_vhangup * below. */ kref_put(&hp->kref, destroy_hvc_struct); /* * This function call will auto chain call hvc_hangup. */ if (tty) { tty_vhangup(tty); tty_kref_put(tty); } return 0; } EXPORT_SYMBOL_GPL(hvc_remove); /* Driver initialization: called as soon as someone uses hvc_alloc(). 
*/ static int hvc_init(void) { struct tty_driver *drv; int err; /* We need more than hvc_count adapters due to hotplug additions. */ drv = alloc_tty_driver(HVC_ALLOC_TTY_ADAPTERS); if (!drv) { err = -ENOMEM; goto out; } drv->driver_name = "hvc"; drv->name = "hvc"; drv->major = HVC_MAJOR; drv->minor_start = HVC_MINOR; drv->type = TTY_DRIVER_TYPE_SYSTEM; drv->init_termios = tty_std_termios; drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS; tty_set_operations(drv, &hvc_ops); /* Always start the kthread because there can be hotplug vty adapters * added later. */ hvc_task = kthread_run(khvcd, NULL, "khvcd"); if (IS_ERR(hvc_task)) { printk(KERN_ERR "Couldn't create kthread for console.\n"); err = PTR_ERR(hvc_task); goto put_tty; } err = tty_register_driver(drv); if (err) { printk(KERN_ERR "Couldn't register hvc console driver\n"); goto stop_thread; } /* * Make sure tty is fully registered before allowing it to be * found by hvc_console_device. */ smp_mb(); hvc_driver = drv; return 0; stop_thread: kthread_stop(hvc_task); hvc_task = NULL; put_tty: put_tty_driver(drv); out: return err; } /* This isn't particularly necessary due to this being a console driver * but it is nice to be thorough. */ static void __exit hvc_exit(void) { if (hvc_driver) { kthread_stop(hvc_task); tty_unregister_driver(hvc_driver); /* return tty_struct instances allocated in hvc_init(). */ put_tty_driver(hvc_driver); unregister_console(&hvc_console); } } module_exit(hvc_exit);
gpl-2.0
ench0/android_kernel_samsung_hltez
drivers/media/video/ivtv/ivtv-irq.c
5009
31903
/* interrupt handling Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> Copyright (C) 2004 Chris Kennedy <c@groovy.org> Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "ivtv-driver.h" #include "ivtv-queue.h" #include "ivtv-udma.h" #include "ivtv-irq.h" #include "ivtv-mailbox.h" #include "ivtv-vbi.h" #include "ivtv-yuv.h" #include <media/v4l2-event.h> #define DMA_MAGIC_COOKIE 0x000001fe static void ivtv_dma_dec_start(struct ivtv_stream *s); static const int ivtv_stream_map[] = { IVTV_ENC_STREAM_TYPE_MPG, IVTV_ENC_STREAM_TYPE_YUV, IVTV_ENC_STREAM_TYPE_PCM, IVTV_ENC_STREAM_TYPE_VBI, }; static void ivtv_pio_work_handler(struct ivtv *itv) { struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream]; struct ivtv_buffer *buf; int i = 0; IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n"); if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS || s->vdev == NULL || !ivtv_use_pio(s)) { itv->cur_pio_stream = -1; /* trigger PIO complete user interrupt */ write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44); return; } IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name); list_for_each_entry(buf, &s->q_dma.list, list) { u32 size = s->sg_processing[i].size & 0x3ffff; /* Copy the data from the card to the buffer */ if (s->type == IVTV_DEC_STREAM_TYPE_VBI) { memcpy_fromio(buf->buf, itv->dec_mem + 
s->sg_processing[i].src - IVTV_DECODER_OFFSET, size); } else { memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size); } i++; if (i == s->sg_processing_size) break; } write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44); } void ivtv_irq_work_handler(struct kthread_work *work) { struct ivtv *itv = container_of(work, struct ivtv, irq_work); if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags)) ivtv_pio_work_handler(itv); if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags)) ivtv_vbi_work_handler(itv); if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags)) ivtv_yuv_work_handler(itv); } /* Determine the required DMA size, setup enough buffers in the predma queue and actually copy the data from the card to the buffers in case a PIO transfer is required for this stream. */ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA]) { struct ivtv *itv = s->itv; struct ivtv_buffer *buf; u32 bytes_needed = 0; u32 offset, size; u32 UVoffset = 0, UVsize = 0; int skip_bufs = s->q_predma.buffers; int idx = s->sg_pending_size; int rc; /* sanity checks */ if (s->vdev == NULL) { IVTV_DEBUG_WARN("Stream %s not started\n", s->name); return -1; } if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) { IVTV_DEBUG_WARN("Stream %s not open\n", s->name); return -1; } /* determine offset, size and PTS for the various streams */ switch (s->type) { case IVTV_ENC_STREAM_TYPE_MPG: offset = data[1]; size = data[2]; s->pending_pts = 0; break; case IVTV_ENC_STREAM_TYPE_YUV: offset = data[1]; size = data[2]; UVoffset = data[3]; UVsize = data[4]; s->pending_pts = ((u64) data[5] << 32) | data[6]; break; case IVTV_ENC_STREAM_TYPE_PCM: offset = data[1] + 12; size = data[2] - 12; s->pending_pts = read_dec(offset - 8) | ((u64)(read_dec(offset - 12)) << 32); if (itv->has_cx23415) offset += IVTV_DECODER_OFFSET; break; case IVTV_ENC_STREAM_TYPE_VBI: size = itv->vbi.enc_size * itv->vbi.fpi; offset = read_enc(itv->vbi.enc_start - 4) + 12; if 
(offset == 12) { IVTV_DEBUG_INFO("VBI offset == 0\n"); return -1; } s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32); break; case IVTV_DEC_STREAM_TYPE_VBI: size = read_dec(itv->vbi.dec_start + 4) + 8; offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start; s->pending_pts = 0; offset += IVTV_DECODER_OFFSET; break; default: /* shouldn't happen */ return -1; } /* if this is the start of the DMA then fill in the magic cookie */ if (s->sg_pending_size == 0 && ivtv_use_dma(s)) { if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM || s->type == IVTV_DEC_STREAM_TYPE_VBI)) { s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET); write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET); } else { s->pending_backup = read_enc(offset); write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset); } s->pending_offset = offset; } bytes_needed = size; if (s->type == IVTV_ENC_STREAM_TYPE_YUV) { /* The size for the Y samples needs to be rounded upwards to a multiple of the buf_size. The UV samples then start in the next buffer. */ bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size); bytes_needed += UVsize; } IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n", ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset); rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed); if (rc < 0) { /* Insufficient buffers */ IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n", bytes_needed, s->name); return -1; } if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) { IVTV_WARN("All %s stream buffers are full. 
Dropping data.\n", s->name); IVTV_WARN("Cause: the application is not reading fast enough.\n"); } s->buffers_stolen = rc; /* got the buffers, now fill in sg_pending */ buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list); memset(buf->buf, 0, 128); list_for_each_entry(buf, &s->q_predma.list, list) { if (skip_bufs-- > 0) continue; s->sg_pending[idx].dst = buf->dma_handle; s->sg_pending[idx].src = offset; s->sg_pending[idx].size = s->buf_size; buf->bytesused = min(size, s->buf_size); buf->dma_xfer_cnt = s->dma_xfer_cnt; s->q_predma.bytesused += buf->bytesused; size -= buf->bytesused; offset += s->buf_size; /* Sync SG buffers */ ivtv_buf_sync_for_device(s, buf); if (size == 0) { /* YUV */ /* process the UV section */ offset = UVoffset; size = UVsize; } idx++; } s->sg_pending_size = idx; return 0; } static void dma_post(struct ivtv_stream *s) { struct ivtv *itv = s->itv; struct ivtv_buffer *buf = NULL; struct list_head *p; u32 offset; __le32 *u32buf; int x = 0; IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? 
"PIO" : "DMA", s->name, s->dma_offset); list_for_each(p, &s->q_dma.list) { buf = list_entry(p, struct ivtv_buffer, list); u32buf = (__le32 *)buf->buf; /* Sync Buffer */ ivtv_buf_sync_for_cpu(s, buf); if (x == 0 && ivtv_use_dma(s)) { offset = s->dma_last_offset; if (u32buf[offset / 4] != DMA_MAGIC_COOKIE) { for (offset = 0; offset < 64; offset++) { if (u32buf[offset] == DMA_MAGIC_COOKIE) { break; } } offset *= 4; if (offset == 256) { IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name); offset = s->dma_last_offset; } if (s->dma_last_offset != offset) IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset); s->dma_last_offset = offset; } if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM || s->type == IVTV_DEC_STREAM_TYPE_VBI)) { write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET); } else { write_enc_sync(0, s->dma_offset); } if (offset) { buf->bytesused -= offset; memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset); } *u32buf = cpu_to_le32(s->dma_backup); } x++; /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */ if (s->type == IVTV_ENC_STREAM_TYPE_MPG || s->type == IVTV_ENC_STREAM_TYPE_VBI) buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP; } if (buf) buf->bytesused += s->dma_last_offset; if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) { list_for_each_entry(buf, &s->q_dma.list, list) { /* Parse and Groom VBI Data */ s->q_dma.bytesused -= buf->bytesused; ivtv_process_vbi_data(itv, buf, 0, s->type); s->q_dma.bytesused += buf->bytesused; } if (s->fh == NULL) { ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0); return; } } ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused); if (s->fh) wake_up(&s->waitq); } void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock) { struct ivtv *itv = s->itv; struct yuv_playback_info *yi = &itv->yuv_info; u8 frame = yi->draw_frame; struct yuv_frame_info *f = &yi->new_frame_info[frame]; struct ivtv_buffer *buf; u32 
y_size = 720 * ((f->src_h + 31) & ~31); u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET; int y_done = 0; int bytes_written = 0; unsigned long flags = 0; int idx = 0; IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset); /* Insert buffer block for YUV if needed */ if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) { if (yi->blanking_dmaptr) { s->sg_pending[idx].src = yi->blanking_dmaptr; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = 720 * 16; } offset += 720 * 16; idx++; } list_for_each_entry(buf, &s->q_predma.list, list) { /* YUV UV Offset from Y Buffer */ if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && (bytes_written + buf->bytesused) >= y_size) { s->sg_pending[idx].src = buf->dma_handle; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = y_size - bytes_written; offset = uv_offset; if (s->sg_pending[idx].size != buf->bytesused) { idx++; s->sg_pending[idx].src = buf->dma_handle + s->sg_pending[idx - 1].size; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = buf->bytesused - s->sg_pending[idx - 1].size; offset += s->sg_pending[idx].size; } y_done = 1; } else { s->sg_pending[idx].src = buf->dma_handle; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = buf->bytesused; offset += buf->bytesused; } bytes_written += buf->bytesused; /* Sync SG buffers */ ivtv_buf_sync_for_device(s, buf); idx++; } s->sg_pending_size = idx; /* Sync Hardware SG List of buffers */ ivtv_stream_sync_for_device(s); if (lock) spin_lock_irqsave(&itv->dma_reg_lock, flags); if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) { ivtv_dma_dec_start(s); } else { set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags); } if (lock) spin_unlock_irqrestore(&itv->dma_reg_lock, flags); } static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s) { struct ivtv *itv = s->itv; s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src); s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst); s->sg_dma->size = 
cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000); s->sg_processed++; /* Sync Hardware SG List of buffers */ ivtv_stream_sync_for_device(s); write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR); write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER); itv->dma_timer.expires = jiffies + msecs_to_jiffies(300); add_timer(&itv->dma_timer); } static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s) { struct ivtv *itv = s->itv; s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src); s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst); s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000); s->sg_processed++; /* Sync Hardware SG List of buffers */ ivtv_stream_sync_for_device(s); write_reg(s->sg_handle, IVTV_REG_DECDMAADDR); write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER); itv->dma_timer.expires = jiffies + msecs_to_jiffies(300); add_timer(&itv->dma_timer); } /* start the encoder DMA */ static void ivtv_dma_enc_start(struct ivtv_stream *s) { struct ivtv *itv = s->itv; struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; int i; IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name); if (s->q_predma.bytesused) ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused); if (ivtv_use_dma(s)) s->sg_pending[s->sg_pending_size - 1].size += 256; /* If this is an MPEG stream, and VBI data is also pending, then append the VBI DMA to the MPEG DMA and transfer both sets of data at once. VBI DMA is a second class citizen compared to MPEG and mixing them together will confuse the firmware (the end of a VBI DMA is seen as the end of a MPEG DMA, thus effectively dropping an MPEG frame). So instead we make sure we only use the MPEG DMA to transfer the VBI DMA if both are in use. This way no conflicts occur. 
*/ clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags); if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size && s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) { ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused); if (ivtv_use_dma(s_vbi)) s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256; for (i = 0; i < s_vbi->sg_pending_size; i++) { s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i]; } s_vbi->dma_offset = s_vbi->pending_offset; s_vbi->sg_pending_size = 0; s_vbi->dma_xfer_cnt++; set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags); IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name); } s->dma_xfer_cnt++; memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size); s->sg_processing_size = s->sg_pending_size; s->sg_pending_size = 0; s->sg_processed = 0; s->dma_offset = s->pending_offset; s->dma_backup = s->pending_backup; s->dma_pts = s->pending_pts; if (ivtv_use_pio(s)) { set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags); set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags); set_bit(IVTV_F_I_PIO, &itv->i_flags); itv->cur_pio_stream = s->type; } else { itv->dma_retries = 0; ivtv_dma_enc_start_xfer(s); set_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = s->type; } } static void ivtv_dma_dec_start(struct ivtv_stream *s) { struct ivtv *itv = s->itv; if (s->q_predma.bytesused) ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused); s->dma_xfer_cnt++; memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size); s->sg_processing_size = s->sg_pending_size; s->sg_pending_size = 0; s->sg_processed = 0; IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name); itv->dma_retries = 0; ivtv_dma_dec_start_xfer(s); set_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = s->type; } static void ivtv_irq_dma_read(struct ivtv *itv) { struct ivtv_stream *s = NULL; struct ivtv_buffer *buf; int hw_stream_type = 0; IVTV_DEBUG_HI_IRQ("DEC 
DMA READ\n"); del_timer(&itv->dma_timer); if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) return; if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { s = &itv->streams[itv->cur_dma_stream]; ivtv_stream_sync_for_cpu(s); if (read_reg(IVTV_REG_DMASTATUS) & 0x14) { IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n", read_reg(IVTV_REG_DMASTATUS), s->sg_processed, s->sg_processing_size, itv->dma_retries); write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); if (itv->dma_retries == 3) { /* Too many retries, give up on this frame */ itv->dma_retries = 0; s->sg_processed = s->sg_processing_size; } else { /* Retry, starting with the first xfer segment. Just retrying the current segment is not sufficient. */ s->sg_processed = 0; itv->dma_retries++; } } if (s->sg_processed < s->sg_processing_size) { /* DMA next buffer */ ivtv_dma_dec_start_xfer(s); return; } if (s->type == IVTV_DEC_STREAM_TYPE_YUV) hw_stream_type = 2; IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused); /* For some reason must kick the firmware, like PIO mode, I think this tells the firmware we are done and the size of the xfer so it can calculate what we need next. 
I think we can do this part ourselves but would have to fully calculate xfer info ourselves and not use interrupts */ ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused, hw_stream_type); /* Free last DMA call */ while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) { ivtv_buf_sync_for_cpu(s, buf); ivtv_enqueue(s, buf, &s->q_free); } wake_up(&s->waitq); } clear_bit(IVTV_F_I_UDMA, &itv->i_flags); clear_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = -1; wake_up(&itv->dma_waitq); } static void ivtv_irq_enc_dma_complete(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s; ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream); del_timer(&itv->dma_timer); if (itv->cur_dma_stream < 0) return; s = &itv->streams[itv->cur_dma_stream]; ivtv_stream_sync_for_cpu(s); if (data[0] & 0x18) { IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0], s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries); write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); if (itv->dma_retries == 3) { /* Too many retries, give up on this frame */ itv->dma_retries = 0; s->sg_processed = s->sg_processing_size; } else { /* Retry, starting with the first xfer segment. Just retrying the current segment is not sufficient. 
*/ s->sg_processed = 0; itv->dma_retries++; } } if (s->sg_processed < s->sg_processing_size) { /* DMA next buffer */ ivtv_dma_enc_start_xfer(s); return; } clear_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = -1; dma_post(s); if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) { s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; dma_post(s); } s->sg_processing_size = 0; s->sg_processed = 0; wake_up(&itv->dma_waitq); } static void ivtv_irq_enc_pio_complete(struct ivtv *itv) { struct ivtv_stream *s; if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) { itv->cur_pio_stream = -1; return; } s = &itv->streams[itv->cur_pio_stream]; IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name); clear_bit(IVTV_F_I_PIO, &itv->i_flags); itv->cur_pio_stream = -1; dma_post(s); if (s->type == IVTV_ENC_STREAM_TYPE_MPG) ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0); else if (s->type == IVTV_ENC_STREAM_TYPE_YUV) ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1); else if (s->type == IVTV_ENC_STREAM_TYPE_PCM) ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2); clear_bit(IVTV_F_I_PIO, &itv->i_flags); if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) { s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; dma_post(s); } wake_up(&itv->dma_waitq); } static void ivtv_irq_dma_err(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; u32 status; del_timer(&itv->dma_timer); ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); status = read_reg(IVTV_REG_DMASTATUS); IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1], status, itv->cur_dma_stream); /* * We do *not* write back to the IVTV_REG_DMASTATUS register to * clear the error status, if either the encoder write (0x02) or * decoder read (0x01) bus master DMA operation do not indicate * completed. We can race with the DMA engine, which may have * transitioned to completed status *after* we read the register. 
* Setting a IVTV_REG_DMASTATUS flag back to "busy" status, after the * DMA engine has completed, will cause the DMA engine to stop working. */ status &= 0x3; if (status == 0x3) write_reg(status, IVTV_REG_DMASTATUS); if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) { struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream]; if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) { /* retry */ /* * FIXME - handle cases of DMA error similar to * encoder below, except conditioned on status & 0x1 */ ivtv_dma_dec_start(s); return; } else { if ((status & 0x2) == 0) { /* * CX2341x Bus Master DMA write is ongoing. * Reset the timer and let it complete. */ itv->dma_timer.expires = jiffies + msecs_to_jiffies(600); add_timer(&itv->dma_timer); return; } if (itv->dma_retries < 3) { /* * CX2341x Bus Master DMA write has ended. * Retry the write, starting with the first * xfer segment. Just retrying the current * segment is not sufficient. */ s->sg_processed = 0; itv->dma_retries++; ivtv_dma_enc_start_xfer(s); return; } /* Too many retries, give up on this one */ } } if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { ivtv_udma_start(itv); return; } clear_bit(IVTV_F_I_UDMA, &itv->i_flags); clear_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = -1; wake_up(&itv->dma_waitq); } static void ivtv_irq_enc_start_cap(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s; /* Get DMA destination and size arguments from card */ ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data); IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]); if (data[0] > 2 || data[1] == 0 || data[2] == 0) { IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n", data[0], data[1], data[2]); return; } s = &itv->streams[ivtv_stream_map[data[0]]]; if (!stream_enc_dma_append(s, data)) { set_bit(ivtv_use_pio(s) ? 
IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags); } } static void ivtv_irq_enc_vbi_cap(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s; IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n"); s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; if (!stream_enc_dma_append(s, data)) set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags); } static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI]; IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n"); if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) && !stream_enc_dma_append(s, data)) { set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags); } } static void ivtv_irq_dec_data_req(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s; /* YUV or MPG */ if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) { ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data); itv->dma_data_req_size = 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31); itv->dma_data_req_offset = data[1]; if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0) ivtv_yuv_frame_complete(itv); s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV]; } else { ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data); itv->dma_data_req_size = min_t(u32, data[2], 0x10000); itv->dma_data_req_offset = data[1]; s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG]; } IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused, itv->dma_data_req_offset, itv->dma_data_req_size); if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) { set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags); } else { if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) ivtv_yuv_setup_stream_frame(itv); clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags); ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size); ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0); } } static void ivtv_irq_vsync(struct ivtv *itv) { /* The 
vsync interrupt is unusual in that it won't clear until * the end of the first line for the current field, at which * point it clears itself. This can result in repeated vsync * interrupts, or a missed vsync. Read some of the registers * to determine the line being displayed and ensure we handle * one vsync per frame. */ unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1; struct yuv_playback_info *yi = &itv->yuv_info; int last_dma_frame = atomic_read(&yi->next_dma_frame); struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame]; if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n"); if (((frame ^ f->sync_field) == 0 && ((itv->last_vsync_field & 1) ^ f->sync_field)) || (frame != (itv->last_vsync_field & 1) && !f->interlaced)) { int next_dma_frame = last_dma_frame; if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) { if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) { write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c); write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830); write_reg(yuv_offset[next_dma_frame] >> 4, 0x834); write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838); next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS; atomic_set(&yi->next_dma_frame, next_dma_frame); yi->fields_lapsed = -1; yi->running = 1; } } } if (frame != (itv->last_vsync_field & 1)) { static const struct v4l2_event evtop = { .type = V4L2_EVENT_VSYNC, .u.vsync.field = V4L2_FIELD_TOP, }; static const struct v4l2_event evbottom = { .type = V4L2_EVENT_VSYNC, .u.vsync.field = V4L2_FIELD_BOTTOM, }; struct ivtv_stream *s = ivtv_get_output_stream(itv); itv->last_vsync_field += 1; if (frame == 0) { clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags); clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags); } else { set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags); } if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) { set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags); wake_up(&itv->event_waitq); if (s) 
wake_up(&s->waitq); } if (s && s->vdev) v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom); wake_up(&itv->vsync_waitq); /* Send VBI to saa7127 */ if (frame && (itv->output_mode == OUT_PASSTHROUGH || test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) || test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) || test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) { set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags); set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags); } /* Check if we need to update the yuv registers */ if (yi->running && (yi->yuv_forced_update || f->update)) { if (!f->update) { last_dma_frame = (u8)(atomic_read(&yi->next_dma_frame) - 1) % IVTV_YUV_BUFFERS; f = &yi->new_frame_info[last_dma_frame]; } if (f->src_w) { yi->update_frame = last_dma_frame; f->update = 0; yi->yuv_forced_update = 0; set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags); set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags); } } yi->fields_lapsed++; } } #define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT) irqreturn_t ivtv_irq_handler(int irq, void *dev_id) { struct ivtv *itv = (struct ivtv *)dev_id; u32 combo; u32 stat; int i; u8 vsync_force = 0; spin_lock(&itv->dma_reg_lock); /* get contents of irq status register */ stat = read_reg(IVTV_REG_IRQSTATUS); combo = ~itv->irqmask & stat; /* Clear out IRQ */ if (combo) write_reg(combo, IVTV_REG_IRQSTATUS); if (0 == combo) { /* The vsync interrupt is unusual and clears itself. If we * took too long, we may have missed it. 
Do some checks */ if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) { /* vsync is enabled, see if we're in a new field */ if ((itv->last_vsync_field & 1) != (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) { /* New field, looks like we missed it */ IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16); vsync_force = 1; } } if (!vsync_force) { /* No Vsync expected, wasn't for us */ spin_unlock(&itv->dma_reg_lock); return IRQ_NONE; } } /* Exclude interrupts noted below from the output, otherwise the log is flooded with these messages */ if (combo & ~0xff6d0400) IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo); if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) { IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n"); } if (combo & IVTV_IRQ_DMA_READ) { ivtv_irq_dma_read(itv); } if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) { ivtv_irq_enc_dma_complete(itv); } if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) { ivtv_irq_enc_pio_complete(itv); } if (combo & IVTV_IRQ_DMA_ERR) { ivtv_irq_dma_err(itv); } if (combo & IVTV_IRQ_ENC_START_CAP) { ivtv_irq_enc_start_cap(itv); } if (combo & IVTV_IRQ_ENC_VBI_CAP) { ivtv_irq_enc_vbi_cap(itv); } if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) { ivtv_irq_dec_vbi_reinsert(itv); } if (combo & IVTV_IRQ_ENC_EOS) { IVTV_DEBUG_IRQ("ENC EOS\n"); set_bit(IVTV_F_I_EOS, &itv->i_flags); wake_up(&itv->eos_waitq); } if (combo & IVTV_IRQ_DEC_DATA_REQ) { ivtv_irq_dec_data_req(itv); } /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */ if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) { ivtv_irq_vsync(itv); } if (combo & IVTV_IRQ_ENC_VIM_RST) { IVTV_DEBUG_IRQ("VIM RST\n"); /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */ } if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) { IVTV_DEBUG_INFO("Stereo mode changed\n"); } if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) { itv->irq_rr_idx++; for (i = 0; i < IVTV_MAX_STREAMS; i++) { int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS; struct ivtv_stream *s = &itv->streams[idx]; if 
(!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) continue; if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) ivtv_dma_dec_start(s); else ivtv_dma_enc_start(s); break; } if (i == IVTV_MAX_STREAMS && test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) ivtv_udma_start(itv); } if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) { itv->irq_rr_idx++; for (i = 0; i < IVTV_MAX_STREAMS; i++) { int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS; struct ivtv_stream *s = &itv->streams[idx]; if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags)) continue; if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG) ivtv_dma_enc_start(s); break; } } if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) { queue_kthread_work(&itv->irq_worker, &itv->irq_work); } spin_unlock(&itv->dma_reg_lock); /* If we've just handled a 'forced' vsync, it's safest to say it * wasn't ours. Another device may have triggered it at just * the right time. */ return vsync_force ? IRQ_NONE : IRQ_HANDLED; } void ivtv_unfinished_dma(unsigned long arg) { struct ivtv *itv = (struct ivtv *)arg; if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) return; IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream); write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); clear_bit(IVTV_F_I_UDMA, &itv->i_flags); clear_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = -1; wake_up(&itv->dma_waitq); }
gpl-2.0
snak3ater/android_kernel_lge_mako
fs/lockd/svcproc.c
5521
14929
/* * linux/fs/lockd/svcproc.c * * Lockd server procedures. We don't implement the NLM_*_RES * procedures because we don't use the async procedures. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/time.h> #include <linux/lockd/lockd.h> #include <linux/lockd/share.h> #define NLMDBG_FACILITY NLMDBG_CLIENT #ifdef CONFIG_LOCKD_V4 static __be32 cast_to_nlm(__be32 status, u32 vers) { /* Note: status is assumed to be in network byte order !!! */ if (vers != 4){ switch (status) { case nlm_granted: case nlm_lck_denied: case nlm_lck_denied_nolocks: case nlm_lck_blocked: case nlm_lck_denied_grace_period: case nlm_drop_reply: break; case nlm4_deadlock: status = nlm_lck_denied; break; default: status = nlm_lck_denied_nolocks; } } return (status); } #define cast_status(status) (cast_to_nlm(status, rqstp->rq_vers)) #else #define cast_status(status) (status) #endif /* * Obtain client and file from arguments */ static __be32 nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_host **hostp, struct nlm_file **filp) { struct nlm_host *host = NULL; struct nlm_file *file = NULL; struct nlm_lock *lock = &argp->lock; __be32 error = 0; /* nfsd callbacks must have been installed for this procedure */ if (!nlmsvc_ops) return nlm_lck_denied_nolocks; /* Obtain host handle */ if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len)) || (argp->monitor && nsm_monitor(host) < 0)) goto no_locks; *hostp = host; /* Obtain file pointer. Not used by FREE_ALL call. 
*/ if (filp != NULL) { if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0) goto no_locks; *filp = file; /* Set up the missing parts of the file_lock structure */ lock->fl.fl_file = file->f_file; lock->fl.fl_owner = (fl_owner_t) host; lock->fl.fl_lmops = &nlmsvc_lock_operations; } return 0; no_locks: nlmsvc_release_host(host); if (error) return error; return nlm_lck_denied_nolocks; } /* * NULL: Test for presence of service */ static __be32 nlmsvc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp) { dprintk("lockd: NULL called\n"); return rpc_success; } /* * TEST: Check for conflicting lock */ static __be32 nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; __be32 rc = rpc_success; dprintk("lockd: TEST called\n"); resp->cookie = argp->cookie; /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now check for conflicting locks */ resp->status = cast_status(nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie)); if (resp->status == nlm_drop_reply) rc = rpc_drop_reply; else dprintk("lockd: TEST status %d vers %d\n", ntohl(resp->status), rqstp->rq_vers); nlmsvc_release_host(host); nlm_release_file(file); return rc; } static __be32 nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; __be32 rc = rpc_success; dprintk("lockd: LOCK called\n"); resp->cookie = argp->cookie; /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; #if 0 /* If supplied state doesn't match current state, we assume it's * an old request that time-warped somehow. Any error return would * do in this case because it's irrelevant anyway. 
* * NB: We don't retrieve the remote host's state yet. */ if (host->h_nsmstate && host->h_nsmstate != argp->state) { resp->status = nlm_lck_denied_nolocks; } else #endif /* Now try to lock the file */ resp->status = cast_status(nlmsvc_lock(rqstp, file, host, &argp->lock, argp->block, &argp->cookie, argp->reclaim)); if (resp->status == nlm_drop_reply) rc = rpc_drop_reply; else dprintk("lockd: LOCK status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rc; } static __be32 nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: CANCEL called\n"); resp->cookie = argp->cookie; /* Don't accept requests during grace period */ if (locks_in_grace()) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Try to cancel request. */ resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock)); dprintk("lockd: CANCEL status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * UNLOCK: release a lock */ static __be32 nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: UNLOCK called\n"); resp->cookie = argp->cookie; /* Don't accept new lock requests during grace period */ if (locks_in_grace()) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; /* Now try to remove the lock */ resp->status = cast_status(nlmsvc_unlock(file, &argp->lock)); dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * GRANTED: A server calls us to tell that a process' lock request * was granted */ static __be32 nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { resp->cookie = argp->cookie; dprintk("lockd: GRANTED called\n"); resp->status = nlmclnt_grant(svc_addr(rqstp), &argp->lock); dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); return rpc_success; } /* * This is the generic lockd callback for async RPC calls */ static void nlmsvc_callback_exit(struct rpc_task *task, void *data) { dprintk("lockd: %5u callback returned %d\n", task->tk_pid, -task->tk_status); } void nlmsvc_release_call(struct nlm_rqst *call) { if (!atomic_dec_and_test(&call->a_count)) return; nlmsvc_release_host(call->a_host); kfree(call); } static void nlmsvc_callback_release(void *data) { nlmsvc_release_call(data); } static const struct rpc_call_ops nlmsvc_callback_ops = { .rpc_call_done = nlmsvc_callback_exit, .rpc_release = nlmsvc_callback_release, }; /* * `Async' versions of the above service routines. They aren't really, * because we send the callback before the reply proper. I hope this * doesn't break any clients. 
 */
/*
 * Common driver for the NLM *_MSG procedures: run the synchronous
 * service routine @func, then send its result back to the client as an
 * asynchronous *_RES callback instead of a normal RPC reply.
 *
 * @rqstp: incoming request
 * @proc:  NLMPROC_*_RES procedure number used for the callback
 * @argp:  decoded NLM arguments
 * @func:  the synchronous handler (nlmsvc_proc_test etc.) to invoke
 *
 * Returns rpc_success when the callback was queued, rpc_system_err on
 * host-lookup/allocation/transmit failure, or whatever non-zero status
 * @func produced (e.g. rpc_drop_reply).  On the transmit path the
 * nlm_rqst is owned by the RPC task and released via
 * nlmsvc_callback_release().
 */
static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc,
		struct nlm_args *argp,
		__be32 (*func)(struct svc_rqst *, struct nlm_args *,
				struct nlm_res *))
{
	struct nlm_host	*host;
	struct nlm_rqst	*call;
	__be32 stat;

	host = nlmsvc_lookup_host(rqstp,
				  argp->lock.caller,
				  argp->lock.len);
	if (host == NULL)
		return rpc_system_err;

	/* nlm_alloc_call() takes over the host reference on success */
	call = nlm_alloc_call(host);
	if (call == NULL)
		return rpc_system_err;

	stat = func(rqstp, argp, &call->a_res);
	if (stat != 0) {
		nlmsvc_release_call(call);
		return stat;
	}

	call->a_flags = RPC_TASK_ASYNC;
	if (nlm_async_reply(call, proc, &nlmsvc_callback_ops) < 0)
		return rpc_system_err;
	return rpc_success;
}

/* TEST_MSG: asynchronous variant of TEST (reply sent as TEST_RES) */
static __be32 nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
					     void	     *resp)
{
	dprintk("lockd: TEST_MSG      called\n");
	return nlmsvc_callback(rqstp, NLMPROC_TEST_RES, argp, nlmsvc_proc_test);
}

/* LOCK_MSG: asynchronous variant of LOCK (reply sent as LOCK_RES) */
static __be32 nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
					     void	     *resp)
{
	dprintk("lockd: LOCK_MSG      called\n");
	return nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlmsvc_proc_lock);
}

/* CANCEL_MSG: asynchronous variant of CANCEL (reply sent as CANCEL_RES) */
static __be32 nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
					       void	       *resp)
{
	dprintk("lockd: CANCEL_MSG    called\n");
	return nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlmsvc_proc_cancel);
}

/* UNLOCK_MSG: asynchronous variant of UNLOCK (reply sent as UNLOCK_RES) */
static __be32
nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
                                               void            *resp)
{
	dprintk("lockd: UNLOCK_MSG    called\n");
	return nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlmsvc_proc_unlock);
}

/* GRANTED_MSG: asynchronous variant of GRANTED (reply sent as GRANTED_RES) */
static __be32
nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
                                                void            *resp)
{
	dprintk("lockd: GRANTED_MSG   called\n");
	return nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlmsvc_proc_granted);
}

/*
 * SHARE: create a DOS share or alter existing share.
*/ static __be32 nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: SHARE called\n"); resp->cookie = argp->cookie; /* Don't accept new lock requests during grace period */ if (locks_in_grace() && !argp->reclaim) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now try to create the share */ resp->status = cast_status(nlmsvc_share_file(host, file, argp)); dprintk("lockd: SHARE status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * UNSHARE: Release a DOS share. */ static __be32 nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: UNSHARE called\n"); resp->cookie = argp->cookie; /* Don't accept requests during grace period */ if (locks_in_grace()) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; /* Now try to unshare the file */ resp->status = cast_status(nlmsvc_unshare_file(host, file, argp)); dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * NM_LOCK: Create an unmonitored lock */ static __be32 nlmsvc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { dprintk("lockd: NM_LOCK called\n"); argp->monitor = 0; /* just clean the monitor flag */ return nlmsvc_proc_lock(rqstp, argp, resp); } /* * FREE_ALL: Release all locks and shares held by client */ static __be32 nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { struct nlm_host *host; /* Obtain client */ if (nlmsvc_retrieve_args(rqstp, argp, &host, NULL)) return rpc_success; nlmsvc_free_host_resources(host); nlmsvc_release_host(host); return rpc_success; } /* * SM_NOTIFY: private callback from statd (not part of official NLM proto) */ static __be32 nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp, void *resp) { dprintk("lockd: SM_NOTIFY called\n"); if (!nlm_privileged_requester(rqstp)) { char buf[RPC_MAX_ADDRBUFLEN]; printk(KERN_WARNING "lockd: rejected NSM callback from %s\n", svc_print_addr(rqstp, buf, sizeof(buf))); return rpc_system_err; } nlm_host_rebooted(argp); return rpc_success; } /* * client sent a GRANTED_RES, let's remove the associated block */ static __be32 nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp, void *resp) { if (!nlmsvc_ops) return rpc_success; dprintk("lockd: GRANTED_RES called\n"); nlmsvc_grant_reply(&argp->cookie, argp->status); return rpc_success; } /* * NLM Server procedures. 
*/ #define nlmsvc_encode_norep nlmsvc_encode_void #define nlmsvc_decode_norep nlmsvc_decode_void #define nlmsvc_decode_testres nlmsvc_decode_void #define nlmsvc_decode_lockres nlmsvc_decode_void #define nlmsvc_decode_unlockres nlmsvc_decode_void #define nlmsvc_decode_cancelres nlmsvc_decode_void #define nlmsvc_decode_grantedres nlmsvc_decode_void #define nlmsvc_proc_none nlmsvc_proc_null #define nlmsvc_proc_test_res nlmsvc_proc_null #define nlmsvc_proc_lock_res nlmsvc_proc_null #define nlmsvc_proc_cancel_res nlmsvc_proc_null #define nlmsvc_proc_unlock_res nlmsvc_proc_null struct nlm_void { int dummy; }; #define PROC(name, xargt, xrest, argt, rest, respsize) \ { .pc_func = (svc_procfunc) nlmsvc_proc_##name, \ .pc_decode = (kxdrproc_t) nlmsvc_decode_##xargt, \ .pc_encode = (kxdrproc_t) nlmsvc_encode_##xrest, \ .pc_release = NULL, \ .pc_argsize = sizeof(struct nlm_##argt), \ .pc_ressize = sizeof(struct nlm_##rest), \ .pc_xdrressize = respsize, \ } #define Ck (1+XDR_QUADLEN(NLM_MAXCOOKIELEN)) /* cookie */ #define St 1 /* status */ #define No (1+1024/4) /* Net Obj */ #define Rg 2 /* range - offset + size */ struct svc_procedure nlmsvc_procedures[] = { PROC(null, void, void, void, void, 1), PROC(test, testargs, testres, args, res, Ck+St+2+No+Rg), PROC(lock, lockargs, res, args, res, Ck+St), PROC(cancel, cancargs, res, args, res, Ck+St), PROC(unlock, unlockargs, res, args, res, Ck+St), PROC(granted, testargs, res, args, res, Ck+St), PROC(test_msg, testargs, norep, args, void, 1), PROC(lock_msg, lockargs, norep, args, void, 1), PROC(cancel_msg, cancargs, norep, args, void, 1), PROC(unlock_msg, unlockargs, norep, args, void, 1), PROC(granted_msg, testargs, norep, args, void, 1), PROC(test_res, testres, norep, res, void, 1), PROC(lock_res, lockres, norep, res, void, 1), PROC(cancel_res, cancelres, norep, res, void, 1), PROC(unlock_res, unlockres, norep, res, void, 1), PROC(granted_res, res, norep, res, void, 1), /* statd callback */ PROC(sm_notify, reboot, void, reboot, 
void, 1), PROC(none, void, void, void, void, 1), PROC(none, void, void, void, void, 1), PROC(none, void, void, void, void, 1), PROC(share, shareargs, shareres, args, res, Ck+St+1), PROC(unshare, shareargs, shareres, args, res, Ck+St+1), PROC(nm_lock, lockargs, res, args, res, Ck+St), PROC(free_all, notify, void, args, void, 0), };
gpl-2.0
tommytarts/QuantumKernelS3
drivers/media/video/tm6000/tm6000-i2c.c
9617
8434
/* * tm6000-i2c.c - driver for TM5600/TM6000/TM6010 USB video capture devices * * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> * * Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com> * - Fix SMBus Read Byte command * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/usb.h> #include <linux/i2c.h> #include "tm6000.h" #include "tm6000-regs.h" #include <media/v4l2-common.h> #include <media/tuner.h> #include "tuner-xc2028.h" /* ----------------------------------------------------------- */ static unsigned int i2c_debug; module_param(i2c_debug, int, 0644); MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]"); #define i2c_dprintk(lvl, fmt, args...) 
if (i2c_debug >= lvl) do { \ printk(KERN_DEBUG "%s at %s: " fmt, \ dev->name, __func__, ##args); } while (0) static int tm6000_i2c_send_regs(struct tm6000_core *dev, unsigned char addr, __u8 reg, char *buf, int len) { int rc; unsigned int i2c_packet_limit = 16; if (dev->dev_type == TM6010) i2c_packet_limit = 80; if (!buf) return -1; if (len < 1 || len > i2c_packet_limit) { printk(KERN_ERR "Incorrect length of i2c packet = %d, limit set to %d\n", len, i2c_packet_limit); return -1; } /* capture mutex */ rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN, addr | reg << 8, 0, buf, len); if (rc < 0) { /* release mutex */ return rc; } /* release mutex */ return rc; } /* Generic read - doesn't work fine with 16bit registers */ static int tm6000_i2c_recv_regs(struct tm6000_core *dev, unsigned char addr, __u8 reg, char *buf, int len) { int rc; u8 b[2]; unsigned int i2c_packet_limit = 16; if (dev->dev_type == TM6010) i2c_packet_limit = 64; if (!buf) return -1; if (len < 1 || len > i2c_packet_limit) { printk(KERN_ERR "Incorrect length of i2c packet = %d, limit set to %d\n", len, i2c_packet_limit); return -1; } /* capture mutex */ if ((dev->caps.has_zl10353) && (dev->demod_addr << 1 == addr) && (reg % 2 == 0)) { /* * Workaround an I2C bug when reading from zl10353 */ reg -= 1; len += 1; rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN, addr | reg << 8, 0, b, len); *buf = b[1]; } else { rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN, addr | reg << 8, 0, buf, len); } /* release mutex */ return rc; } /* * read from a 16bit register * for example xc2028, xc3028 or xc3028L */ static int tm6000_i2c_recv_regs16(struct tm6000_core *dev, unsigned char addr, __u16 reg, char *buf, int len) { int rc; unsigned char ureg; if (!buf || len != 2) return -1; /* capture mutex */ if (dev->dev_type == TM6010) { ureg = reg 
& 0xFF; rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN, addr | (reg & 0xFF00), 0, &ureg, 1); if (rc < 0) { /* release mutex */ return rc; } rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, REQ_35_AFTEK_TUNER_READ, reg, 0, buf, len); } else { rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, REQ_14_SET_GET_I2C_WR2_RDN, addr, reg, buf, len); } /* release mutex */ return rc; } static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { struct tm6000_core *dev = i2c_adap->algo_data; int addr, rc, i, byte; if (num <= 0) return 0; for (i = 0; i < num; i++) { addr = (msgs[i].addr << 1) & 0xff; i2c_dprintk(2, "%s %s addr=0x%x len=%d:", (msgs[i].flags & I2C_M_RD) ? "read" : "write", i == num - 1 ? "stop" : "nonstop", addr, msgs[i].len); if (msgs[i].flags & I2C_M_RD) { /* read request without preceding register selection */ /* * The TM6000 only supports a read transaction * immediately after a 1 or 2 byte write to select * a register. We cannot fulfil this request. */ i2c_dprintk(2, " read without preceding write not" " supported"); rc = -EOPNOTSUPP; goto err; } else if (i + 1 < num && msgs[i].len <= 2 && (msgs[i + 1].flags & I2C_M_RD) && msgs[i].addr == msgs[i + 1].addr) { /* 1 or 2 byte write followed by a read */ if (i2c_debug >= 2) for (byte = 0; byte < msgs[i].len; byte++) printk(KERN_CONT " %02x", msgs[i].buf[byte]); i2c_dprintk(2, "; joined to read %s len=%d:", i == num - 2 ? 
"stop" : "nonstop", msgs[i + 1].len); if (msgs[i].len == 2) { rc = tm6000_i2c_recv_regs16(dev, addr, msgs[i].buf[0] << 8 | msgs[i].buf[1], msgs[i + 1].buf, msgs[i + 1].len); } else { rc = tm6000_i2c_recv_regs(dev, addr, msgs[i].buf[0], msgs[i + 1].buf, msgs[i + 1].len); } i++; if (addr == dev->tuner_addr << 1) { tm6000_set_reg(dev, REQ_50_SET_START, 0, 0); tm6000_set_reg(dev, REQ_51_SET_STOP, 0, 0); } if (i2c_debug >= 2) for (byte = 0; byte < msgs[i].len; byte++) printk(KERN_CONT " %02x", msgs[i].buf[byte]); } else { /* write bytes */ if (i2c_debug >= 2) for (byte = 0; byte < msgs[i].len; byte++) printk(KERN_CONT " %02x", msgs[i].buf[byte]); rc = tm6000_i2c_send_regs(dev, addr, msgs[i].buf[0], msgs[i].buf + 1, msgs[i].len - 1); } if (i2c_debug >= 2) printk(KERN_CONT "\n"); if (rc < 0) goto err; } return num; err: i2c_dprintk(2, " ERROR: %i\n", rc); return rc; } static int tm6000_i2c_eeprom(struct tm6000_core *dev) { int i, rc; unsigned char *p = dev->eedata; unsigned char bytes[17]; dev->i2c_client.addr = 0xa0 >> 1; dev->eedata_size = 0; bytes[16] = '\0'; for (i = 0; i < sizeof(dev->eedata); ) { *p = i; rc = tm6000_i2c_recv_regs(dev, 0xa0, i, p, 1); if (rc < 1) { if (p == dev->eedata) goto noeeprom; else { printk(KERN_WARNING "%s: i2c eeprom read error (err=%d)\n", dev->name, rc); } return -EINVAL; } dev->eedata_size++; p++; if (0 == (i % 16)) printk(KERN_INFO "%s: i2c eeprom %02x:", dev->name, i); printk(KERN_CONT " %02x", dev->eedata[i]); if ((dev->eedata[i] >= ' ') && (dev->eedata[i] <= 'z')) bytes[i%16] = dev->eedata[i]; else bytes[i%16] = '.'; i++; if (0 == (i % 16)) { bytes[16] = '\0'; printk(KERN_CONT " %s\n", bytes); } } if (0 != (i%16)) { bytes[i%16] = '\0'; for (i %= 16; i < 16; i++) printk(KERN_CONT " "); printk(KERN_CONT " %s\n", bytes); } return 0; noeeprom: printk(KERN_INFO "%s: Huh, no eeprom present (err=%d)?\n", dev->name, rc); return -EINVAL; } /* ----------------------------------------------------------- */ /* * functionality() */ static u32 
functionality(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm tm6000_algo = { .master_xfer = tm6000_i2c_xfer, .functionality = functionality, }; /* ----------------------------------------------------------- */ /* * tm6000_i2c_register() * register i2c bus */ int tm6000_i2c_register(struct tm6000_core *dev) { int rc; dev->i2c_adap.owner = THIS_MODULE; dev->i2c_adap.algo = &tm6000_algo; dev->i2c_adap.dev.parent = &dev->udev->dev; strlcpy(dev->i2c_adap.name, dev->name, sizeof(dev->i2c_adap.name)); dev->i2c_adap.algo_data = dev; i2c_set_adapdata(&dev->i2c_adap, &dev->v4l2_dev); rc = i2c_add_adapter(&dev->i2c_adap); if (rc) return rc; dev->i2c_client.adapter = &dev->i2c_adap; strlcpy(dev->i2c_client.name, "tm6000 internal", I2C_NAME_SIZE); tm6000_i2c_eeprom(dev); return 0; } /* * tm6000_i2c_unregister() * unregister i2c_bus */ int tm6000_i2c_unregister(struct tm6000_core *dev) { i2c_del_adapter(&dev->i2c_adap); return 0; }
gpl-2.0
jthatch12/STi
drivers/input/joystick/guillemot.c
9873
7558
/* * Copyright (c) 2001 Vojtech Pavlik */ /* * Guillemot Digital Interface Protocol driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/gameport.h> #include <linux/input.h> #include <linux/jiffies.h> #define DRIVER_DESC "Guillemot Digital joystick driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); #define GUILLEMOT_MAX_START 600 /* 600 us */ #define GUILLEMOT_MAX_STROBE 60 /* 60 us */ #define GUILLEMOT_MAX_LENGTH 17 /* 17 bytes */ static short guillemot_abs_pad[] = { ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, -1 }; static short guillemot_btn_pad[] = { BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z, BTN_TL, BTN_TR, BTN_MODE, BTN_SELECT, -1 }; static struct { int x; int y; } guillemot_hat_to_axis[16] = {{ 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, 0}, {-1,-1}}; struct guillemot_type { unsigned char id; short *abs; short *btn; int hat; char *name; }; struct guillemot { struct gameport *gameport; struct 
input_dev *dev; int bads; int reads; struct guillemot_type *type; unsigned char length; char phys[32]; }; static struct guillemot_type guillemot_type[] = { { 0x00, guillemot_abs_pad, guillemot_btn_pad, 1, "Guillemot Pad" }, { 0 }}; /* * guillemot_read_packet() reads Guillemot joystick data. */ static int guillemot_read_packet(struct gameport *gameport, u8 *data) { unsigned long flags; unsigned char u, v; unsigned int t, s; int i; for (i = 0; i < GUILLEMOT_MAX_LENGTH; i++) data[i] = 0; i = 0; t = gameport_time(gameport, GUILLEMOT_MAX_START); s = gameport_time(gameport, GUILLEMOT_MAX_STROBE); local_irq_save(flags); gameport_trigger(gameport); v = gameport_read(gameport); while (t > 0 && i < GUILLEMOT_MAX_LENGTH * 8) { t--; u = v; v = gameport_read(gameport); if (v & ~u & 0x10) { data[i >> 3] |= ((v >> 5) & 1) << (i & 7); i++; t = s; } } local_irq_restore(flags); return i; } /* * guillemot_poll() reads and analyzes Guillemot joystick data. */ static void guillemot_poll(struct gameport *gameport) { struct guillemot *guillemot = gameport_get_drvdata(gameport); struct input_dev *dev = guillemot->dev; u8 data[GUILLEMOT_MAX_LENGTH]; int i; guillemot->reads++; if (guillemot_read_packet(guillemot->gameport, data) != GUILLEMOT_MAX_LENGTH * 8 || data[0] != 0x55 || data[16] != 0xaa) { guillemot->bads++; } else { for (i = 0; i < 6 && guillemot->type->abs[i] >= 0; i++) input_report_abs(dev, guillemot->type->abs[i], data[i + 5]); if (guillemot->type->hat) { input_report_abs(dev, ABS_HAT0X, guillemot_hat_to_axis[data[4] >> 4].x); input_report_abs(dev, ABS_HAT0Y, guillemot_hat_to_axis[data[4] >> 4].y); } for (i = 0; i < 16 && guillemot->type->btn[i] >= 0; i++) input_report_key(dev, guillemot->type->btn[i], (data[2 + (i >> 3)] >> (i & 7)) & 1); } input_sync(dev); } /* * guillemot_open() is a callback from the input open routine. 
*/ static int guillemot_open(struct input_dev *dev) { struct guillemot *guillemot = input_get_drvdata(dev); gameport_start_polling(guillemot->gameport); return 0; } /* * guillemot_close() is a callback from the input close routine. */ static void guillemot_close(struct input_dev *dev) { struct guillemot *guillemot = input_get_drvdata(dev); gameport_stop_polling(guillemot->gameport); } /* * guillemot_connect() probes for Guillemot joysticks. */ static int guillemot_connect(struct gameport *gameport, struct gameport_driver *drv) { struct guillemot *guillemot; struct input_dev *input_dev; u8 data[GUILLEMOT_MAX_LENGTH]; int i, t; int err; guillemot = kzalloc(sizeof(struct guillemot), GFP_KERNEL); input_dev = input_allocate_device(); if (!guillemot || !input_dev) { err = -ENOMEM; goto fail1; } guillemot->gameport = gameport; guillemot->dev = input_dev; gameport_set_drvdata(gameport, guillemot); err = gameport_open(gameport, drv, GAMEPORT_MODE_RAW); if (err) goto fail1; i = guillemot_read_packet(gameport, data); if (i != GUILLEMOT_MAX_LENGTH * 8 || data[0] != 0x55 || data[16] != 0xaa) { err = -ENODEV; goto fail2; } for (i = 0; guillemot_type[i].name; i++) if (guillemot_type[i].id == data[11]) break; if (!guillemot_type[i].name) { printk(KERN_WARNING "guillemot.c: Unknown joystick on %s. 
[ %02x%02x:%04x, ver %d.%02d ]\n", gameport->phys, data[12], data[13], data[11], data[14], data[15]); err = -ENODEV; goto fail2; } gameport_set_poll_handler(gameport, guillemot_poll); gameport_set_poll_interval(gameport, 20); snprintf(guillemot->phys, sizeof(guillemot->phys), "%s/input0", gameport->phys); guillemot->type = guillemot_type + i; input_dev->name = guillemot_type[i].name; input_dev->phys = guillemot->phys; input_dev->id.bustype = BUS_GAMEPORT; input_dev->id.vendor = GAMEPORT_ID_VENDOR_GUILLEMOT; input_dev->id.product = guillemot_type[i].id; input_dev->id.version = (int)data[14] << 8 | data[15]; input_dev->dev.parent = &gameport->dev; input_set_drvdata(input_dev, guillemot); input_dev->open = guillemot_open; input_dev->close = guillemot_close; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); for (i = 0; (t = guillemot->type->abs[i]) >= 0; i++) input_set_abs_params(input_dev, t, 0, 255, 0, 0); if (guillemot->type->hat) { input_set_abs_params(input_dev, ABS_HAT0X, -1, 1, 0, 0); input_set_abs_params(input_dev, ABS_HAT0Y, -1, 1, 0, 0); } for (i = 0; (t = guillemot->type->btn[i]) >= 0; i++) set_bit(t, input_dev->keybit); err = input_register_device(guillemot->dev); if (err) goto fail2; return 0; fail2: gameport_close(gameport); fail1: gameport_set_drvdata(gameport, NULL); input_free_device(input_dev); kfree(guillemot); return err; } static void guillemot_disconnect(struct gameport *gameport) { struct guillemot *guillemot = gameport_get_drvdata(gameport); printk(KERN_INFO "guillemot.c: Failed %d reads out of %d on %s\n", guillemot->reads, guillemot->bads, guillemot->phys); input_unregister_device(guillemot->dev); gameport_close(gameport); kfree(guillemot); } static struct gameport_driver guillemot_drv = { .driver = { .name = "guillemot", }, .description = DRIVER_DESC, .connect = guillemot_connect, .disconnect = guillemot_disconnect, }; static int __init guillemot_init(void) { return gameport_register_driver(&guillemot_drv); } static void __exit 
guillemot_exit(void) { gameport_unregister_driver(&guillemot_drv); } module_init(guillemot_init); module_exit(guillemot_exit);
gpl-2.0
vl197602/android_kernel_cyanogen_msm8916
net/wimax/id-table.c
12945
4471
/* * Linux WiMAX * Mappping of generic netlink family IDs to net devices * * * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com> * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * We assign a single generic netlink family ID to each device (to * simplify lookup). * * We need a way to map family ID to a wimax_dev pointer. * * The idea is to use a very simple lookup. Using a netlink attribute * with (for example) the interface name implies a heavier search over * all the network devices; seemed kind of a waste given that we know * we are looking for a WiMAX device and that most systems will have * just a single WiMAX adapter. * * We put all the WiMAX devices in the system in a linked list and * match the generic link family ID against the list. * * By using a linked list, the case of a single adapter in the system * becomes (almost) no overhead, while still working for many more. If * it ever goes beyond two, I'll be surprised. 
*/ #include <linux/device.h> #include <net/genetlink.h> #include <linux/netdevice.h> #include <linux/list.h> #include <linux/wimax.h> #include "wimax-internal.h" #define D_SUBMODULE id_table #include "debug-levels.h" static DEFINE_SPINLOCK(wimax_id_table_lock); static struct list_head wimax_id_table = LIST_HEAD_INIT(wimax_id_table); /* * wimax_id_table_add - add a gennetlink familiy ID / wimax_dev mapping * * @wimax_dev: WiMAX device descriptor to associate to the Generic * Netlink family ID. * * Look for an empty spot in the ID table; if none found, double the * table's size and get the first spot. */ void wimax_id_table_add(struct wimax_dev *wimax_dev) { d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev); spin_lock(&wimax_id_table_lock); list_add(&wimax_dev->id_table_node, &wimax_id_table); spin_unlock(&wimax_id_table_lock); d_fnend(3, NULL, "(wimax_dev %p)\n", wimax_dev); } /* * wimax_get_netdev_by_info - lookup a wimax_dev from the gennetlink info * * The generic netlink family ID has been filled out in the * nlmsghdr->nlmsg_type field, so we pull it from there, look it up in * the mapping table and reference the wimax_dev. * * When done, the reference should be dropped with * 'dev_put(wimax_dev->net_dev)'. 
 */
/*
 * Look up a wimax_dev by network interface index.
 *
 * @info:    generic netlink request info (only logged here; the lookup
 *           itself keys on @ifindex)
 * @ifindex: interface index to search the registered-device list for
 *
 * Walks wimax_id_table under wimax_id_table_lock; on a match, takes a
 * reference on the underlying net_device (dev_hold) before returning,
 * which the caller must drop with dev_put(wimax_dev->net_dev).
 * Returns NULL when no registered device has that ifindex.
 */
struct wimax_dev *wimax_dev_get_by_genl_info(
	struct genl_info *info, int ifindex)
{
	struct wimax_dev *wimax_dev = NULL;

	d_fnstart(3, NULL, "(info %p ifindex %d)\n", info, ifindex);
	spin_lock(&wimax_id_table_lock);
	list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
		if (wimax_dev->net_dev->ifindex == ifindex) {
			dev_hold(wimax_dev->net_dev);
			goto found;
		}
	}
	/* list_for_each_entry() leaves the cursor pointing at the head
	 * container on exhaustion; reset it to NULL to signal "not found" */
	wimax_dev = NULL;
	d_printf(1, NULL, "wimax: no devices found with ifindex %d\n",
		 ifindex);
found:
	spin_unlock(&wimax_id_table_lock);
	d_fnend(3, NULL, "(info %p ifindex %d) = %p\n",
		info, ifindex, wimax_dev);
	return wimax_dev;
}


/*
 * wimax_id_table_rm - Remove a gennetlink familiy ID / wimax_dev mapping
 *
 * @wimax_dev: device descriptor to unlink from the lookup table
 *
 * list_del_init() (rather than list_del()) leaves the node in a valid
 * empty state, so removal is safe to call more than once.
 */
void wimax_id_table_rm(struct wimax_dev *wimax_dev)
{
	spin_lock(&wimax_id_table_lock);
	list_del_init(&wimax_dev->id_table_node);
	spin_unlock(&wimax_id_table_lock);
}


/*
 * Release the gennetlink family id / mapping table
 *
 * On debug, verify that the table is empty upon removal. We want the
 * code always compiled, to ensure it doesn't bit rot. It will be
 * compiled out if CONFIG_BUG is disabled.
 */
void wimax_id_table_release(void)
{
	struct wimax_dev *wimax_dev;

	/* Without CONFIG_BUG there is nobody to act on the report; the
	 * early return lets the checker below compile but never run. */
#ifndef CONFIG_BUG
	return;
#endif
	spin_lock(&wimax_id_table_lock);
	/* Any entry still here at teardown is a driver that failed to
	 * call wimax_id_table_rm() -- report each one loudly. */
	list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
		printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n",
		       __func__, wimax_dev, wimax_dev->net_dev->ifindex);
		WARN_ON(1);
	}
	spin_unlock(&wimax_id_table_lock);
}
gpl-2.0
sooorajjj/android_kernel_cyanogen_msm8916
arch/sh/boards/mach-microdev/fdc37c93xapm.c
13969
6415
/*
 * Setup for the SMSC FDC37C93xAPM
 *
 * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com)
 * Copyright (C) 2003, 2004 SuperH, Inc.
 * Copyright (C) 2004, 2005 Paul Mundt
 *
 * SuperH SH4-202 MicroDev board support.
 *
 * May be copied or modified under the terms of the GNU General Public
 * License. See linux/COPYING for more information.
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/err.h>
#include <mach/microdev.h>

/*
 * The SuperIO chip is programmed through an index/data register pair at
 * legacy ISA address 0x3F0/0x3F1.  Writing a "key" byte to the config
 * port toggles the chip between run state and configuration state.
 */
#define SMSC_CONFIG_PORT_ADDR	(0x3F0)
#define SMSC_INDEX_PORT_ADDR	SMSC_CONFIG_PORT_ADDR
#define SMSC_DATA_PORT_ADDR	(SMSC_INDEX_PORT_ADDR + 1)

#define SMSC_ENTER_CONFIG_KEY	0x55
#define SMSC_EXIT_CONFIG_KEY	0xaa

/* NOTE(review): "SMCS" below looks like a typo for "SMSC"; the name is
 * used consistently so it is harmless, but confirm before renaming. */
#define SMCS_LOGICAL_DEV_INDEX	0x07	/* Logical Device Number */
#define SMSC_DEVICE_ID_INDEX	0x20	/* Device ID */
#define SMSC_DEVICE_REV_INDEX	0x21	/* Device Revision */
#define SMSC_ACTIVATE_INDEX	0x30	/* Activate */
#define SMSC_PRIMARY_BASE_INDEX	0x60	/* Primary Base Address */
#define SMSC_SECONDARY_BASE_INDEX 0x62	/* Secondary Base Address */
#define SMSC_PRIMARY_INT_INDEX	0x70	/* Primary Interrupt Select */
#define SMSC_SECONDARY_INT_INDEX 0x72	/* Secondary Interrupt Select */
#define SMSC_HDCS0_INDEX	0xf0	/* HDCS0 Address Decoder */
#define SMSC_HDCS1_INDEX	0xf1	/* HDCS1 Address Decoder */

/* Logical device numbers within the chip */
#define SMSC_IDE1_DEVICE	1	/* IDE #1 logical device */
#define SMSC_IDE2_DEVICE	2	/* IDE #2 logical device */
#define SMSC_PARALLEL_DEVICE	3	/* Parallel Port logical device */
#define SMSC_SERIAL1_DEVICE	4	/* Serial #1 logical device */
#define SMSC_SERIAL2_DEVICE	5	/* Serial #2 logical device */
#define SMSC_KEYBOARD_DEVICE	7	/* Keyboard logical device */
#define SMSC_CONFIG_REGISTERS	8	/* Configuration Registers (Aux I/O) */

/* Indexed read/write through the index/data register pair. */
#define SMSC_READ_INDEXED(index) ({ \
		outb((index), SMSC_INDEX_PORT_ADDR); \
		inb(SMSC_DATA_PORT_ADDR); })
#define SMSC_WRITE_INDEXED(val, index) ({ \
		outb((index), SMSC_INDEX_PORT_ADDR); \
		outb((val), SMSC_DATA_PORT_ADDR); })

#define	IDE1_PRIMARY_BASE	0x01f0	/* Task File Register base for IDE #1 */
#define	IDE1_SECONDARY_BASE	0x03f6	/* Miscellaneous AT registers for IDE #1 */
#define	IDE2_PRIMARY_BASE	0x0170	/* Task File Register base for IDE #2 */
#define	IDE2_SECONDARY_BASE	0x0376	/* Miscellaneous AT registers for IDE #2 */

#define SERIAL1_PRIMARY_BASE	0x03f8
#define SERIAL2_PRIMARY_BASE	0x02f8

#define	MSB(x)		( (x) >> 8 )
#define	LSB(x)		( (x) & 0xff )

	/* General-Purpose base address on CPU-board FPGA */
#define	MICRODEV_FPGA_GP_BASE		0xa6100000ul

/*
 * Probe for the FDC37C93xAPM SuperIO chip and program its keyboard,
 * serial and IDE logical devices with legacy PC addresses and the
 * board FPGA interrupt routing.  Returns 0 on success, -ENODEV when
 * the expected device ID/revision is not found.
 */
static int __init smsc_superio_setup(void)
{
	unsigned char devid, devrev;

		/* Initially the chip is in run state */
		/* Put it into configuration state */
	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);

		/* Read device ID info */
	devid = SMSC_READ_INDEXED(SMSC_DEVICE_ID_INDEX);
	devrev = SMSC_READ_INDEXED(SMSC_DEVICE_REV_INDEX);

	if ((devid == 0x30) && (devrev == 0x01))
		printk("SMSC FDC37C93xAPM SuperIO device detected\n");
	else
		return -ENODEV;

		/* Select the keyboard device */
	SMSC_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX);
		/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
		/* enable the interrupts */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_KEYBOARD, SMSC_PRIMARY_INT_INDEX);
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_MOUSE, SMSC_SECONDARY_INT_INDEX);

		/* Select the Serial #1 device */
	SMSC_WRITE_INDEXED(SMSC_SERIAL1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
		/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
		/* program with port addresses */
	SMSC_WRITE_INDEXED(MSB(SERIAL1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(SERIAL1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(0x00, SMSC_HDCS0_INDEX);
		/* enable the interrupts */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_SERIAL1, SMSC_PRIMARY_INT_INDEX);

		/* Select the Serial #2 device */
	SMSC_WRITE_INDEXED(SMSC_SERIAL2_DEVICE, SMCS_LOGICAL_DEV_INDEX);
		/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
		/* program with port addresses */
	SMSC_WRITE_INDEXED(MSB(SERIAL2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(SERIAL2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(0x00, SMSC_HDCS0_INDEX);
		/* enable the interrupts */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_SERIAL2, SMSC_PRIMARY_INT_INDEX);

		/* Select the IDE#1 device */
	SMSC_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
		/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
		/* program with port addresses */
	SMSC_WRITE_INDEXED(MSB(IDE1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(MSB(IDE1_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE1_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(0x0c, SMSC_HDCS0_INDEX);
	SMSC_WRITE_INDEXED(0x00, SMSC_HDCS1_INDEX);
		/* select the interrupt */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_IDE1, SMSC_PRIMARY_INT_INDEX);

		/* Select the IDE#2 device */
	SMSC_WRITE_INDEXED(SMSC_IDE2_DEVICE, SMCS_LOGICAL_DEV_INDEX);
		/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
		/* program with port addresses */
	SMSC_WRITE_INDEXED(MSB(IDE2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(MSB(IDE2_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE2_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+1);
		/* select the interrupt */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_IDE2, SMSC_PRIMARY_INT_INDEX);

		/* Select the configuration registers */
	SMSC_WRITE_INDEXED(SMSC_CONFIG_REGISTERS, SMCS_LOGICAL_DEV_INDEX);
		/* enable the appropriate GPIO pins for IDE functionality:
		 * bit[0]   In/Out		1==input;	0==output
		 * bit[1]   Polarity		1==invert;	0==no invert
		 * bit[2]   Int Enb #1		1==Enable Combined IRQ #1; 0==disable
		 * bit[3:4] Function Select	00==original; 01==Alternate Function #1
		 */
	SMSC_WRITE_INDEXED(0x00, 0xc2);	/* GP42 = nIDE1_OE */
	SMSC_WRITE_INDEXED(0x01, 0xc5);	/* GP45 = IDE1_IRQ */
	SMSC_WRITE_INDEXED(0x00, 0xc6);	/* GP46 = nIOROP */
	SMSC_WRITE_INDEXED(0x00, 0xc7);	/* GP47 = nIOWOP */
	SMSC_WRITE_INDEXED(0x08, 0xe8);	/* GP20 = nIDE2_OE */

		/* Exit the configuration state */
	outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);

	return 0;
}
device_initcall(smsc_superio_setup);
gpl-2.0
pio-masaki/kernel_AT270
drivers/net/wireless/orinoco/fw.c
402
9891
/* Firmware file reading and download helpers
 *
 * See copyright notice in main.c
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/device.h>

#include "hermes.h"
#include "hermes_dld.h"
#include "orinoco.h"

#include "fw.h"

/* End markers (for Symbol firmware only) */
#define TEXT_END	0x1A		/* End of text header */

/*
 * Per-variant firmware description: primary/station/AP image names and
 * the location/size of the Plug Data Area (PDA) on the card.
 */
struct fw_info {
	char *pri_fw;
	char *sta_fw;
	char *ap_fw;
	u32 pda_addr;
	u16 pda_size;
};

/* Indexed by priv->firmware_type: AGERE, INTERSIL, SYMBOL (in that order
 * per the switch in orinoco_download() below). */
static const struct fw_info orinoco_fw[] = {
	{ NULL, "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 },
	{ NULL, "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
	{ "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", NULL, 0x00003100, 512 }
};
MODULE_FIRMWARE("agere_sta_fw.bin");
MODULE_FIRMWARE("agere_ap_fw.bin");
MODULE_FIRMWARE("prism_sta_fw.bin");
MODULE_FIRMWARE("prism_ap_fw.bin");
MODULE_FIRMWARE("symbol_sp24t_prim_fw");
MODULE_FIRMWARE("symbol_sp24t_sec_fw");

/* Structure used to access fields in FW
 * Make sure LE decoding macros are used
 */
struct orinoco_fw_header {
	char hdr_vers[6];	/* ASCII string for header version */
	__le16 headersize;	/* Total length of header */
	__le32 entry_point;	/* NIC entry point */
	__le32 blocks;		/* Number of blocks to program */
	__le32 block_offset;	/* Offset of block data from eof header */
	__le32 pdr_offset;	/* Offset to PDR data from eof header */
	__le32 pri_offset;	/* Offset to primary plug data */
	__le32 compat_offset;	/* Offset to compatibility data*/
	char signature[0];	/* FW signature length headersize-20 */
} __packed;

/* Check the range of various header entries. Return a pointer to a
 * description of the problem, or NULL if everything checks out.
 *
 * NOTE(review): the (hdrsize + le32 offset) sums are computed in 32-bit
 * unsigned arithmetic; a hostile image with an offset near UINT_MAX could
 * wrap and pass these checks — confirm whether that matters here.
 */
static const char *validate_fw(const struct orinoco_fw_header *hdr, size_t len)
{
	u16 hdrsize;

	if (len < sizeof(*hdr))
		return "image too small";
	if (memcmp(hdr->hdr_vers, "HFW", 3) != 0)
		return "format not recognised";

	hdrsize = le16_to_cpu(hdr->headersize);
	if (hdrsize > len)
		return "bad headersize";
	if ((hdrsize + le32_to_cpu(hdr->block_offset)) > len)
		return "bad block offset";
	if ((hdrsize + le32_to_cpu(hdr->pdr_offset)) > len)
		return "bad PDR offset";
	if ((hdrsize + le32_to_cpu(hdr->pri_offset)) > len)
		return "bad PRI offset";
	if ((hdrsize + le32_to_cpu(hdr->compat_offset)) > len)
		return "bad compat offset";

	/* TODO: consider adding a checksum or CRC to the firmware format */
	return NULL;
}

/* Return the cached primary or secondary firmware image, or NULL when
 * caching is compiled out (callers then fall back to request_firmware()). */
#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
static inline const struct firmware *
orinoco_cached_fw_get(struct orinoco_private *priv, bool primary)
{
	if (primary)
		return priv->cached_pri_fw;
	else
		return priv->cached_fw;
}
#else
#define orinoco_cached_fw_get(priv, primary) (NULL)
#endif

/* Download either STA or AP firmware into the card.
 */
/*
 * Download an Agere/Prism-style firmware image into the card.
 *
 * @priv: driver state (supplies the hermes ops and device pointer)
 * @fw:   firmware description (image names, PDA location/size)
 * @ap:   non-zero selects the AP image, zero the station image
 *
 * Reads the card's current Plug Data Area first so it can be re-applied
 * on top of the freshly programmed image.  Uses either the cached image
 * (see orinoco_cached_fw_get) or request_firmware().  Returns 0 on
 * success or a negative errno.
 */
static int orinoco_dl_firmware(struct orinoco_private *priv,
			       const struct fw_info *fw,
			       int ap)
{
	/* Plug Data Area (PDA) */
	__le16 *pda;

	struct hermes *hw = &priv->hw;
	const struct firmware *fw_entry;
	const struct orinoco_fw_header *hdr;
	const unsigned char *first_block;
	const void *end;
	const char *firmware;
	const char *fw_err;
	struct device *dev = priv->dev;
	int err = 0;

	pda = kzalloc(fw->pda_size, GFP_KERNEL);
	if (!pda)
		return -ENOMEM;

	if (ap)
		firmware = fw->ap_fw;
	else
		firmware = fw->sta_fw;

	dev_dbg(dev, "Attempting to download firmware %s\n", firmware);

	/* Read current plug data */
	err = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
	dev_dbg(dev, "Read PDA returned %d\n", err);
	if (err)
		goto free;

	if (!orinoco_cached_fw_get(priv, false)) {
		err = request_firmware(&fw_entry, firmware, priv->dev);

		if (err) {
			dev_err(dev, "Cannot find firmware %s\n", firmware);
			err = -ENOENT;
			goto free;
		}
	} else
		fw_entry = orinoco_cached_fw_get(priv, false);

	hdr = (const struct orinoco_fw_header *) fw_entry->data;

	fw_err = validate_fw(hdr, fw_entry->size);
	if (fw_err) {
		dev_warn(dev, "Invalid firmware image detected (%s). "
			 "Aborting download\n", fw_err);
		err = -EINVAL;
		goto abort;
	}

	/* Enable aux port to allow programming */
	err = hw->ops->program_init(hw, le32_to_cpu(hdr->entry_point));
	dev_dbg(dev, "Program init returned %d\n", err);
	if (err != 0)
		goto abort;

	/* Program data */
	first_block = (fw_entry->data +
		       le16_to_cpu(hdr->headersize) +
		       le32_to_cpu(hdr->block_offset));
	end = fw_entry->data + fw_entry->size;

	err = hermes_program(hw, first_block, end);
	dev_dbg(dev, "Program returned %d\n", err);
	if (err != 0)
		goto abort;

	/* Update production data */
	first_block = (fw_entry->data +
		       le16_to_cpu(hdr->headersize) +
		       le32_to_cpu(hdr->pdr_offset));

	err = hermes_apply_pda_with_defaults(hw, first_block, end, pda,
					     &pda[fw->pda_size / sizeof(*pda)]);
	dev_dbg(dev, "Apply PDA returned %d\n", err);
	if (err)
		goto abort;

	/* Tell card we've finished */
	err = hw->ops->program_end(hw);
	dev_dbg(dev, "Program end returned %d\n", err);
	if (err != 0)
		goto abort;

	/* Check if we're running */
	dev_dbg(dev, "hermes_present returned %d\n", hermes_present(hw));

abort:
	/* If we requested the firmware, release it. */
	if (!orinoco_cached_fw_get(priv, false))
		release_firmware(fw_entry);

free:
	kfree(pda);
	return err;
}

/*
 * Process a firmware image - stop the card, load the firmware, reset
 * the card and make sure it responds. For the secondary firmware take
 * care of the PDA - read it and then write it on top of the firmware.
*/ static int symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw, const unsigned char *image, const void *end, int secondary) { struct hermes *hw = &priv->hw; int ret = 0; const unsigned char *ptr; const unsigned char *first_block; /* Plug Data Area (PDA) */ __le16 *pda = NULL; /* Binary block begins after the 0x1A marker */ ptr = image; while (*ptr++ != TEXT_END); first_block = ptr; /* Read the PDA from EEPROM */ if (secondary) { pda = kzalloc(fw->pda_size, GFP_KERNEL); if (!pda) return -ENOMEM; ret = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size); if (ret) goto free; } /* Stop the firmware, so that it can be safely rewritten */ if (priv->stop_fw) { ret = priv->stop_fw(priv, 1); if (ret) goto free; } /* Program the adapter with new firmware */ ret = hermes_program(hw, first_block, end); if (ret) goto free; /* Write the PDA to the adapter */ if (secondary) { size_t len = hermes_blocks_length(first_block, end); ptr = first_block + len; ret = hermes_apply_pda(hw, ptr, end, pda, &pda[fw->pda_size / sizeof(*pda)]); kfree(pda); if (ret) return ret; } /* Run the firmware */ if (priv->stop_fw) { ret = priv->stop_fw(priv, 0); if (ret) return ret; } /* Reset hermes chip and make sure it responds */ ret = hw->ops->init(hw); /* hermes_reset() should return 0 with the secondary firmware */ if (secondary && ret != 0) return -ENODEV; /* And this should work with any firmware */ if (!hermes_present(hw)) return -ENODEV; return 0; free: kfree(pda); return ret; } /* * Download the firmware into the card, this also does a PCMCIA soft * reset on the card, to make sure it's in a sane state. 
 */
/*
 * Download both Symbol images: the primary (bootstrap) firmware followed
 * by the secondary (station) firmware.  Each image comes either from the
 * suspend/init cache or from request_firmware().  Returns 0 on success
 * or a negative errno from the first failing step.
 */
static int symbol_dl_firmware(struct orinoco_private *priv,
			      const struct fw_info *fw)
{
	struct device *dev = priv->dev;
	int ret;
	const struct firmware *fw_entry;

	if (!orinoco_cached_fw_get(priv, true)) {
		if (request_firmware(&fw_entry, fw->pri_fw, priv->dev) != 0) {
			dev_err(dev, "Cannot find firmware: %s\n", fw->pri_fw);
			return -ENOENT;
		}
	} else
		fw_entry = orinoco_cached_fw_get(priv, true);

	/* Load primary firmware */
	ret = symbol_dl_image(priv, fw, fw_entry->data,
			      fw_entry->data + fw_entry->size, 0);

	if (!orinoco_cached_fw_get(priv, true))
		release_firmware(fw_entry);

	if (ret) {
		dev_err(dev, "Primary firmware download failed\n");
		return ret;
	}

	if (!orinoco_cached_fw_get(priv, false)) {
		if (request_firmware(&fw_entry, fw->sta_fw, priv->dev) != 0) {
			dev_err(dev, "Cannot find firmware: %s\n", fw->sta_fw);
			return -ENOENT;
		}
	} else
		fw_entry = orinoco_cached_fw_get(priv, false);

	/* Load secondary firmware */
	ret = symbol_dl_image(priv, fw, fw_entry->data,
			      fw_entry->data + fw_entry->size, 1);
	if (!orinoco_cached_fw_get(priv, false))
		release_firmware(fw_entry);
	if (ret)
		dev_err(dev, "Secondary firmware download failed\n");

	return ret;
}

/*
 * Public entry point: (re)load firmware appropriate to the detected
 * card variant.  Intersil cards need no download and return 0.
 */
int orinoco_download(struct orinoco_private *priv)
{
	int err = 0;
	/* Reload firmware */
	switch (priv->firmware_type) {
	case FIRMWARE_TYPE_AGERE:
		/* case FIRMWARE_TYPE_INTERSIL: */
		err = orinoco_dl_firmware(priv,
					  &orinoco_fw[priv->firmware_type], 0);
		break;

	case FIRMWARE_TYPE_SYMBOL:
		err = symbol_dl_firmware(priv,
					 &orinoco_fw[priv->firmware_type]);
		break;
	case FIRMWARE_TYPE_INTERSIL:
		break;
	}
	/* TODO: if we fail we probably need to reinitialise
	 * the driver */

	return err;
}

#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
/*
 * Pre-fetch the firmware images into priv->cached_pri_fw/cached_fw so a
 * later download (e.g. on resume) does not need the filesystem.  Failure
 * to fetch is deliberately silent: the cache is best-effort and the
 * download paths fall back to request_firmware().
 */
void orinoco_cache_fw(struct orinoco_private *priv, int ap)
{
	const struct firmware *fw_entry = NULL;
	const char *pri_fw;
	const char *fw;

	pri_fw = orinoco_fw[priv->firmware_type].pri_fw;
	if (ap)
		fw = orinoco_fw[priv->firmware_type].ap_fw;
	else
		fw = orinoco_fw[priv->firmware_type].sta_fw;

	if (pri_fw) {
		if (request_firmware(&fw_entry, pri_fw, priv->dev) == 0)
			priv->cached_pri_fw = fw_entry;
	}

	if (fw) {
		if (request_firmware(&fw_entry, fw, priv->dev) == 0)
			priv->cached_fw = fw_entry;
	}
}

/* Drop any cached firmware images.  (The NULL checks are redundant since
 * release_firmware(NULL) is a no-op, but harmless.) */
void orinoco_uncache_fw(struct orinoco_private *priv)
{
	if (priv->cached_pri_fw)
		release_firmware(priv->cached_pri_fw);
	if (priv->cached_fw)
		release_firmware(priv->cached_fw);

	priv->cached_pri_fw = NULL;
	priv->cached_fw = NULL;
}
#endif
gpl-2.0
viaembedded/arm-soc
drivers/staging/speakup/speakup_decpc.c
402
15366
/* * This is the DECtalk PC speakup driver * * Some constants from DEC's DOS driver: * Copyright (c) by Digital Equipment Corp. * * 386BSD DECtalk PC driver: * Copyright (c) 1996 Brian Buhrow <buhrow@lothlorien.nfbcal.org> * * Linux DECtalk PC driver: * Copyright (c) 1997 Nicolas Pitre <nico@cam.org> * * speakup DECtalk PC Internal driver: * Copyright (c) 2003 David Borowski <david575@golden.net> * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/kthread.h> #include "spk_priv.h" #include "speakup.h" #define MODULE_init 0x0dec /* module in boot code */ #define MODULE_self_test 0x8800 /* module in self-test */ #define MODULE_reset 0xffff /* reinit the whole module */ #define MODE_mask 0xf000 /* mode bits in high nibble */ #define MODE_null 0x0000 #define MODE_test 0x2000 /* in testing mode */ #define MODE_status 0x8000 #define STAT_int 0x0001 /* running in interrupt mode */ #define STAT_tr_char 0x0002 /* character data to transmit */ #define STAT_rr_char 0x0004 /* ready to receive char data */ #define STAT_cmd_ready 0x0008 /* ready to accept commands */ #define STAT_dma_ready 0x0010 /* dma command ready */ #define STAT_digitized 0x0020 /* spc in digitized mode */ #define STAT_new_index 0x0040 /* new last index ready */ #define STAT_new_status 0x0080 /* new status posted */ #define STAT_dma_state 0x0100 /* dma state toggle */ #define STAT_index_valid 0x0200 /* indexs are valid */ #define STAT_flushing 0x0400 /* flush in progress */ #define STAT_self_test 0x0800 /* module in self test */ #define MODE_ready 0xc000 /* module ready for next phase */ #define READY_boot 0x0000 #define READY_kernel 0x0001 #define MODE_error 0xf000 #define CMD_mask 0xf000 /* mask for command nibble */ #define CMD_null 0x0000 /* post status */ #define CMD_control 0x1000 /* hard control command */ #define CTRL_mask 0x0F00 /* mask off control nibble */ #define CTRL_data 0x00FF /* mask to get data byte */ #define CTRL_null 0x0000 /* null control */ #define CTRL_vol_up 0x0100 /* increase volume */ #define CTRL_vol_down 0x0200 /* decrease volume */ #define CTRL_vol_set 0x0300 /* set volume */ #define CTRL_pause 0x0400 /* pause spc */ #define CTRL_resume 0x0500 /* resume spc clock */ #define CTRL_resume_spc 0x0001 /* resume spc soft pause */ #define CTRL_flush 0x0600 /* flush all buffers */ #define CTRL_int_enable 0x0700 /* enable status change 
ints */ #define CTRL_buff_free 0x0800 /* buffer remain count */ #define CTRL_buff_used 0x0900 /* buffer in use */ #define CTRL_speech 0x0a00 /* immediate speech change */ #define CTRL_SP_voice 0x0001 /* voice change */ #define CTRL_SP_rate 0x0002 /* rate change */ #define CTRL_SP_comma 0x0003 /* comma pause change */ #define CTRL_SP_period 0x0004 /* period pause change */ #define CTRL_SP_rate_delta 0x0005 /* delta rate change */ #define CTRL_SP_get_param 0x0006 /* return the desired parameter */ #define CTRL_last_index 0x0b00 /* get last index spoken */ #define CTRL_io_priority 0x0c00 /* change i/o priority */ #define CTRL_free_mem 0x0d00 /* get free paragraphs on module */ #define CTRL_get_lang 0x0e00 /* return bit mask of loaded * languages */ #define CMD_test 0x2000 /* self-test request */ #define TEST_mask 0x0F00 /* isolate test field */ #define TEST_null 0x0000 /* no test requested */ #define TEST_isa_int 0x0100 /* assert isa irq */ #define TEST_echo 0x0200 /* make data in == data out */ #define TEST_seg 0x0300 /* set peek/poke segment */ #define TEST_off 0x0400 /* set peek/poke offset */ #define TEST_peek 0x0500 /* data out == *peek */ #define TEST_poke 0x0600 /* *peek == data in */ #define TEST_sub_code 0x00FF /* user defined test sub codes */ #define CMD_id 0x3000 /* return software id */ #define ID_null 0x0000 /* null id */ #define ID_kernel 0x0100 /* kernel code executing */ #define ID_boot 0x0200 /* boot code executing */ #define CMD_dma 0x4000 /* force a dma start */ #define CMD_reset 0x5000 /* reset module status */ #define CMD_sync 0x6000 /* kernel sync command */ #define CMD_char_in 0x7000 /* single character send */ #define CMD_char_out 0x8000 /* single character get */ #define CHAR_count_1 0x0100 /* one char in cmd_low */ #define CHAR_count_2 0x0200 /* the second in data_low */ #define CHAR_count_3 0x0300 /* the third in data_high */ #define CMD_spc_mode 0x9000 /* change spc mode */ #define CMD_spc_to_text 0x0100 /* set to text mode */ #define 
CMD_spc_to_digit 0x0200 /* set to digital mode */ #define CMD_spc_rate 0x0400 /* change spc data rate */ #define CMD_error 0xf000 /* severe error */ enum { PRIMARY_DIC = 0, USER_DIC, COMMAND_DIC, ABBREV_DIC }; #define DMA_single_in 0x01 #define DMA_single_out 0x02 #define DMA_buff_in 0x03 #define DMA_buff_out 0x04 #define DMA_control 0x05 #define DT_MEM_ALLOC 0x03 #define DT_SET_DIC 0x04 #define DT_START_TASK 0x05 #define DT_LOAD_MEM 0x06 #define DT_READ_MEM 0x07 #define DT_DIGITAL_IN 0x08 #define DMA_sync 0x06 #define DMA_sync_char 0x07 #define DRV_VERSION "2.12" #define PROCSPEECH 0x0b #define SYNTH_IO_EXTENT 8 static int synth_probe(struct spk_synth *synth); static void dtpc_release(void); static const char *synth_immediate(struct spk_synth *synth, const char *buf); static void do_catch_up(struct spk_synth *synth); static void synth_flush(struct spk_synth *synth); static int synth_portlist[] = { 0x340, 0x350, 0x240, 0x250, 0 }; static int in_escape, is_flushing; static int dt_stat, dma_state; static struct var_t vars[] = { { CAPS_START, .u.s = {"[:dv ap 200]" } }, { CAPS_STOP, .u.s = {"[:dv ap 100]" } }, { RATE, .u.n = {"[:ra %d]", 9, 0, 18, 150, 25, NULL } }, { PITCH, .u.n = {"[:dv ap %d]", 80, 0, 100, 20, 0, NULL } }, { VOL, .u.n = {"[:vo se %d]", 5, 0, 9, 5, 10, NULL } }, { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } }, { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/decpc. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
 */
static struct attribute *synth_attrs[] = {
	&caps_start_attribute.attr,
	&caps_stop_attribute.attr,
	&pitch_attribute.attr,
	&punct_attribute.attr,
	&rate_attribute.attr,
	&voice_attribute.attr,
	&vol_attribute.attr,
	&delay_time_attribute.attr,
	&direct_attribute.attr,
	&full_time_attribute.attr,
	&jiffy_delta_attribute.attr,
	&trigger_time_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};

/* Driver description handed to the speakup core. */
static struct spk_synth synth_dec_pc = {
	.name = "decpc",
	.version = DRV_VERSION,
	.long_name = "Dectalk PC",
	.init = "[:pe -380]",
	.procspeech = PROCSPEECH,
	.delay = 500,
	.trigger = 50,
	.jiffies = 50,
	.full = 1000,
	.flags = SF_DEC,
	.startup = SYNTH_START,
	.checkval = SYNTH_CHECK,
	.vars = vars,
	.probe = synth_probe,
	.release = dtpc_release,
	.synth_immediate = synth_immediate,
	.catch_up = do_catch_up,
	.flush = synth_flush,
	.is_alive = spk_synth_is_alive_nop,
	.synth_adjust = NULL,
	.read_buff_add = NULL,
	.get_index = NULL,
	.indexing = {
		.command = NULL,
		.lowindex = 0,
		.highindex = 0,
		.currindex = 0,
	},
	.attributes = {
		.attrs = synth_attrs,
		.name = "decpc",
	},
};

/* Read the 16-bit status word from the card (low byte at port_tts,
 * high byte at port_tts+1); also latches it into dt_stat. */
static int dt_getstatus(void)
{
	dt_stat = inb_p(speakup_info.port_tts) |
		 (inb_p(speakup_info.port_tts + 1) << 8);
	return dt_stat;
}

/* Write a 16-bit command word to the card, low byte first. */
static void dt_sendcmd(u_int cmd)
{
	outb_p(cmd & 0xFF, speakup_info.port_tts);
	outb_p((cmd >> 8) & 0xFF, speakup_info.port_tts+1);
}

/* Poll until all bits in @bit are set in the status word.
 * Returns 1 on success, 0 after ~5ms (100 * 50us) of polling. */
static int dt_waitbit(int bit)
{
	int timeout = 100;

	while (--timeout > 0) {
		if ((dt_getstatus() & bit) == bit)
			return 1;
		udelay(50);
	}
	return 0;
}

/* Wait for the DMA engine: first for STAT_dma_ready, then for the
 * dma-state toggle bit to match our recorded dma_state.  On toggle
 * timeout the recorded state is resynchronised from hardware and the
 * function still reports success. */
static int dt_wait_dma(void)
{
	int timeout = 100, state = dma_state;

	if (!dt_waitbit(STAT_dma_ready))
		return 0;
	while (--timeout > 0) {
		if ((dt_getstatus()&STAT_dma_state) == state)
			return 1;
		udelay(50);
	}
	dma_state = dt_getstatus() & STAT_dma_state;
	return 1;
}

/* Issue a CMD_control sub-command and wait for the card to consume it.
 * Returns 0 on success, -1 if the card never reported cmd-ready. */
static int dt_ctrl(u_int cmd)
{
	int timeout = 10;

	if (!dt_waitbit(STAT_cmd_ready))
		return -1;
	outb_p(0, speakup_info.port_tts+2);
	outb_p(0, speakup_info.port_tts+3);
	dt_getstatus();
	dt_sendcmd(CMD_control|cmd);
	outb_p(0, speakup_info.port_tts+6);
	while (dt_getstatus() & STAT_cmd_ready) {
		udelay(20);
		if (--timeout == 0)
			break;
	}
	dt_sendcmd(CMD_null);
	return 0;
}

/* Abort any speech in progress: issue CTRL_flush, wait for DMA, send a
 * DMA sync, and wait for the flush to complete.  Re-entrancy is guarded
 * by the is_flushing flag. */
static void synth_flush(struct spk_synth *synth)
{
	int timeout = 10;

	if (is_flushing)
		return;
	/* NOTE(review): any non-zero value would do as a busy marker;
	 * the significance of 4 (if any) is not evident from this file. */
	is_flushing = 4;
	in_escape = 0;
	while (dt_ctrl(CTRL_flush)) {
		if (--timeout == 0)
			break;
		udelay(50);
	}
	for (timeout = 0; timeout < 10; timeout++) {
		if (dt_waitbit(STAT_dma_ready))
			break;
		udelay(50);
	}
	outb_p(DMA_sync, speakup_info.port_tts+4);
	outb_p(0, speakup_info.port_tts+4);
	udelay(100);
	for (timeout = 0; timeout < 10; timeout++) {
		if (!(dt_getstatus() & STAT_flushing))
			break;
		udelay(50);
	}
	/* Resynchronise our copy of the dma-state toggle with hardware. */
	dma_state = dt_getstatus() & STAT_dma_state;
	dma_state ^= STAT_dma_state;
	is_flushing = 0;
}

/* Send one character via single-byte DMA.  Returns 0 on success,
 * -1 if DMA never became ready, -2 if the card cannot accept a char. */
static int dt_sendchar(char ch)
{
	if (!dt_wait_dma())
		return -1;
	if (!(dt_stat & STAT_rr_char))
		return -2;
	outb_p(DMA_single_in, speakup_info.port_tts+4);
	outb_p(ch, speakup_info.port_tts+4);
	dma_state ^= STAT_dma_state;
	return 0;
}

/* Probe helper: check that a card at speakup_info.port_tts is present
 * and running its kernel firmware.  Returns 0 when alive; on failure
 * releases the I/O region, zeroes port_tts and returns a negative code.
 *
 * NOTE(review): when dt_waitbit() fails, status is set to -2 but then
 * unconditionally overwritten by -3 below, so callers only ever see
 * 0, -1 or -3 — confirm whether the distinct -2 code was intended. */
static int testkernel(void)
{
	int status = 0;

	if (dt_getstatus() == 0xffff) {
		status = -1;
		goto oops;
	}
	dt_sendcmd(CMD_sync);
	if (!dt_waitbit(STAT_cmd_ready))
		status = -2;
	else if (dt_stat&0x8000)
		return 0;
	else if (dt_stat == 0x0dec)
		pr_warn("dec_pc at 0x%x, software not loaded\n",
			speakup_info.port_tts);
	status = -3;
oops:
	synth_release_region(speakup_info.port_tts, SYNTH_IO_EXTENT);
	speakup_info.port_tts = 0;
	return status;
}

/* Main output loop run from the speakup kthread: drains the synth
 * buffer one character at a time, honouring flush requests, bracketed
 * [...] escape sequences and the configurable delay/jiffy pacing vars.
 * speakup_info.spinlock protects the shared buffer and variables. */
static void do_catch_up(struct spk_synth *synth)
{
	u_char ch;
	static u_char last;	/* previous char, for punctuation pauses */
	unsigned long flags;
	unsigned long jiff_max;
	struct var_t *jiffy_delta;
	struct var_t *delay_time;
	int jiffy_delta_val;
	int delay_time_val;

	jiffy_delta = spk_get_var(JIFFY);
	delay_time = spk_get_var(DELAY);
	spin_lock_irqsave(&speakup_info.spinlock, flags);
	jiffy_delta_val = jiffy_delta->u.n.value;
	spin_unlock_irqrestore(&speakup_info.spinlock, flags);
	jiff_max = jiffies + jiffy_delta_val;

	while (!kthread_should_stop()) {
		spin_lock_irqsave(&speakup_info.spinlock, flags);
		if (speakup_info.flushing) {
			speakup_info.flushing = 0;
			spin_unlock_irqrestore(&speakup_info.spinlock, flags);
			synth->flush(synth);
			continue;
		}
		if (synth_buffer_empty()) {
			spin_unlock_irqrestore(&speakup_info.spinlock, flags);
			break;
		}
		ch = synth_buffer_peek();
		set_current_state(TASK_INTERRUPTIBLE);
		delay_time_val = delay_time->u.n.value;
		spin_unlock_irqrestore(&speakup_info.spinlock, flags);
		if (ch == '\n')
			ch = 0x0D;	/* card wants CR, not LF */
		if (dt_sendchar(ch)) {
			/* card busy: back off and retry the same char */
			schedule_timeout(msecs_to_jiffies(delay_time_val));
			continue;
		}
		set_current_state(TASK_RUNNING);
		spin_lock_irqsave(&speakup_info.spinlock, flags);
		synth_buffer_getc();
		spin_unlock_irqrestore(&speakup_info.spinlock, flags);
		if (ch == '[')
			in_escape = 1;
		else if (ch == ']')
			in_escape = 0;
		else if (ch <= SPACE) {
			if (!in_escape && strchr(",.!?;:", last))
				dt_sendchar(PROCSPEECH);
			if (time_after_eq(jiffies, jiff_max)) {
				if (!in_escape)
					dt_sendchar(PROCSPEECH);
				spin_lock_irqsave(&speakup_info.spinlock,
							flags);
				jiffy_delta_val = jiffy_delta->u.n.value;
				delay_time_val = delay_time->u.n.value;
				spin_unlock_irqrestore(&speakup_info.spinlock,
							flags);
				schedule_timeout(msecs_to_jiffies
						 (delay_time_val));
				jiff_max = jiffies + jiffy_delta_val;
			}
		}
		last = ch;
		ch = 0;
	}
	if (!in_escape)
		dt_sendchar(PROCSPEECH);
}

/* Send a NUL-terminated string synchronously.  Returns NULL on success,
 * or a pointer to the first character that could not be sent. */
static const char *synth_immediate(struct spk_synth *synth, const char *buf)
{
	u_char ch;

	while ((ch = *buf)) {
		if (ch == '\n')
			ch = PROCSPEECH;
		if (dt_sendchar(ch))
			return buf;
		buf++;
	}
	return NULL;
}

/* Probe the known legacy I/O ports for a DECtalk PC card.  On success
 * speakup_info.port_tts holds the base port and synth->alive is set;
 * returns -ENODEV when no card responds. */
static int synth_probe(struct spk_synth *synth)
{
	int i = 0, failed = 0;

	pr_info("Probing for %s.\n", synth->long_name);
	for (i = 0; synth_portlist[i]; i++) {
		if (synth_request_region(synth_portlist[i],
					 SYNTH_IO_EXTENT)) {
			pr_warn("request_region: failed with 0x%x, %d\n",
				synth_portlist[i], SYNTH_IO_EXTENT);
			continue;
		}
		speakup_info.port_tts = synth_portlist[i];
		failed = testkernel();
		if (failed == 0)
			break;
	}
	if (failed) {
		pr_info("%s: not found\n", synth->long_name);
		return -ENODEV;
	}
	pr_info("%s: %03x-%03x, Driver Version %s,\n", synth->long_name,
		speakup_info.port_tts, speakup_info.port_tts + 7,
		synth->version);
	synth->alive = 1;
	return 0;
}

/* Release the I/O region claimed by synth_probe(), if any. */
static void dtpc_release(void)
{
	if (speakup_info.port_tts)
		synth_release_region(speakup_info.port_tts, SYNTH_IO_EXTENT);
	speakup_info.port_tts = 0;
}

module_param_named(start, synth_dec_pc.startup, short, S_IRUGO);
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");

module_spk_synth(synth_dec_pc);

MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DECtalk PC synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
gpl-2.0
agat63/AGAT_G3_kernel
drivers/s390/char/sclp_cmd.c
1938
17468
/*
 * Copyright IBM Corp. 2007, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/setup.h>

#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

/* Layout of the Read-SCP-Info SCCB; byte offsets in the comments. */
struct read_info_sccb {
	struct	sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_reserved0[24 - 11];	/* 11-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_reserved1[48 - 32];	/* 32-47 */
	u64	facilities;		/* 48-55 */
	u8	_reserved2[84 - 56];	/* 56-83 */
	u8	fac84;			/* 84 */
	u8	_reserved3[91 - 85];	/* 85-90 */
	u8	flags;			/* 91 */
	u8	_reserved4[100 - 92];	/* 92-99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_reserved5[4096 - 112];	/* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));

static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;
u64 sclp_facilities;
static u8 sclp_fac84;
static unsigned long long rzm;	/* storage increment size, bytes */
static unsigned long long rnmax;	/* max number of storage increments */

/*
 * Issue one SCLP command synchronously during early boot, before the
 * interrupt-driven sclp core is up: enable the service-signal subclass
 * (control register 0 bit 9), start the service call and wait for its
 * external interrupt via an enabled-wait PSW.
 */
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
			PSW_MASK_WAIT | PSW_DEFAULT_KEY);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}

/*
 * Fill early_read_info_sccb via Read-SCP-Info, preferring the "forced"
 * variant and falling back to the plain one.  Sets
 * early_read_info_sccb_valid on a 0x10 (success) response.
 */
static void __init sclp_read_info_early(void)
{
	int rc;
	int i;
	struct read_info_sccb *sccb;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	sccb = &early_read_info_sccb;
	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.function_code = 0x80;
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		if (sccb->header.response_code == 0x10) {
			early_read_info_sccb_valid = 1;
			break;
		}
		/* 0x1f0 = invalid command; try the next variant. */
		if (sccb->header.response_code != 0x1f0)
			break;
	}
}

/*
 * Cache facility bits and memory geometry (rnmax/rzm) from the early
 * Read-SCP-Info response.  The *2 fields supersede the 16/8-bit
 * originals when those are zero.
 */
void __init sclp_facilities_detect(void)
{
	struct read_info_sccb *sccb;

	sclp_read_info_early();
	if (!early_read_info_sccb_valid)
		return;

	sccb = &early_read_info_sccb;
	sclp_facilities = sccb->facilities;
	sclp_fac84 = sccb->fac84;
	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	rzm <<= 20;	/* rnsize is in megabytes; convert to bytes */
}

unsigned long long sclp_get_rnmax(void)
{
	return rnmax;
}

unsigned long long sclp_get_rzm(void)
{
	return rzm;
}

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. Therefore the sccb should have valid contents.
*/ void __init sclp_get_ipl_info(struct sclp_ipl_info *info) { struct read_info_sccb *sccb; if (!early_read_info_sccb_valid) return; sccb = &early_read_info_sccb; info->is_valid = 1; if (sccb->flags & 0x2) info->has_dump = 1; memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); } static void sclp_sync_callback(struct sclp_req *req, void *data) { struct completion *completion = data; complete(completion); } static int do_sync_request(sclp_cmdw_t cmd, void *sccb) { struct completion completion; struct sclp_req *request; int rc; request = kzalloc(sizeof(*request), GFP_KERNEL); if (!request) return -ENOMEM; request->command = cmd; request->sccb = sccb; request->status = SCLP_REQ_FILLED; request->callback = sclp_sync_callback; request->callback_data = &completion; init_completion(&completion); /* Perform sclp request. */ rc = sclp_add_request(request); if (rc) goto out; wait_for_completion(&completion); /* Check response. */ if (request->status != SCLP_REQ_DONE) { pr_warning("sync request failed (cmd=0x%08x, " "status=0x%02x)\n", cmd, request->status); rc = -EIO; } out: kfree(request); return rc; } /* * CPU configuration related functions. 
*/ #define SCLP_CMDW_READ_CPU_INFO 0x00010001 #define SCLP_CMDW_CONFIGURE_CPU 0x00110001 #define SCLP_CMDW_DECONFIGURE_CPU 0x00100001 struct read_cpu_info_sccb { struct sccb_header header; u16 nr_configured; u16 offset_configured; u16 nr_standby; u16 offset_standby; u8 reserved[4096 - 16]; } __attribute__((packed, aligned(PAGE_SIZE))); static void sclp_fill_cpu_info(struct sclp_cpu_info *info, struct read_cpu_info_sccb *sccb) { char *page = (char *) sccb; memset(info, 0, sizeof(*info)); info->configured = sccb->nr_configured; info->standby = sccb->nr_standby; info->combined = sccb->nr_configured + sccb->nr_standby; info->has_cpu_type = sclp_fac84 & 0x1; memcpy(&info->cpu, page + sccb->offset_configured, info->combined * sizeof(struct sclp_cpu_entry)); } int sclp_get_cpu_info(struct sclp_cpu_info *info) { int rc; struct read_cpu_info_sccb *sccb; if (!SCLP_HAS_CPU_INFO) return -EOPNOTSUPP; sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb); if (rc) goto out; if (sccb->header.response_code != 0x0010) { pr_warning("readcpuinfo failed (response=0x%04x)\n", sccb->header.response_code); rc = -EIO; goto out; } sclp_fill_cpu_info(info, sccb); out: free_page((unsigned long) sccb); return rc; } struct cpu_configure_sccb { struct sccb_header header; } __attribute__((packed, aligned(8))); static int do_cpu_configure(sclp_cmdw_t cmd) { struct cpu_configure_sccb *sccb; int rc; if (!SCLP_HAS_CPU_RECONFIG) return -EOPNOTSUPP; /* * This is not going to cross a page boundary since we force * kmalloc to have a minimum alignment of 8 bytes on s390. 
*/ sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); rc = do_sync_request(cmd, sccb); if (rc) goto out; switch (sccb->header.response_code) { case 0x0020: case 0x0120: break; default: pr_warning("configure cpu failed (cmd=0x%08x, " "response=0x%04x)\n", cmd, sccb->header.response_code); rc = -EIO; break; } out: kfree(sccb); return rc; } int sclp_cpu_configure(u8 cpu) { return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8); } int sclp_cpu_deconfigure(u8 cpu) { return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8); } #ifdef CONFIG_MEMORY_HOTPLUG static DEFINE_MUTEX(sclp_mem_mutex); static LIST_HEAD(sclp_mem_list); static u8 sclp_max_storage_id; static unsigned long sclp_storage_ids[256 / BITS_PER_LONG]; static int sclp_mem_state_changed; struct memory_increment { struct list_head list; u16 rn; int standby; int usecount; }; struct assign_storage_sccb { struct sccb_header header; u16 rn; } __packed; int arch_get_memory_phys_device(unsigned long start_pfn) { if (!rzm) return 0; return PFN_PHYS(start_pfn) >> ilog2(rzm); } static unsigned long long rn2addr(u16 rn) { return (unsigned long long) (rn - 1) * rzm; } static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) { struct assign_storage_sccb *sccb; int rc; sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = PAGE_SIZE; sccb->rn = rn; rc = do_sync_request(cmd, sccb); if (rc) goto out; switch (sccb->header.response_code) { case 0x0020: case 0x0120: break; default: pr_warning("assign storage failed (cmd=0x%08x, " "response=0x%04x, rn=0x%04x)\n", cmd, sccb->header.response_code, rn); rc = -EIO; break; } out: free_page((unsigned long) sccb); return rc; } static int sclp_assign_storage(u16 rn) { return do_assign_storage(0x000d0001, rn); } static int sclp_unassign_storage(u16 rn) { return do_assign_storage(0x000c0001, rn); } struct attach_storage_sccb { struct sccb_header header; u16 :16; u16 
assigned; u32 :32; u32 entries[0]; } __packed; static int sclp_attach_storage(u8 id) { struct attach_storage_sccb *sccb; int rc; int i; sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = PAGE_SIZE; rc = do_sync_request(0x00080001 | id << 8, sccb); if (rc) goto out; switch (sccb->header.response_code) { case 0x0020: set_bit(id, sclp_storage_ids); for (i = 0; i < sccb->assigned; i++) sclp_unassign_storage(sccb->entries[i] >> 16); break; default: rc = -EIO; break; } out: free_page((unsigned long) sccb); return rc; } static int sclp_mem_change_state(unsigned long start, unsigned long size, int online) { struct memory_increment *incr; unsigned long long istart; int rc = 0; list_for_each_entry(incr, &sclp_mem_list, list) { istart = rn2addr(incr->rn); if (start + size - 1 < istart) break; if (start > istart + rzm - 1) continue; if (online) { if (incr->usecount++) continue; /* * Don't break the loop if one assign fails. Loop may * be walked again on CANCEL and we can't save * information if state changed before or not. * So continue and increase usecount for all increments. */ rc |= sclp_assign_storage(incr->rn); } else { if (--incr->usecount) continue; sclp_unassign_storage(incr->rn); } } return rc ? 
-EIO : 0; } static int sclp_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { unsigned long start, size; struct memory_notify *arg; unsigned char id; int rc = 0; arg = data; start = arg->start_pfn << PAGE_SHIFT; size = arg->nr_pages << PAGE_SHIFT; mutex_lock(&sclp_mem_mutex); for (id = 0; id <= sclp_max_storage_id; id++) if (!test_bit(id, sclp_storage_ids)) sclp_attach_storage(id); switch (action) { case MEM_ONLINE: case MEM_GOING_OFFLINE: case MEM_CANCEL_OFFLINE: break; case MEM_GOING_ONLINE: rc = sclp_mem_change_state(start, size, 1); break; case MEM_CANCEL_ONLINE: sclp_mem_change_state(start, size, 0); break; case MEM_OFFLINE: sclp_mem_change_state(start, size, 0); break; default: rc = -EINVAL; break; } if (!rc) sclp_mem_state_changed = 1; mutex_unlock(&sclp_mem_mutex); return rc ? NOTIFY_BAD : NOTIFY_OK; } static struct notifier_block sclp_mem_nb = { .notifier_call = sclp_mem_notifier, }; static void __init add_memory_merged(u16 rn) { static u16 first_rn, num; unsigned long long start, size; if (rn && first_rn && (first_rn + num == rn)) { num++; return; } if (!first_rn) goto skip_add; start = rn2addr(first_rn); size = (unsigned long long ) num * rzm; if (start >= VMEM_MAX_PHYS) goto skip_add; if (start + size > VMEM_MAX_PHYS) size = VMEM_MAX_PHYS - start; if (memory_end_set && (start >= memory_end)) goto skip_add; if (memory_end_set && (start + size > memory_end)) size = memory_end - start; add_memory(0, start, size); skip_add: first_rn = rn; num = 1; } static void __init sclp_add_standby_memory(void) { struct memory_increment *incr; list_for_each_entry(incr, &sclp_mem_list, list) if (incr->standby) add_memory_merged(incr->rn); add_memory_merged(0); } static void __init insert_increment(u16 rn, int standby, int assigned) { struct memory_increment *incr, *new_incr; struct list_head *prev; u16 last_rn; new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL); if (!new_incr) return; new_incr->rn = rn; new_incr->standby = standby; if (!standby) 
new_incr->usecount = 1; last_rn = 0; prev = &sclp_mem_list; list_for_each_entry(incr, &sclp_mem_list, list) { if (assigned && incr->rn > rn) break; if (!assigned && incr->rn - last_rn > 1) break; last_rn = incr->rn; prev = &incr->list; } if (!assigned) new_incr->rn = last_rn + 1; if (new_incr->rn > rnmax) { kfree(new_incr); return; } list_add(&new_incr->list, prev); } static int sclp_mem_freeze(struct device *dev) { if (!sclp_mem_state_changed) return 0; pr_err("Memory hotplug state changed, suspend refused.\n"); return -EPERM; } struct read_storage_sccb { struct sccb_header header; u16 max_id; u16 assigned; u16 standby; u16 :16; u32 entries[0]; } __packed; static const struct dev_pm_ops sclp_mem_pm_ops = { .freeze = sclp_mem_freeze, }; static struct platform_driver sclp_mem_pdrv = { .driver = { .name = "sclp_mem", .pm = &sclp_mem_pm_ops, }, }; static int __init sclp_detect_standby_memory(void) { struct platform_device *sclp_pdev; struct read_storage_sccb *sccb; int i, id, assigned, rc; if (!early_read_info_sccb_valid) return 0; if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) return 0; rc = -ENOMEM; sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA); if (!sccb) goto out; assigned = 0; for (id = 0; id <= sclp_max_storage_id; id++) { memset(sccb, 0, PAGE_SIZE); sccb->header.length = PAGE_SIZE; rc = do_sync_request(0x00040001 | id << 8, sccb); if (rc) goto out; switch (sccb->header.response_code) { case 0x0010: set_bit(id, sclp_storage_ids); for (i = 0; i < sccb->assigned; i++) { if (!sccb->entries[i]) continue; assigned++; insert_increment(sccb->entries[i] >> 16, 0, 1); } break; case 0x0310: break; case 0x0410: for (i = 0; i < sccb->assigned; i++) { if (!sccb->entries[i]) continue; assigned++; insert_increment(sccb->entries[i] >> 16, 1, 1); } break; default: rc = -EIO; break; } if (!rc) sclp_max_storage_id = sccb->max_id; } if (rc || list_empty(&sclp_mem_list)) goto out; for (i = 1; i <= rnmax - assigned; i++) insert_increment(0, 1, 0); rc = 
register_memory_notifier(&sclp_mem_nb); if (rc) goto out; rc = platform_driver_register(&sclp_mem_pdrv); if (rc) goto out; sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0); rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0; if (rc) goto out_driver; sclp_add_standby_memory(); goto out; out_driver: platform_driver_unregister(&sclp_mem_pdrv); out: free_page((unsigned long) sccb); return rc; } __initcall(sclp_detect_standby_memory); #endif /* CONFIG_MEMORY_HOTPLUG */ /* * Channel path configuration related functions. */ #define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001 #define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001 #define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001 struct chp_cfg_sccb { struct sccb_header header; u8 ccm; u8 reserved[6]; u8 cssid; } __attribute__((packed)); static int do_chp_configure(sclp_cmdw_t cmd) { struct chp_cfg_sccb *sccb; int rc; if (!SCLP_HAS_CHP_RECONFIG) return -EOPNOTSUPP; /* Prepare sccb. */ sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); rc = do_sync_request(cmd, sccb); if (rc) goto out; switch (sccb->header.response_code) { case 0x0020: case 0x0120: case 0x0440: case 0x0450: break; default: pr_warning("configure channel-path failed " "(cmd=0x%08x, response=0x%04x)\n", cmd, sccb->header.response_code); rc = -EIO; break; } out: free_page((unsigned long) sccb); return rc; } /** * sclp_chp_configure - perform configure channel-path sclp command * @chpid: channel-path ID * * Perform configure channel-path command sclp command for specified chpid. * Return 0 after command successfully finished, non-zero otherwise. */ int sclp_chp_configure(struct chp_id chpid) { return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8); } /** * sclp_chp_deconfigure - perform deconfigure channel-path sclp command * @chpid: channel-path ID * * Perform deconfigure channel-path command sclp command for specified chpid * and wait for completion. 
On success return 0. Return non-zero otherwise. */ int sclp_chp_deconfigure(struct chp_id chpid) { return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8); } struct chp_info_sccb { struct sccb_header header; u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; u8 standby[SCLP_CHP_INFO_MASK_SIZE]; u8 configured[SCLP_CHP_INFO_MASK_SIZE]; u8 ccm; u8 reserved[6]; u8 cssid; } __attribute__((packed)); /** * sclp_chp_read_info - perform read channel-path information sclp command * @info: resulting channel-path information data * * Perform read channel-path information sclp command and wait for completion. * On success, store channel-path information in @info and return 0. Return * non-zero otherwise. */ int sclp_chp_read_info(struct sclp_chp_info *info) { struct chp_info_sccb *sccb; int rc; if (!SCLP_HAS_CHP_INFO) return -EOPNOTSUPP; /* Prepare sccb. */ sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb); if (rc) goto out; if (sccb->header.response_code != 0x0010) { pr_warning("read channel-path info failed " "(response=0x%04x)\n", sccb->header.response_code); rc = -EIO; goto out; } memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE); memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE); memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE); out: free_page((unsigned long) sccb); return rc; }
gpl-2.0
novic/AniDroid-Kernel-N7000
drivers/s390/cio/ccwgroup.c
2194
16997
/*
 * bus driver for ccwgroup
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/dcache.h>

#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>

#define CCW_BUS_ID_SIZE		20

/* In Linux 2.4, we had a channel device layer called "chandev"
 * that did all sorts of obscure stuff for networking devices.
 * This is another driver that serves as a replacement for just
 * one of its functions, namely the translation of single subchannels
 * to devices that use multiple subchannels.
 */

/* a device matches a driver if all its slave devices match the same
 * entry of the driver */
static int
ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccwgroup_device *gdev;
	struct ccwgroup_driver *gdrv;

	gdev = to_ccwgroupdev(dev);
	gdrv = to_ccwgroupdrv(drv);

	/* Matching is purely by creator id; slave devices are not checked
	 * here (they were validated at group-creation time). */
	if (gdev->creator_id == gdrv->driver_id)
		return 1;

	return 0;
}
static int
ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env)
{
	/* TODO */
	return 0;
}

static struct bus_type ccwgroup_bus_type;

/* Remove the cdev%d symlinks and the group_device back-links created by
 * __ccwgroup_create_symlinks(). */
static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
	int i;
	char str[8];

	for (i = 0; i < gdev->count; i++) {
		sprintf(str, "cdev%d", i);
		sysfs_remove_link(&gdev->dev.kobj, str);
		sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
	}
}

/*
 * Remove references from ccw devices to ccw group device and from
 * ccw group device to ccw devices.
 */
static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
{
	struct ccw_device *cdev;
	int i;

	for (i = 0; i < gdev->count; i++) {
		cdev = gdev->cdev[i];
		if (!cdev)
			continue;
		/* drvdata of a slave cdev points back at the group device;
		 * clear it under the ccwlock so concurrent readers see a
		 * consistent value. */
		spin_lock_irq(cdev->ccwlock);
		dev_set_drvdata(&cdev->dev, NULL);
		spin_unlock_irq(cdev->ccwlock);
		gdev->cdev[i] = NULL;
		put_device(&cdev->dev);
	}
}

static ssize_t ccwgroup_online_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static ssize_t ccwgroup_online_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf);
/*
 * Provide an 'ungroup' attribute so the user can remove group devices no
 * longer needed or accidentially created. Saves memory :)
 */
static void ccwgroup_ungroup_callback(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);

	mutex_lock(&gdev->reg_mutex);
	if (device_is_registered(&gdev->dev)) {
		__ccwgroup_remove_symlinks(gdev);
		device_unregister(dev);
		__ccwgroup_remove_cdev_refs(gdev);
	}
	mutex_unlock(&gdev->reg_mutex);
}

/* sysfs "ungroup" write handler: schedules deferred removal of the group
 * device.  Uses the onoff atomic as a lock against concurrent
 * online/offline processing; fails with -EINVAL unless offline. */
static ssize_t
ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t count)
{
	struct ccwgroup_device *gdev;
	int rc;

	gdev = to_ccwgroupdev(dev);
	/* Prevent concurrent online/offline processing and ungrouping. */
	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
		return -EAGAIN;
	if (gdev->state != CCWGROUP_OFFLINE) {
		rc = -EINVAL;
		goto out;
	}
	/* Note that we cannot unregister the device from one of its
	 * attribute methods, so we have to use this roundabout approach. */
	rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
out:
	if (rc) {
		if (rc != -EAGAIN)
			/* Release onoff "lock" when ungrouping failed. */
			atomic_set(&gdev->onoff, 0);
		return rc;
	}
	return count;
}

static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);

static struct attribute *ccwgroup_attrs[] = {
	&dev_attr_online.attr,
	&dev_attr_ungroup.attr,
	NULL,
};
static struct attribute_group ccwgroup_attr_group = {
	.attrs = ccwgroup_attrs,
};
static const struct attribute_group *ccwgroup_attr_groups[] = {
	&ccwgroup_attr_group,
	NULL,
};

/* Device release callback: frees the ccwgroup_device allocation. */
static void
ccwgroup_release (struct device *dev)
{
	kfree(to_ccwgroupdev(dev));
}

/* Create "group_device" links on each slave and "cdev%d" links on the
 * group device; unwinds everything already created on failure. */
static int
__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
{
	char str[8];
	int i, rc;

	for (i = 0; i < gdev->count; i++) {
		rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
				       &gdev->dev.kobj, "group_device");
		if (rc) {
			for (--i; i >= 0; i--)
				sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
						  "group_device");
			return rc;
		}
	}
	for (i = 0; i < gdev->count; i++) {
		sprintf(str, "cdev%d", i);
		rc = sysfs_create_link(&gdev->dev.kobj,
				       &gdev->cdev[i]->dev.kobj, str);
		if (rc) {
			for (--i; i >= 0; i--) {
				sprintf(str, "cdev%d", i);
				sysfs_remove_link(&gdev->dev.kobj, str);
			}
			for (i = 0; i < gdev->count; i++)
				sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
						  "group_device");
			return rc;
		}
	}
	return 0;
}

/* Parse the next comma-separated bus id out of *buf into bus_id and
 * advance *buf past it.  Returns 0 on success, -EINVAL if the token does
 * not fit in CCW_BUS_ID_SIZE. */
static int __get_next_bus_id(const char **buf, char *bus_id)
{
	int rc, len;
	char *start, *end;

	start = (char *)*buf;
	end = strchr(start, ',');
	if (!end) {
		/* Last entry. Strip trailing newline, if applicable. */
		end = strchr(start, '\n');
		if (end)
			*end = '\0';
		len = strlen(start) + 1;
	} else {
		len = end - start + 1;
		end++;
	}
	if (len < CCW_BUS_ID_SIZE) {
		strlcpy(bus_id, start, len);
		rc = 0;
	} else
		rc = -EINVAL;
	*buf = end;
	return rc;
}

/* Validate a ccw bus id of the form %x.%x.%04x (cssid.ssid.devno). */
static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE])
{
	int cssid, ssid, devno;

	/* Must be of form %x.%x.%04x */
	if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3)
		return 0;
	return 1;
}

/**
 * ccwgroup_create_from_string() - create and register a ccw group device
 * @root: parent device for the new device
 * @creator_id: identifier of creating driver
 * @cdrv: ccw driver of slave devices
 * @num_devices: number of slave devices
 * @buf: buffer containing comma separated bus ids of slave devices
 *
 * Create and register a new ccw group device as a child of @root. Slave
 * devices are obtained from the list of bus ids given in @buf and must all
 * belong to @cdrv.
 * Returns:
 *  %0 on success and an error code on failure.
 * Context:
 *  non-atomic
 */
int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
				struct ccw_driver *cdrv, int num_devices,
				const char *buf)
{
	struct ccwgroup_device *gdev;
	int rc, i;
	char tmp_bus_id[CCW_BUS_ID_SIZE];
	const char *curr_buf;

	gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
		       GFP_KERNEL);
	if (!gdev)
		return -ENOMEM;

	atomic_set(&gdev->onoff, 0);
	mutex_init(&gdev->reg_mutex);
	mutex_lock(&gdev->reg_mutex);
	gdev->creator_id = creator_id;
	gdev->count = num_devices;
	gdev->dev.bus = &ccwgroup_bus_type;
	gdev->dev.parent = root;
	gdev->dev.release = ccwgroup_release;
	device_initialize(&gdev->dev);

	curr_buf = buf;
	for (i = 0; i < num_devices && curr_buf; i++) {
		rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
		if (rc != 0)
			goto error;
		if (!__is_valid_bus_id(tmp_bus_id)) {
			rc = -EINVAL;
			goto error;
		}
		gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
		/*
		 * All devices have to be of the same type in
		 * order to be grouped.
		 */
		if (!gdev->cdev[i]
		    || gdev->cdev[i]->id.driver_info !=
		    gdev->cdev[0]->id.driver_info) {
			rc = -EINVAL;
			goto error;
		}
		/* Don't allow a device to belong to more than one group. */
		spin_lock_irq(gdev->cdev[i]->ccwlock);
		if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
			spin_unlock_irq(gdev->cdev[i]->ccwlock);
			rc = -EINVAL;
			goto error;
		}
		dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
		spin_unlock_irq(gdev->cdev[i]->ccwlock);
	}
	/* Check for sufficient number of bus ids. */
	if (i < num_devices && !curr_buf) {
		rc = -EINVAL;
		goto error;
	}
	/* Check for trailing stuff. */
	if (i == num_devices && strlen(curr_buf) > 0) {
		rc = -EINVAL;
		goto error;
	}

	dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
	gdev->dev.groups = ccwgroup_attr_groups;
	rc = device_add(&gdev->dev);
	if (rc)
		goto error;
	get_device(&gdev->dev);
	rc = __ccwgroup_create_symlinks(gdev);
	if (!rc) {
		mutex_unlock(&gdev->reg_mutex);
		put_device(&gdev->dev);
		return 0;
	}
	device_unregister(&gdev->dev);
error:
	/* Undo drvdata back-references and references taken on the slaves. */
	for (i = 0; i < num_devices; i++)
		if (gdev->cdev[i]) {
			spin_lock_irq(gdev->cdev[i]->ccwlock);
			if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
				dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
			spin_unlock_irq(gdev->cdev[i]->ccwlock);
			put_device(&gdev->cdev[i]->dev);
			gdev->cdev[i] = NULL;
		}
	mutex_unlock(&gdev->reg_mutex);
	put_device(&gdev->dev);
	return rc;
}
EXPORT_SYMBOL(ccwgroup_create_from_string);

static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
			     void *data);

static struct notifier_block ccwgroup_nb = {
	.notifier_call = ccwgroup_notifier
};

static int __init init_ccwgroup(void)
{
	int ret;

	ret = bus_register(&ccwgroup_bus_type);
	if (ret)
		return ret;

	ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
	if (ret)
		bus_unregister(&ccwgroup_bus_type);

	return ret;
}

static void __exit cleanup_ccwgroup(void)
{
	bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
	bus_unregister(&ccwgroup_bus_type);
}

module_init(init_ccwgroup);
module_exit(cleanup_ccwgroup);

/************************** driver stuff ******************************/

/* Transition a group device to CCWGROUP_ONLINE via the bound driver's
 * set_online callback.  The onoff atomic guards against concurrent
 * transitions (-EAGAIN while busy); already-online is a no-op. */
static int
ccwgroup_set_online(struct ccwgroup_device *gdev)
{
	struct ccwgroup_driver *gdrv;
	int ret;

	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
		return -EAGAIN;
	if (gdev->state == CCWGROUP_ONLINE) {
		ret = 0;
		goto out;
	}
	if (!gdev->dev.driver) {
		ret = -EINVAL;
		goto out;
	}
	gdrv = to_ccwgroupdrv (gdev->dev.driver);
	if ((ret = gdrv->set_online ? gdrv->set_online(gdev) : 0))
		goto out;

	gdev->state = CCWGROUP_ONLINE;
 out:
	atomic_set(&gdev->onoff, 0);
	return ret;
}

/* Counterpart of ccwgroup_set_online(): transition to CCWGROUP_OFFLINE. */
static int
ccwgroup_set_offline(struct ccwgroup_device *gdev)
{
	struct ccwgroup_driver *gdrv;
	int ret;

	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
		return -EAGAIN;
	if (gdev->state == CCWGROUP_OFFLINE) {
		ret = 0;
		goto out;
	}
	if (!gdev->dev.driver) {
		ret = -EINVAL;
		goto out;
	}
	gdrv = to_ccwgroupdrv (gdev->dev.driver);
	if ((ret = gdrv->set_offline ? gdrv->set_offline(gdev) : 0))
		goto out;

	gdev->state = CCWGROUP_OFFLINE;
 out:
	atomic_set(&gdev->onoff, 0);
	return ret;
}

/* sysfs "online" write handler: "1" sets the group online, "0" offline.
 * Pins the driver module for the duration of the callback. */
static ssize_t
ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct ccwgroup_device *gdev;
	struct ccwgroup_driver *gdrv;
	unsigned long value;
	int ret;

	if (!dev->driver)
		return -EINVAL;

	gdev = to_ccwgroupdev(dev);
	gdrv = to_ccwgroupdrv(dev->driver);

	if (!try_module_get(gdrv->driver.owner))
		return -EINVAL;

	ret = strict_strtoul(buf, 0, &value);
	if (ret)
		goto out;

	if (value == 1)
		ret = ccwgroup_set_online(gdev);
	else if (value == 0)
		ret = ccwgroup_set_offline(gdev);
	else
		ret = -EINVAL;
out:
	module_put(gdrv->driver.owner);
	return (ret == 0) ? count : ret;
}

/* sysfs "online" read handler: reports "1" or "0". */
static ssize_t
ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	int online;

	online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE);

	return sprintf(buf, online ? "1\n" : "0\n");
}

/* Bus probe: delegate to the group driver's probe callback. */
static int
ccwgroup_probe (struct device *dev)
{
	struct ccwgroup_device *gdev;
	struct ccwgroup_driver *gdrv;

	int ret;

	gdev = to_ccwgroupdev(dev);
	gdrv = to_ccwgroupdrv(dev->driver);

	ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;

	return ret;
}

/* Bus remove: delegate to the group driver's remove callback, if bound. */
static int
ccwgroup_remove (struct device *dev)
{
	struct ccwgroup_device *gdev;
	struct ccwgroup_driver *gdrv;

	if (!dev->driver)
		return 0;

	gdev = to_ccwgroupdev(dev);
	gdrv = to_ccwgroupdrv(dev->driver);
	if (gdrv->remove)
		gdrv->remove(gdev);

	return 0;
}

/* Bus shutdown: delegate to the group driver's shutdown callback. */
static void ccwgroup_shutdown(struct device *dev)
{
	struct ccwgroup_device *gdev;
	struct ccwgroup_driver *gdrv;

	if (!dev->driver)
		return;

	gdev = to_ccwgroupdev(dev);
	gdrv = to_ccwgroupdrv(dev->driver);
	if (gdrv->shutdown)
		gdrv->shutdown(gdev);
}

/* PM prepare: refuse while an online/offline transition is in flight;
 * otherwise forward to the driver for online devices. */
static int ccwgroup_pm_prepare(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

	/* Fail while device is being set online/offline. */
	if (atomic_read(&gdev->onoff))
		return -EAGAIN;

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return 0;

	return gdrv->prepare ? gdrv->prepare(gdev) : 0;
}

/* PM complete: forward to the driver for online devices. */
static void ccwgroup_pm_complete(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return;

	if (gdrv->complete)
		gdrv->complete(gdev);
}

/* PM freeze: forward to the driver for online devices. */
static int ccwgroup_pm_freeze(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return 0;

	return gdrv->freeze ? gdrv->freeze(gdev) : 0;
}

/* PM thaw: forward to the driver for online devices. */
static int ccwgroup_pm_thaw(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return 0;

	return gdrv->thaw ? gdrv->thaw(gdev) : 0;
}

/* PM restore: forward to the driver for online devices. */
static int ccwgroup_pm_restore(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return 0;

	return gdrv->restore ? gdrv->restore(gdev) : 0;
}

static const struct dev_pm_ops ccwgroup_pm_ops = {
	.prepare = ccwgroup_pm_prepare,
	.complete = ccwgroup_pm_complete,
	.freeze = ccwgroup_pm_freeze,
	.thaw = ccwgroup_pm_thaw,
	.restore = ccwgroup_pm_restore,
};

static struct bus_type ccwgroup_bus_type = {
	.name   = "ccwgroup",
	.match  = ccwgroup_bus_match,
	.uevent = ccwgroup_uevent,
	.probe  = ccwgroup_probe,
	.remove = ccwgroup_remove,
	.shutdown = ccwgroup_shutdown,
	.pm = &ccwgroup_pm_ops,
};

/* Bus notifier: schedule ungrouping when a driver is unbound from a
 * group device. */
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_UNBIND_DRIVER)
		device_schedule_callback(dev, ccwgroup_ungroup_callback);

	return NOTIFY_OK;
}

/**
 * ccwgroup_driver_register() - register a ccw group driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 */
int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
{
	/* register our new driver with the core */
	cdriver->driver.bus = &ccwgroup_bus_type;

	return driver_register(&cdriver->driver);
}

/* Match-all predicate for driver_find_device(). */
static int
__ccwgroup_match_all(struct device *dev, void *data)
{
	return 1;
}

/**
 * ccwgroup_driver_unregister() - deregister a ccw group driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
{
	struct device *dev;

	/* We don't want ccwgroup devices to live longer than their driver. */
	get_driver(&cdriver->driver);
	while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
					 __ccwgroup_match_all))) {
		struct ccwgroup_device *gdev = to_ccwgroupdev(dev);

		mutex_lock(&gdev->reg_mutex);
		__ccwgroup_remove_symlinks(gdev);
		device_unregister(dev);
		__ccwgroup_remove_cdev_refs(gdev);
		mutex_unlock(&gdev->reg_mutex);
		put_device(dev);
	}
	put_driver(&cdriver->driver);
	driver_unregister(&cdriver->driver);
}

/**
 * ccwgroup_probe_ccwdev() - probe function for slave devices
 * @cdev: ccw device to be probed
 *
 * This is a dummy probe function for ccw devices that are slave devices in
 * a ccw group device.
 * Returns:
 *  always %0
 */
int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
	return 0;
}

/**
 * ccwgroup_remove_ccwdev() - remove function for slave devices
 * @cdev: ccw device to be removed
 *
 * This is a remove function for ccw devices that are slave devices in a ccw
 * group device. It sets the ccw device offline and also deregisters the
 * embedding ccw group device.
 */
void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
{
	struct ccwgroup_device *gdev;

	/* Ignore offlining errors, device is gone anyway. */
	ccw_device_set_offline(cdev);
	/* If one of its devices is gone, the whole group is done for. */
	spin_lock_irq(cdev->ccwlock);
	gdev = dev_get_drvdata(&cdev->dev);
	if (!gdev) {
		spin_unlock_irq(cdev->ccwlock);
		return;
	}
	/* Get ccwgroup device reference for local processing. */
	get_device(&gdev->dev);
	spin_unlock_irq(cdev->ccwlock);
	/* Unregister group device. */
	mutex_lock(&gdev->reg_mutex);
	if (device_is_registered(&gdev->dev)) {
		__ccwgroup_remove_symlinks(gdev);
		device_unregister(&gdev->dev);
		__ccwgroup_remove_cdev_refs(gdev);
	}
	mutex_unlock(&gdev->reg_mutex);
	/* Release ccwgroup device reference for local processing. */
	put_device(&gdev->dev);
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccwgroup_driver_register);
EXPORT_SYMBOL(ccwgroup_driver_unregister);
EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
gpl-2.0
tupurcoder/linux
fs/jffs2/compr_zlib.c
2706
5633
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#if !defined(__KERNEL__) && !defined(__ECOS)
#error "The userspace support got too messy and was removed. Update your mkfs.jffs2"
#endif

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
#include "nodelist.h"
#include "compr.h"

	/* Plan: call deflate() with avail_in == *sourcelen,
		avail_out = *dstlen - 12 and flush == Z_FINISH.
		If it doesn't manage to finish,	call it again with
		avail_in == 0 and avail_out set to the remaining 12
		bytes for it to clean up.
	   Q: Is 12 bytes sufficient?
	*/
#define STREAM_END_SPACE 12

/* Single global deflate/inflate streams; the mutexes serialize all
 * compress and decompress calls respectively. */
static DEFINE_MUTEX(deflate_mutex);
static DEFINE_MUTEX(inflate_mutex);
static z_stream inf_strm, def_strm;

#ifdef __KERNEL__ /* Linux-only */
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mutex.h>

/* Allocate the zlib workspaces for both global streams; on failure of the
 * second allocation the first is released.  Returns 0 or -ENOMEM. */
static int __init alloc_workspaces(void)
{
	def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
							MAX_MEM_LEVEL));
	if (!def_strm.workspace)
		return -ENOMEM;

	jffs2_dbg(1, "Allocated %d bytes for deflate workspace\n",
		  zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
	inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
	if (!inf_strm.workspace) {
		vfree(def_strm.workspace);
		return -ENOMEM;
	}
	jffs2_dbg(1, "Allocated %d bytes for inflate workspace\n",
		  zlib_inflate_workspacesize());
	return 0;
}

static void free_workspaces(void)
{
	vfree(def_strm.workspace);
	vfree(inf_strm.workspace);
}
#else
#define alloc_workspaces() (0)
#define free_workspaces() do { } while(0)
#endif /* __KERNEL__ */

/*
 * Compress up to *sourcelen bytes from data_in into cpage_out, leaving
 * STREAM_END_SPACE bytes of headroom for the final Z_FINISH.  On success
 * returns 0 and updates *dstlen/*sourcelen to the bytes produced/consumed.
 * Returns -1 when the output would not shrink or on any zlib error.
 */
static int jffs2_zlib_compress(unsigned char *data_in,
			       unsigned char *cpage_out,
			       uint32_t *sourcelen, uint32_t *dstlen)
{
	int ret;

	if (*dstlen <= STREAM_END_SPACE)
		return -1;

	mutex_lock(&deflate_mutex);

	if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
		pr_warn("deflateInit failed\n");
		mutex_unlock(&deflate_mutex);
		return -1;
	}

	def_strm.next_in = data_in;
	def_strm.total_in = 0;

	def_strm.next_out = cpage_out;
	def_strm.total_out = 0;

	while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) {
		def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE);
		def_strm.avail_in = min_t(unsigned long,
			(*sourcelen-def_strm.total_in), def_strm.avail_out);
		jffs2_dbg(1, "calling deflate with avail_in %ld, avail_out %ld\n",
			  def_strm.avail_in, def_strm.avail_out);
		ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH);
		jffs2_dbg(1, "deflate returned with avail_in %ld, avail_out %ld, total_in %ld, total_out %ld\n",
			  def_strm.avail_in, def_strm.avail_out,
			  def_strm.total_in, def_strm.total_out);
		if (ret != Z_OK) {
			jffs2_dbg(1, "deflate in loop returned %d\n", ret);
			zlib_deflateEnd(&def_strm);
			mutex_unlock(&deflate_mutex);
			return -1;
		}
	}
	/* Let deflate flush its trailer into the reserved headroom. */
	def_strm.avail_out += STREAM_END_SPACE;
	def_strm.avail_in = 0;
	ret = zlib_deflate(&def_strm, Z_FINISH);
	zlib_deflateEnd(&def_strm);

	if (ret != Z_STREAM_END) {
		jffs2_dbg(1, "final deflate returned %d\n", ret);
		ret = -1;
		goto out;
	}

	if (def_strm.total_out >= def_strm.total_in) {
		jffs2_dbg(1, "zlib compressed %ld bytes into %ld; failing\n",
			  def_strm.total_in, def_strm.total_out);
		ret = -1;
		goto out;
	}

	jffs2_dbg(1, "zlib compressed %ld bytes into %ld\n",
		  def_strm.total_in, def_strm.total_out);

	*dstlen = def_strm.total_out;
	*sourcelen = def_strm.total_in;
	ret = 0;
 out:
	mutex_unlock(&deflate_mutex);
	return ret;
}

/*
 * Decompress srclen bytes from data_in into cpage_out (destlen bytes).
 * When the input looks like a plain deflate stream without a preset
 * dictionary, the zlib header is skipped and negative wbits are used so
 * the adler32 check is bypassed.
 *
 * NOTE(review): return convention is unusual — returns 1 (positive) if
 * inflateInit fails, and 0 otherwise, even when inflate did not reach
 * Z_STREAM_END (that case is only logged via pr_notice).
 */
static int jffs2_zlib_decompress(unsigned char *data_in,
				 unsigned char *cpage_out,
				 uint32_t srclen, uint32_t destlen)
{
	int ret;
	int wbits = MAX_WBITS;

	mutex_lock(&inflate_mutex);

	inf_strm.next_in = data_in;
	inf_strm.avail_in = srclen;
	inf_strm.total_in = 0;

	inf_strm.next_out = cpage_out;
	inf_strm.avail_out = destlen;
	inf_strm.total_out = 0;

	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		jffs2_dbg(2, "inflate skipping adler32\n");
		wbits = -((data_in[0] >> 4) + 8);
		inf_strm.next_in += 2;
		inf_strm.avail_in -= 2;
	} else {
		/* Let this remain D1 for now -- it should never happen */
		jffs2_dbg(1, "inflate not skipping adler32\n");
	}

	if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
		pr_warn("inflateInit failed\n");
		mutex_unlock(&inflate_mutex);
		return 1;
	}

	while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK)
		;
	if (ret != Z_STREAM_END) {
		pr_notice("inflate returned %d\n", ret);
	}
	zlib_inflateEnd(&inf_strm);
	mutex_unlock(&inflate_mutex);
	return 0;
}

static struct jffs2_compressor jffs2_zlib_comp = {
	.priority = JFFS2_ZLIB_PRIORITY,
	.name = "zlib",
	.compr = JFFS2_COMPR_ZLIB,
	.compress = &jffs2_zlib_compress,
	.decompress = &jffs2_zlib_decompress,
#ifdef JFFS2_ZLIB_DISABLED
	.disabled = 1,
#else
	.disabled = 0,
#endif
};

/* Allocate workspaces and register the zlib compressor; workspaces are
 * freed again if registration fails. */
int __init jffs2_zlib_init(void)
{
	int ret;

	ret = alloc_workspaces();
	if (ret)
		return ret;

	ret = jffs2_register_compressor(&jffs2_zlib_comp);
	if (ret)
		free_workspaces();

	return ret;
}

void jffs2_zlib_exit(void)
{
	jffs2_unregister_compressor(&jffs2_zlib_comp);
	free_workspaces();
}
gpl-2.0
crdroid-devices/android_kernel_htc_msm8974
drivers/scsi/isci/init.c
2706
16650
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/efi.h>
#include <asm/string.h>
#include <scsi/scsi_host.h>
#include "host.h"
#include "isci.h"
#include "task.h"
#include "probe_roms.h"

#define MAJ 1
#define MIN 1
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD)

MODULE_VERSION(DRV_VERSION);

static struct scsi_transport_template *isci_transport_template;

/* PCI IDs of the C600-family SAS controller functions this driver binds. */
static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
	{ PCI_VDEVICE(INTEL, 0x1D61),},
	{ PCI_VDEVICE(INTEL, 0x1D63),},
	{ PCI_VDEVICE(INTEL, 0x1D65),},
	{ PCI_VDEVICE(INTEL, 0x1D67),},
	{ PCI_VDEVICE(INTEL, 0x1D69),},
	{ PCI_VDEVICE(INTEL, 0x1D6B),},
	{ PCI_VDEVICE(INTEL, 0x1D60),},
	{ PCI_VDEVICE(INTEL, 0x1D62),},
	{ PCI_VDEVICE(INTEL, 0x1D64),},
	{ PCI_VDEVICE(INTEL, 0x1D66),},
	{ PCI_VDEVICE(INTEL, 0x1D68),},
	{ PCI_VDEVICE(INTEL, 0x1D6A),},
	{}
};

MODULE_DEVICE_TABLE(pci, isci_id_table);

/* linux isci specific settings */

unsigned char no_outbound_task_to = 2;
module_param(no_outbound_task_to, byte, 0);
MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");

u16 ssp_max_occ_to = 20;
module_param(ssp_max_occ_to, ushort, 0);
MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");

u16 stp_max_occ_to = 5;
module_param(stp_max_occ_to, ushort, 0);
MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");

u16 ssp_inactive_to = 5;
module_param(ssp_inactive_to, ushort, 0);
MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");

u16 stp_inactive_to = 5;
module_param(stp_inactive_to, ushort, 0);
MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");

unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
module_param(phy_gen, byte, 0);
MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");

unsigned char max_concurr_spinup;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");

uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
module_param(cable_selection_override, uint, 0);

MODULE_PARM_DESC(cable_selection_override,
		 "This field indicates length of the SAS/SATA cable between "
		 "host and device. If any bits > 15 are set (default) "
		 "indicates \"use platform defaults\"");

/* sysfs show handler for the per-host "isci_id" attribute: prints the
 * controller index of the host behind this scsi_host device. */
static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
	struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);

	return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
}

static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);

struct device_attribute *isci_host_attrs[] = {
	&dev_attr_isci_id,
	NULL
};

/* SCSI midlayer host template; most entry points are the generic libsas
 * handlers, with isci-specific scan hooks. */
static struct scsi_host_template isci_sht = {

	.module				= THIS_MODULE,
	.name				= DRV_NAME,
	.proc_name			= DRV_NAME,
	.queuecommand			= sas_queuecommand,
	.target_alloc			= sas_target_alloc,
	.slave_configure		= sas_slave_configure,
	.scan_finished			= isci_host_scan_finished,
	.scan_start			= isci_host_scan_start,
	.change_queue_depth		= sas_change_queue_depth,
	.change_queue_type		= sas_change_queue_type,
	.bios_param			= sas_bios_param,
	.can_queue			= ISCI_CAN_QUEUE_VAL,
	.cmd_per_lun			= 1,
	.this_id			= -1,
	.sg_tablesize			= SG_ALL,
	.max_sectors			= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering			= ENABLE_CLUSTERING,
	.target_destroy			= sas_target_destroy,
	.ioctl				= sas_ioctl,
	.shost_attrs			= isci_host_attrs,
};

/* LLDD callbacks handed to libsas at transport attach time. */
static struct sas_domain_function_template isci_transport_ops = {

	/* The class calls these to notify the LLDD of an event. */
	.lldd_port_formed	= isci_port_formed,
	.lldd_port_deformed	= isci_port_deformed,

	/* The class calls these when a device is found or gone. */
	.lldd_dev_found		= isci_remote_device_found,
	.lldd_dev_gone		= isci_remote_device_gone,

	.lldd_execute_task	= isci_task_execute_task,
	/* Task Management Functions. Must be called from process context. */
	.lldd_abort_task	= isci_task_abort_task,
	.lldd_abort_task_set	= isci_task_abort_task_set,
	.lldd_clear_aca		= isci_task_clear_aca,
	.lldd_clear_task_set	= isci_task_clear_task_set,
	.lldd_I_T_nexus_reset	= isci_task_I_T_nexus_reset,
	.lldd_lu_reset		= isci_task_lu_reset,
	.lldd_query_task	= isci_task_query_task,

	/* ata recovery called from ata-eh */
	.lldd_ata_check_ready	= isci_ata_check_ready,

	/* Port and Adapter management */
	.lldd_clear_nexus_port	= isci_task_clear_nexus_port,
	.lldd_clear_nexus_ha	= isci_task_clear_nexus_ha,

	/* Phy management */
	.lldd_control_phy	= isci_phy_control,

	/* GPIO support */
	.lldd_write_gpio	= isci_gpio_write,
};


/******************************************************************************
* P R O T E C T E D  M E T H O D S
******************************************************************************/

/**
 * isci_register_sas_ha() - This method initializes various lldd
 *    specific members of the sas_ha struct and calls the libsas
 *    sas_register_ha() function.
 * @isci_host: This parameter specifies the lldd specific wrapper for the
 *    libsas sas_ha struct.
 *
 * This method returns an error code indicating sucess or failure. The user
 * should check for possible memory allocation error return otherwise, a zero
 * indicates success.
 */
static int isci_register_sas_ha(struct isci_host *isci_host)
{
	int i;
	struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
	struct asd_sas_phy **sas_phys;
	struct asd_sas_port **sas_ports;

	/* devm-managed: freed automatically when the pdev is released, so no
	 * explicit cleanup on the error paths below. */
	sas_phys = devm_kzalloc(&isci_host->pdev->dev,
				SCI_MAX_PHYS * sizeof(void *),
				GFP_KERNEL);
	if (!sas_phys)
		return -ENOMEM;

	sas_ports = devm_kzalloc(&isci_host->pdev->dev,
				 SCI_MAX_PORTS * sizeof(void *),
				 GFP_KERNEL);
	if (!sas_ports)
		return -ENOMEM;

	sas_ha->sas_ha_name = DRV_NAME;
	sas_ha->lldd_module = THIS_MODULE;
	sas_ha->sas_addr    = &isci_host->phys[0].sas_addr[0];

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		sas_phys[i] = &isci_host->phys[i].sas_phy;
		sas_ports[i] = &isci_host->sas_ports[i];
	}

	sas_ha->sas_phy  = sas_phys;
	sas_ha->sas_port = sas_ports;
	sas_ha->num_phys = SCI_MAX_PHYS;

	sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
	sas_ha->lldd_max_execute_num = 1;
	sas_ha->strict_wide_ports = 1;

	sas_register_ha(sas_ha);

	return 0;
}

/* Tear down one host: unhook it from libsas, then remove and drop the
 * SCSI host.  Safe to call with a NULL isci_host (partial-probe cleanup). */
static void isci_unregister(struct isci_host *isci_host)
{
	struct Scsi_Host *shost;

	if (!isci_host)
		return;

	shost = isci_host->shost;

	sas_unregister_ha(&isci_host->sas_ha);

	sas_remove_host(isci_host->shost);
	scsi_remove_host(isci_host->shost);
	scsi_host_put(isci_host->shost);
}

/* Enable the PCI function, map its BARs (even-numbered 64-bit BARs only,
 * hence the bar_num * 2 mask) and configure 64-bit DMA with a 32-bit
 * fallback.  All resources are pcim-managed.  Returns 0 or negative errno. */
static int __devinit isci_pci_init(struct pci_dev *pdev)
{
	int err, bar_num, bar_mask = 0;
	void __iomem * const *iomap;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"failed enable PCI device %s!\n",
			pci_name(pdev));
		return err;
	}

	for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
		bar_mask |= 1 << (bar_num * 2);

	err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
	if (err)
		return err;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	return 0;
}

/* Report how many controllers (1 or SCI_MAX_CONTROLLERS) sit behind this
 * PCI function, judged purely by BAR sizes. */
static int num_controllers(struct pci_dev *pdev)
{
	/* bar size alone can tell us if we are running with a dual controller
	 * part, no need to trust revision ids that might be under broken firmware
	 * control
	 */
	resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2);
	resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2);

	if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS &&
	    smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS)
		return SCI_MAX_CONTROLLERS;
	else
		return 1;
}

/* Request MSI-X vectors (SCI_NUM_MSI_X_INT per controller); if enabling or
 * any request_irq fails, unwind the vectors already requested and fall back
 * to a shared legacy INTx handler per host.  Returns 0 or negative errno. */
static int isci_setup_interrupts(struct pci_dev *pdev)
{
	int err, i, num_msix;
	struct isci_host *ihost;
	struct isci_pci_info *pci_info = to_pci_info(pdev);

	/*
	 *  Determine the number of vectors associated with this
	 *  PCI function.
	 */
	num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;

	for (i = 0; i < num_msix; i++)
		pci_info->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix);
	if (err)
		goto intx;

	for (i = 0; i < num_msix; i++) {
		int id = i / SCI_NUM_MSI_X_INT;
		struct msix_entry *msix = &pci_info->msix_entries[i];
		irq_handler_t isr;

		ihost = pci_info->hosts[id];
		/* odd numbered vectors are error interrupts */
		if (i & 1)
			isr = isci_error_isr;
		else
			isr = isci_msix_isr;

		err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
				       DRV_NAME"-msix", ihost);
		if (!err)
			continue;

		dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
		/* Release the vectors requested so far before the fallback. */
		while (i--) {
			id = i / SCI_NUM_MSI_X_INT;
			ihost = pci_info->hosts[id];
			msix = &pci_info->msix_entries[i];
			devm_free_irq(&pdev->dev, msix->vector, ihost);
		}
		pci_disable_msix(pdev);
		goto intx;
	}
	return 0;

 intx:
	for_each_isci_host(i, ihost, pdev) {
		err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
				       IRQF_SHARED, DRV_NAME"-intx", ihost);
		if (err)
			break;
	}
	return err;
}

/* Allocate and initialize one isci_host plus its Scsi_Host, register both
 * with the midlayer and libsas.  Returns the host or NULL on any failure
 * (the devm allocation is reclaimed with the pdev). */
static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
{
	struct isci_host *isci_host;
	struct Scsi_Host *shost;
	int err;

	isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
	if (!isci_host)
		return NULL;

	isci_host->pdev = pdev;
	isci_host->id = id;

	shost = scsi_host_alloc(&isci_sht, sizeof(void *));
	if (!shost)
		return NULL;
	isci_host->shost = shost;

	dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
		 "{%s, %s, %s, %s}\n",
		 (is_cable_select_overridden() ? "* " : ""), isci_host->id,
		 lookup_cable_names(decode_cable_selection(isci_host, 3)),
		 lookup_cable_names(decode_cable_selection(isci_host, 2)),
		 lookup_cable_names(decode_cable_selection(isci_host, 1)),
		 lookup_cable_names(decode_cable_selection(isci_host, 0)));

	err = isci_host_init(isci_host);
	if (err)
		goto err_shost;

	SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
	isci_host->sas_ha.core.shost = shost;
	shost->transportt = isci_transport_template;

	shost->max_id = ~0;
	shost->max_lun = ~0;
	shost->max_cmd_len = MAX_COMMAND_SIZE;

	err = scsi_add_host(shost, &pdev->dev);
	if (err)
		goto err_shost;

	err = isci_register_sas_ha(isci_host);
	if (err)
		goto err_shost_remove;

	return isci_host;

 err_shost_remove:
	scsi_remove_host(shost);
 err_shost:
	scsi_host_put(shost);

	return NULL;
}

/* PCI probe: locate OEM parameters (EFI var, then option ROM, then request
 * firmware, else built-in defaults), map the device, allocate one host per
 * controller, wire up interrupts and kick off SCSI scanning. */
static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct isci_pci_info *pci_info;
	int err, i;
	struct isci_host *isci_host;
	const struct firmware *fw = NULL;
	struct isci_orom *orom = NULL;
	char *source = "(platform)";

	dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
		 pdev->revision);

	pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
	if (!pci_info)
		return -ENOMEM;
	pci_set_drvdata(pdev, pci_info);

	if (efi_enabled)
		orom = isci_get_efi_var(pdev);

	if (!orom)
		orom = isci_request_oprom(pdev);

	/* Any invalid controller entry invalidates the whole OEM blob. */
	for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
		if (sci_oem_parameters_validate(&orom->ctrl[i],
						orom->hdr.version)) {
			dev_warn(&pdev->dev,
				 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
			devm_kfree(&pdev->dev, orom);
			orom = NULL;
			break;
		}
	}

	if (!orom) {
		source = "(firmware)";
		orom = isci_request_firmware(pdev, fw);
		if (!orom) {
			/* TODO convert this to WARN_TAINT_ONCE once the
			 * orom/efi parameter support is widely available */
			dev_warn(&pdev->dev,
				 "Loading user firmware failed, using default "
				 "values\n");
			dev_warn(&pdev->dev,
				 "Default OEM configuration being used: 4 "
				 "narrow ports, and default SAS Addresses\n");
		}
	}

	if (orom)
		dev_info(&pdev->dev,
			 "OEM SAS parameters (version: %u.%u) loaded %s\n",
			 (orom->hdr.version & 0xf0) >> 4,
			 (orom->hdr.version & 0xf), source);

	pci_info->orom = orom;

	err = isci_pci_init(pdev);
	if (err)
		return err;

	for (i = 0; i < num_controllers(pdev); i++) {
		struct isci_host *h = isci_host_alloc(pdev, i);

		if (!h) {
			err = -ENOMEM;
			goto err_host_alloc;
		}
		pci_info->hosts[i] = h;

		/* turn on DIF support */
		scsi_host_set_prot(h->shost,
				   SHOST_DIF_TYPE1_PROTECTION |
				   SHOST_DIF_TYPE2_PROTECTION |
				   SHOST_DIF_TYPE3_PROTECTION);
		scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC);
	}

	err = isci_setup_interrupts(pdev);
	if (err)
		goto err_host_alloc;

	for_each_isci_host(i, isci_host, pdev)
		scsi_scan_host(isci_host->shost);

	return 0;

 err_host_alloc:
	for_each_isci_host(i, isci_host, pdev)
		isci_unregister(isci_host);
	return err;
}

/* PCI remove: wait for each host to finish starting, then unregister and
 * de-initialize it. */
static void __devexit isci_pci_remove(struct pci_dev *pdev)
{
	struct isci_host *ihost;
	int i;

	for_each_isci_host(i, ihost, pdev) {
		wait_for_start(ihost);
		isci_unregister(ihost);
		isci_host_deinit(ihost);
	}
}

static struct pci_driver isci_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= isci_id_table,
	.probe		= isci_pci_probe,
	.remove		= __devexit_p(isci_pci_remove),
};

/* Module init: attach the libsas transport, then register the PCI driver;
 * releases the transport again if registration fails. */
static __init int isci_init(void)
{
	int err;

	pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n",
		DRV_NAME, DRV_VERSION);

	isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
	if (!isci_transport_template)
		return -ENOMEM;

	err = pci_register_driver(&isci_pci_driver);
	if (err)
		sas_release_transport(isci_transport_template);

	return err;
}

/* Module exit: mirror of isci_init(). */
static __exit void isci_exit(void)
{
	pci_unregister_driver(&isci_pci_driver);
	sas_release_transport(isci_transport_template);
}

MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(ISCI_FW_NAME);
module_init(isci_init);
module_exit(isci_exit);
gpl-2.0
djvoleur/G92XP-R4_COI9
net/mac802154/monitor.c
2962
3130
/* * Copyright 2007, 2008, 2009 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * Written by: * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> * Sergey Lapin <slapin@ossfans.org> * Maxim Gorbachyov <maxim.gorbachev@siemens.com> * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/crc-ccitt.h> #include <net/ieee802154.h> #include <net/mac802154.h> #include <net/netlink.h> #include <net/wpan-phy.h> #include <linux/nl802154.h> #include "mac802154.h" static netdev_tx_t mac802154_monitor_xmit(struct sk_buff *skb, struct net_device *dev) { struct mac802154_sub_if_data *priv; u8 chan, page; priv = netdev_priv(dev); /* FIXME: locking */ chan = priv->hw->phy->current_channel; page = priv->hw->phy->current_page; if (chan == MAC802154_CHAN_NONE) /* not initialized */ return NETDEV_TX_OK; if (WARN_ON(page >= WPAN_NUM_PAGES) || WARN_ON(chan >= WPAN_NUM_CHANNELS)) return NETDEV_TX_OK; skb->skb_iif = dev->ifindex; dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; return mac802154_tx(priv->hw, skb, page, chan); } void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb) { struct sk_buff *skb2; struct mac802154_sub_if_data *sdata; u16 crc = crc_ccitt(0, skb->data, skb->len); u8 *data; rcu_read_lock(); list_for_each_entry_rcu(sdata, &priv->slaves, list) { if 
(sdata->type != IEEE802154_DEV_MONITOR) continue; skb2 = skb_clone(skb, GFP_ATOMIC); skb2->dev = sdata->dev; skb2->pkt_type = PACKET_HOST; data = skb_put(skb2, 2); data[0] = crc & 0xff; data[1] = crc >> 8; netif_rx_ni(skb2); } rcu_read_unlock(); } static const struct net_device_ops mac802154_monitor_ops = { .ndo_open = mac802154_slave_open, .ndo_stop = mac802154_slave_close, .ndo_start_xmit = mac802154_monitor_xmit, }; void mac802154_monitor_setup(struct net_device *dev) { struct mac802154_sub_if_data *priv; dev->addr_len = 0; dev->hard_header_len = 0; dev->needed_tailroom = 2; /* room for FCS */ dev->mtu = IEEE802154_MTU; dev->tx_queue_len = 10; dev->type = ARPHRD_IEEE802154_MONITOR; dev->flags = IFF_NOARP | IFF_BROADCAST; dev->watchdog_timeo = 0; dev->destructor = free_netdev; dev->netdev_ops = &mac802154_monitor_ops; dev->ml_priv = &mac802154_mlme_reduced; priv = netdev_priv(dev); priv->type = IEEE802154_DEV_MONITOR; priv->chan = MAC802154_CHAN_NONE; /* not initialized */ priv->page = 0; }
gpl-2.0