repo_name
string
path
string
copies
string
size
string
content
string
license
string
placiano/NBKernel_NK4
drivers/net/wireless/rtlwifi/rtl8188ee/led.c
2785
4447
/****************************************************************************** * * Copyright(c) 2009-2013 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../pci.h" #include "reg.h" #include "led.h" static void rtl88ee_init_led(struct ieee80211_hw *hw, struct rtl_led *pled, enum rtl_led_pin ledpin) { pled->hw = hw; pled->ledpin = ledpin; pled->ledon = false; } void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) { u8 ledcfg; struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin); switch (pled->ledpin) { case LED_PIN_GPIO0: break; case LED_PIN_LED0: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6)); break; case LED_PIN_LED1: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); break; } pled->ledon = true; } void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 ledcfg; u8 val; RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin); switch (pled->ledpin) { case LED_PIN_GPIO0: break; case LED_PIN_LED0: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); ledcfg &= 0xf0; val = ledcfg | BIT(3) | BIT(5) | BIT(6); if (pcipriv->ledctl.led_opendrain == true) { rtl_write_byte(rtlpriv, REG_LEDCFG2, val); ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); val = ledcfg & 0xFE; rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, val); } else { rtl_write_byte(rtlpriv, REG_LEDCFG2, val); } break; case LED_PIN_LED1: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); ledcfg &= 0x10; rtl_write_byte(rtlpriv, REG_LEDCFG1, (ledcfg | BIT(3))); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); break; } pled->ledon = false; } void 
rtl88ee_init_sw_leds(struct ieee80211_hw *hw) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); rtl88ee_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0); rtl88ee_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1); } static void rtl88ee_sw_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); switch (ledaction) { case LED_CTL_POWER_ON: case LED_CTL_LINK: case LED_CTL_NO_LINK: rtl88ee_sw_led_on(hw, pLed0); break; case LED_CTL_POWER_OFF: rtl88ee_sw_led_off(hw, pLed0); break; default: break; } } void rtl88ee_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) && (ledaction == LED_CTL_TX || ledaction == LED_CTL_RX || ledaction == LED_CTL_SITE_SURVEY || ledaction == LED_CTL_LINK || ledaction == LED_CTL_NO_LINK || ledaction == LED_CTL_START_TO_LINK || ledaction == LED_CTL_POWER_ON)) { return; } RT_TRACE(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n", ledaction); rtl88ee_sw_led_control(hw, ledaction); }
gpl-2.0
kabata1975/android_kernel_c8690
drivers/media/video/soc_mediabus.c
2785
9500
/*
 * soc-camera media bus helper routines
 *
 * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
#include <media/soc_mediabus.h>

/*
 * Translation table: media bus pixel code -> pixel format descriptor
 * (fourcc, human-readable name, bits per sample, sample packing, byte
 * order). Consulted by soc_mbus_get_fmtdesc() below.
 */
static const struct soc_mbus_lookup mbus_fmt[] = {
	{ .code = V4L2_MBUS_FMT_YUYV8_2X8, .fmt = {
		.fourcc = V4L2_PIX_FMT_YUYV, .name = "YUYV", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_YVYU8_2X8, .fmt = {
		.fourcc = V4L2_PIX_FMT_YVYU, .name = "YVYU", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_UYVY8_2X8, .fmt = {
		.fourcc = V4L2_PIX_FMT_UYVY, .name = "UYVY", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_VYUY8_2X8, .fmt = {
		.fourcc = V4L2_PIX_FMT_VYUY, .name = "VYUY", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, .fmt = {
		.fourcc = V4L2_PIX_FMT_RGB555, .name = "RGB555", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, .fmt = {
		.fourcc = V4L2_PIX_FMT_RGB555X, .name = "RGB555X", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_RGB565_2X8_LE, .fmt = {
		.fourcc = V4L2_PIX_FMT_RGB565, .name = "RGB565", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_RGB565_2X8_BE, .fmt = {
		.fourcc = V4L2_PIX_FMT_RGB565X, .name = "RGB565X", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR8_1X8, .fmt = {
		.fourcc = V4L2_PIX_FMT_SBGGR8, .name = "Bayer 8 BGGR", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR10_1X10, .fmt = {
		.fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 10,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_Y8_1X8, .fmt = {
		.fourcc = V4L2_PIX_FMT_GREY, .name = "Grey", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_Y10_1X10, .fmt = {
		.fourcc = V4L2_PIX_FMT_Y10, .name = "Grey 10bit", .bits_per_sample = 10,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, .fmt = {
		.fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, .fmt = {
		.fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADLO, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, .fmt = {
		.fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, .fmt = {
		.fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADLO, .order = SOC_MBUS_ORDER_BE, }, },
	{ .code = V4L2_MBUS_FMT_JPEG_1X8, .fmt = {
		.fourcc = V4L2_PIX_FMT_JPEG, .name = "JPEG", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_VARIABLE, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE, .fmt = {
		.fourcc = V4L2_PIX_FMT_RGB444, .name = "RGB444", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, }, },
	{ .code = V4L2_MBUS_FMT_YUYV8_1_5X8, .fmt = {
		.fourcc = V4L2_PIX_FMT_YUV420, .name = "YUYV 4:2:0", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_1_5X8, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_YVYU8_1_5X8, .fmt = {
		.fourcc = V4L2_PIX_FMT_YVU420, .name = "YVYU 4:2:0", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_1_5X8, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_UYVY8_1X16, .fmt = {
		.fourcc = V4L2_PIX_FMT_UYVY, .name = "UYVY 16bit", .bits_per_sample = 16,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_VYUY8_1X16, .fmt = {
		.fourcc = V4L2_PIX_FMT_VYUY, .name = "VYUY 16bit", .bits_per_sample = 16,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_YUYV8_1X16, .fmt = {
		.fourcc = V4L2_PIX_FMT_YUYV, .name = "YUYV 16bit", .bits_per_sample = 16,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_YVYU8_1X16, .fmt = {
		.fourcc = V4L2_PIX_FMT_YVYU, .name = "YVYU 16bit", .bits_per_sample = 16,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SGRBG8_1X8, .fmt = {
		.fourcc = V4L2_PIX_FMT_SGRBG8, .name = "Bayer 8 GRBG", .bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, .fmt = {
		/* NOTE(review): name says BGGR but the code/fourcc are GRBG —
		 * kept byte-identical; looks like an upstream naming slip. */
		.fourcc = V4L2_PIX_FMT_SGRBG10DPCM8, .name = "Bayer 10 BGGR DPCM 8",
		.bits_per_sample = 8,
		.packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SGBRG10_1X10, .fmt = {
		.fourcc = V4L2_PIX_FMT_SGBRG10, .name = "Bayer 10 GBRG", .bits_per_sample = 10,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SGRBG10_1X10, .fmt = {
		.fourcc = V4L2_PIX_FMT_SGRBG10, .name = "Bayer 10 GRBG", .bits_per_sample = 10,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SRGGB10_1X10, .fmt = {
		.fourcc = V4L2_PIX_FMT_SRGGB10, .name = "Bayer 10 RGGB", .bits_per_sample = 10,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR12_1X12, .fmt = {
		.fourcc = V4L2_PIX_FMT_SBGGR12, .name = "Bayer 12 BGGR", .bits_per_sample = 12,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SGBRG12_1X12, .fmt = {
		.fourcc = V4L2_PIX_FMT_SGBRG12, .name = "Bayer 12 GBRG", .bits_per_sample = 12,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SGRBG12_1X12, .fmt = {
		.fourcc = V4L2_PIX_FMT_SGRBG12, .name = "Bayer 12 GRBG", .bits_per_sample = 12,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SRGGB12_1X12, .fmt = {
		.fourcc = V4L2_PIX_FMT_SRGGB12, .name = "Bayer 12 RGGB", .bits_per_sample = 12,
		.packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
};

/*
 * Return the sample-to-pixel ratio for a format as a fraction
 * *numerator / *denominator (e.g. 2/1 for 2X8 packings, 3/2 for planar
 * YUV 4:2:0). VARIABLE packing (JPEG) yields 0/1 — no fixed ratio.
 * Returns 0 on success, -EINVAL for an unknown packing.
 */
int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf,
			       unsigned int *numerator, unsigned int *denominator)
{
	switch (mf->packing) {
	case SOC_MBUS_PACKING_NONE:
	case SOC_MBUS_PACKING_EXTEND16:
		*numerator = 1;
		*denominator = 1;
		return 0;
	case SOC_MBUS_PACKING_2X8_PADHI:
	case SOC_MBUS_PACKING_2X8_PADLO:
		*numerator = 2;
		*denominator = 1;
		return 0;
	case SOC_MBUS_PACKING_1_5X8:
		*numerator = 3;
		*denominator = 2;
		return 0;
	case SOC_MBUS_PACKING_VARIABLE:
		*numerator = 0;
		*denominator = 1;
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(soc_mbus_samples_per_pixel);

/*
 * Compute bytes per line for a given image width and format. Returns 0
 * for variable-length formats (JPEG) and -EINVAL for unknown packing.
 * EXTEND16 always occupies two bytes per pixel regardless of
 * bits_per_sample.
 */
s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
{
	switch (mf->packing) {
	case SOC_MBUS_PACKING_NONE:
		return width * mf->bits_per_sample / 8;
	case SOC_MBUS_PACKING_2X8_PADHI:
	case SOC_MBUS_PACKING_2X8_PADLO:
	case SOC_MBUS_PACKING_EXTEND16:
		return width * 2;
	case SOC_MBUS_PACKING_1_5X8:
		return width * 3 / 2;
	case SOC_MBUS_PACKING_VARIABLE:
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(soc_mbus_bytes_per_line);

/*
 * Linear search of a caller-supplied lookup table for a media bus code.
 * Returns the matching descriptor or NULL if the code is not found.
 */
const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
	enum v4l2_mbus_pixelcode code,
	const struct soc_mbus_lookup *lookup,
	int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (lookup[i].code == code)
			return &lookup[i].fmt;

	return NULL;
}
EXPORT_SYMBOL(soc_mbus_find_fmtdesc);

/* Look up a media bus code in the built-in mbus_fmt[] table above. */
const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
	enum v4l2_mbus_pixelcode code)
{
	return soc_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt));
}
EXPORT_SYMBOL(soc_mbus_get_fmtdesc);

/* Module hooks are intentionally empty: this is a pure helper library. */
static int __init soc_mbus_init(void)
{
	return 0;
}

static void __exit soc_mbus_exit(void)
{
}

module_init(soc_mbus_init);
module_exit(soc_mbus_exit);

MODULE_DESCRIPTION("soc-camera media bus interface");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL v2");
gpl-2.0
lvchaqiu/meizu-m9-kernel
drivers/acpi/acpica/evxfevnt.c
3041
9567
/****************************************************************************** * * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable * *****************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/

#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evxfevnt")

/*******************************************************************************
 *
 * FUNCTION:    acpi_enable
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfers the system into ACPI mode, then polls the hardware
 *              until the mode change is visible (up to ~3 seconds).
 *
 ******************************************************************************/
acpi_status acpi_enable(void)
{
	acpi_status status;
	int retry;

	ACPI_FUNCTION_TRACE(acpi_enable);

	/* ACPI tables must be present */

	if (!acpi_tb_tables_loaded()) {
		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
	}

	/* Check current mode */

	if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
				  "System is already in ACPI mode\n"));
		return_ACPI_STATUS(AE_OK);
	}

	/* Transition to ACPI mode */

	status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
	if (ACPI_FAILURE(status)) {
		ACPI_ERROR((AE_INFO, "Could not transition to ACPI mode"));
		return_ACPI_STATUS(status);
	}

	/*
	 * Sanity check that the transition succeeded: poll up to 30000 times
	 * with a 100 usec stall each (3 s total) for slow platforms.
	 */
	for (retry = 0; retry < 30000; ++retry) {
		if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
			if (retry != 0)
				ACPI_WARNING((AE_INFO,
					      "Platform took > %d00 usec to enter ACPI mode",
					      retry));
			return_ACPI_STATUS(AE_OK);
		}
		acpi_os_stall(100);	/* 100 usec */
	}

	ACPI_ERROR((AE_INFO, "Hardware did not enter ACPI mode"));
	return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
}

ACPI_EXPORT_SYMBOL(acpi_enable)

/*******************************************************************************
 *
 * FUNCTION:    acpi_disable
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfers the system into LEGACY (non-ACPI) mode. Succeeds
 *              trivially if the system is already in legacy mode.
 *
 ******************************************************************************/
acpi_status acpi_disable(void)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(acpi_disable);

	if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
				  "System is already in legacy (non-ACPI) mode\n"));
	} else {
		/* Transition to LEGACY mode */

		status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY);

		if (ACPI_FAILURE(status)) {
			ACPI_ERROR((AE_INFO,
				    "Could not exit ACPI mode to legacy mode"));
			return_ACPI_STATUS(status);
		}

		ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI mode disabled\n"));
	}

	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_disable)

/*******************************************************************************
 *
 * FUNCTION:    acpi_enable_event
 *
 * PARAMETERS:  Event           - The fixed event to be enabled
 *              Flags           - Reserved
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable an ACPI (fixed) event and verify that the hardware
 *              actually latched the enable bit.
 *
 ******************************************************************************/
acpi_status acpi_enable_event(u32 event, u32 flags)
{
	acpi_status status = AE_OK;
	u32 value;

	ACPI_FUNCTION_TRACE(acpi_enable_event);

	/* Decode the Fixed Event */

	if (event > ACPI_EVENT_MAX) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * Enable the requested fixed event (by writing a one to the enable
	 * register bit)
	 */
	status =
	    acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
				    enable_register_id, ACPI_ENABLE_EVENT);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Make sure that the hardware responded */

	status =
	    acpi_read_bit_register(acpi_gbl_fixed_event_info[event].
				   enable_register_id, &value);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	if (value != 1) {
		ACPI_ERROR((AE_INFO,
			    "Could not enable %s event",
			    acpi_ut_get_event_name(event)));
		return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
	}

	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_enable_event)

/*******************************************************************************
 *
 * FUNCTION:    acpi_disable_event
 *
 * PARAMETERS:  Event           - The fixed event to be disabled
 *              Flags           - Reserved
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Disable an ACPI (fixed) event and verify that the hardware
 *              actually cleared the enable bit.
 *
 ******************************************************************************/
acpi_status acpi_disable_event(u32 event, u32 flags)
{
	acpi_status status = AE_OK;
	u32 value;

	ACPI_FUNCTION_TRACE(acpi_disable_event);

	/* Decode the Fixed Event */

	if (event > ACPI_EVENT_MAX) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * Disable the requested fixed event (by writing a zero to the enable
	 * register bit)
	 */
	status =
	    acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
				    enable_register_id, ACPI_DISABLE_EVENT);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status =
	    acpi_read_bit_register(acpi_gbl_fixed_event_info[event].
				   enable_register_id, &value);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	if (value != 0) {
		ACPI_ERROR((AE_INFO,
			    "Could not disable %s events",
			    acpi_ut_get_event_name(event)));
		return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
	}

	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_disable_event)

/*******************************************************************************
 *
 * FUNCTION:    acpi_clear_event
 *
 * PARAMETERS:  Event           - The fixed event to be cleared
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear an ACPI (fixed) event status bit. Status bits are
 *              write-one-to-clear, hence ACPI_CLEAR_STATUS writes a one.
 *
 ******************************************************************************/
acpi_status acpi_clear_event(u32 event)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(acpi_clear_event);

	/* Decode the Fixed Event */

	if (event > ACPI_EVENT_MAX) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * Clear the requested fixed event (By writing a one to the status
	 * register bit)
	 */
	status =
	    acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
				    status_register_id, ACPI_CLEAR_STATUS);

	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_clear_event)

/*******************************************************************************
 *
 * FUNCTION:    acpi_get_event_status
 *
 * PARAMETERS:  Event           - The fixed event
 *              event_status    - Where the current status of the event
 *                                will be returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Obtains and returns the current status of the event: the raw
 *              enable-register value, plus ACPI_EVENT_FLAG_SET if the status
 *              bit is set and ACPI_EVENT_FLAG_HANDLE if a handler is
 *              installed.
 *
 ******************************************************************************/
acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
{
	acpi_status status = AE_OK;
	u32 value;

	ACPI_FUNCTION_TRACE(acpi_get_event_status);

	if (!event_status) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Decode the Fixed Event */

	if (event > ACPI_EVENT_MAX) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * Get the status of the requested fixed event. NOTE(review): the raw
	 * enable-register value is stored directly into *event_status here;
	 * this matches this ACPICA vintage — confirm against the matching
	 * acpi_event_status flag definitions before changing.
	 */
	status =
	    acpi_read_bit_register(acpi_gbl_fixed_event_info[event].
				   enable_register_id, &value);
	if (ACPI_FAILURE(status))
		return_ACPI_STATUS(status);

	*event_status = value;

	status =
	    acpi_read_bit_register(acpi_gbl_fixed_event_info[event].
				   status_register_id, &value);
	if (ACPI_FAILURE(status))
		return_ACPI_STATUS(status);

	if (value)
		*event_status |= ACPI_EVENT_FLAG_SET;

	if (acpi_gbl_fixed_event_handlers[event].handler)
		*event_status |= ACPI_EVENT_FLAG_HANDLE;

	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_get_event_status)
gpl-2.0
lyfkevin/Wind_iproj_ICS_kernel
drivers/acpi/acpica/nsxfeval.c
3041
25941
/******************************************************************************* * * Module Name: nsxfeval - Public interfaces to the ACPI subsystem * ACPI Object evaluation interfaces * ******************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "acinterp.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsxfeval") /* Local prototypes */ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info); /******************************************************************************* * * FUNCTION: acpi_evaluate_object_typed * * PARAMETERS: Handle - Object handle (optional) * Pathname - Object pathname (optional) * external_params - List of parameters to pass to method, * terminated by NULL. May be NULL * if no parameters are being passed. * return_buffer - Where to put method's return value (if * any). If NULL, no value is returned. * return_type - Expected type of return object * * RETURN: Status * * DESCRIPTION: Find and evaluate the given object, passing the given * parameters if necessary. 
One of "Handle" or "Pathname" must * be valid (non-null) * ******************************************************************************/ acpi_status acpi_evaluate_object_typed(acpi_handle handle, acpi_string pathname, struct acpi_object_list *external_params, struct acpi_buffer *return_buffer, acpi_object_type return_type) { acpi_status status; u8 must_free = FALSE; ACPI_FUNCTION_TRACE(acpi_evaluate_object_typed); /* Return buffer must be valid */ if (!return_buffer) { return_ACPI_STATUS(AE_BAD_PARAMETER); } if (return_buffer->length == ACPI_ALLOCATE_BUFFER) { must_free = TRUE; } /* Evaluate the object */ status = acpi_evaluate_object(handle, pathname, external_params, return_buffer); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Type ANY means "don't care" */ if (return_type == ACPI_TYPE_ANY) { return_ACPI_STATUS(AE_OK); } if (return_buffer->length == 0) { /* Error because caller specifically asked for a return value */ ACPI_ERROR((AE_INFO, "No return value")); return_ACPI_STATUS(AE_NULL_OBJECT); } /* Examine the object type returned from evaluate_object */ if (((union acpi_object *)return_buffer->pointer)->type == return_type) { return_ACPI_STATUS(AE_OK); } /* Return object type does not match requested type */ ACPI_ERROR((AE_INFO, "Incorrect return type [%s] requested [%s]", acpi_ut_get_type_name(((union acpi_object *)return_buffer-> pointer)->type), acpi_ut_get_type_name(return_type))); if (must_free) { /* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */ ACPI_FREE(return_buffer->pointer); return_buffer->pointer = NULL; } return_buffer->length = 0; return_ACPI_STATUS(AE_TYPE); } ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed) /******************************************************************************* * * FUNCTION: acpi_evaluate_object * * PARAMETERS: Handle - Object handle (optional) * Pathname - Object pathname (optional) * external_params - List of parameters to pass to method, * terminated by NULL. 
May be NULL * if no parameters are being passed. * return_buffer - Where to put method's return value (if * any). If NULL, no value is returned. * * RETURN: Status * * DESCRIPTION: Find and evaluate the given object, passing the given * parameters if necessary. One of "Handle" or "Pathname" must * be valid (non-null) * ******************************************************************************/ acpi_status acpi_evaluate_object(acpi_handle handle, acpi_string pathname, struct acpi_object_list *external_params, struct acpi_buffer *return_buffer) { acpi_status status; struct acpi_evaluate_info *info; acpi_size buffer_space_needed; u32 i; ACPI_FUNCTION_TRACE(acpi_evaluate_object); /* Allocate and initialize the evaluation information block */ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { return_ACPI_STATUS(AE_NO_MEMORY); } info->pathname = pathname; /* Convert and validate the device handle */ info->prefix_node = acpi_ns_validate_handle(handle); if (!info->prefix_node) { status = AE_BAD_PARAMETER; goto cleanup; } /* * If there are parameters to be passed to a control method, the external * objects must all be converted to internal objects */ if (external_params && external_params->count) { /* * Allocate a new parameter block for the internal objects * Add 1 to count to allow for null terminated internal list */ info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size) external_params-> count + 1) * sizeof(void *)); if (!info->parameters) { status = AE_NO_MEMORY; goto cleanup; } /* Convert each external object in the list to an internal object */ for (i = 0; i < external_params->count; i++) { status = acpi_ut_copy_eobject_to_iobject(&external_params-> pointer[i], &info-> parameters[i]); if (ACPI_FAILURE(status)) { goto cleanup; } } info->parameters[external_params->count] = NULL; } /* * Three major cases: * 1) Fully qualified pathname * 2) No handle, not fully qualified pathname (error) * 3) Valid handle */ if ((pathname) && 
(acpi_ns_valid_root_prefix(pathname[0]))) { /* The path is fully qualified, just evaluate by name */ info->prefix_node = NULL; status = acpi_ns_evaluate(info); } else if (!handle) { /* * A handle is optional iff a fully qualified pathname is specified. * Since we've already handled fully qualified names above, this is * an error */ if (!pathname) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Both Handle and Pathname are NULL")); } else { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Null Handle with relative pathname [%s]", pathname)); } status = AE_BAD_PARAMETER; } else { /* We have a namespace a node and a possible relative path */ status = acpi_ns_evaluate(info); } /* * If we are expecting a return value, and all went well above, * copy the return value to an external object. */ if (return_buffer) { if (!info->return_object) { return_buffer->length = 0; } else { if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) == ACPI_DESC_TYPE_NAMED) { /* * If we received a NS Node as a return object, this means that * the object we are evaluating has nothing interesting to * return (such as a mutex, etc.) We return an error because * these types are essentially unsupported by this interface. * We don't check up front because this makes it easier to add * support for various types at a later date if necessary. 
*/ status = AE_TYPE; info->return_object = NULL; /* No need to delete a NS Node */ return_buffer->length = 0; } if (ACPI_SUCCESS(status)) { /* Dereference Index and ref_of references */ acpi_ns_resolve_references(info); /* Get the size of the returned object */ status = acpi_ut_get_object_size(info->return_object, &buffer_space_needed); if (ACPI_SUCCESS(status)) { /* Validate/Allocate/Clear caller buffer */ status = acpi_ut_initialize_buffer (return_buffer, buffer_space_needed); if (ACPI_FAILURE(status)) { /* * Caller's buffer is too small or a new one can't * be allocated */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Needed buffer size %X, %s\n", (u32) buffer_space_needed, acpi_format_exception (status))); } else { /* We have enough space for the object, build it */ status = acpi_ut_copy_iobject_to_eobject (info->return_object, return_buffer); } } } } } if (info->return_object) { /* * Delete the internal return object. NOTE: Interpreter must be * locked to avoid race condition. */ acpi_ex_enter_interpreter(); /* Remove one reference on the return object (should delete it) */ acpi_ut_remove_reference(info->return_object); acpi_ex_exit_interpreter(); } cleanup: /* Free the input parameter list (if we created one) */ if (info->parameters) { /* Free the allocated parameter block */ acpi_ut_delete_internal_object_list(info->parameters); } ACPI_FREE(info); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_evaluate_object) /******************************************************************************* * * FUNCTION: acpi_ns_resolve_references * * PARAMETERS: Info - Evaluation info block * * RETURN: Info->return_object is replaced with the dereferenced object * * DESCRIPTION: Dereference certain reference objects. Called before an * internal return object is converted to an external union acpi_object. * * Performs an automatic dereference of Index and ref_of reference objects. 
* These reference objects are not supported by the union acpi_object, so this is a
 * last resort effort to return something useful. Also, provides compatibility
 * with other ACPI implementations.
 *
 * NOTE: does not handle references within returned package objects or nested
 * references, but this support could be added later if found to be necessary.
 *
 ******************************************************************************/
static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
{
	union acpi_operand_object *obj_desc = NULL;
	struct acpi_namespace_node *node;

	/* We are interested in reference objects only */

	if ((info->return_object)->common.type != ACPI_TYPE_LOCAL_REFERENCE) {
		return;
	}

	/*
	 * Two types of references are supported - those created by Index and
	 * ref_of operators. A name reference (AML_NAMEPATH_OP) can be converted
	 * to an union acpi_object, so it is not dereferenced here. A ddb_handle
	 * (AML_LOAD_OP) cannot be dereferenced, nor can it be converted to
	 * an union acpi_object.
	 */
	switch (info->return_object->reference.class) {
	case ACPI_REFCLASS_INDEX:

		/* Index reference: target is the package element pointed to */

		obj_desc = *(info->return_object->reference.where);
		break;

	case ACPI_REFCLASS_REFOF:

		/* ref_of reference: fetch the object attached to the node */

		node = info->return_object->reference.object;
		if (node) {
			obj_desc = node->object;
		}
		break;

	default:

		/* All other reference classes are returned unresolved */

		return;
	}

	/* Replace the existing reference object */

	if (obj_desc) {
		/*
		 * Take a reference on the resolved object before dropping
		 * the one held via the reference object being replaced, so
		 * the target cannot be deleted in between.
		 */
		acpi_ut_add_reference(obj_desc);
		acpi_ut_remove_reference(info->return_object);
		info->return_object = obj_desc;
	}

	return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_walk_namespace
 *
 * PARAMETERS:  Type                - acpi_object_type to search for
 *              start_object        - Handle in namespace where search begins
 *              max_depth           - Depth to which search is to reach
 *              pre_order_visit     - Called during tree pre-order visit
 *                                    when an object of "Type" is found
 *              post_order_visit    - Called during tree post-order visit
 *                                    when an object of "Type" is found
 *              Context             - Passed to user function(s) above
 *              return_value        - Location where return value of
 *                                    user_function is put if terminated early
 *
 * RETURNS      Return value from the user_function if terminated early.
 *              Otherwise, returns NULL.
 *
 * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
 *              starting (and ending) at the object specified by start_handle.
 *              The callback function is called whenever an object that matches
 *              the type parameter is found. If the callback function returns
 *              a non-zero value, the search is terminated immediately and this
 *              value is returned to the caller.
 *
 *              The point of this procedure is to provide a generic namespace
 *              walk routine that can be called from multiple places to
 *              provide multiple services; the callback function(s) can be
 *              tailored to each task, whether it is a print function,
 *              a compare function, etc.
*
 ******************************************************************************/
acpi_status
acpi_walk_namespace(acpi_object_type type,
		    acpi_handle start_object,
		    u32 max_depth,
		    acpi_walk_callback pre_order_visit,
		    acpi_walk_callback post_order_visit,
		    void *context, void **return_value)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_walk_namespace);

	/* Parameter validation */

	if ((type > ACPI_TYPE_LOCAL_MAX) ||
	    (!max_depth) || (!pre_order_visit && !post_order_visit)) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * Need to acquire the namespace reader lock to prevent interference
	 * with any concurrent table unloads (which causes the deletion of
	 * namespace objects). We cannot allow the deletion of a namespace node
	 * while the user function is using it. The exception to this are the
	 * nodes created and deleted during control method execution -- these
	 * nodes are marked as temporary nodes and are ignored by the namespace
	 * walk. Thus, control methods can be executed while holding the
	 * namespace deletion lock (and the user function can execute control
	 * methods.)
	 */
	status = acpi_ut_acquire_read_lock(&acpi_gbl_namespace_rw_lock);
	if (ACPI_FAILURE(status)) {
		/* NOTE(review): raw "return status" here skips the
		 * return_ACPI_STATUS() function-trace exit macro */
		return status;
	}

	/*
	 * Lock the namespace around the walk. The namespace will be
	 * unlocked/locked around each call to the user function - since the user
	 * function must be allowed to make ACPICA calls itself (for example, it
	 * will typically execute control methods during device enumeration.)
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	status = acpi_ns_walk_namespace(type, start_object, max_depth,
					ACPI_NS_WALK_UNLOCK, pre_order_visit,
					post_order_visit, context,
					return_value);

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

unlock_and_exit:
	(void)acpi_ut_release_read_lock(&acpi_gbl_namespace_rw_lock);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_walk_namespace)

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_get_device_callback
 *
 * PARAMETERS:  Callback from acpi_get_device
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Takes callbacks from walk_namespace and filters out all non-
 *              present devices, or if they specified a HID, it filters based
 *              on that.
 *
 ******************************************************************************/
static acpi_status
acpi_ns_get_device_callback(acpi_handle obj_handle,
			    u32 nesting_level,
			    void *context, void **return_value)
{
	struct acpi_get_devices_info *info = context;
	acpi_status status;
	struct acpi_namespace_node *node;
	u32 flags;
	struct acpica_device_id *hid;
	struct acpica_device_id_list *cid;
	u32 i;
	u8 found;
	int no_match;

	/* Validate the handle while holding the namespace lock */

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	node = acpi_ns_validate_handle(obj_handle);
	status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	if (!node) {
		return (AE_BAD_PARAMETER);
	}

	/*
	 * First, filter based on the device HID and CID.
	 *
	 * 01/2010: For this case where a specific HID is requested, we don't
	 * want to run _STA until we have an actual HID match. Thus, we will
	 * not unnecessarily execute _STA on devices for which the caller
	 * doesn't care about. Previously, _STA was executed unconditionally
	 * on all devices found here.
	 *
	 * A side-effect of this change is that now we will continue to search
	 * for a matching HID even under device trees where the parent device
	 * would have returned a _STA that indicates it is not present or
	 * not functioning (thus aborting the search on that branch).
	 */
	if (info->hid != NULL) {
		status = acpi_ut_execute_HID(node, &hid);
		if (status == AE_NOT_FOUND) {
			/* Device has no _HID: skip it but keep walking */
			return (AE_OK);
		} else if (ACPI_FAILURE(status)) {
			return (AE_CTRL_DEPTH);
		}

		no_match = ACPI_STRCMP(hid->string, info->hid);
		ACPI_FREE(hid);

		if (no_match) {
			/*
			 * HID does not match, attempt match within the
			 * list of Compatible IDs (CIDs)
			 */
			status = acpi_ut_execute_CID(node, &cid);
			if (status == AE_NOT_FOUND) {
				return (AE_OK);
			} else if (ACPI_FAILURE(status)) {
				return (AE_CTRL_DEPTH);
			}

			/* Walk the CID list */

			found = 0;
			for (i = 0; i < cid->count; i++) {
				if (ACPI_STRCMP(cid->ids[i].string, info->hid)
				    == 0) {
					found = 1;
					break;
				}
			}
			ACPI_FREE(cid);
			if (!found)
				return (AE_OK);
		}
	}

	/* Run _STA to determine if device is present */

	status = acpi_ut_execute_STA(node, &flags);
	if (ACPI_FAILURE(status)) {
		return (AE_CTRL_DEPTH);
	}

	if (!(flags & ACPI_STA_DEVICE_PRESENT) &&
	    !(flags & ACPI_STA_DEVICE_FUNCTIONING)) {
		/*
		 * Don't examine the children of the device only when the
		 * device is neither present nor functional. See ACPI spec,
		 * description of _STA for more information.
		 */
		return (AE_CTRL_DEPTH);
	}

	/* We have a valid device, invoke the user function */

	status = info->user_function(obj_handle, nesting_level, info->context,
				     return_value);
	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_get_devices
 *
 * PARAMETERS:  HID                 - HID to search for. Can be NULL.
 *              user_function       - Called when a matching object is found
 *              Context             - Passed to user function
 *              return_value        - Location where return value of
 *                                    user_function is put if terminated early
 *
 * RETURNS      Return value from the user_function if terminated early.
 *              Otherwise, returns NULL.
 *
 * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
 *              starting (and ending) at the object specified by start_handle.
 *              The user_function is called whenever an object of type
 *              Device is found. If the user function returns
 *              a non-zero value, the search is terminated immediately and this
 *              value is returned to the caller.
 *
 *              This is a wrapper for walk_namespace, but the callback performs
 *              additional filtering. Please see acpi_ns_get_device_callback.
 *
 ******************************************************************************/
acpi_status
acpi_get_devices(const char *HID,
		 acpi_walk_callback user_function,
		 void *context, void **return_value)
{
	acpi_status status;
	struct acpi_get_devices_info info;

	ACPI_FUNCTION_TRACE(acpi_get_devices);

	/* Parameter validation */

	if (!user_function) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * We're going to call their callback from OUR callback, so we need
	 * to know what it is, and their context parameter.
	 */
	info.hid = HID;
	info.context = context;
	info.user_function = user_function;

	/*
	 * Lock the namespace around the walk.
	 * The namespace will be unlocked/locked around each call
	 * to the user function - since this function
	 * must be allowed to make Acpi calls itself.
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Walk every Device object; the callback above does the filtering */

	status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
					ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
					acpi_ns_get_device_callback, NULL,
					&info, return_value);

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_get_devices)

/*******************************************************************************
 *
 * FUNCTION:    acpi_attach_data
 *
 * PARAMETERS:  obj_handle          - Namespace node
 *              Handler             - Handler for this attachment
 *              Data                - Pointer to data to be attached
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Attach arbitrary data and handler to a namespace node.
 *
 ******************************************************************************/
acpi_status
acpi_attach_data(acpi_handle obj_handle,
		 acpi_object_handler handler, void *data)
{
	struct acpi_namespace_node *node;
	acpi_status status;

	/* Parameter validation */

	if (!obj_handle || !handler || !data) {
		return (AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Convert and validate the handle */

	node = acpi_ns_validate_handle(obj_handle);
	if (!node) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	status = acpi_ns_attach_data(node, handler, data);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return (status);
}

ACPI_EXPORT_SYMBOL(acpi_attach_data)

/*******************************************************************************
 *
 * FUNCTION:    acpi_detach_data
 *
 * PARAMETERS:  obj_handle          - Namespace node handle
 *              Handler             - Handler used in call to acpi_attach_data
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove data that was previously attached to a node.
 *
 ******************************************************************************/
acpi_status
acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
{
	struct acpi_namespace_node *node;
	acpi_status status;

	/* Parameter validation */

	if (!obj_handle || !handler) {
		return (AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Convert and validate the handle */

	node = acpi_ns_validate_handle(obj_handle);
	if (!node) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	status = acpi_ns_detach_data(node, handler);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return (status);
}

ACPI_EXPORT_SYMBOL(acpi_detach_data)

/*******************************************************************************
 *
 * FUNCTION:    acpi_get_data
 *
 * PARAMETERS:  obj_handle          - Namespace node
 *              Handler             - Handler used in call to attach_data
 *              Data                - Where the data is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
 *
 ******************************************************************************/
acpi_status
acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
{
	struct acpi_namespace_node *node;
	acpi_status status;

	/* Parameter validation */

	if (!obj_handle || !handler || !data) {
		return (AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Convert and validate the handle */

	node = acpi_ns_validate_handle(obj_handle);
	if (!node) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	status = acpi_ns_get_attached_data(node, handler, data);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return (status);
}

ACPI_EXPORT_SYMBOL(acpi_get_data)
gpl-2.0
LuweiLight/linux-3.14.35-vbal
arch/x86/um/os-Linux/tls.c
4577
1382
#include <errno.h> #include <linux/unistd.h> #include <sys/ptrace.h> #include <sys/syscall.h> #include <unistd.h> #include <sysdep/tls.h> #ifndef PTRACE_GET_THREAD_AREA #define PTRACE_GET_THREAD_AREA 25 #endif #ifndef PTRACE_SET_THREAD_AREA #define PTRACE_SET_THREAD_AREA 26 #endif /* Checks whether host supports TLS, and sets *tls_min according to the value * valid on the host. * i386 host have it == 6; x86_64 host have it == 12, for i386 emulation. */ void check_host_supports_tls(int *supports_tls, int *tls_min) { /* Values for x86 and x86_64.*/ int val[] = {GDT_ENTRY_TLS_MIN_I386, GDT_ENTRY_TLS_MIN_X86_64}; int i; for (i = 0; i < ARRAY_SIZE(val); i++) { user_desc_t info; info.entry_number = val[i]; if (syscall(__NR_get_thread_area, &info) == 0) { *tls_min = val[i]; *supports_tls = 1; return; } else { if (errno == EINVAL) continue; else if (errno == ENOSYS) *supports_tls = 0; return; } } *supports_tls = 0; } int os_set_thread_area(user_desc_t *info, int pid) { int ret; ret = ptrace(PTRACE_SET_THREAD_AREA, pid, info->entry_number, (unsigned long) info); if (ret < 0) ret = -errno; return ret; } int os_get_thread_area(user_desc_t *info, int pid) { int ret; ret = ptrace(PTRACE_GET_THREAD_AREA, pid, info->entry_number, (unsigned long) info); if (ret < 0) ret = -errno; return ret; }
gpl-2.0
GustavoRD78/78Kernel-6.0.1-23.5.A.1.291
net/ax25/ax25_iface.c
5089
5040
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

/* Registered layer-3 protocols; singly-linked list guarded by
 * protocol_list_lock (readers/writers, BH-safe on the write side). */
static struct ax25_protocol *protocol_list;
static DEFINE_RWLOCK(protocol_list_lock);

/* Link-failure notification callbacks, guarded by linkfail_lock */
static HLIST_HEAD(ax25_linkfail_list);
static DEFINE_SPINLOCK(linkfail_lock);

/* One entry per (callsign, device) pair we listen on; dev == NULL acts
 * as a wildcard in ax25_listen_mine(). Guarded by listen_lock. */
static struct listen_struct {
	struct listen_struct *next;
	ax25_address  callsign;
	struct net_device *dev;
} *listen_list = NULL;
static DEFINE_SPINLOCK(listen_lock);

/*
 * Do not register the internal protocols AX25_P_TEXT, AX25_P_SEGMENT,
 * AX25_P_IP or AX25_P_ARP ...
 */
void ax25_register_pid(struct ax25_protocol *ap)
{
	/* Push onto the head of the protocol list */
	write_lock_bh(&protocol_list_lock);
	ap->next = protocol_list;
	protocol_list = ap;
	write_unlock_bh(&protocol_list_lock);
}

EXPORT_SYMBOL_GPL(ax25_register_pid);

void ax25_protocol_release(unsigned int pid)
{
	struct ax25_protocol *protocol;

	write_lock_bh(&protocol_list_lock);
	protocol = protocol_list;
	if (protocol == NULL)
		goto out;

	/* Head of the list matches: unlink it */
	if (protocol->pid == pid) {
		protocol_list = protocol->next;
		goto out;
	}

	/* Unlink the first interior match, if any.
	 * NOTE(review): the unlinked entry is not freed here; ownership
	 * stays with whoever registered it. */
	while (protocol != NULL && protocol->next != NULL) {
		if (protocol->next->pid == pid) {
			protocol->next = protocol->next->next;
			goto out;
		}

		protocol = protocol->next;
	}
out:
	write_unlock_bh(&protocol_list_lock);
}

EXPORT_SYMBOL(ax25_protocol_release);

void ax25_linkfail_register(struct ax25_linkfail *lf)
{
	spin_lock_bh(&linkfail_lock);
	hlist_add_head(&lf->lf_node, &ax25_linkfail_list);
	spin_unlock_bh(&linkfail_lock);
}

EXPORT_SYMBOL(ax25_linkfail_register);

void ax25_linkfail_release(struct ax25_linkfail *lf)
{
	spin_lock_bh(&linkfail_lock);
	hlist_del_init(&lf->lf_node);
	spin_unlock_bh(&linkfail_lock);
}

EXPORT_SYMBOL(ax25_linkfail_release);

int ax25_listen_register(ax25_address *callsign, struct net_device *dev)
{
	struct listen_struct *listen;

	/* Already listening on this (callsign, device): nothing to do */
	if (ax25_listen_mine(callsign, dev))
		return 0;

	/* GFP_ATOMIC: this may be invoked from BH context */
	if ((listen = kmalloc(sizeof(*listen), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	listen->callsign = *callsign;
	listen->dev      = dev;

	spin_lock_bh(&listen_lock);
	listen->next = listen_list;
	listen_list  = listen;
	spin_unlock_bh(&listen_lock);

	return 0;
}

EXPORT_SYMBOL(ax25_listen_register);

void ax25_listen_release(ax25_address *callsign, struct net_device *dev)
{
	struct listen_struct *s, *listen;

	spin_lock_bh(&listen_lock);
	listen = listen_list;
	if (listen == NULL) {
		spin_unlock_bh(&listen_lock);
		return;
	}

	/* Head matches: unlink first, then free outside the lock */
	if (ax25cmp(&listen->callsign, callsign) == 0 && listen->dev == dev) {
		listen_list = listen->next;
		spin_unlock_bh(&listen_lock);
		kfree(listen);
		return;
	}

	/* Interior match: unlink and free the first matching entry */
	while (listen != NULL && listen->next != NULL) {
		if (ax25cmp(&listen->next->callsign, callsign) == 0 &&
		    listen->next->dev == dev) {
			s = listen->next;
			listen->next = listen->next->next;
			spin_unlock_bh(&listen_lock);
			kfree(s);
			return;
		}

		listen = listen->next;
	}
	spin_unlock_bh(&listen_lock);
}

EXPORT_SYMBOL(ax25_listen_release);

/* Look up the receive handler registered for the given PID, or NULL.
 * NOTE(review): uses read_lock() while ax25_protocol_is_registered()
 * below uses read_lock_bh() for the same lock -- confirm the calling
 * context before assuming either is interchangeable. */
int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *)
{
	int (*res)(struct sk_buff *, ax25_cb *) = NULL;
	struct ax25_protocol *protocol;

	read_lock(&protocol_list_lock);
	for (protocol = protocol_list; protocol != NULL;
	     protocol = protocol->next)
		if (protocol->pid == pid) {
			res = protocol->func;
			break;
		}
	read_unlock(&protocol_list_lock);

	return res;
}

/* Return 1 if we listen for this callsign on this device (or on any
 * device, when the registered entry has dev == NULL), else 0. */
int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
{
	struct listen_struct *listen;

	spin_lock_bh(&listen_lock);
	for (listen = listen_list; listen != NULL; listen = listen->next)
		if (ax25cmp(&listen->callsign, callsign) == 0 &&
		    (listen->dev == dev || listen->dev == NULL)) {
			spin_unlock_bh(&listen_lock);
			return 1;
		}
	spin_unlock_bh(&listen_lock);

	return 0;
}

/* Notify every registered link-failure callback about this connection.
 * Callbacks run with linkfail_lock held in BH context. */
void ax25_link_failed(ax25_cb *ax25, int reason)
{
	struct ax25_linkfail *lf;
	struct hlist_node *node;

	spin_lock_bh(&linkfail_lock);
	hlist_for_each_entry(lf, node, &ax25_linkfail_list, lf_node)
		lf->func(ax25, reason);
	spin_unlock_bh(&linkfail_lock);
}

/* Return 1 if a protocol handler for this PID is registered, else 0 */
int ax25_protocol_is_registered(unsigned int pid)
{
	struct ax25_protocol *protocol;
	int res = 0;

	read_lock_bh(&protocol_list_lock);
	for (protocol = protocol_list; protocol != NULL;
	     protocol = protocol->next)
		if (protocol->pid == pid) {
			res = 1;
			break;
		}
	read_unlock_bh(&protocol_list_lock);

	return res;
}
gpl-2.0
lennox/score_linux
drivers/usb/musb/musbhsdma.c
5089
11991
/* * MUSB OTG driver - support for Mentor's DMA controller * * Copyright 2005 Mentor Graphics Corporation * Copyright (C) 2005-2007 by Texas Instruments * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include <linux/device.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/slab.h> #include "musb_core.h" #include "musbhsdma.h" static int dma_controller_start(struct dma_controller *c) { /* nothing to do */ return 0; } static void dma_channel_release(struct dma_channel *channel); static int dma_controller_stop(struct dma_controller *c) { struct musb_dma_controller *controller = container_of(c, struct musb_dma_controller, controller); struct musb *musb = controller->private_data; struct dma_channel *channel; u8 bit; if (controller->used_channels != 0) { dev_err(musb->controller, "Stopping DMA controller while channel active\n"); for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) { if (controller->used_channels & (1 << bit)) { channel = &controller->channel[bit].channel; dma_channel_release(channel); if (!controller->used_channels) break; } } } return 0; } static struct dma_channel *dma_channel_allocate(struct dma_controller *c, struct musb_hw_ep *hw_ep, u8 transmit) { struct musb_dma_controller *controller = container_of(c, struct musb_dma_controller, controller); struct musb_dma_channel *musb_channel = NULL; struct dma_channel *channel = NULL; u8 bit; for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) { if (!(controller->used_channels & (1 << bit))) { controller->used_channels |= (1 << bit); musb_channel = &(controller->channel[bit]); musb_channel->controller = controller; musb_channel->idx = bit; musb_channel->epnum = hw_ep->epnum; musb_channel->transmit = transmit; channel = &(musb_channel->channel); channel->private_data = musb_channel; channel->status = MUSB_DMA_STATUS_FREE; channel->max_len = 0x100000; /* Tx => mode 1; Rx => mode 0 */ channel->desired_mode = transmit; channel->actual_len = 0; break; } } return channel; } static void dma_channel_release(struct dma_channel *channel) { struct musb_dma_channel *musb_channel = channel->private_data; channel->actual_len = 0; musb_channel->start_addr = 0; musb_channel->len = 0; 
musb_channel->controller->used_channels &= ~(1 << musb_channel->idx); channel->status = MUSB_DMA_STATUS_UNKNOWN; } static void configure_channel(struct dma_channel *channel, u16 packet_sz, u8 mode, dma_addr_t dma_addr, u32 len) { struct musb_dma_channel *musb_channel = channel->private_data; struct musb_dma_controller *controller = musb_channel->controller; struct musb *musb = controller->private_data; void __iomem *mbase = controller->base; u8 bchannel = musb_channel->idx; u16 csr = 0; dev_dbg(musb->controller, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n", channel, packet_sz, dma_addr, len, mode); if (mode) { csr |= 1 << MUSB_HSDMA_MODE1_SHIFT; BUG_ON(len < packet_sz); } csr |= MUSB_HSDMA_BURSTMODE_INCR16 << MUSB_HSDMA_BURSTMODE_SHIFT; csr |= (musb_channel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT) | (1 << MUSB_HSDMA_ENABLE_SHIFT) | (1 << MUSB_HSDMA_IRQENABLE_SHIFT) | (musb_channel->transmit ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT) : 0); /* address/count */ musb_write_hsdma_addr(mbase, bchannel, dma_addr); musb_write_hsdma_count(mbase, bchannel, len); /* control (this should start things) */ musb_writew(mbase, MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL), csr); } static int dma_channel_program(struct dma_channel *channel, u16 packet_sz, u8 mode, dma_addr_t dma_addr, u32 len) { struct musb_dma_channel *musb_channel = channel->private_data; struct musb_dma_controller *controller = musb_channel->controller; struct musb *musb = controller->private_data; dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", musb_channel->epnum, musb_channel->transmit ? 
"Tx" : "Rx", packet_sz, dma_addr, len, mode); BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || channel->status == MUSB_DMA_STATUS_BUSY); /* Let targets check/tweak the arguments */ if (musb->ops->adjust_channel_params) { int ret = musb->ops->adjust_channel_params(channel, packet_sz, &mode, &dma_addr, &len); if (ret) return ret; } /* * The DMA engine in RTL1.8 and above cannot handle * DMA addresses that are not aligned to a 4 byte boundary. * It ends up masking the last two bits of the address * programmed in DMA_ADDR. * * Fail such DMA transfers, so that the backup PIO mode * can carry out the transfer */ if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4)) return false; channel->actual_len = 0; musb_channel->start_addr = dma_addr; musb_channel->len = len; musb_channel->max_packet_sz = packet_sz; channel->status = MUSB_DMA_STATUS_BUSY; configure_channel(channel, packet_sz, mode, dma_addr, len); return true; } static int dma_channel_abort(struct dma_channel *channel) { struct musb_dma_channel *musb_channel = channel->private_data; void __iomem *mbase = musb_channel->controller->base; u8 bchannel = musb_channel->idx; int offset; u16 csr; if (channel->status == MUSB_DMA_STATUS_BUSY) { if (musb_channel->transmit) { offset = MUSB_EP_OFFSET(musb_channel->epnum, MUSB_TXCSR); /* * The programming guide says that we must clear * the DMAENAB bit before the DMAMODE bit... 
*/ csr = musb_readw(mbase, offset); csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB); musb_writew(mbase, offset, csr); csr &= ~MUSB_TXCSR_DMAMODE; musb_writew(mbase, offset, csr); } else { offset = MUSB_EP_OFFSET(musb_channel->epnum, MUSB_RXCSR); csr = musb_readw(mbase, offset); csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB | MUSB_RXCSR_DMAMODE); musb_writew(mbase, offset, csr); } musb_writew(mbase, MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL), 0); musb_write_hsdma_addr(mbase, bchannel, 0); musb_write_hsdma_count(mbase, bchannel, 0); channel->status = MUSB_DMA_STATUS_FREE; } return 0; } static irqreturn_t dma_controller_irq(int irq, void *private_data) { struct musb_dma_controller *controller = private_data; struct musb *musb = controller->private_data; struct musb_dma_channel *musb_channel; struct dma_channel *channel; void __iomem *mbase = controller->base; irqreturn_t retval = IRQ_NONE; unsigned long flags; u8 bchannel; u8 int_hsdma; u32 addr, count; u16 csr; spin_lock_irqsave(&musb->lock, flags); int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR); #ifdef CONFIG_BLACKFIN /* Clear DMA interrupt flags */ musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma); #endif if (!int_hsdma) { dev_dbg(musb->controller, "spurious DMA irq\n"); for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) { musb_channel = (struct musb_dma_channel *) &(controller->channel[bchannel]); channel = &musb_channel->channel; if (channel->status == MUSB_DMA_STATUS_BUSY) { count = musb_read_hsdma_count(mbase, bchannel); if (count == 0) int_hsdma |= (1 << bchannel); } } dev_dbg(musb->controller, "int_hsdma = 0x%x\n", int_hsdma); if (!int_hsdma) goto done; } for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) { if (int_hsdma & (1 << bchannel)) { musb_channel = (struct musb_dma_channel *) &(controller->channel[bchannel]); channel = &musb_channel->channel; csr = musb_readw(mbase, MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL)); if (csr & (1 << 
MUSB_HSDMA_BUSERROR_SHIFT)) { musb_channel->channel.status = MUSB_DMA_STATUS_BUS_ABORT; } else { u8 devctl; addr = musb_read_hsdma_addr(mbase, bchannel); channel->actual_len = addr - musb_channel->start_addr; dev_dbg(musb->controller, "ch %p, 0x%x -> 0x%x (%zu / %d) %s\n", channel, musb_channel->start_addr, addr, channel->actual_len, musb_channel->len, (channel->actual_len < musb_channel->len) ? "=> reconfig 0" : "=> complete"); devctl = musb_readb(mbase, MUSB_DEVCTL); channel->status = MUSB_DMA_STATUS_FREE; /* completed */ if ((devctl & MUSB_DEVCTL_HM) && (musb_channel->transmit) && ((channel->desired_mode == 0) || (channel->actual_len & (musb_channel->max_packet_sz - 1))) ) { u8 epnum = musb_channel->epnum; int offset = MUSB_EP_OFFSET(epnum, MUSB_TXCSR); u16 txcsr; /* * The programming guide says that we * must clear DMAENAB before DMAMODE. */ musb_ep_select(mbase, epnum); txcsr = musb_readw(mbase, offset); txcsr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_AUTOSET); musb_writew(mbase, offset, txcsr); /* Send out the packet */ txcsr &= ~MUSB_TXCSR_DMAMODE; txcsr |= MUSB_TXCSR_TXPKTRDY; musb_writew(mbase, offset, txcsr); } musb_dma_completion(musb, musb_channel->epnum, musb_channel->transmit); } } } retval = IRQ_HANDLED; done: spin_unlock_irqrestore(&musb->lock, flags); return retval; } void dma_controller_destroy(struct dma_controller *c) { struct musb_dma_controller *controller = container_of(c, struct musb_dma_controller, controller); if (!controller) return; if (controller->irq) free_irq(controller->irq, c); kfree(controller); } struct dma_controller *__init dma_controller_create(struct musb *musb, void __iomem *base) { struct musb_dma_controller *controller; struct device *dev = musb->controller; struct platform_device *pdev = to_platform_device(dev); int irq = platform_get_irq_byname(pdev, "dma"); if (irq == 0) { dev_err(dev, "No DMA interrupt line!\n"); return NULL; } controller = kzalloc(sizeof(*controller), GFP_KERNEL); if (!controller) return NULL; 
controller->channel_count = MUSB_HSDMA_CHANNELS; controller->private_data = musb; controller->base = base; controller->controller.start = dma_controller_start; controller->controller.stop = dma_controller_stop; controller->controller.channel_alloc = dma_channel_allocate; controller->controller.channel_release = dma_channel_release; controller->controller.channel_program = dma_channel_program; controller->controller.channel_abort = dma_channel_abort; if (request_irq(irq, dma_controller_irq, 0, dev_name(musb->controller), &controller->controller)) { dev_err(dev, "request_irq %d failed!\n", irq); dma_controller_destroy(&controller->controller); return NULL; } controller->irq = irq; return &controller->controller; }
gpl-2.0
dennes544/dennes544_kernel_lge_hammerhead
arch/x86/mm/tlb.c
7649
9009
#include <linux/init.h> #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/cpu.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/cache.h> #include <asm/apic.h> #include <asm/uv/uv.h> DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0, }; /* * Smarter SMP flushing macros. * c/o Linus Torvalds. * * These mean you can really definitely utterly forget about * writing to user space from interrupts. (Its not allowed anyway). * * Optimizations Manfred Spraul <manfred@colorfullife.com> * * More scalable flush, from Andi Kleen * * To avoid global state use 8 different call vectors. * Each CPU uses a specific vector to trigger flushes on other * CPUs. Depending on the received vector the target CPUs look into * the right array slot for the flush data. * * With more than 8 CPUs they are hashed to the 8 available * vectors. The limited global vector space forces us to this right now. * In future when interrupts are split into per CPU domains this could be * fixed, at the cost of triggering multiple IPIs in some cases. */ union smp_flush_state { struct { struct mm_struct *flush_mm; unsigned long flush_va; raw_spinlock_t tlbstate_lock; DECLARE_BITMAP(flush_cpumask, NR_CPUS); }; char pad[INTERNODE_CACHE_BYTES]; } ____cacheline_internodealigned_in_smp; /* State is put into the per CPU data section, but padded to a full cache line because other CPUs can access it and we don't want false sharing in the per cpu data segment. */ static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS]; static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset); /* * We cannot call mmdrop() because we are in interrupt context, * instead update mm->cpu_vm_mask. 
*/ void leave_mm(int cpu) { if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) BUG(); cpumask_clear_cpu(cpu, mm_cpumask(percpu_read(cpu_tlbstate.active_mm))); load_cr3(swapper_pg_dir); } EXPORT_SYMBOL_GPL(leave_mm); /* * * The flush IPI assumes that a thread switch happens in this order: * [cpu0: the cpu that switches] * 1) switch_mm() either 1a) or 1b) * 1a) thread switch to a different mm * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); * Stop ipi delivery for the old mm. This is not synchronized with * the other cpus, but smp_invalidate_interrupt ignore flush ipis * for the wrong mm, and in the worst case we perform a superfluous * tlb flush. * 1a2) set cpu mmu_state to TLBSTATE_OK * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 * was in lazy tlb mode. * 1a3) update cpu active_mm * Now cpu0 accepts tlb flushes for the new mm. * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask); * Now the other cpus will send tlb flush ipis. * 1a4) change cr3. * 1b) thread switch without mm change * cpu active_mm is correct, cpu0 already handles * flush ipis. * 1b1) set cpu mmu_state to TLBSTATE_OK * 1b2) test_and_set the cpu bit in cpu_vm_mask. * Atomically set the bit [other cpus will start sending flush ipis], * and test the bit. * 1b3) if the bit was 0: leave_mm was called, flush the tlb. * 2) switch %%esp, ie current * * The interrupt must handle 2 special cases: * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. * - the cpu performs speculative tlb reads, i.e. even if the cpu only * runs in kernel space, the cpu could load tlb entries for user space * pages. * * The good news is that cpu mmu_state is local to each cpu, no * write/read ordering problems. */ /* * TLB flush IPI: * * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. * 2) Leave the mm if we are in the lazy tlb mode. * * Interrupts are disabled. */ /* * FIXME: use of asmlinkage is not consistent. 
On x86_64 it's noop * but still used for documentation purpose but the usage is slightly * inconsistent. On x86_32, asmlinkage is regparm(0) but interrupt * entry calls in with the first parameter in %eax. Maybe define * intrlinkage? */ #ifdef CONFIG_X86_64 asmlinkage #endif void smp_invalidate_interrupt(struct pt_regs *regs) { unsigned int cpu; unsigned int sender; union smp_flush_state *f; cpu = smp_processor_id(); /* * orig_rax contains the negated interrupt vector. * Use that to determine where the sender put the data. */ sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START; f = &flush_state[sender]; if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask))) goto out; /* * This was a BUG() but until someone can quote me the * line from the intel manual that guarantees an IPI to * multiple CPUs is retried _only_ on the erroring CPUs * its staying as a return * * BUG(); */ if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) { if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (f->flush_va == TLB_FLUSH_ALL) local_flush_tlb(); else __flush_tlb_one(f->flush_va); } else leave_mm(cpu); } out: ack_APIC_irq(); smp_mb__before_clear_bit(); cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask)); smp_mb__after_clear_bit(); inc_irq_stat(irq_tlb_count); } static void flush_tlb_others_ipi(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va) { unsigned int sender; union smp_flush_state *f; /* Caller has disabled preemption */ sender = this_cpu_read(tlb_vector_offset); f = &flush_state[sender]; if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS) raw_spin_lock(&f->tlbstate_lock); f->flush_mm = mm; f->flush_va = va; if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) { /* * We have to send the IPI only to * CPUs affected. 
*/ apic->send_IPI_mask(to_cpumask(f->flush_cpumask), INVALIDATE_TLB_VECTOR_START + sender); while (!cpumask_empty(to_cpumask(f->flush_cpumask))) cpu_relax(); } f->flush_mm = NULL; f->flush_va = 0; if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS) raw_spin_unlock(&f->tlbstate_lock); } void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va) { if (is_uv_system()) { unsigned int cpu; cpu = smp_processor_id(); cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu); if (cpumask) flush_tlb_others_ipi(cpumask, mm, va); return; } flush_tlb_others_ipi(cpumask, mm, va); } static void __cpuinit calculate_tlb_offset(void) { int cpu, node, nr_node_vecs, idx = 0; /* * we are changing tlb_vector_offset for each CPU in runtime, but this * will not cause inconsistency, as the write is atomic under X86. we * might see more lock contentions in a short time, but after all CPU's * tlb_vector_offset are changed, everything should go normal * * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes !=0, we might * waste some vectors. 
**/ if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS) nr_node_vecs = 1; else nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes; for_each_online_node(node) { int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) * nr_node_vecs; int cpu_offset = 0; for_each_cpu(cpu, cpumask_of_node(node)) { per_cpu(tlb_vector_offset, cpu) = node_offset + cpu_offset; cpu_offset++; cpu_offset = cpu_offset % nr_node_vecs; } idx++; } } static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n, unsigned long action, void *hcpu) { switch (action & 0xf) { case CPU_ONLINE: case CPU_DEAD: calculate_tlb_offset(); } return NOTIFY_OK; } static int __cpuinit init_smp_flush(void) { int i; for (i = 0; i < ARRAY_SIZE(flush_state); i++) raw_spin_lock_init(&flush_state[i].tlbstate_lock); calculate_tlb_offset(); hotcpu_notifier(tlb_cpuhp_notify, 0); return 0; } core_initcall(init_smp_flush); void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; preempt_disable(); local_flush_tlb(); if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL); preempt_enable(); } void flush_tlb_mm(struct mm_struct *mm) { preempt_disable(); if (current->active_mm == mm) { if (current->mm) local_flush_tlb(); else leave_mm(smp_processor_id()); } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL); preempt_enable(); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (current->active_mm == mm) { if (current->mm) __flush_tlb_one(va); else leave_mm(smp_processor_id()); } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, va); preempt_enable(); } static void do_flush_tlb_all(void *info) { __flush_tlb_all(); if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(smp_processor_id()); } void flush_tlb_all(void) { 
on_each_cpu(do_flush_tlb_all, NULL, 1); }
gpl-2.0
AKToronto/Bubba-Zombie
net/dccp/ccids/ccid2.c
9441
23704
/* * Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk> * * Changes to meet Linux coding standards, and DCCP infrastructure fixes. * * Copyright (c) 2006 Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * This implementation should follow RFC 4341 */ #include <linux/slab.h> #include "../feat.h" #include "ccid2.h" #ifdef CONFIG_IP_DCCP_CCID2_DEBUG static bool ccid2_debug; #define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) #else #define ccid2_pr_debug(format, a...) #endif static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc) { struct ccid2_seq *seqp; int i; /* check if we have space to preserve the pointer to the buffer */ if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) / sizeof(struct ccid2_seq *))) return -ENOMEM; /* allocate buffer and initialize linked list */ seqp = kmalloc(CCID2_SEQBUF_LEN * sizeof(struct ccid2_seq), gfp_any()); if (seqp == NULL) return -ENOMEM; for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) { seqp[i].ccid2s_next = &seqp[i + 1]; seqp[i + 1].ccid2s_prev = &seqp[i]; } seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp; seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; /* This is the first allocation. Initiate the head and tail. 
*/ if (hc->tx_seqbufc == 0) hc->tx_seqh = hc->tx_seqt = seqp; else { /* link the existing list with the one we just created */ hc->tx_seqh->ccid2s_next = seqp; seqp->ccid2s_prev = hc->tx_seqh; hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt; } /* store the original pointer to the buffer so we can free it */ hc->tx_seqbuf[hc->tx_seqbufc] = seqp; hc->tx_seqbufc++; return 0; } static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) { if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk))) return CCID_PACKET_WILL_DEQUEUE_LATER; return CCID_PACKET_SEND_AT_ONCE; } static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) { u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2); /* * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always * acceptable since this causes starvation/deadlock whenever cwnd < 2. * The same problem arises when Ack Ratio is 0 (ie. Ack Ratio disabled). */ if (val == 0 || val > max_ratio) { DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio); val = max_ratio; } dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO, min_t(u32, val, DCCPF_ACK_RATIO_MAX)); } static void ccid2_check_l_ack_ratio(struct sock *sk) { struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); /* * After a loss, idle period, application limited period, or RTO we * need to check that the ack ratio is still less than the congestion * window. Otherwise, we will send an entire congestion window of * packets and got no response because we haven't sent ack ratio * packets yet. * If the ack ratio does need to be reduced, we reduce it to half of * the congestion window (or 1 if that's zero) instead of to the * congestion window. This prevents problems if one ack is lost. */ if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd) ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? 
: 1U); } static void ccid2_change_l_seq_window(struct sock *sk, u64 val) { dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW, clamp_val(val, DCCPF_SEQ_WMIN, DCCPF_SEQ_WMAX)); } static void ccid2_hc_tx_rto_expire(unsigned long data) { struct sock *sk = (struct sock *)data; struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); const bool sender_was_blocked = ccid2_cwnd_network_limited(hc); bh_lock_sock(sk); if (sock_owned_by_user(sk)) { sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5); goto out; } ccid2_pr_debug("RTO_EXPIRE\n"); /* back-off timer */ hc->tx_rto <<= 1; if (hc->tx_rto > DCCP_RTO_MAX) hc->tx_rto = DCCP_RTO_MAX; /* adjust pipe, cwnd etc */ hc->tx_ssthresh = hc->tx_cwnd / 2; if (hc->tx_ssthresh < 2) hc->tx_ssthresh = 2; hc->tx_cwnd = 1; hc->tx_pipe = 0; /* clear state about stuff we sent */ hc->tx_seqt = hc->tx_seqh; hc->tx_packets_acked = 0; /* clear ack ratio state. */ hc->tx_rpseq = 0; hc->tx_rpdupack = -1; ccid2_change_l_ack_ratio(sk, 1); /* if we were blocked before, we may now send cwnd=1 packet */ if (sender_was_blocked) tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); /* restart backed-off timer */ sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); out: bh_unlock_sock(sk); sock_put(sk); } /* * Congestion window validation (RFC 2861). */ static bool ccid2_do_cwv = true; module_param(ccid2_do_cwv, bool, 0644); MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation"); /** * ccid2_update_used_window - Track how much of cwnd is actually used * This is done in addition to CWV. The sender needs to have an idea of how many * packets may be in flight, to set the local Sequence Window value accordingly * (RFC 4340, 7.5.2). The CWV mechanism is exploited to keep track of the * maximum-used window. We use an EWMA low-pass filter to filter out noise. 
*/ static void ccid2_update_used_window(struct ccid2_hc_tx_sock *hc, u32 new_wnd) { hc->tx_expected_wnd = (3 * hc->tx_expected_wnd + new_wnd) / 4; } /* This borrows the code of tcp_cwnd_application_limited() */ static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now) { struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); /* don't reduce cwnd below the initial window (IW) */ u32 init_win = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache), win_used = max(hc->tx_cwnd_used, init_win); if (win_used < hc->tx_cwnd) { hc->tx_ssthresh = max(hc->tx_ssthresh, (hc->tx_cwnd >> 1) + (hc->tx_cwnd >> 2)); hc->tx_cwnd = (hc->tx_cwnd + win_used) >> 1; } hc->tx_cwnd_used = 0; hc->tx_cwnd_stamp = now; ccid2_check_l_ack_ratio(sk); } /* This borrows the code of tcp_cwnd_restart() */ static void ccid2_cwnd_restart(struct sock *sk, const u32 now) { struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); u32 cwnd = hc->tx_cwnd, restart_cwnd, iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache); hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2)); /* don't reduce cwnd below the initial window (IW) */ restart_cwnd = min(cwnd, iwnd); cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto; hc->tx_cwnd = max(cwnd, restart_cwnd); hc->tx_cwnd_stamp = now; hc->tx_cwnd_used = 0; ccid2_check_l_ack_ratio(sk); } static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len) { struct dccp_sock *dp = dccp_sk(sk); struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); const u32 now = ccid2_time_stamp; struct ccid2_seq *next; /* slow-start after idle periods (RFC 2581, RFC 2861) */ if (ccid2_do_cwv && !hc->tx_pipe && (s32)(now - hc->tx_lsndtime) >= hc->tx_rto) ccid2_cwnd_restart(sk, now); hc->tx_lsndtime = now; hc->tx_pipe += 1; /* see whether cwnd was fully used (RFC 2861), update expected window */ if (ccid2_cwnd_network_limited(hc)) { ccid2_update_used_window(hc, hc->tx_cwnd); hc->tx_cwnd_used = 0; hc->tx_cwnd_stamp = now; } else { if (hc->tx_pipe > hc->tx_cwnd_used) 
hc->tx_cwnd_used = hc->tx_pipe; ccid2_update_used_window(hc, hc->tx_cwnd_used); if (ccid2_do_cwv && (s32)(now - hc->tx_cwnd_stamp) >= hc->tx_rto) ccid2_cwnd_application_limited(sk, now); } hc->tx_seqh->ccid2s_seq = dp->dccps_gss; hc->tx_seqh->ccid2s_acked = 0; hc->tx_seqh->ccid2s_sent = now; next = hc->tx_seqh->ccid2s_next; /* check if we need to alloc more space */ if (next == hc->tx_seqt) { if (ccid2_hc_tx_alloc_seq(hc)) { DCCP_CRIT("packet history - out of memory!"); /* FIXME: find a more graceful way to bail out */ return; } next = hc->tx_seqh->ccid2s_next; BUG_ON(next == hc->tx_seqt); } hc->tx_seqh = next; ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe); /* * FIXME: The code below is broken and the variables have been removed * from the socket struct. The `ackloss' variable was always set to 0, * and with arsent there are several problems: * (i) it doesn't just count the number of Acks, but all sent packets; * (ii) it is expressed in # of packets, not # of windows, so the * comparison below uses the wrong formula: Appendix A of RFC 4341 * comes up with the number K = cwnd / (R^2 - R) of consecutive windows * of data with no lost or marked Ack packets. If arsent were the # of * consecutive Acks received without loss, then Ack Ratio needs to be * decreased by 1 when * arsent >= K * cwnd / R = cwnd^2 / (R^3 - R^2) * where cwnd / R is the number of Acks received per window of data * (cf. RFC 4341, App. A). The problems are that * - arsent counts other packets as well; * - the comparison uses a formula different from RFC 4341; * - computing a cubic/quadratic equation each time is too complicated. * Hence a different algorithm is needed. */ #if 0 /* Ack Ratio. Need to maintain a concept of how many windows we sent */ hc->tx_arsent++; /* We had an ack loss in this window... */ if (hc->tx_ackloss) { if (hc->tx_arsent >= hc->tx_cwnd) { hc->tx_arsent = 0; hc->tx_ackloss = 0; } } else { /* No acks lost up to now... 
*/ /* decrease ack ratio if enough packets were sent */ if (dp->dccps_l_ack_ratio > 1) { /* XXX don't calculate denominator each time */ int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio - dp->dccps_l_ack_ratio; denom = hc->tx_cwnd * hc->tx_cwnd / denom; if (hc->tx_arsent >= denom) { ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1); hc->tx_arsent = 0; } } else { /* we can't increase ack ratio further [1] */ hc->tx_arsent = 0; /* or maybe set it to cwnd*/ } } #endif sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); #ifdef CONFIG_IP_DCCP_CCID2_DEBUG do { struct ccid2_seq *seqp = hc->tx_seqt; while (seqp != hc->tx_seqh) { ccid2_pr_debug("out seq=%llu acked=%d time=%u\n", (unsigned long long)seqp->ccid2s_seq, seqp->ccid2s_acked, seqp->ccid2s_sent); seqp = seqp->ccid2s_next; } } while (0); ccid2_pr_debug("=========\n"); #endif } /** * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm * This code is almost identical with TCP's tcp_rtt_estimator(), since * - it has a higher sampling frequency (recommended by RFC 1323), * - the RTO does not collapse into RTT due to RTTVAR going towards zero, * - it is simple (cf. more complex proposals such as Eifel timer or research * which suggests that the gain should be set according to window size), * - in tests it was found to work well with CCID2 [gerrit]. */ static void ccid2_rtt_estimator(struct sock *sk, const long mrtt) { struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); long m = mrtt ? : 1; if (hc->tx_srtt == 0) { /* First measurement m */ hc->tx_srtt = m << 3; hc->tx_mdev = m << 1; hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk)); hc->tx_rttvar = hc->tx_mdev_max; hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss; } else { /* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */ m -= (hc->tx_srtt >> 3); hc->tx_srtt += m; /* Similarly, update scaled mdev with regard to |m| */ if (m < 0) { m = -m; m -= (hc->tx_mdev >> 2); /* * This neutralises RTO increase when RTT < SRTT - mdev * (see P. 
Sarolahti, A. Kuznetsov,"Congestion Control * in Linux TCP", USENIX 2002, pp. 49-62). */ if (m > 0) m >>= 3; } else { m -= (hc->tx_mdev >> 2); } hc->tx_mdev += m; if (hc->tx_mdev > hc->tx_mdev_max) { hc->tx_mdev_max = hc->tx_mdev; if (hc->tx_mdev_max > hc->tx_rttvar) hc->tx_rttvar = hc->tx_mdev_max; } /* * Decay RTTVAR at most once per flight, exploiting that * 1) pipe <= cwnd <= Sequence_Window = W (RFC 4340, 7.5.2) * 2) AWL = GSS-W+1 <= GAR <= GSS (RFC 4340, 7.5.1) * GAR is a useful bound for FlightSize = pipe. * AWL is probably too low here, as it over-estimates pipe. */ if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) { if (hc->tx_mdev_max < hc->tx_rttvar) hc->tx_rttvar -= (hc->tx_rttvar - hc->tx_mdev_max) >> 2; hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss; hc->tx_mdev_max = tcp_rto_min(sk); } } /* * Set RTO from SRTT and RTTVAR * As in TCP, 4 * RTTVAR >= TCP_RTO_MIN, giving a minimum RTO of 200 ms. * This agrees with RFC 4341, 5: * "Because DCCP does not retransmit data, DCCP does not require * TCP's recommended minimum timeout of one second". 
*/ hc->tx_rto = (hc->tx_srtt >> 3) + hc->tx_rttvar; if (hc->tx_rto > DCCP_RTO_MAX) hc->tx_rto = DCCP_RTO_MAX; } static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp, unsigned int *maxincr) { struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); struct dccp_sock *dp = dccp_sk(sk); int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio; if (hc->tx_cwnd < dp->dccps_l_seq_win && r_seq_used < dp->dccps_r_seq_win) { if (hc->tx_cwnd < hc->tx_ssthresh) { if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) { hc->tx_cwnd += 1; *maxincr -= 1; hc->tx_packets_acked = 0; } } else if (++hc->tx_packets_acked >= hc->tx_cwnd) { hc->tx_cwnd += 1; hc->tx_packets_acked = 0; } } /* * Adjust the local sequence window and the ack ratio to allow about * 5 times the number of packets in the network (RFC 4340 7.5.2) */ if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win) ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2); else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2) ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U); if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win) ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2); else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2) ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2); /* * FIXME: RTT is sampled several times per acknowledgment (for each * entry in the Ack Vector), instead of once per Ack (as in TCP SACK). * This causes the RTT to be over-estimated, since the older entries * in the Ack Vector have earlier sending times. * The cleanest solution is to not use the ccid2s_sent field at all * and instead use DCCP timestamps: requires changes in other places. 
*/ ccid2_rtt_estimator(sk, ccid2_time_stamp - seqp->ccid2s_sent); } static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp) { struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); if ((s32)(seqp->ccid2s_sent - hc->tx_last_cong) < 0) { ccid2_pr_debug("Multiple losses in an RTT---treating as one\n"); return; } hc->tx_last_cong = ccid2_time_stamp; hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U; hc->tx_ssthresh = max(hc->tx_cwnd, 2U); ccid2_check_l_ack_ratio(sk); } static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type, u8 option, u8 *optval, u8 optlen) { struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); switch (option) { case DCCPO_ACK_VECTOR_0: case DCCPO_ACK_VECTOR_1: return dccp_ackvec_parsed_add(&hc->tx_av_chunks, optval, optlen, option - DCCPO_ACK_VECTOR_0); } return 0; } static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) { struct dccp_sock *dp = dccp_sk(sk); struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); const bool sender_was_blocked = ccid2_cwnd_network_limited(hc); struct dccp_ackvec_parsed *avp; u64 ackno, seqno; struct ccid2_seq *seqp; int done = 0; unsigned int maxincr = 0; /* check reverse path congestion */ seqno = DCCP_SKB_CB(skb)->dccpd_seq; /* XXX this whole "algorithm" is broken. Need to fix it to keep track * of the seqnos of the dupacks so that rpseq and rpdupack are correct * -sorbo. 
*/ /* need to bootstrap */ if (hc->tx_rpdupack == -1) { hc->tx_rpdupack = 0; hc->tx_rpseq = seqno; } else { /* check if packet is consecutive */ if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1) hc->tx_rpseq = seqno; /* it's a later packet */ else if (after48(seqno, hc->tx_rpseq)) { hc->tx_rpdupack++; /* check if we got enough dupacks */ if (hc->tx_rpdupack >= NUMDUPACK) { hc->tx_rpdupack = -1; /* XXX lame */ hc->tx_rpseq = 0; #ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__ /* * FIXME: Ack Congestion Control is broken; in * the current state instabilities occurred with * Ack Ratios greater than 1; causing hang-ups * and long RTO timeouts. This needs to be fixed * before opening up dynamic changes. -- gerrit */ ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio); #endif } } } /* check forward path congestion */ if (dccp_packet_without_ack(skb)) return; /* still didn't send out new data packets */ if (hc->tx_seqh == hc->tx_seqt) goto done; ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; if (after48(ackno, hc->tx_high_ack)) hc->tx_high_ack = ackno; seqp = hc->tx_seqt; while (before48(seqp->ccid2s_seq, ackno)) { seqp = seqp->ccid2s_next; if (seqp == hc->tx_seqh) { seqp = hc->tx_seqh->ccid2s_prev; break; } } /* * In slow-start, cwnd can increase up to a maximum of Ack Ratio/2 * packets per acknowledgement. Rounding up avoids that cwnd is not * advanced when Ack Ratio is 1 and gives a slight edge otherwise. */ if (hc->tx_cwnd < hc->tx_ssthresh) maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2); /* go through all ack vectors */ list_for_each_entry(avp, &hc->tx_av_chunks, node) { /* go through this ack vector */ for (; avp->len--; avp->vec++) { u64 ackno_end_rl = SUB48(ackno, dccp_ackvec_runlen(avp->vec)); ccid2_pr_debug("ackvec %llu |%u,%u|\n", (unsigned long long)ackno, dccp_ackvec_state(avp->vec) >> 6, dccp_ackvec_runlen(avp->vec)); /* if the seqno we are analyzing is larger than the * current ackno, then move towards the tail of our * seqnos. 
*/ while (after48(seqp->ccid2s_seq, ackno)) { if (seqp == hc->tx_seqt) { done = 1; break; } seqp = seqp->ccid2s_prev; } if (done) break; /* check all seqnos in the range of the vector * run length */ while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) { const u8 state = dccp_ackvec_state(avp->vec); /* new packet received or marked */ if (state != DCCPAV_NOT_RECEIVED && !seqp->ccid2s_acked) { if (state == DCCPAV_ECN_MARKED) ccid2_congestion_event(sk, seqp); else ccid2_new_ack(sk, seqp, &maxincr); seqp->ccid2s_acked = 1; ccid2_pr_debug("Got ack for %llu\n", (unsigned long long)seqp->ccid2s_seq); hc->tx_pipe--; } if (seqp == hc->tx_seqt) { done = 1; break; } seqp = seqp->ccid2s_prev; } if (done) break; ackno = SUB48(ackno_end_rl, 1); } if (done) break; } /* The state about what is acked should be correct now * Check for NUMDUPACK */ seqp = hc->tx_seqt; while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) { seqp = seqp->ccid2s_next; if (seqp == hc->tx_seqh) { seqp = hc->tx_seqh->ccid2s_prev; break; } } done = 0; while (1) { if (seqp->ccid2s_acked) { done++; if (done == NUMDUPACK) break; } if (seqp == hc->tx_seqt) break; seqp = seqp->ccid2s_prev; } /* If there are at least 3 acknowledgements, anything unacknowledged * below the last sequence number is considered lost */ if (done == NUMDUPACK) { struct ccid2_seq *last_acked = seqp; /* check for lost packets */ while (1) { if (!seqp->ccid2s_acked) { ccid2_pr_debug("Packet lost: %llu\n", (unsigned long long)seqp->ccid2s_seq); /* XXX need to traverse from tail -> head in * order to detect multiple congestion events in * one ack vector. 
*/ ccid2_congestion_event(sk, seqp); hc->tx_pipe--; } if (seqp == hc->tx_seqt) break; seqp = seqp->ccid2s_prev; } hc->tx_seqt = last_acked; } /* trim acked packets in tail */ while (hc->tx_seqt != hc->tx_seqh) { if (!hc->tx_seqt->ccid2s_acked) break; hc->tx_seqt = hc->tx_seqt->ccid2s_next; } /* restart RTO timer if not all outstanding data has been acked */ if (hc->tx_pipe == 0) sk_stop_timer(sk, &hc->tx_rtotimer); else sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); done: /* check if incoming Acks allow pending packets to be sent */ if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); } static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) { struct ccid2_hc_tx_sock *hc = ccid_priv(ccid); struct dccp_sock *dp = dccp_sk(sk); u32 max_ratio; /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */ hc->tx_ssthresh = ~0U; /* Use larger initial windows (RFC 4341, section 5). */ hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache); hc->tx_expected_wnd = hc->tx_cwnd; /* Make sure that Ack Ratio is enabled and within bounds. */ max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2); if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio) dp->dccps_l_ack_ratio = max_ratio; /* XXX init ~ to window size... 
*/ if (ccid2_hc_tx_alloc_seq(hc)) return -ENOMEM; hc->tx_rto = DCCP_TIMEOUT_INIT; hc->tx_rpdupack = -1; hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_time_stamp; hc->tx_cwnd_used = 0; setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, (unsigned long)sk); INIT_LIST_HEAD(&hc->tx_av_chunks); return 0; } static void ccid2_hc_tx_exit(struct sock *sk) { struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); int i; sk_stop_timer(sk, &hc->tx_rtotimer); for (i = 0; i < hc->tx_seqbufc; i++) kfree(hc->tx_seqbuf[i]); hc->tx_seqbufc = 0; } static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) { struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk); if (!dccp_data_packet(skb)) return; if (++hc->rx_num_data_pkts >= dccp_sk(sk)->dccps_r_ack_ratio) { dccp_send_ack(sk); hc->rx_num_data_pkts = 0; } } struct ccid_operations ccid2_ops = { .ccid_id = DCCPC_CCID2, .ccid_name = "TCP-like", .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock), .ccid_hc_tx_init = ccid2_hc_tx_init, .ccid_hc_tx_exit = ccid2_hc_tx_exit, .ccid_hc_tx_send_packet = ccid2_hc_tx_send_packet, .ccid_hc_tx_packet_sent = ccid2_hc_tx_packet_sent, .ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options, .ccid_hc_tx_packet_recv = ccid2_hc_tx_packet_recv, .ccid_hc_rx_obj_size = sizeof(struct ccid2_hc_rx_sock), .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv, }; #ifdef CONFIG_IP_DCCP_CCID2_DEBUG module_param(ccid2_debug, bool, 0644); MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages"); #endif
gpl-2.0
jpihet/linux-omap
drivers/infiniband/hw/mthca/mthca_mr.c
9953
23989
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/slab.h> #include <linux/errno.h> #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_memfree.h" struct mthca_mtt { struct mthca_buddy *buddy; int order; u32 first_seg; }; /* * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 
*/ struct mthca_mpt_entry { __be32 flags; __be32 page_size; __be32 key; __be32 pd; __be64 start; __be64 length; __be32 lkey; __be32 window_count; __be32 window_count_limit; __be64 mtt_seg; __be32 mtt_sz; /* Arbel only */ u32 reserved[2]; } __attribute__((packed)); #define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28) #define MTHCA_MPT_FLAG_MIO (1 << 17) #define MTHCA_MPT_FLAG_BIND_ENABLE (1 << 15) #define MTHCA_MPT_FLAG_PHYSICAL (1 << 9) #define MTHCA_MPT_FLAG_REGION (1 << 8) #define MTHCA_MTT_FLAG_PRESENT 1 #define MTHCA_MPT_STATUS_SW 0xF0 #define MTHCA_MPT_STATUS_HW 0x00 #define SINAI_FMR_KEY_INC 0x1000000 /* * Buddy allocator for MTT segments (currently not very efficient * since it doesn't keep a free list and just searches linearly * through the bitmaps) */ static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order) { int o; int m; u32 seg; spin_lock(&buddy->lock); for (o = order; o <= buddy->max_order; ++o) if (buddy->num_free[o]) { m = 1 << (buddy->max_order - o); seg = find_first_bit(buddy->bits[o], m); if (seg < m) goto found; } spin_unlock(&buddy->lock); return -1; found: clear_bit(seg, buddy->bits[o]); --buddy->num_free[o]; while (o > order) { --o; seg <<= 1; set_bit(seg ^ 1, buddy->bits[o]); ++buddy->num_free[o]; } spin_unlock(&buddy->lock); seg <<= order; return seg; } static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) { seg >>= order; spin_lock(&buddy->lock); while (test_bit(seg ^ 1, buddy->bits[order])) { clear_bit(seg ^ 1, buddy->bits[order]); --buddy->num_free[order]; seg >>= 1; ++order; } set_bit(seg, buddy->bits[order]); ++buddy->num_free[order]; spin_unlock(&buddy->lock); } static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) { int i, s; buddy->max_order = max_order; spin_lock_init(&buddy->lock); buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), GFP_KERNEL); buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, GFP_KERNEL); if (!buddy->bits || !buddy->num_free) goto 
err_out; for (i = 0; i <= buddy->max_order; ++i) { s = BITS_TO_LONGS(1 << (buddy->max_order - i)); buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL); if (!buddy->bits[i]) goto err_out_free; bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i)); } set_bit(0, buddy->bits[buddy->max_order]); buddy->num_free[buddy->max_order] = 1; return 0; err_out_free: for (i = 0; i <= buddy->max_order; ++i) kfree(buddy->bits[i]); err_out: kfree(buddy->bits); kfree(buddy->num_free); return -ENOMEM; } static void mthca_buddy_cleanup(struct mthca_buddy *buddy) { int i; for (i = 0; i <= buddy->max_order; ++i) kfree(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); } static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order, struct mthca_buddy *buddy) { u32 seg = mthca_buddy_alloc(buddy, order); if (seg == -1) return -1; if (mthca_is_memfree(dev)) if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg, seg + (1 << order) - 1)) { mthca_buddy_free(buddy, seg, order); seg = -1; } return seg; } static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size, struct mthca_buddy *buddy) { struct mthca_mtt *mtt; int i; if (size <= 0) return ERR_PTR(-EINVAL); mtt = kmalloc(sizeof *mtt, GFP_KERNEL); if (!mtt) return ERR_PTR(-ENOMEM); mtt->buddy = buddy; mtt->order = 0; for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1) ++mtt->order; mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy); if (mtt->first_seg == -1) { kfree(mtt); return ERR_PTR(-ENOMEM); } return mtt; } struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size) { return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy); } void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt) { if (!mtt) return; mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order); mthca_table_put_range(dev, dev->mr_table.mtt_table, mtt->first_seg, mtt->first_seg + (1 << mtt->order) - 1); kfree(mtt); } static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, int 
start_index, u64 *buffer_list, int list_len) { struct mthca_mailbox *mailbox; __be64 *mtt_entry; int err = 0; int i; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); mtt_entry = mailbox->buf; while (list_len > 0) { mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size + start_index * 8); mtt_entry[1] = 0; for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i) mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT); /* * If we have an odd number of entries to write, add * one more dummy entry for firmware efficiency. */ if (i & 1) mtt_entry[i + 2] = 0; err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1); if (err) { mthca_warn(dev, "WRITE_MTT failed (%d)\n", err); goto out; } list_len -= i; start_index += i; buffer_list += i; } out: mthca_free_mailbox(dev, mailbox); return err; } int mthca_write_mtt_size(struct mthca_dev *dev) { if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy || !(dev->mthca_flags & MTHCA_FLAG_FMR)) /* * Be friendly to WRITE_MTT command * and leave two empty slots for the * index and reserved fields of the * mailbox. */ return PAGE_SIZE / sizeof (u64) - 2; /* For Arbel, all MTTs must fit in the same page. */ return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff; } static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) { u64 __iomem *mtts; int i; mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size + start_index * sizeof (u64); for (i = 0; i < list_len; ++i) mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT), mtts + i); } static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) { __be64 *mtts; dma_addr_t dma_handle; int i; int s = start_index * sizeof (u64); /* For Arbel, all MTTs must fit in the same page. 
*/ BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE); /* Require full segments */ BUG_ON(s % dev->limits.mtt_seg_size); mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg + s / dev->limits.mtt_seg_size, &dma_handle); BUG_ON(!mtts); dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE); for (i = 0; i < list_len; ++i) mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT); dma_sync_single_for_device(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE); } int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) { int size = mthca_write_mtt_size(dev); int chunk; if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy || !(dev->mthca_flags & MTHCA_FLAG_FMR)) return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len); while (list_len > 0) { chunk = min(size, list_len); if (mthca_is_memfree(dev)) mthca_arbel_write_mtt_seg(dev, mtt, start_index, buffer_list, chunk); else mthca_tavor_write_mtt_seg(dev, mtt, start_index, buffer_list, chunk); list_len -= chunk; start_index += chunk; buffer_list += chunk; } return 0; } static inline u32 tavor_hw_index_to_key(u32 ind) { return ind; } static inline u32 tavor_key_to_hw_index(u32 key) { return key; } static inline u32 arbel_hw_index_to_key(u32 ind) { return (ind >> 24) | (ind << 8); } static inline u32 arbel_key_to_hw_index(u32 key) { return (key << 24) | (key >> 8); } static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind) { if (mthca_is_memfree(dev)) return arbel_hw_index_to_key(ind); else return tavor_hw_index_to_key(ind); } static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key) { if (mthca_is_memfree(dev)) return arbel_key_to_hw_index(key); else return tavor_key_to_hw_index(key); } static inline u32 adjust_key(struct mthca_dev *dev, u32 key) { if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT) return ((key << 20) & 0x800000) | (key & 0x7fffff); 
else return key; } int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) { struct mthca_mailbox *mailbox; struct mthca_mpt_entry *mpt_entry; u32 key; int i; int err; WARN_ON(buffer_size_shift >= 32); key = mthca_alloc(&dev->mr_table.mpt_alloc); if (key == -1) return -ENOMEM; key = adjust_key(dev, key); mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key); if (mthca_is_memfree(dev)) { err = mthca_table_get(dev, dev->mr_table.mpt_table, key); if (err) goto err_out_mpt_free; } mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto err_out_table; } mpt_entry = mailbox->buf; mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | MTHCA_MPT_FLAG_MIO | MTHCA_MPT_FLAG_REGION | access); if (!mr->mtt) mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL); mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12); mpt_entry->key = cpu_to_be32(key); mpt_entry->pd = cpu_to_be32(pd); mpt_entry->start = cpu_to_be64(iova); mpt_entry->length = cpu_to_be64(total_size); memset(&mpt_entry->lkey, 0, sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey)); if (mr->mtt) mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base + mr->mtt->first_seg * dev->limits.mtt_seg_size); if (0) { mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey); for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { if (i % 4 == 0) printk("[%02x] ", i * 4); printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i])); if ((i + 1) % 4 == 0) printk("\n"); } } err = mthca_SW2HW_MPT(dev, mailbox, key & (dev->limits.num_mpts - 1)); if (err) { mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err); goto err_out_mailbox; } mthca_free_mailbox(dev, mailbox); return err; err_out_mailbox: mthca_free_mailbox(dev, mailbox); err_out_table: mthca_table_put(dev, dev->mr_table.mpt_table, key); err_out_mpt_free: mthca_free(&dev->mr_table.mpt_alloc, key); return err; } int 
mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, u32 access, struct mthca_mr *mr) { mr->mtt = NULL; return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr); } int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, u64 *buffer_list, int buffer_size_shift, int list_len, u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) { int err; mr->mtt = mthca_alloc_mtt(dev, list_len); if (IS_ERR(mr->mtt)) return PTR_ERR(mr->mtt); err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len); if (err) { mthca_free_mtt(dev, mr->mtt); return err; } err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova, total_size, access, mr); if (err) mthca_free_mtt(dev, mr->mtt); return err; } /* Free mr or fmr */ static void mthca_free_region(struct mthca_dev *dev, u32 lkey) { mthca_table_put(dev, dev->mr_table.mpt_table, key_to_hw_index(dev, lkey)); mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey)); } void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr) { int err; err = mthca_HW2SW_MPT(dev, NULL, key_to_hw_index(dev, mr->ibmr.lkey) & (dev->limits.num_mpts - 1)); if (err) mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err); mthca_free_region(dev, mr->ibmr.lkey); mthca_free_mtt(dev, mr->mtt); } int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, u32 access, struct mthca_fmr *mr) { struct mthca_mpt_entry *mpt_entry; struct mthca_mailbox *mailbox; u64 mtt_seg; u32 key, idx; int list_len = mr->attr.max_pages; int err = -ENOMEM; int i; if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32) return -EINVAL; /* For Arbel, all MTTs must fit in the same page. 
*/ if (mthca_is_memfree(dev) && mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE) return -EINVAL; mr->maps = 0; key = mthca_alloc(&dev->mr_table.mpt_alloc); if (key == -1) return -ENOMEM; key = adjust_key(dev, key); idx = key & (dev->limits.num_mpts - 1); mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key); if (mthca_is_memfree(dev)) { err = mthca_table_get(dev, dev->mr_table.mpt_table, key); if (err) goto err_out_mpt_free; mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL); BUG_ON(!mr->mem.arbel.mpt); } else mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base + sizeof *(mr->mem.tavor.mpt) * idx; mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy); if (IS_ERR(mr->mtt)) { err = PTR_ERR(mr->mtt); goto err_out_table; } mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size; if (mthca_is_memfree(dev)) { mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table, mr->mtt->first_seg, &mr->mem.arbel.dma_handle); BUG_ON(!mr->mem.arbel.mtts); } else mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto err_out_free_mtt; } mpt_entry = mailbox->buf; mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | MTHCA_MPT_FLAG_MIO | MTHCA_MPT_FLAG_REGION | access); mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12); mpt_entry->key = cpu_to_be32(key); mpt_entry->pd = cpu_to_be32(pd); memset(&mpt_entry->start, 0, sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start)); mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg); if (0) { mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey); for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { if (i % 4 == 0) printk("[%02x] ", i * 4); printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i])); if ((i + 1) % 4 == 0) printk("\n"); } } err = mthca_SW2HW_MPT(dev, mailbox, key & (dev->limits.num_mpts - 1)); if (err) { mthca_warn(dev, 
"SW2HW_MPT failed (%d)\n", err); goto err_out_mailbox_free; } mthca_free_mailbox(dev, mailbox); return 0; err_out_mailbox_free: mthca_free_mailbox(dev, mailbox); err_out_free_mtt: mthca_free_mtt(dev, mr->mtt); err_out_table: mthca_table_put(dev, dev->mr_table.mpt_table, key); err_out_mpt_free: mthca_free(&dev->mr_table.mpt_alloc, key); return err; } int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr) { if (fmr->maps) return -EBUSY; mthca_free_region(dev, fmr->ibmr.lkey); mthca_free_mtt(dev, fmr->mtt); return 0; } static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list, int list_len, u64 iova) { int i, page_mask; if (list_len > fmr->attr.max_pages) return -EINVAL; page_mask = (1 << fmr->attr.page_shift) - 1; /* We are getting page lists, so va must be page aligned. */ if (iova & page_mask) return -EINVAL; /* Trust the user not to pass misaligned data in page_list */ if (0) for (i = 0; i < list_len; ++i) { if (page_list[i] & ~page_mask) return -EINVAL; } if (fmr->maps >= fmr->attr.max_maps) return -EINVAL; return 0; } int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int list_len, u64 iova) { struct mthca_fmr *fmr = to_mfmr(ibfmr); struct mthca_dev *dev = to_mdev(ibfmr->device); struct mthca_mpt_entry mpt_entry; u32 key; int i, err; err = mthca_check_fmr(fmr, page_list, list_len, iova); if (err) return err; ++fmr->maps; key = tavor_key_to_hw_index(fmr->ibmr.lkey); key += dev->limits.num_mpts; fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key); writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt); for (i = 0; i < list_len; ++i) { __be64 mtt_entry = cpu_to_be64(page_list[i] | MTHCA_MTT_FLAG_PRESENT); mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i); } mpt_entry.lkey = cpu_to_be32(key); mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift)); mpt_entry.start = cpu_to_be64(iova); __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key); memcpy_toio(&fmr->mem.tavor.mpt->start, 
&mpt_entry.start, offsetof(struct mthca_mpt_entry, window_count) - offsetof(struct mthca_mpt_entry, start)); writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt); return 0; } int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int list_len, u64 iova) { struct mthca_fmr *fmr = to_mfmr(ibfmr); struct mthca_dev *dev = to_mdev(ibfmr->device); u32 key; int i, err; err = mthca_check_fmr(fmr, page_list, list_len, iova); if (err) return err; ++fmr->maps; key = arbel_key_to_hw_index(fmr->ibmr.lkey); if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT) key += SINAI_FMR_KEY_INC; else key += dev->limits.num_mpts; fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key); *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW; wmb(); dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle, list_len * sizeof(u64), DMA_TO_DEVICE); for (i = 0; i < list_len; ++i) fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] | MTHCA_MTT_FLAG_PRESENT); dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle, list_len * sizeof(u64), DMA_TO_DEVICE); fmr->mem.arbel.mpt->key = cpu_to_be32(key); fmr->mem.arbel.mpt->lkey = cpu_to_be32(key); fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift)); fmr->mem.arbel.mpt->start = cpu_to_be64(iova); wmb(); *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW; wmb(); return 0; } void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) { if (!fmr->maps) return; fmr->maps = 0; writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt); } void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) { if (!fmr->maps) return; fmr->maps = 0; *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW; } int mthca_init_mr_table(struct mthca_dev *dev) { phys_addr_t addr; int mpts, mtts, err, i; err = mthca_alloc_init(&dev->mr_table.mpt_alloc, dev->limits.num_mpts, ~0, dev->limits.reserved_mrws); if (err) return err; if (!mthca_is_memfree(dev) && (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) 
dev->limits.fmr_reserved_mtts = 0; else dev->mthca_flags |= MTHCA_FLAG_FMR; if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT) mthca_dbg(dev, "Memory key throughput optimization activated.\n"); err = mthca_buddy_init(&dev->mr_table.mtt_buddy, fls(dev->limits.num_mtt_segs - 1)); if (err) goto err_mtt_buddy; dev->mr_table.tavor_fmr.mpt_base = NULL; dev->mr_table.tavor_fmr.mtt_base = NULL; if (dev->limits.fmr_reserved_mtts) { i = fls(dev->limits.fmr_reserved_mtts - 1); if (i >= 31) { mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n"); err = -EINVAL; goto err_fmr_mpt; } mpts = mtts = 1 << i; } else { mtts = dev->limits.num_mtt_segs; mpts = dev->limits.num_mpts; } if (!mthca_is_memfree(dev) && (dev->mthca_flags & MTHCA_FLAG_FMR)) { addr = pci_resource_start(dev->pdev, 4) + ((pci_resource_len(dev->pdev, 4) - 1) & dev->mr_table.mpt_base); dev->mr_table.tavor_fmr.mpt_base = ioremap(addr, mpts * sizeof(struct mthca_mpt_entry)); if (!dev->mr_table.tavor_fmr.mpt_base) { mthca_warn(dev, "MPT ioremap for FMR failed.\n"); err = -ENOMEM; goto err_fmr_mpt; } addr = pci_resource_start(dev->pdev, 4) + ((pci_resource_len(dev->pdev, 4) - 1) & dev->mr_table.mtt_base); dev->mr_table.tavor_fmr.mtt_base = ioremap(addr, mtts * dev->limits.mtt_seg_size); if (!dev->mr_table.tavor_fmr.mtt_base) { mthca_warn(dev, "MTT ioremap for FMR failed.\n"); err = -ENOMEM; goto err_fmr_mtt; } } if (dev->limits.fmr_reserved_mtts) { err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, fls(mtts - 1)); if (err) goto err_fmr_mtt_buddy; /* Prevent regular MRs from using FMR keys */ err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, fls(mtts - 1)); if (err) goto err_reserve_fmr; dev->mr_table.fmr_mtt_buddy = &dev->mr_table.tavor_fmr.mtt_buddy; } else dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy; /* FMR table is always the first, take reserved MTTs out of there */ if (dev->limits.reserved_mtts) { i = fls(dev->limits.reserved_mtts - 1); if (mthca_alloc_mtt_range(dev, i, dev->mr_table.fmr_mtt_buddy) == 
-1) { mthca_warn(dev, "MTT table of order %d is too small.\n", dev->mr_table.fmr_mtt_buddy->max_order); err = -ENOMEM; goto err_reserve_mtts; } } return 0; err_reserve_mtts: err_reserve_fmr: if (dev->limits.fmr_reserved_mtts) mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy); err_fmr_mtt_buddy: if (dev->mr_table.tavor_fmr.mtt_base) iounmap(dev->mr_table.tavor_fmr.mtt_base); err_fmr_mtt: if (dev->mr_table.tavor_fmr.mpt_base) iounmap(dev->mr_table.tavor_fmr.mpt_base); err_fmr_mpt: mthca_buddy_cleanup(&dev->mr_table.mtt_buddy); err_mtt_buddy: mthca_alloc_cleanup(&dev->mr_table.mpt_alloc); return err; } void mthca_cleanup_mr_table(struct mthca_dev *dev) { /* XXX check if any MRs are still allocated? */ if (dev->limits.fmr_reserved_mtts) mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy); mthca_buddy_cleanup(&dev->mr_table.mtt_buddy); if (dev->mr_table.tavor_fmr.mtt_base) iounmap(dev->mr_table.tavor_fmr.mtt_base); if (dev->mr_table.tavor_fmr.mpt_base) iounmap(dev->mr_table.tavor_fmr.mpt_base); mthca_alloc_cleanup(&dev->mr_table.mpt_alloc); }
gpl-2.0
Evil-Green/Lonas_KL-GT-I9300
net/irda/irlan/irlan_client_event.c
13281
13511
/********************************************************************* * * Filename: irlan_client_event.c * Version: 0.9 * Description: IrLAN client state machine * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sun Aug 31 20:14:37 1997 * Modified at: Sun Dec 26 21:52:24 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/skbuff.h> #include <net/irda/irda.h> #include <net/irda/timer.h> #include <net/irda/irmod.h> #include <net/irda/iriap.h> #include <net/irda/irlmp.h> #include <net/irda/irttp.h> #include <net/irda/irlan_common.h> #include <net/irda/irlan_client.h> #include <net/irda/irlan_event.h> static int irlan_client_state_idle (struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb); static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb); static int irlan_client_state_conn (struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb); static int irlan_client_state_info (struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb); static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb); static int irlan_client_state_open (struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb); static int irlan_client_state_wait (struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb); static int irlan_client_state_arb (struct irlan_cb *self, IRLAN_EVENT event, 
struct sk_buff *skb); static int irlan_client_state_data (struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb); static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb); static int irlan_client_state_sync (struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb); static int (*state[])(struct irlan_cb *, IRLAN_EVENT event, struct sk_buff *) = { irlan_client_state_idle, irlan_client_state_query, irlan_client_state_conn, irlan_client_state_info, irlan_client_state_media, irlan_client_state_open, irlan_client_state_wait, irlan_client_state_arb, irlan_client_state_data, irlan_client_state_close, irlan_client_state_sync }; void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); (*state[ self->client.state]) (self, event, skb); } /* * Function irlan_client_state_idle (event, skb, info) * * IDLE, We are waiting for an indication that there is a provider * available. 
*/ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); switch (event) { case IRLAN_DISCOVERY_INDICATION: if (self->client.iriap) { IRDA_WARNING("%s(), busy with a previous query\n", __func__); return -EBUSY; } self->client.iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, irlan_client_get_value_confirm); /* Get some values from peer IAS */ irlan_next_client_state(self, IRLAN_QUERY); iriap_getvaluebyclass_request(self->client.iriap, self->saddr, self->daddr, "IrLAN", "IrDA:TinyTP:LsapSel"); break; case IRLAN_WATCHDOG_TIMEOUT: IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); break; default: IRDA_DEBUG(4, "%s(), Unknown event %d\n", __func__ , event); break; } if (skb) dev_kfree_skb(skb); return 0; } /* * Function irlan_client_state_query (event, skb, info) * * QUERY, We have queryed the remote IAS and is ready to connect * to provider, just waiting for the confirm. * */ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); switch(event) { case IRLAN_IAS_PROVIDER_AVAIL: IRDA_ASSERT(self->dtsap_sel_ctrl != 0, return -1;); self->client.open_retries = 0; irttp_connect_request(self->client.tsap_ctrl, self->dtsap_sel_ctrl, self->saddr, self->daddr, NULL, IRLAN_MTU, NULL); irlan_next_client_state(self, IRLAN_CONN); break; case IRLAN_IAS_PROVIDER_NOT_AVAIL: IRDA_DEBUG(2, "%s(), IAS_PROVIDER_NOT_AVAIL\n", __func__ ); irlan_next_client_state(self, IRLAN_IDLE); /* Give the client a kick! 
*/ if ((self->provider.access_type == ACCESS_PEER) && (self->provider.state != IRLAN_IDLE)) irlan_client_wakeup(self, self->saddr, self->daddr); break; case IRLAN_LMP_DISCONNECT: case IRLAN_LAP_DISCONNECT: irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); break; default: IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); break; } if (skb) dev_kfree_skb(skb); return 0; } /* * Function irlan_client_state_conn (event, skb, info) * * CONN, We have connected to a provider but has not issued any * commands yet. * */ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); switch (event) { case IRLAN_CONNECT_COMPLETE: /* Send getinfo cmd */ irlan_get_provider_info(self); irlan_next_client_state(self, IRLAN_INFO); break; case IRLAN_LMP_DISCONNECT: case IRLAN_LAP_DISCONNECT: irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); break; default: IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); break; } if (skb) dev_kfree_skb(skb); return 0; } /* * Function irlan_client_state_info (self, event, skb, info) * * INFO, We have issued a GetInfo command and is awaiting a reply. 
*/ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); switch (event) { case IRLAN_DATA_INDICATION: IRDA_ASSERT(skb != NULL, return -1;); irlan_client_parse_response(self, skb); irlan_next_client_state(self, IRLAN_MEDIA); irlan_get_media_char(self); break; case IRLAN_LMP_DISCONNECT: case IRLAN_LAP_DISCONNECT: irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); break; default: IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); break; } if (skb) dev_kfree_skb(skb); return 0; } /* * Function irlan_client_state_media (self, event, skb, info) * * MEDIA, The irlan_client has issued a GetMedia command and is awaiting a * reply. * */ static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); switch(event) { case IRLAN_DATA_INDICATION: irlan_client_parse_response(self, skb); irlan_open_data_channel(self); irlan_next_client_state(self, IRLAN_OPEN); break; case IRLAN_LMP_DISCONNECT: case IRLAN_LAP_DISCONNECT: irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); break; default: IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); break; } if (skb) dev_kfree_skb(skb); return 0; } /* * Function irlan_client_state_open (self, event, skb, info) * * OPEN, The irlan_client has issued a OpenData command and is awaiting a * reply * */ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { struct qos_info qos; IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); switch(event) { case IRLAN_DATA_INDICATION: irlan_client_parse_response(self, skb); /* * Check if we have got the remote TSAP for data * 
communications */ IRDA_ASSERT(self->dtsap_sel_data != 0, return -1;); /* Check which access type we are dealing with */ switch (self->client.access_type) { case ACCESS_PEER: if (self->provider.state == IRLAN_OPEN) { irlan_next_client_state(self, IRLAN_ARB); irlan_do_client_event(self, IRLAN_CHECK_CON_ARB, NULL); } else { irlan_next_client_state(self, IRLAN_WAIT); } break; case ACCESS_DIRECT: case ACCESS_HOSTED: qos.link_disc_time.bits = 0x01; /* 3 secs */ irttp_connect_request(self->tsap_data, self->dtsap_sel_data, self->saddr, self->daddr, &qos, IRLAN_MTU, NULL); irlan_next_client_state(self, IRLAN_DATA); break; default: IRDA_DEBUG(2, "%s(), unknown access type!\n", __func__ ); break; } break; case IRLAN_LMP_DISCONNECT: case IRLAN_LAP_DISCONNECT: irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); break; default: IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); break; } if (skb) dev_kfree_skb(skb); return 0; } /* * Function irlan_client_state_wait (self, event, skb, info) * * WAIT, The irlan_client is waiting for the local provider to enter the * provider OPEN state. 
* */ static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); switch(event) { case IRLAN_PROVIDER_SIGNAL: irlan_next_client_state(self, IRLAN_ARB); irlan_do_client_event(self, IRLAN_CHECK_CON_ARB, NULL); break; case IRLAN_LMP_DISCONNECT: case IRLAN_LAP_DISCONNECT: irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); break; default: IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); break; } if (skb) dev_kfree_skb(skb); return 0; } static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { struct qos_info qos; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); switch(event) { case IRLAN_CHECK_CON_ARB: if (self->client.recv_arb_val == self->provider.send_arb_val) { irlan_next_client_state(self, IRLAN_CLOSE); irlan_close_data_channel(self); } else if (self->client.recv_arb_val < self->provider.send_arb_val) { qos.link_disc_time.bits = 0x01; /* 3 secs */ irlan_next_client_state(self, IRLAN_DATA); irttp_connect_request(self->tsap_data, self->dtsap_sel_data, self->saddr, self->daddr, &qos, IRLAN_MTU, NULL); } else if (self->client.recv_arb_val > self->provider.send_arb_val) { IRDA_DEBUG(2, "%s(), lost the battle :-(\n", __func__ ); } break; case IRLAN_DATA_CONNECT_INDICATION: irlan_next_client_state(self, IRLAN_DATA); break; case IRLAN_LMP_DISCONNECT: case IRLAN_LAP_DISCONNECT: irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); break; default: IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); break; } if (skb) dev_kfree_skb(skb); return 0; } /* * Function irlan_client_state_data (self, event, skb, info) * * DATA, The data channel is connected, allowing data transfers between * the local and remote 
machines. * */ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); switch(event) { case IRLAN_DATA_INDICATION: irlan_client_parse_response(self, skb); break; case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */ case IRLAN_LAP_DISCONNECT: irlan_next_client_state(self, IRLAN_IDLE); break; default: IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); break; } if (skb) dev_kfree_skb(skb); return 0; } /* * Function irlan_client_state_close (self, event, skb, info) * * * */ static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { IRDA_DEBUG(2, "%s()\n", __func__ ); if (skb) dev_kfree_skb(skb); return 0; } /* * Function irlan_client_state_sync (self, event, skb, info) * * * */ static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { IRDA_DEBUG(2, "%s()\n", __func__ ); if (skb) dev_kfree_skb(skb); return 0; }
gpl-2.0
imasahiro/gccjs
gcc/testsuite/gcc.c-torture/execute/lshrdi-1.c
226
4538
#include <limits.h> extern void abort(void); extern void exit(int); #if __LONG_LONG_MAX__ == 9223372036854775807LL #define BITS 64 static unsigned long long const zext[64] = { 0x87654321fedcba90ULL, 0x43b2a190ff6e5d48ULL, 0x21d950c87fb72ea4ULL, 0x10eca8643fdb9752ULL, 0x87654321fedcba9ULL, 0x43b2a190ff6e5d4ULL, 0x21d950c87fb72eaULL, 0x10eca8643fdb975ULL, 0x87654321fedcbaULL, 0x43b2a190ff6e5dULL, 0x21d950c87fb72eULL, 0x10eca8643fdb97ULL, 0x87654321fedcbULL, 0x43b2a190ff6e5ULL, 0x21d950c87fb72ULL, 0x10eca8643fdb9ULL, 0x87654321fedcULL, 0x43b2a190ff6eULL, 0x21d950c87fb7ULL, 0x10eca8643fdbULL, 0x87654321fedULL, 0x43b2a190ff6ULL, 0x21d950c87fbULL, 0x10eca8643fdULL, 0x87654321feULL, 0x43b2a190ffULL, 0x21d950c87fULL, 0x10eca8643fULL, 0x87654321fULL, 0x43b2a190fULL, 0x21d950c87ULL, 0x10eca8643ULL, 0x87654321ULL, 0x43b2a190ULL, 0x21d950c8ULL, 0x10eca864ULL, 0x8765432ULL, 0x43b2a19ULL, 0x21d950cULL, 0x10eca86ULL, 0x876543ULL, 0x43b2a1ULL, 0x21d950ULL, 0x10eca8ULL, 0x87654ULL, 0x43b2aULL, 0x21d95ULL, 0x10ecaULL, 0x8765ULL, 0x43b2ULL, 0x21d9ULL, 0x10ecULL, 0x876ULL, 0x43bULL, 0x21dULL, 0x10eULL, 0x87ULL, 0x43ULL, 0x21ULL, 0x10ULL, 0x8ULL, 0x4ULL, 0x2ULL, 0x1ULL }; #elif __LONG_LONG_MAX__ == 2147483647LL #define BITS 32 static unsigned long long const zext[32] = { 0x87654321ULL, 0x43b2a190ULL, 0x21d950c8ULL, 0x10eca864ULL, 0x8765432ULL, 0x43b2a19ULL, 0x21d950cULL, 0x10eca86ULL, 0x876543ULL, 0x43b2a1ULL, 0x21d950ULL, 0x10eca8ULL, 0x87654ULL, 0x43b2aULL, 0x21d95ULL, 0x10ecaULL, 0x8765ULL, 0x43b2ULL, 0x21d9ULL, 0x10ecULL, 0x876ULL, 0x43bULL, 0x21dULL, 0x10eULL, 0x87ULL, 0x43ULL, 0x21ULL, 0x10ULL, 0x8ULL, 0x4ULL, 0x2ULL, 0x1ULL, }; #else #error "Update the test case." 
#endif

/*
 * Logical right shift by a run-time-variable count: forces the compiler
 * to emit its genuine variable-shift sequence for unsigned long long.
 */
static unsigned long long
variable_shift(unsigned long long x, int i)
{
  return x >> i;
}

/*
 * Logical right shift where every shift count is a compile-time literal,
 * exercising the compiler's constant-shift expansion for each count
 * 0..BITS-1.  The exhaustive switch is the whole point of this torture
 * test -- it must NOT be collapsed into a single "x >> i".
 */
static unsigned long long
constant_shift(unsigned long long x, int i)
{
  switch (i)
    {
    case 0: x = x >> 0; break;
    case 1: x = x >> 1; break;
    case 2: x = x >> 2; break;
    case 3: x = x >> 3; break;
    case 4: x = x >> 4; break;
    case 5: x = x >> 5; break;
    case 6: x = x >> 6; break;
    case 7: x = x >> 7; break;
    case 8: x = x >> 8; break;
    case 9: x = x >> 9; break;
    case 10: x = x >> 10; break;
    case 11: x = x >> 11; break;
    case 12: x = x >> 12; break;
    case 13: x = x >> 13; break;
    case 14: x = x >> 14; break;
    case 15: x = x >> 15; break;
    case 16: x = x >> 16; break;
    case 17: x = x >> 17; break;
    case 18: x = x >> 18; break;
    case 19: x = x >> 19; break;
    case 20: x = x >> 20; break;
    case 21: x = x >> 21; break;
    case 22: x = x >> 22; break;
    case 23: x = x >> 23; break;
    case 24: x = x >> 24; break;
    case 25: x = x >> 25; break;
    case 26: x = x >> 26; break;
    case 27: x = x >> 27; break;
    case 28: x = x >> 28; break;
    case 29: x = x >> 29; break;
    case 30: x = x >> 30; break;
    case 31: x = x >> 31; break;
#if BITS > 32
    /* Only valid when unsigned long long is 64 bits wide; a shift count
       >= the type width would be undefined behavior.  */
    case 32: x = x >> 32; break;
    case 33: x = x >> 33; break;
    case 34: x = x >> 34; break;
    case 35: x = x >> 35; break;
    case 36: x = x >> 36; break;
    case 37: x = x >> 37; break;
    case 38: x = x >> 38; break;
    case 39: x = x >> 39; break;
    case 40: x = x >> 40; break;
    case 41: x = x >> 41; break;
    case 42: x = x >> 42; break;
    case 43: x = x >> 43; break;
    case 44: x = x >> 44; break;
    case 45: x = x >> 45; break;
    case 46: x = x >> 46; break;
    case 47: x = x >> 47; break;
    case 48: x = x >> 48; break;
    case 49: x = x >> 49; break;
    case 50: x = x >> 50; break;
    case 51: x = x >> 51; break;
    case 52: x = x >> 52; break;
    case 53: x = x >> 53; break;
    case 54: x = x >> 54; break;
    case 55: x = x >> 55; break;
    case 56: x = x >> 56; break;
    case 57: x = x >> 57; break;
    case 58: x = x >> 58; break;
    case 59: x = x >> 59; break;
    case 60: x = x >> 60; break;
    case 61: x = x >> 61; break;
    case 62: x = x >> 62; break;
    case 63: x = x >> 63; break;
#endif
    default: abort ();
    }
  return x;
}

/*
 * Compare both shift implementations against the precomputed zext[] table:
 * zext[i] is zext[0] logically shifted right by i.
 */
int main()
{
  int i;

  for (i = 0; i < BITS; ++i)
    {
      unsigned long long y = variable_shift (zext[0], i);
      if (y != zext[i])
	abort ();
    }
  for (i = 0; i < BITS; ++i)
    {
      unsigned long long y = constant_shift (zext[0], i);
      if (y != zext[i])
	abort ();
    }
  exit (0);
}
gpl-2.0
qnhoang81/Kernel_POC
sound/usb/usx2y/usX2Yhwdep.c
482
7508
/*
 * Driver for Tascam US-X2Y USB soundcards
 *
 * FPGA Loader + ALSA Startup
 *
 * Copyright (c) 2003 by Karsten Wiese <annabellesgarden@yahoo.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/interrupt.h>
#include <linux/usb.h>
#include <sound/core.h>
#include <sound/memalloc.h>
#include <sound/pcm.h>
#include <sound/hwdep.h>
#include "usx2y.h"
#include "usbusx2y.h"
#include "usX2Yhwdep.h"

/*
 * Page-fault handler for the mmap()ed us428 control area: map the faulting
 * file offset onto the corresponding page of us428ctls_sharedmem and hand
 * it back with an extra reference.
 */
static int snd_us428ctls_vm_fault(struct vm_area_struct *area,
				  struct vm_fault *vmf)
{
	unsigned long offset;
	struct page * page;
	void *vaddr;

	snd_printdd("ENTER, start %lXh, pgoff %ld\n",
		    area->vm_start,
		    vmf->pgoff);

	offset = vmf->pgoff << PAGE_SHIFT;
	/* vm_private_data was set to the usX2Ydev in snd_us428ctls_mmap() */
	vaddr = (char*)((struct usX2Ydev *)area->vm_private_data)->us428ctls_sharedmem + offset;
	page = virt_to_page(vaddr);
	get_page(page);		/* reference released by the VM on unmap */
	vmf->page = page;

	snd_printdd("vaddr=%p made us428ctls_vm_fault() page %p\n",
		    vaddr, page);

	return 0;
}

static const struct vm_operations_struct us428ctls_vm_ops = {
	.fault = snd_us428ctls_vm_fault,
};

/*
 * hwdep mmap callback: expose the lazily-allocated us428ctls_sharedmem
 * control/lights buffer to userspace.  Fails with -EBUSY until the FPGA
 * download has completed (USX2Y_STAT_CHIP_INIT set).
 */
static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct vm_area_struct *area)
{
	unsigned long	size = (unsigned long)(area->vm_end - area->vm_start);
	struct usX2Ydev	*us428 = hw->private_data;

	// FIXME this hwdep interface is used twice: fpga download and mmap for controlling Lights etc. Maybe better using 2 hwdep devs?
	// so as long as the device isn't fully initialised yet we return -EBUSY here.
	if (!(us428->chip_status & USX2Y_STAT_CHIP_INIT))
		return -EBUSY;

	/* if userspace tries to mmap beyond end of our buffer, fail */
	if (size > PAGE_ALIGN(sizeof(struct us428ctls_sharedmem))) {
		snd_printd( "%lu > %lu\n", size, (unsigned long)sizeof(struct us428ctls_sharedmem));
		return -EINVAL;
	}

	/* First mapping allocates the shared area; -1 fill marks all
	   controls as "unknown", CtlSnapShotLast = -2 means "no snapshot
	   taken yet" */
	if (!us428->us428ctls_sharedmem) {
		init_waitqueue_head(&us428->us428ctls_wait_queue_head);
		if(!(us428->us428ctls_sharedmem = snd_malloc_pages(sizeof(struct us428ctls_sharedmem), GFP_KERNEL)))
			return -ENOMEM;
		memset(us428->us428ctls_sharedmem, -1, sizeof(struct us428ctls_sharedmem));
		us428->us428ctls_sharedmem->CtlSnapShotLast = -2;
	}
	area->vm_ops = &us428ctls_vm_ops;
	area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
	area->vm_private_data = hw->private_data;
	return 0;
}

/*
 * hwdep poll callback: readable (POLLIN) when a new control snapshot has
 * been produced but not yet consumed; POLLHUP once the chip is gone.
 */
static unsigned int snd_us428ctls_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait)
{
	unsigned int	mask = 0;
	struct usX2Ydev	*us428 = hw->private_data;
	struct us428ctls_sharedmem *shm = us428->us428ctls_sharedmem;
	if (us428->chip_status & USX2Y_STAT_CHIP_HUP)
		return POLLHUP;

	poll_wait(file, &us428->us428ctls_wait_queue_head, wait);

	/* new data when the last-written snapshot differs from the last-read one */
	if (shm != NULL && shm->CtlSnapShotLast != shm->CtlSnapShotRed)
		mask |= POLLIN;

	return mask;
}

/*
 * hwdep dsp_status callback: report the model id string, the number of
 * DSP images the loader must supply, and whether the chip is up.
 */
static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw,
				      struct snd_hwdep_dsp_status *info)
{
	static char *type_ids[USX2Y_TYPE_NUMS] = {
		[USX2Y_TYPE_122] = "us122",
		[USX2Y_TYPE_224] = "us224",
		[USX2Y_TYPE_428] = "us428",
	};
	struct usX2Ydev	*us428 = hw->private_data;
	int id = -1;

	switch (le16_to_cpu(us428->chip.dev->descriptor.idProduct)) {
	case USB_ID_US122:
		id = USX2Y_TYPE_122;
		break;
	case USB_ID_US224:
		id = USX2Y_TYPE_224;
		break;
	case USB_ID_US428:
		id = USX2Y_TYPE_428;
		break;
	}
	if (0 > id)
		return -ENODEV;
	strcpy(info->id, type_ids[id]);
	info->num_dsps = 2;		// 0: Prepad Data, 1: FPGA Code
	if (us428->chip_status & USX2Y_STAT_CHIP_INIT)
		info->chip_ready = 1;
	info->version = USX2Y_DRIVER_VERSION;
	return 0;
}

/*
 * Register the rawmidi interface via the usb-audio quirk mechanism.
 * The US-428 gets the two-cable quirk; US-122/US-224 get the one-cable
 * variant.
 */
static int usX2Y_create_usbmidi(struct snd_card *card)
{
	static struct snd_usb_midi_endpoint_info quirk_data_1 = {
		.out_ep = 0x06,
		.in_ep = 0x06,
		.out_cables =	0x001,
		.in_cables =	0x001
	};
	static struct snd_usb_audio_quirk quirk_1 = {
		.vendor_name =	"TASCAM",
		.product_name =	NAME_ALLCAPS,
		.ifnum = 	0,
		.type = QUIRK_MIDI_FIXED_ENDPOINT,
		.data = &quirk_data_1
	};
	static struct snd_usb_midi_endpoint_info quirk_data_2 = {
		.out_ep = 0x06,
		.in_ep = 0x06,
		.out_cables =	0x003,
		.in_cables =	0x003
	};
	static struct snd_usb_audio_quirk quirk_2 = {
		.vendor_name =	"TASCAM",
		.product_name =	"US428",
		.ifnum = 	0,
		.type = QUIRK_MIDI_FIXED_ENDPOINT,
		.data = &quirk_data_2
	};
	struct usb_device *dev = usX2Y(card)->chip.dev;
	struct usb_interface *iface = usb_ifnum_to_if(dev, 0);
	struct snd_usb_audio_quirk *quirk =
		le16_to_cpu(dev->descriptor.idProduct) == USB_ID_US428 ?
		&quirk_2 : &quirk_1;

	snd_printdd("usX2Y_create_usbmidi \n");
	return snd_usb_create_midi_interface(&usX2Y(card)->chip, iface, quirk);
}

/*
 * Bring up all ALSA sub-devices (midi, audio, hwdep-pcm) and register the
 * card; run after the FPGA image has been loaded.  Returns 0 or the first
 * error encountered.
 */
static int usX2Y_create_alsa_devices(struct snd_card *card)
{
	int err;

	do {
		if ((err = usX2Y_create_usbmidi(card)) < 0) {
			snd_printk(KERN_ERR "usX2Y_create_alsa_devices: usX2Y_create_usbmidi error %i \n", err);
			break;
		}
		if ((err = usX2Y_audio_create(card)) < 0)
			break;
		if ((err = usX2Y_hwdep_pcm_new(card)) < 0)
			break;
		if ((err = snd_card_register(card)) < 0)
			break;
	} while (0);

	return err;
}

/*
 * hwdep dsp_load callback: bulk-transfer a firmware image to the device.
 * Image index 0 is prepad data; index 1 is the FPGA code, after which the
 * URB machinery is initialised and the ALSA devices are created.
 */
static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
				    struct snd_hwdep_dsp_image *dsp)
{
	struct usX2Ydev *priv = hw->private_data;
	int	lret, err = -EINVAL;
	snd_printdd( "dsp_load %s\n", dsp->name);

	if (access_ok(VERIFY_READ, dsp->image, dsp->length)) {
		struct usb_device* dev = priv->chip.dev;
		char *buf;

		/* copy the user-space image into a kernel bounce buffer */
		buf = memdup_user(dsp->image, dsp->length);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		err = usb_set_interface(dev, 0, 1);
		if (err)
			snd_printk(KERN_ERR "usb_set_interface error \n");
		else
			err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000);
		kfree(buf);
	}
	if (err)
		return err;
	if (dsp->index == 1) {
		msleep(250);				// give the device some time
		err = usX2Y_AsyncSeq04_init(priv);
		if (err) {
			snd_printk(KERN_ERR "usX2Y_AsyncSeq04_init error \n");
			return err;
		}
		err = usX2Y_In04_init(priv);
		if (err) {
			snd_printk(KERN_ERR "usX2Y_In04_init error \n");
			return err;
		}
		err = usX2Y_create_alsa_devices(hw->card);
		if (err) {
			snd_printk(KERN_ERR "usX2Y_create_alsa_devices error %i \n", err);
			/* NOTE(review): freeing the card here while the USB
			   core still owns it looks like a potential double
			   free with the disconnect path -- confirm against
			   the caller's error handling */
			snd_card_free(hw->card);
			return err;
		}
		priv->chip_status |= USX2Y_STAT_CHIP_INIT;
		snd_printdd("%s: alsa all started\n", hw->name);
	}
	return err;
}

/*
 * Create the hwdep device used for firmware download and (later) for the
 * mmap/poll control interface.  Called from the card probe code.
 */
int usX2Y_hwdep_new(struct snd_card *card, struct usb_device* device)
{
	int err;
	struct snd_hwdep *hw;

	if ((err = snd_hwdep_new(card, SND_USX2Y_LOADER_ID, 0, &hw)) < 0)
		return err;

	hw->iface = SNDRV_HWDEP_IFACE_USX2Y;
	hw->private_data = usX2Y(card);
	hw->ops.dsp_status = snd_usX2Y_hwdep_dsp_status;
	hw->ops.dsp_load = snd_usX2Y_hwdep_dsp_load;
	hw->ops.mmap = snd_us428ctls_mmap;
	hw->ops.poll = snd_us428ctls_poll;
	hw->exclusive = 1;
	sprintf(hw->name, "/proc/bus/usb/%03d/%03d", device->bus->busnum, device->devnum);
	return 0;
}
gpl-2.0
cocafe/i9070_kernel_CoCore_Refresh
kernel/trace/ftrace.c
482
73070
/* * Infrastructure for profiling code inserted by 'gcc -pg'. * * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com> * * Originally ported from the -rt patch by: * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> * * Based on code in the latency_tracer, that is: * * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 William Lee Irwin III */ #include <linux/stop_machine.h> #include <linux/clocksource.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> #include <linux/suspend.h> #include <linux/debugfs.h> #include <linux/hardirq.h> #include <linux/kthread.h> #include <linux/uaccess.h> #include <linux/ftrace.h> #include <linux/sysctl.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/list.h> #include <linux/hash.h> #include <linux/rcupdate.h> #include <trace/events/sched.h> #include <asm/ftrace.h> #include <asm/setup.h> #include "trace_output.h" #include "trace_stat.h" #define FTRACE_WARN_ON(cond) \ do { \ if (WARN_ON(cond)) \ ftrace_kill(); \ } while (0) #define FTRACE_WARN_ON_ONCE(cond) \ do { \ if (WARN_ON_ONCE(cond)) \ ftrace_kill(); \ } while (0) /* hash bits for specific function selection */ #define FTRACE_HASH_BITS 7 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) /* ftrace_enabled is a method to turn ftrace on or off */ int ftrace_enabled __read_mostly; static int last_ftrace_enabled; /* Quick disabling of function tracer. */ int function_trace_stop; /* List for set_ftrace_pid's pids. */ LIST_HEAD(ftrace_pids); struct ftrace_pid { struct list_head list; struct pid *pid; }; /* * ftrace_disabled is set when an anomaly is discovered. * ftrace_disabled is much stronger than ftrace_enabled. 
*/ static int ftrace_disabled __read_mostly; static DEFINE_MUTEX(ftrace_lock); static struct ftrace_ops ftrace_list_end __read_mostly = { .func = ftrace_stub, }; static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; /* * Traverse the ftrace_list, invoking all entries. The reason that we * can use rcu_dereference_raw() is that elements removed from this list * are simply leaked, so there is no need to interact with a grace-period * mechanism. The rcu_dereference_raw() calls are needed to handle * concurrent insertions into the ftrace_list. * * Silly Alpha and silly pointer-speculation compiler optimizations! */ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) { struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/ while (op != &ftrace_list_end) { op->func(ip, parent_ip); op = rcu_dereference_raw(op->next); /*see above*/ }; } static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip) { if (!test_tsk_trace_trace(current)) return; ftrace_pid_function(ip, parent_ip); } static void set_ftrace_pid_function(ftrace_func_t func) { /* do not set ftrace_pid_function to itself! */ if (func != ftrace_pid_func) ftrace_pid_function = func; } /** * clear_ftrace_function - reset the ftrace function * * This NULLs the ftrace function and in essence stops * tracing. There may be lag */ void clear_ftrace_function(void) { ftrace_trace_function = ftrace_stub; __ftrace_trace_function = ftrace_stub; ftrace_pid_function = ftrace_stub; } #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST /* * For those archs that do not test ftrace_trace_stop in their * mcount call site, we need to do it from C. 
*/ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) { if (function_trace_stop) return; __ftrace_trace_function(ip, parent_ip); } #endif static int __register_ftrace_function(struct ftrace_ops *ops) { ops->next = ftrace_list; /* * We are entering ops into the ftrace_list but another * CPU might be walking that list. We need to make sure * the ops->next pointer is valid before another CPU sees * the ops pointer included into the ftrace_list. */ rcu_assign_pointer(ftrace_list, ops); if (ftrace_enabled) { ftrace_func_t func; if (ops->next == &ftrace_list_end) func = ops->func; else func = ftrace_list_func; if (!list_empty(&ftrace_pids)) { set_ftrace_pid_function(func); func = ftrace_pid_func; } /* * For one func, simply call it directly. * For more than one func, call the chain. */ #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST ftrace_trace_function = func; #else __ftrace_trace_function = func; ftrace_trace_function = ftrace_test_stop_func; #endif } return 0; } static int __unregister_ftrace_function(struct ftrace_ops *ops) { struct ftrace_ops **p; /* * If we are removing the last function, then simply point * to the ftrace_stub. 
*/ if (ftrace_list == ops && ops->next == &ftrace_list_end) { ftrace_trace_function = ftrace_stub; ftrace_list = &ftrace_list_end; return 0; } for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) if (*p == ops) break; if (*p != ops) return -1; *p = (*p)->next; if (ftrace_enabled) { /* If we only have one func left, then call that directly */ if (ftrace_list->next == &ftrace_list_end) { ftrace_func_t func = ftrace_list->func; if (!list_empty(&ftrace_pids)) { set_ftrace_pid_function(func); func = ftrace_pid_func; } #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST ftrace_trace_function = func; #else __ftrace_trace_function = func; #endif } } return 0; } static void ftrace_update_pid_func(void) { ftrace_func_t func; if (ftrace_trace_function == ftrace_stub) return; #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST func = ftrace_trace_function; #else func = __ftrace_trace_function; #endif if (!list_empty(&ftrace_pids)) { set_ftrace_pid_function(func); func = ftrace_pid_func; } else { if (func == ftrace_pid_func) func = ftrace_pid_function; } #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST ftrace_trace_function = func; #else __ftrace_trace_function = func; #endif } #ifdef CONFIG_FUNCTION_PROFILER struct ftrace_profile { struct hlist_node node; unsigned long ip; unsigned long counter; #ifdef CONFIG_FUNCTION_GRAPH_TRACER unsigned long long time; unsigned long long time_squared; #endif }; struct ftrace_profile_page { struct ftrace_profile_page *next; unsigned long index; struct ftrace_profile records[]; }; struct ftrace_profile_stat { atomic_t disabled; struct hlist_head *hash; struct ftrace_profile_page *pages; struct ftrace_profile_page *start; struct tracer_stat stat; }; #define PROFILE_RECORDS_SIZE \ (PAGE_SIZE - offsetof(struct ftrace_profile_page, records)) #define PROFILES_PER_PAGE \ (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) static int ftrace_profile_bits __read_mostly; static int ftrace_profile_enabled __read_mostly; /* ftrace_profile_lock - 
synchronize the enable and disable of the profiler */ static DEFINE_MUTEX(ftrace_profile_lock); static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats); #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */ static void * function_stat_next(void *v, int idx) { struct ftrace_profile *rec = v; struct ftrace_profile_page *pg; pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); again: if (idx != 0) rec++; if ((void *)rec >= (void *)&pg->records[pg->index]) { pg = pg->next; if (!pg) return NULL; rec = &pg->records[0]; if (!rec->counter) goto again; } return rec; } static void *function_stat_start(struct tracer_stat *trace) { struct ftrace_profile_stat *stat = container_of(trace, struct ftrace_profile_stat, stat); if (!stat || !stat->start) return NULL; return function_stat_next(&stat->start->records[0], 0); } #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* function graph compares on total time */ static int function_stat_cmp(void *p1, void *p2) { struct ftrace_profile *a = p1; struct ftrace_profile *b = p2; if (a->time < b->time) return -1; if (a->time > b->time) return 1; else return 0; } #else /* not function graph compares against hits */ static int function_stat_cmp(void *p1, void *p2) { struct ftrace_profile *a = p1; struct ftrace_profile *b = p2; if (a->counter < b->counter) return -1; if (a->counter > b->counter) return 1; else return 0; } #endif static int function_stat_headers(struct seq_file *m) { #ifdef CONFIG_FUNCTION_GRAPH_TRACER seq_printf(m, " Function " "Hit Time Avg s^2\n" " -------- " "--- ---- --- ---\n"); #else seq_printf(m, " Function Hit\n" " -------- ---\n"); #endif return 0; } static int function_stat_show(struct seq_file *m, void *v) { struct ftrace_profile *rec = v; char str[KSYM_SYMBOL_LEN]; int ret = 0; #ifdef CONFIG_FUNCTION_GRAPH_TRACER static struct trace_seq s; unsigned long long avg; unsigned long long stddev; #endif mutex_lock(&ftrace_profile_lock); /* we raced with function_profile_reset() */ if 
(unlikely(rec->counter == 0)) { ret = -EBUSY; goto out; } kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); seq_printf(m, " %-30.30s %10lu", str, rec->counter); #ifdef CONFIG_FUNCTION_GRAPH_TRACER seq_printf(m, " "); avg = rec->time; do_div(avg, rec->counter); /* Sample standard deviation (s^2) */ if (rec->counter <= 1) stddev = 0; else { stddev = rec->time_squared - rec->counter * avg * avg; /* * Divide only 1000 for ns^2 -> us^2 conversion. * trace_print_graph_duration will divide 1000 again. */ do_div(stddev, (rec->counter - 1) * 1000); } trace_seq_init(&s); trace_print_graph_duration(rec->time, &s); trace_seq_puts(&s, " "); trace_print_graph_duration(avg, &s); trace_seq_puts(&s, " "); trace_print_graph_duration(stddev, &s); trace_print_seq(m, &s); #endif seq_putc(m, '\n'); out: mutex_unlock(&ftrace_profile_lock); return ret; } static void ftrace_profile_reset(struct ftrace_profile_stat *stat) { struct ftrace_profile_page *pg; pg = stat->pages = stat->start; while (pg) { memset(pg->records, 0, PROFILE_RECORDS_SIZE); pg->index = 0; pg = pg->next; } memset(stat->hash, 0, FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); } int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) { struct ftrace_profile_page *pg; int functions; int pages; int i; /* If we already allocated, do nothing */ if (stat->pages) return 0; stat->pages = (void *)get_zeroed_page(GFP_KERNEL); if (!stat->pages) return -ENOMEM; #ifdef CONFIG_DYNAMIC_FTRACE functions = ftrace_update_tot_cnt; #else /* * We do not know the number of functions that exist because * dynamic tracing is what counts them. With past experience * we have around 20K functions. That should be more than enough. * It is highly unlikely we will execute every function in * the kernel. 
*/ functions = 20000; #endif pg = stat->start = stat->pages; pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); for (i = 0; i < pages; i++) { pg->next = (void *)get_zeroed_page(GFP_KERNEL); if (!pg->next) goto out_free; pg = pg->next; } return 0; out_free: pg = stat->start; while (pg) { unsigned long tmp = (unsigned long)pg; pg = pg->next; free_page(tmp); } free_page((unsigned long)stat->pages); stat->pages = NULL; stat->start = NULL; return -ENOMEM; } static int ftrace_profile_init_cpu(int cpu) { struct ftrace_profile_stat *stat; int size; stat = &per_cpu(ftrace_profile_stats, cpu); if (stat->hash) { /* If the profile is already created, simply reset it */ ftrace_profile_reset(stat); return 0; } /* * We are profiling all functions, but usually only a few thousand * functions are hit. We'll make a hash of 1024 items. */ size = FTRACE_PROFILE_HASH_SIZE; stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL); if (!stat->hash) return -ENOMEM; if (!ftrace_profile_bits) { size--; for (; size; size >>= 1) ftrace_profile_bits++; } /* Preallocate the function profiling pages */ if (ftrace_profile_pages_init(stat) < 0) { kfree(stat->hash); stat->hash = NULL; return -ENOMEM; } return 0; } static int ftrace_profile_init(void) { int cpu; int ret = 0; for_each_online_cpu(cpu) { ret = ftrace_profile_init_cpu(cpu); if (ret) break; } return ret; } /* interrupts must be disabled */ static struct ftrace_profile * ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) { struct ftrace_profile *rec; struct hlist_head *hhd; struct hlist_node *n; unsigned long key; key = hash_long(ip, ftrace_profile_bits); hhd = &stat->hash[key]; if (hlist_empty(hhd)) return NULL; hlist_for_each_entry_rcu(rec, n, hhd, node) { if (rec->ip == ip) return rec; } return NULL; } static void ftrace_add_profile(struct ftrace_profile_stat *stat, struct ftrace_profile *rec) { unsigned long key; key = hash_long(rec->ip, ftrace_profile_bits); hlist_add_head_rcu(&rec->node, 
&stat->hash[key]); } /* * The memory is already allocated, this simply finds a new record to use. */ static struct ftrace_profile * ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) { struct ftrace_profile *rec = NULL; /* prevent recursion (from NMIs) */ if (atomic_inc_return(&stat->disabled) != 1) goto out; /* * Try to find the function again since an NMI * could have added it */ rec = ftrace_find_profiled_func(stat, ip); if (rec) goto out; if (stat->pages->index == PROFILES_PER_PAGE) { if (!stat->pages->next) goto out; stat->pages = stat->pages->next; } rec = &stat->pages->records[stat->pages->index++]; rec->ip = ip; ftrace_add_profile(stat, rec); out: atomic_dec(&stat->disabled); return rec; } static void function_profile_call(unsigned long ip, unsigned long parent_ip) { struct ftrace_profile_stat *stat; struct ftrace_profile *rec; unsigned long flags; if (!ftrace_profile_enabled) return; local_irq_save(flags); stat = &__get_cpu_var(ftrace_profile_stats); if (!stat->hash || !ftrace_profile_enabled) goto out; rec = ftrace_find_profiled_func(stat, ip); if (!rec) { rec = ftrace_profile_alloc(stat, ip); if (!rec) goto out; } rec->counter++; out: local_irq_restore(flags); } #ifdef CONFIG_FUNCTION_GRAPH_TRACER static int profile_graph_entry(struct ftrace_graph_ent *trace) { function_profile_call(trace->func, 0); return 1; } static void profile_graph_return(struct ftrace_graph_ret *trace) { struct ftrace_profile_stat *stat; unsigned long long calltime; struct ftrace_profile *rec; unsigned long flags; local_irq_save(flags); stat = &__get_cpu_var(ftrace_profile_stats); if (!stat->hash || !ftrace_profile_enabled) goto out; /* If the calltime was zero'd ignore it */ if (!trace->calltime) goto out; calltime = trace->rettime - trace->calltime; if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) { int index; index = trace->depth; /* Append this call time to the parent time to subtract */ if (index) current->ret_stack[index - 1].subtime += calltime; if 
(current->ret_stack[index].subtime < calltime) calltime -= current->ret_stack[index].subtime; else calltime = 0; } rec = ftrace_find_profiled_func(stat, trace->func); if (rec) { rec->time += calltime; rec->time_squared += calltime * calltime; } out: local_irq_restore(flags); } static int register_ftrace_profiler(void) { return register_ftrace_graph(&profile_graph_return, &profile_graph_entry); } static void unregister_ftrace_profiler(void) { unregister_ftrace_graph(); } #else static struct ftrace_ops ftrace_profile_ops __read_mostly = { .func = function_profile_call, }; static int register_ftrace_profiler(void) { return register_ftrace_function(&ftrace_profile_ops); } static void unregister_ftrace_profiler(void) { unregister_ftrace_function(&ftrace_profile_ops); } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ static ssize_t ftrace_profile_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long val; char buf[64]; /* big enough to hold a number */ int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = strict_strtoul(buf, 10, &val); if (ret < 0) return ret; val = !!val; mutex_lock(&ftrace_profile_lock); if (ftrace_profile_enabled ^ val) { if (val) { ret = ftrace_profile_init(); if (ret < 0) { cnt = ret; goto out; } ret = register_ftrace_profiler(); if (ret < 0) { cnt = ret; goto out; } ftrace_profile_enabled = 1; } else { ftrace_profile_enabled = 0; /* * unregister_ftrace_profiler calls stop_machine * so this acts like an synchronize_sched. 
*/ unregister_ftrace_profiler(); } } out: mutex_unlock(&ftrace_profile_lock); *ppos += cnt; return cnt; } static ssize_t ftrace_profile_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; /* big enough to hold a number */ int r; r = sprintf(buf, "%u\n", ftrace_profile_enabled); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static const struct file_operations ftrace_profile_fops = { .open = tracing_open_generic, .read = ftrace_profile_read, .write = ftrace_profile_write, }; /* used to initialize the real stat files */ static struct tracer_stat function_stats __initdata = { .name = "functions", .stat_start = function_stat_start, .stat_next = function_stat_next, .stat_cmp = function_stat_cmp, .stat_headers = function_stat_headers, .stat_show = function_stat_show }; static __init void ftrace_profile_debugfs(struct dentry *d_tracer) { struct ftrace_profile_stat *stat; struct dentry *entry; char *name; int ret; int cpu; for_each_possible_cpu(cpu) { stat = &per_cpu(ftrace_profile_stats, cpu); /* allocate enough for function name + cpu number */ name = kmalloc(32, GFP_KERNEL); if (!name) { /* * The files created are permanent, if something happens * we still do not free memory. 
*/ WARN(1, "Could not allocate stat file for cpu %d\n", cpu); return; } stat->stat = function_stats; snprintf(name, 32, "function%d", cpu); stat->stat.name = name; ret = register_stat_tracer(&stat->stat); if (ret) { WARN(1, "Could not register function stat for cpu %d\n", cpu); kfree(name); return; } } entry = debugfs_create_file("function_profile_enabled", 0644, d_tracer, NULL, &ftrace_profile_fops); if (!entry) pr_warning("Could not create debugfs " "'function_profile_enabled' entry\n"); } #else /* CONFIG_FUNCTION_PROFILER */ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) { } #endif /* CONFIG_FUNCTION_PROFILER */ static struct pid * const ftrace_swapper_pid = &init_struct_pid; #ifdef CONFIG_DYNAMIC_FTRACE #ifndef CONFIG_FTRACE_MCOUNT_RECORD # error Dynamic ftrace depends on MCOUNT_RECORD #endif static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly; struct ftrace_func_probe { struct hlist_node node; struct ftrace_probe_ops *ops; unsigned long flags; unsigned long ip; void *data; struct rcu_head rcu; }; enum { FTRACE_ENABLE_CALLS = (1 << 0), FTRACE_DISABLE_CALLS = (1 << 1), FTRACE_UPDATE_TRACE_FUNC = (1 << 2), FTRACE_ENABLE_MCOUNT = (1 << 3), FTRACE_DISABLE_MCOUNT = (1 << 4), FTRACE_START_FUNC_RET = (1 << 5), FTRACE_STOP_FUNC_RET = (1 << 6), }; static int ftrace_filtered; static struct dyn_ftrace *ftrace_new_addrs; static DEFINE_MUTEX(ftrace_regex_lock); struct ftrace_page { struct ftrace_page *next; int index; struct dyn_ftrace records[]; }; #define ENTRIES_PER_PAGE \ ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace)) /* estimate from running different kernels */ #define NR_TO_INIT 10000 static struct ftrace_page *ftrace_pages_start; static struct ftrace_page *ftrace_pages; static struct dyn_ftrace *ftrace_free_records; /* * This is a double for. Do not use 'break' to break out of the loop, * you must use a goto. 
*/ #define do_for_each_ftrace_rec(pg, rec) \ for (pg = ftrace_pages_start; pg; pg = pg->next) { \ int _____i; \ for (_____i = 0; _____i < pg->index; _____i++) { \ rec = &pg->records[_____i]; #define while_for_each_ftrace_rec() \ } \ } static void ftrace_free_rec(struct dyn_ftrace *rec) { rec->freelist = ftrace_free_records; ftrace_free_records = rec; rec->flags |= FTRACE_FL_FREE; } static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) { struct dyn_ftrace *rec; /* First check for freed records */ if (ftrace_free_records) { rec = ftrace_free_records; if (unlikely(!(rec->flags & FTRACE_FL_FREE))) { FTRACE_WARN_ON_ONCE(1); ftrace_free_records = NULL; return NULL; } ftrace_free_records = rec->freelist; memset(rec, 0, sizeof(*rec)); return rec; } if (ftrace_pages->index == ENTRIES_PER_PAGE) { if (!ftrace_pages->next) { /* allocate another page */ ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL); if (!ftrace_pages->next) return NULL; } ftrace_pages = ftrace_pages->next; } return &ftrace_pages->records[ftrace_pages->index++]; } static struct dyn_ftrace * ftrace_record_ip(unsigned long ip) { struct dyn_ftrace *rec; if (ftrace_disabled) return NULL; rec = ftrace_alloc_dyn_node(ip); if (!rec) return NULL; rec->ip = ip; rec->newlist = ftrace_new_addrs; ftrace_new_addrs = rec; return rec; } static void print_ip_ins(const char *fmt, unsigned char *p) { int i; printk(KERN_CONT "%s", fmt); for (i = 0; i < MCOUNT_INSN_SIZE; i++) printk(KERN_CONT "%s%02x", i ? 
":" : "", p[i]); } static void ftrace_bug(int failed, unsigned long ip) { switch (failed) { case -EFAULT: FTRACE_WARN_ON_ONCE(1); pr_info("ftrace faulted on modifying "); print_ip_sym(ip); break; case -EINVAL: FTRACE_WARN_ON_ONCE(1); pr_info("ftrace failed to modify "); print_ip_sym(ip); print_ip_ins(" actual: ", (unsigned char *)ip); printk(KERN_CONT "\n"); break; case -EPERM: FTRACE_WARN_ON_ONCE(1); pr_info("ftrace faulted on writing "); print_ip_sym(ip); break; default: FTRACE_WARN_ON_ONCE(1); pr_info("ftrace faulted on unknown error "); print_ip_sym(ip); } } /* Return 1 if the address range is reserved for ftrace */ int ftrace_text_reserved(void *start, void *end) { struct dyn_ftrace *rec; struct ftrace_page *pg; do_for_each_ftrace_rec(pg, rec) { if (rec->ip <= (unsigned long)end && rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start) return 1; } while_for_each_ftrace_rec(); return 0; } static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable) { unsigned long ftrace_addr; unsigned long flag = 0UL; ftrace_addr = (unsigned long)FTRACE_ADDR; /* * If this record is not to be traced or we want to disable it, * then disable it. * * If we want to enable it and filtering is off, then enable it. * * If we want to enable it and filtering is on, enable it only if * it's filtered */ if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) { if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER)) flag = FTRACE_FL_ENABLED; } /* If the state of this record hasn't changed, then do nothing */ if ((rec->flags & FTRACE_FL_ENABLED) == flag) return 0; if (flag) { rec->flags |= FTRACE_FL_ENABLED; return ftrace_make_call(rec, ftrace_addr); } rec->flags &= ~FTRACE_FL_ENABLED; return ftrace_make_nop(NULL, rec, ftrace_addr); } static void ftrace_replace_code(int enable) { struct dyn_ftrace *rec; struct ftrace_page *pg; int failed; do_for_each_ftrace_rec(pg, rec) { /* * Skip over free records, records that have * failed and not converted. 
*/ if (rec->flags & FTRACE_FL_FREE || rec->flags & FTRACE_FL_FAILED || !(rec->flags & FTRACE_FL_CONVERTED)) continue; failed = __ftrace_replace_code(rec, enable); if (failed) { rec->flags |= FTRACE_FL_FAILED; ftrace_bug(failed, rec->ip); /* Stop processing */ return; } } while_for_each_ftrace_rec(); } static int ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) { unsigned long ip; int ret; ip = rec->ip; ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); if (ret) { ftrace_bug(ret, ip); rec->flags |= FTRACE_FL_FAILED; return 0; } return 1; } /* * archs can override this function if they must do something * before the modifying code is performed. */ int __weak ftrace_arch_code_modify_prepare(void) { return 0; } /* * archs can override this function if they must do something * after the modifying code is performed. */ int __weak ftrace_arch_code_modify_post_process(void) { return 0; } static int __ftrace_modify_code(void *data) { int *command = data; if (*command & FTRACE_ENABLE_CALLS) ftrace_replace_code(1); else if (*command & FTRACE_DISABLE_CALLS) ftrace_replace_code(0); if (*command & FTRACE_UPDATE_TRACE_FUNC) ftrace_update_ftrace_func(ftrace_trace_function); if (*command & FTRACE_START_FUNC_RET) ftrace_enable_ftrace_graph_caller(); else if (*command & FTRACE_STOP_FUNC_RET) ftrace_disable_ftrace_graph_caller(); return 0; } static void ftrace_run_update_code(int command) { int ret; ret = ftrace_arch_code_modify_prepare(); FTRACE_WARN_ON(ret); if (ret) return; stop_machine(__ftrace_modify_code, &command, NULL); ret = ftrace_arch_code_modify_post_process(); FTRACE_WARN_ON(ret); } static ftrace_func_t saved_ftrace_func; static int ftrace_start_up; static void ftrace_startup_enable(int command) { if (saved_ftrace_func != ftrace_trace_function) { saved_ftrace_func = ftrace_trace_function; command |= FTRACE_UPDATE_TRACE_FUNC; } if (!command || !ftrace_enabled) return; ftrace_run_update_code(command); } static void ftrace_startup(int command) { if 
(unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;
	ftrace_startup_enable(command);
}

/* Drop one ftrace user; when the count reaches zero, nop all call sites. */
static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	/* pick up a trace callback that changed while we were running */
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

/* Re-arm tracing when the ftrace_enabled sysctl is switched back on. */
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}

/* Quiesce tracing when the ftrace_enabled sysctl is switched off. */
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

/*
 * Convert every newly discovered mcount site (the ftrace_new_addrs list)
 * into a NOP, accounting the time and count.  Returns 0 on success, -1 if
 * ftrace was disabled part way through.
 */
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;
		p->flags = 0L;

		/*
		 * Do the initial record convertion from mcount jump
		 * to the NOP instructions.
*/ if (!ftrace_code_disable(mod, p)) { ftrace_free_rec(p); continue; } p->flags |= FTRACE_FL_CONVERTED; ftrace_update_cnt++; /* * If the tracing is enabled, go ahead and enable the record. * * The reason not to enable the record immediatelly is the * inherent check of ftrace_make_nop/ftrace_make_call for * correct previous instructions. Making first the NOP * conversion puts the module to the correct state, thus * passing the ftrace_make_call check. */ if (ftrace_start_up) { int failed = __ftrace_replace_code(p, 1); if (failed) { ftrace_bug(failed, p->ip); ftrace_free_rec(p); } } } stop = ftrace_now(raw_smp_processor_id()); ftrace_update_time = stop - start; ftrace_update_tot_cnt += ftrace_update_cnt; return 0; } static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) { struct ftrace_page *pg; int cnt; int i; /* allocate a few pages */ ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL); if (!ftrace_pages_start) return -1; /* * Allocate a few more pages. * * TODO: have some parser search vmlinux before * final linking to find all calls to ftrace. * Then we can: * a) know how many pages to allocate. * and/or * b) set up the table then. * * The dynamic code is still necessary for * modules. 
*/ pg = ftrace_pages = ftrace_pages_start; cnt = num_to_init / ENTRIES_PER_PAGE; pr_info("ftrace: allocating %ld entries in %d pages\n", num_to_init, cnt + 1); for (i = 0; i < cnt; i++) { pg->next = (void *)get_zeroed_page(GFP_KERNEL); /* If we fail, we'll try later anyway */ if (!pg->next) break; pg = pg->next; } return 0; } enum { FTRACE_ITER_FILTER = (1 << 0), FTRACE_ITER_NOTRACE = (1 << 1), FTRACE_ITER_FAILURES = (1 << 2), FTRACE_ITER_PRINTALL = (1 << 3), FTRACE_ITER_HASH = (1 << 4), }; #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ struct ftrace_iterator { struct ftrace_page *pg; int hidx; int idx; unsigned flags; struct trace_parser parser; }; static void * t_hash_next(struct seq_file *m, void *v, loff_t *pos) { struct ftrace_iterator *iter = m->private; struct hlist_node *hnd = v; struct hlist_head *hhd; WARN_ON(!(iter->flags & FTRACE_ITER_HASH)); (*pos)++; retry: if (iter->hidx >= FTRACE_FUNC_HASHSIZE) return NULL; hhd = &ftrace_func_hash[iter->hidx]; if (hlist_empty(hhd)) { iter->hidx++; hnd = NULL; goto retry; } if (!hnd) hnd = hhd->first; else { hnd = hnd->next; if (!hnd) { iter->hidx++; goto retry; } } return hnd; } static void *t_hash_start(struct seq_file *m, loff_t *pos) { struct ftrace_iterator *iter = m->private; void *p = NULL; loff_t l; if (!(iter->flags & FTRACE_ITER_HASH)) *pos = 0; iter->flags |= FTRACE_ITER_HASH; iter->hidx = 0; for (l = 0; l <= *pos; ) { p = t_hash_next(m, p, &l); if (!p) break; } return p; } static int t_hash_show(struct seq_file *m, void *v) { struct ftrace_func_probe *rec; struct hlist_node *hnd = v; rec = hlist_entry(hnd, struct ftrace_func_probe, node); if (rec->ops->print) return rec->ops->print(m, rec->ip, rec->ops, rec->data); seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func); if (rec->data) seq_printf(m, ":%p", rec->data); seq_putc(m, '\n'); return 0; } static void * t_next(struct seq_file *m, void *v, loff_t *pos) { struct ftrace_iterator *iter = m->private; struct dyn_ftrace 
*rec = NULL; if (iter->flags & FTRACE_ITER_HASH) return t_hash_next(m, v, pos); (*pos)++; if (iter->flags & FTRACE_ITER_PRINTALL) return NULL; retry: if (iter->idx >= iter->pg->index) { if (iter->pg->next) { iter->pg = iter->pg->next; iter->idx = 0; goto retry; } } else { rec = &iter->pg->records[iter->idx++]; if ((rec->flags & FTRACE_FL_FREE) || (!(iter->flags & FTRACE_ITER_FAILURES) && (rec->flags & FTRACE_FL_FAILED)) || ((iter->flags & FTRACE_ITER_FAILURES) && !(rec->flags & FTRACE_FL_FAILED)) || ((iter->flags & FTRACE_ITER_FILTER) && !(rec->flags & FTRACE_FL_FILTER)) || ((iter->flags & FTRACE_ITER_NOTRACE) && !(rec->flags & FTRACE_FL_NOTRACE))) { rec = NULL; goto retry; } } return rec; } static void *t_start(struct seq_file *m, loff_t *pos) { struct ftrace_iterator *iter = m->private; void *p = NULL; loff_t l; mutex_lock(&ftrace_lock); /* * For set_ftrace_filter reading, if we have the filter * off, we can short cut and just print out that all * functions are enabled. */ if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) { if (*pos > 0) return t_hash_start(m, pos); iter->flags |= FTRACE_ITER_PRINTALL; /* reset in case of seek/pread */ iter->flags &= ~FTRACE_ITER_HASH; return iter; } if (iter->flags & FTRACE_ITER_HASH) return t_hash_start(m, pos); iter->pg = ftrace_pages_start; iter->idx = 0; for (l = 0; l <= *pos; ) { p = t_next(m, p, &l); if (!p) break; } if (!p && iter->flags & FTRACE_ITER_FILTER) return t_hash_start(m, pos); return p; } static void t_stop(struct seq_file *m, void *p) { mutex_unlock(&ftrace_lock); } static int t_show(struct seq_file *m, void *v) { struct ftrace_iterator *iter = m->private; struct dyn_ftrace *rec = v; if (iter->flags & FTRACE_ITER_HASH) return t_hash_show(m, v); if (iter->flags & FTRACE_ITER_PRINTALL) { seq_printf(m, "#### all functions enabled ####\n"); return 0; } if (!rec) return 0; seq_printf(m, "%ps\n", (void *)rec->ip); return 0; } static const struct seq_operations show_ftrace_seq_ops = { .start = t_start, .next 
= t_next, .stop = t_stop, .show = t_show, }; static int ftrace_avail_open(struct inode *inode, struct file *file) { struct ftrace_iterator *iter; int ret; if (unlikely(ftrace_disabled)) return -ENODEV; iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) return -ENOMEM; iter->pg = ftrace_pages_start; ret = seq_open(file, &show_ftrace_seq_ops); if (!ret) { struct seq_file *m = file->private_data; m->private = iter; } else { kfree(iter); } return ret; } static int ftrace_failures_open(struct inode *inode, struct file *file) { int ret; struct seq_file *m; struct ftrace_iterator *iter; ret = ftrace_avail_open(inode, file); if (!ret) { m = (struct seq_file *)file->private_data; iter = (struct ftrace_iterator *)m->private; iter->flags = FTRACE_ITER_FAILURES; } return ret; } static void ftrace_filter_reset(int enable) { struct ftrace_page *pg; struct dyn_ftrace *rec; unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; mutex_lock(&ftrace_lock); if (enable) ftrace_filtered = 0; do_for_each_ftrace_rec(pg, rec) { if (rec->flags & FTRACE_FL_FAILED) continue; rec->flags &= ~type; } while_for_each_ftrace_rec(); mutex_unlock(&ftrace_lock); } static int ftrace_regex_open(struct inode *inode, struct file *file, int enable) { struct ftrace_iterator *iter; int ret = 0; if (unlikely(ftrace_disabled)) return -ENODEV; iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) return -ENOMEM; if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) { kfree(iter); return -ENOMEM; } mutex_lock(&ftrace_regex_lock); if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) ftrace_filter_reset(enable); if (file->f_mode & FMODE_READ) { iter->pg = ftrace_pages_start; iter->flags = enable ? 
FTRACE_ITER_FILTER : FTRACE_ITER_NOTRACE; ret = seq_open(file, &show_ftrace_seq_ops); if (!ret) { struct seq_file *m = file->private_data; m->private = iter; } else { trace_parser_put(&iter->parser); kfree(iter); } } else file->private_data = iter; mutex_unlock(&ftrace_regex_lock); return ret; } static int ftrace_filter_open(struct inode *inode, struct file *file) { return ftrace_regex_open(inode, file, 1); } static int ftrace_notrace_open(struct inode *inode, struct file *file) { return ftrace_regex_open(inode, file, 0); } static loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin) { loff_t ret; if (file->f_mode & FMODE_READ) ret = seq_lseek(file, offset, origin); else file->f_pos = ret = 1; return ret; } static int ftrace_match(char *str, char *regex, int len, int type) { int matched = 0; int slen; switch (type) { case MATCH_FULL: if (strcmp(str, regex) == 0) matched = 1; break; case MATCH_FRONT_ONLY: if (strncmp(str, regex, len) == 0) matched = 1; break; case MATCH_MIDDLE_ONLY: if (strstr(str, regex)) matched = 1; break; case MATCH_END_ONLY: slen = strlen(str); if (slen >= len && memcmp(str + slen - len, regex, len) == 0) matched = 1; break; } return matched; } static int ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) { char str[KSYM_SYMBOL_LEN]; kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); return ftrace_match(str, regex, len, type); } static int ftrace_match_records(char *buff, int len, int enable) { unsigned int search_len; struct ftrace_page *pg; struct dyn_ftrace *rec; unsigned long flag; char *search; int type; int not; int found = 0; flag = enable ? 
FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; type = filter_parse_regex(buff, len, &search, &not); search_len = strlen(search); mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { if (rec->flags & FTRACE_FL_FAILED) continue; if (ftrace_match_record(rec, search, search_len, type)) { if (not) rec->flags &= ~flag; else rec->flags |= flag; found = 1; } /* * Only enable filtering if we have a function that * is filtered on. */ if (enable && (rec->flags & FTRACE_FL_FILTER)) ftrace_filtered = 1; } while_for_each_ftrace_rec(); mutex_unlock(&ftrace_lock); return found; } static int ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, char *regex, int len, int type) { char str[KSYM_SYMBOL_LEN]; char *modname; kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); if (!modname || strcmp(modname, mod)) return 0; /* blank search means to match all funcs in the mod */ if (len) return ftrace_match(str, regex, len, type); else return 1; } static int ftrace_match_module_records(char *buff, char *mod, int enable) { unsigned search_len = 0; struct ftrace_page *pg; struct dyn_ftrace *rec; int type = MATCH_FULL; char *search = buff; unsigned long flag; int not = 0; int found = 0; flag = enable ? 
FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; /* blank or '*' mean the same */ if (strcmp(buff, "*") == 0) buff[0] = 0; /* handle the case of 'dont filter this module' */ if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) { buff[0] = 0; not = 1; } if (strlen(buff)) { type = filter_parse_regex(buff, strlen(buff), &search, &not); search_len = strlen(search); } mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { if (rec->flags & FTRACE_FL_FAILED) continue; if (ftrace_match_module_record(rec, mod, search, search_len, type)) { if (not) rec->flags &= ~flag; else rec->flags |= flag; found = 1; } if (enable && (rec->flags & FTRACE_FL_FILTER)) ftrace_filtered = 1; } while_for_each_ftrace_rec(); mutex_unlock(&ftrace_lock); return found; } /* * We register the module command as a template to show others how * to register the a command as well. */ static int ftrace_mod_callback(char *func, char *cmd, char *param, int enable) { char *mod; /* * cmd == 'mod' because we only registered this func * for the 'mod' ftrace_func_command. * But if you register one func with multiple commands, * you can tell which command was used by the cmd * parameter. */ /* we must have a module name */ if (!param) return -EINVAL; mod = strsep(&param, ":"); if (!strlen(mod)) return -EINVAL; if (ftrace_match_module_records(func, mod, enable)) return 0; return -EINVAL; } static struct ftrace_func_command ftrace_mod_cmd = { .name = "mod", .func = ftrace_mod_callback, }; static int __init ftrace_mod_cmd_init(void) { return register_ftrace_command(&ftrace_mod_cmd); } device_initcall(ftrace_mod_cmd_init); static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip) { struct ftrace_func_probe *entry; struct hlist_head *hhd; struct hlist_node *n; unsigned long key; int resched; key = hash_long(ip, FTRACE_HASH_BITS); hhd = &ftrace_func_hash[key]; if (hlist_empty(hhd)) return; /* * Disable preemption for these calls to prevent a RCU grace * period. 
This syncs the hash iteration and freeing of items * on the hash. rcu_read_lock is too dangerous here. */ resched = ftrace_preempt_disable(); hlist_for_each_entry_rcu(entry, n, hhd, node) { if (entry->ip == ip) entry->ops->func(ip, parent_ip, &entry->data); } ftrace_preempt_enable(resched); } static struct ftrace_ops trace_probe_ops __read_mostly = { .func = function_trace_probe_call, }; static int ftrace_probe_registered; static void __enable_ftrace_function_probe(void) { int i; if (ftrace_probe_registered) return; for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { struct hlist_head *hhd = &ftrace_func_hash[i]; if (hhd->first) break; } /* Nothing registered? */ if (i == FTRACE_FUNC_HASHSIZE) return; __register_ftrace_function(&trace_probe_ops); ftrace_startup(0); ftrace_probe_registered = 1; } static void __disable_ftrace_function_probe(void) { int i; if (!ftrace_probe_registered) return; for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { struct hlist_head *hhd = &ftrace_func_hash[i]; if (hhd->first) return; } /* no more funcs left */ __unregister_ftrace_function(&trace_probe_ops); ftrace_shutdown(0); ftrace_probe_registered = 0; } static void ftrace_free_entry_rcu(struct rcu_head *rhp) { struct ftrace_func_probe *entry = container_of(rhp, struct ftrace_func_probe, rcu); if (entry->ops->free) entry->ops->free(&entry->data); kfree(entry); } int register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_probe *entry; struct ftrace_page *pg; struct dyn_ftrace *rec; int type, len, not; unsigned long key; int count = 0; char *search; type = filter_parse_regex(glob, strlen(glob), &search, &not); len = strlen(search); /* we do not support '!' 
for function probes */ if (WARN_ON(not)) return -EINVAL; mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { if (rec->flags & FTRACE_FL_FAILED) continue; if (!ftrace_match_record(rec, search, len, type)) continue; entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { /* If we did not process any, then return error */ if (!count) count = -ENOMEM; goto out_unlock; } count++; entry->data = data; /* * The caller might want to do something special * for each function we find. We call the callback * to give the caller an opportunity to do so. */ if (ops->callback) { if (ops->callback(rec->ip, &entry->data) < 0) { /* caller does not like this func */ kfree(entry); continue; } } entry->ops = ops; entry->ip = rec->ip; key = hash_long(entry->ip, FTRACE_HASH_BITS); hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]); } while_for_each_ftrace_rec(); __enable_ftrace_function_probe(); out_unlock: mutex_unlock(&ftrace_lock); return count; } enum { PROBE_TEST_FUNC = 1, PROBE_TEST_DATA = 2 }; static void __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, void *data, int flags) { struct ftrace_func_probe *entry; struct hlist_node *n, *tmp; char str[KSYM_SYMBOL_LEN]; int type = MATCH_FULL; int i, len = 0; char *search; if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) glob = NULL; else if (glob) { int not; type = filter_parse_regex(glob, strlen(glob), &search, &not); len = strlen(search); /* we do not support '!' 
for function probes */ if (WARN_ON(not)) return; } mutex_lock(&ftrace_lock); for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { struct hlist_head *hhd = &ftrace_func_hash[i]; hlist_for_each_entry_safe(entry, n, tmp, hhd, node) { /* break up if statements for readability */ if ((flags & PROBE_TEST_FUNC) && entry->ops != ops) continue; if ((flags & PROBE_TEST_DATA) && entry->data != data) continue; /* do this last, since it is the most expensive */ if (glob) { kallsyms_lookup(entry->ip, NULL, NULL, NULL, str); if (!ftrace_match(str, glob, len, type)) continue; } hlist_del(&entry->node); call_rcu(&entry->rcu, ftrace_free_entry_rcu); } } __disable_ftrace_function_probe(); mutex_unlock(&ftrace_lock); } void unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, void *data) { __unregister_ftrace_function_probe(glob, ops, data, PROBE_TEST_FUNC | PROBE_TEST_DATA); } void unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) { __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC); } void unregister_ftrace_function_probe_all(char *glob) { __unregister_ftrace_function_probe(glob, NULL, NULL, 0); } static LIST_HEAD(ftrace_commands); static DEFINE_MUTEX(ftrace_cmd_mutex); int register_ftrace_command(struct ftrace_func_command *cmd) { struct ftrace_func_command *p; int ret = 0; mutex_lock(&ftrace_cmd_mutex); list_for_each_entry(p, &ftrace_commands, list) { if (strcmp(cmd->name, p->name) == 0) { ret = -EBUSY; goto out_unlock; } } list_add(&cmd->list, &ftrace_commands); out_unlock: mutex_unlock(&ftrace_cmd_mutex); return ret; } int unregister_ftrace_command(struct ftrace_func_command *cmd) { struct ftrace_func_command *p, *n; int ret = -ENODEV; mutex_lock(&ftrace_cmd_mutex); list_for_each_entry_safe(p, n, &ftrace_commands, list) { if (strcmp(cmd->name, p->name) == 0) { ret = 0; list_del_init(&p->list); goto out_unlock; } } out_unlock: mutex_unlock(&ftrace_cmd_mutex); return ret; } static int ftrace_process_regex(char *buff, 
int len, int enable) { char *func, *command, *next = buff; struct ftrace_func_command *p; int ret = -EINVAL; func = strsep(&next, ":"); if (!next) { if (ftrace_match_records(func, len, enable)) return 0; return ret; } /* command found */ command = strsep(&next, ":"); mutex_lock(&ftrace_cmd_mutex); list_for_each_entry(p, &ftrace_commands, list) { if (strcmp(p->name, command) == 0) { ret = p->func(func, command, next, enable); goto out_unlock; } } out_unlock: mutex_unlock(&ftrace_cmd_mutex); return ret; } static ssize_t ftrace_regex_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos, int enable) { struct ftrace_iterator *iter; struct trace_parser *parser; ssize_t ret, read; if (!cnt) return 0; mutex_lock(&ftrace_regex_lock); if (file->f_mode & FMODE_READ) { struct seq_file *m = file->private_data; iter = m->private; } else iter = file->private_data; parser = &iter->parser; read = trace_get_user(parser, ubuf, cnt, ppos); if (read >= 0 && trace_parser_loaded(parser) && !trace_parser_cont(parser)) { ret = ftrace_process_regex(parser->buffer, parser->idx, enable); trace_parser_clear(parser); if (ret) goto out_unlock; } ret = read; out_unlock: mutex_unlock(&ftrace_regex_lock); return ret; } static ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { return ftrace_regex_write(file, ubuf, cnt, ppos, 1); } static ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { return ftrace_regex_write(file, ubuf, cnt, ppos, 0); } static void ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) { if (unlikely(ftrace_disabled)) return; mutex_lock(&ftrace_regex_lock); if (reset) ftrace_filter_reset(enable); if (buf) ftrace_match_records(buf, len, enable); mutex_unlock(&ftrace_regex_lock); } /** * ftrace_set_filter - set a function to filter on in ftrace * @buf - the string that holds the function filter text. * @len - the length of the string. 
* @reset - non zero to reset all filters before applying this filter. * * Filters denote which functions should be enabled when tracing is enabled. * If @buf is NULL and reset is set, all functions will be enabled for tracing. */ void ftrace_set_filter(unsigned char *buf, int len, int reset) { ftrace_set_regex(buf, len, reset, 1); } /** * ftrace_set_notrace - set a function to not trace in ftrace * @buf - the string that holds the function notrace text. * @len - the length of the string. * @reset - non zero to reset all filters before applying this filter. * * Notrace Filters denote which functions should not be enabled when tracing * is enabled. If @buf is NULL and reset is set, all functions will be enabled * for tracing. */ void ftrace_set_notrace(unsigned char *buf, int len, int reset) { ftrace_set_regex(buf, len, reset, 0); } /* * command line interface to allow users to set filters on boot up. */ #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; static int __init set_ftrace_notrace(char *str) { strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); return 1; } __setup("ftrace_notrace=", set_ftrace_notrace); static int __init set_ftrace_filter(char *str) { strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); return 1; } __setup("ftrace_filter=", set_ftrace_filter); #ifdef CONFIG_FUNCTION_GRAPH_TRACER static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; static int ftrace_set_func(unsigned long *array, int *idx, char *buffer); static int __init set_graph_function(char *str) { strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); return 1; } __setup("ftrace_graph_filter=", set_graph_function); static void __init set_ftrace_early_graph(char *buf) { int ret; char *func; while (buf) { func = strsep(&buf, ","); /* we allow only one expression at a time */ ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, func); if (ret) 
printk(KERN_DEBUG "ftrace: function %s not " "traceable\n", func); } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ static void __init set_ftrace_early_filter(char *buf, int enable) { char *func; while (buf) { func = strsep(&buf, ","); ftrace_set_regex(func, strlen(func), 0, enable); } } static void __init set_ftrace_early_filters(void) { if (ftrace_filter_buf[0]) set_ftrace_early_filter(ftrace_filter_buf, 1); if (ftrace_notrace_buf[0]) set_ftrace_early_filter(ftrace_notrace_buf, 0); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (ftrace_graph_buf[0]) set_ftrace_early_graph(ftrace_graph_buf); #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ } static int ftrace_regex_release(struct inode *inode, struct file *file, int enable) { struct seq_file *m = (struct seq_file *)file->private_data; struct ftrace_iterator *iter; struct trace_parser *parser; mutex_lock(&ftrace_regex_lock); if (file->f_mode & FMODE_READ) { iter = m->private; seq_release(inode, file); } else iter = file->private_data; parser = &iter->parser; if (trace_parser_loaded(parser)) { parser->buffer[parser->idx] = 0; ftrace_match_records(parser->buffer, parser->idx, enable); } mutex_lock(&ftrace_lock); if (ftrace_start_up && ftrace_enabled) ftrace_run_update_code(FTRACE_ENABLE_CALLS); mutex_unlock(&ftrace_lock); trace_parser_put(parser); kfree(iter); mutex_unlock(&ftrace_regex_lock); return 0; } static int ftrace_filter_release(struct inode *inode, struct file *file) { return ftrace_regex_release(inode, file, 1); } static int ftrace_notrace_release(struct inode *inode, struct file *file) { return ftrace_regex_release(inode, file, 0); } static const struct file_operations ftrace_avail_fops = { .open = ftrace_avail_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static const struct file_operations ftrace_failures_fops = { .open = ftrace_failures_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static const struct file_operations ftrace_filter_fops = { .open = 
ftrace_filter_open, .read = seq_read, .write = ftrace_filter_write, .llseek = no_llseek, .release = ftrace_filter_release, }; static const struct file_operations ftrace_notrace_fops = { .open = ftrace_notrace_open, .read = seq_read, .write = ftrace_notrace_write, .llseek = ftrace_regex_lseek, .release = ftrace_notrace_release, }; #ifdef CONFIG_FUNCTION_GRAPH_TRACER static DEFINE_MUTEX(graph_lock); int ftrace_graph_count; int ftrace_graph_filter_enabled; unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; static void * __g_next(struct seq_file *m, loff_t *pos) { if (*pos >= ftrace_graph_count) return NULL; return &ftrace_graph_funcs[*pos]; } static void * g_next(struct seq_file *m, void *v, loff_t *pos) { (*pos)++; return __g_next(m, pos); } static void *g_start(struct seq_file *m, loff_t *pos) { mutex_lock(&graph_lock); /* Nothing, tell g_show to print all functions are enabled */ if (!ftrace_graph_filter_enabled && !*pos) return (void *)1; return __g_next(m, pos); } static void g_stop(struct seq_file *m, void *p) { mutex_unlock(&graph_lock); } static int g_show(struct seq_file *m, void *v) { unsigned long *ptr = v; if (!ptr) return 0; if (ptr == (unsigned long *)1) { seq_printf(m, "#### all functions enabled ####\n"); return 0; } seq_printf(m, "%ps\n", (void *)*ptr); return 0; } static const struct seq_operations ftrace_graph_seq_ops = { .start = g_start, .next = g_next, .stop = g_stop, .show = g_show, }; static int ftrace_graph_open(struct inode *inode, struct file *file) { int ret = 0; if (unlikely(ftrace_disabled)) return -ENODEV; mutex_lock(&graph_lock); if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { ftrace_graph_filter_enabled = 0; ftrace_graph_count = 0; memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); } mutex_unlock(&graph_lock); if (file->f_mode & FMODE_READ) ret = seq_open(file, &ftrace_graph_seq_ops); return ret; } static int ftrace_graph_release(struct inode *inode, struct file *file) { if (file->f_mode & 
FMODE_READ) seq_release(inode, file); return 0; } static int ftrace_set_func(unsigned long *array, int *idx, char *buffer) { struct dyn_ftrace *rec; struct ftrace_page *pg; int search_len; int fail = 1; int type, not; char *search; bool exists; int i; if (ftrace_disabled) return -ENODEV; /* decode regex */ type = filter_parse_regex(buffer, strlen(buffer), &search, &not); if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS) return -EBUSY; search_len = strlen(search); mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) continue; if (ftrace_match_record(rec, search, search_len, type)) { /* if it is in the array */ exists = false; for (i = 0; i < *idx; i++) { if (array[i] == rec->ip) { exists = true; break; } } if (!not) { fail = 0; if (!exists) { array[(*idx)++] = rec->ip; if (*idx >= FTRACE_GRAPH_MAX_FUNCS) goto out; } } else { if (exists) { array[i] = array[--(*idx)]; array[*idx] = 0; fail = 0; } } } } while_for_each_ftrace_rec(); out: mutex_unlock(&ftrace_lock); if (fail) return -EINVAL; ftrace_graph_filter_enabled = 1; return 0; } static ssize_t ftrace_graph_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_parser parser; ssize_t read, ret; if (!cnt) return 0; mutex_lock(&graph_lock); if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) { ret = -ENOMEM; goto out_unlock; } read = trace_get_user(&parser, ubuf, cnt, ppos); if (read >= 0 && trace_parser_loaded((&parser))) { parser.buffer[parser.idx] = 0; /* we allow only one expression at a time */ ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, parser.buffer); if (ret) goto out_free; } ret = read; out_free: trace_parser_put(&parser); out_unlock: mutex_unlock(&graph_lock); return ret; } static const struct file_operations ftrace_graph_fops = { .open = ftrace_graph_open, .read = seq_read, .write = ftrace_graph_write, .release = ftrace_graph_release, }; #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ static __init int 
ftrace_init_dyn_debugfs(struct dentry *d_tracer) { trace_create_file("available_filter_functions", 0444, d_tracer, NULL, &ftrace_avail_fops); trace_create_file("failures", 0444, d_tracer, NULL, &ftrace_failures_fops); trace_create_file("set_ftrace_filter", 0644, d_tracer, NULL, &ftrace_filter_fops); trace_create_file("set_ftrace_notrace", 0644, d_tracer, NULL, &ftrace_notrace_fops); #ifdef CONFIG_FUNCTION_GRAPH_TRACER trace_create_file("set_graph_function", 0444, d_tracer, NULL, &ftrace_graph_fops); #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ return 0; } static int ftrace_process_locs(struct module *mod, unsigned long *start, unsigned long *end) { unsigned long *p; unsigned long addr; unsigned long flags; mutex_lock(&ftrace_lock); p = start; while (p < end) { addr = ftrace_call_adjust(*p++); /* * Some architecture linkers will pad between * the different mcount_loc sections of different * object files to satisfy alignments. * Skip any NULL pointers. */ if (!addr) continue; ftrace_record_ip(addr); } /* disable interrupts to prevent kstop machine */ local_irq_save(flags); ftrace_update_code(mod); local_irq_restore(flags); mutex_unlock(&ftrace_lock); return 0; } #ifdef CONFIG_MODULES void ftrace_release_mod(struct module *mod) { struct dyn_ftrace *rec; struct ftrace_page *pg; if (ftrace_disabled) return; mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { if (within_module_core(rec->ip, mod)) { /* * rec->ip is changed in ftrace_free_rec() * It should not between s and e if record was freed. 
*/ FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE); ftrace_free_rec(rec); } } while_for_each_ftrace_rec(); mutex_unlock(&ftrace_lock); } static void ftrace_init_module(struct module *mod, unsigned long *start, unsigned long *end) { if (ftrace_disabled || start == end) return; ftrace_process_locs(mod, start, end); } static int ftrace_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; switch (val) { case MODULE_STATE_COMING: ftrace_init_module(mod, mod->ftrace_callsites, mod->ftrace_callsites + mod->num_ftrace_callsites); break; case MODULE_STATE_GOING: ftrace_release_mod(mod); break; } return 0; } #else static int ftrace_module_notify(struct notifier_block *self, unsigned long val, void *data) { return 0; } #endif /* CONFIG_MODULES */ struct notifier_block ftrace_module_nb = { .notifier_call = ftrace_module_notify, .priority = 0, }; extern unsigned long __start_mcount_loc[]; extern unsigned long __stop_mcount_loc[]; void __init ftrace_init(void) { unsigned long count, addr, flags; int ret; /* Keep the ftrace pointer to the stub */ addr = (unsigned long)ftrace_stub; local_irq_save(flags); ftrace_dyn_arch_init(&addr); local_irq_restore(flags); /* ftrace_dyn_arch_init places the return code in addr */ if (addr) goto failed; count = __stop_mcount_loc - __start_mcount_loc; ret = ftrace_dyn_table_alloc(count); if (ret) goto failed; last_ftrace_enabled = ftrace_enabled = 1; ret = ftrace_process_locs(NULL, __start_mcount_loc, __stop_mcount_loc); ret = register_module_notifier(&ftrace_module_nb); if (ret) pr_warning("Failed to register trace ftrace module notifier\n"); set_ftrace_early_filters(); return; failed: ftrace_disabled = 1; } #else static int __init ftrace_nodyn_init(void) { ftrace_enabled = 1; return 0; } device_initcall(ftrace_nodyn_init); static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } static inline void ftrace_startup_enable(int command) { } /* Keep as macros so we do not need to 
define the commands */ # define ftrace_startup(command) do { } while (0) # define ftrace_shutdown(command) do { } while (0) # define ftrace_startup_sysctl() do { } while (0) # define ftrace_shutdown_sysctl() do { } while (0) #endif /* CONFIG_DYNAMIC_FTRACE */ static void clear_ftrace_swapper(void) { struct task_struct *p; int cpu; get_online_cpus(); for_each_online_cpu(cpu) { p = idle_task(cpu); clear_tsk_trace_trace(p); } put_online_cpus(); } static void set_ftrace_swapper(void) { struct task_struct *p; int cpu; get_online_cpus(); for_each_online_cpu(cpu) { p = idle_task(cpu); set_tsk_trace_trace(p); } put_online_cpus(); } static void clear_ftrace_pid(struct pid *pid) { struct task_struct *p; rcu_read_lock(); do_each_pid_task(pid, PIDTYPE_PID, p) { clear_tsk_trace_trace(p); } while_each_pid_task(pid, PIDTYPE_PID, p); rcu_read_unlock(); put_pid(pid); } static void set_ftrace_pid(struct pid *pid) { struct task_struct *p; rcu_read_lock(); do_each_pid_task(pid, PIDTYPE_PID, p) { set_tsk_trace_trace(p); } while_each_pid_task(pid, PIDTYPE_PID, p); rcu_read_unlock(); } static void clear_ftrace_pid_task(struct pid *pid) { if (pid == ftrace_swapper_pid) clear_ftrace_swapper(); else clear_ftrace_pid(pid); } static void set_ftrace_pid_task(struct pid *pid) { if (pid == ftrace_swapper_pid) set_ftrace_swapper(); else set_ftrace_pid(pid); } static int ftrace_pid_add(int p) { struct pid *pid; struct ftrace_pid *fpid; int ret = -EINVAL; mutex_lock(&ftrace_lock); if (!p) pid = ftrace_swapper_pid; else pid = find_get_pid(p); if (!pid) goto out; ret = 0; list_for_each_entry(fpid, &ftrace_pids, list) if (fpid->pid == pid) goto out_put; ret = -ENOMEM; fpid = kmalloc(sizeof(*fpid), GFP_KERNEL); if (!fpid) goto out_put; list_add(&fpid->list, &ftrace_pids); fpid->pid = pid; set_ftrace_pid_task(pid); ftrace_update_pid_func(); ftrace_startup_enable(0); mutex_unlock(&ftrace_lock); return 0; out_put: if (pid != ftrace_swapper_pid) put_pid(pid); out: mutex_unlock(&ftrace_lock); return ret; } 
static void ftrace_pid_reset(void) { struct ftrace_pid *fpid, *safe; mutex_lock(&ftrace_lock); list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) { struct pid *pid = fpid->pid; clear_ftrace_pid_task(pid); list_del(&fpid->list); kfree(fpid); } ftrace_update_pid_func(); ftrace_startup_enable(0); mutex_unlock(&ftrace_lock); } static void *fpid_start(struct seq_file *m, loff_t *pos) { mutex_lock(&ftrace_lock); if (list_empty(&ftrace_pids) && (!*pos)) return (void *) 1; return seq_list_start(&ftrace_pids, *pos); } static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) { if (v == (void *)1) return NULL; return seq_list_next(v, &ftrace_pids, pos); } static void fpid_stop(struct seq_file *m, void *p) { mutex_unlock(&ftrace_lock); } static int fpid_show(struct seq_file *m, void *v) { const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); if (v == (void *)1) { seq_printf(m, "no pid\n"); return 0; } if (fpid->pid == ftrace_swapper_pid) seq_printf(m, "swapper tasks\n"); else seq_printf(m, "%u\n", pid_vnr(fpid->pid)); return 0; } static const struct seq_operations ftrace_pid_sops = { .start = fpid_start, .next = fpid_next, .stop = fpid_stop, .show = fpid_show, }; static int ftrace_pid_open(struct inode *inode, struct file *file) { int ret = 0; if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) ftrace_pid_reset(); if (file->f_mode & FMODE_READ) ret = seq_open(file, &ftrace_pid_sops); return ret; } static ssize_t ftrace_pid_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64], *tmp; long val; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; /* * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid" * to clean the filter quietly. */ tmp = strstrip(buf); if (strlen(tmp) == 0) return 1; ret = strict_strtol(tmp, 10, &val); if (ret < 0) return ret; ret = ftrace_pid_add(val); return ret ? 
ret : cnt; } static int ftrace_pid_release(struct inode *inode, struct file *file) { if (file->f_mode & FMODE_READ) seq_release(inode, file); return 0; } static const struct file_operations ftrace_pid_fops = { .open = ftrace_pid_open, .write = ftrace_pid_write, .read = seq_read, .llseek = seq_lseek, .release = ftrace_pid_release, }; static __init int ftrace_init_debugfs(void) { struct dentry *d_tracer; d_tracer = tracing_init_dentry(); if (!d_tracer) return 0; ftrace_init_dyn_debugfs(d_tracer); trace_create_file("set_ftrace_pid", 0644, d_tracer, NULL, &ftrace_pid_fops); ftrace_profile_debugfs(d_tracer); return 0; } fs_initcall(ftrace_init_debugfs); /** * ftrace_kill - kill ftrace * * This function should be used by panic code. It stops ftrace * but in a not so nice way. If you need to simply kill ftrace * from a non-atomic section, use ftrace_kill. */ void ftrace_kill(void) { ftrace_disabled = 1; ftrace_enabled = 0; clear_ftrace_function(); } /** * register_ftrace_function - register a function for profiling * @ops - ops structure that holds the function for profiling. * * Register a function to be called by all functions in the * kernel. * * Note: @ops->func and all the functions it calls must be labeled * with "notrace", otherwise it will go into a * recursive loop. */ int register_ftrace_function(struct ftrace_ops *ops) { int ret; if (unlikely(ftrace_disabled)) return -1; mutex_lock(&ftrace_lock); ret = __register_ftrace_function(ops); ftrace_startup(0); mutex_unlock(&ftrace_lock); return ret; } /** * unregister_ftrace_function - unregister a function for profiling. * @ops - ops structure that holds the function to unregister * * Unregister a function that was added to be called by ftrace profiling. 
*/ int unregister_ftrace_function(struct ftrace_ops *ops) { int ret; mutex_lock(&ftrace_lock); ret = __unregister_ftrace_function(ops); ftrace_shutdown(0); mutex_unlock(&ftrace_lock); return ret; } int ftrace_enable_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; if (unlikely(ftrace_disabled)) return -ENODEV; mutex_lock(&ftrace_lock); ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) goto out; last_ftrace_enabled = !!ftrace_enabled; if (ftrace_enabled) { ftrace_startup_sysctl(); /* we are starting ftrace again */ if (ftrace_list != &ftrace_list_end) { if (ftrace_list->next == &ftrace_list_end) ftrace_trace_function = ftrace_list->func; else ftrace_trace_function = ftrace_list_func; } } else { /* stopping ftrace calls (just send to ftrace_stub) */ ftrace_trace_function = ftrace_stub; ftrace_shutdown_sysctl(); } out: mutex_unlock(&ftrace_lock); return ret; } #ifdef CONFIG_FUNCTION_GRAPH_TRACER static int ftrace_graph_active; static struct notifier_block ftrace_suspend_notifier; int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) { return 0; } /* The callbacks that hook a function */ trace_func_graph_ret_t ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. 
*/ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) { int i; int ret = 0; unsigned long flags; int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE; struct task_struct *g, *t; for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) { ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH * sizeof(struct ftrace_ret_stack), GFP_KERNEL); if (!ret_stack_list[i]) { start = 0; end = i; ret = -ENOMEM; goto free; } } read_lock_irqsave(&tasklist_lock, flags); do_each_thread(g, t) { if (start == end) { ret = -EAGAIN; goto unlock; } if (t->ret_stack == NULL) { atomic_set(&t->tracing_graph_pause, 0); atomic_set(&t->trace_overrun, 0); t->curr_ret_stack = -1; /* Make sure the tasks see the -1 first: */ smp_wmb(); t->ret_stack = ret_stack_list[start++]; } } while_each_thread(g, t); unlock: read_unlock_irqrestore(&tasklist_lock, flags); free: for (i = start; i < end; i++) kfree(ret_stack_list[i]); return ret; } static void ftrace_graph_probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) { unsigned long long timestamp; int index; /* * Does the user want to count the time a function was asleep. * If so, do not update the time stamps. */ if (trace_flags & TRACE_ITER_SLEEP_TIME) return; timestamp = trace_clock_local(); prev->ftrace_timestamp = timestamp; /* only process tasks that we timestamped */ if (!next->ftrace_timestamp) return; /* * Update all the counters in next to make up for the * time next was sleeping. 
*/ timestamp -= next->ftrace_timestamp; for (index = next->curr_ret_stack; index >= 0; index--) next->ret_stack[index].calltime += timestamp; } /* Allocate a return stack for each task */ static int start_graph_tracing(void) { struct ftrace_ret_stack **ret_stack_list; int ret, cpu; ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE * sizeof(struct ftrace_ret_stack *), GFP_KERNEL); if (!ret_stack_list) return -ENOMEM; /* The cpu_boot init_task->ret_stack will never be freed */ for_each_online_cpu(cpu) { if (!idle_task(cpu)->ret_stack) ftrace_graph_init_task(idle_task(cpu)); } do { ret = alloc_retstack_tasklist(ret_stack_list); } while (ret == -EAGAIN); if (!ret) { ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); if (ret) pr_info("ftrace_graph: Couldn't activate tracepoint" " probe to kernel_sched_switch\n"); } kfree(ret_stack_list); return ret; } /* * Hibernation protection. * The state of the current task is too much unstable during * suspend/restore to disk. We want to protect against that. 
*/ static int ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, void *unused) { switch (state) { case PM_HIBERNATION_PREPARE: pause_graph_tracing(); break; case PM_POST_HIBERNATION: unpause_graph_tracing(); break; } return NOTIFY_DONE; } int register_ftrace_graph(trace_func_graph_ret_t retfunc, trace_func_graph_ent_t entryfunc) { int ret = 0; mutex_lock(&ftrace_lock); /* we currently allow only one tracer registered at a time */ if (ftrace_graph_active) { ret = -EBUSY; goto out; } ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; register_pm_notifier(&ftrace_suspend_notifier); ftrace_graph_active++; ret = start_graph_tracing(); if (ret) { ftrace_graph_active--; goto out; } ftrace_graph_return = retfunc; ftrace_graph_entry = entryfunc; ftrace_startup(FTRACE_START_FUNC_RET); out: mutex_unlock(&ftrace_lock); return ret; } void unregister_ftrace_graph(void) { mutex_lock(&ftrace_lock); if (unlikely(!ftrace_graph_active)) goto out; ftrace_graph_active--; ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; ftrace_graph_entry = ftrace_graph_entry_stub; ftrace_shutdown(FTRACE_STOP_FUNC_RET); unregister_pm_notifier(&ftrace_suspend_notifier); unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); out: mutex_unlock(&ftrace_lock); } /* Allocate a return stack for newly created task */ void ftrace_graph_init_task(struct task_struct *t) { /* Make sure we do not use the parent ret_stack */ t->ret_stack = NULL; t->curr_ret_stack = -1; if (ftrace_graph_active) { struct ftrace_ret_stack *ret_stack; ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH * sizeof(struct ftrace_ret_stack), GFP_KERNEL); if (!ret_stack) return; atomic_set(&t->tracing_graph_pause, 0); atomic_set(&t->trace_overrun, 0); t->ftrace_timestamp = 0; /* make curr_ret_stack visable before we add the ret_stack */ smp_wmb(); t->ret_stack = ret_stack; } } void ftrace_graph_exit_task(struct task_struct *t) { struct ftrace_ret_stack *ret_stack = t->ret_stack; 
t->ret_stack = NULL; /* NULL must become visible to IRQs before we free it: */ barrier(); kfree(ret_stack); } void ftrace_graph_stop(void) { ftrace_stop(); } #endif
gpl-2.0
koolkhel/linux
drivers/of/of_net.c
738
2444
/* * OF helpers for network devices. * * This file is released under the GPLv2 * * Initially copied out of arch/powerpc/kernel/prom_parse.c */ #include <linux/etherdevice.h> #include <linux/kernel.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/export.h> /** * of_get_phy_mode - Get phy mode for given device_node * @np: Pointer to the given device_node * * The function gets phy interface string from property 'phy-mode' or * 'phy-connection-type', and return its index in phy_modes table, or errno in * error case. */ int of_get_phy_mode(struct device_node *np) { const char *pm; int err, i; err = of_property_read_string(np, "phy-mode", &pm); if (err < 0) err = of_property_read_string(np, "phy-connection-type", &pm); if (err < 0) return err; for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) if (!strcasecmp(pm, phy_modes(i))) return i; return -ENODEV; } EXPORT_SYMBOL_GPL(of_get_phy_mode); /** * Search the device tree for the best MAC address to use. 'mac-address' is * checked first, because that is supposed to contain to "most recent" MAC * address. If that isn't set, then 'local-mac-address' is checked next, * because that is the default address. If that isn't set, then the obsolete * 'address' is checked, just in case we're using an old device tree. * * Note that the 'address' property is supposed to contain a virtual address of * the register set, but some DTS files have redefined that property to be the * MAC address. * * All-zero MAC addresses are rejected, because those could be properties that * exist in the device tree, but were not set by U-Boot. For example, the * DTS could define 'mac-address' and 'local-mac-address', with zero MAC * addresses. Some older U-Boots only initialized 'local-mac-address'. In * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists * but is all zeros. 
*/ const void *of_get_mac_address(struct device_node *np) { struct property *pp; pp = of_find_property(np, "mac-address", NULL); if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) return pp->value; pp = of_find_property(np, "local-mac-address", NULL); if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) return pp->value; pp = of_find_property(np, "address", NULL); if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) return pp->value; return NULL; } EXPORT_SYMBOL(of_get_mac_address);
gpl-2.0
andr00ib/e730-e739_cm10.1_kernel
drivers/regulator/wm8400-regulator.c
994
10303
/* * Regulator support for WM8400 * * Copyright 2008 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * */ #include <linux/bug.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/regulator/driver.h> #include <linux/mfd/wm8400-private.h> static int wm8400_ldo_is_enabled(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); u16 val; val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev)); return (val & WM8400_LDO1_ENA) != 0; } static int wm8400_ldo_enable(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev), WM8400_LDO1_ENA, WM8400_LDO1_ENA); } static int wm8400_ldo_disable(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev), WM8400_LDO1_ENA, 0); } static int wm8400_ldo_list_voltage(struct regulator_dev *dev, unsigned selector) { if (selector > WM8400_LDO1_VSEL_MASK) return -EINVAL; if (selector < 15) return 900000 + (selector * 50000); else return 1600000 + ((selector - 14) * 100000); } static int wm8400_ldo_get_voltage(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); u16 val; val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev)); val &= WM8400_LDO1_VSEL_MASK; return wm8400_ldo_list_voltage(dev, val); } static int wm8400_ldo_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); u16 val; if (min_uV < 900000 || min_uV > 3300000) return -EINVAL; if (min_uV < 1700000) { /* Steps of 50mV from 900mV; */ val = (min_uV - 850001) / 50000; if ((val * 
50000) + 900000 > max_uV) return -EINVAL; BUG_ON((val * 50000) + 900000 < min_uV); } else { /* Steps of 100mV from 1700mV */ val = ((min_uV - 1600001) / 100000); if ((val * 100000) + 1700000 > max_uV) return -EINVAL; BUG_ON((val * 100000) + 1700000 < min_uV); val += 0xf; } return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev), WM8400_LDO1_VSEL_MASK, val); } static struct regulator_ops wm8400_ldo_ops = { .is_enabled = wm8400_ldo_is_enabled, .enable = wm8400_ldo_enable, .disable = wm8400_ldo_disable, .list_voltage = wm8400_ldo_list_voltage, .get_voltage = wm8400_ldo_get_voltage, .set_voltage = wm8400_ldo_set_voltage, }; static int wm8400_dcdc_is_enabled(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; u16 val; val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset); return (val & WM8400_DC1_ENA) != 0; } static int wm8400_dcdc_enable(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_ENA, WM8400_DC1_ENA); } static int wm8400_dcdc_disable(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_ENA, 0); } static int wm8400_dcdc_list_voltage(struct regulator_dev *dev, unsigned selector) { if (selector > WM8400_DC1_VSEL_MASK) return -EINVAL; return 850000 + (selector * 25000); } static int wm8400_dcdc_get_voltage(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); u16 val; int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset); val &= WM8400_DC1_VSEL_MASK; return 850000 + (25000 * val); } static int wm8400_dcdc_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV) { struct wm8400 *wm8400 
= rdev_get_drvdata(dev); u16 val; int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; if (min_uV < 850000) return -EINVAL; val = (min_uV - 825001) / 25000; if (850000 + (25000 * val) > max_uV) return -EINVAL; BUG_ON(850000 + (25000 * val) < min_uV); return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_VSEL_MASK, val); } static unsigned int wm8400_dcdc_get_mode(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; u16 data[2]; int ret; ret = wm8400_block_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset, 2, data); if (ret != 0) return 0; /* Datasheet: hibernate */ if (data[0] & WM8400_DC1_SLEEP) return REGULATOR_MODE_STANDBY; /* Datasheet: standby */ if (!(data[0] & WM8400_DC1_ACTIVE)) return REGULATOR_MODE_IDLE; /* Datasheet: active with or without force PWM */ if (data[1] & WM8400_DC1_FRC_PWM) return REGULATOR_MODE_FAST; else return REGULATOR_MODE_NORMAL; } static int wm8400_dcdc_set_mode(struct regulator_dev *dev, unsigned int mode) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; int ret; switch (mode) { case REGULATOR_MODE_FAST: /* Datasheet: active with force PWM */ ret = wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_2 + offset, WM8400_DC1_FRC_PWM, WM8400_DC1_FRC_PWM); if (ret != 0) return ret; return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP, WM8400_DC1_ACTIVE); case REGULATOR_MODE_NORMAL: /* Datasheet: active */ ret = wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_2 + offset, WM8400_DC1_FRC_PWM, 0); if (ret != 0) return ret; return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP, WM8400_DC1_ACTIVE); case REGULATOR_MODE_IDLE: /* Datasheet: standby */ ret = wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_ACTIVE, 0); if (ret != 0) return ret; return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, 
WM8400_DC1_SLEEP, 0); default: return -EINVAL; } } static unsigned int wm8400_dcdc_get_optimum_mode(struct regulator_dev *dev, int input_uV, int output_uV, int load_uA) { return REGULATOR_MODE_NORMAL; } static struct regulator_ops wm8400_dcdc_ops = { .is_enabled = wm8400_dcdc_is_enabled, .enable = wm8400_dcdc_enable, .disable = wm8400_dcdc_disable, .list_voltage = wm8400_dcdc_list_voltage, .get_voltage = wm8400_dcdc_get_voltage, .set_voltage = wm8400_dcdc_set_voltage, .get_mode = wm8400_dcdc_get_mode, .set_mode = wm8400_dcdc_set_mode, .get_optimum_mode = wm8400_dcdc_get_optimum_mode, }; static struct regulator_desc regulators[] = { { .name = "LDO1", .id = WM8400_LDO1, .ops = &wm8400_ldo_ops, .n_voltages = WM8400_LDO1_VSEL_MASK + 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO2", .id = WM8400_LDO2, .ops = &wm8400_ldo_ops, .n_voltages = WM8400_LDO2_VSEL_MASK + 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO3", .id = WM8400_LDO3, .ops = &wm8400_ldo_ops, .n_voltages = WM8400_LDO3_VSEL_MASK + 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO4", .id = WM8400_LDO4, .ops = &wm8400_ldo_ops, .n_voltages = WM8400_LDO4_VSEL_MASK + 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "DCDC1", .id = WM8400_DCDC1, .ops = &wm8400_dcdc_ops, .n_voltages = WM8400_DC1_VSEL_MASK + 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "DCDC2", .id = WM8400_DCDC2, .ops = &wm8400_dcdc_ops, .n_voltages = WM8400_DC2_VSEL_MASK + 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, }; static int __devinit wm8400_regulator_probe(struct platform_device *pdev) { struct wm8400 *wm8400 = container_of(pdev, struct wm8400, regulators[pdev->id]); struct regulator_dev *rdev; rdev = regulator_register(&regulators[pdev->id], &pdev->dev, pdev->dev.platform_data, wm8400); if (IS_ERR(rdev)) return PTR_ERR(rdev); platform_set_drvdata(pdev, rdev); return 0; } static int __devexit wm8400_regulator_remove(struct 
platform_device *pdev) { struct regulator_dev *rdev = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); regulator_unregister(rdev); return 0; } static struct platform_driver wm8400_regulator_driver = { .driver = { .name = "wm8400-regulator", }, .probe = wm8400_regulator_probe, .remove = __devexit_p(wm8400_regulator_remove), }; /** * wm8400_register_regulator - enable software control of a WM8400 regulator * * This function enables software control of a WM8400 regulator via * the regulator API. It is intended to be called from the * platform_init() callback of the WM8400 MFD driver. * * @param dev The WM8400 device to operate on. * @param reg The regulator to control. * @param initdata Regulator initdata for the regulator. */ int wm8400_register_regulator(struct device *dev, int reg, struct regulator_init_data *initdata) { struct wm8400 *wm8400 = dev_get_drvdata(dev); if (wm8400->regulators[reg].name) return -EBUSY; initdata->driver_data = wm8400; wm8400->regulators[reg].name = "wm8400-regulator"; wm8400->regulators[reg].id = reg; wm8400->regulators[reg].dev.parent = dev; wm8400->regulators[reg].dev.platform_data = initdata; return platform_device_register(&wm8400->regulators[reg]); } EXPORT_SYMBOL_GPL(wm8400_register_regulator); static int __init wm8400_regulator_init(void) { return platform_driver_register(&wm8400_regulator_driver); } subsys_initcall(wm8400_regulator_init); static void __exit wm8400_regulator_exit(void) { platform_driver_unregister(&wm8400_regulator_driver); } module_exit(wm8400_regulator_exit); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_DESCRIPTION("WM8400 regulator driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm8400-regulator");
gpl-2.0
jameskdev/android_kernel_sky_ef30s
drivers/hwmon/max6650.c
994
20901
/* * max6650.c - Part of lm_sensors, Linux kernel modules for hardware * monitoring. * * (C) 2007 by Hans J. Koch <hjk@linutronix.de> * * based on code written by John Morris <john.morris@spirentcom.com> * Copyright (c) 2003 Spirent Communications * and Claus Gindhart <claus.gindhart@kontron.com> * * This module has only been tested with the MAX6650 chip. It should * also work with the MAX6651. It does not distinguish max6650 and max6651 * chips. * * The datasheet was last seen at: * * http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> /* * Addresses to scan. There are four disjoint possibilities, by pin config. 
*/ static const unsigned short normal_i2c[] = {0x1b, 0x1f, 0x48, 0x4b, I2C_CLIENT_END}; /* * Insmod parameters */ /* fan_voltage: 5=5V fan, 12=12V fan, 0=don't change */ static int fan_voltage; /* prescaler: Possible values are 1, 2, 4, 8, 16 or 0 for don't change */ static int prescaler; /* clock: The clock frequency of the chip the driver should assume */ static int clock = 254000; module_param(fan_voltage, int, S_IRUGO); module_param(prescaler, int, S_IRUGO); module_param(clock, int, S_IRUGO); /* * MAX 6650/6651 registers */ #define MAX6650_REG_SPEED 0x00 #define MAX6650_REG_CONFIG 0x02 #define MAX6650_REG_GPIO_DEF 0x04 #define MAX6650_REG_DAC 0x06 #define MAX6650_REG_ALARM_EN 0x08 #define MAX6650_REG_ALARM 0x0A #define MAX6650_REG_TACH0 0x0C #define MAX6650_REG_TACH1 0x0E #define MAX6650_REG_TACH2 0x10 #define MAX6650_REG_TACH3 0x12 #define MAX6650_REG_GPIO_STAT 0x14 #define MAX6650_REG_COUNT 0x16 /* * Config register bits */ #define MAX6650_CFG_V12 0x08 #define MAX6650_CFG_PRESCALER_MASK 0x07 #define MAX6650_CFG_PRESCALER_2 0x01 #define MAX6650_CFG_PRESCALER_4 0x02 #define MAX6650_CFG_PRESCALER_8 0x03 #define MAX6650_CFG_PRESCALER_16 0x04 #define MAX6650_CFG_MODE_MASK 0x30 #define MAX6650_CFG_MODE_ON 0x00 #define MAX6650_CFG_MODE_OFF 0x10 #define MAX6650_CFG_MODE_CLOSED_LOOP 0x20 #define MAX6650_CFG_MODE_OPEN_LOOP 0x30 #define MAX6650_COUNT_MASK 0x03 /* * Alarm status register bits */ #define MAX6650_ALRM_MAX 0x01 #define MAX6650_ALRM_MIN 0x02 #define MAX6650_ALRM_TACH 0x04 #define MAX6650_ALRM_GPIO1 0x08 #define MAX6650_ALRM_GPIO2 0x10 /* Minimum and maximum values of the FAN-RPM */ #define FAN_RPM_MIN 240 #define FAN_RPM_MAX 30000 #define DIV_FROM_REG(reg) (1 << (reg & 7)) static int max6650_probe(struct i2c_client *client, const struct i2c_device_id *id); static int max6650_detect(struct i2c_client *client, struct i2c_board_info *info); static int max6650_init_client(struct i2c_client *client); static int max6650_remove(struct i2c_client *client); static 
struct max6650_data *max6650_update_device(struct device *dev); /* * Driver data (common to all clients) */ static const struct i2c_device_id max6650_id[] = { { "max6650", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, max6650_id); static struct i2c_driver max6650_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "max6650", }, .probe = max6650_probe, .remove = max6650_remove, .id_table = max6650_id, .detect = max6650_detect, .address_list = normal_i2c, }; /* * Client data (each client gets its own) */ struct max6650_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ /* register values */ u8 speed; u8 config; u8 tach[4]; u8 count; u8 dac; u8 alarm; }; static ssize_t get_fan(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct max6650_data *data = max6650_update_device(dev); int rpm; /* * Calculation details: * * Each tachometer counts over an interval given by the "count" * register (0.25, 0.5, 1 or 2 seconds). This module assumes * that the fans produce two pulses per revolution (this seems * to be the most common). */ rpm = ((data->tach[attr->index] * 120) / DIV_FROM_REG(data->count)); return sprintf(buf, "%d\n", rpm); } /* * Set the fan speed to the specified RPM (or read back the RPM setting). * This works in closed loop mode only. Use pwm1 for open loop speed setting. * * The MAX6650/1 will automatically control fan speed when in closed loop * mode. * * Assumptions: * * 1) The MAX6650/1 internal 254kHz clock frequency is set correctly. Use * the clock module parameter if you need to fine tune this. * * 2) The prescaler (low three bits of the config register) has already * been set to an appropriate value. Use the prescaler module parameter * if your BIOS doesn't initialize the chip properly. * * The relevant equations are given on pages 21 and 22 of the datasheet. 
* * From the datasheet, the relevant equation when in regulation is: * * [fCLK / (128 x (KTACH + 1))] = 2 x FanSpeed / KSCALE * * where: * * fCLK is the oscillator frequency (either the 254kHz internal * oscillator or the externally applied clock) * * KTACH is the value in the speed register * * FanSpeed is the speed of the fan in rps * * KSCALE is the prescaler value (1, 2, 4, 8, or 16) * * When reading, we need to solve for FanSpeed. When writing, we need to * solve for KTACH. * * Note: this tachometer is completely separate from the tachometers * used to measure the fan speeds. Only one fan's speed (fan1) is * controlled. */ static ssize_t get_target(struct device *dev, struct device_attribute *devattr, char *buf) { struct max6650_data *data = max6650_update_device(dev); int kscale, ktach, rpm; /* * Use the datasheet equation: * * FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)] * * then multiply by 60 to give rpm. */ kscale = DIV_FROM_REG(data->config); ktach = data->speed; rpm = 60 * kscale * clock / (256 * (ktach + 1)); return sprintf(buf, "%d\n", rpm); } static ssize_t set_target(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct max6650_data *data = i2c_get_clientdata(client); int rpm = simple_strtoul(buf, NULL, 10); int kscale, ktach; rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX); /* * Divide the required speed by 60 to get from rpm to rps, then * use the datasheet equation: * * KTACH = [(fCLK x KSCALE) / (256 x FanSpeed)] - 1 */ mutex_lock(&data->update_lock); kscale = DIV_FROM_REG(data->config); ktach = ((clock * kscale) / (256 * rpm / 60)) - 1; if (ktach < 0) ktach = 0; if (ktach > 255) ktach = 255; data->speed = ktach; i2c_smbus_write_byte_data(client, MAX6650_REG_SPEED, data->speed); mutex_unlock(&data->update_lock); return count; } /* * Get/set the fan speed in open loop mode using pwm1 sysfs file. 
* Speed is given as a relative value from 0 to 255, where 255 is maximum * speed. Note that this is done by writing directly to the chip's DAC, * it won't change the closed loop speed set by fan1_target. * Also note that due to rounding errors it is possible that you don't read * back exactly the value you have set. */ static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { int pwm; struct max6650_data *data = max6650_update_device(dev); /* Useful range for dac is 0-180 for 12V fans and 0-76 for 5V fans. Lower DAC values mean higher speeds. */ if (data->config & MAX6650_CFG_V12) pwm = 255 - (255 * (int)data->dac)/180; else pwm = 255 - (255 * (int)data->dac)/76; if (pwm < 0) pwm = 0; return sprintf(buf, "%d\n", pwm); } static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct max6650_data *data = i2c_get_clientdata(client); int pwm = simple_strtoul(buf, NULL, 10); pwm = SENSORS_LIMIT(pwm, 0, 255); mutex_lock(&data->update_lock); if (data->config & MAX6650_CFG_V12) data->dac = 180 - (180 * pwm)/255; else data->dac = 76 - (76 * pwm)/255; i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, data->dac); mutex_unlock(&data->update_lock); return count; } /* * Get/Set controller mode: * Possible values: * 0 = Fan always on * 1 = Open loop, Voltage is set according to speed, not regulated. 
* 2 = Closed loop, RPM for all fans regulated by fan1 tachometer */ static ssize_t get_enable(struct device *dev, struct device_attribute *devattr, char *buf) { struct max6650_data *data = max6650_update_device(dev); int mode = (data->config & MAX6650_CFG_MODE_MASK) >> 4; int sysfs_modes[4] = {0, 1, 2, 1}; return sprintf(buf, "%d\n", sysfs_modes[mode]); } static ssize_t set_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct max6650_data *data = i2c_get_clientdata(client); int mode = simple_strtoul(buf, NULL, 10); int max6650_modes[3] = {0, 3, 2}; if ((mode < 0)||(mode > 2)) { dev_err(&client->dev, "illegal value for pwm1_enable (%d)\n", mode); return -EINVAL; } mutex_lock(&data->update_lock); data->config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG); data->config = (data->config & ~MAX6650_CFG_MODE_MASK) | (max6650_modes[mode] << 4); i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, data->config); mutex_unlock(&data->update_lock); return count; } /* * Read/write functions for fan1_div sysfs file. The MAX6650 has no such * divider. We handle this by converting between divider and counttime: * * (counttime == k) <==> (divider == 2^k), k = 0, 1, 2, or 3 * * Lower values of k allow to connect a faster fan without the risk of * counter overflow. The price is lower resolution. You can also set counttime * using the module parameter. Note that the module parameter "prescaler" also * influences the behaviour. Unfortunately, there's no sysfs attribute * defined for that. See the data sheet for details. 
*/ static ssize_t get_div(struct device *dev, struct device_attribute *devattr, char *buf) { struct max6650_data *data = max6650_update_device(dev); return sprintf(buf, "%d\n", DIV_FROM_REG(data->count)); } static ssize_t set_div(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct max6650_data *data = i2c_get_clientdata(client); int div = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); switch (div) { case 1: data->count = 0; break; case 2: data->count = 1; break; case 4: data->count = 2; break; case 8: data->count = 3; break; default: mutex_unlock(&data->update_lock); dev_err(&client->dev, "illegal value for fan divider (%d)\n", div); return -EINVAL; } i2c_smbus_write_byte_data(client, MAX6650_REG_COUNT, data->count); mutex_unlock(&data->update_lock); return count; } /* * Get alarm stati: * Possible values: * 0 = no alarm * 1 = alarm */ static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct max6650_data *data = max6650_update_device(dev); struct i2c_client *client = to_i2c_client(dev); int alarm = 0; if (data->alarm & attr->index) { mutex_lock(&data->update_lock); alarm = 1; data->alarm &= ~attr->index; data->alarm |= i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM); mutex_unlock(&data->update_lock); } return sprintf(buf, "%d\n", alarm); } static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0); static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1); static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2); static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3); static DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, set_target); static DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div); static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable); static DEVICE_ATTR(pwm1, S_IWUSR | 
S_IRUGO, get_pwm, set_pwm); static SENSOR_DEVICE_ATTR(fan1_max_alarm, S_IRUGO, get_alarm, NULL, MAX6650_ALRM_MAX); static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, get_alarm, NULL, MAX6650_ALRM_MIN); static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_alarm, NULL, MAX6650_ALRM_TACH); static SENSOR_DEVICE_ATTR(gpio1_alarm, S_IRUGO, get_alarm, NULL, MAX6650_ALRM_GPIO1); static SENSOR_DEVICE_ATTR(gpio2_alarm, S_IRUGO, get_alarm, NULL, MAX6650_ALRM_GPIO2); static mode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct i2c_client *client = to_i2c_client(dev); u8 alarm_en = i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN); struct device_attribute *devattr; /* * Hide the alarms that have not been enabled by the firmware */ devattr = container_of(a, struct device_attribute, attr); if (devattr == &sensor_dev_attr_fan1_max_alarm.dev_attr || devattr == &sensor_dev_attr_fan1_min_alarm.dev_attr || devattr == &sensor_dev_attr_fan1_fault.dev_attr || devattr == &sensor_dev_attr_gpio1_alarm.dev_attr || devattr == &sensor_dev_attr_gpio2_alarm.dev_attr) { if (!(alarm_en & to_sensor_dev_attr(devattr)->index)) return 0; } return a->mode; } static struct attribute *max6650_attrs[] = { &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan3_input.dev_attr.attr, &sensor_dev_attr_fan4_input.dev_attr.attr, &dev_attr_fan1_target.attr, &dev_attr_fan1_div.attr, &dev_attr_pwm1_enable.attr, &dev_attr_pwm1.attr, &sensor_dev_attr_fan1_max_alarm.dev_attr.attr, &sensor_dev_attr_fan1_min_alarm.dev_attr.attr, &sensor_dev_attr_fan1_fault.dev_attr.attr, &sensor_dev_attr_gpio1_alarm.dev_attr.attr, &sensor_dev_attr_gpio2_alarm.dev_attr.attr, NULL }; static struct attribute_group max6650_attr_grp = { .attrs = max6650_attrs, .is_visible = max6650_attrs_visible, }; /* * Real code */ /* Return 0 if detection is successful, -ENODEV otherwise */ static int 
max6650_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int address = client->addr; dev_dbg(&adapter->dev, "max6650_detect called\n"); if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_dbg(&adapter->dev, "max6650: I2C bus doesn't support " "byte read mode, skipping.\n"); return -ENODEV; } if (((i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG) & 0xC0) ||(i2c_smbus_read_byte_data(client, MAX6650_REG_GPIO_STAT) & 0xE0) ||(i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN) & 0xE0) ||(i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM) & 0xE0) ||(i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT) & 0xFC))) { dev_dbg(&adapter->dev, "max6650: detection failed at 0x%02x.\n", address); return -ENODEV; } dev_info(&adapter->dev, "max6650: chip found at 0x%02x.\n", address); strlcpy(info->type, "max6650", I2C_NAME_SIZE); return 0; } static int max6650_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct max6650_data *data; int err; if (!(data = kzalloc(sizeof(struct max6650_data), GFP_KERNEL))) { dev_err(&client->dev, "out of memory.\n"); return -ENOMEM; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* * Initialize the max6650 chip */ err = max6650_init_client(client); if (err) goto err_free; err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp); if (err) goto err_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (!IS_ERR(data->hwmon_dev)) return 0; err = PTR_ERR(data->hwmon_dev); dev_err(&client->dev, "error registering hwmon device.\n"); sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp); err_free: kfree(data); return err; } static int max6650_remove(struct i2c_client *client) { struct max6650_data *data = i2c_get_clientdata(client); sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp); hwmon_device_unregister(data->hwmon_dev); kfree(data); return 0; } static int max6650_init_client(struct i2c_client 
*client) { struct max6650_data *data = i2c_get_clientdata(client); int config; int err = -EIO; config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG); if (config < 0) { dev_err(&client->dev, "Error reading config, aborting.\n"); return err; } switch (fan_voltage) { case 0: break; case 5: config &= ~MAX6650_CFG_V12; break; case 12: config |= MAX6650_CFG_V12; break; default: dev_err(&client->dev, "illegal value for fan_voltage (%d)\n", fan_voltage); } dev_info(&client->dev, "Fan voltage is set to %dV.\n", (config & MAX6650_CFG_V12) ? 12 : 5); switch (prescaler) { case 0: break; case 1: config &= ~MAX6650_CFG_PRESCALER_MASK; break; case 2: config = (config & ~MAX6650_CFG_PRESCALER_MASK) | MAX6650_CFG_PRESCALER_2; break; case 4: config = (config & ~MAX6650_CFG_PRESCALER_MASK) | MAX6650_CFG_PRESCALER_4; break; case 8: config = (config & ~MAX6650_CFG_PRESCALER_MASK) | MAX6650_CFG_PRESCALER_8; break; case 16: config = (config & ~MAX6650_CFG_PRESCALER_MASK) | MAX6650_CFG_PRESCALER_16; break; default: dev_err(&client->dev, "illegal value for prescaler (%d)\n", prescaler); } dev_info(&client->dev, "Prescaler is set to %d.\n", 1 << (config & MAX6650_CFG_PRESCALER_MASK)); /* If mode is set to "full off", we change it to "open loop" and * set DAC to 255, which has the same effect. We do this because * there's no "full off" mode defined in hwmon specifcations. 
*/ if ((config & MAX6650_CFG_MODE_MASK) == MAX6650_CFG_MODE_OFF) { dev_dbg(&client->dev, "Change mode to open loop, full off.\n"); config = (config & ~MAX6650_CFG_MODE_MASK) | MAX6650_CFG_MODE_OPEN_LOOP; if (i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, 255)) { dev_err(&client->dev, "DAC write error, aborting.\n"); return err; } } if (i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, config)) { dev_err(&client->dev, "Config write error, aborting.\n"); return err; } data->config = config; data->count = i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT); return 0; } static const u8 tach_reg[] = { MAX6650_REG_TACH0, MAX6650_REG_TACH1, MAX6650_REG_TACH2, MAX6650_REG_TACH3, }; static struct max6650_data *max6650_update_device(struct device *dev) { int i; struct i2c_client *client = to_i2c_client(dev); struct max6650_data *data = i2c_get_clientdata(client); mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { data->speed = i2c_smbus_read_byte_data(client, MAX6650_REG_SPEED); data->config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG); for (i = 0; i < 4; i++) { data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]); } data->count = i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT); data->dac = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC); /* Alarms are cleared on read in case the condition that * caused the alarm is removed. Keep the value latched here * for providing the register through different alarm files. */ data->alarm |= i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static int __init sensors_max6650_init(void) { return i2c_add_driver(&max6650_driver); } static void __exit sensors_max6650_exit(void) { i2c_del_driver(&max6650_driver); } MODULE_AUTHOR("Hans J. 
Koch"); MODULE_DESCRIPTION("MAX6650 sensor driver"); MODULE_LICENSE("GPL"); module_init(sensors_max6650_init); module_exit(sensors_max6650_exit);
gpl-2.0
andr7e/rk3188_tablet_jb
kernel/drivers/ide/cy82c693.c
2530
6413
/* * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz * * CYPRESS CY82C693 chipset IDE controller * * The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards. */ #include <linux/module.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/io.h> #define DRV_NAME "cy82c693" /* * NOTE: the value for busmaster timeout is tricky and I got it by * trial and error! By using a to low value will cause DMA timeouts * and drop IDE performance, and by using a to high value will cause * audio playback to scatter. * If you know a better value or how to calc it, please let me know. */ /* twice the value written in cy82c693ub datasheet */ #define BUSMASTER_TIMEOUT 0x50 /* * the value above was tested on my machine and it seems to work okay */ /* here are the offset definitions for the registers */ #define CY82_IDE_CMDREG 0x04 #define CY82_IDE_ADDRSETUP 0x48 #define CY82_IDE_MASTER_IOR 0x4C #define CY82_IDE_MASTER_IOW 0x4D #define CY82_IDE_SLAVE_IOR 0x4E #define CY82_IDE_SLAVE_IOW 0x4F #define CY82_IDE_MASTER_8BIT 0x50 #define CY82_IDE_SLAVE_8BIT 0x51 #define CY82_INDEX_PORT 0x22 #define CY82_DATA_PORT 0x23 #define CY82_INDEX_CHANNEL0 0x30 #define CY82_INDEX_CHANNEL1 0x31 #define CY82_INDEX_TIMEOUT 0x32 /* * set DMA mode a specific channel for CY82C693 */ static void cy82c693_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { const u8 mode = drive->dma_mode; u8 single = (mode & 0x10) >> 4, index = 0, data = 0; index = hwif->channel ? 
CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0; data = (mode & 3) | (single << 2); outb(index, CY82_INDEX_PORT); outb(data, CY82_DATA_PORT); /* * note: below we set the value for Bus Master IDE TimeOut Register * I'm not absolutely sure what this does, but it solved my problem * with IDE DMA and sound, so I now can play sound and work with * my IDE driver at the same time :-) * * If you know the correct (best) value for this register please * let me know - ASK */ data = BUSMASTER_TIMEOUT; outb(CY82_INDEX_TIMEOUT, CY82_INDEX_PORT); outb(data, CY82_DATA_PORT); } static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(hwif->dev); int bus_speed = ide_pci_clk ? ide_pci_clk : 33; const unsigned long T = 1000000 / bus_speed; unsigned int addrCtrl; struct ide_timing t; u8 time_16, time_8; /* select primary or secondary channel */ if (hwif->index > 0) { /* drive is on the secondary channel */ dev = pci_get_slot(dev->bus, dev->devfn+1); if (!dev) { printk(KERN_ERR "%s: tune_drive: " "Cannot find secondary interface!\n", drive->name); return; } } ide_timing_compute(drive, drive->pio_mode, &t, T, 1); time_16 = clamp_val(t.recover - 1, 0, 15) | (clamp_val(t.active - 1, 0, 15) << 4); time_8 = clamp_val(t.act8b - 1, 0, 15) | (clamp_val(t.rec8b - 1, 0, 15) << 4); /* now let's write the clocks registers */ if ((drive->dn & 1) == 0) { /* * set master drive * address setup control register * is 32 bit !!! */ pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); addrCtrl &= (~0xF); addrCtrl |= clamp_val(t.setup - 1, 0, 15); pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl); /* now let's set the remaining registers */ pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, time_16); pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, time_16); pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, time_8); } else { /* * set slave drive * address setup control register * is 32 bit !!! 
*/ pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); addrCtrl &= (~0xF0); addrCtrl |= (clamp_val(t.setup - 1, 0, 15) << 4); pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl); /* now let's set the remaining registers */ pci_write_config_byte(dev, CY82_IDE_SLAVE_IOR, time_16); pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16); pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8); } } static void __devinit init_iops_cy82c693(ide_hwif_t *hwif) { static ide_hwif_t *primary; struct pci_dev *dev = to_pci_dev(hwif->dev); if (PCI_FUNC(dev->devfn) == 1) primary = hwif; else { hwif->mate = primary; hwif->channel = 1; } } static const struct ide_port_ops cy82c693_port_ops = { .set_pio_mode = cy82c693_set_pio_mode, .set_dma_mode = cy82c693_set_dma_mode, }; static const struct ide_port_info cy82c693_chipset __devinitdata = { .name = DRV_NAME, .init_iops = init_iops_cy82c693, .port_ops = &cy82c693_port_ops, .host_flags = IDE_HFLAG_SINGLE, .pio_mask = ATA_PIO4, .swdma_mask = ATA_SWDMA2, .mwdma_mask = ATA_MWDMA2, }; static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_device_id *id) { struct pci_dev *dev2; int ret = -ENODEV; /* CY82C693 is more than only a IDE controller. Function 1 is primary IDE channel, function 2 - secondary. */ if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && PCI_FUNC(dev->devfn) == 1) { dev2 = pci_get_slot(dev->bus, dev->devfn + 1); ret = ide_pci_init_two(dev, dev2, &cy82c693_chipset, NULL); if (ret) pci_dev_put(dev2); } return ret; } static void __devexit cy82c693_remove(struct pci_dev *dev) { struct ide_host *host = pci_get_drvdata(dev); struct pci_dev *dev2 = host->dev[1] ? 
to_pci_dev(host->dev[1]) : NULL; ide_pci_remove(dev); pci_dev_put(dev2); } static const struct pci_device_id cy82c693_pci_tbl[] = { { PCI_VDEVICE(CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693), 0 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, cy82c693_pci_tbl); static struct pci_driver cy82c693_pci_driver = { .name = "Cypress_IDE", .id_table = cy82c693_pci_tbl, .probe = cy82c693_init_one, .remove = __devexit_p(cy82c693_remove), .suspend = ide_pci_suspend, .resume = ide_pci_resume, }; static int __init cy82c693_ide_init(void) { return ide_pci_register_driver(&cy82c693_pci_driver); } static void __exit cy82c693_ide_exit(void) { pci_unregister_driver(&cy82c693_pci_driver); } module_init(cy82c693_ide_init); module_exit(cy82c693_ide_exit); MODULE_AUTHOR("Andreas Krebs, Andre Hedrick, Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("PCI driver module for the Cypress CY82C693 IDE"); MODULE_LICENSE("GPL");
gpl-2.0
rudij7/android_kernel_oneplus_one
net/nfc/rawsock.c
2786
7632
/* * Copyright (C) 2011 Instituto Nokia de Tecnologia * * Authors: * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> * Lauro Ramos Venancio <lauro.venancio@openbossa.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <net/tcp_states.h> #include <linux/nfc.h> #include <linux/export.h> #include "nfc.h" static void rawsock_write_queue_purge(struct sock *sk) { pr_debug("sk=%p\n", sk); spin_lock_bh(&sk->sk_write_queue.lock); __skb_queue_purge(&sk->sk_write_queue); nfc_rawsock(sk)->tx_work_scheduled = false; spin_unlock_bh(&sk->sk_write_queue.lock); } static void rawsock_report_error(struct sock *sk, int err) { pr_debug("sk=%p err=%d\n", sk, err); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_err = -err; sk->sk_error_report(sk); rawsock_write_queue_purge(sk); } static int rawsock_release(struct socket *sock) { struct sock *sk = sock->sk; pr_debug("sock=%p\n", sock); sock_orphan(sk); sock_put(sk); return 0; } static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, int len, int flags) { struct sock *sk = sock->sk; struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr; struct nfc_dev *dev; int rc = 0; pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags); if (!addr || len < sizeof(struct sockaddr_nfc) || addr->sa_family != AF_NFC) return 
-EINVAL; pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx, addr->target_idx, addr->nfc_protocol); lock_sock(sk); if (sock->state == SS_CONNECTED) { rc = -EISCONN; goto error; } dev = nfc_get_device(addr->dev_idx); if (!dev) { rc = -ENODEV; goto error; } rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); if (rc) goto put_dev; nfc_rawsock(sk)->dev = dev; nfc_rawsock(sk)->target_idx = addr->target_idx; sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; sk->sk_state_change(sk); release_sock(sk); return 0; put_dev: nfc_put_device(dev); error: release_sock(sk); return rc; } static int rawsock_add_header(struct sk_buff *skb) { *skb_push(skb, NFC_HEADER_SIZE) = 0; return 0; } static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb, int err) { struct sock *sk = (struct sock *) context; BUG_ON(in_irq()); pr_debug("sk=%p err=%d\n", sk, err); if (err) goto error; err = rawsock_add_header(skb); if (err) goto error; err = sock_queue_rcv_skb(sk, skb); if (err) goto error; spin_lock_bh(&sk->sk_write_queue.lock); if (!skb_queue_empty(&sk->sk_write_queue)) schedule_work(&nfc_rawsock(sk)->tx_work); else nfc_rawsock(sk)->tx_work_scheduled = false; spin_unlock_bh(&sk->sk_write_queue.lock); sock_put(sk); return; error: rawsock_report_error(sk, err); sock_put(sk); } static void rawsock_tx_work(struct work_struct *work) { struct sock *sk = to_rawsock_sk(work); struct nfc_dev *dev = nfc_rawsock(sk)->dev; u32 target_idx = nfc_rawsock(sk)->target_idx; struct sk_buff *skb; int rc; pr_debug("sk=%p target_idx=%u\n", sk, target_idx); if (sk->sk_shutdown & SEND_SHUTDOWN) { rawsock_write_queue_purge(sk); return; } skb = skb_dequeue(&sk->sk_write_queue); sock_hold(sk); rc = nfc_data_exchange(dev, target_idx, skb, rawsock_data_exchange_complete, sk); if (rc) { rawsock_report_error(sk, rc); sock_put(sk); } } static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = 
sock->sk; struct nfc_dev *dev = nfc_rawsock(sk)->dev; struct sk_buff *skb; int rc; pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len); if (msg->msg_namelen) return -EOPNOTSUPP; if (sock->state != SS_CONNECTED) return -ENOTCONN; skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc); if (skb == NULL) return rc; rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); if (rc < 0) { kfree_skb(skb); return rc; } spin_lock_bh(&sk->sk_write_queue.lock); __skb_queue_tail(&sk->sk_write_queue, skb); if (!nfc_rawsock(sk)->tx_work_scheduled) { schedule_work(&nfc_rawsock(sk)->tx_work); nfc_rawsock(sk)->tx_work_scheduled = true; } spin_unlock_bh(&sk->sk_write_queue.lock); return len; } static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; int copied; int rc; pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags); skb = skb_recv_datagram(sk, flags, noblock, &rc); if (!skb) return rc; msg->msg_namelen = 0; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); skb_free_datagram(sk, skb); return rc ? 
: copied; } static const struct proto_ops rawsock_ops = { .family = PF_NFC, .owner = THIS_MODULE, .release = rawsock_release, .bind = sock_no_bind, .connect = rawsock_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = rawsock_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, }; static void rawsock_destruct(struct sock *sk) { pr_debug("sk=%p\n", sk); if (sk->sk_state == TCP_ESTABLISHED) { nfc_deactivate_target(nfc_rawsock(sk)->dev, nfc_rawsock(sk)->target_idx); nfc_put_device(nfc_rawsock(sk)->dev); } skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Freeing alive NFC raw socket %p\n", sk); return; } } static int rawsock_create(struct net *net, struct socket *sock, const struct nfc_protocol *nfc_proto) { struct sock *sk; pr_debug("sock=%p\n", sock); if (sock->type != SOCK_SEQPACKET) return -ESOCKTNOSUPPORT; sock->ops = &rawsock_ops; sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sk->sk_protocol = nfc_proto->id; sk->sk_destruct = rawsock_destruct; sock->state = SS_UNCONNECTED; INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work); nfc_rawsock(sk)->tx_work_scheduled = false; return 0; } static struct proto rawsock_proto = { .name = "NFC_RAW", .owner = THIS_MODULE, .obj_size = sizeof(struct nfc_rawsock), }; static const struct nfc_protocol rawsock_nfc_proto = { .id = NFC_SOCKPROTO_RAW, .proto = &rawsock_proto, .owner = THIS_MODULE, .create = rawsock_create }; int __init rawsock_init(void) { int rc; rc = nfc_proto_register(&rawsock_nfc_proto); return rc; } void rawsock_exit(void) { nfc_proto_unregister(&rawsock_nfc_proto); }
gpl-2.0
andixlm/android_kernel_samsung_galaxys2plus-common
net/ipv4/ipcomp.c
3042
4458
/* * IP Payload Compression Protocol (IPComp) - RFC3173. * * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * Todo: * - Tunable compression parameters. * - Compression stats. * - Adaptive compression. */ #include <linux/module.h> #include <linux/err.h> #include <linux/rtnetlink.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/icmp.h> #include <net/ipcomp.h> #include <net/protocol.h> #include <net/sock.h> static void ipcomp4_err(struct sk_buff *skb, u32 info) { struct net *net = dev_net(skb->dev); __be32 spi; const struct iphdr *iph = (const struct iphdr *)skb->data; struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); struct xfrm_state *x; if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) return; spi = htonl(ntohs(ipch->cpi)); x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET); if (!x) return; NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI4\n", spi, &iph->daddr); xfrm_state_put(x); } /* We always hold one tunnel user reference to indicate a tunnel */ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) { struct net *net = xs_net(x); struct xfrm_state *t; t = xfrm_state_alloc(net); if (t == NULL) goto out; t->id.proto = IPPROTO_IPIP; t->id.spi = x->props.saddr.a4; t->id.daddr.a4 = x->id.daddr.a4; memcpy(&t->sel, &x->sel, sizeof(t->sel)); t->props.family = AF_INET; t->props.mode = x->props.mode; t->props.saddr.a4 = x->props.saddr.a4; t->props.flags = x->props.flags; memcpy(&t->mark, &x->mark, sizeof(t->mark)); if (xfrm_init_state(t)) goto error; atomic_set(&t->tunnel_users, 1); out: return t; error: t->km.state = XFRM_STATE_DEAD; xfrm_state_put(t); t = NULL; 
goto out; } /* * Must be protected by xfrm_cfg_mutex. State and tunnel user references are * always incremented on success. */ static int ipcomp_tunnel_attach(struct xfrm_state *x) { struct net *net = xs_net(x); int err = 0; struct xfrm_state *t; u32 mark = x->mark.v & x->mark.m; t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr.a4, x->props.saddr.a4, IPPROTO_IPIP, AF_INET); if (!t) { t = ipcomp_tunnel_create(x); if (!t) { err = -EINVAL; goto out; } xfrm_state_insert(t); xfrm_state_hold(t); } x->tunnel = t; atomic_inc(&t->tunnel_users); out: return err; } static int ipcomp4_init_state(struct xfrm_state *x) { int err = -EINVAL; x->props.header_len = 0; switch (x->props.mode) { case XFRM_MODE_TRANSPORT: break; case XFRM_MODE_TUNNEL: x->props.header_len += sizeof(struct iphdr); break; default: goto out; } err = ipcomp_init_state(x); if (err) goto out; if (x->props.mode == XFRM_MODE_TUNNEL) { err = ipcomp_tunnel_attach(x); if (err) goto out; } err = 0; out: return err; } static const struct xfrm_type ipcomp_type = { .description = "IPCOMP4", .owner = THIS_MODULE, .proto = IPPROTO_COMP, .init_state = ipcomp4_init_state, .destructor = ipcomp_destroy, .input = ipcomp_input, .output = ipcomp_output }; static const struct net_protocol ipcomp4_protocol = { .handler = xfrm4_rcv, .err_handler = ipcomp4_err, .no_policy = 1, }; static int __init ipcomp4_init(void) { if (xfrm_register_type(&ipcomp_type, AF_INET) < 0) { printk(KERN_INFO "ipcomp init: can't add xfrm type\n"); return -EAGAIN; } if (inet_add_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) { printk(KERN_INFO "ipcomp init: can't add protocol\n"); xfrm_unregister_type(&ipcomp_type, AF_INET); return -EAGAIN; } return 0; } static void __exit ipcomp4_fini(void) { if (inet_del_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) printk(KERN_INFO "ip ipcomp close: can't remove protocol\n"); if (xfrm_unregister_type(&ipcomp_type, AF_INET) < 0) printk(KERN_INFO "ip ipcomp close: can't remove xfrm type\n"); } 
module_init(ipcomp4_init); module_exit(ipcomp4_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp/IPv4) - RFC3173"); MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_COMP);
gpl-2.0
derekhe/huawei-g330d-u8825d-kernel
drivers/i2c/busses/scx200_acb.c
3042
14239
/* Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> National Semiconductor SCx200 ACCESS.bus support Also supports the AMD CS5535 and AMD CS5536 Based on i2c-keywest.c which is: Copyright (c) 2001 Benjamin Herrenschmidt <benh@kernel.crashing.org> Copyright (c) 2000 Philip Edelbrock <phil@stimpy.netroedge.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/scx200.h> #define NAME "scx200_acb" MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); MODULE_DESCRIPTION("NatSemi SCx200 ACCESS.bus Driver"); MODULE_ALIAS("platform:cs5535-smb"); MODULE_LICENSE("GPL"); #define MAX_DEVICES 4 static int base[MAX_DEVICES] = { 0x820, 0x840 }; module_param_array(base, int, NULL, 0); MODULE_PARM_DESC(base, "Base addresses for the ACCESS.bus controllers"); #define POLL_TIMEOUT (HZ/5) enum scx200_acb_state { state_idle, state_address, state_command, state_repeat_start, state_quick, state_read, state_write, }; static const char *scx200_acb_state_name[] = { "idle", "address", "command", "repeat_start", "quick", "read", "write", }; /* Physical interface */ struct 
scx200_acb_iface { struct scx200_acb_iface *next; struct i2c_adapter adapter; unsigned base; struct mutex mutex; /* State machine data */ enum scx200_acb_state state; int result; u8 address_byte; u8 command; u8 *ptr; char needs_reset; unsigned len; }; /* Register Definitions */ #define ACBSDA (iface->base + 0) #define ACBST (iface->base + 1) #define ACBST_SDAST 0x40 /* SDA Status */ #define ACBST_BER 0x20 #define ACBST_NEGACK 0x10 /* Negative Acknowledge */ #define ACBST_STASTR 0x08 /* Stall After Start */ #define ACBST_MASTER 0x02 #define ACBCST (iface->base + 2) #define ACBCST_BB 0x02 #define ACBCTL1 (iface->base + 3) #define ACBCTL1_STASTRE 0x80 #define ACBCTL1_NMINTE 0x40 #define ACBCTL1_ACK 0x10 #define ACBCTL1_STOP 0x02 #define ACBCTL1_START 0x01 #define ACBADDR (iface->base + 4) #define ACBCTL2 (iface->base + 5) #define ACBCTL2_ENABLE 0x01 /************************************************************************/ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status) { const char *errmsg; dev_dbg(&iface->adapter.dev, "state %s, status = 0x%02x\n", scx200_acb_state_name[iface->state], status); if (status & ACBST_BER) { errmsg = "bus error"; goto error; } if (!(status & ACBST_MASTER)) { errmsg = "not master"; goto error; } if (status & ACBST_NEGACK) { dev_dbg(&iface->adapter.dev, "negative ack in state %s\n", scx200_acb_state_name[iface->state]); iface->state = state_idle; iface->result = -ENXIO; outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); outb(ACBST_STASTR | ACBST_NEGACK, ACBST); /* Reset the status register */ outb(0, ACBST); return; } switch (iface->state) { case state_idle: dev_warn(&iface->adapter.dev, "interrupt in idle state\n"); break; case state_address: /* Do a pointer write first */ outb(iface->address_byte & ~1, ACBSDA); iface->state = state_command; break; case state_command: outb(iface->command, ACBSDA); if (iface->address_byte & 1) iface->state = state_repeat_start; else iface->state = state_write; break; case 
state_repeat_start: outb(inb(ACBCTL1) | ACBCTL1_START, ACBCTL1); /* fallthrough */ case state_quick: if (iface->address_byte & 1) { if (iface->len == 1) outb(inb(ACBCTL1) | ACBCTL1_ACK, ACBCTL1); else outb(inb(ACBCTL1) & ~ACBCTL1_ACK, ACBCTL1); outb(iface->address_byte, ACBSDA); iface->state = state_read; } else { outb(iface->address_byte, ACBSDA); iface->state = state_write; } break; case state_read: /* Set ACK if _next_ byte will be the last one */ if (iface->len == 2) outb(inb(ACBCTL1) | ACBCTL1_ACK, ACBCTL1); else outb(inb(ACBCTL1) & ~ACBCTL1_ACK, ACBCTL1); if (iface->len == 1) { iface->result = 0; iface->state = state_idle; outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); } *iface->ptr++ = inb(ACBSDA); --iface->len; break; case state_write: if (iface->len == 0) { iface->result = 0; iface->state = state_idle; outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); break; } outb(*iface->ptr++, ACBSDA); --iface->len; break; } return; error: dev_err(&iface->adapter.dev, "%s in state %s (addr=0x%02x, len=%d, status=0x%02x)\n", errmsg, scx200_acb_state_name[iface->state], iface->address_byte, iface->len, status); iface->state = state_idle; iface->result = -EIO; iface->needs_reset = 1; } static void scx200_acb_poll(struct scx200_acb_iface *iface) { u8 status; unsigned long timeout; timeout = jiffies + POLL_TIMEOUT; while (1) { status = inb(ACBST); /* Reset the status register to avoid the hang */ outb(0, ACBST); if ((status & (ACBST_SDAST|ACBST_BER|ACBST_NEGACK)) != 0) { scx200_acb_machine(iface, status); return; } if (time_after(jiffies, timeout)) break; cpu_relax(); cond_resched(); } dev_err(&iface->adapter.dev, "timeout in state %s\n", scx200_acb_state_name[iface->state]); iface->state = state_idle; iface->result = -EIO; iface->needs_reset = 1; } static void scx200_acb_reset(struct scx200_acb_iface *iface) { /* Disable the ACCESS.bus device and Configure the SCL frequency: 16 clock cycles */ outb(0x70, ACBCTL2); /* Polling mode */ outb(0, ACBCTL1); /* Disable slave address */ 
outb(0, ACBADDR); /* Enable the ACCESS.bus device */ outb(inb(ACBCTL2) | ACBCTL2_ENABLE, ACBCTL2); /* Free STALL after START */ outb(inb(ACBCTL1) & ~(ACBCTL1_STASTRE | ACBCTL1_NMINTE), ACBCTL1); /* Send a STOP */ outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); /* Clear BER, NEGACK and STASTR bits */ outb(ACBST_BER | ACBST_NEGACK | ACBST_STASTR, ACBST); /* Clear BB bit */ outb(inb(ACBCST) | ACBCST_BB, ACBCST); } static s32 scx200_acb_smbus_xfer(struct i2c_adapter *adapter, u16 address, unsigned short flags, char rw, u8 command, int size, union i2c_smbus_data *data) { struct scx200_acb_iface *iface = i2c_get_adapdata(adapter); int len; u8 *buffer; u16 cur_word; int rc; switch (size) { case I2C_SMBUS_QUICK: len = 0; buffer = NULL; break; case I2C_SMBUS_BYTE: len = 1; buffer = rw ? &data->byte : &command; break; case I2C_SMBUS_BYTE_DATA: len = 1; buffer = &data->byte; break; case I2C_SMBUS_WORD_DATA: len = 2; cur_word = cpu_to_le16(data->word); buffer = (u8 *)&cur_word; break; case I2C_SMBUS_I2C_BLOCK_DATA: len = data->block[0]; if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) return -EINVAL; buffer = &data->block[1]; break; default: return -EINVAL; } dev_dbg(&adapter->dev, "size=%d, address=0x%x, command=0x%x, len=%d, read=%d\n", size, address, command, len, rw); if (!len && rw == I2C_SMBUS_READ) { dev_dbg(&adapter->dev, "zero length read\n"); return -EINVAL; } mutex_lock(&iface->mutex); iface->address_byte = (address << 1) | rw; iface->command = command; iface->ptr = buffer; iface->len = len; iface->result = -EINVAL; iface->needs_reset = 0; outb(inb(ACBCTL1) | ACBCTL1_START, ACBCTL1); if (size == I2C_SMBUS_QUICK || size == I2C_SMBUS_BYTE) iface->state = state_quick; else iface->state = state_address; while (iface->state != state_idle) scx200_acb_poll(iface); if (iface->needs_reset) scx200_acb_reset(iface); rc = iface->result; mutex_unlock(&iface->mutex); if (rc == 0 && size == I2C_SMBUS_WORD_DATA && rw == I2C_SMBUS_READ) data->word = le16_to_cpu(cur_word); #ifdef DEBUG 
dev_dbg(&adapter->dev, "transfer done, result: %d", rc); if (buffer) { int i; printk(" data:"); for (i = 0; i < len; ++i) printk(" %02x", buffer[i]); } printk("\n"); #endif return rc; } static u32 scx200_acb_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_I2C_BLOCK; } /* For now, we only handle combined mode (smbus) */ static const struct i2c_algorithm scx200_acb_algorithm = { .smbus_xfer = scx200_acb_smbus_xfer, .functionality = scx200_acb_func, }; static struct scx200_acb_iface *scx200_acb_list; static DEFINE_MUTEX(scx200_acb_list_mutex); static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface) { u8 val; /* Disable the ACCESS.bus device and Configure the SCL frequency: 16 clock cycles */ outb(0x70, ACBCTL2); if (inb(ACBCTL2) != 0x70) { pr_debug(NAME ": ACBCTL2 readback failed\n"); return -ENXIO; } outb(inb(ACBCTL1) | ACBCTL1_NMINTE, ACBCTL1); val = inb(ACBCTL1); if (val) { pr_debug(NAME ": disabled, but ACBCTL1=0x%02x\n", val); return -ENXIO; } outb(inb(ACBCTL2) | ACBCTL2_ENABLE, ACBCTL2); outb(inb(ACBCTL1) | ACBCTL1_NMINTE, ACBCTL1); val = inb(ACBCTL1); if ((val & ACBCTL1_NMINTE) != ACBCTL1_NMINTE) { pr_debug(NAME ": enabled, but NMINTE won't be set, " "ACBCTL1=0x%02x\n", val); return -ENXIO; } return 0; } static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text, struct device *dev, int index) { struct scx200_acb_iface *iface; struct i2c_adapter *adapter; iface = kzalloc(sizeof(*iface), GFP_KERNEL); if (!iface) { printk(KERN_ERR NAME ": can't allocate memory\n"); return NULL; } adapter = &iface->adapter; i2c_set_adapdata(adapter, iface); snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index); adapter->owner = THIS_MODULE; adapter->algo = &scx200_acb_algorithm; adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adapter->dev.parent = dev; mutex_init(&iface->mutex); return iface; } static int __devinit 
scx200_acb_create(struct scx200_acb_iface *iface) { struct i2c_adapter *adapter; int rc; adapter = &iface->adapter; rc = scx200_acb_probe(iface); if (rc) { printk(KERN_WARNING NAME ": probe failed\n"); return rc; } scx200_acb_reset(iface); if (i2c_add_adapter(adapter) < 0) { printk(KERN_ERR NAME ": failed to register\n"); return -ENODEV; } if (!adapter->dev.parent) { /* If there's no dev, we're tracking (ISA) ifaces manually */ mutex_lock(&scx200_acb_list_mutex); iface->next = scx200_acb_list; scx200_acb_list = iface; mutex_unlock(&scx200_acb_list_mutex); } return 0; } static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text, unsigned long base, int index, struct device *dev) { struct scx200_acb_iface *iface; int rc; iface = scx200_create_iface(text, dev, index); if (iface == NULL) return NULL; if (!request_region(base, 8, iface->adapter.name)) { printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n", base, base + 8 - 1); goto errout_free; } iface->base = base; rc = scx200_acb_create(iface); if (rc == 0) return iface; release_region(base, 8); errout_free: kfree(iface); return NULL; } static int __devinit scx200_probe(struct platform_device *pdev) { struct scx200_acb_iface *iface; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) { dev_err(&pdev->dev, "can't fetch device resource info\n"); return -ENODEV; } iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev); if (!iface) return -EIO; dev_info(&pdev->dev, "SCx200 device '%s' registered\n", iface->adapter.name); platform_set_drvdata(pdev, iface); return 0; } static void __devexit scx200_cleanup_iface(struct scx200_acb_iface *iface) { i2c_del_adapter(&iface->adapter); release_region(iface->base, 8); kfree(iface); } static int __devexit scx200_remove(struct platform_device *pdev) { struct scx200_acb_iface *iface; iface = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); scx200_cleanup_iface(iface); return 0; } static struct platform_driver 
scx200_pci_drv = { .driver = { .name = "cs5535-smb", .owner = THIS_MODULE, }, .probe = scx200_probe, .remove = __devexit_p(scx200_remove), }; static const struct pci_device_id scx200_isa[] __initconst = { { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) }, { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) }, { 0, } }; static __init void scx200_scan_isa(void) { int i; if (!pci_dev_present(scx200_isa)) return; for (i = 0; i < MAX_DEVICES; ++i) { if (base[i] == 0) continue; /* XXX: should we care about failures? */ scx200_create_dev("SCx200", base[i], i, NULL); } } static int __init scx200_acb_init(void) { pr_debug(NAME ": NatSemi SCx200 ACCESS.bus Driver\n"); /* First scan for ISA-based devices */ scx200_scan_isa(); /* XXX: should we care about errors? */ /* If at least one bus was created, init must succeed */ if (scx200_acb_list) return 0; /* No ISA devices; register the platform driver for PCI-based devices */ return platform_driver_register(&scx200_pci_drv); } static void __exit scx200_acb_cleanup(void) { struct scx200_acb_iface *iface; platform_driver_unregister(&scx200_pci_drv); mutex_lock(&scx200_acb_list_mutex); while ((iface = scx200_acb_list) != NULL) { scx200_acb_list = iface->next; mutex_unlock(&scx200_acb_list_mutex); scx200_cleanup_iface(iface); mutex_lock(&scx200_acb_list_mutex); } mutex_unlock(&scx200_acb_list_mutex); } module_init(scx200_acb_init); module_exit(scx200_acb_cleanup);
gpl-2.0
Nyks45/Veno-M
drivers/s390/char/keyboard.c
3810
12561
/* * ebcdic keycode functions for s390 console drivers * * S390 version * Copyright IBM Corp. 2003 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), */ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/sysrq.h> #include <linux/consolemap.h> #include <linux/kbd_kern.h> #include <linux/kbd_diacr.h> #include <asm/uaccess.h> #include "keyboard.h" /* * Handler Tables. */ #define K_HANDLERS\ k_self, k_fn, k_spec, k_ignore,\ k_dead, k_ignore, k_ignore, k_ignore,\ k_ignore, k_ignore, k_ignore, k_ignore,\ k_ignore, k_ignore, k_ignore, k_ignore typedef void (k_handler_fn)(struct kbd_data *, unsigned char); static k_handler_fn K_HANDLERS; static k_handler_fn *k_handler[16] = { K_HANDLERS }; /* maximum values each key_handler can handle */ static const int kbd_max_vals[] = { 255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0, NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals); static unsigned char ret_diacr[NR_DEAD] = { '`', '\'', '^', '~', '"', ',' }; /* * Alloc/free of kbd_data structures. 
*/ struct kbd_data * kbd_alloc(void) { struct kbd_data *kbd; int i; kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL); if (!kbd) goto out; kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL); if (!kbd->key_maps) goto out_kbd; for (i = 0; i < ARRAY_SIZE(key_maps); i++) { if (key_maps[i]) { kbd->key_maps[i] = kmemdup(key_maps[i], sizeof(u_short) * NR_KEYS, GFP_KERNEL); if (!kbd->key_maps[i]) goto out_maps; } } kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL); if (!kbd->func_table) goto out_maps; for (i = 0; i < ARRAY_SIZE(func_table); i++) { if (func_table[i]) { kbd->func_table[i] = kstrdup(func_table[i], GFP_KERNEL); if (!kbd->func_table[i]) goto out_func; } } kbd->fn_handler = kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL); if (!kbd->fn_handler) goto out_func; kbd->accent_table = kmemdup(accent_table, sizeof(struct kbdiacruc) * MAX_DIACR, GFP_KERNEL); if (!kbd->accent_table) goto out_fn_handler; kbd->accent_table_size = accent_table_size; return kbd; out_fn_handler: kfree(kbd->fn_handler); out_func: for (i = 0; i < ARRAY_SIZE(func_table); i++) kfree(kbd->func_table[i]); kfree(kbd->func_table); out_maps: for (i = 0; i < ARRAY_SIZE(key_maps); i++) kfree(kbd->key_maps[i]); kfree(kbd->key_maps); out_kbd: kfree(kbd); out: return NULL; } void kbd_free(struct kbd_data *kbd) { int i; kfree(kbd->accent_table); kfree(kbd->fn_handler); for (i = 0; i < ARRAY_SIZE(func_table); i++) kfree(kbd->func_table[i]); kfree(kbd->func_table); for (i = 0; i < ARRAY_SIZE(key_maps); i++) kfree(kbd->key_maps[i]); kfree(kbd->key_maps); kfree(kbd); } /* * Generate ascii -> ebcdic translation table from kbd_data. 
*/ void kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc) { unsigned short *keymap, keysym; int i, j, k; memset(ascebc, 0x40, 256); for (i = 0; i < ARRAY_SIZE(key_maps); i++) { keymap = kbd->key_maps[i]; if (!keymap) continue; for (j = 0; j < NR_KEYS; j++) { k = ((i & 1) << 7) + j; keysym = keymap[j]; if (KTYP(keysym) == (KT_LATIN | 0xf0) || KTYP(keysym) == (KT_LETTER | 0xf0)) ascebc[KVAL(keysym)] = k; else if (KTYP(keysym) == (KT_DEAD | 0xf0)) ascebc[ret_diacr[KVAL(keysym)]] = k; } } } #if 0 /* * Generate ebcdic -> ascii translation table from kbd_data. */ void kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc) { unsigned short *keymap, keysym; int i, j, k; memset(ebcasc, ' ', 256); for (i = 0; i < ARRAY_SIZE(key_maps); i++) { keymap = kbd->key_maps[i]; if (!keymap) continue; for (j = 0; j < NR_KEYS; j++) { keysym = keymap[j]; k = ((i & 1) << 7) + j; if (KTYP(keysym) == (KT_LATIN | 0xf0) || KTYP(keysym) == (KT_LETTER | 0xf0)) ebcasc[k] = KVAL(keysym); else if (KTYP(keysym) == (KT_DEAD | 0xf0)) ebcasc[k] = ret_diacr[KVAL(keysym)]; } } } #endif /* * We have a combining character DIACR here, followed by the character CH. * If the combination occurs in the table, return the corresponding value. * Otherwise, if CH is a space or equals DIACR, return DIACR. * Otherwise, conclude that DIACR was not combining after all, * queue it and return CH. */ static unsigned int handle_diacr(struct kbd_data *kbd, unsigned int ch) { int i, d; d = kbd->diacr; kbd->diacr = 0; for (i = 0; i < kbd->accent_table_size; i++) { if (kbd->accent_table[i].diacr == d && kbd->accent_table[i].base == ch) return kbd->accent_table[i].result; } if (ch == ' ' || ch == d) return d; kbd_put_queue(kbd->port, d); return ch; } /* * Handle dead key. */ static void k_dead(struct kbd_data *kbd, unsigned char value) { value = ret_diacr[value]; kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value); } /* * Normal character handler. 
*/ static void k_self(struct kbd_data *kbd, unsigned char value) { if (kbd->diacr) value = handle_diacr(kbd, value); kbd_put_queue(kbd->port, value); } /* * Special key handlers */ static void k_ignore(struct kbd_data *kbd, unsigned char value) { } /* * Function key handler. */ static void k_fn(struct kbd_data *kbd, unsigned char value) { if (kbd->func_table[value]) kbd_puts_queue(kbd->port, kbd->func_table[value]); } static void k_spec(struct kbd_data *kbd, unsigned char value) { if (value >= NR_FN_HANDLER) return; if (kbd->fn_handler[value]) kbd->fn_handler[value](kbd); } /* * Put utf8 character to tty flip buffer. * UTF-8 is defined for words of up to 31 bits, * but we need only 16 bits here */ static void to_utf8(struct tty_port *port, ushort c) { if (c < 0x80) /* 0******* */ kbd_put_queue(port, c); else if (c < 0x800) { /* 110***** 10****** */ kbd_put_queue(port, 0xc0 | (c >> 6)); kbd_put_queue(port, 0x80 | (c & 0x3f)); } else { /* 1110**** 10****** 10****** */ kbd_put_queue(port, 0xe0 | (c >> 12)); kbd_put_queue(port, 0x80 | ((c >> 6) & 0x3f)); kbd_put_queue(port, 0x80 | (c & 0x3f)); } } /* * Process keycode. */ void kbd_keycode(struct kbd_data *kbd, unsigned int keycode) { unsigned short keysym; unsigned char type, value; if (!kbd) return; if (keycode >= 384) keysym = kbd->key_maps[5][keycode - 384]; else if (keycode >= 256) keysym = kbd->key_maps[4][keycode - 256]; else if (keycode >= 128) keysym = kbd->key_maps[1][keycode - 128]; else keysym = kbd->key_maps[0][keycode]; type = KTYP(keysym); if (type >= 0xf0) { type -= 0xf0; if (type == KT_LETTER) type = KT_LATIN; value = KVAL(keysym); #ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */ if (kbd->sysrq) { if (kbd->sysrq == K(KT_LATIN, '-')) { kbd->sysrq = 0; handle_sysrq(value); return; } if (value == '-') { kbd->sysrq = K(KT_LATIN, '-'); return; } /* Incomplete sysrq sequence. 
*/ (*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq)); kbd->sysrq = 0; } else if ((type == KT_LATIN && value == '^') || (type == KT_DEAD && ret_diacr[value] == '^')) { kbd->sysrq = K(type, value); return; } #endif (*k_handler[type])(kbd, value); } else to_utf8(kbd->port, keysym); } /* * Ioctl stuff. */ static int do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe, int cmd, int perm) { struct kbentry tmp; ushort *key_map, val, ov; if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry))) return -EFAULT; #if NR_KEYS < 256 if (tmp.kb_index >= NR_KEYS) return -EINVAL; #endif #if MAX_NR_KEYMAPS < 256 if (tmp.kb_table >= MAX_NR_KEYMAPS) return -EINVAL; #endif switch (cmd) { case KDGKBENT: key_map = kbd->key_maps[tmp.kb_table]; if (key_map) { val = U(key_map[tmp.kb_index]); if (KTYP(val) >= KBD_NR_TYPES) val = K_HOLE; } else val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP); return put_user(val, &user_kbe->kb_value); case KDSKBENT: if (!perm) return -EPERM; if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) { /* disallocate map */ key_map = kbd->key_maps[tmp.kb_table]; if (key_map) { kbd->key_maps[tmp.kb_table] = NULL; kfree(key_map); } break; } if (KTYP(tmp.kb_value) >= KBD_NR_TYPES) return -EINVAL; if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)]) return -EINVAL; if (!(key_map = kbd->key_maps[tmp.kb_table])) { int j; key_map = kmalloc(sizeof(plain_map), GFP_KERNEL); if (!key_map) return -ENOMEM; kbd->key_maps[tmp.kb_table] = key_map; for (j = 0; j < NR_KEYS; j++) key_map[j] = U(K_HOLE); } ov = U(key_map[tmp.kb_index]); if (tmp.kb_value == ov) break; /* nothing to do */ /* * Attention Key. */ if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) && !capable(CAP_SYS_ADMIN)) return -EPERM; key_map[tmp.kb_index] = U(tmp.kb_value); break; } return 0; } static int do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs, int cmd, int perm) { unsigned char kb_func; char *p; int len; /* Get u_kbs->kb_func. 
*/ if (get_user(kb_func, &u_kbs->kb_func)) return -EFAULT; #if MAX_NR_FUNC < 256 if (kb_func >= MAX_NR_FUNC) return -EINVAL; #endif switch (cmd) { case KDGKBSENT: p = kbd->func_table[kb_func]; if (p) { len = strlen(p); if (len >= sizeof(u_kbs->kb_string)) len = sizeof(u_kbs->kb_string) - 1; if (copy_to_user(u_kbs->kb_string, p, len)) return -EFAULT; } else len = 0; if (put_user('\0', u_kbs->kb_string + len)) return -EFAULT; break; case KDSKBSENT: if (!perm) return -EPERM; len = strnlen_user(u_kbs->kb_string, sizeof(u_kbs->kb_string) - 1); if (!len) return -EFAULT; if (len > sizeof(u_kbs->kb_string) - 1) return -EINVAL; p = kmalloc(len + 1, GFP_KERNEL); if (!p) return -ENOMEM; if (copy_from_user(p, u_kbs->kb_string, len)) { kfree(p); return -EFAULT; } p[len] = 0; kfree(kbd->func_table[kb_func]); kbd->func_table[kb_func] = p; break; } return 0; } int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg) { struct tty_struct *tty; void __user *argp; unsigned int ct; int perm; argp = (void __user *)arg; /* * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG. 
*/ tty = tty_port_tty_get(kbd->port); /* FIXME this test is pretty racy */ perm = current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG); tty_kref_put(tty); switch (cmd) { case KDGKBTYPE: return put_user(KB_101, (char __user *)argp); case KDGKBENT: case KDSKBENT: return do_kdsk_ioctl(kbd, argp, cmd, perm); case KDGKBSENT: case KDSKBSENT: return do_kdgkb_ioctl(kbd, argp, cmd, perm); case KDGKBDIACR: { struct kbdiacrs __user *a = argp; struct kbdiacr diacr; int i; if (put_user(kbd->accent_table_size, &a->kb_cnt)) return -EFAULT; for (i = 0; i < kbd->accent_table_size; i++) { diacr.diacr = kbd->accent_table[i].diacr; diacr.base = kbd->accent_table[i].base; diacr.result = kbd->accent_table[i].result; if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr))) return -EFAULT; } return 0; } case KDGKBDIACRUC: { struct kbdiacrsuc __user *a = argp; ct = kbd->accent_table_size; if (put_user(ct, &a->kb_cnt)) return -EFAULT; if (copy_to_user(a->kbdiacruc, kbd->accent_table, ct * sizeof(struct kbdiacruc))) return -EFAULT; return 0; } case KDSKBDIACR: { struct kbdiacrs __user *a = argp; struct kbdiacr diacr; int i; if (!perm) return -EPERM; if (get_user(ct, &a->kb_cnt)) return -EFAULT; if (ct >= MAX_DIACR) return -EINVAL; kbd->accent_table_size = ct; for (i = 0; i < ct; i++) { if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr))) return -EFAULT; kbd->accent_table[i].diacr = diacr.diacr; kbd->accent_table[i].base = diacr.base; kbd->accent_table[i].result = diacr.result; } return 0; } case KDSKBDIACRUC: { struct kbdiacrsuc __user *a = argp; if (!perm) return -EPERM; if (get_user(ct, &a->kb_cnt)) return -EFAULT; if (ct >= MAX_DIACR) return -EINVAL; kbd->accent_table_size = ct; if (copy_from_user(kbd->accent_table, a->kbdiacruc, ct * sizeof(struct kbdiacruc))) return -EFAULT; return 0; } default: return -ENOIOCTLCMD; } } EXPORT_SYMBOL(kbd_ioctl); EXPORT_SYMBOL(kbd_ascebc); EXPORT_SYMBOL(kbd_free); EXPORT_SYMBOL(kbd_alloc); EXPORT_SYMBOL(kbd_keycode);
gpl-2.0
hiikezoe/android_kernel_nec_is11n
sound/drivers/opl3/opl3_seq.c
4834
7609
/* * Copyright (c) by Uros Bizjak <uros@kss-loka.si> * * Midi Sequencer interface routines for OPL2/OPL3/OPL4 FM * * OPL2/3 FM instrument loader: * alsa-tools/seq/sbiload/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include "opl3_voice.h" #include <linux/init.h> #include <linux/moduleparam.h> #include <sound/initval.h> MODULE_AUTHOR("Uros Bizjak <uros@kss-loka.si>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ALSA driver for OPL3 FM synth"); int use_internal_drums = 0; module_param(use_internal_drums, bool, 0444); MODULE_PARM_DESC(use_internal_drums, "Enable internal OPL2/3 drums."); int snd_opl3_synth_use_inc(struct snd_opl3 * opl3) { if (!try_module_get(opl3->card->module)) return -EFAULT; return 0; } void snd_opl3_synth_use_dec(struct snd_opl3 * opl3) { module_put(opl3->card->module); } int snd_opl3_synth_setup(struct snd_opl3 * opl3) { int idx; struct snd_hwdep *hwdep = opl3->hwdep; mutex_lock(&hwdep->open_mutex); if (hwdep->used) { mutex_unlock(&hwdep->open_mutex); return -EBUSY; } hwdep->used++; mutex_unlock(&hwdep->open_mutex); snd_opl3_reset(opl3); for (idx = 0; idx < MAX_OPL3_VOICES; idx++) { opl3->voices[idx].state = SNDRV_OPL3_ST_OFF; opl3->voices[idx].time = 0; opl3->voices[idx].keyon_reg = 0x00; } opl3->use_time = 0; opl3->connection_reg = 0x00; if (opl3->hardware >= OPL3_HW_OPL3) { /* Clear 4-op 
connections */ opl3->command(opl3, OPL3_RIGHT | OPL3_REG_CONNECTION_SELECT, opl3->connection_reg); opl3->max_voices = MAX_OPL3_VOICES; } return 0; } void snd_opl3_synth_cleanup(struct snd_opl3 * opl3) { unsigned long flags; struct snd_hwdep *hwdep; /* Stop system timer */ spin_lock_irqsave(&opl3->sys_timer_lock, flags); if (opl3->sys_timer_status) { del_timer(&opl3->tlist); opl3->sys_timer_status = 0; } spin_unlock_irqrestore(&opl3->sys_timer_lock, flags); snd_opl3_reset(opl3); hwdep = opl3->hwdep; mutex_lock(&hwdep->open_mutex); hwdep->used--; mutex_unlock(&hwdep->open_mutex); wake_up(&hwdep->open_wait); } static int snd_opl3_synth_use(void *private_data, struct snd_seq_port_subscribe * info) { struct snd_opl3 *opl3 = private_data; int err; if ((err = snd_opl3_synth_setup(opl3)) < 0) return err; if (use_internal_drums) { /* Percussion mode */ opl3->voices[6].state = opl3->voices[7].state = opl3->voices[8].state = SNDRV_OPL3_ST_NOT_AVAIL; snd_opl3_load_drums(opl3); opl3->drum_reg = OPL3_PERCUSSION_ENABLE; opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION, opl3->drum_reg); } else { opl3->drum_reg = 0x00; } if (info->sender.client != SNDRV_SEQ_CLIENT_SYSTEM) { if ((err = snd_opl3_synth_use_inc(opl3)) < 0) return err; } opl3->synth_mode = SNDRV_OPL3_MODE_SEQ; return 0; } static int snd_opl3_synth_unuse(void *private_data, struct snd_seq_port_subscribe * info) { struct snd_opl3 *opl3 = private_data; snd_opl3_synth_cleanup(opl3); if (info->sender.client != SNDRV_SEQ_CLIENT_SYSTEM) snd_opl3_synth_use_dec(opl3); return 0; } /* * MIDI emulation operators */ struct snd_midi_op opl3_ops = { .note_on = snd_opl3_note_on, .note_off = snd_opl3_note_off, .key_press = snd_opl3_key_press, .note_terminate = snd_opl3_terminate_note, .control = snd_opl3_control, .nrpn = snd_opl3_nrpn, .sysex = snd_opl3_sysex, }; static int snd_opl3_synth_event_input(struct snd_seq_event * ev, int direct, void *private_data, int atomic, int hop) { struct snd_opl3 *opl3 = private_data; 
snd_midi_process_event(&opl3_ops, ev, opl3->chset); return 0; } /* ------------------------------ */ static void snd_opl3_synth_free_port(void *private_data) { struct snd_opl3 *opl3 = private_data; snd_midi_channel_free_set(opl3->chset); } static int snd_opl3_synth_create_port(struct snd_opl3 * opl3) { struct snd_seq_port_callback callbacks; char name[32]; int voices, opl_ver; voices = (opl3->hardware < OPL3_HW_OPL3) ? MAX_OPL2_VOICES : MAX_OPL3_VOICES; opl3->chset = snd_midi_channel_alloc_set(16); if (opl3->chset == NULL) return -ENOMEM; opl3->chset->private_data = opl3; memset(&callbacks, 0, sizeof(callbacks)); callbacks.owner = THIS_MODULE; callbacks.use = snd_opl3_synth_use; callbacks.unuse = snd_opl3_synth_unuse; callbacks.event_input = snd_opl3_synth_event_input; callbacks.private_free = snd_opl3_synth_free_port; callbacks.private_data = opl3; opl_ver = (opl3->hardware & OPL3_HW_MASK) >> 8; sprintf(name, "OPL%i FM Port", opl_ver); opl3->chset->client = opl3->seq_client; opl3->chset->port = snd_seq_event_port_attach(opl3->seq_client, &callbacks, SNDRV_SEQ_PORT_CAP_WRITE | SNDRV_SEQ_PORT_CAP_SUBS_WRITE, SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC | SNDRV_SEQ_PORT_TYPE_MIDI_GM | SNDRV_SEQ_PORT_TYPE_DIRECT_SAMPLE | SNDRV_SEQ_PORT_TYPE_HARDWARE | SNDRV_SEQ_PORT_TYPE_SYNTHESIZER, 16, voices, name); if (opl3->chset->port < 0) { int port; port = opl3->chset->port; snd_midi_channel_free_set(opl3->chset); return port; } return 0; } /* ------------------------------ */ static int snd_opl3_seq_new_device(struct snd_seq_device *dev) { struct snd_opl3 *opl3; int client, err; char name[32]; int opl_ver; opl3 = *(struct snd_opl3 **)SNDRV_SEQ_DEVICE_ARGPTR(dev); if (opl3 == NULL) return -EINVAL; spin_lock_init(&opl3->voice_lock); opl3->seq_client = -1; /* allocate new client */ opl_ver = (opl3->hardware & OPL3_HW_MASK) >> 8; sprintf(name, "OPL%i FM synth", opl_ver); client = opl3->seq_client = snd_seq_create_kernel_client(opl3->card, opl3->seq_dev_num, name); if (client < 0) return 
client; if ((err = snd_opl3_synth_create_port(opl3)) < 0) { snd_seq_delete_kernel_client(client); opl3->seq_client = -1; return err; } /* setup system timer */ init_timer(&opl3->tlist); opl3->tlist.function = snd_opl3_timer_func; opl3->tlist.data = (unsigned long) opl3; spin_lock_init(&opl3->sys_timer_lock); opl3->sys_timer_status = 0; #ifdef CONFIG_SND_SEQUENCER_OSS snd_opl3_init_seq_oss(opl3, name); #endif return 0; } static int snd_opl3_seq_delete_device(struct snd_seq_device *dev) { struct snd_opl3 *opl3; opl3 = *(struct snd_opl3 **)SNDRV_SEQ_DEVICE_ARGPTR(dev); if (opl3 == NULL) return -EINVAL; #ifdef CONFIG_SND_SEQUENCER_OSS snd_opl3_free_seq_oss(opl3); #endif if (opl3->seq_client >= 0) { snd_seq_delete_kernel_client(opl3->seq_client); opl3->seq_client = -1; } return 0; } static int __init alsa_opl3_seq_init(void) { static struct snd_seq_dev_ops ops = { snd_opl3_seq_new_device, snd_opl3_seq_delete_device }; return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL3, &ops, sizeof(struct snd_opl3 *)); } static void __exit alsa_opl3_seq_exit(void) { snd_seq_device_unregister_driver(SNDRV_SEQ_DEV_ID_OPL3); } module_init(alsa_opl3_seq_init) module_exit(alsa_opl3_seq_exit)
gpl-2.0
SlimRoms/kernel_sony_apq8064
arch/arm/plat-orion/pcie.c
4834
7329
/*
 * arch/arm/plat-orion/pcie.c
 *
 * Marvell Orion SoC PCIe handling.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/mbus.h>
#include <asm/mach/pci.h>
#include <plat/pcie.h>
#include <plat/addr-map.h>
#include <linux/delay.h>

/*
 * PCIe unit register offsets.
 */
#define PCIE_DEV_ID_OFF		0x0000
#define PCIE_CMD_OFF		0x0004
#define PCIE_DEV_REV_OFF	0x0008
#define PCIE_BAR_LO_OFF(n)	(0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n)	(0x0014 + ((n) << 3))
#define PCIE_HEADER_LOG_4_OFF	0x0128
#define PCIE_BAR_CTRL_OFF(n)	(0x1804 + ((n - 1) * 4))
#define PCIE_WIN04_CTRL_OFF(n)	(0x1820 + ((n) << 4))
#define PCIE_WIN04_BASE_OFF(n)	(0x1824 + ((n) << 4))
#define PCIE_WIN04_REMAP_OFF(n)	(0x182c + ((n) << 4))
#define PCIE_WIN5_CTRL_OFF	0x1880
#define PCIE_WIN5_BASE_OFF	0x1884
#define PCIE_WIN5_REMAP_OFF	0x188c
#define PCIE_CONF_ADDR_OFF	0x18f8
#define PCIE_CONF_ADDR_EN	0x80000000
#define PCIE_CONF_REG(r)	((((r) & 0xf00) << 16) | ((r) & 0xfc))
#define PCIE_CONF_BUS(b)	(((b) & 0xff) << 16)
#define PCIE_CONF_DEV(d)	(((d) & 0x1f) << 11)
#define PCIE_CONF_FUNC(f)	(((f) & 0x7) << 8)
#define PCIE_CONF_DATA_OFF	0x18fc
#define PCIE_MASK_OFF		0x1910
#define PCIE_CTRL_OFF		0x1a00
#define PCIE_CTRL_X1_MODE	0x0001
#define PCIE_STAT_OFF		0x1a04
#define PCIE_STAT_DEV_OFFS	20
#define PCIE_STAT_DEV_MASK	0x1f
#define PCIE_STAT_BUS_OFFS	8
#define PCIE_STAT_BUS_MASK	0xff
#define PCIE_STAT_LINK_DOWN	1
#define PCIE_DEBUG_CTRL         0x1a60
#define PCIE_DEBUG_SOFT_RESET   (1<<20)

/* Return the PCIe unit device ID (upper 16 bits of the DEV_ID register). */
u32 __init orion_pcie_dev_id(void __iomem *base)
{
	return readl(base + PCIE_DEV_ID_OFF) >> 16;
}

/* Return the PCIe unit revision (low byte of the DEV_REV register). */
u32 __init orion_pcie_rev(void __iomem *base)
{
	return readl(base + PCIE_DEV_REV_OFF) & 0xff;
}

/* Non-zero when the link-down bit in the status register is clear. */
int orion_pcie_link_up(void __iomem *base)
{
	return !(readl(base + PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}

/* Non-zero when the controller is NOT in x1 mode, i.e. running x4. */
int __init orion_pcie_x4_mode(void __iomem *base)
{
	return !(readl(base + PCIE_CTRL_OFF) & PCIE_CTRL_X1_MODE);
}

/* Extract the local (root) bus number from the status register. */
int orion_pcie_get_local_bus_nr(void __iomem *base)
{
	u32 stat = readl(base + PCIE_STAT_OFF);

	return (stat >> PCIE_STAT_BUS_OFFS) & PCIE_STAT_BUS_MASK;
}

/* Program the local bus number field of the status register (read-modify-write). */
void __init orion_pcie_set_local_bus_nr(void __iomem *base, int nr)
{
	u32 stat;

	stat = readl(base + PCIE_STAT_OFF);
	stat &= ~(PCIE_STAT_BUS_MASK << PCIE_STAT_BUS_OFFS);
	stat |= nr << PCIE_STAT_BUS_OFFS;
	writel(stat, base + PCIE_STAT_OFF);
}

/*
 * Soft-reset the PCIe unit, then poll (up to 20 x 10ms) for the link to
 * come back before releasing the reset bit.
 */
void __init orion_pcie_reset(void __iomem *base)
{
	u32 reg;
	int i;

	/*
	 * MV-S104860-U0, Rev. C:
	 * PCI Express Unit Soft Reset
	 * When set, generates an internal reset in the PCI Express unit.
	 * This bit should be cleared after the link is re-established.
	 */
	reg = readl(base + PCIE_DEBUG_CTRL);
	reg |= PCIE_DEBUG_SOFT_RESET;
	writel(reg, base + PCIE_DEBUG_CTRL);
	for (i = 0; i < 20; i++) {
		mdelay(10);
		if (orion_pcie_link_up(base))
			break;
	}
	reg &= ~(PCIE_DEBUG_SOFT_RESET);
	writel(reg, base + PCIE_DEBUG_CTRL);
}

/*
 * Setup PCIE BARs and Address Decode Wins:
 * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
 * WIN[0-3] -> DRAM bank[0-3]
 */
static void __init orion_pcie_setup_wins(void __iomem *base,
					 struct mbus_dram_target_info *dram)
{
	u32 size;
	int i;

	/*
	 * First, disable and clear BARs and windows.
	 */
	for (i = 1; i <= 2; i++) {
		writel(0, base + PCIE_BAR_CTRL_OFF(i));
		writel(0, base + PCIE_BAR_LO_OFF(i));
		writel(0, base + PCIE_BAR_HI_OFF(i));
	}

	for (i = 0; i < 5; i++) {
		writel(0, base + PCIE_WIN04_CTRL_OFF(i));
		writel(0, base + PCIE_WIN04_BASE_OFF(i));
		writel(0, base + PCIE_WIN04_REMAP_OFF(i));
	}

	writel(0, base + PCIE_WIN5_CTRL_OFF);
	writel(0, base + PCIE_WIN5_BASE_OFF);
	writel(0, base + PCIE_WIN5_REMAP_OFF);

	/*
	 * Setup windows for DDR banks.  Count total DDR size on the fly.
	 */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(cs->base & 0xffff0000, base + PCIE_WIN04_BASE_OFF(i));
		writel(0, base + PCIE_WIN04_REMAP_OFF(i));
		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
				base + PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/*
	 * Round up 'size' to the nearest power of two.
	 */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/*
	 * Setup BAR[1] to all DRAM banks.
	 */
	writel(dram->cs[0].base, base + PCIE_BAR_LO_OFF(1));
	writel(0, base + PCIE_BAR_HI_OFF(1));
	writel(((size - 1) & 0xffff0000) | 1, base + PCIE_BAR_CTRL_OFF(1));
}

/*
 * One-time controller bring-up: point the MBUS decode windows at DRAM,
 * enable bus-master/memory/IO in the command register and unmask
 * interrupt lines A-D.
 */
void __init orion_pcie_setup(void __iomem *base)
{
	u16 cmd;
	u32 mask;

	/*
	 * Point PCIe unit MBUS decode windows to DRAM space.
	 */
	orion_pcie_setup_wins(base, &orion_mbus_dram_info);

	/*
	 * Master + slave enable.
	 */
	cmd = readw(base + PCIE_CMD_OFF);
	cmd |= PCI_COMMAND_IO;
	cmd |= PCI_COMMAND_MEMORY;
	cmd |= PCI_COMMAND_MASTER;
	writew(cmd, base + PCIE_CMD_OFF);

	/*
	 * Enable interrupt lines A-D.
	 */
	mask = readl(base + PCIE_MASK_OFF);
	mask |= 0x0f000000;
	writel(mask, base + PCIE_MASK_OFF);
}

/*
 * Standard config-space read: latch bus/dev/func/reg into CONF_ADDR,
 * read the 32-bit CONF_DATA and shift/mask it down for 8/16-bit reads.
 * Always returns PCIBIOS_SUCCESSFUL.
 */
int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
		       u32 devfn, int where, int size, u32 *val)
{
	writel(PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(PCI_SLOT(devfn)) |
		PCIE_CONF_FUNC(PCI_FUNC(devfn)) |
		PCIE_CONF_REG(where) | PCIE_CONF_ADDR_EN,
			base + PCIE_CONF_ADDR_OFF);

	*val = readl(base + PCIE_CONF_DATA_OFF);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

/*
 * Config read variant that, for any target other than function 0 on the
 * local bus, takes the value from the HEADER_LOG_4 register after
 * issuing the read -- presumably a TLP-related hardware workaround on
 * this SoC family; confirm against the Marvell errata.
 */
int orion_pcie_rd_conf_tlp(void __iomem *base, struct pci_bus *bus,
			   u32 devfn, int where, int size, u32 *val)
{
	writel(PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(PCI_SLOT(devfn)) |
		PCIE_CONF_FUNC(PCI_FUNC(devfn)) |
		PCIE_CONF_REG(where) | PCIE_CONF_ADDR_EN,
			base + PCIE_CONF_ADDR_OFF);

	*val = readl(base + PCIE_CONF_DATA_OFF);

	if (bus->number != orion_pcie_get_local_bus_nr(base) ||
	    PCI_FUNC(devfn) != 0)
		*val = readl(base + PCIE_HEADER_LOG_4_OFF);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

/*
 * Workaround config read: the whole config space is memory-mapped at
 * wa_base, so the address fields are used as an offset instead of going
 * through the CONF_ADDR/CONF_DATA pair.
 */
int orion_pcie_rd_conf_wa(void __iomem *wa_base, struct pci_bus *bus,
			  u32 devfn, int where, int size, u32 *val)
{
	*val = readl(wa_base + (PCIE_CONF_BUS(bus->number) |
				PCIE_CONF_DEV(PCI_SLOT(devfn)) |
				PCIE_CONF_FUNC(PCI_FUNC(devfn)) |
				PCIE_CONF_REG(where)));

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

/*
 * Config-space write: latch the address, then issue a 32/16/8-bit write
 * to CONF_DATA (byte lanes selected via the low address bits).  Returns
 * PCIBIOS_BAD_REGISTER_NUMBER for unsupported sizes.
 */
int orion_pcie_wr_conf(void __iomem *base, struct pci_bus *bus,
		       u32 devfn, int where, int size, u32 val)
{
	int ret = PCIBIOS_SUCCESSFUL;

	writel(PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(PCI_SLOT(devfn)) |
		PCIE_CONF_FUNC(PCI_FUNC(devfn)) |
		PCIE_CONF_REG(where) | PCIE_CONF_ADDR_EN,
			base + PCIE_CONF_ADDR_OFF);

	if (size == 4) {
		writel(val, base + PCIE_CONF_DATA_OFF);
	} else if (size == 2) {
		writew(val, base + PCIE_CONF_DATA_OFF + (where & 3));
	} else if (size == 1) {
		writeb(val, base + PCIE_CONF_DATA_OFF + (where & 3));
	} else {
		ret = PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return ret;
}
gpl-2.0
Jazz-823/kernel_lge_hammerhead_CM
drivers/staging/comedi/drivers/addi-data/APCI1710_Chrono.c
8162
75502
/** @verbatim Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier Tel: +19(0)7223/9493-0 Fax: +49(0)7223/9493-92 http://www.addi-data.com info@addi-data.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA You should also find the complete GPL in the COPYING file accompanying this source code. @endverbatim */ /* +-----------------------------------------------------------------------+ | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier | +-----------------------------------------------------------------------+ | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com | | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com | +-----------------------------------------------------------------------+ | Project : API APCI1710 | Compiler : gcc | | Module name : CHRONO.C | Version : 2.96 | +-------------------------------+---------------------------------------+ | Project manager: Eric Stolz | Date : 02/12/2002 | +-----------------------------------------------------------------------+ | Description : APCI-1710 chronometer module | | | | | +-----------------------------------------------------------------------+ | UPDATES | +-----------------------------------------------------------------------+ | Date | Author | Description of updates | 
+----------+-----------+------------------------------------------------+ | 29/06/98 | S. Weber | Digital input / output implementation | |----------|-----------|------------------------------------------------| | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 | | | | available | +-----------------------------------------------------------------------+ | | | | | | | | +-----------------------------------------------------------------------+ */ /* +----------------------------------------------------------------------------+ | Included files | +----------------------------------------------------------------------------+ */ #include "APCI1710_Chrono.h" /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_InitChrono | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | unsigned char_ b_ChronoMode, | | unsigned char_ b_PCIInputClock, | | unsigned char_ b_TimingUnit, | | ULONG_ ul_TimingInterval, | | PULONG_ pul_RealTimingInterval) +----------------------------------------------------------------------------+ | Task : Configure the chronometer operating mode (b_ChronoMode)| | from selected module (b_ModulNbr). | | The ul_TimingInterval and ul_TimingUnit determine the | | timing base for the measurement. | | The pul_RealTimingInterval return the real timing | | value. You must calling this function be for you call | | any other function witch access of the chronometer. | | | | Witch this functionality from the APCI-1710 you have | | the possibility to measure the timing witch two event. | | | | The mode 0 and 1 is appropriate for period measurement.| | The mode 2 and 3 is appropriate for frequent | | measurement. | | The mode 4 to 7 is appropriate for measuring the timing| | between two event. 
/*
 * i_APCI1710_InsnConfigInitChrono()
 *
 * Configure the chronometer operating mode of the selected APCI-1710
 * module.  Must be called before any other chronometer function.
 *
 * comedi encoding of the original ADDI-DATA parameters:
 *   CR_AREF(insn->chanspec) : module number (0..3)
 *   data[0] : chronometer mode (0..7)
 *   data[1] : PCI input clock (APCI1710_30MHZ / _33MHZ / _40MHZ)
 *   data[2] : timing unit (0=ns, 1=us, 2=ms, 3=s, 4=mn)
 *   data[3] : base timing interval in that unit
 * On return data[0] carries the rounded "real" timing interval.
 *
 * Return: insn->n on success, else a negative ADDI-DATA error code:
 *   -2 bad module, -3 not a chronometer module, -4 bad mode,
 *   -5 bad PCI clock, -6 bad timing unit, -7 bad base timing,
 *   -8 40MHz unsupported by board, -9 40MHz unsupported by firmware.
 */
int i_APCI1710_InsnConfigInitChrono(struct comedi_device *dev,
    struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
{
    int i_ReturnValue = 0;
    unsigned int ul_TimerValue = 0;
    unsigned int ul_TimingInterval = 0;
    unsigned int ul_RealTimingInterval = 0;
    double d_RealTimingInterval = 0;
    /* hardware mode bits for chronometer modes 0..7 */
    unsigned int dw_ModeArray[8] = { 0x01, 0x05, 0x00, 0x04, 0x02, 0x0E, 0x0A, 0x06 };
    unsigned char b_ModulNbr, b_ChronoMode, b_PCIInputClock, b_TimingUnit;

    b_ModulNbr = CR_AREF(insn->chanspec);
    b_ChronoMode = (unsigned char) data[0];
    b_PCIInputClock = (unsigned char) data[1];
    b_TimingUnit = (unsigned char) data[2];
    ul_TimingInterval = (unsigned int) data[3];
    i_ReturnValue = insn->n;

    /* Test the module number */
    if (b_ModulNbr < 4) {
        /* Test if chronometer */
        if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
            /* Test the chronometer mode */
            if (b_ChronoMode <= 7) {
                /* Test the PCI bus clock */
                if ((b_PCIInputClock == APCI1710_30MHZ) ||
                    (b_PCIInputClock == APCI1710_33MHZ) ||
                    (b_PCIInputClock == APCI1710_40MHZ)) {
                    /* Test the timing unity */
                    if (b_TimingUnit <= 4) {
                        /*
                         * Test the base timing selection: per-clock,
                         * per-unit min/max interval table.
                         */
                        if (((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 66) && (ul_TimingInterval <= 0xFFFFFFFFUL)) ||
                            ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 143165576UL)) ||
                            ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 143165UL)) ||
                            ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 143UL)) ||
                            ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 2UL)) ||
                            ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 60) && (ul_TimingInterval <= 0xFFFFFFFFUL)) ||
                            ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 130150240UL)) ||
                            ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 130150UL)) ||
                            ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 130UL)) ||
                            ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 2UL)) ||
                            ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 50) && (ul_TimingInterval <= 0xFFFFFFFFUL)) ||
                            ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 107374182UL)) ||
                            ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 107374UL)) ||
                            ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 107UL)) ||
                            ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 1UL))) {
                            /* Test the board version (40MHz needs board version > 0) */
                            if (((b_PCIInputClock == APCI1710_40MHZ) && (devpriv->s_BoardInfos.b_BoardVersion > 0)) || (b_PCIInputClock != APCI1710_40MHZ)) {
                                /* Test the TOR version (40MHz needs firmware >= 0x3131) */
                                if (((b_PCIInputClock == APCI1710_40MHZ) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3131)) || (b_PCIInputClock != APCI1710_40MHZ)) {
                                    /* FP math in kernel needs the FPU context saved */
                                    fpu_begin ();
                                    /* Calculate the timer 0 division fator */
                                    switch (b_TimingUnit) {
                                    /* ns */
                                    case 0:
                                        /* Timer 0 factor */
                                        ul_TimerValue = (unsigned int) (ul_TimingInterval * (0.001 * b_PCIInputClock));
                                        /* Round the value */
                                        if ((double)((double)ul_TimingInterval * (0.001 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
                                            ul_TimerValue = ul_TimerValue + 1;
                                        }
                                        /* Calculate the real timing */
                                        ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (0.001 * (double)b_PCIInputClock));
                                        d_RealTimingInterval = (double) ul_TimerValue / (0.001 * (double) b_PCIInputClock);
                                        if ((double)((double)ul_TimerValue / (0.001 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
                                            ul_RealTimingInterval = ul_RealTimingInterval + 1;
                                        }
                                        ul_TimingInterval = ul_TimingInterval - 1;
                                        ul_TimerValue = ul_TimerValue - 2;
                                        /* non-40MHz clocks get an empirical correction factor */
                                        if (b_PCIInputClock != APCI1710_40MHZ) {
                                            ul_TimerValue = (unsigned int) ( (double) (ul_TimerValue) * 0.99392);
                                        }
                                        break;
                                    /* us (original comment was mis-encoded) */
                                    case 1:
                                        /* Timer 0 factor */
                                        ul_TimerValue = (unsigned int) (ul_TimingInterval * (1.0 * b_PCIInputClock));
                                        /* Round the value */
                                        if ((double)((double)ul_TimingInterval * (1.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
                                            ul_TimerValue = ul_TimerValue + 1;
                                        }
                                        /* Calculate the real timing */
                                        ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (1.0 * (double)b_PCIInputClock));
                                        d_RealTimingInterval = (double) ul_TimerValue / ( (double) 1.0 * (double) b_PCIInputClock);
                                        if ((double)((double)ul_TimerValue / (1.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
                                            ul_RealTimingInterval = ul_RealTimingInterval + 1;
                                        }
                                        ul_TimingInterval = ul_TimingInterval - 1;
                                        ul_TimerValue = ul_TimerValue - 2;
                                        if (b_PCIInputClock != APCI1710_40MHZ) {
                                            ul_TimerValue = (unsigned int) ( (double) (ul_TimerValue) * 0.99392);
                                        }
                                        break;
                                    /* ms */
                                    case 2:
                                        /* Timer 0 factor */
                                        ul_TimerValue = ul_TimingInterval * (1000 * b_PCIInputClock);
                                        /* Round the value */
                                        if ((double)((double)ul_TimingInterval * (1000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
                                            ul_TimerValue = ul_TimerValue + 1;
                                        }
                                        /* Calculate the real timing */
                                        ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (1000.0 * (double)b_PCIInputClock));
                                        d_RealTimingInterval = (double) ul_TimerValue / (1000.0 * (double) b_PCIInputClock);
                                        if ((double)((double)ul_TimerValue / (1000.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
                                            ul_RealTimingInterval = ul_RealTimingInterval + 1;
                                        }
                                        ul_TimingInterval = ul_TimingInterval - 1;
                                        ul_TimerValue = ul_TimerValue - 2;
                                        if (b_PCIInputClock != APCI1710_40MHZ) {
                                            ul_TimerValue = (unsigned int) ( (double) (ul_TimerValue) * 0.99392);
                                        }
                                        break;
                                    /* s */
                                    case 3:
                                        /* Timer 0 factor */
                                        ul_TimerValue = (unsigned int) (ul_TimingInterval * (1000000.0 * b_PCIInputClock));
                                        /* Round the value */
                                        if ((double)((double)ul_TimingInterval * (1000000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
                                            ul_TimerValue = ul_TimerValue + 1;
                                        }
                                        /* Calculate the real timing */
                                        ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (1000000.0 * (double) b_PCIInputClock));
                                        d_RealTimingInterval = (double) ul_TimerValue / (1000000.0 * (double) b_PCIInputClock);
                                        if ((double)((double)ul_TimerValue / (1000000.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
                                            ul_RealTimingInterval = ul_RealTimingInterval + 1;
                                        }
                                        ul_TimingInterval = ul_TimingInterval - 1;
                                        ul_TimerValue = ul_TimerValue - 2;
                                        if (b_PCIInputClock != APCI1710_40MHZ) {
                                            ul_TimerValue = (unsigned int) ( (double) (ul_TimerValue) * 0.99392);
                                        }
                                        break;
                                    /* mn */
                                    case 4:
                                        /* Timer 0 factor */
                                        ul_TimerValue = (unsigned int) ( (ul_TimingInterval * 60) * (1000000.0 * b_PCIInputClock));
                                        /* Round the value */
                                        if ((double)((double)(ul_TimingInterval * 60.0) * (1000000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
                                            ul_TimerValue = ul_TimerValue + 1;
                                        }
                                        /* Calculate the real timing */
                                        ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (1000000.0 * (double) b_PCIInputClock)) / 60;
                                        /*
                                         * NOTE(review): this path divides by 0.001 where
                                         * the integer path above uses 1000000.0 -- looks
                                         * like an upstream inconsistency; preserved as-is.
                                         */
                                        d_RealTimingInterval = ( (double) ul_TimerValue / (0.001 * (double)b_PCIInputClock)) / 60.0;
                                        if ((double)(((double)ul_TimerValue / (1000000.0 * (double)b_PCIInputClock)) / 60.0) >= (double)((double)ul_RealTimingInterval + 0.5)) {
                                            ul_RealTimingInterval = ul_RealTimingInterval + 1;
                                        }
                                        ul_TimingInterval = ul_TimingInterval - 1;
                                        ul_TimerValue = ul_TimerValue - 2;
                                        if (b_PCIInputClock != APCI1710_40MHZ) {
                                            ul_TimerValue = (unsigned int) ( (double) (ul_TimerValue) * 0.99392);
                                        }
                                        break;
                                    }
                                    fpu_end();
                                    /* Save the PCI input clock */
                                    devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_PCIInputClock = b_PCIInputClock;
                                    /* Save the timing unity */
                                    devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_TimingUnit = b_TimingUnit;
                                    /* Save the base timing */
                                    devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.d_TimingInterval = d_RealTimingInterval;
                                    /* Set the chronometer mode */
                                    devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg = dw_ModeArray [b_ChronoMode];
                                    /* Test if 40 MHz used (bit 7 selects the on-board quartz) */
                                    if (b_PCIInputClock == APCI1710_40MHZ) {
                                        devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg = devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg | 0x80;
                                    }
                                    outl(devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 16 + (64 * b_ModulNbr));
                                    /* Write timer 0 value */
                                    outl(ul_TimerValue, devpriv->s_BoardInfos.ui_Address + (64 * b_ModulNbr));
                                    /* Chronometer init. */
                                    devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_ChronoInit = 1;
                                } else {
                                    /* TOR version error for 40MHz clock selection */
                                    DPRINTK("TOR version error for 40MHz clock selection\n");
                                    i_ReturnValue = -9;
                                }
                            } else {
                                /* You can not use the 40MHz clock selection with this board */
                                DPRINTK("You can not used the 40MHz clock selection with this board\n");
                                i_ReturnValue = -8;
                            }
                        } else {
                            /* Base timing selection is wrong */
                            DPRINTK("Base timing selection is wrong\n");
                            i_ReturnValue = -7;
                        }
                    }	/* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
                    else {
                        /* Timing unity selection is wrong */
                        DPRINTK("Timing unity selection is wrong\n");
                        i_ReturnValue = -6;
                    }	/* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
                }	/* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) */
                else {
                    /* The selected PCI input clock is wrong */
                    DPRINTK("The selected PCI input clock is wrong\n");
                    i_ReturnValue = -5;
                }	/* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) */
            }	/* if (b_ChronoMode >= 0 && b_ChronoMode <= 7) */
            else {
                /* Chronometer mode selection is wrong */
                DPRINTK("Chronometer mode selection is wrong\n");
                i_ReturnValue = -4;
            }	/* if (b_ChronoMode >= 0 && b_ChronoMode <= 7) */
        } else {
            /* The module is not a Chronometer module */
            DPRINTK("The module is not a Chronometer module\n");
            i_ReturnValue = -3;
        }
    } else {
        /* Module number error */
        DPRINTK("Module number error\n");
        i_ReturnValue = -2;
    }

    /*
     * NOTE(review): data[0] is overwritten unconditionally, so on error
     * paths it reports 0 (ul_RealTimingInterval's initial value).
     */
    data[0] = ul_RealTimingInterval;
    return i_ReturnValue;
}

/*
 * i_APCI1710_EnableChrono / i_APCI1710_DisableChrono (combined below as
 * i_APCI1710_InsnWriteEnableDisableChrono): enable starts the
 * chronometer (optionally with interrupt after the stop signal, single
 * or continuous cycle mode); disable stops it -- a start signal seen
 * while disabled is ignored.
 * comedi encoding of the original ADDI-DATA parameters:
 *   CR_AREF(insn->chanspec) : module number (0..3)
 *   data[0] : APCI1710_ENABLE or APCI1710_DISABLE
 *   data[1] : cycle mode (APCI1710_SINGLE / APCI1710_CONTINUOUS)
 *   data[2] : interrupt enable (APCI1710_ENABLE / APCI1710_DISABLE)
 *
 * Return: insn->n on success, else -2 bad module, -3 not a chronometer
 * module, -4 not initialised (see i_APCI1710_InsnConfigInitChrono),
 * -5 bad cycle mode, -6 bad interrupt flag, -8 bad action in data[0].
 */
int i_APCI1710_InsnWriteEnableDisableChrono(struct comedi_device *dev,
    struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
{
    int i_ReturnValue = 0;
    unsigned char b_ModulNbr, b_CycleMode, b_InterruptEnable, b_Action;

    b_ModulNbr = CR_AREF(insn->chanspec);
    b_Action = (unsigned char) data[0];
    b_CycleMode = (unsigned char) data[1];
    b_InterruptEnable = (unsigned char) data[2];
    i_ReturnValue = insn->n;

    /* Test the module number */
    if (b_ModulNbr < 4) {
        /* Test if chronometer */
        if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
            /* Test if chronometer initialised */
            if (devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_ChronoInit == 1) {
                switch (b_Action) {
                case APCI1710_ENABLE:
                    /* Test the cycle mode parameter */
                    if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) {
                        /* Test the interrupt flag */
                        if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) {
                            /* Save the interrupt flag */
                            devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_InterruptMask = b_InterruptEnable;
                            /* Save the cycle mode */
                            devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_CycleMode = b_CycleMode;
                            /*
                             * Fold interrupt (bit 5), cycle mode (bit 6)
                             * and the enable bit (0x10) into the cached
                             * configuration register value.
                             */
                            devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg = (devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg & 0x8F) | ((1 & b_InterruptEnable) << 5) | ((1 & b_CycleMode) << 6) | 0x10;
                            /* Test if interrupt enabled */
                            if (b_InterruptEnable == APCI1710_ENABLE) {
                                /* Clear the interrupt flag */
                                outl(devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 32 + (64 * b_ModulNbr));
                                devpriv->tsk_Current = current;	/* Save the current process task structure */
                            }
                            /* Enable or disable the interrupt */
                            /* Enable the chronometer */
                            outl(devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 16 + (64 * b_ModulNbr));
                            /* Clear status register */
                            outl(0, devpriv->s_BoardInfos.ui_Address + 36 + (64 * b_ModulNbr));
                        }	/* if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) */
                        else {
                            /* Interrupt parameter is wrong */
                            DPRINTK("Interrupt parameter is wrong\n");
                            i_ReturnValue = -6;
                        }	/* if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) */
                    }	/* if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) */
                    else {
                        /* Chronometer acquisition mode cycle is wrong */
                        DPRINTK("Chronometer acquisition mode cycle is wrong\n");
                        i_ReturnValue = -5;
                    }	/* if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) */
                    break;

                case APCI1710_DISABLE:
                    devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_InterruptMask = 0;
                    devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg = devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg & 0x2F;
                    /* Disable the interrupt */
                    /* Disable the chronometer */
                    outl(devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 16 + (64 * b_ModulNbr));
                    /* Test if continuous mode */
                    if (devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_CycleMode == APCI1710_CONTINUOUS) {
                        /* Clear status register */
                        outl(0, devpriv->s_BoardInfos.ui_Address + 36 + (64 * b_ModulNbr));
                    }
                    break;

                default:
                    DPRINTK("Inputs wrong! Enable or Disable chrono\n");
                    i_ReturnValue = -8;
                }	/* switch ENABLE/DISABLE */
            } else {
                /* Chronometer not initialised */
                DPRINTK("Chronometer not initialised\n");
                i_ReturnValue = -4;
            }
        } else {
            /* The module is not a Chronometer module */
            DPRINTK("The module is not a Chronometer module\n");
            i_ReturnValue = -3;
        }
    } else {
        /* Module number error */
        DPRINTK("Module number error\n");
        i_ReturnValue = -2;
    }

    return i_ReturnValue;
}

/*
 * i_APCI1710_InsnReadChrono()
 *
 * Read dispatcher for the chronometer: CR_CHAN(insn->chanspec) selects
 * the operation (progress status, latched value, value conversion, or
 * draining one entry from the saved-interrupt FIFO).
 *
 * Return: insn->n when the selected helper succeeded (>= 0), otherwise
 * the helper's negative error code.
 */
int i_APCI1710_InsnReadChrono(struct comedi_device *dev,
    struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
{
    unsigned char b_ReadType;
    int i_ReturnValue = insn->n;

    b_ReadType = CR_CHAN(insn->chanspec);

    switch (b_ReadType) {
    case APCI1710_CHRONO_PROGRESS_STATUS:
        i_ReturnValue = i_APCI1710_GetChronoProgressStatus(dev,
            (unsigned char) CR_AREF(insn->chanspec),
            (unsigned char *) &data[0]);
        break;

    case APCI1710_CHRONO_READVALUE:
        i_ReturnValue = i_APCI1710_ReadChronoValue(dev,
            (unsigned char) CR_AREF(insn->chanspec),
            (unsigned int) insn->unused[0],
            (unsigned char *) &data[0], (unsigned int *) &data[1]);
        break;

    case APCI1710_CHRONO_CONVERTVALUE:
        i_ReturnValue = i_APCI1710_ConvertChronoValue(dev,
            (unsigned char) CR_AREF(insn->chanspec),
            (unsigned int) insn->unused[0],
            (unsigned int *) &data[0],
            (unsigned char *) &data[1],
            (unsigned char *) &data[2],
            (unsigned int *) &data[3],
            (unsigned int *) &data[4], (unsigned int *) &data[5]);
        break;

    case APCI1710_CHRONO_READINTERRUPT:
        printk("In Chrono Read Interrupt\n");
        /* copy out the oldest saved interrupt record */
        data[0] = devpriv->s_InterruptParameters.s_FIFOInterruptParameters[devpriv->s_InterruptParameters.ui_Read].b_OldModuleMask;
        data[1] = devpriv->s_InterruptParameters.s_FIFOInterruptParameters[devpriv->s_InterruptParameters.ui_Read].ul_OldInterruptMask;
        data[2] = devpriv->s_InterruptParameters.s_FIFOInterruptParameters[devpriv->s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
        /* Increment the read FIFO (circular index) */
        devpriv->s_InterruptParameters.ui_Read = (devpriv->s_InterruptParameters.ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
        break;

    default:
        printk("ReadType Parameter wrong\n");
    }

    if (i_ReturnValue >= 0)
        i_ReturnValue = insn->n;
    return i_ReturnValue;
}

/*
 * i_APCI1710_GetChronoProgressStatus (b_BoardHandle, b_ModulNbr,
 * pb_ChronoStatus): return the chronometer status of the selected
 * module (0..3) through pb_ChronoStatus.
 * (declaration comment truncated at chunk boundary)
| | 0 : Measurement not started.| | No start signal occur. | | 1 : Measurement started. | | A start signal occur. | | 2 : Measurement stopped. | | A stop signal occur. | | The measurement is | | terminate. | | 3: A overflow occur. You | | must change the base | | timing witch the | | function | | "i_APCI1710_InitChrono" | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a Chronometer module | | -4: Chronometer not initialised see function | | "i_APCI1710_InitChrono" | +----------------------------------------------------------------------------+ */ int i_APCI1710_GetChronoProgressStatus(struct comedi_device *dev, unsigned char b_ModulNbr, unsigned char *pb_ChronoStatus) { int i_ReturnValue = 0; unsigned int dw_Status; /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /***********************/ /* Test if chronometer */ /***********************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_CHRONOMETER) { /***********************************/ /* Test if chronometer initialised */ /***********************************/ if (devpriv-> s_ModuleInfo[b_ModulNbr]. s_ChronoModuleInfo.b_ChronoInit == 1) { dw_Status = inl(devpriv->s_BoardInfos. 
ui_Address + 8 + (64 * b_ModulNbr)); /********************/ /* Test if overflow */ /********************/ if ((dw_Status & 8) == 8) { /******************/ /* Overflow occur */ /******************/ *pb_ChronoStatus = 3; } /* if ((dw_Status & 8) == 8) */ else { /*******************************/ /* Test if measurement stopped */ /*******************************/ if ((dw_Status & 2) == 2) { /***********************/ /* A stop signal occur */ /***********************/ *pb_ChronoStatus = 2; } /* if ((dw_Status & 2) == 2) */ else { /*******************************/ /* Test if measurement started */ /*******************************/ if ((dw_Status & 1) == 1) { /************************/ /* A start signal occur */ /************************/ *pb_ChronoStatus = 1; } /* if ((dw_Status & 1) == 1) */ else { /***************************/ /* Measurement not started */ /***************************/ *pb_ChronoStatus = 0; } /* if ((dw_Status & 1) == 1) */ } /* if ((dw_Status & 2) == 2) */ } /* if ((dw_Status & 8) == 8) */ } else { /*******************************/ /* Chronometer not initialised */ /*******************************/ DPRINTK("Chronometer not initialised\n"); i_ReturnValue = -4; } } else { /******************************************/ /* The module is not a Chronometer module */ /******************************************/ DPRINTK("The module is not a Chronometer module\n"); i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ DPRINTK("Module number error\n"); i_ReturnValue = -2; } return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_ReadChronoValue | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | unsigned int_ ui_TimeOut, | | unsigned char *_ pb_ChronoStatus, | | PULONG_ pul_ChronoValue) | +----------------------------------------------------------------------------+ | Task : Return the chronometer status 
(pb_ChronoStatus) and the| | timing value (pul_ChronoValue) after a stop signal | | occur from selected chronometer module (b_ModulNbr). | | This function are only avaible if you have disabled | | the interrupt functionality. See function | | "i_APCI1710_EnableChrono" and the Interrupt mask | | description chapter. | | You can test the chronometer status witch the | | "i_APCI1710_GetChronoProgressStatus" function. | | | | The returned value from pul_ChronoValue parameter is | | not real measured timing. | | You must used the "i_APCI1710_ConvertChronoValue" | | function or make this operation for calculate the | | timing: | | | | Timing = pul_ChronoValue * pul_RealTimingInterval. | | | | pul_RealTimingInterval is the returned parameter from | | "i_APCI1710_InitChrono" function and the time unity is | | the b_TimingUnit from "i_APCI1710_InitChrono" function| +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | | unsigned char_ b_ModulNbr : Selected module number (0 to 3) | +----------------------------------------------------------------------------+ | Output Parameters : PULONG_ pb_ChronoStatus : Return the chronometer | | status. | | 0 : Measurement not started.| | No start signal occur. | | 1 : Measurement started. | | A start signal occur. | | 2 : Measurement stopped. | | A stop signal occur. | | The measurement is | | terminate. | | 3: A overflow occur. You | | must change the base | | timing witch the | | function | | "i_APCI1710_InitChrono" | | unsigned int * pul_ChronoValue : Chronometer timing value. 
| +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a Chronometer module | | -4: Chronometer not initialised see function | | "i_APCI1710_InitChrono" | | -5: Timeout parameter is wrong (0 to 65535) | | -6: Interrupt routine installed. You can not read | | directly the chronometer measured timing. | +----------------------------------------------------------------------------+ */ int i_APCI1710_ReadChronoValue(struct comedi_device *dev, unsigned char b_ModulNbr, unsigned int ui_TimeOut, unsigned char *pb_ChronoStatus, unsigned int *pul_ChronoValue) { int i_ReturnValue = 0; unsigned int dw_Status; unsigned int dw_TimeOut = 0; /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /***********************/ /* Test if chronometer */ /***********************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_CHRONOMETER) { /***********************************/ /* Test if chronometer initialised */ /***********************************/ if (devpriv-> s_ModuleInfo[b_ModulNbr]. s_ChronoModuleInfo.b_ChronoInit == 1) { /*****************************/ /* Test the timout parameter */ /*****************************/ if (ui_TimeOut <= 65535UL) { for (;;) { /*******************/ /* Read the status */ /*******************/ dw_Status = inl(devpriv-> s_BoardInfos. ui_Address + 8 + (64 * b_ModulNbr)); /********************/ /* Test if overflow */ /********************/ if ((dw_Status & 8) == 8) { /******************/ /* Overflow occur */ /******************/ *pb_ChronoStatus = 3; /***************************/ /* Test if continuous mode */ /***************************/ if (devpriv-> s_ModuleInfo [b_ModulNbr]. s_ChronoModuleInfo. 
b_CycleMode == APCI1710_CONTINUOUS) { /*************************/ /* Clear status register */ /*************************/ outl(0, devpriv->s_BoardInfos.ui_Address + 36 + (64 * b_ModulNbr)); } break; } /* if ((dw_Status & 8) == 8) */ else { /*******************************/ /* Test if measurement stopped */ /*******************************/ if ((dw_Status & 2) == 2) { /***********************/ /* A stop signal occur */ /***********************/ *pb_ChronoStatus = 2; /***************************/ /* Test if continnous mode */ /***************************/ if (devpriv-> s_ModuleInfo [b_ModulNbr]. s_ChronoModuleInfo. b_CycleMode == APCI1710_CONTINUOUS) { /*************************/ /* Clear status register */ /*************************/ outl(0, devpriv->s_BoardInfos.ui_Address + 36 + (64 * b_ModulNbr)); } break; } /* if ((dw_Status & 2) == 2) */ else { /*******************************/ /* Test if measurement started */ /*******************************/ if ((dw_Status & 1) == 1) { /************************/ /* A start signal occur */ /************************/ *pb_ChronoStatus = 1; } /* if ((dw_Status & 1) == 1) */ else { /***************************/ /* Measurement not started */ /***************************/ *pb_ChronoStatus = 0; } /* if ((dw_Status & 1) == 1) */ } /* if ((dw_Status & 2) == 2) */ } /* if ((dw_Status & 8) == 8) */ if (dw_TimeOut == ui_TimeOut) { /*****************/ /* Timeout occur */ /*****************/ break; } else { /*************************/ /* Increment the timeout */ /*************************/ dw_TimeOut = dw_TimeOut + 1; mdelay(1000); } } /* for (;;) */ /*****************************/ /* Test if stop signal occur */ /*****************************/ if (*pb_ChronoStatus == 2) { /**********************************/ /* Read the measured timing value */ /**********************************/ *pul_ChronoValue = inl(devpriv-> s_BoardInfos. 
ui_Address + 4 + (64 * b_ModulNbr)); if (*pul_ChronoValue != 0) { *pul_ChronoValue = *pul_ChronoValue - 1; } } else { /*************************/ /* Test if timeout occur */ /*************************/ if ((*pb_ChronoStatus != 3) && (dw_TimeOut == ui_TimeOut) && (ui_TimeOut != 0)) { /*****************/ /* Timeout occur */ /*****************/ *pb_ChronoStatus = 4; } } } else { /******************************/ /* Timeout parameter is wrong */ /******************************/ DPRINTK("Timeout parameter is wrong\n"); i_ReturnValue = -5; } } else { /*******************************/ /* Chronometer not initialised */ /*******************************/ DPRINTK("Chronometer not initialised\n"); i_ReturnValue = -4; } } else { /******************************************/ /* The module is not a Chronometer module */ /******************************************/ DPRINTK("The module is not a Chronometer module\n"); i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ DPRINTK("Module number error\n"); i_ReturnValue = -2; } return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_ConvertChronoValue | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | ULONG_ ul_ChronoValue, | | PULONG_ pul_Hour, | | unsigned char *_ pb_Minute, | | unsigned char *_ pb_Second, | | unsigned int *_ pui_MilliSecond, | | unsigned int *_ pui_MicroSecond, | | unsigned int *_ pui_NanoSecond) | +----------------------------------------------------------------------------+ | Task : Convert the chronometer measured timing | | (ul_ChronoValue) in to h, mn, s, ms, µs, ns. | +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | | unsigned char_ b_ModulNbr : Selected module number (0 to 3)| | ULONG_ ul_ChronoValue : Measured chronometer timing | | value. 
| | See"i_APCI1710_ReadChronoValue"| +----------------------------------------------------------------------------+ | Output Parameters : PULONG_ pul_Hour : Chronometer timing hour | | unsigned char *_ pb_Minute : Chronometer timing minute | | unsigned char *_ pb_Second : Chronometer timing second | | unsigned int *_ pui_MilliSecond : Chronometer timing mini | | second | | unsigned int *_ pui_MicroSecond : Chronometer timing micro | | second | | unsigned int *_ pui_NanoSecond : Chronometer timing nano | | second | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a Chronometer module | | -4: Chronometer not initialised see function | | "i_APCI1710_InitChrono" | +----------------------------------------------------------------------------+ */ int i_APCI1710_ConvertChronoValue(struct comedi_device *dev, unsigned char b_ModulNbr, unsigned int ul_ChronoValue, unsigned int *pul_Hour, unsigned char *pb_Minute, unsigned char *pb_Second, unsigned int *pui_MilliSecond, unsigned int *pui_MicroSecond, unsigned int *pui_NanoSecond) { int i_ReturnValue = 0; double d_Hour; double d_Minute; double d_Second; double d_MilliSecond; double d_MicroSecond; double d_NanoSecond; /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /***********************/ /* Test if chronometer */ /***********************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_CHRONOMETER) { /***********************************/ /* Test if chronometer initialised */ /***********************************/ if (devpriv-> s_ModuleInfo[b_ModulNbr]. s_ChronoModuleInfo.b_ChronoInit == 1) { fpu_begin(); d_Hour = (double)ul_ChronoValue *(double) devpriv->s_ModuleInfo[b_ModulNbr]. s_ChronoModuleInfo.d_TimingInterval; switch (devpriv-> s_ModuleInfo[b_ModulNbr]. 
s_ChronoModuleInfo.b_TimingUnit) { case 0: d_Hour = d_Hour / (double)1000.0; case 1: d_Hour = d_Hour / (double)1000.0; case 2: d_Hour = d_Hour / (double)1000.0; case 3: d_Hour = d_Hour / (double)60.0; case 4: /**********************/ /* Calculate the hour */ /**********************/ d_Hour = d_Hour / (double)60.0; *pul_Hour = (unsigned int) d_Hour; /************************/ /* Calculate the minute */ /************************/ d_Minute = d_Hour - *pul_Hour; d_Minute = d_Minute * 60; *pb_Minute = (unsigned char) d_Minute; /************************/ /* Calculate the second */ /************************/ d_Second = d_Minute - *pb_Minute; d_Second = d_Second * 60; *pb_Second = (unsigned char) d_Second; /*****************************/ /* Calculate the mini second */ /*****************************/ d_MilliSecond = d_Second - *pb_Second; d_MilliSecond = d_MilliSecond * 1000; *pui_MilliSecond = (unsigned int) d_MilliSecond; /******************************/ /* Calculate the micro second */ /******************************/ d_MicroSecond = d_MilliSecond - *pui_MilliSecond; d_MicroSecond = d_MicroSecond * 1000; *pui_MicroSecond = (unsigned int) d_MicroSecond; /******************************/ /* Calculate the micro second */ /******************************/ d_NanoSecond = d_MicroSecond - *pui_MicroSecond; d_NanoSecond = d_NanoSecond * 1000; *pui_NanoSecond = (unsigned int) d_NanoSecond; break; } fpu_end(); } else { /*******************************/ /* Chronometer not initialised */ /*******************************/ DPRINTK("Chronometer not initialised\n"); i_ReturnValue = -4; } } else { /******************************************/ /* The module is not a Chronometer module */ /******************************************/ DPRINTK("The module is not a Chronometer module\n"); i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ DPRINTK("Module number error\n"); i_ReturnValue = -2; } return i_ReturnValue; } /* 
+----------------------------------------------------------------------------+ | Function Name : int i_APCI1710_InsnBitsChronoDigitalIO(struct comedi_device *dev,struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | +----------------------------------------------------------------------------+ | Task : Sets the output witch has been passed with the | | parameter b_Channel. Setting an output means setting an| | output high. | +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | | unsigned char_ b_ModulNbr : Selected module number (0 to 3)| | unsigned char_ b_OutputChannel : Selection from digital output | | CR_CHAN() channel (0 to 2) | | 0 : Channel H | | 1 : Channel A | | 2 : Channel B | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a Chronometer module | | -4: The selected digital output is wrong | | -5: Chronometer not initialised see function | | "i_APCI1710_InitChrono" | +----------------------------------------------------------------------------+ */ /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_SetChronoChlOff | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | unsigned char_ b_OutputChannel) | +----------------------------------------------------------------------------+ | Task : Resets the output witch has been passed with the | | parameter b_Channel. Resetting an output means setting | | an output low. 
| +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 data[0] : Chl ON, Chl OFF , Chl Read , Port Read | unsigned char_ b_ModulNbr CR_AREF : Selected module number (0 to 3)| | unsigned char_ b_OutputChannel CR_CHAN : Selection from digital output | | channel (0 to 2) | | 0 : Channel H | | 1 : Channel A | | 2 : Channel B | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a Chronometer module | | -4: The selected digital output is wrong | | -5: Chronometer not initialised see function | | "i_APCI1710_InitChrono" | +----------------------------------------------------------------------------+ */ /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_ReadChronoChlValue | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | unsigned char_ b_InputChannel, | | unsigned char *_ pb_ChannelStatus) | +----------------------------------------------------------------------------+ | Task : Return the status from selected digital input | | (b_InputChannel) from selected chronometer | | module (b_ModulNbr). 
| +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | | unsigned char_ b_ModulNbr : Selected module number (0 to 3)| | unsigned char_ b_InputChannel : Selection from digital input | | channel (0 to 2) | | CR_CHAN() 0 : Channel E | | 1 : Channel F | | 2 : Channel G | +----------------------------------------------------------------------------+ | Output Parameters : unsigned char *_ pb_ChannelStatus : Digital input channel status.| | data[0] 0 : Channel is not active | | 1 : Channel is active | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a Chronometer module | | -4: The selected digital input is wrong | | -5: Chronometer not initialised see function | | "i_APCI1710_InitChrono" | +----------------------------------------------------------------------------+ */ /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_ReadChronoPortValue | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | unsigned char *_ pb_PortValue) | +----------------------------------------------------------------------------+ | Task : Return the status from digital inputs port from | | selected (b_ModulNbr) chronometer module. | +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | | unsigned char_ b_ModulNbr : Selected module number (0 to 3)| +----------------------------------------------------------------------------+ | Output Parameters : unsigned char *_ pb_PortValue : Digital inputs port status. 
| data[0] +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a Chronometer module | | -4: Chronometer not initialised see function | | "i_APCI1710_InitChrono" | +----------------------------------------------------------------------------+ */ int i_APCI1710_InsnBitsChronoDigitalIO(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i_ReturnValue = 0; unsigned char b_ModulNbr, b_OutputChannel, b_InputChannel, b_IOType; unsigned int dw_Status; unsigned char *pb_ChannelStatus; unsigned char *pb_PortValue; b_ModulNbr = CR_AREF(insn->chanspec); i_ReturnValue = insn->n; b_IOType = (unsigned char) data[0]; /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /***********************/ /* Test if chronometer */ /***********************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_CHRONOMETER) { /***********************************/ /* Test if chronometer initialised */ /***********************************/ if (devpriv->s_ModuleInfo[b_ModulNbr]. s_ChronoModuleInfo.b_ChronoInit == 1) { /***********************************/ /* Test the digital output channel */ /***********************************/ switch (b_IOType) { case APCI1710_CHRONO_SET_CHANNELOFF: b_OutputChannel = (unsigned char) CR_CHAN(insn->chanspec); if (b_OutputChannel <= 2) { outl(0, devpriv->s_BoardInfos. 
ui_Address + 20 + (b_OutputChannel * 4) + (64 * b_ModulNbr)); } /* if ((b_OutputChannel >= 0) && (b_OutputChannel <= 2)) */ else { /****************************************/ /* The selected digital output is wrong */ /****************************************/ DPRINTK("The selected digital output is wrong\n"); i_ReturnValue = -4; } /* if ((b_OutputChannel >= 0) && (b_OutputChannel <= 2)) */ break; case APCI1710_CHRONO_SET_CHANNELON: b_OutputChannel = (unsigned char) CR_CHAN(insn->chanspec); if (b_OutputChannel <= 2) { outl(1, devpriv->s_BoardInfos. ui_Address + 20 + (b_OutputChannel * 4) + (64 * b_ModulNbr)); } /* if ((b_OutputChannel >= 0) && (b_OutputChannel <= 2)) */ else { /****************************************/ /* The selected digital output is wrong */ /****************************************/ DPRINTK("The selected digital output is wrong\n"); i_ReturnValue = -4; } /* if ((b_OutputChannel >= 0) && (b_OutputChannel <= 2)) */ break; case APCI1710_CHRONO_READ_CHANNEL: /**********************************/ /* Test the digital input channel */ /**********************************/ pb_ChannelStatus = (unsigned char *) &data[0]; b_InputChannel = (unsigned char) CR_CHAN(insn->chanspec); if (b_InputChannel <= 2) { dw_Status = inl(devpriv-> s_BoardInfos. ui_Address + 12 + (64 * b_ModulNbr)); *pb_ChannelStatus = (unsigned char) (((dw_Status >> b_InputChannel) & 1) ^ 1); } /* if ((b_InputChannel >= 0) && (b_InputChannel <= 2)) */ else { /***************************************/ /* The selected digital input is wrong */ /***************************************/ DPRINTK("The selected digital input is wrong\n"); i_ReturnValue = -4; } /* if ((b_InputChannel >= 0) && (b_InputChannel <= 2)) */ break; case APCI1710_CHRONO_READ_PORT: pb_PortValue = (unsigned char *) &data[0]; dw_Status = inl(devpriv->s_BoardInfos. 
ui_Address + 12 + (64 * b_ModulNbr)); *pb_PortValue = (unsigned char) ((dw_Status & 0x7) ^ 7); break; } } else { /*******************************/ /* Chronometer not initialised */ /*******************************/ DPRINTK("Chronometer not initialised\n"); i_ReturnValue = -5; } } else { /******************************************/ /* The module is not a Chronometer module */ /******************************************/ DPRINTK("The module is not a Chronometer module\n"); i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ DPRINTK("Module number error\n"); i_ReturnValue = -2; } return i_ReturnValue; }
gpl-2.0
Steven-Cai/pi-kernel
arch/mn10300/mm/misalignment.c
8674
30021
/* MN10300 Misalignment fixup handler * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/atomic.h> #include <asm/smp.h> #include <asm/pgalloc.h> #include <asm/cpu-regs.h> #include <asm/busctl-regs.h> #include <asm/fpu.h> #include <asm/gdb-stub.h> #include <asm/asm-offsets.h> #if 0 #define kdebug(FMT, ...) printk(KERN_DEBUG "MISALIGN: "FMT"\n", ##__VA_ARGS__) #else #define kdebug(FMT, ...) 
do {} while (0)
#endif

/* Forward declarations of the misalignment fixup helpers (defined later
   in this file).  misalignment_addr() decodes the effective address of a
   misaligned access; misalignment_reg() decodes its register operand. */
static int misalignment_addr(unsigned long *registers, unsigned long sp,
			     unsigned params, unsigned opcode,
			     unsigned long disp,
			     void **_address, unsigned long **_postinc,
			     unsigned long *_inc);

static int misalignment_reg(unsigned long *registers, unsigned params,
			    unsigned opcode, unsigned long disp,
			    unsigned long **_register);

static void misalignment_MOV_Lcc(struct pt_regs *regs, uint32_t opcode);

/* REG_* byte offsets converted to word indices (>> 2): data registers */
static const unsigned Dreg_index[] = {
	REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2
};

/* address registers */
static const unsigned Areg_index[] = {
	REG_A0 >> 2, REG_A1 >> 2, REG_A2 >> 2, REG_A3 >> 2
};

/* extended register numbering: E0-E7, then A0-A3, then D0-D3 */
static const unsigned Rreg_index[] = {
	REG_E0 >> 2, REG_E1 >> 2, REG_E2 >> 2, REG_E3 >> 2,
	REG_E4 >> 2, REG_E5 >> 2, REG_E6 >> 2, REG_E7 >> 2,
	REG_A0 >> 2, REG_A1 >> 2, REG_A2 >> 2, REG_A3 >> 2,
	REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2
};

/* instruction format identifiers */
enum format_id {
	FMT_S0,
	FMT_S1,
	FMT_S2,
	FMT_S4,
	FMT_D0,
	FMT_D1,
	FMT_D2,
	FMT_D4,
	FMT_D6,
	FMT_D7,
	FMT_D8,
	FMT_D9,
	FMT_D10,
};

/* opcode size and displacement size, in bits, for each format above */
static const struct {
	u_int8_t opsz, dispsz;
} format_tbl[16] = {
	[FMT_S0]	= { 8,	0	},
	[FMT_S1]	= { 8,	8	},
	[FMT_S2]	= { 8,	16	},
	[FMT_S4]	= { 8,	32	},
	[FMT_D0]	= { 16,	0	},
	[FMT_D1]	= { 16,	8	},
	[FMT_D2]	= { 16,	16	},
	[FMT_D4]	= { 16,	32	},
	[FMT_D6]	= { 24,	0	},
	[FMT_D7]	= { 24,	8	},
	[FMT_D8]	= { 24,	24	},
	[FMT_D9]	= { 24,	32	},
	[FMT_D10]	= { 32,	0	},
};

/* operand kinds used in mn10300_opcode::params */
enum value_id {
	DM0,		/* data reg in opcode in bits 0-1 */
	DM1,		/* data reg in opcode in bits 2-3 */
	DM2,		/* data reg in opcode in bits 4-5 */
	AM0,		/* addr reg in opcode in bits 0-1 */
	AM1,		/* addr reg in opcode in bits 2-3 */
	AM2,		/* addr reg in opcode in bits 4-5 */
	RM0,		/* reg in opcode in bits 0-3 */
	RM1,		/* reg in opcode in bits 2-5 */
	RM2,		/* reg in opcode in bits 4-7 */
	RM4,		/* reg in opcode in bits 8-11 */
	RM6,		/* reg in opcode in bits 12-15 */

	RD0,		/* reg in displacement in bits 0-3 */
	RD2,		/* reg in displacement in bits 4-7 */

	SP,		/* stack pointer */

	SD8,		/* 8-bit signed displacement */
	SD16,		/* 16-bit signed displacement */
	SD24,		/* 24-bit signed displacement */
	SIMM4_2,	/* 4-bit signed displacement in opcode bits 4-7 */
	SIMM8,		/* 8-bit signed immediate */
	IMM8,		/* 8-bit unsigned immediate */
	IMM16,		/* 16-bit unsigned immediate */
	IMM24,		/* 24-bit unsigned immediate */
	IMM32,		/* 32-bit unsigned immediate */
	IMM32_HIGH8,	/* 32-bit unsigned immediate, LSB in opcode */

	IMM32_MEM,	/* 32-bit unsigned displacement */
	IMM32_HIGH8_MEM, /* 32-bit unsigned displacement, LSB in opcode */

	/* "destination" aliases of the source operand encodings */
	DN0	= DM0,
	DN1	= DM1,
	DN2	= DM2,
	AN0	= AM0,
	AN1	= AM1,
	AN2	= AM2,
	RN0	= RM0,
	RN1	= RM1,
	RN2	= RM2,
	RN4	= RM4,
	RN6	= RM6,
	DI	= DM1,
	RI	= RM2,

};

/* one decodable MN10300 instruction pattern */
struct mn10300_opcode {
	const char	name[8];
	u_int32_t	opcode;
	u_int32_t	opmask;
	unsigned	exclusion;

	enum format_id	format;

	unsigned	cpu_mask;
#define AM33	330

	unsigned	params[2];
#define MEM(ADDR)		(0x80000000 | (ADDR))
#define MEM2(ADDR1, ADDR2)	(0x80000000 | (ADDR1) << 8 | (ADDR2))
#define MEMINC(ADDR)		(0x81000000 | (ADDR))
#define MEMINC2(ADDR, INC)	(0x81000000 | (ADDR) << 8 | (INC))
};

/* LIBOPCODES EXCERPT
   Assemble Matsushita MN10300 instructions.
   Copyright 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public Licence as published by
   the Free Software Foundation; either version 2 of the Licence, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public Licence for more details.

   You should have received a copy of the GNU General Public Licence
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/

/* Decode table for the move instructions the misalignment handler can
 * fix up.  Ordered roughly by opcode; matching in misalignment() takes
 * the first entry whose (opcode & opmask) == opcode.  Terminated by an
 * all-zero sentinel entry. */
static const struct mn10300_opcode mn10300_opcodes[] = {
/* 32-bit "mov" forms */
{ "mov",    0x4200,     0xf300,     0, FMT_S1, 0,    {DM1, MEM2(IMM8, SP)}},
{ "mov",    0x4300,     0xf300,     0, FMT_S1, 0,    {AM1, MEM2(IMM8, SP)}},
{ "mov",    0x5800,     0xfc00,     0, FMT_S1, 0,    {MEM2(IMM8, SP), DN0}},
{ "mov",    0x5c00,     0xfc00,     0, FMT_S1, 0,    {MEM2(IMM8, SP), AN0}},
{ "mov",    0x60,       0xf0,       0, FMT_S0, 0,    {DM1, MEM(AN0)}},
{ "mov",    0x70,       0xf0,       0, FMT_S0, 0,    {MEM(AM0), DN1}},
{ "mov",    0xf000,     0xfff0,     0, FMT_D0, 0,    {MEM(AM0), AN1}},
{ "mov",    0xf010,     0xfff0,     0, FMT_D0, 0,    {AM1, MEM(AN0)}},
{ "mov",    0xf300,     0xffc0,     0, FMT_D0, 0,    {MEM2(DI, AM0), DN2}},
{ "mov",    0xf340,     0xffc0,     0, FMT_D0, 0,    {DM2, MEM2(DI, AN0)}},
{ "mov",    0xf380,     0xffc0,     0, FMT_D0, 0,    {MEM2(DI, AM0), AN2}},
{ "mov",    0xf3c0,     0xffc0,     0, FMT_D0, 0,    {AM2, MEM2(DI, AN0)}},
{ "mov",    0xf80000,   0xfff000,   0, FMT_D1, 0,    {MEM2(SD8, AM0), DN1}},
{ "mov",    0xf81000,   0xfff000,   0, FMT_D1, 0,    {DM1, MEM2(SD8, AN0)}},
{ "mov",    0xf82000,   0xfff000,   0, FMT_D1, 0,    {MEM2(SD8,AM0), AN1}},
{ "mov",    0xf83000,   0xfff000,   0, FMT_D1, 0,    {AM1, MEM2(SD8, AN0)}},
{ "mov",    0xf90a00,   0xffff00,   0, FMT_D6, AM33, {MEM(RM0), RN2}},
{ "mov",    0xf91a00,   0xffff00,   0, FMT_D6, AM33, {RM2, MEM(RN0)}},
{ "mov",    0xf96a00,   0xffff00,   0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}},
{ "mov",    0xf97a00,   0xffff00,   0, FMT_D6, AM33, {RM2, MEMINC(RN0)}},
{ "mov",    0xfa000000, 0xfff00000, 0, FMT_D2, 0,    {MEM2(SD16, AM0), DN1}},
{ "mov",    0xfa100000, 0xfff00000, 0, FMT_D2, 0,    {DM1, MEM2(SD16, AN0)}},
{ "mov",    0xfa200000, 0xfff00000, 0, FMT_D2, 0,    {MEM2(SD16, AM0), AN1}},
{ "mov",    0xfa300000, 0xfff00000, 0, FMT_D2, 0,    {AM1, MEM2(SD16, AN0)}},
{ "mov",    0xfa900000, 0xfff30000, 0, FMT_D2, 0,    {AM1, MEM2(IMM16, SP)}},
{ "mov",    0xfa910000, 0xfff30000, 0, FMT_D2, 0,    {DM1, MEM2(IMM16, SP)}},
{ "mov",    0xfab00000, 0xfffc0000, 0, FMT_D2, 0,    {MEM2(IMM16, SP), AN0}},
{ "mov",    0xfab40000, 0xfffc0000, 0, FMT_D2, 0,    {MEM2(IMM16, SP), DN0}},
{ "mov",    0xfb0a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}},
{ "mov",    0xfb1a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}},
{ "mov",    0xfb6a0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}},
{ "mov",    0xfb7a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}},
{ "mov",    0xfb8a0000, 0xffff0f00, 0, FMT_D7, AM33, {MEM2(IMM8, SP), RN2}},
{ "mov",    0xfb8e0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}},
{ "mov",    0xfb9a0000, 0xffff0f00, 0, FMT_D7, AM33, {RM2, MEM2(IMM8, SP)}},
{ "mov",    0xfb9e0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}},
{ "mov",    0xfc000000, 0xfff00000, 0, FMT_D4, 0,    {MEM2(IMM32,AM0), DN1}},
{ "mov",    0xfc100000, 0xfff00000, 0, FMT_D4, 0,    {DM1, MEM2(IMM32,AN0)}},
{ "mov",    0xfc200000, 0xfff00000, 0, FMT_D4, 0,    {MEM2(IMM32,AM0), AN1}},
{ "mov",    0xfc300000, 0xfff00000, 0, FMT_D4, 0,    {AM1, MEM2(IMM32,AN0)}},
{ "mov",    0xfc800000, 0xfff30000, 0, FMT_D4, 0,    {AM1, MEM(IMM32_MEM)}},
{ "mov",    0xfc810000, 0xfff30000, 0, FMT_D4, 0,    {DM1, MEM(IMM32_MEM)}},
{ "mov",    0xfc900000, 0xfff30000, 0, FMT_D4, 0,    {AM1, MEM2(IMM32, SP)}},
{ "mov",    0xfc910000, 0xfff30000, 0, FMT_D4, 0,    {DM1, MEM2(IMM32, SP)}},
{ "mov",    0xfca00000, 0xfffc0000, 0, FMT_D4, 0,    {MEM(IMM32_MEM), AN0}},
{ "mov",    0xfca40000, 0xfffc0000, 0, FMT_D4, 0,    {MEM(IMM32_MEM), DN0}},
{ "mov",    0xfcb00000, 0xfffc0000, 0, FMT_D4, 0,    {MEM2(IMM32, SP), AN0}},
{ "mov",    0xfcb40000, 0xfffc0000, 0, FMT_D4, 0,    {MEM2(IMM32, SP), DN0}},
{ "mov",    0xfd0a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}},
{ "mov",    0xfd1a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}},
{ "mov",    0xfd6a0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}},
{ "mov",    0xfd7a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}},
{ "mov",    0xfd8a0000, 0xffff0f00, 0, FMT_D8, AM33, {MEM2(IMM24, SP), RN2}},
{ "mov",    0xfd9a0000, 0xffff0f00, 0, FMT_D8, AM33, {RM2, MEM2(IMM24, SP)}},
/* NOTE(review): the next pair of entries is duplicated verbatim (and
 * likewise the 0xfe1a0000 pair below).  Harmless, since matching stops
 * at the first hit, but presumably a copy/paste slip — worth confirming
 * against the binutils opcode table before removing. */
{ "mov",    0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}},
{ "mov",    0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}},
{ "mov",    0xfe0e0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM(IMM32_HIGH8_MEM), RN2}},
{ "mov",    0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}},
{ "mov",    0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}},
{ "mov",    0xfe1e0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM(IMM32_HIGH8_MEM)}},
{ "mov",    0xfe6a0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}},
/* NOTE(review): operand order {RN2, MEMINC2(RM0, ...)} below differs
 * from the sibling rows ({RM2, MEMINC2(RN0, ...)}); left as-is —
 * confirm against the AM33 manual before touching. */
{ "mov",    0xfe7a0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}},
{ "mov",    0xfe8a0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8, SP), RN2}},
{ "mov",    0xfe9a0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}},
/* 16-bit "movhu" forms */
{ "movhu",  0xf060,     0xfff0,     0, FMT_D0, 0,    {MEM(AM0), DN1}},
{ "movhu",  0xf070,     0xfff0,     0, FMT_D0, 0,    {DM1, MEM(AN0)}},
{ "movhu",  0xf480,     0xffc0,     0, FMT_D0, 0,    {MEM2(DI, AM0), DN2}},
{ "movhu",  0xf4c0,     0xffc0,     0, FMT_D0, 0,    {DM2, MEM2(DI, AN0)}},
{ "movhu",  0xf86000,   0xfff000,   0, FMT_D1, 0,    {MEM2(SD8, AM0), DN1}},
{ "movhu",  0xf87000,   0xfff000,   0, FMT_D1, 0,    {DM1, MEM2(SD8, AN0)}},
{ "movhu",  0xf89300,   0xfff300,   0, FMT_D1, 0,    {DM1, MEM2(IMM8, SP)}},
{ "movhu",  0xf8bc00,   0xfffc00,   0, FMT_D1, 0,    {MEM2(IMM8, SP), DN0}},
{ "movhu",  0xf94a00,   0xffff00,   0, FMT_D6, AM33, {MEM(RM0), RN2}},
{ "movhu",  0xf95a00,   0xffff00,   0, FMT_D6, AM33, {RM2, MEM(RN0)}},
{ "movhu",  0xf9ea00,   0xffff00,   0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}},
{ "movhu",  0xf9fa00,   0xffff00,   0, FMT_D6, AM33, {RM2, MEMINC(RN0)}},
{ "movhu",  0xfa600000, 0xfff00000, 0, FMT_D2, 0,    {MEM2(SD16, AM0), DN1}},
{ "movhu",  0xfa700000, 0xfff00000, 0, FMT_D2, 0,    {DM1, MEM2(SD16, AN0)}},
{ "movhu",  0xfa930000, 0xfff30000, 0, FMT_D2, 0,    {DM1, MEM2(IMM16, SP)}},
{ "movhu",  0xfabc0000, 0xfffc0000, 0, FMT_D2, 0,    {MEM2(IMM16, SP), DN0}},
{ "movhu",  0xfb4a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}},
{ "movhu",  0xfb5a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}},
{ "movhu",  0xfbca0000, 0xffff0f00, 0, FMT_D7, AM33, {MEM2(IMM8, SP), RN2}},
{ "movhu",  0xfbce0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}},
{ "movhu",  0xfbda0000, 0xffff0f00, 0, FMT_D7, AM33, {RM2, MEM2(IMM8, SP)}},
{ "movhu",  0xfbde0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}},
{ "movhu",  0xfbea0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}},
{ "movhu",  0xfbfa0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}},
{ "movhu",  0xfc600000, 0xfff00000, 0, FMT_D4, 0,    {MEM2(IMM32,AM0), DN1}},
{ "movhu",  0xfc700000, 0xfff00000, 0, FMT_D4, 0,    {DM1, MEM2(IMM32,AN0)}},
{ "movhu",  0xfc830000, 0xfff30000, 0, FMT_D4, 0,    {DM1, MEM(IMM32_MEM)}},
{ "movhu",  0xfc930000, 0xfff30000, 0, FMT_D4, 0,    {DM1, MEM2(IMM32, SP)}},
{ "movhu",  0xfcac0000, 0xfffc0000, 0, FMT_D4, 0,    {MEM(IMM32_MEM), DN0}},
{ "movhu",  0xfcbc0000, 0xfffc0000, 0, FMT_D4, 0,    {MEM2(IMM32, SP), DN0}},
{ "movhu",  0xfd4a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}},
{ "movhu",  0xfd5a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}},
{ "movhu",  0xfdca0000, 0xffff0f00, 0, FMT_D8, AM33, {MEM2(IMM24, SP), RN2}},
{ "movhu",  0xfdda0000, 0xffff0f00, 0, FMT_D8, AM33, {RM2, MEM2(IMM24, SP)}},
{ "movhu",  0xfdea0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}},
{ "movhu",  0xfdfa0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}},
{ "movhu",  0xfe4a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}},
{ "movhu",  0xfe4e0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM(IMM32_HIGH8_MEM), RN2}},
{ "movhu",  0xfe5a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}},
{ "movhu",  0xfe5e0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM(IMM32_HIGH8_MEM)}},
{ "movhu",  0xfeca0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8, SP), RN2}},
{ "movhu",  0xfeda0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}},
{ "movhu",  0xfeea0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}},
{ "movhu",  0xfefa0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}},
/* move-and-loop instructions (condition is in the low opcode nibble;
 * see misalignment_MOV_Lcc()) */
{ "mov_llt", 0xf7e00000, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lgt", 0xf7e00001, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lge", 0xf7e00002, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lle", 0xf7e00003, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lcs", 0xf7e00004, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lhi", 0xf7e00005, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lcc", 0xf7e00006, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lls", 0xf7e00007, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_leq", 0xf7e00008, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lne", 0xf7e00009, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lra", 0xf7e0000a, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "", 0, 0, 0, 0, 0, {0}},
};

/*
 * fix up misalignment problems where possible
 */
asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code)
{
    const struct exception_table_entry *fixup;
    const struct mn10300_opcode *pop;
    unsigned long *registers = (unsigned long *) regs;
    unsigned long data, *store, *postinc, disp, inc, sp;
    mm_segment_t seg;
    siginfo_t info;
    uint32_t opcode, noc, xo, xm;
    uint8_t *pc, byte, datasz;
    void *address;
    unsigned tmp, npop, dispsz, loop;

    /* we don't fix up userspace misalignment faults */
    if (user_mode(regs))
        goto bus_error;

    sp = (unsigned long) regs + sizeof(*regs);

    kdebug("==>misalignment({pc=%lx,sp=%lx})", regs->pc, sp);

    /* re-enable interrupts if the faulting context had them enabled */
    if (regs->epsw & EPSW_IE)
        asm volatile("or %0,epsw" : : "i"(EPSW_IE));

    seg = get_fs();
    set_fs(KERNEL_DS);

    fixup = search_exception_tables(regs->pc);

    /* first thing to do is to match the opcode */
    pc = (u_int8_t *) regs->pc;

    if (__get_user(byte, pc) != 0)
        goto fetch_error;
    opcode = byte;
    noc = 8;

    /* fetch opcode bytes one at a time, matching against the table;
     * "noc" is the number of opcode bits fetched so far, "npop" the
     * candidate entry's opcode width rounded up to a whole byte */
    for (pop = mn10300_opcodes; pop->name[0]; pop++) {
        npop = ilog2(pop->opcode |
pop->opmask);
        if (npop <= 0 || npop > 31)
            continue;
        npop = (npop + 8) & ~7;

    got_more_bits:
        if (npop == noc) {
            if ((opcode & pop->opmask) == pop->opcode)
                goto found_opcode;
        } else if (npop > noc) {
            xo = pop->opcode >> (npop - noc);
            xm = pop->opmask >> (npop - noc);

            if ((opcode & xm) != xo)
                continue;

            /* we've got a partial match (an exact match on the
             * first N bytes), so we need to get some more data */
            pc++;
            if (__get_user(byte, pc) != 0)
                goto fetch_error;
            opcode = opcode << 8 | byte;
            noc += 8;
            goto got_more_bits;
        } else {
            /* there's already been a partial match as long as the
             * complete match we're now considering, so this one
             * should't match */
            continue;
        }
    }

    /* didn't manage to find a fixup */
    printk(KERN_CRIT "MISALIGN: %lx: unsupported instruction %x\n",
           regs->pc, opcode);

failed:
    /* restore the original address limit before dying / signalling */
    set_fs(seg);
    if (die_if_no_fixup("misalignment error", regs, code))
        return;

bus_error:
    info.si_signo = SIGBUS;
    info.si_errno = 0;
    info.si_code = BUS_ADRALN;
    info.si_addr = (void *) regs->pc;
    force_sig_info(SIGBUS, &info, current);
    return;

    /* error reading opcodes */
fetch_error:
    printk(KERN_CRIT
           "MISALIGN: %p: fault whilst reading instruction data\n", pc);
    goto failed;

bad_addr_mode:
    printk(KERN_CRIT
           "MISALIGN: %lx: unsupported addressing mode %x\n",
           regs->pc, opcode);
    goto failed;

bad_reg_mode:
    printk(KERN_CRIT
           "MISALIGN: %lx: unsupported register mode %x\n",
           regs->pc, opcode);
    goto failed;

unsupported_instruction:
    printk(KERN_CRIT
           "MISALIGN: %lx: unsupported instruction %x (%s)\n",
           regs->pc, opcode, pop->name);
    goto failed;

    /* the data transfer faulted: use the exception table fixup if the
     * faulting insn has one, otherwise raise SIGSEGV */
transfer_failed:
    set_fs(seg);
    if (fixup) {
        regs->pc = fixup->fixup;
        return;
    }
    if (die_if_no_fixup("misalignment fixup", regs, code))
        return;

    info.si_signo = SIGSEGV;
    info.si_errno = 0;
    info.si_code = 0;
    info.si_addr = (void *) regs->pc;
    force_sig_info(SIGSEGV, &info, current);
    return;

    /* we matched the opcode */
found_opcode:
    kdebug("%lx: %x==%x { %x, %x }",
           regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);

    tmp = format_tbl[pop->format].opsz;
    BUG_ON(tmp > noc); /* match was less complete than it ought to have been */

    /* discard any over-fetched low-order opcode bits */
    if (tmp < noc) {
        tmp = noc - tmp;
        opcode >>= tmp;
        pc -= tmp >> 3;
    }

    /* grab the extra displacement (note it's LSB first) */
    disp = 0;
    dispsz = format_tbl[pop->format].dispsz;
    for (loop = 0; loop < dispsz; loop += 8) {
        pc++;
        if (__get_user(byte, pc) != 0)
            goto fetch_error;
        disp |= byte << loop;
        kdebug("{%p} disp[%02x]=%02x", pc, loop, byte);
    }
    kdebug("disp=%lx", disp);

    /* NOTE(review): KERNEL_XDS is only kept when there is no exception
     * table entry for this insn; with a fixup we fall back to the saved
     * limit — presumably so a faulting user access takes the fixup path.
     * Confirm against the mn10300 uaccess semantics. */
    set_fs(KERNEL_XDS);
    if (fixup)
        set_fs(seg);

    /* exactly one of the two operands must be the memory operand */
    tmp = (pop->params[0] ^ pop->params[1]) & 0x80000000;
    if (!tmp) {
        printk(KERN_CRIT
               "MISALIGN: %lx: insn not move to/from memory %x\n",
               regs->pc, opcode);
        goto failed;
    }

    /* determine the data transfer size of the move */
    if (pop->name[3] == 0 || /* "mov" */
        pop->name[4] == 'l') /* mov_lcc */
        inc = datasz = 4;
    else if (pop->name[3] == 'h') /* movhu */
        inc = datasz = 2;
    else
        goto unsupported_instruction;

    if (pop->params[0] & 0x80000000) {
        /* move memory to register */
        if (!misalignment_addr(registers, sp,
                               pop->params[0], opcode, disp,
                               &address, &postinc, &inc))
            goto bad_addr_mode;

        if (!misalignment_reg(registers, pop->params[1], opcode, disp,
                              &store))
            goto bad_reg_mode;

        kdebug("mov%u (%p),DARn", datasz, address);
        if (copy_from_user(&data, (void *) address, datasz) != 0)
            goto transfer_failed;
        /* apply the post-increment to the base register if requested */
        if (pop->params[0] & 0x1000000) {
            kdebug("inc=%lx", inc);
            *postinc += inc;
        }

        *store = data;
        kdebug("loaded %lx", data);
    } else {
        /* move register to memory */
        if (!misalignment_reg(registers, pop->params[0], opcode, disp,
                              &store))
            goto bad_reg_mode;

        if (!misalignment_addr(registers, sp,
                               pop->params[1], opcode, disp,
                               &address, &postinc, &inc))
            goto bad_addr_mode;

        data = *store;

        kdebug("mov%u %lx,(%p)", datasz, data, address);
        if (copy_to_user((void *) address, &data, datasz) != 0)
            goto transfer_failed;
        if (pop->params[1] & 0x1000000)
            *postinc += inc;
    }

    /* advance the PC past the emulated instruction (sizes are in bits) */
    tmp = format_tbl[pop->format].opsz + format_tbl[pop->format].dispsz;
    regs->pc += tmp >> 3;

    /* handle MOV_Lcc, which are currently the only FMT_D10
insns that
     * access memory */
    if (pop->format == FMT_D10)
        misalignment_MOV_Lcc(regs, opcode);

    set_fs(seg);
}

/*
 * determine the address that was being accessed
 *
 * Walks the packed operand descriptor "params" (one value_id per byte,
 * low byte first) summing register contents and displacements into the
 * effective address.  On return *_address is the address, *_postinc
 * points at the base register to post-increment (or NULL), and *_inc is
 * the increment amount when the descriptor supplies one.  Returns 1 on
 * success, 0 (via BUG) on an unknown descriptor.
 */
static int misalignment_addr(unsigned long *registers, unsigned long sp,
                             unsigned params, unsigned opcode,
                             unsigned long disp,
                             void **_address, unsigned long **_postinc,
                             unsigned long *_inc)
{
    unsigned long *postinc = NULL, address = 0, tmp;

    /* no post-increment requested: a displacement (if any) is added to
     * the address instead of becoming the increment */
    if (!(params & 0x1000000)) {
        kdebug("noinc");
        *_inc = 0;
        _inc = NULL;
    }

    params &= 0x00ffffff;

    do {
        switch (params & 0xff) {
        case DM0:
            postinc = &registers[Dreg_index[opcode & 0x03]];
            address += *postinc;
            break;
        case DM1:
            postinc = &registers[Dreg_index[opcode >> 2 & 0x03]];
            address += *postinc;
            break;
        case DM2:
            postinc = &registers[Dreg_index[opcode >> 4 & 0x03]];
            address += *postinc;
            break;
        case AM0:
            postinc = &registers[Areg_index[opcode & 0x03]];
            address += *postinc;
            break;
        case AM1:
            postinc = &registers[Areg_index[opcode >> 2 & 0x03]];
            address += *postinc;
            break;
        case AM2:
            postinc = &registers[Areg_index[opcode >> 4 & 0x03]];
            address += *postinc;
            break;
        case RM0:
            postinc = &registers[Rreg_index[opcode & 0x0f]];
            address += *postinc;
            break;
        case RM1:
            postinc = &registers[Rreg_index[opcode >> 2 & 0x0f]];
            address += *postinc;
            break;
        case RM2:
            postinc = &registers[Rreg_index[opcode >> 4 & 0x0f]];
            address += *postinc;
            break;
        case RM4:
            postinc = &registers[Rreg_index[opcode >> 8 & 0x0f]];
            address += *postinc;
            break;
        case RM6:
            postinc = &registers[Rreg_index[opcode >> 12 & 0x0f]];
            address += *postinc;
            break;
        case RD0:
            postinc = &registers[Rreg_index[disp & 0x0f]];
            address += *postinc;
            break;
        case RD2:
            postinc = &registers[Rreg_index[disp >> 4 & 0x0f]];
            address += *postinc;
            break;
        case SP:
            address += sp;
            break;

            /* displacements are either to be added to the address
             * before use, or, in the case of post-inc addressing,
             * to be added into the base register after use */
        case SD8:
        case SIMM8:
            disp = (long) (int8_t) (disp & 0xff);
            goto displace_or_inc;
        case SD16:
            disp = (long) (int16_t) (disp & 0xffff);
            goto displace_or_inc;
        case SD24:
            /* sign-extend a 24-bit value via shift-left / asr */
            tmp = disp << 8;
            asm("asr 8,%0" : "=r"(tmp) : "0"(tmp) : "cc");
            disp = (long) tmp;
            goto displace_or_inc;
        case SIMM4_2:
            /* sign-extend the 4-bit field in opcode bits 4-7 */
            tmp = opcode >> 4 & 0x0f;
            tmp <<= 28;
            asm("asr 28,%0" : "=r"(tmp) : "0"(tmp) : "cc");
            disp = (long) tmp;
            goto displace_or_inc;
        case IMM8:
            disp &= 0x000000ff;
            goto displace_or_inc;
        case IMM16:
            disp &= 0x0000ffff;
            goto displace_or_inc;
        case IMM24:
            disp &= 0x00ffffff;
            goto displace_or_inc;
        case IMM32:
        case IMM32_MEM:
        case IMM32_HIGH8:
        case IMM32_HIGH8_MEM:
        displace_or_inc:
            kdebug("%s %lx", _inc ? "incr" : "disp", disp);
            if (!_inc)
                address += disp;
            else
                *_inc = disp;
            break;
        default:
            BUG();
            return 0;
        }
    } while ((params >>= 8));

    *_address = (void *) address;
    *_postinc = postinc;
    return 1;
}

/*
 * determine the register that is acting as source/dest
 *
 * "params" must describe a single plain register operand (no memory
 * flag, exactly one descriptor byte); *_register is pointed at the
 * corresponding slot in the pt_regs array.  Returns 1 on success, 0 if
 * the descriptor is not a lone register.
 */
static int misalignment_reg(unsigned long *registers, unsigned params,
                            unsigned opcode, unsigned long disp,
                            unsigned long **_register)
{
    params &= 0x7fffffff;

    if (params & 0xffffff00)
        return 0;

    switch (params & 0xff) {
    case DM0:
        *_register = &registers[Dreg_index[opcode & 0x03]];
        break;
    case DM1:
        *_register = &registers[Dreg_index[opcode >> 2 & 0x03]];
        break;
    case DM2:
        *_register = &registers[Dreg_index[opcode >> 4 & 0x03]];
        break;
    case AM0:
        *_register = &registers[Areg_index[opcode & 0x03]];
        break;
    case AM1:
        *_register = &registers[Areg_index[opcode >> 2 & 0x03]];
        break;
    case AM2:
        *_register = &registers[Areg_index[opcode >> 4 & 0x03]];
        break;
    case RM0:
        *_register = &registers[Rreg_index[opcode & 0x0f]];
        break;
    case RM1:
        *_register = &registers[Rreg_index[opcode >> 2 & 0x0f]];
        break;
    case RM2:
        *_register = &registers[Rreg_index[opcode >> 4 & 0x0f]];
        break;
    case RM4:
        *_register = &registers[Rreg_index[opcode >> 8 & 0x0f]];
        break;
    case RM6:
        *_register = &registers[Rreg_index[opcode >> 12 & 0x0f]];
        break;
    case RD0:
        *_register = &registers[Rreg_index[disp & 0x0f]];
        break;
    case RD2:
        *_register = &registers[Rreg_index[disp >> 4 & 0x0f]];
        break;
    case SP:
*_register = &registers[REG_SP >> 2];
        break;

    default:
        BUG();
        return 0;
    }

    return 1;
}

/*
 * handle the conditional loop part of the move-and-loop instructions
 *
 * Evaluates the condition encoded in the low nibble of the opcode
 * against the saved EPSW flags; when the condition holds, the PC is
 * wound back to just after the SETLB insn (LAR - 4) so the loop body
 * re-executes.
 */
static void misalignment_MOV_Lcc(struct pt_regs *regs, uint32_t opcode)
{
    unsigned long epsw = regs->epsw;
    unsigned long NxorV;

    kdebug("MOV_Lcc %x [flags=%lx]", opcode, epsw & 0xf);

    /* calculate N^V and shift onto the same bit position as Z */
    NxorV = ((epsw >> 3) ^ epsw >> 1) & 1;

    switch (opcode & 0xf) {
    case 0x0: /* MOV_LLT: N^V */
        if (NxorV)
            goto take_the_loop;
        return;
    case 0x1: /* MOV_LGT: ~(Z or (N^V))*/
        if (!((epsw & EPSW_FLAG_Z) | NxorV))
            goto take_the_loop;
        return;
    case 0x2: /* MOV_LGE: ~(N^V) */
        if (!NxorV)
            goto take_the_loop;
        return;
    case 0x3: /* MOV_LLE: Z or (N^V) */
        if ((epsw & EPSW_FLAG_Z) | NxorV)
            goto take_the_loop;
        return;
    case 0x4: /* MOV_LCS: C */
        if (epsw & EPSW_FLAG_C)
            goto take_the_loop;
        return;
    case 0x5: /* MOV_LHI: ~(C or Z) */
        if (!(epsw & (EPSW_FLAG_C | EPSW_FLAG_Z)))
            goto take_the_loop;
        return;
    case 0x6: /* MOV_LCC: ~C */
        if (!(epsw & EPSW_FLAG_C))
            goto take_the_loop;
        return;
    case 0x7: /* MOV_LLS: C or Z */
        if (epsw & (EPSW_FLAG_C | EPSW_FLAG_Z))
            goto take_the_loop;
        return;
    case 0x8: /* MOV_LEQ: Z */
        if (epsw & EPSW_FLAG_Z)
            goto take_the_loop;
        return;
    case 0x9: /* MOV_LNE: ~Z */
        if (!(epsw & EPSW_FLAG_Z))
            goto take_the_loop;
        return;
    case 0xa: /* MOV_LRA: always */
        goto take_the_loop;

    default:
        BUG();
    }

take_the_loop:
    /* wind the PC back to just after the SETLB insn */
    kdebug("loop LAR=%lx", regs->lar);
    regs->pc = regs->lar - 4;
}

/*
 * misalignment handler tests
 */
#ifdef CONFIG_TEST_MISALIGNMENT_HANDLER
/* Test pattern: bytes 0x11..0x44 placed at offsets 257-260 so that
 * word reads through testbuf+1+256 are misaligned. */
static u8 __initdata testbuf[512] __attribute__((aligned(16))) = {
    [257] = 0x11,
    [258] = 0x22,
    [259] = 0x33,
    [260] = 0x44,
};

#define ASSERTCMP(X, OP, Y) \
do { \
    if (unlikely(!((X) OP (Y)))) { \
        printk(KERN_ERR "\n"); \
        printk(KERN_ERR "MISALIGN: Assertion failed at line %u\n", \
               __LINE__); \
        printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \
               (unsigned long)(X), (unsigned long)(Y)); \
        BUG(); \
    } \
} while(0)

/* Exercise each addressing mode the handler fixes up; every asm below
 * performs a deliberately misaligned load which faults and is emulated,
 * then the result and any post-increment are checked. */
static int __init test_misalignment(void)
{
    register void *r asm("e0");
    register u32 y asm("e1");
    void *p = testbuf, *q;
    u32 tmp, tmp2, x;

    printk(KERN_NOTICE "==>test_misalignment() [testbuf=%p]\n", p);

    p++; /* misalign the base pointer */

    printk(KERN_NOTICE "___ MOV (Am),Dn ___\n");
    q = p + 256;
    asm volatile("mov (%0),%1" : "+a"(q), "=d"(x));
    ASSERTCMP(q, ==, p + 256);
    ASSERTCMP(x, ==, 0x44332211);

    printk(KERN_NOTICE "___ MOV (256,Am),Dn ___\n");
    q = p;
    asm volatile("mov (256,%0),%1" : "+a"(q), "=d"(x));
    ASSERTCMP(q, ==, p);
    ASSERTCMP(x, ==, 0x44332211);

    printk(KERN_NOTICE "___ MOV (Di,Am),Dn ___\n");
    tmp = 256;
    q = p;
    asm volatile("mov (%2,%0),%1" : "+a"(q), "=d"(x), "+d"(tmp));
    ASSERTCMP(q, ==, p);
    ASSERTCMP(x, ==, 0x44332211);
    ASSERTCMP(tmp, ==, 256);

    printk(KERN_NOTICE "___ MOV (256,Rm),Rn ___\n");
    r = p;
    asm volatile("mov (256,%0),%1" : "+r"(r), "=r"(y));
    ASSERTCMP(r, ==, p);
    ASSERTCMP(y, ==, 0x44332211);

    printk(KERN_NOTICE "___ MOV (Rm+),Rn ___\n");
    r = p + 256;
    asm volatile("mov (%0+),%1" : "+r"(r), "=r"(y));
    ASSERTCMP(r, ==, p + 256 + 4);
    ASSERTCMP(y, ==, 0x44332211);

    printk(KERN_NOTICE "___ MOV (Rm+,8),Rn ___\n");
    r = p + 256;
    asm volatile("mov (%0+,8),%1" : "+r"(r), "=r"(y));
    ASSERTCMP(r, ==, p + 256 + 8);
    ASSERTCMP(y, ==, 0x44332211);

    printk(KERN_NOTICE "___ MOV (7,SP),Rn ___\n");
    asm volatile(
        "add -16,sp \n"
        "mov +0x11,%0 \n"
        "movbu %0,(7,sp) \n"
        "mov +0x22,%0 \n"
        "movbu %0,(8,sp) \n"
        "mov +0x33,%0 \n"
        "movbu %0,(9,sp) \n"
        "mov +0x44,%0 \n"
        "movbu %0,(10,sp) \n"
        "mov (7,sp),%1 \n"
        "add +16,sp \n"
        : "+a"(q), "=d"(x));
    ASSERTCMP(x, ==, 0x44332211);

    printk(KERN_NOTICE "___ MOV (259,SP),Rn ___\n");
    asm volatile(
        "add -264,sp \n"
        "mov +0x11,%0 \n"
        "movbu %0,(259,sp) \n"
        "mov +0x22,%0 \n"
        "movbu %0,(260,sp) \n"
        "mov +0x33,%0 \n"
        "movbu %0,(261,sp) \n"
        "mov +0x55,%0 \n"
        "movbu %0,(262,sp) \n"
        "mov (259,sp),%1 \n"
        "add +264,sp \n"
        : "+d"(tmp), "=d"(x));
    ASSERTCMP(x, ==, 0x55332211);

    printk(KERN_NOTICE "___ MOV (260,SP),Rn ___\n");
    asm volatile(
        "add -264,sp \n"
        "mov +0x11,%0 \n"
        "movbu %0,(260,sp) \n"
        "mov +0x22,%0 \n"
        "movbu %0,(261,sp) \n"
        "mov +0x33,%0 \n"
        "movbu %0,(262,sp) \n"
        "mov +0x55,%0 \n"
        "movbu %0,(263,sp) \n"
        "mov (260,sp),%1 \n"
        "add +264,sp \n"
        : "+d"(tmp), "=d"(x));
    ASSERTCMP(x, ==, 0x55332211);

    printk(KERN_NOTICE "___ MOV_LNE ___\n");
    tmp = 1;
    tmp2 = 2;
    q = p + 256;
    asm volatile(
        "setlb \n"
        "mov %2,%3 \n"
        "mov %1,%2 \n"
        "cmp +0,%1 \n"
        "mov_lne (%0+,4),%1"
        : "+r"(q), "+d"(tmp), "+d"(tmp2), "=d"(x)
        :
        : "cc");
    ASSERTCMP(q, ==, p + 256 + 12);
    ASSERTCMP(x, ==, 0x44332211);

    printk(KERN_NOTICE "___ MOV in SETLB ___\n");
    tmp = 1;
    tmp2 = 2;
    q = p + 256;
    asm volatile(
        "setlb \n"
        "mov %1,%3 \n"
        "mov (%0+),%1 \n"
        "cmp +0,%1 \n"
        "lne "
        : "+a"(q), "+d"(tmp), "+d"(tmp2), "=d"(x)
        :
        : "cc");
    ASSERTCMP(q, ==, p + 256 + 8);
    ASSERTCMP(x, ==, 0x44332211);

    printk(KERN_NOTICE "<==test_misalignment()\n");
    return 0;
}

arch_initcall(test_misalignment);

#endif /* CONFIG_TEST_MISALIGNMENT_HANDLER */
gpl-2.0
CyanogenMod/htc-kernel-pyramid
drivers/video/sgivwfb.c
9186
23539
/*
 *  linux/drivers/video/sgivwfb.c -- SGI DBE frame buffer device
 *
 *      Copyright (C) 1999 Silicon Graphics, Inc.
 *      Jeffrey Newquist, newquist@engr.sgi.som
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License. See the file COPYING in the main directory of this archive for
 *  more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/visws/sgivw.h>

#define INCLUDE_TIMING_TABLE_DATA
#define DBE_REG_BASE par->regs
#include <video/sgivw.h>

/* Per-device private state: mapped DBE registers, colormap FIFO depth,
 * and the index into dbeVTimings[] of the current video timing. */
struct sgivw_par {
    struct asregs *regs;
    u32 cmap_fifo;
    u_long timing_num;
};

#define FLATPANEL_SGI_1600SW 5

/*
 *  RAM we reserve for the frame buffer. This defines the maximum screen
 *  size
 *
 *  The default can be overridden if the driver is compiled as a module
 */

static int ypan = 0;
static int ywrap = 0;

/* set by the arch setup code; -1 means "no flat panel" */
static int flatpanel_id = -1;

static struct fb_fix_screeninfo sgivwfb_fix __devinitdata = {
    .id = "SGI Vis WS FB",
    .type = FB_TYPE_PACKED_PIXELS,
    .visual = FB_VISUAL_PSEUDOCOLOR,
    .mmio_start = DBE_REG_PHYS,
    .mmio_len = DBE_REG_SIZE,
    .accel = FB_ACCEL_NONE,
    .line_length = 640,
};

/* default mode: 640x480, 8 bpp */
static struct fb_var_screeninfo sgivwfb_var __devinitdata = {
    .xres = 640,
    .yres = 480,
    .xres_virtual = 640,
    .yres_virtual = 480,
    .bits_per_pixel = 8,
    .red = { 0, 8, 0 },
    .green = { 0, 8, 0 },
    .blue = { 0, 8, 0 },
    .height = -1,
    .width = -1,
    .pixclock = 20000,
    .left_margin = 64,
    .right_margin = 64,
    .upper_margin = 32,
    .lower_margin = 32,
    .hsync_len = 64,
    .vsync_len = 2,
    .vmode = FB_VMODE_NONINTERLACED
};

/* mode used when an SGI 1600SW flat panel is attached: 1600x1024, 8 bpp */
static struct fb_var_screeninfo sgivwfb_var1600sw __devinitdata = {
    .xres = 1600,
    .yres = 1024,
    .xres_virtual = 1600,
    .yres_virtual = 1024,
    .bits_per_pixel = 8,
    .red = { 0, 8, 0 },
    .green = { 0, 8, 0 },
    .blue = { 0, 8, 0 },
    .height = -1,
    .width = -1,
    .pixclock = 9353,
    .left_margin = 20,
    .right_margin = 30,
    .upper_margin = 37,
    .lower_margin = 3,
    .hsync_len = 20,
    .vsync_len = 3,
    .vmode = FB_VMODE_NONINTERLACED
};

/*
 *  Interface used by the world
 */
int sgivwfb_init(void);

static int sgivwfb_check_var(struct fb_var_screeninfo *var,
                             struct fb_info *info);
static int sgivwfb_set_par(struct fb_info *info);
static int sgivwfb_setcolreg(u_int regno, u_int red, u_int green,
                             u_int blue, u_int transp,
                             struct fb_info *info);
static int sgivwfb_mmap(struct fb_info *info,
                        struct vm_area_struct *vma);

static struct fb_ops sgivwfb_ops = {
    .owner = THIS_MODULE,
    .fb_check_var = sgivwfb_check_var,
    .fb_set_par = sgivwfb_set_par,
    .fb_setcolreg = sgivwfb_setcolreg,
    .fb_fillrect = cfb_fillrect,
    .fb_copyarea = cfb_copyarea,
    .fb_imageblit = cfb_imageblit,
    .fb_mmap = sgivwfb_mmap,
};

/*
 *  Internal routines
 */

/* bytes per pixel for the supported depths; 0 signals an unsupported
 * depth to callers */
static unsigned long bytes_per_pixel(int bpp)
{
    switch (bpp) {
    case 8:
        return 1;
    case 16:
        return 2;
    case 32:
        return 4;
    default:
        printk(KERN_INFO "sgivwfb: unsupported bpp %d\n", bpp);
        return 0;
    }
}

/* length of one scanline in bytes for the given virtual width/depth */
static unsigned long get_line_length(int xres_virtual, int bpp)
{
    return (xres_virtual * bytes_per_pixel(bpp));
}

/*
 * Function:	dbe_TurnOffDma
 * Parameters:	(None)
 * Description:	This should turn off the monitor and dbe.  This is used
 *              when switching between the serial console and the graphics
 *              console.
 */
static void dbe_TurnOffDma(struct sgivw_par *par)
{
    unsigned int readVal;
    int i;

    // Check to see if things are already turned off:
    // 1) Check to see if dbe is not using the internal dotclock.
    // 2) Check to see if the xy counter in dbe is already off.
DBE_GETREG(ctrlstat, readVal);
    if (GET_DBE_FIELD(CTRLSTAT, PCLKSEL, readVal) < 2)
        return;

    DBE_GETREG(vt_xy, readVal);
    if (GET_DBE_FIELD(VT_XY, VT_FREEZE, readVal) == 1)
        return;

    // Otherwise, turn off dbe

    DBE_GETREG(ovr_control, readVal);
    SET_DBE_FIELD(OVR_CONTROL, OVR_DMA_ENABLE, readVal, 0);
    DBE_SETREG(ovr_control, readVal);
    udelay(1000);
    DBE_GETREG(frm_control, readVal);
    SET_DBE_FIELD(FRM_CONTROL, FRM_DMA_ENABLE, readVal, 0);
    DBE_SETREG(frm_control, readVal);
    udelay(1000);
    DBE_GETREG(did_control, readVal);
    SET_DBE_FIELD(DID_CONTROL, DID_DMA_ENABLE, readVal, 0);
    DBE_SETREG(did_control, readVal);
    udelay(1000);

    // XXX HACK:
    //
    //    This was necessary for GBE--we had to wait through two
    //    vertical retrace periods before the pixel DMA was
    //    turned off for sure.  I've left this in for now, in
    //    case dbe needs it.

    /* NOTE(review): the polling polarity below looks inverted (it delays
     * while the in-hardware DMA-enable fields read 0 and only breaks out
     * once all three read non-zero), but it matches the upstream driver;
     * confirm against the DBE register spec before changing. */
    for (i = 0; i < 10000; i++) {
        DBE_GETREG(frm_inhwctrl, readVal);
        if (GET_DBE_FIELD(FRM_INHWCTRL, FRM_DMA_ENABLE, readVal) == 0)
            udelay(10);
        else {
            DBE_GETREG(ovr_inhwctrl, readVal);
            if (GET_DBE_FIELD(OVR_INHWCTRL, OVR_DMA_ENABLE, readVal) == 0)
                udelay(10);
            else {
                DBE_GETREG(did_inhwctrl, readVal);
                if (GET_DBE_FIELD(DID_INHWCTRL, DID_DMA_ENABLE, readVal) == 0)
                    udelay(10);
                else
                    break;
            }
        }
    }
}

/*
 *  Set the User Defined Part of the Display. Again if par use it to get
 *  real video mode.
 *
 *  Validates and adjusts *var to the nearest mode the hardware can do:
 *  clamps bpp to 8/16/32, picks the smallest timing from dbeVTimings[]
 *  that fits the requested resolution with the closest dot-clock, checks
 *  the result against the reserved framebuffer memory, and fills in the
 *  colour-field layout and timing margins.  Returns 0 or a -errno.
 */
static int sgivwfb_check_var(struct fb_var_screeninfo *var,
                             struct fb_info *info)
{
    struct sgivw_par *par = (struct sgivw_par *)info->par;
    struct dbe_timing_info *timing;
    u_long line_length;
    u_long min_mode;
    int req_dot;
    int test_mode;

    /*
     *  FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal!
     *  as FB_VMODE_SMOOTH_XPAN is only used internally
     */
    if (var->vmode & FB_VMODE_CONUPDATE) {
        var->vmode |= FB_VMODE_YWRAP;
        var->xoffset = info->var.xoffset;
        var->yoffset = info->var.yoffset;
    }

    /* XXX FIXME - forcing var's */
    var->xoffset = 0;
    var->yoffset = 0;

    /* Limit bpp to 8, 16, and 32 */
    if (var->bits_per_pixel <= 8)
        var->bits_per_pixel = 8;
    else if (var->bits_per_pixel <= 16)
        var->bits_per_pixel = 16;
    else if (var->bits_per_pixel <= 32)
        var->bits_per_pixel = 32;
    else
        return -EINVAL;

    var->grayscale = 0; /* No grayscale for now */

    /* determine valid resolution and timing */
    for (min_mode = 0; min_mode < ARRAY_SIZE(dbeVTimings); min_mode++) {
        if (dbeVTimings[min_mode].width >= var->xres &&
            dbeVTimings[min_mode].height >= var->yres)
            break;
    }

    if (min_mode == ARRAY_SIZE(dbeVTimings))
        return -EINVAL; /* Resolution to high */

    /* XXX FIXME - should try to pick best refresh rate */
    /* for now, pick closest dot-clock within 3MHz */
    req_dot = PICOS2KHZ(var->pixclock);
    printk(KERN_INFO "sgivwfb: requested pixclock=%d ps (%d KHz)\n",
           var->pixclock, req_dot);
    test_mode = min_mode;
    while (dbeVTimings[min_mode].width == dbeVTimings[test_mode].width) {
        if (dbeVTimings[test_mode].cfreq + 3000 > req_dot)
            break;
        test_mode++;
    }
    if (dbeVTimings[min_mode].width != dbeVTimings[test_mode].width)
        test_mode--;
    min_mode = test_mode;
    timing = &dbeVTimings[min_mode];
    printk(KERN_INFO "sgivwfb: granted dot-clock=%d KHz\n", timing->cfreq);

    /* Adjust virtual resolution, if necessary */
    if (var->xres > var->xres_virtual || (!ywrap && !ypan))
        var->xres_virtual = var->xres;
    if (var->yres > var->yres_virtual || (!ywrap && !ypan))
        var->yres_virtual = var->yres;

    /*
     *  Memory limit
     */
    line_length = get_line_length(var->xres_virtual, var->bits_per_pixel);
    if (line_length * var->yres_virtual > sgivwfb_mem_size)
        return -ENOMEM; /* Virtual resolution to high */

    info->fix.line_length = line_length;

    /* per-depth colour field layout */
    switch (var->bits_per_pixel) {
    case 8:
        var->red.offset = 0;
        var->red.length = 8;
        var->green.offset = 0;
var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case 16: /* RGBA 5551 */ var->red.offset = 11; var->red.length = 5; var->green.offset = 6; var->green.length = 5; var->blue.offset = 1; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case 32: /* RGB 8888 */ var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; } var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; var->transp.msb_right = 0; /* set video timing information */ var->pixclock = KHZ2PICOS(timing->cfreq); var->left_margin = timing->htotal - timing->hsync_end; var->right_margin = timing->hsync_start - timing->width; var->upper_margin = timing->vtotal - timing->vsync_end; var->lower_margin = timing->vsync_start - timing->height; var->hsync_len = timing->hsync_end - timing->hsync_start; var->vsync_len = timing->vsync_end - timing->vsync_start; /* Ouch. This breaks the rules but timing_num is only important if you * change a video mode */ par->timing_num = min_mode; printk(KERN_INFO "sgivwfb: new video mode xres=%d yres=%d bpp=%d\n", var->xres, var->yres, var->bits_per_pixel); printk(KERN_INFO " vxres=%d vyres=%d\n", var->xres_virtual, var->yres_virtual); return 0; } /* * Setup flatpanel related registers. */ static void sgivwfb_setup_flatpanel(struct sgivw_par *par, struct dbe_timing_info *currentTiming) { int fp_wid, fp_hgt, fp_vbs, fp_vbe; u32 outputVal = 0; SET_DBE_FIELD(VT_FLAGS, HDRV_INVERT, outputVal, (currentTiming->flags & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1); SET_DBE_FIELD(VT_FLAGS, VDRV_INVERT, outputVal, (currentTiming->flags & FB_SYNC_VERT_HIGH_ACT) ? 
0 : 1); DBE_SETREG(vt_flags, outputVal); /* Turn on the flat panel */ switch (flatpanel_id) { case FLATPANEL_SGI_1600SW: fp_wid = 1600; fp_hgt = 1024; fp_vbs = 0; fp_vbe = 1600; currentTiming->pll_m = 4; currentTiming->pll_n = 1; currentTiming->pll_p = 0; break; default: fp_wid = fp_hgt = fp_vbs = fp_vbe = 0xfff; } outputVal = 0; SET_DBE_FIELD(FP_DE, FP_DE_ON, outputVal, fp_vbs); SET_DBE_FIELD(FP_DE, FP_DE_OFF, outputVal, fp_vbe); DBE_SETREG(fp_de, outputVal); outputVal = 0; SET_DBE_FIELD(FP_HDRV, FP_HDRV_OFF, outputVal, fp_wid); DBE_SETREG(fp_hdrv, outputVal); outputVal = 0; SET_DBE_FIELD(FP_VDRV, FP_VDRV_ON, outputVal, 1); SET_DBE_FIELD(FP_VDRV, FP_VDRV_OFF, outputVal, fp_hgt + 1); DBE_SETREG(fp_vdrv, outputVal); } /* * Set the hardware according to 'par'. */ static int sgivwfb_set_par(struct fb_info *info) { struct sgivw_par *par = info->par; int i, j, htmp, temp; u32 readVal, outputVal; int wholeTilesX, maxPixelsPerTileX; int frmWrite1, frmWrite2, frmWrite3b; struct dbe_timing_info *currentTiming; /* Current Video Timing */ int xpmax, ypmax; // Monitor resolution int bytesPerPixel; // Bytes per pixel currentTiming = &dbeVTimings[par->timing_num]; bytesPerPixel = bytes_per_pixel(info->var.bits_per_pixel); xpmax = currentTiming->width; ypmax = currentTiming->height; /* dbe_InitGraphicsBase(); */ /* Turn on dotclock PLL */ DBE_SETREG(ctrlstat, 0x20000000); dbe_TurnOffDma(par); /* dbe_CalculateScreenParams(); */ maxPixelsPerTileX = 512 / bytesPerPixel; wholeTilesX = xpmax / maxPixelsPerTileX; if (wholeTilesX * maxPixelsPerTileX < xpmax) wholeTilesX++; printk(KERN_DEBUG "sgivwfb: pixPerTile=%d wholeTilesX=%d\n", maxPixelsPerTileX, wholeTilesX); /* dbe_InitGammaMap(); */ udelay(10); for (i = 0; i < 256; i++) { DBE_ISETREG(gmap, i, (i << 24) | (i << 16) | (i << 8)); } /* dbe_TurnOn(); */ DBE_GETREG(vt_xy, readVal); if (GET_DBE_FIELD(VT_XY, VT_FREEZE, readVal) == 1) { DBE_SETREG(vt_xy, 0x00000000); udelay(1); } else dbe_TurnOffDma(par); /* dbe_Initdbe(); */ for (i = 0; 
i < 256; i++) { for (j = 0; j < 100; j++) { DBE_GETREG(cm_fifo, readVal); if (readVal != 0x00000000) break; else udelay(10); } // DBE_ISETREG(cmap, i, 0x00000000); DBE_ISETREG(cmap, i, (i << 8) | (i << 16) | (i << 24)); } /* dbe_InitFramebuffer(); */ frmWrite1 = 0; SET_DBE_FIELD(FRM_SIZE_TILE, FRM_WIDTH_TILE, frmWrite1, wholeTilesX); SET_DBE_FIELD(FRM_SIZE_TILE, FRM_RHS, frmWrite1, 0); switch (bytesPerPixel) { case 1: SET_DBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, frmWrite1, DBE_FRM_DEPTH_8); break; case 2: SET_DBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, frmWrite1, DBE_FRM_DEPTH_16); break; case 4: SET_DBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, frmWrite1, DBE_FRM_DEPTH_32); break; } frmWrite2 = 0; SET_DBE_FIELD(FRM_SIZE_PIXEL, FB_HEIGHT_PIX, frmWrite2, ypmax); // Tell dbe about the framebuffer location and type // XXX What format is the FRM_TILE_PTR?? 64K aligned address? frmWrite3b = 0; SET_DBE_FIELD(FRM_CONTROL, FRM_TILE_PTR, frmWrite3b, sgivwfb_mem_phys >> 9); SET_DBE_FIELD(FRM_CONTROL, FRM_DMA_ENABLE, frmWrite3b, 1); SET_DBE_FIELD(FRM_CONTROL, FRM_LINEAR, frmWrite3b, 1); /* Initialize DIDs */ outputVal = 0; switch (bytesPerPixel) { case 1: SET_DBE_FIELD(WID, TYP, outputVal, DBE_CMODE_I8); break; case 2: SET_DBE_FIELD(WID, TYP, outputVal, DBE_CMODE_RGBA5); break; case 4: SET_DBE_FIELD(WID, TYP, outputVal, DBE_CMODE_RGB8); break; } SET_DBE_FIELD(WID, BUF, outputVal, DBE_BMODE_BOTH); for (i = 0; i < 32; i++) { DBE_ISETREG(mode_regs, i, outputVal); } /* dbe_InitTiming(); */ DBE_SETREG(vt_intr01, 0xffffffff); DBE_SETREG(vt_intr23, 0xffffffff); DBE_GETREG(dotclock, readVal); DBE_SETREG(dotclock, readVal & 0xffff); DBE_SETREG(vt_xymax, 0x00000000); outputVal = 0; SET_DBE_FIELD(VT_VSYNC, VT_VSYNC_ON, outputVal, currentTiming->vsync_start); SET_DBE_FIELD(VT_VSYNC, VT_VSYNC_OFF, outputVal, currentTiming->vsync_end); DBE_SETREG(vt_vsync, outputVal); outputVal = 0; SET_DBE_FIELD(VT_HSYNC, VT_HSYNC_ON, outputVal, currentTiming->hsync_start); SET_DBE_FIELD(VT_HSYNC, VT_HSYNC_OFF, outputVal, 
currentTiming->hsync_end); DBE_SETREG(vt_hsync, outputVal); outputVal = 0; SET_DBE_FIELD(VT_VBLANK, VT_VBLANK_ON, outputVal, currentTiming->vblank_start); SET_DBE_FIELD(VT_VBLANK, VT_VBLANK_OFF, outputVal, currentTiming->vblank_end); DBE_SETREG(vt_vblank, outputVal); outputVal = 0; SET_DBE_FIELD(VT_HBLANK, VT_HBLANK_ON, outputVal, currentTiming->hblank_start); SET_DBE_FIELD(VT_HBLANK, VT_HBLANK_OFF, outputVal, currentTiming->hblank_end - 3); DBE_SETREG(vt_hblank, outputVal); outputVal = 0; SET_DBE_FIELD(VT_VCMAP, VT_VCMAP_ON, outputVal, currentTiming->vblank_start); SET_DBE_FIELD(VT_VCMAP, VT_VCMAP_OFF, outputVal, currentTiming->vblank_end); DBE_SETREG(vt_vcmap, outputVal); outputVal = 0; SET_DBE_FIELD(VT_HCMAP, VT_HCMAP_ON, outputVal, currentTiming->hblank_start); SET_DBE_FIELD(VT_HCMAP, VT_HCMAP_OFF, outputVal, currentTiming->hblank_end - 3); DBE_SETREG(vt_hcmap, outputVal); if (flatpanel_id != -1) sgivwfb_setup_flatpanel(par, currentTiming); outputVal = 0; temp = currentTiming->vblank_start - currentTiming->vblank_end - 1; if (temp > 0) temp = -temp; SET_DBE_FIELD(DID_START_XY, DID_STARTY, outputVal, (u32) temp); if (currentTiming->hblank_end >= 20) SET_DBE_FIELD(DID_START_XY, DID_STARTX, outputVal, currentTiming->hblank_end - 20); else SET_DBE_FIELD(DID_START_XY, DID_STARTX, outputVal, currentTiming->htotal - (20 - currentTiming-> hblank_end)); DBE_SETREG(did_start_xy, outputVal); outputVal = 0; SET_DBE_FIELD(CRS_START_XY, CRS_STARTY, outputVal, (u32) (temp + 1)); if (currentTiming->hblank_end >= DBE_CRS_MAGIC) SET_DBE_FIELD(CRS_START_XY, CRS_STARTX, outputVal, currentTiming->hblank_end - DBE_CRS_MAGIC); else SET_DBE_FIELD(CRS_START_XY, CRS_STARTX, outputVal, currentTiming->htotal - (DBE_CRS_MAGIC - currentTiming-> hblank_end)); DBE_SETREG(crs_start_xy, outputVal); outputVal = 0; SET_DBE_FIELD(VC_START_XY, VC_STARTY, outputVal, (u32) temp); SET_DBE_FIELD(VC_START_XY, VC_STARTX, outputVal, currentTiming->hblank_end - 4); DBE_SETREG(vc_start_xy, outputVal); 
DBE_SETREG(frm_size_tile, frmWrite1); DBE_SETREG(frm_size_pixel, frmWrite2); outputVal = 0; SET_DBE_FIELD(DOTCLK, M, outputVal, currentTiming->pll_m - 1); SET_DBE_FIELD(DOTCLK, N, outputVal, currentTiming->pll_n - 1); SET_DBE_FIELD(DOTCLK, P, outputVal, currentTiming->pll_p); SET_DBE_FIELD(DOTCLK, RUN, outputVal, 1); DBE_SETREG(dotclock, outputVal); udelay(11 * 1000); DBE_SETREG(vt_vpixen, 0xffffff); DBE_SETREG(vt_hpixen, 0xffffff); outputVal = 0; SET_DBE_FIELD(VT_XYMAX, VT_MAXX, outputVal, currentTiming->htotal); SET_DBE_FIELD(VT_XYMAX, VT_MAXY, outputVal, currentTiming->vtotal); DBE_SETREG(vt_xymax, outputVal); outputVal = frmWrite1; SET_DBE_FIELD(FRM_SIZE_TILE, FRM_FIFO_RESET, outputVal, 1); DBE_SETREG(frm_size_tile, outputVal); DBE_SETREG(frm_size_tile, frmWrite1); outputVal = 0; SET_DBE_FIELD(OVR_WIDTH_TILE, OVR_FIFO_RESET, outputVal, 1); DBE_SETREG(ovr_width_tile, outputVal); DBE_SETREG(ovr_width_tile, 0); DBE_SETREG(frm_control, frmWrite3b); DBE_SETREG(did_control, 0); // Wait for dbe to take frame settings for (i = 0; i < 100000; i++) { DBE_GETREG(frm_inhwctrl, readVal); if (GET_DBE_FIELD(FRM_INHWCTRL, FRM_DMA_ENABLE, readVal) != 0) break; else udelay(1); } if (i == 100000) printk(KERN_INFO "sgivwfb: timeout waiting for frame DMA enable.\n"); outputVal = 0; htmp = currentTiming->hblank_end - 19; if (htmp < 0) htmp += currentTiming->htotal; /* allow blank to wrap around */ SET_DBE_FIELD(VT_HPIXEN, VT_HPIXEN_ON, outputVal, htmp); SET_DBE_FIELD(VT_HPIXEN, VT_HPIXEN_OFF, outputVal, ((htmp + currentTiming->width - 2) % currentTiming->htotal)); DBE_SETREG(vt_hpixen, outputVal); outputVal = 0; SET_DBE_FIELD(VT_VPIXEN, VT_VPIXEN_OFF, outputVal, currentTiming->vblank_start); SET_DBE_FIELD(VT_VPIXEN, VT_VPIXEN_ON, outputVal, currentTiming->vblank_end); DBE_SETREG(vt_vpixen, outputVal); // Turn off mouse cursor par->regs->crs_ctl = 0; // XXX What's this section for?? 
DBE_GETREG(ctrlstat, readVal); readVal &= 0x02000000; if (readVal != 0) { DBE_SETREG(ctrlstat, 0x30000000); } return 0; } /* * Set a single color register. The values supplied are already * rounded down to the hardware's capabilities (according to the * entries in the var structure). Return != 0 for invalid regno. */ static int sgivwfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct sgivw_par *par = (struct sgivw_par *) info->par; if (regno > 255) return 1; red >>= 8; green >>= 8; blue >>= 8; /* wait for the color map FIFO to have a free entry */ while (par->cmap_fifo == 0) par->cmap_fifo = par->regs->cm_fifo; par->regs->cmap[regno] = (red << 24) | (green << 16) | (blue << 8); par->cmap_fifo--; /* assume FIFO is filling up */ return 0; } static int sgivwfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { unsigned long size = vma->vm_end - vma->vm_start; unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) return -EINVAL; if (offset + size > sgivwfb_mem_size) return -EINVAL; offset += sgivwfb_mem_phys; pgprot_val(vma->vm_page_prot) = pgprot_val(vma->vm_page_prot) | _PAGE_PCD; vma->vm_flags |= VM_IO; if (remap_pfn_range(vma, vma->vm_start, offset >> PAGE_SHIFT, size, vma->vm_page_prot)) return -EAGAIN; printk(KERN_DEBUG "sgivwfb: mmap framebuffer P(%lx)->V(%lx)\n", offset, vma->vm_start); return 0; } int __init sgivwfb_setup(char *options) { char *this_opt; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!strncmp(this_opt, "monitor:", 8)) { if (!strncmp(this_opt + 8, "crt", 3)) flatpanel_id = -1; else if (!strncmp(this_opt + 8, "1600sw", 6)) flatpanel_id = FLATPANEL_SGI_1600SW; } } return 0; } /* * Initialisation */ static int __devinit sgivwfb_probe(struct platform_device *dev) { struct sgivw_par *par; struct fb_info *info; char *monitor; info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 16, 
&dev->dev); if (!info) return -ENOMEM; par = info->par; if (!request_mem_region(DBE_REG_PHYS, DBE_REG_SIZE, "sgivwfb")) { printk(KERN_ERR "sgivwfb: couldn't reserve mmio region\n"); framebuffer_release(info); return -EBUSY; } par->regs = (struct asregs *) ioremap_nocache(DBE_REG_PHYS, DBE_REG_SIZE); if (!par->regs) { printk(KERN_ERR "sgivwfb: couldn't ioremap registers\n"); goto fail_ioremap_regs; } mtrr_add(sgivwfb_mem_phys, sgivwfb_mem_size, MTRR_TYPE_WRCOMB, 1); sgivwfb_fix.smem_start = sgivwfb_mem_phys; sgivwfb_fix.smem_len = sgivwfb_mem_size; sgivwfb_fix.ywrapstep = ywrap; sgivwfb_fix.ypanstep = ypan; info->fix = sgivwfb_fix; switch (flatpanel_id) { case FLATPANEL_SGI_1600SW: info->var = sgivwfb_var1600sw; monitor = "SGI 1600SW flatpanel"; break; default: info->var = sgivwfb_var; monitor = "CRT"; } printk(KERN_INFO "sgivwfb: %s monitor selected\n", monitor); info->fbops = &sgivwfb_ops; info->pseudo_palette = (void *) (par + 1); info->flags = FBINFO_DEFAULT; info->screen_base = ioremap_nocache((unsigned long) sgivwfb_mem_phys, sgivwfb_mem_size); if (!info->screen_base) { printk(KERN_ERR "sgivwfb: couldn't ioremap screen_base\n"); goto fail_ioremap_fbmem; } if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) goto fail_color_map; if (register_framebuffer(info) < 0) { printk(KERN_ERR "sgivwfb: couldn't register framebuffer\n"); goto fail_register_framebuffer; } platform_set_drvdata(dev, info); printk(KERN_INFO "fb%d: SGI DBE frame buffer device, using %ldK of video memory at %#lx\n", info->node, sgivwfb_mem_size >> 10, sgivwfb_mem_phys); return 0; fail_register_framebuffer: fb_dealloc_cmap(&info->cmap); fail_color_map: iounmap((char *) info->screen_base); fail_ioremap_fbmem: iounmap(par->regs); fail_ioremap_regs: release_mem_region(DBE_REG_PHYS, DBE_REG_SIZE); framebuffer_release(info); return -ENXIO; } static int __devexit sgivwfb_remove(struct platform_device *dev) { struct fb_info *info = platform_get_drvdata(dev); if (info) { struct sgivw_par *par = info->par; 
unregister_framebuffer(info); dbe_TurnOffDma(par); iounmap(par->regs); iounmap(info->screen_base); release_mem_region(DBE_REG_PHYS, DBE_REG_SIZE); fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } return 0; } static struct platform_driver sgivwfb_driver = { .probe = sgivwfb_probe, .remove = __devexit_p(sgivwfb_remove), .driver = { .name = "sgivwfb", }, }; static struct platform_device *sgivwfb_device; int __init sgivwfb_init(void) { int ret; #ifndef MODULE char *option = NULL; if (fb_get_options("sgivwfb", &option)) return -ENODEV; sgivwfb_setup(option); #endif ret = platform_driver_register(&sgivwfb_driver); if (!ret) { sgivwfb_device = platform_device_alloc("sgivwfb", 0); if (sgivwfb_device) { ret = platform_device_add(sgivwfb_device); } else ret = -ENOMEM; if (ret) { platform_driver_unregister(&sgivwfb_driver); platform_device_put(sgivwfb_device); } } return ret; } module_init(sgivwfb_init); #ifdef MODULE MODULE_LICENSE("GPL"); static void __exit sgivwfb_exit(void) { platform_device_unregister(sgivwfb_device); platform_driver_unregister(&sgivwfb_driver); } module_exit(sgivwfb_exit); #endif /* MODULE */
gpl-2.0
showp1984/bricked-ville-3.4
drivers/net/wireless/libertas/tx.c
9954
5517
/*
 * This file contains the handling of TX in wlan driver.
 */

#include <linux/hardirq.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <net/cfg80211.h>

#include "host.h"
#include "radiotap.h"
#include "decl.h"
#include "defs.h"
#include "dev.h"
#include "mesh.h"

/**
 * convert_radiotap_rate_to_mv - converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE
 * units (500 Kb/s) into Marvell WLAN format (see Table 8 in Section 3.2.1)
 *
 * @rate:	Input rate
 * returns:	Output Rate (0 if invalid)
 */
static u32 convert_radiotap_rate_to_mv(u8 rate)
{
	switch (rate) {
	case 2:		/*   1 Mbps */
		return 0 | (1 << 4);
	case 4:		/*   2 Mbps */
		return 1 | (1 << 4);
	case 11:	/* 5.5 Mbps */
		return 2 | (1 << 4);
	case 22:	/*  11 Mbps */
		return 3 | (1 << 4);
	case 12:	/*   6 Mbps */
		return 4 | (1 << 4);
	case 18:	/*   9 Mbps */
		return 5 | (1 << 4);
	case 24:	/*  12 Mbps */
		return 6 | (1 << 4);
	case 36:	/*  18 Mbps */
		return 7 | (1 << 4);
	case 48:	/*  24 Mbps */
		return 8 | (1 << 4);
	case 72:	/*  36 Mbps */
		return 9 | (1 << 4);
	case 96:	/*  48 Mbps */
		return 10 | (1 << 4);
	case 108:	/*  54 Mbps */
		return 11 | (1 << 4);
	}
	return 0;
}

/**
 * lbs_hard_start_xmit - checks the conditions and sends packet to IF
 * layer if everything is ok
 *
 * @skb:	A pointer to skb which includes TX packet
 * @dev:	A pointer to the &struct net_device
 * returns:	0 or -1
 */
netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	struct lbs_private *priv = dev->ml_priv;
	struct txpd *txpd;
	char *p802x_hdr;
	uint16_t pkt_len;
	netdev_tx_t ret = NETDEV_TX_OK;

	lbs_deb_enter(LBS_DEB_TX);

	/* We need to protect against the queues being restarted before
	   we get round to stopping them */
	spin_lock_irqsave(&priv->driver_lock, flags);

	if (priv->surpriseremoved)
		goto free;

	if (!skb->len || (skb->len > MRVDRV_ETH_TX_PACKET_BUFFER_SIZE)) {
		lbs_deb_tx("tx err: skb length %d 0 or > %zd\n",
		       skb->len, MRVDRV_ETH_TX_PACKET_BUFFER_SIZE);
		/* We'll never manage to send this one; drop it and return 'OK' */

		dev->stats.tx_dropped++;
		dev->stats.tx_errors++;
		goto free;
	}

	netif_stop_queue(priv->dev);
	if (priv->mesh_dev)
		netif_stop_queue(priv->mesh_dev);

	if (priv->tx_pending_len) {
		/* This can happen if packets come in on the mesh and eth
		   device simultaneously -- there's no mutual exclusion on
		   hard_start_xmit() calls between devices. */
		lbs_deb_tx("Packet on %s while busy\n", dev->name);
		ret = NETDEV_TX_BUSY;
		goto unlock;
	}

	/* -1 marks the buffer as "being filled" until the real length is
	 * stored below, after the lock is dropped */
	priv->tx_pending_len = -1;
	spin_unlock_irqrestore(&priv->driver_lock, flags);

	lbs_deb_hex(LBS_DEB_TX, "TX Data", skb->data,
		    min_t(unsigned int, skb->len, 100));

	txpd = (void *)priv->tx_pending_buf;
	memset(txpd, 0, sizeof(struct txpd));

	p802x_hdr = skb->data;
	pkt_len = skb->len;

	if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
		struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data;

		/* set txpd fields from the radiotap header */
		txpd->tx_control = cpu_to_le32(convert_radiotap_rate_to_mv(rtap_hdr->rate));

		/* skip the radiotap header */
		p802x_hdr += sizeof(*rtap_hdr);
		pkt_len -= sizeof(*rtap_hdr);

		/* copy destination address from 802.11 header */
		memcpy(txpd->tx_dest_addr_high, p802x_hdr + 4, ETH_ALEN);
	} else {
		/* copy destination address from 802.3 header */
		memcpy(txpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN);
	}

	txpd->tx_packet_length = cpu_to_le16(pkt_len);
	txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));

	lbs_mesh_set_txpd(priv, dev, txpd);

	/* Dump the descriptor itself. Fixed: previously this passed &txpd
	 * (the address of the local pointer variable), which hex-dumped
	 * sizeof(struct txpd) bytes of stack memory instead of the txpd. */
	lbs_deb_hex(LBS_DEB_TX, "txpd", (u8 *) txpd, sizeof(struct txpd));

	lbs_deb_hex(LBS_DEB_TX, "Tx Data", (u8 *) p802x_hdr,
		    le16_to_cpu(txpd->tx_packet_length));

	/* the payload follows immediately after the descriptor */
	memcpy(&txpd[1], p802x_hdr, le16_to_cpu(txpd->tx_packet_length));

	spin_lock_irqsave(&priv->driver_lock, flags);
	priv->tx_pending_len = pkt_len + sizeof(struct txpd);

	lbs_deb_tx("%s lined up packet\n", __func__);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
		/* Keep the skb to echo it back once Tx feedback is
		   received from FW */
		skb_orphan(skb);

		/* Keep the skb around for when we get feedback */
		priv->currenttxskb = skb;
	} else {
 free:
		dev_kfree_skb_any(skb);
	}

 unlock:
	spin_unlock_irqrestore(&priv->driver_lock, flags);
	wake_up(&priv->waitq);

	lbs_deb_leave_args(LBS_DEB_TX, "ret %d", ret);
	return ret;
}

/**
 * lbs_send_tx_feedback - sends to the host the last transmitted packet,
 * filling the radiotap headers with transmission information.
 *
 * @priv:	A pointer to &struct lbs_private structure
 * @try_count:	A 32-bit value containing transmission retry status.
 *
 * returns:	void
 */
void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
{
	struct tx_radiotap_hdr *radiotap_hdr;

	if (priv->wdev->iftype != NL80211_IFTYPE_MONITOR ||
	    priv->currenttxskb == NULL)
		return;

	radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data;

	radiotap_hdr->data_retries = try_count ?
		(1 + priv->txretrycount - try_count) : 0;

	priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb,
						      priv->dev);
	netif_rx(priv->currenttxskb);

	priv->currenttxskb = NULL;

	if (priv->connect_status == LBS_CONNECTED)
		netif_wake_queue(priv->dev);

	if (priv->mesh_dev && netif_running(priv->mesh_dev))
		netif_wake_queue(priv->mesh_dev);
}
EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
gpl-2.0
sivu/linux
arch/x86/ia32/ia32_signal.c
227
10348
/* * linux/arch/x86_64/ia32/ia32_signal.c * * Copyright (C) 1991, 1992 Linus Torvalds * * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes * 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/personality.h> #include <linux/compat.h> #include <linux/binfmts.h> #include <asm/ucontext.h> #include <asm/uaccess.h> #include <asm/fpu/internal.h> #include <asm/fpu/signal.h> #include <asm/ptrace.h> #include <asm/ia32_unistd.h> #include <asm/user32.h> #include <uapi/asm/sigcontext.h> #include <asm/proto.h> #include <asm/vdso.h> #include <asm/sigframe.h> #include <asm/sighandling.h> #include <asm/sys_ia32.h> #include <asm/smap.h> /* * Do a signal return; undo the signal stack. */ #define loadsegment_gs(v) load_gs_index(v) #define loadsegment_fs(v) loadsegment(fs, v) #define loadsegment_ds(v) loadsegment(ds, v) #define loadsegment_es(v) loadsegment(es, v) #define get_user_seg(seg) ({ unsigned int v; savesegment(seg, v); v; }) #define set_user_seg(seg, v) loadsegment_##seg(v) #define COPY(x) { \ get_user_ex(regs->x, &sc->x); \ } #define GET_SEG(seg) ({ \ unsigned short tmp; \ get_user_ex(tmp, &sc->seg); \ tmp; \ }) #define COPY_SEG_CPL3(seg) do { \ regs->seg = GET_SEG(seg) | 3; \ } while (0) #define RELOAD_SEG(seg) { \ unsigned int pre = GET_SEG(seg); \ unsigned int cur = get_user_seg(seg); \ pre |= 3; \ if (pre != cur) \ set_user_seg(seg, pre); \ } static int ia32_restore_sigcontext(struct pt_regs *regs, struct sigcontext_32 __user *sc) { unsigned int tmpflags, err = 0; void __user *buf; u32 tmp; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; get_user_try { /* * Reload fs and gs if they have changed in the 
signal * handler. This does not handle long fs/gs base changes in * the handler, but does not clobber them at least in the * normal case. */ RELOAD_SEG(gs); RELOAD_SEG(fs); RELOAD_SEG(ds); RELOAD_SEG(es); COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); COPY(dx); COPY(cx); COPY(ip); COPY(ax); /* Don't touch extended registers */ COPY_SEG_CPL3(cs); COPY_SEG_CPL3(ss); get_user_ex(tmpflags, &sc->flags); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); /* disable syscall checks */ regs->orig_ax = -1; get_user_ex(tmp, &sc->fpstate); buf = compat_ptr(tmp); } get_user_catch(err); err |= fpu__restore_sig(buf, 1); force_iret(); return err; } asmlinkage long sys32_sigreturn(void) { struct pt_regs *regs = current_pt_regs(); struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8); sigset_t set; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask) || (_COMPAT_NSIG_WORDS > 1 && __copy_from_user((((char *) &set.sig) + 4), &frame->extramask, sizeof(frame->extramask)))) goto badframe; set_current_blocked(&set); if (ia32_restore_sigcontext(regs, &frame->sc)) goto badframe; return regs->ax; badframe: signal_fault(regs, frame, "32bit sigreturn"); return 0; } asmlinkage long sys32_rt_sigreturn(void) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe_ia32 __user *frame; sigset_t set; frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; if (compat_restore_altstack(&frame->uc.uc_stack)) goto badframe; return regs->ax; badframe: signal_fault(regs, frame, "32bit rt sigreturn"); return 0; } /* * Set up a signal frame. 
*/ static int ia32_setup_sigcontext(struct sigcontext_32 __user *sc, void __user *fpstate, struct pt_regs *regs, unsigned int mask) { int err = 0; put_user_try { put_user_ex(get_user_seg(gs), (unsigned int __user *)&sc->gs); put_user_ex(get_user_seg(fs), (unsigned int __user *)&sc->fs); put_user_ex(get_user_seg(ds), (unsigned int __user *)&sc->ds); put_user_ex(get_user_seg(es), (unsigned int __user *)&sc->es); put_user_ex(regs->di, &sc->di); put_user_ex(regs->si, &sc->si); put_user_ex(regs->bp, &sc->bp); put_user_ex(regs->sp, &sc->sp); put_user_ex(regs->bx, &sc->bx); put_user_ex(regs->dx, &sc->dx); put_user_ex(regs->cx, &sc->cx); put_user_ex(regs->ax, &sc->ax); put_user_ex(current->thread.trap_nr, &sc->trapno); put_user_ex(current->thread.error_code, &sc->err); put_user_ex(regs->ip, &sc->ip); put_user_ex(regs->cs, (unsigned int __user *)&sc->cs); put_user_ex(regs->flags, &sc->flags); put_user_ex(regs->sp, &sc->sp_at_signal); put_user_ex(regs->ss, (unsigned int __user *)&sc->ss); put_user_ex(ptr_to_compat(fpstate), &sc->fpstate); /* non-iBCS2 extensions.. */ put_user_ex(mask, &sc->oldmask); put_user_ex(current->thread.cr2, &sc->cr2); } put_user_catch(err); return err; } /* * Determine which stack to use.. */ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size, void __user **fpstate) { struct fpu *fpu = &current->thread.fpu; unsigned long sp; /* Default to using normal stack */ sp = regs->sp; /* This is the X/Open sanctioned signal stack switching. */ if (ksig->ka.sa.sa_flags & SA_ONSTACK) sp = sigsp(sp, ksig); /* This is the legacy signal stack switching. 
*/ else if ((regs->ss & 0xffff) != __USER32_DS && !(ksig->ka.sa.sa_flags & SA_RESTORER) && ksig->ka.sa.sa_restorer) sp = (unsigned long) ksig->ka.sa.sa_restorer; if (fpu->fpstate_active) { unsigned long fx_aligned, math_size; sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size); *fpstate = (struct _fpstate_32 __user *) sp; if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned, math_size) < 0) return (void __user *) -1L; } sp -= frame_size; /* Align the stack pointer according to the i386 ABI, * i.e. so that on function entry ((sp + 4) & 15) == 0. */ sp = ((sp + 4) & -16ul) - 4; return (void __user *) sp; } int ia32_setup_frame(int sig, struct ksignal *ksig, compat_sigset_t *set, struct pt_regs *regs) { struct sigframe_ia32 __user *frame; void __user *restorer; int err = 0; void __user *fpstate = NULL; /* copy_to_user optimizes that into a single 8 byte store */ static const struct { u16 poplmovl; u32 val; u16 int80; } __attribute__((packed)) code = { 0xb858, /* popl %eax ; movl $...,%eax */ __NR_ia32_sigreturn, 0x80cd, /* int $0x80 */ }; frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return -EFAULT; if (__put_user(sig, &frame->sig)) return -EFAULT; if (ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0])) return -EFAULT; if (_COMPAT_NSIG_WORDS > 1) { if (__copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask))) return -EFAULT; } if (ksig->ka.sa.sa_flags & SA_RESTORER) { restorer = ksig->ka.sa.sa_restorer; } else { /* Return stub is in 32bit vsyscall page */ if (current->mm->context.vdso) restorer = current->mm->context.vdso + vdso_image_32.sym___kernel_sigreturn; else restorer = &frame->retcode; } put_user_try { put_user_ex(ptr_to_compat(restorer), &frame->pretcode); /* * These are actually not used anymore, but left because some * gdb versions depend on them as a marker. 
*/ put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); } put_user_catch(err); if (err) return -EFAULT; /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ksig->ka.sa.sa_handler; /* Make -mregparm=3 work */ regs->ax = sig; regs->dx = 0; regs->cx = 0; loadsegment(ds, __USER32_DS); loadsegment(es, __USER32_DS); regs->cs = __USER32_CS; regs->ss = __USER32_DS; return 0; } int ia32_setup_rt_frame(int sig, struct ksignal *ksig, compat_sigset_t *set, struct pt_regs *regs) { struct rt_sigframe_ia32 __user *frame; void __user *restorer; int err = 0; void __user *fpstate = NULL; /* __copy_to_user optimizes that into a single 8 byte store */ static const struct { u8 movl; u32 val; u16 int80; u8 pad; } __attribute__((packed)) code = { 0xb8, __NR_ia32_rt_sigreturn, 0x80cd, 0, }; frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return -EFAULT; put_user_try { put_user_ex(sig, &frame->sig); put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo); put_user_ex(ptr_to_compat(&frame->uc), &frame->puc); /* Create the ucontext. */ if (cpu_has_xsave) put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); else put_user_ex(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp); if (ksig->ka.sa.sa_flags & SA_RESTORER) restorer = ksig->ka.sa.sa_restorer; else restorer = current->mm->context.vdso + vdso_image_32.sym___kernel_rt_sigreturn; put_user_ex(ptr_to_compat(restorer), &frame->pretcode); /* * Not actually used anymore, but left because some gdb * versions need it. 
*/ put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); } put_user_catch(err); err |= copy_siginfo_to_user32(&frame->info, &ksig->info); err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) return -EFAULT; /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ksig->ka.sa.sa_handler; /* Make -mregparm=3 work */ regs->ax = sig; regs->dx = (unsigned long) &frame->info; regs->cx = (unsigned long) &frame->uc; loadsegment(ds, __USER32_DS); loadsegment(es, __USER32_DS); regs->cs = __USER32_CS; regs->ss = __USER32_DS; return 0; }
gpl-2.0
CyanogenMod/semc-kernel-msm7x30
arch/arm/mach-netx/xc.c
739
5913
/*
 * arch/arm/mach-netx/xc.c
 *
 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/mutex.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/netx-regs.h>

#include <mach/xc.h>

/* Serializes allocation/release of XC units and protects xc_in_use. */
static DEFINE_MUTEX(xc_lock);

/* Bitmask of XC units (0..3) currently handed out by request_xc(). */
static int xc_in_use = 0;

struct fw_desc {
	unsigned int ofs;
	unsigned int size;
	unsigned int patch_ofs;
	unsigned int patch_entries;
};

struct fw_header {
	unsigned int magic;
	unsigned int type;
	unsigned int version;
	unsigned int reserved[5];
	struct fw_desc fw_desc[3];
} __attribute__ ((packed));

/* Halt all three processing units (RPU, TPU, XPU) of an XC. */
int xc_stop(struct xc *x)
{
	writel(RPU_HOLD_PC, x->xmac_base + NETX_XMAC_RPU_HOLD_PC_OFS);
	writel(TPU_HOLD_PC, x->xmac_base + NETX_XMAC_TPU_HOLD_PC_OFS);
	writel(XPU_HOLD_PC, x->xpec_base + NETX_XPEC_XPU_HOLD_PC_OFS);
	return 0;
}

/* Release the program-counter hold on all three units so the XC runs. */
int xc_start(struct xc *x)
{
	writel(0, x->xmac_base + NETX_XMAC_RPU_HOLD_PC_OFS);
	writel(0, x->xmac_base + NETX_XMAC_TPU_HOLD_PC_OFS);
	writel(0, x->xpec_base + NETX_XPEC_XPU_HOLD_PC_OFS);
	return 0;
}

/* Returns 1 when no unit is held (running), 0 when any hold bit is set. */
int xc_running(struct xc *x)
{
	return (readl(x->xmac_base + NETX_XMAC_RPU_HOLD_PC_OFS) & RPU_HOLD_PC) ||
	       (readl(x->xmac_base + NETX_XMAC_TPU_HOLD_PC_OFS) & TPU_HOLD_PC) ||
	       (readl(x->xpec_base + NETX_XPEC_XPU_HOLD_PC_OFS) & XPU_HOLD_PC) ?
		0 : 1;
}

/* Reset the XPEC program counter to 0. */
int xc_reset(struct xc *x)
{
	writel(0, x->xpec_base + NETX_XPEC_PC_OFS);
	return 0;
}

/*
 * Validate that [adr, adr + size) lies inside this XC's XMAC or XPEC
 * window. Returns 0 when valid, -1 otherwise.
 */
static int xc_check_ptr(struct xc *x, unsigned long adr, unsigned int size)
{
	if (adr >= NETX_PA_XMAC(x->no) &&
	    adr + size < NETX_PA_XMAC(x->no) + XMAC_MEM_SIZE)
		return 0;

	if (adr >= NETX_PA_XPEC(x->no) &&
	    adr + size < NETX_PA_XPEC(x->no) + XPEC_MEM_SIZE)
		return 0;

	dev_err(x->dev, "Illegal pointer in firmware found. aborting\n");

	return -1;
}

/* Apply 'count' (address, value) patch pairs from the firmware image. */
static int xc_patch(struct xc *x, const void *patch, int count)
{
	unsigned int val, adr;
	const unsigned int *data = patch;

	int i;
	for (i = 0; i < count; i++) {
		adr = *data++;
		val = *data++;
		if (xc_check_ptr(x, adr, 4) < 0)
			return -EINVAL;

		writel(val, (void __iomem *)io_p2v(adr));
	}
	return 0;
}

/*
 * Load, validate and install the "xc<no>.bin" firmware image: copy each
 * of the three sections into the XC's memory and apply its patch list.
 */
int xc_request_firmware(struct xc *x)
{
	int ret;
	char name[16];
	const struct firmware *fw;
	struct fw_header *head;
	unsigned int size;
	int i;
	const void *src;
	unsigned long dst;

	sprintf(name, "xc%d.bin", x->no);

	ret = request_firmware(&fw, name, x->dev);

	if (ret < 0) {
		dev_err(x->dev, "request_firmware failed\n");
		return ret;
	}

	head = (struct fw_header *)fw->data;
	if (head->magic != 0x4e657458) {
		if (head->magic == 0x5874654e) {
			dev_err(x->dev,
			    "firmware magic is 'XteN'. Endianess problems?\n");
			ret = -ENODEV;
			goto exit_release_firmware;
		}
		dev_err(x->dev, "unrecognized firmware magic 0x%08x\n",
		    head->magic);
		ret = -ENODEV;
		goto exit_release_firmware;
	}

	x->type = head->type;
	x->version = head->version;

	ret = -EINVAL;

	for (i = 0; i < 3; i++) {
		/* each section starts with its destination address */
		src = fw->data + head->fw_desc[i].ofs;
		dst = *(unsigned int *)src;
		src += sizeof (unsigned int);
		size = head->fw_desc[i].size - sizeof (unsigned int);

		if (xc_check_ptr(x, dst, size))
			goto exit_release_firmware;

		memcpy((void *)io_p2v(dst), src, size);

		src = fw->data + head->fw_desc[i].patch_ofs;
		size = head->fw_desc[i].patch_entries;
		ret = xc_patch(x, src, size);
		if (ret < 0)
			goto exit_release_firmware;
	}

	ret = 0;

exit_release_firmware:
	release_firmware(fw);

	return ret;
}

/*
 * Claim XC unit 'xcno' (0..3), reserving its XPEC/XMAC/SRAM regions and
 * mapping its SRAM. Returns a handle, or NULL if the unit is invalid,
 * busy, or a resource could not be obtained.
 */
struct xc *request_xc(int xcno, struct device *dev)
{
	struct xc *x = NULL;

	mutex_lock(&xc_lock);

	/* Only units 0..3 exist. Also reject negative indices: the old
	 * check (xcno > 3) let them through to '1 << xcno' (undefined
	 * behaviour) and bogus NETX_PA_XPEC()/NETX_PA_XMAC() addresses. */
	if (xcno < 0 || xcno > 3)
		goto exit;
	if (xc_in_use & (1 << xcno))
		goto exit;

	x = kmalloc(sizeof (struct xc), GFP_KERNEL);
	if (!x)
		goto exit;

	if (!request_mem_region
	    (NETX_PA_XPEC(xcno), XPEC_MEM_SIZE, kobject_name(&dev->kobj)))
		goto exit_free;

	if (!request_mem_region
	    (NETX_PA_XMAC(xcno), XMAC_MEM_SIZE, kobject_name(&dev->kobj)))
		goto exit_release_1;

	if (!request_mem_region
	    (SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE, kobject_name(&dev->kobj)))
		goto exit_release_2;

	x->xpec_base = (void * __iomem)io_p2v(NETX_PA_XPEC(xcno));
	x->xmac_base = (void * __iomem)io_p2v(NETX_PA_XMAC(xcno));
	x->sram_base = ioremap(SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE);
	if (!x->sram_base)
		goto exit_release_3;

	x->irq = NETX_IRQ_XPEC(xcno);

	x->no = xcno;
	x->dev = dev;

	xc_in_use |= (1 << xcno);

	goto exit;

exit_release_3:
	release_mem_region(SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE);
exit_release_2:
	release_mem_region(NETX_PA_XMAC(xcno), XMAC_MEM_SIZE);
exit_release_1:
	release_mem_region(NETX_PA_XPEC(xcno), XPEC_MEM_SIZE);
exit_free:
	kfree(x);
	x = NULL;
exit:
	mutex_unlock(&xc_lock);
	return x;
}

/* Release a handle obtained from request_xc(): unmap, release, free. */
void free_xc(struct xc *x)
{
	int xcno = x->no;

	mutex_lock(&xc_lock);

	iounmap(x->sram_base);
	release_mem_region(SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE);
	release_mem_region(NETX_PA_XMAC(xcno), XMAC_MEM_SIZE);
	release_mem_region(NETX_PA_XPEC(xcno), XPEC_MEM_SIZE);
	xc_in_use &= ~(1 << x->no);
	kfree(x);

	mutex_unlock(&xc_lock);
}

EXPORT_SYMBOL(free_xc);
EXPORT_SYMBOL(request_xc);
EXPORT_SYMBOL(xc_request_firmware);
EXPORT_SYMBOL(xc_reset);
EXPORT_SYMBOL(xc_running);
EXPORT_SYMBOL(xc_start);
EXPORT_SYMBOL(xc_stop);
gpl-2.0
TheTypoMaster/asuswrt
release/src/router/openssl/openssl/crypto/objects/obj_err.c
739
3881
/* crypto/objects/obj_err.c */ /* ==================================================================== * Copyright (c) 1999-2006 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ /* NOTE: this file was auto generated by the mkerr.pl script: any changes * made to it will be overwritten when the script next updates this file, * only reason strings will be preserved. */ #include <stdio.h> #include <openssl/err.h> #include <openssl/objects.h> /* BEGIN ERROR CODES */ #ifndef OPENSSL_NO_ERR #define ERR_FUNC(func) ERR_PACK(ERR_LIB_OBJ,func,0) #define ERR_REASON(reason) ERR_PACK(ERR_LIB_OBJ,0,reason) static ERR_STRING_DATA OBJ_str_functs[]= { {ERR_FUNC(OBJ_F_OBJ_ADD_OBJECT), "OBJ_add_object"}, {ERR_FUNC(OBJ_F_OBJ_CREATE), "OBJ_create"}, {ERR_FUNC(OBJ_F_OBJ_DUP), "OBJ_dup"}, {ERR_FUNC(OBJ_F_OBJ_NAME_NEW_INDEX), "OBJ_NAME_new_index"}, {ERR_FUNC(OBJ_F_OBJ_NID2LN), "OBJ_nid2ln"}, {ERR_FUNC(OBJ_F_OBJ_NID2OBJ), "OBJ_nid2obj"}, {ERR_FUNC(OBJ_F_OBJ_NID2SN), "OBJ_nid2sn"}, {0,NULL} }; static ERR_STRING_DATA OBJ_str_reasons[]= { {ERR_REASON(OBJ_R_MALLOC_FAILURE) ,"malloc failure"}, {ERR_REASON(OBJ_R_UNKNOWN_NID) ,"unknown nid"}, {0,NULL} }; #endif void ERR_load_OBJ_strings(void) { #ifndef OPENSSL_NO_ERR if (ERR_func_error_string(OBJ_str_functs[0].error) == NULL) { ERR_load_strings(0,OBJ_str_functs); ERR_load_strings(0,OBJ_str_reasons); } #endif }
gpl-2.0
linino/linux
net/netfilter/nfnetlink_queue_ct.c
1763
2516
/*
 * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nfnetlink_queue.h>

/*
 * Look up the conntrack entry attached to a queued skb and, if it is a
 * tracked connection, account its netlink attribute size into *size.
 * Returns the conntrack entry, or NULL when the conntrack hook is not
 * registered or the connection is untracked.
 */
struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
			     enum ip_conntrack_info *ctinfo)
{
	struct nfq_ct_hook *nfq_ct;
	struct nf_conn *ct;

	/* rcu_read_lock()ed by __nf_queue already. */
	nfq_ct = rcu_dereference(nfq_ct_hook);
	if (nfq_ct == NULL)
		return NULL;

	ct = nf_ct_get(entskb, ctinfo);
	if (ct) {
		if (!nf_ct_is_untracked(ct))
			*size += nfq_ct->build_size(ct);
		else
			ct = NULL;	/* untracked: report no conntrack */
	}
	return ct;
}

/*
 * Parse an NFQA_CT attribute from userspace into the skb's conntrack
 * entry via the registered hook.  Returns the (possibly updated)
 * conntrack entry, or NULL when no hook is registered.
 */
struct nf_conn *
nfqnl_ct_parse(const struct sk_buff *skb, const struct nlattr *attr,
	       enum ip_conntrack_info *ctinfo)
{
	struct nfq_ct_hook *nfq_ct;
	struct nf_conn *ct;

	/* rcu_read_lock()ed by __nf_queue already. */
	nfq_ct = rcu_dereference(nfq_ct_hook);
	if (nfq_ct == NULL)
		return NULL;

	ct = nf_ct_get(skb, ctinfo);
	if (ct && !nf_ct_is_untracked(ct))
		nfq_ct->parse(attr, ct);

	return ct;
}

/*
 * Emit the conntrack state as a nested NFQA_CT attribute plus an
 * NFQA_CT_INFO attribute into a netlink message being built.
 * Returns 0 on success (or when no hook is registered), -1 when the
 * skb has no room for the attributes.
 */
int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
		 enum ip_conntrack_info ctinfo)
{
	struct nfq_ct_hook *nfq_ct;
	struct nlattr *nest_parms;
	u_int32_t tmp;

	nfq_ct = rcu_dereference(nfq_ct_hook);
	if (nfq_ct == NULL)
		return 0;

	nest_parms = nla_nest_start(skb, NFQA_CT | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	if (nfq_ct->build(skb, ct) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	tmp = ctinfo;
	if (nla_put_be32(skb, NFQA_CT_INFO, htonl(tmp)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

/*
 * After a verdict changed the packet length by 'diff' bytes, let the
 * conntrack hook adjust TCP sequence numbers -- only needed for
 * NAT-ed connections and a non-zero length change.
 */
void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo, int diff)
{
	struct nfq_ct_hook *nfq_ct;

	nfq_ct = rcu_dereference(nfq_ct_hook);
	if (nfq_ct == NULL)
		return;

	if ((ct->status & IPS_NAT_MASK) && diff)
		nfq_ct->seq_adjust(skb, ct, ctinfo, diff);
}

/*
 * Attach a userspace-supplied expectation (NFQA_EXP attribute) to a
 * conntrack entry.  Untracked connections are silently accepted;
 * returns -EOPNOTSUPP when no conntrack hook is registered.
 */
int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
			u32 portid, u32 report)
{
	struct nfq_ct_hook *nfq_ct;

	if (nf_ct_is_untracked(ct))
		return 0;

	nfq_ct = rcu_dereference(nfq_ct_hook);
	if (nfq_ct == NULL)
		return -EOPNOTSUPP;

	return nfq_ct->attach_expect(attr, ct, portid, report);
}
gpl-2.0
chasmodo/android_kernel_oneplus_msm8974
mm/madvise.c
1763
12171
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/file.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 *
 * Translates the advice into new vm_flags, tries to merge the vma with
 * its neighbours under the new flags, and otherwise splits it at the
 * range boundaries so only [start, end) is affected.  Requires
 * mmap_sem held for writing.  -ENOMEM is mapped to -EAGAIN for callers.
 */
static long madvise_behavior(struct vm_area_struct * vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct * mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		/* cannot re-enable copying for VM_IO mappings */
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_NODUMP;
		break;
	case MADV_DODUMP:
		new_flags &= ~VM_NODUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	/* nothing to do if the advice does not change the flags */
	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma),
				vma_get_anon_name(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	/* split off the head and/or tail so only [start, end) changes */
	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct * vma,
			     struct vm_area_struct ** prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	/* XIP mappings are already memory-resident */
	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	/* convert the address range to page-cache indices */
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct * vma,
			     struct vm_area_struct ** prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	struct address_space *mapping;
	loff_t offset, endoff;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
			return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	mapping = vma->vm_file->f_mapping;

	/* translate the VA range to byte offsets within the file */
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	endoff = (loff_t)(end - vma->vm_start - 1)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * vmtruncate_range may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = vmtruncate_range(mapping->host, offset, endoff);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 * Walks [start, end) page by page, pinning each page and either
 * soft-offlining it (MADV_SOFT_OFFLINE) or injecting a hard
 * memory failure.  Requires CAP_SYS_ADMIN.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE) {
		struct page *p;
		/* NOTE: inner 'ret' shadows the outer accumulator */
		int ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;
		if (bhv == MADV_SOFT_OFFLINE) {
			printk(KERN_INFO "Soft offlining page %lx at %lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				break;
			continue;
		}
		printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
		       page_to_pfn(p), start);
		/* Ignore return value for now */
		memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
	}
	return ret;
}
#endif

/* Dispatch one vma's worth of advice to the matching handler. */
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

/* Return 1 if 'behavior' is an advice value this kernel supports. */
static int
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return 1;

	default:
		return 0;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct * vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
gpl-2.0
GalaxyTab4/android_kernel_samsung_s3ve3g
drivers/video/msm/mddi_quickvx.c
2275
22671
/* Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <mach/pmic.h> #include "msm_fb.h" #include "mddihost.h" #include "mddihosti.h" /* WVGA Primary Display */ #define MDDI_QUICKVX_1_2 1 /* MDDI Manufacturer Code */ #define QUICKVX_MDDI_MFR_CODE 0xc583 /* MDDI Product Code */ #define QUICKVX_MDDI_PRD_CODE 0x5800 /* Register Address Maps */ /* MDDI Address Anti-fuse values for bits [31:22] */ #define QUICKVX_ADDR_31_22_AF (0X000 << 22) /* MDDI Address Maps */ /* VEE Block Address Base */ #define QUICKVX_VEE_BASE (QUICKVX_ADDR_31_22_AF | 0x00000000) /* SPI Block Address Base */ #define QUICKVX_SPI_BASE (QUICKVX_ADDR_31_22_AF | 0x00010000) /* Clock and Reset (CAR) Address Base */ #define QUICKVX_CAR_BASE (QUICKVX_ADDR_31_22_AF | 0x00020000) /* Register Control Block (RCB) Address Base */ #define QUICKVX_RCB_BASE (QUICKVX_ADDR_31_22_AF | 0x00030000) /* Cellular RAM Address Base */ #define QUICKVX_CELLRAM_BASE (QUICKVX_ADDR_31_22_AF | 0x00100000) /* FB through A2F Address Base */ #define QUICKVX_FB_A2F_BASE (QUICKVX_ADDR_31_22_AF | 0x00200000) /*************************************************** * Common Registers in Register Control Block (RCB) Registers ***************************************************/ /* CellRAM Configuration RCR Register */ #define QUICKVX_RCB_RCR_REG (QUICKVX_RCB_BASE | 0x00000000) /* Image Effect Register */ #define QUICKVX_RCB_IER_REG (QUICKVX_RCB_BASE | 0x00000004) /* Row Number Register */ #define QUICKVX_RCB_ROWNUM_REG (QUICKVX_RCB_BASE | 0x00000008) /* TCON Timing0 
Register */ #define QUICKVX_RCB_TCON0_REG (QUICKVX_RCB_BASE | 0x0000000C) /* TCON Timing1 Register */ #define QUICKVX_RCB_TCON1_REG (QUICKVX_RCB_BASE | 0x00000010) /* TCON Timing2 Register */ #define QUICKVX_RCB_TCON2_REG (QUICKVX_RCB_BASE | 0x00000014) /* PWM Control Register */ #define QUICKVX_RCB_PWMC_REG (QUICKVX_RCB_BASE | 0x00000018) /* PWM Width Register */ #define QUICKVX_RCB_PWMW_REG (QUICKVX_RCB_BASE | 0x0000001C) /* VEE Configuration Register */ #define QUICKVX_RCB_VEECONF_REG (QUICKVX_RCB_BASE | 0x00000020) /* CellRAM Configuration BCR Register */ #define QUICKVX_RCB_CELLBCR_REG (QUICKVX_RCB_BASE | 0x00000024) /* CellRAM Configuration Control Register */ #define QUICKVX_RCB_CELLCC_REG (QUICKVX_RCB_BASE | 0x00000028) /* Use Case Register */ #define QUICKVX_RCB_USECASE_REG (QUICKVX_RCB_BASE | 0x00000100) /* Video Parameter Register */ #define QUICKVX_RCB_VPARM_REG (QUICKVX_RCB_BASE | 0x00000104) /* MDDI Client Wake-up Register */ #define QUICKVX_RCB_MCW_REG (QUICKVX_RCB_BASE | 0x00000108) /* Burst Length Register */ #define QUICKVX_RCB_BURSTLN_REG (QUICKVX_RCB_BASE | 0x0000010C) /* Display Attributes Register */ #define QUICKVX_RCB_DISPATTR_REG (QUICKVX_RCB_BASE | 0x00000110) /* Error Status Register */ #define QUICKVX_RCB_ERRSTAT_REG (QUICKVX_RCB_BASE | 0x00000114) /* Error Mask Register */ #define QUICKVX_RCB_ERRMSK_REG (QUICKVX_RCB_BASE | 0x00000118) /* MDDI ASSP FIFO Overflow Address Register */ #define QUICKVX_RCB_ASSPFOA_REG (QUICKVX_RCB_BASE | 0x0000011C) /* MDDI Fabric FIFO Overflow Address Register */ #define QUICKVX_RCB_FABFOA_REG (QUICKVX_RCB_BASE | 0x00000120) /* Incoming RGB FIFO Overflow Address Register */ #define QUICKVX_RCB_IRFOA_REG (QUICKVX_RCB_BASE | 0x00000124) /* SPI Overflow Address Register */ #define QUICKVX_RCB_SPIOA_REG (QUICKVX_RCB_BASE | 0x00000128) /* Ping Buffer Address Register */ #define QUICKVX_RCB_PINGBA_REG (QUICKVX_RCB_BASE | 0x0000012C) /* Pong Buffer Address Register */ #define QUICKVX_RCB_PONGBA_REG 
(QUICKVX_RCB_BASE | 0x00000130) /* Configuration Done Register */ #define QUICKVX_RCB_CONFDONE_REG (QUICKVX_RCB_BASE | 0x00000134) /* FIFO Flush Register */ #define QUICKVX_RCB_FFLUSH_REG (QUICKVX_RCB_BASE | 0x00000138) /*************************************************** * SPI Block Registers ***************************************************/ /* SPI Rx0 Register */ #define QUICKVX_SPI_RX0_REG (QUICKVX_SPI_BASE | 0x00000000) /* SPI Rx1 Register */ #define QUICKVX_SPI_RX1_REG (QUICKVX_SPI_BASE | 0x00000004) /* SPI Rx2 Register */ #define QUICKVX_SPI_RX2_REG (QUICKVX_SPI_BASE | 0x00000008) /* SPI Rx3 Register */ #define QUICKVX_SPI_RX3_REG (QUICKVX_SPI_BASE | 0x0000000C) /* SPI Rx4 Register */ #define QUICKVX_SPI_RX4_REG (QUICKVX_SPI_BASE | 0x00000010) /* SPI Rx5 Register */ #define QUICKVX_SPI_RX5_REG (QUICKVX_SPI_BASE | 0x00000014) /* SPI Rx6 Register */ #define QUICKVX_SPI_RX6_REG (QUICKVX_SPI_BASE | 0x00000018) /* SPI Rx7 Register */ #define QUICKVX_SPI_RX7_REG (QUICKVX_SPI_BASE | 0x0000001C) /* SPI Tx0 Register */ #define QUICKVX_SPI_TX0_REG (QUICKVX_SPI_BASE | 0x00000020) /* SPI Tx1 Register */ #define QUICKVX_SPI_TX1_REG (QUICKVX_SPI_BASE | 0x00000024) /* SPI Tx2 Register */ #define QUICKVX_SPI_TX2_REG (QUICKVX_SPI_BASE | 0x00000028) /* SPI Tx3 Register */ #define QUICKVX_SPI_TX3_REG (QUICKVX_SPI_BASE | 0x0000002C) /* SPI Tx4 Register */ #define QUICKVX_SPI_TX4_REG (QUICKVX_SPI_BASE | 0x00000030) /* SPI Tx5 Register */ #define QUICKVX_SPI_TX5_REG (QUICKVX_SPI_BASE | 0x00000034) /* SPI Tx6 Register */ #define QUICKVX_SPI_TX6_REG (QUICKVX_SPI_BASE | 0x00000038) /* SPI Tx7 Register */ #define QUICKVX_SPI_TX7_REG (QUICKVX_SPI_BASE | 0x0000003C) /* SPI Control Register */ #define QUICKVX_SPI_CTRL_REG (QUICKVX_SPI_BASE | 0x00000040) /* SPI Transfer Length Register */ #define QUICKVX_SPI_TLEN_REG (QUICKVX_SPI_BASE | 0x00000044) /*************************************************** * Clock and Reset (CAR) Block Registers 
***************************************************/ /* ASSP Global Clock Enable Register */ #define QUICKVX_CAR_ASSP_GCE_REG (QUICKVX_CAR_BASE | 0x00000000) /* VLP Control1 Register */ #define QUICKVX_CAR_VLPCTRL1_REG (QUICKVX_CAR_BASE | 0x00000004) /* VLP Control2 Register */ #define QUICKVX_CAR_VLPCTRL2_REG (QUICKVX_CAR_BASE | 0x00000008) /* Clock Selection Register */ #define QUICKVX_CAR_CLKSEL_REG (QUICKVX_CAR_BASE | 0x0000000C) /* PLL Control Register */ #define QUICKVX_CAR_PLLCTRL_REG (QUICKVX_CAR_BASE | 0x00000010) /* PLL Clock Ratio Register */ #define QUICKVX_CAR_PLLCLKRATIO_REG (QUICKVX_CAR_BASE | 0x00000014) /*************************************************** * VEE Block Registers ***************************************************/ /* VEE Control Register */ #define QUICKVX_VEE_VEECTRL_REG (QUICKVX_VEE_BASE | 0x00000000) /* Strength Register */ #define QUICKVX_VEE_STRENGTH_REG (QUICKVX_VEE_BASE | 0x0000000C) /* Variance Register */ #define QUICKVX_VEE_VARIANCE_REG (QUICKVX_VEE_BASE | 0x00000010) /* Slope Register */ #define QUICKVX_VEE_SLOPE_REG (QUICKVX_VEE_BASE | 0x00000014) /* Sharpen Control0 Register */ #define QUICKVX_VEE_SHRPCTRL0_REG (QUICKVX_VEE_BASE | 0x0000001C) /* Sharpen Control1 Register */ #define QUICKVX_VEE_SHRPCTRL1_REG (QUICKVX_VEE_BASE | 0x00000020) /* Upper Horizontal Positon Register */ #define QUICKVX_VEE_UHPOS_REG (QUICKVX_VEE_BASE | 0x00000024) /* Lower Horizontal Positon Register */ #define QUICKVX_VEE_LHPOS_REG (QUICKVX_VEE_BASE | 0x00000028) /* Upper Vertical Positon Register */ #define QUICKVX_VEE_UVPOS_REG (QUICKVX_VEE_BASE | 0x0000002C) /* Lower Vertical Positon Register */ #define QUICKVX_VEE_LVPOS_REG (QUICKVX_VEE_BASE | 0x00000030) /* Upper Frame Width Register */ #define QUICKVX_VEE_UFWDTH_REG (QUICKVX_VEE_BASE | 0x00000034) /* Lower Frame Width Register */ #define QUICKVX_VEE_LFWDTH_REG (QUICKVX_VEE_BASE | 0x00000038) /* Upper Frame Height Register */ #define QUICKVX_VEE_UFHGHT_REG (QUICKVX_VEE_BASE | 0x0000003C) /* 
Lower Frame Height Register */ #define QUICKVX_VEE_LFHGHT_REG (QUICKVX_VEE_BASE | 0x00000040) /* Control0 Register */ #define QUICKVX_VEE_CTRL0_REG (QUICKVX_VEE_BASE | 0x00000044) /* Control1 Register */ #define QUICKVX_VEE_CTRL1_REG (QUICKVX_VEE_BASE | 0x00000048) /* Video Enhancement Enable Register */ #define QUICKVX_VEE_VDOEEN_REG (QUICKVX_VEE_BASE | 0x0000004C) /* Black Level Register */ #define QUICKVX_VEE_BLCKLEV_REG (QUICKVX_VEE_BASE | 0x00000050) /* White Level Register */ #define QUICKVX_VEE_WHTLEV_REG (QUICKVX_VEE_BASE | 0x00000054) /* Amplification Limits Register */ #define QUICKVX_VEE_AMPLMTS_REG (QUICKVX_VEE_BASE | 0x00000060) /* Dithering Mode Register */ #define QUICKVX_VEE_DITHMOD_REG (QUICKVX_VEE_BASE | 0x00000064) /* Upper Look-up Data Register */ #define QUICKVX_VEE_ULUD_REG (QUICKVX_VEE_BASE | 0x00000080) /* Lower Look-up Data Register */ #define QUICKVX_VEE_LLUD_REG (QUICKVX_VEE_BASE | 0x00000084) /* Look-up Address Register */ #define QUICKVX_VEE_LUADDR_REG (QUICKVX_VEE_BASE | 0x00000088) /* Look-up Write Enable Register */ #define QUICKVX_VEE_LUWREN_REG (QUICKVX_VEE_BASE | 0x0000008C) /* VEE ID Register */ #define QUICKVX_VEE_VEEID_REG (QUICKVX_VEE_BASE | 0x000003FC) /* M_11 Register */ #define QUICKVX_VEE_M_11_REG (QUICKVX_VEE_BASE | 0x000000C0) /* M_12 Register */ #define QUICKVX_VEE_M_12_REG (QUICKVX_VEE_BASE | 0x000000C4) /* M_13 Register */ #define QUICKVX_VEE_M_13_REG (QUICKVX_VEE_BASE | 0x000000C8) /* M_21 Register */ #define QUICKVX_VEE_M_21_REG (QUICKVX_VEE_BASE | 0x000000CC) /* M_22 Register */ #define QUICKVX_VEE_M_22_REG (QUICKVX_VEE_BASE | 0x000000D0) /* M_23 Register */ #define QUICKVX_VEE_M_23_REG (QUICKVX_VEE_BASE | 0x000000D4) /* M_31 Register */ #define QUICKVX_VEE_M_31_REG (QUICKVX_VEE_BASE | 0x000000D8) /* M_32 Register */ #define QUICKVX_VEE_M_32_REG (QUICKVX_VEE_BASE | 0x000000DC) /* M_33 Register */ #define QUICKVX_VEE_M_33_REG (QUICKVX_VEE_BASE | 0x000000E0) /* R Offset Register */ #define QUICKVX_VEE_OFFSET_R_REG 
(QUICKVX_VEE_BASE | 0x000000E8)
/* G Offset Register */
#define QUICKVX_VEE_OFFSET_G_REG (QUICKVX_VEE_BASE | 0x000000EC)
/* B Offset Register */
#define QUICKVX_VEE_OFFSET_B_REG (QUICKVX_VEE_BASE | 0x000000F0)
/* LCD Reset Register */
#define QUICKVX_FB_A2F_LCD_RESET_REG (QUICKVX_FB_A2F_BASE | 0x00000000)

/* Register bit defines */
/* PLL Lock bit in the PLL Control Register */
#define QUICKVX_PLL_LOCK_BIT (1 << 7)

/* Bit-field builders for the QuickVX SPI control register */
#define QL_SPI_CTRL_rSPISTart(x) (x)
#define QL_SPI_CTRL_rCPHA(x) (x << 1)
#define QL_SPI_CTRL_rCPOL(x) (x << 2)
#define QL_SPI_CTRL_rLSB(x) (x << 3)
#define QL_SPI_CTRL_rSLVSEL(x) (x << 4)
#define QL_SPI_CTRL_MASK_rTxDone (1 << 9)

/* SPI start bytes addressing the LCD controller behind the bridge:
   device id in bits [7:2], RS selects index (0) vs command (1) phase */
#define QL_SPI_LCD_DEV_ID 0x1c
#define QL_SPI_LCD_RS(x) (x << 1)
#define QL_SPI_LCD_RW(x) (x)
#define QL_SPI_LCD_INDEX_START_BYTE ((QL_SPI_LCD_DEV_ID << 2) | \
	QL_SPI_LCD_RS(0) | QL_SPI_LCD_RW(0))
#define QL_SPI_LCD_CMD_START_BYTE ((QL_SPI_LCD_DEV_ID << 2) | \
	QL_SPI_LCD_RS(1) | QL_SPI_LCD_RW(0))
#define QL_SPI_CTRL_LCD_START (QL_SPI_CTRL_rSPISTart(1) | \
	QL_SPI_CTRL_rCPHA(1) | QL_SPI_CTRL_rCPOL(1) | \
	QL_SPI_CTRL_rLSB(0) | QL_SPI_CTRL_rSLVSEL(0))

/* Write one 32-bit value to a QuickVX register over MDDI.
   Returns the status of mddi_queue_register_write(). */
int ql_mddi_write(uint32 address, uint32 value)
{
	int ret = 0;

	ret = mddi_queue_register_write(address, value, TRUE, 0);

	return ret;
}

/* Read one 32-bit QuickVX register over MDDI into *regval.
   Returns the status of mddi_queue_register_read(). */
int ql_mddi_read(uint32 address, uint32 *regval)
{
	int ret = 0;

	ret = mddi_queue_register_read(address, regval, TRUE, 0);
	MDDI_MSG_DEBUG("\nql_mddi_read[0x%x]=0x%x", address, *regval);

	return ret;
}

/* Send a 16-bit register index + 16-bit command to the LCD panel via the
   bridge's SPI master. Each phase clocks out 24 bits (start byte + payload).
   Always returns 0 - the underlying MDDI write status is not checked. */
int ql_send_spi_cmd_to_lcd(uint32 index, uint32 cmd)
{
	MDDI_MSG_DEBUG("\n %s(): index 0x%x, cmd 0x%x", __func__, index, cmd);
	/* do the index phase */
	/* send 24 bits in the index phase */
	ql_mddi_write(QUICKVX_SPI_TLEN_REG, 23);
	/* send 24 bits in the index phase, starting at bit 23 of TX0 reg */
	ql_mddi_write(QUICKVX_SPI_TX0_REG,
		(QL_SPI_LCD_INDEX_START_BYTE << 16) | index);
	/* set start */
	ql_mddi_write(QUICKVX_SPI_CTRL_REG, QL_SPI_CTRL_LCD_START);
	/* do the command phase */
	/* send 24 bits in the cmd phase */
	ql_mddi_write(QUICKVX_SPI_TLEN_REG, 23);
	/* send 24 bits in the cmd phase, starting at bit 23 of TX0 reg. */
	ql_mddi_write(QUICKVX_SPI_TX0_REG,
		(QL_SPI_LCD_CMD_START_BYTE << 16) | cmd);
	/* set start */
	ql_mddi_write(QUICKVX_SPI_CTRL_REG, QL_SPI_CTRL_LCD_START);
	return 0;
}

/* Issue an SPI read transaction to the LCD for register 'index'.
   NOTE(review): despite the name and the *value parameter, this function
   only starts the transfer - it never reads the SPI RX register back into
   *value. Callers appear to use it as a dummy/warm-up transaction; confirm
   before relying on *value. Always returns 0. */
int ql_send_spi_data_from_lcd(uint32 index, uint32 *value)
{
	MDDI_MSG_DEBUG("\n %s(): index 0x%x", __func__, index);
	/* do the index phase */
	/* send 24 bits in the index phase */
	ql_mddi_write(QUICKVX_SPI_TLEN_REG, 23);
	/* send 24 bits in the index phase, starting at bit 23 of TX0 reg */
	ql_mddi_write(QUICKVX_SPI_TX0_REG,
		(QL_SPI_LCD_INDEX_START_BYTE << 16) | index);
	/* set start */
	ql_mddi_write(QUICKVX_SPI_CTRL_REG, QL_SPI_CTRL_LCD_START);
	/* do the command phase */
	/* send 8 bits and read 24 bits in the cmd phase, so total 32 bits */
	ql_mddi_write(QUICKVX_SPI_TLEN_REG, 31);
	/* send 24 bits in the cmd phase, starting at bit 31 of TX0 reg */
	ql_mddi_write(QUICKVX_SPI_TX0_REG,
		((QL_SPI_LCD_CMD_START_BYTE << 16)) << 8);
	/* set start */
	ql_mddi_write(QUICKVX_SPI_CTRL_REG, QL_SPI_CTRL_LCD_START);
	return 0;
}

/* Global Variables */
static uint32 mddi_quickvx_rows_per_second;
static uint32 mddi_quickvx_usecs_per_refresh;
static uint32 mddi_quickvx_rows_per_refresh;

/* Program the QuickVX bridge registers (clock select, PWM backlight,
   timing controller, VEE) for the 480x864 panel. The write order matters:
   CONFDONE is cleared first and set last to latch the configuration. */
void mddi_quickvx_configure_registers(void)
{
	MDDI_MSG_DEBUG("\n%s(): ", __func__);
	ql_mddi_write(QUICKVX_CAR_CLKSEL_REG, 0x00007000);
	ql_mddi_write(QUICKVX_RCB_PWMW_REG, 0x0000FFFF);
	ql_mddi_write(QUICKVX_RCB_PWMC_REG, 0x00000001);
	ql_mddi_write(QUICKVX_RCB_CONFDONE_REG, 0x00000000);
	/* display is x width = 480, y width = 864 */
	ql_mddi_write(QUICKVX_RCB_TCON0_REG, 0x035f01df);
	/* VFP=2, VBP=4, HFP=16, HBP=16 */
	ql_mddi_write(QUICKVX_RCB_TCON1_REG, 0x01e301e1);
	/* VSW =2, HSW=8 */
	ql_mddi_write(QUICKVX_RCB_TCON2_REG, 0x000000e1);
	ql_mddi_write(QUICKVX_RCB_DISPATTR_REG, 0x00000000);
	ql_mddi_write(QUICKVX_RCB_USECASE_REG, 0x00000025);
	ql_mddi_write(QUICKVX_RCB_VPARM_REG, 0x00000888);
	ql_mddi_write(QUICKVX_RCB_VEECONF_REG, 0x00000001);
	ql_mddi_write(QUICKVX_RCB_IER_REG, 0x00000000);
	ql_mddi_write(QUICKVX_RCB_RCR_REG, 0x80000010);
	ql_mddi_write(QUICKVX_RCB_CELLBCR_REG, 0x8008746F);
	ql_mddi_write(QUICKVX_RCB_CELLCC_REG, 0x800000A3);
	ql_mddi_write(QUICKVX_RCB_CONFDONE_REG, 0x00000001);
}

/* Panel power-up sequence: pulse the LCD reset line, then load driver,
   gamma and power-control registers over SPI. The register values and the
   mddi_wait() delays follow the panel vendor's init sequence - do not
   reorder. */
void mddi_quickvx_prim_lcd_init(void)
{
	uint32 value;

	MDDI_MSG_DEBUG("\n%s(): ", __func__);
	/* dummy SPI read to prime the interface; 'value' is never filled in */
	ql_send_spi_data_from_lcd(0, &value);
	ql_send_spi_cmd_to_lcd(0x0100, 0x3000); /* power control1 */
	ql_send_spi_cmd_to_lcd(0x0101, 0x4010); /* power control2 */
	ql_send_spi_cmd_to_lcd(0x0106, 0x0000); /* auto seq setting */
	mddi_wait(3);
	/* toggle the panel reset line: high -> low -> high */
	ql_mddi_write(QUICKVX_FB_A2F_LCD_RESET_REG, 0x00000001);
	mddi_wait(1);
	ql_mddi_write(QUICKVX_FB_A2F_LCD_RESET_REG, 0x00000000);
	mddi_wait(1);
	ql_mddi_write(QUICKVX_FB_A2F_LCD_RESET_REG, 0x00000001);
	mddi_wait(10);
	ql_send_spi_cmd_to_lcd(0x0001, 0x0310); /* driver out control */
	ql_send_spi_cmd_to_lcd(0x0002, 0x0100); /* lcd ac control */
	ql_send_spi_cmd_to_lcd(0x0003, 0x0000); /* entry mode */
	ql_send_spi_cmd_to_lcd(0x0007, 0x0000); /* disp cont1 */
	ql_send_spi_cmd_to_lcd(0x0008, 0x0004); /* disp cont2 */
	ql_send_spi_cmd_to_lcd(0x0009, 0x000C); /* disp cont3 */
	ql_send_spi_cmd_to_lcd(0x000C, 0x4010); /* disp if cont1 */
	ql_send_spi_cmd_to_lcd(0x000E, 0x0000); /* disp if cont2 */
	ql_send_spi_cmd_to_lcd(0x0020, 0x013F); /* panel if cont1 */
	ql_send_spi_cmd_to_lcd(0x0022, 0x7600); /* panel if cont3 */
	ql_send_spi_cmd_to_lcd(0x0023, 0x1C0A); /* panel if cont4 */
	ql_send_spi_cmd_to_lcd(0x0024, 0x1C2C); /* panel if cont5 */
	ql_send_spi_cmd_to_lcd(0x0025, 0x1C4E); /* panel if cont6 */
	ql_send_spi_cmd_to_lcd(0x0027, 0x0000); /* panel if cont8 */
	ql_send_spi_cmd_to_lcd(0x0028, 0x760C); /* panel if cont9 */
	ql_send_spi_cmd_to_lcd(0x0300, 0x0000); /* gamma adj0 */
	ql_send_spi_cmd_to_lcd(0x0301, 0x0502); /* gamma adj1 */
	ql_send_spi_cmd_to_lcd(0x0302, 0x0705); /* gamma adj2 */
	ql_send_spi_cmd_to_lcd(0x0303, 0x0000); /* gamma adj3 */
	ql_send_spi_cmd_to_lcd(0x0304, 0x0200); /* gamma adj4 */
	ql_send_spi_cmd_to_lcd(0x0305, 0x0707); /* gamma adj5 */
	ql_send_spi_cmd_to_lcd(0x0306, 0x1010); /* gamma adj6 */
	ql_send_spi_cmd_to_lcd(0x0307, 0x0202); /* gamma adj7 */
	ql_send_spi_cmd_to_lcd(0x0308, 0x0704); /* gamma adj8 */
	ql_send_spi_cmd_to_lcd(0x0309, 0x0707); /* gamma adj9 */
	ql_send_spi_cmd_to_lcd(0x030A, 0x0000); /* gamma adja */
	ql_send_spi_cmd_to_lcd(0x030B, 0x0000); /* gamma adjb */
	ql_send_spi_cmd_to_lcd(0x030C, 0x0707); /* gamma adjc */
	ql_send_spi_cmd_to_lcd(0x030D, 0x1010); /* gamma adjd */
	ql_send_spi_cmd_to_lcd(0x0310, 0x0104); /* gamma adj10 */
	ql_send_spi_cmd_to_lcd(0x0311, 0x0503); /* gamma adj11 */
	ql_send_spi_cmd_to_lcd(0x0312, 0x0304); /* gamma adj12 */
	ql_send_spi_cmd_to_lcd(0x0315, 0x0304); /* gamma adj15 */
	ql_send_spi_cmd_to_lcd(0x0316, 0x031C); /* gamma adj16 */
	ql_send_spi_cmd_to_lcd(0x0317, 0x0204); /* gamma adj17 */
	ql_send_spi_cmd_to_lcd(0x0318, 0x0402); /* gamma adj18 */
	ql_send_spi_cmd_to_lcd(0x0319, 0x0305); /* gamma adj19 */
	ql_send_spi_cmd_to_lcd(0x031C, 0x0707); /* gamma adj1c */
	ql_send_spi_cmd_to_lcd(0x031D, 0x021F); /* gamma adj1d */
	ql_send_spi_cmd_to_lcd(0x0320, 0x0507); /* gamma adj20 */
	ql_send_spi_cmd_to_lcd(0x0321, 0x0604); /* gamma adj21 */
	ql_send_spi_cmd_to_lcd(0x0322, 0x0405); /* gamma adj22 */
	ql_send_spi_cmd_to_lcd(0x0327, 0x0203); /* gamma adj27 */
	ql_send_spi_cmd_to_lcd(0x0328, 0x0300); /* gamma adj28 */
	ql_send_spi_cmd_to_lcd(0x0329, 0x0002); /* gamma adj29 */
	ql_send_spi_cmd_to_lcd(0x0100, 0x363C); /* power cont1 */
	mddi_wait(1);
	ql_send_spi_cmd_to_lcd(0x0101, 0x4003); /* power cont2 */
	ql_send_spi_cmd_to_lcd(0x0102, 0x0001); /* power cont3 */
	ql_send_spi_cmd_to_lcd(0x0103, 0x3C58); /* power cont4 */
	ql_send_spi_cmd_to_lcd(0x010C, 0x0135); /* power cont6 */
	ql_send_spi_cmd_to_lcd(0x0106, 0x0002); /* auto seq */
	ql_send_spi_cmd_to_lcd(0x0029, 0x03BF); /* panel if cont10 */
	ql_send_spi_cmd_to_lcd(0x0106, 0x0003); /* auto seq */
	mddi_wait(5);
	ql_send_spi_cmd_to_lcd(0x0101, 0x4010); /* power cont2 */
	mddi_wait(10);
}

/* Function to Power On the Primary and Secondary LCD panels.
   Returns 0 on success, -ENODEV/-EINVAL if the framebuffer device
   attached to this platform device is missing or invalid. */
static int mddi_quickvx_lcd_on(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd;

	MDDI_MSG_DEBUG("\n%s(): ", __func__);
	mfd = platform_get_drvdata(pdev);
	if (!mfd) {
		MDDI_MSG_DEBUG("\n mddi_quickvx_lcd_on: Device not found!");
		return -ENODEV;
	}

	if (mfd->key != MFD_KEY) {
		MDDI_MSG_DEBUG("\n mddi_quickvx_lcd_on: Invalid MFD key!");
		return -EINVAL;
	}

	mddi_host_client_cnt_reset();
	mddi_quickvx_configure_registers();
	mddi_quickvx_prim_lcd_init();

	return 0;
}

/* Function to Power Off the Primary and Secondary LCD panels.
   Steps the panel's auto-sequencer down and drops Power Control 1.
   Always returns 0. */
static int mddi_quickvx_lcd_off(struct platform_device *pdev)
{
	MDDI_MSG_DEBUG("\n%s(): ", __func__);
	mddi_wait(1);
	ql_send_spi_cmd_to_lcd(0x0106, 0x0002); /* Auto Sequencer setting */
	mddi_wait(10);
	ql_send_spi_cmd_to_lcd(0x0106, 0x0000); /* Auto Sequencer setting */
	ql_send_spi_cmd_to_lcd(0x0029, 0x0002); /* Panel IF control 10 */
	ql_send_spi_cmd_to_lcd(0x0100, 0x300D); /* Power Control 1 */
	mddi_wait(1);

	return 0;
}

/* Function to set the Backlight brightness level via the PMIC LED
   driver. Retries up to 3 times with a 10ms pause between attempts. */
static void mddi_quickvx_lcd_set_backlight(struct msm_fb_data_type *mfd)
{
	int32 level, i = 0, ret;

	MDDI_MSG_DEBUG("%s(): ", __func__);

	level = mfd->bl_level;
	MDDI_MSG_DEBUG("\n level = %d", level);
	if (level < 0) {
		MDDI_MSG_DEBUG("mddi_quickvx_lcd_set_backlight: "
			"Invalid backlight level (%d)!\n", level);
		return;
	}

	while (i++ < 3) {
		ret = pmic_set_led_intensity(LED_LCD, level);
		if (ret == 0)
			return;
		msleep(10);
	}
	MDDI_MSG_DEBUG("%s: can't set lcd backlight!\n", __func__);
}

/* Driver Probe function - registers the framebuffer device for this
   panel. Always returns 0. */
static int __devinit mddi_quickvx_lcd_probe(struct platform_device *pdev)
{
	MDDI_MSG_DEBUG("\n%s(): id is %d", __func__, pdev->id);
	msm_fb_add_device(pdev);

	return 0;
}

/* Driver data structure */
static struct platform_driver this_driver = {
	.probe = mddi_quickvx_lcd_probe,
	.driver = {
		.name = "mddi_quickvx",
	},
};

/* Primary LCD panel data structure */
static struct msm_fb_panel_data mddi_quickvx_panel_data0 = {
	.on = mddi_quickvx_lcd_on,
	.off = mddi_quickvx_lcd_off,
	.set_backlight = mddi_quickvx_lcd_set_backlight,
};

/* Primary LCD panel device structure */
static struct platform_device this_device0 = {
	.name = "mddi_quickvx",
	.id = MDDI_QUICKVX_1_2,
	.dev = {
		.platform_data = &mddi_quickvx_panel_data0,
	}
};

/* Module init - driver main entry point.
   Optionally auto-detects the QuickVX client by MDDI manufacturer/product
   code, fills in the panel timing info and registers driver + device.
   Returns 0 if the panel is absent (not an error), otherwise the
   registration status. */
static int __init mddi_quickvx_lcd_init(void)
{
	int ret;
	struct msm_panel_info *pinfo;

#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
	u32 cid;

	MDDI_MSG_DEBUG("\n%s(): ", __func__);
	ret = msm_fb_detect_client("mddi_quickvx");
	if (ret == -ENODEV) {
		/* Device not found */
		MDDI_MSG_DEBUG("\n mddi_quickvx_lcd_init: No device found!");
		return 0;
	}

	if (ret) {
		cid = mddi_get_client_id();
		MDDI_MSG_DEBUG("\n cid = 0x%x", cid);
		if (((cid >> 16) != QUICKVX_MDDI_MFR_CODE) ||
			((cid & 0xFFFF) != QUICKVX_MDDI_PRD_CODE)) {
			/* MDDI Client ID not matching */
			MDDI_MSG_DEBUG("\n mddi_quickvx_lcd_init: "
				"Client ID missmatch!");
			return 0;
		}
		MDDI_MSG_DEBUG("\n mddi_quickvx_lcd_init: "
			"QuickVX LCD panel detected!");
	}
#endif /* CONFIG_FB_MSM_MDDI_AUTO_DETECT */

	/* measured refresh characteristics for the 480x864 panel */
	mddi_quickvx_rows_per_refresh = 872;
	mddi_quickvx_rows_per_second = 52364;
	mddi_quickvx_usecs_per_refresh = 16574;

	ret = platform_driver_register(&this_driver);
	if (!ret) {
		pinfo = &mddi_quickvx_panel_data0.panel_info;
		pinfo->xres = 480;
		pinfo->yres = 864;
		MSM_FB_SINGLE_MODE_PANEL(pinfo);
		pinfo->type = MDDI_PANEL;
		pinfo->pdest = DISPLAY_1;
		pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
		pinfo->wait_cycle = 0;
		pinfo->bpp = 24;
		pinfo->fb_num = 2;
		pinfo->clk_rate = 192000000;
		pinfo->clk_min = 192000000;
		pinfo->clk_max = 200000000;
		pinfo->lcd.rev = 1;
		pinfo->lcd.vsync_enable = TRUE;
		pinfo->lcd.refx100 = (mddi_quickvx_rows_per_second \
			* 100)/mddi_quickvx_rows_per_refresh;
		pinfo->mddi.is_type1 = TRUE;
		pinfo->lcd.v_back_porch = 4;
		pinfo->lcd.v_front_porch = 2;
		pinfo->lcd.v_pulse_width = 2;
		pinfo->lcd.hw_vsync_mode = TRUE;
		pinfo->lcd.vsync_notifier_period = (1 * HZ);
		pinfo->bl_max = 10;
		pinfo->bl_min = 0;

		ret = platform_device_register(&this_device0);
		if (ret) {
			/* undo the driver registration on device failure */
			platform_driver_unregister(&this_driver);
			MDDI_MSG_DEBUG("mddi_quickvx_lcd_init: "
				"Primary device registration failed!\n");
		}
	}

	return ret;
}

module_init(mddi_quickvx_lcd_init);
gpl-2.0
BlownFuze/i717_TW_JBkernel
drivers/ata/pata_it821x.c
2531
28097
/* * pata_it821x.c - IT821x PATA for new ATA layer * (C) 2005 Red Hat Inc * Alan Cox <alan@lxorguk.ukuu.org.uk> * (C) 2007 Bartlomiej Zolnierkiewicz * * based upon * * it821x.c * * linux/drivers/ide/pci/it821x.c Version 0.09 December 2004 * * Copyright (C) 2004 Red Hat * * May be copied or modified under the terms of the GNU General Public License * Based in part on the ITE vendor provided SCSI driver. * * Documentation available from IT8212F_V04.pdf * http://www.ite.com.tw/EN/products_more.aspx?CategoryID=3&ID=5,91 * Some other documents are NDA. * * The ITE8212 isn't exactly a standard IDE controller. It has two * modes. In pass through mode then it is an IDE controller. In its smart * mode its actually quite a capable hardware raid controller disguised * as an IDE controller. Smart mode only understands DMA read/write and * identify, none of the fancier commands apply. The IT8211 is identical * in other respects but lacks the raid mode. * * Errata: * o Rev 0x10 also requires master/slave hold the same DMA timings and * cannot do ATAPI MWDMA. * o The identify data for raid volumes lacks CHS info (technically ok) * but also fails to set the LBA28 and other bits. We fix these in * the IDE probe quirk code. * o If you write LBA48 sized I/O's (ie > 256 sector) in smart mode * raid then the controller firmware dies * o Smart mode without RAID doesn't clear all the necessary identify * bits to reduce the command set to the one used * * This has a few impacts on the driver * - In pass through mode we do all the work you would expect * - In smart mode the clocking set up is done by the controller generally * but we must watch the other limits and filter. * - There are a few extra vendor commands that actually talk to the * controller but only work PIO with no IRQ. * * Vendor areas of the identify block in smart mode are used for the * timing and policy set up. Each HDD in raid mode also has a serial * block on the disk. 
 The hardware extra commands are get/set chip status,
 *	rebuild, get rebuild status.
 *
 *	In Linux the driver supports pass through mode as if the device was
 *	just another IDE controller. If the smart mode is running then
 *	volumes are managed by the controller firmware and each IDE "disk"
 *	is a raid volume. Even more cute - the controller can do automated
 *	hotplug and rebuild.
 *
 *	The pass through controller itself is a little demented. It has a
 *	flaw that it has a single set of PIO/MWDMA timings per channel so
 *	non UDMA devices restrict each others performance. It also has a
 *	single clock source per channel so mixed UDMA100/133 performance
 *	isn't perfect and we have to pick a clock. Thankfully none of this
 *	matters in smart mode. ATAPI DMA is not currently supported.
 *
 *	It seems the smart mode is a win for RAID1/RAID10 but otherwise not.
 *
 *	TODO
 *	- ATAPI and other speed filtering
 *	- RAID configuration ioctls
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_it821x"
#define DRV_VERSION "0.4.2"

/* Per-port private state, hung off ata_port->private_data. */
struct it821x_dev
{
	unsigned int smart:1,		/* Are we in smart raid mode */
		timing10:1;		/* Rev 0x10 */
	u8	clock_mode;		/* 0, ATA_50 or ATA_66 */
	u8	want[2][2];		/* Mode/Pri log for master slave */
	/* We need these for switching the clock when DMA goes on/off
	   The high byte is the 66Mhz timing */
	u16	pio[2];			/* Cached PIO values */
	u16	mwdma[2];		/* Cached MWDMA values */
	u16	udma[2];		/* Cached UDMA values (per drive) */
	u16	last_device;		/* Master or slave loaded ? */
};

/* Clock selection constants used in clock_mode / want[][] */
#define ATA_66		0
#define ATA_50		1
#define ATA_ANY		2

#define UDMA_OFF	0
#define MWDMA_OFF	0

/*
 *	We allow users to force the card into non raid mode without
 *	flashing the alternative BIOS. This is also necessary right now
 *	for embedded platforms that cannot run a PC BIOS but are using this
 *	device.
 */

static int it8212_noraid;

/**
 *	it821x_program	-	program the PIO/MWDMA registers
 *	@ap: ATA port
 *	@adev: Device to program
 *	@timing: Timing value (66Mhz in top 8bits, 50 in the low 8)
 *
 *	Program the PIO/MWDMA timing for this channel according to the
 *	current clock. These share the same register so are managed by
 *	the DMA start/stop sequence as with the old driver.
 */

static void it821x_program(struct ata_port *ap, struct ata_device *adev, u16 timing)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct it821x_dev *itdev = ap->private_data;
	int channel = ap->port_no;
	u8 conf;

	/* Program PIO/MWDMA timing bits: pick the byte matching the
	   currently selected base clock */
	if (itdev->clock_mode == ATA_66)
		conf = timing >> 8;
	else
		conf = timing & 0xFF;

	pci_write_config_byte(pdev, 0x54 + 4 * channel, conf);
}

/**
 *	it821x_program_udma	-	program the UDMA registers
 *	@ap: ATA port
 *	@adev: ATA device to update
 *	@timing: Timing bits. Top 8 are for 66Mhz bottom for 50Mhz
 *
 *	Program the UDMA timing for this drive according to the
 *	current clock. Handles the dual clocks and also knows about
 *	the errata on the 0x10 revision. The UDMA errata is partly handled
 *	here and partly in start_dma.
 */

static void it821x_program_udma(struct ata_port *ap, struct ata_device *adev, u16 timing)
{
	struct it821x_dev *itdev = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int channel = ap->port_no;
	int unit = adev->devno;
	u8 conf;

	/* Program UDMA timing bits */
	if (itdev->clock_mode == ATA_66)
		conf = timing >> 8;
	else
		conf = timing & 0xFF;

	if (itdev->timing10 == 0)
		pci_write_config_byte(pdev, 0x56 + 4 * channel + unit, conf);
	else {
		/* Early revision must be programmed for both together */
		pci_write_config_byte(pdev, 0x56 + 4 * channel, conf);
		pci_write_config_byte(pdev, 0x56 + 4 * channel + 1, conf);
	}
}

/**
 *	it821x_clock_strategy
 *	@ap: ATA interface
 *	@adev: ATA device being updated
 *
 *	Select between the 50 and 66Mhz base clocks to get the best
 *	results for this interface.
 */

static void it821x_clock_strategy(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct it821x_dev *itdev = ap->private_data;
	u8 unit = adev->devno;
	struct ata_device *pair = ata_dev_pair(adev);

	int clock, altclock;
	u8 v;
	int sel = 0;

	/* Look for the most wanted clocking: want[x][0] is the priority,
	   want[x][1] the preferred clock of master (0) / slave (1) */
	if (itdev->want[0][0] > itdev->want[1][0]) {
		clock = itdev->want[0][1];
		altclock = itdev->want[1][1];
	} else {
		clock = itdev->want[1][1];
		altclock = itdev->want[0][1];
	}

	/* Master doesn't care does the slave ? */
	if (clock == ATA_ANY)
		clock = altclock;

	/* Nobody cares - keep the same clock */
	if (clock == ATA_ANY)
		return;
	/* No change */
	if (clock == itdev->clock_mode)
		return;

	/* Load this into the controller */
	if (clock == ATA_66)
		itdev->clock_mode = ATA_66;
	else {
		itdev->clock_mode = ATA_50;
		sel = 1;
	}

	/* clock select bit for this channel lives in config reg 0x50 */
	pci_read_config_byte(pdev, 0x50, &v);
	v &= ~(1 << (1 + ap->port_no));
	v |= sel << (1 + ap->port_no);
	pci_write_config_byte(pdev, 0x50, v);

	/*
	 *	Reprogram the UDMA/PIO of the pair drive for the switch
	 *	MWDMA will be dealt with by the dma switcher
	 */
	if (pair && itdev->udma[1-unit] != UDMA_OFF) {
		it821x_program_udma(ap, pair, itdev->udma[1-unit]);
		it821x_program(ap, pair, itdev->pio[1-unit]);
	}
	/*
	 *	Reprogram the UDMA/PIO of our drive for the switch.
	 *	MWDMA will be dealt with by the dma switcher
	 */
	if (itdev->udma[unit] != UDMA_OFF) {
		it821x_program_udma(ap, adev, itdev->udma[unit]);
		it821x_program(ap, adev, itdev->pio[unit]);
	}
}

/**
 *	it821x_passthru_set_piomode	-	set PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Configure for PIO mode. This is complicated as the register is
 *	shared by PIO and MWDMA and for both channels.
 */

static void it821x_passthru_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	/* Spec says 89 ref driver uses 88 */
	static const u16 pio[]	= { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 };
	static const u8 pio_want[] = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY };

	struct it821x_dev *itdev = ap->private_data;
	int unit = adev->devno;
	int mode_wanted = adev->pio_mode - XFER_PIO_0;

	/* We prefer 66Mhz clock for PIO 0-3, don't care for PIO4 */
	itdev->want[unit][1] = pio_want[mode_wanted];
	itdev->want[unit][0] = 1;	/* PIO is lowest priority */
	itdev->pio[unit] = pio[mode_wanted];
	it821x_clock_strategy(ap, adev);
	it821x_program(ap, adev, itdev->pio[unit]);
}

/**
 *	it821x_passthru_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Set up the DMA modes. The actions taken depend heavily on the mode
 *	to use. If UDMA is used as is hopefully the usual case then the
 *	timing register is private and we need only consider the clock. If
 *	we are using MWDMA then we have to manage the setting ourself as
 *	we switch devices and mode.
 */

static void it821x_passthru_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	static const u16 dma[] = { 0x8866, 0x3222, 0x3121 };
	static const u8 mwdma_want[] = { ATA_ANY, ATA_66, ATA_ANY };
	static const u16 udma[] = { 0x4433, 0x4231, 0x3121, 0x2121, 0x1111, 0x2211, 0x1111 };
	static const u8 udma_want[] = { ATA_ANY, ATA_50, ATA_ANY, ATA_66, ATA_66, ATA_50, ATA_66 };

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct it821x_dev *itdev = ap->private_data;
	int channel = ap->port_no;
	int unit = adev->devno;
	u8 conf;

	if (adev->dma_mode >= XFER_UDMA_0) {
		int mode_wanted = adev->dma_mode - XFER_UDMA_0;

		itdev->want[unit][1] = udma_want[mode_wanted];
		itdev->want[unit][0] = 3;	/* UDMA is high priority */
		itdev->mwdma[unit] = MWDMA_OFF;
		itdev->udma[unit] = udma[mode_wanted];
		if (mode_wanted >= 5)
			itdev->udma[unit] |= 0x8080;	/* UDMA 5/6 select on */

		/* UDMA on. Again revision 0x10 must do the pair */
		pci_read_config_byte(pdev, 0x50, &conf);
		if (itdev->timing10)
			conf &= channel ? 0x9F: 0xE7;
		else
			conf &= ~ (1 << (3 + 2 * channel + unit));
		pci_write_config_byte(pdev, 0x50, conf);
		it821x_clock_strategy(ap, adev);
		it821x_program_udma(ap, adev, itdev->udma[unit]);
	} else {
		int mode_wanted = adev->dma_mode - XFER_MW_DMA_0;

		itdev->want[unit][1] = mwdma_want[mode_wanted];
		itdev->want[unit][0] = 2;	/* MWDMA is low priority */
		itdev->mwdma[unit] = dma[mode_wanted];
		itdev->udma[unit] = UDMA_OFF;

		/* UDMA bits off - Revision 0x10 do them in pairs */
		pci_read_config_byte(pdev, 0x50, &conf);
		if (itdev->timing10)
			conf |= channel ? 0x60: 0x18;
		else
			conf |= 1 << (3 + 2 * channel + unit);
		pci_write_config_byte(pdev, 0x50, conf);
		it821x_clock_strategy(ap, adev);
	}
}

/**
 *	it821x_passthru_dma_start	-	DMA start callback
 *	@qc: Command in progress
 *
 *	Usually drivers set the DMA timing at the point the set_dmamode call
 *	is made. IT821x however requires we load new timings on the
 *	transitions in some cases.
 */

static void it821x_passthru_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;
	struct it821x_dev *itdev = ap->private_data;
	int unit = adev->devno;

	if (itdev->mwdma[unit] != MWDMA_OFF)
		it821x_program(ap, adev, itdev->mwdma[unit]);
	else if (itdev->udma[unit] != UDMA_OFF && itdev->timing10)
		it821x_program_udma(ap, adev, itdev->udma[unit]);
	ata_bmdma_start(qc);
}

/**
 *	it821x_passthru_dma_stop	-	DMA stop callback
 *	@qc: ATA command
 *
 *	We loaded new timings in dma_start, as a result we need to restore
 *	the PIO timings in dma_stop so that the next command issue gets the
 *	right clock values.
 */

static void it821x_passthru_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;
	struct it821x_dev *itdev = ap->private_data;
	int unit = adev->devno;

	ata_bmdma_stop(qc);
	if (itdev->mwdma[unit] != MWDMA_OFF)
		it821x_program(ap, adev, itdev->pio[unit]);
}


/**
 *	it821x_passthru_dev_select	-	Select master/slave
 *	@ap: ATA port
 *	@device: Device number (not pointer)
 *
 *	Device selection hook. If necessary perform clock switching
 */

static void it821x_passthru_dev_select(struct ata_port *ap,
				       unsigned int device)
{
	struct it821x_dev *itdev = ap->private_data;
	/* reload the PIO timing shared register when switching drives */
	if (itdev && device != itdev->last_device) {
		struct ata_device *adev = &ap->link.device[device];
		it821x_program(ap, adev, itdev->pio[adev->devno]);
		itdev->last_device = device;
	}
	ata_sff_dev_select(ap, device);
}

/**
 *	it821x_smart_qc_issue	-	wrap qc issue prot
 *	@qc: command
 *
 *	Wrap the command issue sequence for the IT821x. We need to
 *	perform our own device selection timing loads before the
 *	usual happenings kick off
 */

static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
{
	/* Only a whitelist of commands is understood by the smart-mode
	   firmware; everything else is refused up front */
	switch(qc->tf.command)
	{
		/* Commands the firmware supports */
		case ATA_CMD_READ:
		case ATA_CMD_READ_EXT:
		case ATA_CMD_WRITE:
		case ATA_CMD_WRITE_EXT:
		case ATA_CMD_PIO_READ:
		case ATA_CMD_PIO_READ_EXT:
		case ATA_CMD_PIO_WRITE:
		case ATA_CMD_PIO_WRITE_EXT:
		case ATA_CMD_READ_MULTI:
		case ATA_CMD_READ_MULTI_EXT:
		case ATA_CMD_WRITE_MULTI:
		case ATA_CMD_WRITE_MULTI_EXT:
		case ATA_CMD_ID_ATA:
		case ATA_CMD_INIT_DEV_PARAMS:
		case 0xFC:	/* Internal 'report rebuild state' */
		/* Arguably should just no-op this one */
		case ATA_CMD_SET_FEATURES:
			return ata_bmdma_qc_issue(qc);
	}
	printk(KERN_DEBUG "it821x: can't process command 0x%02X\n",
	       qc->tf.command);
	return AC_ERR_DEV;
}

/**
 *	it821x_passthru_qc_issue	-	wrap qc issue prot
 *	@qc: command
 *
 *	Wrap the command issue sequence for the IT821x.
 We need to
 *	perform our own device selection timing loads before the
 *	usual happenings kick off
 */

static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc)
{
	it821x_passthru_dev_select(qc->ap, qc->dev->devno);
	return ata_bmdma_qc_issue(qc);
}

/**
 *	it821x_smart_set_mode	-	mode setting
 *	@link: interface to set up
 *	@unused: device that failed (error only)
 *
 *	Use a non standard set_mode function. We don't want to be tuned.
 *	The BIOS configured everything. Our job is not to fiddle. We
 *	read the dma enabled bits from the PCI configuration of the device
 *	and respect them.
 */

static int it821x_smart_set_mode(struct ata_link *link, struct ata_device **unused)
{
	struct ata_device *dev;

	ata_for_each_dev(dev, link, ENABLED) {
		/* We don't really care */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = XFER_MW_DMA_0;
		/* We do need the right mode information for DMA or PIO
		   and this comes from the current configuration flags */
		if (ata_id_has_dma(dev->id)) {
			ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
			dev->xfer_mode = XFER_MW_DMA_0;
			dev->xfer_shift = ATA_SHIFT_MWDMA;
			dev->flags &= ~ATA_DFLAG_PIO;
		} else {
			ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
			dev->xfer_mode = XFER_PIO_0;
			dev->xfer_shift = ATA_SHIFT_PIO;
			dev->flags |= ATA_DFLAG_PIO;
		}
	}
	return 0;
}

/**
 *	it821x_dev_config	-	Called each device identify
 *	@adev: Device that has just been identified
 *
 *	Perform the initial setup needed for each device that is chip
 *	special. In our case we need to lock the sector count to avoid
 *	blowing the brains out of the firmware with large LBA48 requests
 *
 */

static void it821x_dev_config(struct ata_device *adev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	/* cap transfer size - large LBA48 I/O kills the raid firmware */
	if (adev->max_sectors > 255)
		adev->max_sectors = 255;

	if (strstr(model_num, "Integrated Technology Express")) {
		/* RAID mode */
		ata_dev_printk(adev, KERN_INFO, "%sRAID%d volume",
			adev->id[147]?"Bootable ":"",
			adev->id[129]);
		if (adev->id[129] != 1)
			printk("(%dK stripe)", adev->id[146]);
		printk(".\n");
	}
	/* This is a controller firmware triggered funny, don't
	   report the drive faulty! */
	adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC;
	/* No HPA in 'smart' mode */
	adev->horkage |= ATA_HORKAGE_BROKEN_HPA;
}

/**
 *	it821x_read_id	-	Hack identify data up
 *	@adev: device to read
 *	@tf: proposed taskfile
 *	@id: buffer for returned ident data
 *
 *	Query the devices on this firmware driven port and slightly
 *	mash the identify data to stop us and common tools trying to
 *	use features not firmware supported. The firmware itself does
 *	some masking (eg SMART) but not enough.
 */

static unsigned int it821x_read_id(struct ata_device *adev,
					struct ata_taskfile *tf, u16 *id)
{
	unsigned int err_mask;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	err_mask = ata_do_dev_read_id(adev, tf, id);
	if (err_mask)
		return err_mask;
	ata_id_c_string(id, model_num, ATA_ID_PROD, sizeof(model_num));

	id[83] &= ~(1 << 12);	/* Cache flush is firmware handled */
	id[83] &= ~(1 << 13);	/* Ditto for LBA48 flushes */
	id[84] &= ~(1 << 6);	/* No FUA */
	id[85] &= ~(1 << 10);	/* No HPA */
	id[76] = 0;		/* No NCQ/AN etc */

	if (strstr(model_num, "Integrated Technology Express")) {
		/* Set feature bits the firmware neglects */
		id[49] |= 0x0300;	/* LBA, DMA */
		id[83] &= 0x7FFF;
		id[83] |= 0x4400;	/* Word 83 is valid and LBA48 */
		id[86] |= 0x0400;	/* LBA48 on */
		id[ATA_ID_MAJOR_VER] |= 0x1F;
		/* Clear the serial number because it's different each boot
		   which breaks validation on resume */
		memset(&id[ATA_ID_SERNO], 0x20, ATA_ID_SERNO_LEN);
	}
	return err_mask;
}

/**
 *	it821x_check_atapi_dma	-	ATAPI DMA handler
 *	@qc: Command we are about to issue
 *
 *	Decide if this ATAPI command can be issued by DMA on this
 *	controller. Return 0 if it can be.
 */

static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct it821x_dev *itdev = ap->private_data;

	/* Only use dma for transfers to/from the media. */
	if (ata_qc_raw_nbytes(qc) < 2048)
		return -EOPNOTSUPP;

	/* No ATAPI DMA in smart mode */
	if (itdev->smart)
		return -EOPNOTSUPP;
	/* No ATAPI DMA on rev 10 */
	if (itdev->timing10)
		return -EOPNOTSUPP;
	/* Cool */
	return 0;
}

/**
 *	it821x_display_disk	-	display disk setup
 *	@n: Device number
 *	@buf: Buffer block from firmware
 *
 *	Produce a nice informative display of the device setup as provided
 *	by the firmware.
*/ static void it821x_display_disk(int n, u8 *buf) { unsigned char id[41]; int mode = 0; char *mtype = ""; char mbuf[8]; char *cbl = "(40 wire cable)"; static const char *types[5] = { "RAID0", "RAID1" "RAID 0+1", "JBOD", "DISK" }; if (buf[52] > 4) /* No Disk */ return; ata_id_c_string((u16 *)buf, id, 0, 41); if (buf[51]) { mode = ffs(buf[51]); mtype = "UDMA"; } else if (buf[49]) { mode = ffs(buf[49]); mtype = "MWDMA"; } if (buf[76]) cbl = ""; if (mode) snprintf(mbuf, 8, "%5s%d", mtype, mode - 1); else strcpy(mbuf, "PIO"); if (buf[52] == 4) printk(KERN_INFO "%d: %-6s %-8s %s %s\n", n, mbuf, types[buf[52]], id, cbl); else printk(KERN_INFO "%d: %-6s %-8s Volume: %1d %s %s\n", n, mbuf, types[buf[52]], buf[53], id, cbl); if (buf[125] < 100) printk(KERN_INFO "%d: Rebuilding: %d%%\n", n, buf[125]); } /** * it821x_firmware_command - issue firmware command * @ap: IT821x port to interrogate * @cmd: command * @len: length * * Issue firmware commands expecting data back from the controller. We * use this to issue commands that do not go via the normal paths. Other * commands such as 0xFC can be issued normally. */ static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len) { u8 status; int n = 0; u16 *buf = kmalloc(len, GFP_KERNEL); if (buf == NULL) { printk(KERN_ERR "it821x_firmware_command: Out of memory\n"); return NULL; } /* This isn't quite a normal ATA command as we are talking to the firmware not the drives */ ap->ctl |= ATA_NIEN; iowrite8(ap->ctl, ap->ioaddr.ctl_addr); ata_wait_idle(ap); iowrite8(ATA_DEVICE_OBS, ap->ioaddr.device_addr); iowrite8(cmd, ap->ioaddr.command_addr); udelay(1); /* This should be almost immediate but a little paranoia goes a long way. 
*/ while(n++ < 10) { status = ioread8(ap->ioaddr.status_addr); if (status & ATA_ERR) { kfree(buf); printk(KERN_ERR "it821x_firmware_command: rejected\n"); return NULL; } if (status & ATA_DRQ) { ioread16_rep(ap->ioaddr.data_addr, buf, len/2); return (u8 *)buf; } mdelay(1); } kfree(buf); printk(KERN_ERR "it821x_firmware_command: timeout\n"); return NULL; } /** * it821x_probe_firmware - firmware reporting/setup * @ap: IT821x port being probed * * Probe the firmware of the controller by issuing firmware command * 0xFA and analysing the returned data. */ static void it821x_probe_firmware(struct ata_port *ap) { u8 *buf; int i; /* This is a bit ugly as we can't just issue a task file to a device as this is controller magic */ buf = it821x_firmware_command(ap, 0xFA, 512); if (buf != NULL) { printk(KERN_INFO "pata_it821x: Firmware %02X/%02X/%02X%02X\n", buf[505], buf[506], buf[507], buf[508]); for (i = 0; i < 4; i++) it821x_display_disk(i, buf + 128 * i); kfree(buf); } } /** * it821x_port_start - port setup * @ap: ATA port being set up * * The it821x needs to maintain private data structures and also to * use the standard PCI interface which lacks support for this * functionality. 
 We instead set up the private data on the port
 *	start hook, and tear it down on port stop
 */

static int it821x_port_start(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct it821x_dev *itdev;
	u8 conf;

	int ret = ata_bmdma_port_start(ap);
	if (ret < 0)
		return ret;

	/* devm allocation - freed automatically on device teardown */
	itdev = devm_kzalloc(&pdev->dev, sizeof(struct it821x_dev), GFP_KERNEL);
	if (itdev == NULL)
		return -ENOMEM;
	ap->private_data = itdev;

	pci_read_config_byte(pdev, 0x50, &conf);

	if (conf & 1) {
		itdev->smart = 1;
		/* Long I/O's although allowed in LBA48 space cause the
		   onboard firmware to enter the twilight zone */
		/* No ATAPI DMA in this mode either */
		if (ap->port_no == 0)
			it821x_probe_firmware(ap);
	}
	/* Pull the current clocks from 0x50 */
	if (conf & (1 << (1 + ap->port_no)))
		itdev->clock_mode = ATA_50;
	else
		itdev->clock_mode = ATA_66;

	itdev->want[0][1] = ATA_ANY;
	itdev->want[1][1] = ATA_ANY;
	itdev->last_device = -1;

	if (pdev->revision == 0x10) {
		itdev->timing10 = 1;
		/* Need to disable ATAPI DMA for this case */
		if (!itdev->smart)
			printk(KERN_WARNING DRV_NAME": Revision 0x10, workarounds activated.\n");
	}

	return 0;
}

/**
 *	it821x_rdc_cable	-	Cable detect for RDC1010
 *	@ap: port we are checking
 *
 *	Return the RDC1010 cable type. Unlike the IT821x we know how to do
 *	this and can do host side cable detect
 */

static int it821x_rdc_cable(struct ata_port *ap)
{
	u16 r40;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	pci_read_config_word(pdev, 0x40, &r40);
	if (r40 & (1 << (2 + ap->port_no)))
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

static struct scsi_host_template it821x_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

/* Smart (firmware raid) mode: firmware owns tuning, we filter commands */
static struct ata_port_operations it821x_smart_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.check_atapi_dma= it821x_check_atapi_dma,
	.qc_issue	= it821x_smart_qc_issue,
	.cable_detect	= ata_cable_80wire,
	.set_mode	= it821x_smart_set_mode,
	.dev_config	= it821x_dev_config,
	.read_id	= it821x_read_id,
	.port_start	= it821x_port_start,
};

/* Pass-through mode: we drive the shared timing registers ourselves */
static struct ata_port_operations it821x_passthru_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.check_atapi_dma= it821x_check_atapi_dma,
	.sff_dev_select	= it821x_passthru_dev_select,
	.bmdma_start	= it821x_passthru_bmdma_start,
	.bmdma_stop	= it821x_passthru_bmdma_stop,
	.qc_issue	= it821x_passthru_qc_issue,
	.cable_detect	= ata_cable_unknown,
	.set_piomode	= it821x_passthru_set_piomode,
	.set_dmamode	= it821x_passthru_set_dmamode,
	.port_start	= it821x_port_start,
};

/* RDC1010: pass-through behaviour plus working host-side cable detect */
static struct ata_port_operations it821x_rdc_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.check_atapi_dma= it821x_check_atapi_dma,
	.sff_dev_select	= it821x_passthru_dev_select,
	.bmdma_start	= it821x_passthru_bmdma_start,
	.bmdma_stop	= it821x_passthru_bmdma_stop,
	.qc_issue	= it821x_passthru_qc_issue,
	.cable_detect	= it821x_rdc_cable,
	.set_piomode	= it821x_passthru_set_piomode,
	.set_dmamode	= it821x_passthru_set_dmamode,
	.port_start	= it821x_port_start,
};

/* Force an IT8212 out of raid mode by resetting its local CPU and
   reprogramming the config space for bypass operation. */
static void it821x_disable_raid(struct pci_dev *pdev)
{
	/* Neither the RDC nor the IT8211 */
	if (pdev->vendor != PCI_VENDOR_ID_ITE ||
			pdev->device != PCI_DEVICE_ID_ITE_8212)
		return;

	/* Reset local CPU, and set BIOS not ready */
	pci_write_config_byte(pdev, 0x5E, 0x01);

	/* Set to bypass mode, and reset PCI bus */
	pci_write_config_byte(pdev, 0x50, 0x00);
	pci_write_config_word(pdev, PCI_COMMAND,
			      PCI_COMMAND_PARITY | PCI_COMMAND_IO |
			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	pci_write_config_word(pdev, 0x40, 0xA0F3);

	pci_write_config_dword(pdev, 0x4C, 0x02040204);
	pci_write_config_byte(pdev, 0x42, 0x36);
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20);
}

/* PCI probe: pick the port_ops matching the chip variant and the raid
   mode bit in config reg 0x50, then hand off to the bmdma core. */
static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	u8 conf;

	static const struct ata_port_info info_smart = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA6,
		.port_ops = &it821x_smart_port_ops
	};
	static const struct ata_port_info info_passthru = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA6,
		.port_ops = &it821x_passthru_port_ops
	};
	static const struct ata_port_info info_rdc = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA6,
		.port_ops = &it821x_rdc_port_ops
	};
	static const struct ata_port_info info_rdc_11 = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		/* No UDMA */
		.port_ops = &it821x_rdc_port_ops
	};

	const struct ata_port_info *ppi[] = { NULL, NULL };
	static char *mode[2] = { "pass through", "smart" };
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (pdev->vendor == PCI_VENDOR_ID_RDC) {
		/* Deal with Vortex86SX */
		if (pdev->revision == 0x11)
			ppi[0] = &info_rdc_11;
		else
			ppi[0] = &info_rdc;
	} else {
		/* Force the card into bypass mode if so requested */
		if (it8212_noraid) {
			printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n");
			it821x_disable_raid(pdev);
		}
		pci_read_config_byte(pdev, 0x50, &conf);
		conf &= 1;

		printk(KERN_INFO DRV_NAME": controller in %s mode.\n", mode[conf]);

		if (conf == 0)
			ppi[0] = &info_passthru;
		else
			ppi[0] = &info_smart;
	}
	return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0);
}

#ifdef CONFIG_PM
/* Resume hook: re-apply the noraid override before restarting the host */
static int it821x_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;
	/* Resume - turn raid back off if need be */
	if (it8212_noraid)
		it821x_disable_raid(pdev);
	ata_host_resume(host);
	return rc;
}
#endif

static const struct pci_device_id it821x[] = {
	{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), },
	{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), },
	{ PCI_VDEVICE(RDC, PCI_DEVICE_ID_RDC_D1010), },

	{ },
};

static struct pci_driver it821x_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= it821x,
	.probe 		= it821x_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= it821x_reinit_one,
#endif
};

static int __init it821x_init(void)
{
	return pci_register_driver(&it821x_pci_driver);
}

static void __exit it821x_exit(void)
{
	pci_unregister_driver(&it821x_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, it821x);
MODULE_VERSION(DRV_VERSION);

module_param_named(noraid, it8212_noraid, int, S_IRUGO);
MODULE_PARM_DESC(noraid, "Force card into bypass mode");

module_init(it821x_init);
module_exit(it821x_exit);
gpl-2.0
philippedeswert/android_kernel_lge_hammerhead
net/wireless/wext-sme.c
4835
9469
/* * cfg80211 wext compat for managed mode. * * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * Copyright (C) 2009 Intel Corporation. All rights reserved. */ #include <linux/export.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <net/cfg80211.h> #include <net/cfg80211-wext.h> #include "wext-compat.h" #include "nl80211.h" int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct cfg80211_cached_keys *ck = NULL; const u8 *prev_bssid = NULL; int err, i; ASSERT_RDEV_LOCK(rdev); ASSERT_WDEV_LOCK(wdev); if (!netif_running(wdev->netdev)) return 0; wdev->wext.connect.ie = wdev->wext.ie; wdev->wext.connect.ie_len = wdev->wext.ie_len; /* Use default background scan period */ wdev->wext.connect.bg_scan_period = -1; if (wdev->wext.keys) { wdev->wext.keys->def = wdev->wext.default_key; wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key; if (wdev->wext.default_key != -1) wdev->wext.connect.privacy = true; } if (!wdev->wext.connect.ssid_len) return 0; if (wdev->wext.keys) { ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL); if (!ck) return -ENOMEM; for (i = 0; i < 6; i++) ck->params[i].key = ck->data[i]; } if (wdev->wext.prev_bssid_valid) prev_bssid = wdev->wext.prev_bssid; err = __cfg80211_connect(rdev, wdev->netdev, &wdev->wext.connect, ck, prev_bssid); if (err) kfree(ck); return err; } int cfg80211_mgd_wext_siwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *wextfreq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct ieee80211_channel *chan = NULL; int err, freq; /* call only for station! 
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); if (freq < 0) return freq; if (freq) { chan = ieee80211_get_channel(wdev->wiphy, freq); if (!chan) return -EINVAL; if (chan->flags & IEEE80211_CHAN_DISABLED) return -EINVAL; } cfg80211_lock_rdev(rdev); mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); if (wdev->sme_state != CFG80211_SME_IDLE) { bool event = true; if (wdev->wext.connect.channel == chan) { err = 0; goto out; } /* if SSID set, we'll try right again, avoid event */ if (wdev->wext.connect.ssid_len) event = false; err = __cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, event); if (err) goto out; } wdev->wext.connect.channel = chan; /* SSID is not set, we just want to switch channel */ if (chan && !wdev->wext.connect.ssid_len) { err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); goto out; } err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); mutex_unlock(&rdev->devlist_mtx); cfg80211_unlock_rdev(rdev); return err; } int cfg80211_mgd_wext_giwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct ieee80211_channel *chan = NULL; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; wdev_lock(wdev); if (wdev->current_bss) chan = wdev->current_bss->pub.channel; else if (wdev->wext.connect.channel) chan = wdev->wext.connect.channel; wdev_unlock(wdev); if (chan) { freq->m = chan->center_freq; freq->e = 6; return 0; } /* no channel if not joining */ return -EINVAL; } int cfg80211_mgd_wext_siwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); size_t len = data->length; int err; /* call only for station! 
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; if (!data->flags) len = 0; /* iwconfig uses nul termination in SSID.. */ if (len > 0 && ssid[len - 1] == '\0') len--; cfg80211_lock_rdev(rdev); mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = 0; if (wdev->sme_state != CFG80211_SME_IDLE) { bool event = true; if (wdev->wext.connect.ssid && len && len == wdev->wext.connect.ssid_len && memcmp(wdev->wext.connect.ssid, ssid, len) == 0) goto out; /* if SSID set now, we'll try to connect, avoid event */ if (len) event = false; err = __cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, event); if (err) goto out; } wdev->wext.prev_bssid_valid = false; wdev->wext.connect.ssid = wdev->wext.ssid; memcpy(wdev->wext.ssid, ssid, len); wdev->wext.connect.ssid_len = len; wdev->wext.connect.crypto.control_port = false; wdev->wext.connect.crypto.control_port_ethertype = cpu_to_be16(ETH_P_PAE); err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); mutex_unlock(&rdev->devlist_mtx); cfg80211_unlock_rdev(rdev); return err; } int cfg80211_mgd_wext_giwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for station! 
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; data->flags = 0; wdev_lock(wdev); if (wdev->current_bss) { const u8 *ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, WLAN_EID_SSID); if (ie) { data->flags = 1; data->length = ie[1]; memcpy(ssid, ie + 2, data->length); } } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) { data->flags = 1; data->length = wdev->wext.connect.ssid_len; memcpy(ssid, wdev->wext.connect.ssid, data->length); } wdev_unlock(wdev); return 0; } int cfg80211_mgd_wext_siwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); u8 *bssid = ap_addr->sa_data; int err; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; if (ap_addr->sa_family != ARPHRD_ETHER) return -EINVAL; /* automatic mode */ if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) bssid = NULL; cfg80211_lock_rdev(rdev); mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); if (wdev->sme_state != CFG80211_SME_IDLE) { err = 0; /* both automatic */ if (!bssid && !wdev->wext.connect.bssid) goto out; /* fixed already - and no change */ if (wdev->wext.connect.bssid && bssid && compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0) goto out; err = __cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, false); if (err) goto out; } if (bssid) { memcpy(wdev->wext.bssid, bssid, ETH_ALEN); wdev->wext.connect.bssid = wdev->wext.bssid; } else wdev->wext.connect.bssid = NULL; err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); mutex_unlock(&rdev->devlist_mtx); cfg80211_unlock_rdev(rdev); return err; } int cfg80211_mgd_wext_giwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for station! 
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; ap_addr->sa_family = ARPHRD_ETHER; wdev_lock(wdev); if (wdev->current_bss) memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN); else memset(ap_addr->sa_data, 0, ETH_ALEN); wdev_unlock(wdev); return 0; } int cfg80211_wext_siwgenie(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); u8 *ie = extra; int ie_len = data->length, err; if (wdev->iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (!ie_len) ie = NULL; wdev_lock(wdev); /* no change */ err = 0; if (wdev->wext.ie_len == ie_len && memcmp(wdev->wext.ie, ie, ie_len) == 0) goto out; if (ie_len) { ie = kmemdup(extra, ie_len, GFP_KERNEL); if (!ie) { err = -ENOMEM; goto out; } } else ie = NULL; kfree(wdev->wext.ie); wdev->wext.ie = ie; wdev->wext.ie_len = ie_len; if (wdev->sme_state != CFG80211_SME_IDLE) { err = __cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, false); if (err) goto out; } /* userspace better not think we'll reconnect */ err = 0; out: wdev_unlock(wdev); return err; } int cfg80211_wext_siwmlme(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct iw_mlme *mlme = (struct iw_mlme *)extra; struct cfg80211_registered_device *rdev; int err; if (!wdev) return -EOPNOTSUPP; rdev = wiphy_to_dev(wdev->wiphy); if (wdev->iftype != NL80211_IFTYPE_STATION) return -EINVAL; if (mlme->addr.sa_family != ARPHRD_ETHER) return -EINVAL; wdev_lock(wdev); switch (mlme->cmd) { case IW_MLME_DEAUTH: case IW_MLME_DISASSOC: err = __cfg80211_disconnect(rdev, dev, mlme->reason_code, true); break; default: err = -EOPNOTSUPP; break; } wdev_unlock(wdev); return err; }
gpl-2.0
jmztaylor/android_kernel_htc_a5dug
drivers/usb/musb/am35x.c
4835
16702
/* * Texas Instruments AM35x "glue layer" * * Copyright (c) 2010, by Texas Instruments * * Based on the DA8xx "glue layer" code. * Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com> * * This file is part of the Inventra Controller Driver for Linux. * * The Inventra Controller Driver for Linux is free software; you * can redistribute it and/or modify it under the terms of the GNU * General Public License version 2 as published by the Free Software * Foundation. * * The Inventra Controller Driver for Linux is distributed in * the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public * License for more details. * * You should have received a copy of the GNU General Public License * along with The Inventra Controller Driver for Linux ; if not, * write to the Free Software Foundation, Inc., 59 Temple Place, * Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <plat/usb.h> #include "musb_core.h" /* * AM35x specific definitions */ /* USB 2.0 OTG module registers */ #define USB_REVISION_REG 0x00 #define USB_CTRL_REG 0x04 #define USB_STAT_REG 0x08 #define USB_EMULATION_REG 0x0c /* 0x10 Reserved */ #define USB_AUTOREQ_REG 0x14 #define USB_SRP_FIX_TIME_REG 0x18 #define USB_TEARDOWN_REG 0x1c #define EP_INTR_SRC_REG 0x20 #define EP_INTR_SRC_SET_REG 0x24 #define EP_INTR_SRC_CLEAR_REG 0x28 #define EP_INTR_MASK_REG 0x2c #define EP_INTR_MASK_SET_REG 0x30 #define EP_INTR_MASK_CLEAR_REG 0x34 #define EP_INTR_SRC_MASKED_REG 0x38 #define CORE_INTR_SRC_REG 0x40 #define CORE_INTR_SRC_SET_REG 0x44 #define CORE_INTR_SRC_CLEAR_REG 0x48 #define CORE_INTR_MASK_REG 0x4c #define CORE_INTR_MASK_SET_REG 0x50 #define CORE_INTR_MASK_CLEAR_REG 0x54 #define CORE_INTR_SRC_MASKED_REG 0x58 /* 0x5c Reserved 
*/ #define USB_END_OF_INTR_REG 0x60 /* Control register bits */ #define AM35X_SOFT_RESET_MASK 1 /* USB interrupt register bits */ #define AM35X_INTR_USB_SHIFT 16 #define AM35X_INTR_USB_MASK (0x1ff << AM35X_INTR_USB_SHIFT) #define AM35X_INTR_DRVVBUS 0x100 #define AM35X_INTR_RX_SHIFT 16 #define AM35X_INTR_TX_SHIFT 0 #define AM35X_TX_EP_MASK 0xffff /* EP0 + 15 Tx EPs */ #define AM35X_RX_EP_MASK 0xfffe /* 15 Rx EPs */ #define AM35X_TX_INTR_MASK (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT) #define AM35X_RX_INTR_MASK (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT) #define USB_MENTOR_CORE_OFFSET 0x400 struct am35x_glue { struct device *dev; struct platform_device *musb; struct clk *phy_clk; struct clk *clk; }; #define glue_to_musb(g) platform_get_drvdata(g->musb) /* * am35x_musb_enable - enable interrupts */ static void am35x_musb_enable(struct musb *musb) { void __iomem *reg_base = musb->ctrl_base; u32 epmask; /* Workaround: setup IRQs through both register sets. */ epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) | ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT); musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask); musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK); /* Force the DRVVBUS IRQ so we can start polling for ID change. 
*/ if (is_otg_enabled(musb)) musb_writel(reg_base, CORE_INTR_SRC_SET_REG, AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT); } /* * am35x_musb_disable - disable HDRC and flush interrupts */ static void am35x_musb_disable(struct musb *musb) { void __iomem *reg_base = musb->ctrl_base; musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK); musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG, AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); musb_writel(reg_base, USB_END_OF_INTR_REG, 0); } #define portstate(stmt) stmt static void am35x_musb_set_vbus(struct musb *musb, int is_on) { WARN_ON(is_on && is_peripheral_active(musb)); } #define POLL_SECONDS 2 static struct timer_list otg_workaround; static void otg_timer(unsigned long _musb) { struct musb *musb = (void *)_musb; void __iomem *mregs = musb->mregs; u8 devctl; unsigned long flags; /* * We poll because AM35x's won't expose several OTG-critical * status change events (from the transceiver) otherwise. */ devctl = musb_readb(mregs, MUSB_DEVCTL); dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, otg_state_string(musb->xceiv->state)); spin_lock_irqsave(&musb->lock, flags); switch (musb->xceiv->state) { case OTG_STATE_A_WAIT_BCON: devctl &= ~MUSB_DEVCTL_SESSION; musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); devctl = musb_readb(musb->mregs, MUSB_DEVCTL); if (devctl & MUSB_DEVCTL_BDEVICE) { musb->xceiv->state = OTG_STATE_B_IDLE; MUSB_DEV_MODE(musb); } else { musb->xceiv->state = OTG_STATE_A_IDLE; MUSB_HST_MODE(musb); } break; case OTG_STATE_A_WAIT_VFALL: musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG, MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT); break; case OTG_STATE_B_IDLE: if (!is_peripheral_enabled(musb)) break; devctl = musb_readb(mregs, MUSB_DEVCTL); if (devctl & MUSB_DEVCTL_BDEVICE) mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); else musb->xceiv->state = OTG_STATE_A_IDLE; break; default: break; } 
spin_unlock_irqrestore(&musb->lock, flags); } static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout) { static unsigned long last_timer; if (!is_otg_enabled(musb)) return; if (timeout == 0) timeout = jiffies + msecs_to_jiffies(3); /* Never idle if active, or when VBUS timeout is not set as host */ if (musb->is_active || (musb->a_wait_bcon == 0 && musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { dev_dbg(musb->controller, "%s active, deleting timer\n", otg_state_string(musb->xceiv->state)); del_timer(&otg_workaround); last_timer = jiffies; return; } if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) { dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n"); return; } last_timer = timeout; dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", otg_state_string(musb->xceiv->state), jiffies_to_msecs(timeout - jiffies)); mod_timer(&otg_workaround, timeout); } static irqreturn_t am35x_musb_interrupt(int irq, void *hci) { struct musb *musb = hci; void __iomem *reg_base = musb->ctrl_base; struct device *dev = musb->controller; struct musb_hdrc_platform_data *plat = dev->platform_data; struct omap_musb_board_data *data = plat->board_data; struct usb_otg *otg = musb->xceiv->otg; unsigned long flags; irqreturn_t ret = IRQ_NONE; u32 epintr, usbintr; spin_lock_irqsave(&musb->lock, flags); /* Get endpoint interrupts */ epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG); if (epintr) { musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr); musb->int_rx = (epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT; musb->int_tx = (epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT; } /* Get usb core interrupts */ usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG); if (!usbintr && !epintr) goto eoi; if (usbintr) { musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr); musb->int_usb = (usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT; } /* * DRVVBUS IRQs are the only proxy we have (a very poor 
one!) for * AM35x's missing ID change IRQ. We need an ID change IRQ to * switch appropriately between halves of the OTG state machine. * Managing DEVCTL.SESSION per Mentor docs requires that we know its * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. * Also, DRVVBUS pulses for SRP (but not at 5V) ... */ if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) { int drvvbus = musb_readl(reg_base, USB_STAT_REG); void __iomem *mregs = musb->mregs; u8 devctl = musb_readb(mregs, MUSB_DEVCTL); int err; err = is_host_enabled(musb) && (musb->int_usb & MUSB_INTR_VBUSERROR); if (err) { /* * The Mentor core doesn't debounce VBUS as needed * to cope with device connect current spikes. This * means it's not uncommon for bus-powered devices * to get VBUS errors during enumeration. * * This is a workaround, but newer RTL from Mentor * seems to allow a better one: "re"-starting sessions * without waiting for VBUS to stop registering in * devctl. */ musb->int_usb &= ~MUSB_INTR_VBUSERROR; musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); WARNING("VBUS error workaround (delay coming)\n"); } else if (is_host_enabled(musb) && drvvbus) { MUSB_HST_MODE(musb); otg->default_a = 1; musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; portstate(musb->port1_status |= USB_PORT_STAT_POWER); del_timer(&otg_workaround); } else { musb->is_active = 0; MUSB_DEV_MODE(musb); otg->default_a = 0; musb->xceiv->state = OTG_STATE_B_IDLE; portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); } /* NOTE: this must complete power-on within 100 ms. */ dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", drvvbus ? "on" : "off", otg_state_string(musb->xceiv->state), err ? " ERROR" : "", devctl); ret = IRQ_HANDLED; } if (musb->int_tx || musb->int_rx || musb->int_usb) ret |= musb_interrupt(musb); eoi: /* EOI needs to be written for the IRQ to be re-asserted. 
*/ if (ret == IRQ_HANDLED || epintr || usbintr) { /* clear level interrupt */ if (data->clear_irq) data->clear_irq(); /* write EOI */ musb_writel(reg_base, USB_END_OF_INTR_REG, 0); } /* Poll for ID change */ if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); spin_unlock_irqrestore(&musb->lock, flags); return ret; } static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode) { struct device *dev = musb->controller; struct musb_hdrc_platform_data *plat = dev->platform_data; struct omap_musb_board_data *data = plat->board_data; int retval = 0; if (data->set_mode) data->set_mode(musb_mode); else retval = -EIO; return retval; } static int am35x_musb_init(struct musb *musb) { struct device *dev = musb->controller; struct musb_hdrc_platform_data *plat = dev->platform_data; struct omap_musb_board_data *data = plat->board_data; void __iomem *reg_base = musb->ctrl_base; u32 rev; musb->mregs += USB_MENTOR_CORE_OFFSET; /* Returns zero if e.g. not clocked */ rev = musb_readl(reg_base, USB_REVISION_REG); if (!rev) return -ENODEV; usb_nop_xceiv_register(); musb->xceiv = usb_get_transceiver(); if (!musb->xceiv) return -ENODEV; if (is_host_enabled(musb)) setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); /* Reset the musb */ if (data->reset) data->reset(); /* Reset the controller */ musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK); /* Start the on-chip PHY and its PLL. */ if (data->set_phy_power) data->set_phy_power(1); msleep(5); musb->isr = am35x_musb_interrupt; /* clear level interrupt */ if (data->clear_irq) data->clear_irq(); return 0; } static int am35x_musb_exit(struct musb *musb) { struct device *dev = musb->controller; struct musb_hdrc_platform_data *plat = dev->platform_data; struct omap_musb_board_data *data = plat->board_data; if (is_host_enabled(musb)) del_timer_sync(&otg_workaround); /* Shutdown the on-chip PHY and its PLL. 
*/ if (data->set_phy_power) data->set_phy_power(0); usb_put_transceiver(musb->xceiv); usb_nop_xceiv_unregister(); return 0; } /* AM35x supports only 32bit read operation */ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) { void __iomem *fifo = hw_ep->fifo; u32 val; int i; /* Read for 32bit-aligned destination address */ if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) { readsl(fifo, dst, len >> 2); dst += len & ~0x03; len &= 0x03; } /* * Now read the remaining 1 to 3 byte or complete length if * unaligned address. */ if (len > 4) { for (i = 0; i < (len >> 2); i++) { *(u32 *) dst = musb_readl(fifo, 0); dst += 4; } len &= 0x03; } if (len > 0) { val = musb_readl(fifo, 0); memcpy(dst, &val, len); } } static const struct musb_platform_ops am35x_ops = { .init = am35x_musb_init, .exit = am35x_musb_exit, .enable = am35x_musb_enable, .disable = am35x_musb_disable, .set_mode = am35x_musb_set_mode, .try_idle = am35x_musb_try_idle, .set_vbus = am35x_musb_set_vbus, }; static u64 am35x_dmamask = DMA_BIT_MASK(32); static int __devinit am35x_probe(struct platform_device *pdev) { struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; struct platform_device *musb; struct am35x_glue *glue; struct clk *phy_clk; struct clk *clk; int ret = -ENOMEM; glue = kzalloc(sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(&pdev->dev, "failed to allocate glue context\n"); goto err0; } musb = platform_device_alloc("musb-hdrc", -1); if (!musb) { dev_err(&pdev->dev, "failed to allocate musb device\n"); goto err1; } phy_clk = clk_get(&pdev->dev, "fck"); if (IS_ERR(phy_clk)) { dev_err(&pdev->dev, "failed to get PHY clock\n"); ret = PTR_ERR(phy_clk); goto err2; } clk = clk_get(&pdev->dev, "ick"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "failed to get clock\n"); ret = PTR_ERR(clk); goto err3; } ret = clk_enable(phy_clk); if (ret) { dev_err(&pdev->dev, "failed to enable PHY clock\n"); goto err4; } ret = clk_enable(clk); if (ret) { dev_err(&pdev->dev, "failed to enable 
clock\n"); goto err5; } musb->dev.parent = &pdev->dev; musb->dev.dma_mask = &am35x_dmamask; musb->dev.coherent_dma_mask = am35x_dmamask; glue->dev = &pdev->dev; glue->musb = musb; glue->phy_clk = phy_clk; glue->clk = clk; pdata->platform_ops = &am35x_ops; platform_set_drvdata(pdev, glue); ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources); if (ret) { dev_err(&pdev->dev, "failed to add resources\n"); goto err6; } ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); if (ret) { dev_err(&pdev->dev, "failed to add platform_data\n"); goto err6; } ret = platform_device_add(musb); if (ret) { dev_err(&pdev->dev, "failed to register musb device\n"); goto err6; } return 0; err6: clk_disable(clk); err5: clk_disable(phy_clk); err4: clk_put(clk); err3: clk_put(phy_clk); err2: platform_device_put(musb); err1: kfree(glue); err0: return ret; } static int __devexit am35x_remove(struct platform_device *pdev) { struct am35x_glue *glue = platform_get_drvdata(pdev); platform_device_del(glue->musb); platform_device_put(glue->musb); clk_disable(glue->clk); clk_disable(glue->phy_clk); clk_put(glue->clk); clk_put(glue->phy_clk); kfree(glue); return 0; } #ifdef CONFIG_PM static int am35x_suspend(struct device *dev) { struct am35x_glue *glue = dev_get_drvdata(dev); struct musb_hdrc_platform_data *plat = dev->platform_data; struct omap_musb_board_data *data = plat->board_data; /* Shutdown the on-chip PHY and its PLL. */ if (data->set_phy_power) data->set_phy_power(0); clk_disable(glue->phy_clk); clk_disable(glue->clk); return 0; } static int am35x_resume(struct device *dev) { struct am35x_glue *glue = dev_get_drvdata(dev); struct musb_hdrc_platform_data *plat = dev->platform_data; struct omap_musb_board_data *data = plat->board_data; int ret; /* Start the on-chip PHY and its PLL. 
*/ if (data->set_phy_power) data->set_phy_power(1); ret = clk_enable(glue->phy_clk); if (ret) { dev_err(dev, "failed to enable PHY clock\n"); return ret; } ret = clk_enable(glue->clk); if (ret) { dev_err(dev, "failed to enable clock\n"); return ret; } return 0; } static struct dev_pm_ops am35x_pm_ops = { .suspend = am35x_suspend, .resume = am35x_resume, }; #define DEV_PM_OPS &am35x_pm_ops #else #define DEV_PM_OPS NULL #endif static struct platform_driver am35x_driver = { .probe = am35x_probe, .remove = __devexit_p(am35x_remove), .driver = { .name = "musb-am35x", .pm = DEV_PM_OPS, }, }; MODULE_DESCRIPTION("AM35x MUSB Glue Layer"); MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>"); MODULE_LICENSE("GPL v2"); static int __init am35x_init(void) { return platform_driver_register(&am35x_driver); } module_init(am35x_init); static void __exit am35x_exit(void) { platform_driver_unregister(&am35x_driver); } module_exit(am35x_exit);
gpl-2.0
GreatDevs/kernel_sony_msm8974
net/wireless/debugfs.c
7395
3115
/*
 * cfg80211 debugfs
 *
 * Copyright 2009	Luis R. Rodriguez <lrodriguez@atheros.com>
 * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "core.h"
#include "debugfs.h"

/*
 * Stamp out a read-only debugfs file plus its file_operations for a
 * single wiphy field: the read handler formats the value with @fmt
 * into a stack buffer of @buflen bytes.
 */
#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...)		\
static ssize_t name## _read(struct file *file, char __user *userbuf,	\
			    size_t count, loff_t *ppos)			\
{									\
	struct wiphy *wiphy = file->private_data;			\
	char buf[buflen];						\
	int res;							\
									\
	res = scnprintf(buf, buflen, fmt "\n", ##value);		\
	return simple_read_from_buffer(userbuf, count, ppos, buf, res);	\
}									\
									\
static const struct file_operations name## _ops = {			\
	.read = name## _read,						\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", wiphy->rts_threshold)
DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
		      wiphy->frag_threshold);
DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d", wiphy->retry_short)
DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d", wiphy->retry_long);

/*
 * Append one channel's HT40 allowance line to @buf at @offset.
 * Returns the number of characters snprintf reported, 0 when the
 * buffer is already (unexpectedly) full.
 */
static int ht_print_chan(struct ieee80211_channel *chan,
			 char *buf, int buf_size, int offset)
{
	if (WARN_ON(offset > buf_size))
		return 0;

	if (chan->flags & IEEE80211_CHAN_DISABLED)
		return snprintf(buf + offset, buf_size - offset,
				"%d Disabled\n",
				chan->center_freq);

	return snprintf(buf + offset, buf_size - offset,
			"%d HT40 %c%c\n", chan->center_freq,
			(chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-',
			(chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ? ' ' : '+');
}

/* Dump the HT40 allowance map for every channel of every band */
static ssize_t ht40allow_map_read(struct file *file,
				  char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	struct wiphy *wiphy = file->private_data;
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int offset = 0, buf_size = PAGE_SIZE, chan_idx, r;
	char *buf;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&cfg80211_mutex);

	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		sband = wiphy->bands[band];
		if (!sband)
			continue;
		for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++)
			offset += ht_print_chan(&sband->channels[chan_idx],
						buf, buf_size, offset);
	}

	mutex_unlock(&cfg80211_mutex);

	r = simple_read_from_buffer(user_buf, count, ppos, buf, offset);

	kfree(buf);

	return r;
}

static const struct file_operations ht40allow_map_ops = {
	.read = ht40allow_map_read,
	.open = simple_open,
	.llseek = default_llseek,
};

#define DEBUGFS_ADD(name)						\
	debugfs_create_file(#name, S_IRUGO, phyd, &rdev->wiphy,		\
			    &name## _ops);

/* Populate the per-wiphy debugfs directory with all read-only files */
void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev)
{
	struct dentry *phyd = rdev->wiphy.debugfsdir;

	DEBUGFS_ADD(rts_threshold);
	DEBUGFS_ADD(fragmentation_threshold);
	DEBUGFS_ADD(short_retry_limit);
	DEBUGFS_ADD(long_retry_limit);
	DEBUGFS_ADD(ht40allow_map);
}
gpl-2.0
onealtom/MYD-C335X-Linux-Kernel
arch/mips/lasat/lasat_board.c
14051
7154
/* * Thomas Horsten <thh@lasat.com> * Copyright (C) 2000 LASAT Networks A/S. * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * Routines specific to the LASAT boards */ #include <linux/types.h> #include <linux/crc32.h> #include <asm/lasat/lasat.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/mutex.h> #include <asm/addrspace.h> #include "at93c.h" /* New model description table */ #include "lasat_models.h" static DEFINE_MUTEX(lasat_eeprom_mutex); #define EEPROM_CRC(data, len) (~crc32(~0, data, len)) struct lasat_info lasat_board_info; int EEPROMRead(unsigned int pos, unsigned char *data, int len) { int i; for (i = 0; i < len; i++) *data++ = at93c_read(pos++); return 0; } int EEPROMWrite(unsigned int pos, unsigned char *data, int len) { int i; for (i = 0; i < len; i++) at93c_write(pos++, *data++); return 0; } static void init_flash_sizes(void) { unsigned long *lb = lasat_board_info.li_flashpart_base; unsigned long *ls = lasat_board_info.li_flashpart_size; int i; ls[LASAT_MTD_BOOTLOADER] = 0x40000; ls[LASAT_MTD_SERVICE] = 0xC0000; ls[LASAT_MTD_NORMAL] = 0x100000; if (!IS_LASAT_200()) { lasat_board_info.li_flash_base = 0x1e000000; lb[LASAT_MTD_BOOTLOADER] = 0x1e400000; if (lasat_board_info.li_flash_size > 0x200000) { ls[LASAT_MTD_CONFIG] = 0x100000; ls[LASAT_MTD_FS] = 0x500000; } } else { lasat_board_info.li_flash_base = 0x10000000; if 
(lasat_board_info.li_flash_size < 0x1000000) { lb[LASAT_MTD_BOOTLOADER] = 0x10000000; ls[LASAT_MTD_CONFIG] = 0x100000; if (lasat_board_info.li_flash_size >= 0x400000) ls[LASAT_MTD_FS] = lasat_board_info.li_flash_size - 0x300000; } } for (i = 1; i < LASAT_MTD_LAST; i++) lb[i] = lb[i-1] + ls[i-1]; } int lasat_init_board_info(void) { int c; unsigned long crc; unsigned long cfg0, cfg1; const struct product_info *ppi; int i_n_base_models = N_BASE_MODELS; const char * const * i_txt_base_models = txt_base_models; int i_n_prids = N_PRIDS; memset(&lasat_board_info, 0, sizeof(lasat_board_info)); /* First read the EEPROM info */ EEPROMRead(0, (unsigned char *)&lasat_board_info.li_eeprom_info, sizeof(struct lasat_eeprom_struct)); /* Check the CRC */ crc = EEPROM_CRC((unsigned char *)(&lasat_board_info.li_eeprom_info), sizeof(struct lasat_eeprom_struct) - 4); if (crc != lasat_board_info.li_eeprom_info.crc32) { printk(KERN_WARNING "WARNING...\nWARNING...\nEEPROM CRC does " "not match calculated, attempting to soldier on...\n"); } if (lasat_board_info.li_eeprom_info.version != LASAT_EEPROM_VERSION) { printk(KERN_WARNING "WARNING...\nWARNING...\nEEPROM version " "%d, wanted version %d, attempting to soldier on...\n", (unsigned int)lasat_board_info.li_eeprom_info.version, LASAT_EEPROM_VERSION); } cfg0 = lasat_board_info.li_eeprom_info.cfg[0]; cfg1 = lasat_board_info.li_eeprom_info.cfg[1]; if (LASAT_W0_DSCTYPE(cfg0) != 1) { printk(KERN_WARNING "WARNING...\nWARNING...\n" "Invalid configuration read from EEPROM, attempting to " "soldier on..."); } /* We have a valid configuration */ switch (LASAT_W0_SDRAMBANKSZ(cfg0)) { case 0: lasat_board_info.li_memsize = 0x0800000; break; case 1: lasat_board_info.li_memsize = 0x1000000; break; case 2: lasat_board_info.li_memsize = 0x2000000; break; case 3: lasat_board_info.li_memsize = 0x4000000; break; case 4: lasat_board_info.li_memsize = 0x8000000; break; default: lasat_board_info.li_memsize = 0; } switch (LASAT_W0_SDRAMBANKS(cfg0)) { case 0: 
break; case 1: lasat_board_info.li_memsize *= 2; break; default: break; } switch (LASAT_W0_BUSSPEED(cfg0)) { case 0x0: lasat_board_info.li_bus_hz = 60000000; break; case 0x1: lasat_board_info.li_bus_hz = 66000000; break; case 0x2: lasat_board_info.li_bus_hz = 66666667; break; case 0x3: lasat_board_info.li_bus_hz = 80000000; break; case 0x4: lasat_board_info.li_bus_hz = 83333333; break; case 0x5: lasat_board_info.li_bus_hz = 100000000; break; } switch (LASAT_W0_CPUCLK(cfg0)) { case 0x0: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz; break; case 0x1: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz + (lasat_board_info.li_bus_hz >> 1); break; case 0x2: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz + lasat_board_info.li_bus_hz; break; case 0x3: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz + lasat_board_info.li_bus_hz + (lasat_board_info.li_bus_hz >> 1); break; case 0x4: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz + lasat_board_info.li_bus_hz + lasat_board_info.li_bus_hz; break; } /* Flash size */ switch (LASAT_W1_FLASHSIZE(cfg1)) { case 0: lasat_board_info.li_flash_size = 0x200000; break; case 1: lasat_board_info.li_flash_size = 0x400000; break; case 2: lasat_board_info.li_flash_size = 0x800000; break; case 3: lasat_board_info.li_flash_size = 0x1000000; break; case 4: lasat_board_info.li_flash_size = 0x2000000; break; } init_flash_sizes(); lasat_board_info.li_bmid = LASAT_W0_BMID(cfg0); lasat_board_info.li_prid = lasat_board_info.li_eeprom_info.prid; if (lasat_board_info.li_prid == 0xffff || lasat_board_info.li_prid == 0) lasat_board_info.li_prid = lasat_board_info.li_bmid; /* Base model stuff */ if (lasat_board_info.li_bmid > i_n_base_models) lasat_board_info.li_bmid = i_n_base_models; strcpy(lasat_board_info.li_bmstr, i_txt_base_models[lasat_board_info.li_bmid]); /* Product ID dependent values */ c = lasat_board_info.li_prid; if (c >= i_n_prids) { strcpy(lasat_board_info.li_namestr, "Unknown Model"); 
strcpy(lasat_board_info.li_typestr, "Unknown Type"); } else { ppi = &vendor_info_table[0].vi_product_info[c]; strcpy(lasat_board_info.li_namestr, ppi->pi_name); if (ppi->pi_type) strcpy(lasat_board_info.li_typestr, ppi->pi_type); else sprintf(lasat_board_info.li_typestr, "%d", 10 * c); } return 0; } void lasat_write_eeprom_info(void) { unsigned long crc; mutex_lock(&lasat_eeprom_mutex); /* Generate the CRC */ crc = EEPROM_CRC((unsigned char *)(&lasat_board_info.li_eeprom_info), sizeof(struct lasat_eeprom_struct) - 4); lasat_board_info.li_eeprom_info.crc32 = crc; /* Write the EEPROM info */ EEPROMWrite(0, (unsigned char *)&lasat_board_info.li_eeprom_info, sizeof(struct lasat_eeprom_struct)); mutex_unlock(&lasat_eeprom_mutex); }
gpl-2.0
HarveyHunt/CI20_linux
arch/mips/lasat/lasat_board.c
14051
7154
/* * Thomas Horsten <thh@lasat.com> * Copyright (C) 2000 LASAT Networks A/S. * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * Routines specific to the LASAT boards */ #include <linux/types.h> #include <linux/crc32.h> #include <asm/lasat/lasat.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/mutex.h> #include <asm/addrspace.h> #include "at93c.h" /* New model description table */ #include "lasat_models.h" static DEFINE_MUTEX(lasat_eeprom_mutex); #define EEPROM_CRC(data, len) (~crc32(~0, data, len)) struct lasat_info lasat_board_info; int EEPROMRead(unsigned int pos, unsigned char *data, int len) { int i; for (i = 0; i < len; i++) *data++ = at93c_read(pos++); return 0; } int EEPROMWrite(unsigned int pos, unsigned char *data, int len) { int i; for (i = 0; i < len; i++) at93c_write(pos++, *data++); return 0; } static void init_flash_sizes(void) { unsigned long *lb = lasat_board_info.li_flashpart_base; unsigned long *ls = lasat_board_info.li_flashpart_size; int i; ls[LASAT_MTD_BOOTLOADER] = 0x40000; ls[LASAT_MTD_SERVICE] = 0xC0000; ls[LASAT_MTD_NORMAL] = 0x100000; if (!IS_LASAT_200()) { lasat_board_info.li_flash_base = 0x1e000000; lb[LASAT_MTD_BOOTLOADER] = 0x1e400000; if (lasat_board_info.li_flash_size > 0x200000) { ls[LASAT_MTD_CONFIG] = 0x100000; ls[LASAT_MTD_FS] = 0x500000; } } else { lasat_board_info.li_flash_base = 0x10000000; if 
(lasat_board_info.li_flash_size < 0x1000000) { lb[LASAT_MTD_BOOTLOADER] = 0x10000000; ls[LASAT_MTD_CONFIG] = 0x100000; if (lasat_board_info.li_flash_size >= 0x400000) ls[LASAT_MTD_FS] = lasat_board_info.li_flash_size - 0x300000; } } for (i = 1; i < LASAT_MTD_LAST; i++) lb[i] = lb[i-1] + ls[i-1]; } int lasat_init_board_info(void) { int c; unsigned long crc; unsigned long cfg0, cfg1; const struct product_info *ppi; int i_n_base_models = N_BASE_MODELS; const char * const * i_txt_base_models = txt_base_models; int i_n_prids = N_PRIDS; memset(&lasat_board_info, 0, sizeof(lasat_board_info)); /* First read the EEPROM info */ EEPROMRead(0, (unsigned char *)&lasat_board_info.li_eeprom_info, sizeof(struct lasat_eeprom_struct)); /* Check the CRC */ crc = EEPROM_CRC((unsigned char *)(&lasat_board_info.li_eeprom_info), sizeof(struct lasat_eeprom_struct) - 4); if (crc != lasat_board_info.li_eeprom_info.crc32) { printk(KERN_WARNING "WARNING...\nWARNING...\nEEPROM CRC does " "not match calculated, attempting to soldier on...\n"); } if (lasat_board_info.li_eeprom_info.version != LASAT_EEPROM_VERSION) { printk(KERN_WARNING "WARNING...\nWARNING...\nEEPROM version " "%d, wanted version %d, attempting to soldier on...\n", (unsigned int)lasat_board_info.li_eeprom_info.version, LASAT_EEPROM_VERSION); } cfg0 = lasat_board_info.li_eeprom_info.cfg[0]; cfg1 = lasat_board_info.li_eeprom_info.cfg[1]; if (LASAT_W0_DSCTYPE(cfg0) != 1) { printk(KERN_WARNING "WARNING...\nWARNING...\n" "Invalid configuration read from EEPROM, attempting to " "soldier on..."); } /* We have a valid configuration */ switch (LASAT_W0_SDRAMBANKSZ(cfg0)) { case 0: lasat_board_info.li_memsize = 0x0800000; break; case 1: lasat_board_info.li_memsize = 0x1000000; break; case 2: lasat_board_info.li_memsize = 0x2000000; break; case 3: lasat_board_info.li_memsize = 0x4000000; break; case 4: lasat_board_info.li_memsize = 0x8000000; break; default: lasat_board_info.li_memsize = 0; } switch (LASAT_W0_SDRAMBANKS(cfg0)) { case 0: 
break; case 1: lasat_board_info.li_memsize *= 2; break; default: break; } switch (LASAT_W0_BUSSPEED(cfg0)) { case 0x0: lasat_board_info.li_bus_hz = 60000000; break; case 0x1: lasat_board_info.li_bus_hz = 66000000; break; case 0x2: lasat_board_info.li_bus_hz = 66666667; break; case 0x3: lasat_board_info.li_bus_hz = 80000000; break; case 0x4: lasat_board_info.li_bus_hz = 83333333; break; case 0x5: lasat_board_info.li_bus_hz = 100000000; break; } switch (LASAT_W0_CPUCLK(cfg0)) { case 0x0: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz; break; case 0x1: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz + (lasat_board_info.li_bus_hz >> 1); break; case 0x2: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz + lasat_board_info.li_bus_hz; break; case 0x3: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz + lasat_board_info.li_bus_hz + (lasat_board_info.li_bus_hz >> 1); break; case 0x4: lasat_board_info.li_cpu_hz = lasat_board_info.li_bus_hz + lasat_board_info.li_bus_hz + lasat_board_info.li_bus_hz; break; } /* Flash size */ switch (LASAT_W1_FLASHSIZE(cfg1)) { case 0: lasat_board_info.li_flash_size = 0x200000; break; case 1: lasat_board_info.li_flash_size = 0x400000; break; case 2: lasat_board_info.li_flash_size = 0x800000; break; case 3: lasat_board_info.li_flash_size = 0x1000000; break; case 4: lasat_board_info.li_flash_size = 0x2000000; break; } init_flash_sizes(); lasat_board_info.li_bmid = LASAT_W0_BMID(cfg0); lasat_board_info.li_prid = lasat_board_info.li_eeprom_info.prid; if (lasat_board_info.li_prid == 0xffff || lasat_board_info.li_prid == 0) lasat_board_info.li_prid = lasat_board_info.li_bmid; /* Base model stuff */ if (lasat_board_info.li_bmid > i_n_base_models) lasat_board_info.li_bmid = i_n_base_models; strcpy(lasat_board_info.li_bmstr, i_txt_base_models[lasat_board_info.li_bmid]); /* Product ID dependent values */ c = lasat_board_info.li_prid; if (c >= i_n_prids) { strcpy(lasat_board_info.li_namestr, "Unknown Model"); 
strcpy(lasat_board_info.li_typestr, "Unknown Type"); } else { ppi = &vendor_info_table[0].vi_product_info[c]; strcpy(lasat_board_info.li_namestr, ppi->pi_name); if (ppi->pi_type) strcpy(lasat_board_info.li_typestr, ppi->pi_type); else sprintf(lasat_board_info.li_typestr, "%d", 10 * c); } return 0; } void lasat_write_eeprom_info(void) { unsigned long crc; mutex_lock(&lasat_eeprom_mutex); /* Generate the CRC */ crc = EEPROM_CRC((unsigned char *)(&lasat_board_info.li_eeprom_info), sizeof(struct lasat_eeprom_struct) - 4); lasat_board_info.li_eeprom_info.crc32 = crc; /* Write the EEPROM info */ EEPROMWrite(0, (unsigned char *)&lasat_board_info.li_eeprom_info, sizeof(struct lasat_eeprom_struct)); mutex_unlock(&lasat_eeprom_mutex); }
gpl-2.0
manashmndl/CHIP-linux
drivers/staging/gdm72xx/gdm_wimax.c
228
18592
/* * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/etherdevice.h> #include <asm/byteorder.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/udp.h> #include <linux/in.h> #include "gdm_wimax.h" #include "hci.h" #include "wm_ioctl.h" #include "netlink_k.h" #define gdm_wimax_send(n, d, l) \ (n->phy_dev->send_func)(n->phy_dev->priv_dev, d, l, NULL, NULL) #define gdm_wimax_send_with_cb(n, d, l, c, b) \ (n->phy_dev->send_func)(n->phy_dev->priv_dev, d, l, c, b) #define gdm_wimax_rcv_with_cb(n, c, b) \ (n->phy_dev->rcv_func)(n->phy_dev->priv_dev, c, b) #define EVT_MAX_SIZE 2048 struct evt_entry { struct list_head list; struct net_device *dev; char evt_data[EVT_MAX_SIZE]; int size; }; static struct { int ref_cnt; struct sock *sock; struct list_head evtq; spinlock_t evt_lock; struct list_head freeq; struct work_struct ws; } wm_event; static u8 gdm_wimax_macaddr[6] = {0x00, 0x0a, 0x3b, 0xf0, 0x01, 0x30}; static inline int gdm_wimax_header(struct sk_buff **pskb) { u16 buf[HCI_HEADER_SIZE / sizeof(u16)]; struct hci_s *hci = (struct hci_s *)buf; struct sk_buff *skb = *pskb; if (unlikely(skb_headroom(skb) < HCI_HEADER_SIZE)) { struct sk_buff *skb2; skb2 = skb_realloc_headroom(skb, HCI_HEADER_SIZE); if (skb2 == NULL) return -ENOMEM; if (skb->sk) skb_set_owner_w(skb2, skb->sk); kfree_skb(skb); skb = skb2; } skb_push(skb, HCI_HEADER_SIZE); hci->cmd_evt = cpu_to_be16(WIMAX_TX_SDU); hci->length = cpu_to_be16(skb->len - HCI_HEADER_SIZE); 
memcpy(skb->data, buf, HCI_HEADER_SIZE); *pskb = skb; return 0; } static inline struct evt_entry *alloc_event_entry(void) { return kmalloc(sizeof(struct evt_entry), GFP_ATOMIC); } static inline void free_event_entry(struct evt_entry *e) { kfree(e); } static struct evt_entry *get_event_entry(void) { struct evt_entry *e; if (list_empty(&wm_event.freeq)) { e = alloc_event_entry(); } else { e = list_entry(wm_event.freeq.next, struct evt_entry, list); list_del(&e->list); } return e; } static void put_event_entry(struct evt_entry *e) { BUG_ON(!e); list_add_tail(&e->list, &wm_event.freeq); } static void gdm_wimax_event_rcv(struct net_device *dev, u16 type, void *msg, int len) { struct nic *nic = netdev_priv(dev); u8 *buf = msg; u16 hci_cmd = (buf[0]<<8) | buf[1]; u16 hci_len = (buf[2]<<8) | buf[3]; netdev_dbg(dev, "H=>D: 0x%04x(%d)\n", hci_cmd, hci_len); gdm_wimax_send(nic, msg, len); } static void __gdm_wimax_event_send(struct work_struct *work) { int idx; unsigned long flags; struct evt_entry *e; struct evt_entry *tmp; spin_lock_irqsave(&wm_event.evt_lock, flags); list_for_each_entry_safe(e, tmp, &wm_event.evtq, list) { spin_unlock_irqrestore(&wm_event.evt_lock, flags); if (sscanf(e->dev->name, "wm%d", &idx) == 1) netlink_send(wm_event.sock, idx, 0, e->evt_data, e->size); spin_lock_irqsave(&wm_event.evt_lock, flags); list_del(&e->list); put_event_entry(e); } spin_unlock_irqrestore(&wm_event.evt_lock, flags); } static int gdm_wimax_event_init(void) { if (!wm_event.ref_cnt) { wm_event.sock = netlink_init(NETLINK_WIMAX, gdm_wimax_event_rcv); if (wm_event.sock) { INIT_LIST_HEAD(&wm_event.evtq); INIT_LIST_HEAD(&wm_event.freeq); INIT_WORK(&wm_event.ws, __gdm_wimax_event_send); spin_lock_init(&wm_event.evt_lock); } } if (wm_event.sock) { wm_event.ref_cnt++; return 0; } pr_err("Creating WiMax Event netlink is failed\n"); return -1; } static void gdm_wimax_event_exit(void) { if (wm_event.sock && --wm_event.ref_cnt == 0) { struct evt_entry *e, *temp; unsigned long flags; 
spin_lock_irqsave(&wm_event.evt_lock, flags); list_for_each_entry_safe(e, temp, &wm_event.evtq, list) { list_del(&e->list); free_event_entry(e); } list_for_each_entry_safe(e, temp, &wm_event.freeq, list) { list_del(&e->list); free_event_entry(e); } spin_unlock_irqrestore(&wm_event.evt_lock, flags); netlink_exit(wm_event.sock); wm_event.sock = NULL; } } static int gdm_wimax_event_send(struct net_device *dev, char *buf, int size) { struct evt_entry *e; unsigned long flags; u16 hci_cmd = ((u8)buf[0]<<8) | (u8)buf[1]; u16 hci_len = ((u8)buf[2]<<8) | (u8)buf[3]; netdev_dbg(dev, "D=>H: 0x%04x(%d)\n", hci_cmd, hci_len); spin_lock_irqsave(&wm_event.evt_lock, flags); e = get_event_entry(); if (!e) { netdev_err(dev, "%s: No memory for event\n", __func__); spin_unlock_irqrestore(&wm_event.evt_lock, flags); return -ENOMEM; } e->dev = dev; e->size = size; memcpy(e->evt_data, buf, size); list_add_tail(&e->list, &wm_event.evtq); spin_unlock_irqrestore(&wm_event.evt_lock, flags); schedule_work(&wm_event.ws); return 0; } static void tx_complete(void *arg) { struct nic *nic = arg; if (netif_queue_stopped(nic->netdev)) netif_wake_queue(nic->netdev); } int gdm_wimax_send_tx(struct sk_buff *skb, struct net_device *dev) { int ret = 0; struct nic *nic = netdev_priv(dev); ret = gdm_wimax_send_with_cb(nic, skb->data, skb->len, tx_complete, nic); if (ret == -ENOSPC) { netif_stop_queue(dev); ret = 0; } if (ret) { skb_pull(skb, HCI_HEADER_SIZE); return ret; } dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len - HCI_HEADER_SIZE; kfree_skb(skb); return ret; } static int gdm_wimax_tx(struct sk_buff *skb, struct net_device *dev) { int ret = 0; ret = gdm_wimax_header(&skb); if (ret < 0) { skb_pull(skb, HCI_HEADER_SIZE); return ret; } #if defined(CONFIG_WIMAX_GDM72XX_QOS) ret = gdm_qos_send_hci_pkt(skb, dev); #else ret = gdm_wimax_send_tx(skb, dev); #endif return ret; } static int gdm_wimax_set_config(struct net_device *dev, struct ifmap *map) { if (dev->flags & IFF_UP) return -EBUSY; return 
0; } static void __gdm_wimax_set_mac_addr(struct net_device *dev, char *mac_addr) { u16 hci_pkt_buf[32 / sizeof(u16)]; struct hci_s *hci = (struct hci_s *)hci_pkt_buf; struct nic *nic = netdev_priv(dev); /* Since dev is registered as a ethernet device, * ether_setup has made dev->addr_len to be ETH_ALEN */ memcpy(dev->dev_addr, mac_addr, dev->addr_len); /* Let lower layer know of this change by sending * SetInformation(MAC Address) */ hci->cmd_evt = cpu_to_be16(WIMAX_SET_INFO); hci->length = cpu_to_be16(8); hci->data[0] = 0; /* T */ hci->data[1] = 6; /* L */ memcpy(&hci->data[2], mac_addr, dev->addr_len); /* V */ gdm_wimax_send(nic, hci, HCI_HEADER_SIZE + 8); } /* A driver function */ static int gdm_wimax_set_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; __gdm_wimax_set_mac_addr(dev, addr->sa_data); return 0; } static void gdm_wimax_ind_if_updown(struct net_device *dev, int if_up) { u16 buf[32 / sizeof(u16)]; struct hci_s *hci = (struct hci_s *)buf; unsigned char up_down; up_down = if_up ? 
WIMAX_IF_UP : WIMAX_IF_DOWN; /* Indicate updating fsm */ hci->cmd_evt = cpu_to_be16(WIMAX_IF_UPDOWN); hci->length = cpu_to_be16(sizeof(up_down)); hci->data[0] = up_down; gdm_wimax_event_send(dev, (char *)hci, HCI_HEADER_SIZE+sizeof(up_down)); } static int gdm_wimax_open(struct net_device *dev) { struct nic *nic = netdev_priv(dev); struct fsm_s *fsm = (struct fsm_s *)nic->sdk_data[SIOC_DATA_FSM].buf; netif_start_queue(dev); if (fsm && fsm->m_status != M_INIT) gdm_wimax_ind_if_updown(dev, 1); return 0; } static int gdm_wimax_close(struct net_device *dev) { struct nic *nic = netdev_priv(dev); struct fsm_s *fsm = (struct fsm_s *)nic->sdk_data[SIOC_DATA_FSM].buf; netif_stop_queue(dev); if (fsm && fsm->m_status != M_INIT) gdm_wimax_ind_if_updown(dev, 0); return 0; } static void kdelete(void **buf) { if (buf && *buf) { kfree(*buf); *buf = NULL; } } static int gdm_wimax_ioctl_get_data(struct data_s *dst, struct data_s *src) { int size; size = dst->size < src->size ? dst->size : src->size; dst->size = size; if (src->size) { if (!dst->buf) return -EINVAL; if (copy_to_user((void __user *)dst->buf, src->buf, size)) return -EFAULT; } return 0; } static int gdm_wimax_ioctl_set_data(struct data_s *dst, struct data_s *src) { if (!src->size) { dst->size = 0; return 0; } if (!src->buf) return -EINVAL; if (!(dst->buf && dst->size == src->size)) { kdelete(&dst->buf); dst->buf = kmalloc(src->size, GFP_KERNEL); if (dst->buf == NULL) return -ENOMEM; } if (copy_from_user(dst->buf, (void __user *)src->buf, src->size)) { kdelete(&dst->buf); return -EFAULT; } dst->size = src->size; return 0; } static void gdm_wimax_cleanup_ioctl(struct net_device *dev) { struct nic *nic = netdev_priv(dev); int i; for (i = 0; i < SIOC_DATA_MAX; i++) kdelete(&nic->sdk_data[i].buf); } static void gdm_wimax_ind_fsm_update(struct net_device *dev, struct fsm_s *fsm) { u16 buf[32 / sizeof(u16)]; struct hci_s *hci = (struct hci_s *)buf; /* Indicate updating fsm */ hci->cmd_evt = cpu_to_be16(WIMAX_FSM_UPDATE); 
hci->length = cpu_to_be16(sizeof(struct fsm_s)); memcpy(&hci->data[0], fsm, sizeof(struct fsm_s)); gdm_wimax_event_send(dev, (char *)hci, HCI_HEADER_SIZE + sizeof(struct fsm_s)); } static void gdm_update_fsm(struct net_device *dev, struct fsm_s *new_fsm) { struct nic *nic = netdev_priv(dev); struct fsm_s *cur_fsm = (struct fsm_s *) nic->sdk_data[SIOC_DATA_FSM].buf; if (!cur_fsm) return; if (cur_fsm->m_status != new_fsm->m_status || cur_fsm->c_status != new_fsm->c_status) { if (new_fsm->m_status == M_CONNECTED) { netif_carrier_on(dev); } else if (cur_fsm->m_status == M_CONNECTED) { netif_carrier_off(dev); #if defined(CONFIG_WIMAX_GDM72XX_QOS) gdm_qos_release_list(nic); #endif } gdm_wimax_ind_fsm_update(dev, new_fsm); } } static int gdm_wimax_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct wm_req_s *req = (struct wm_req_s *)ifr; struct nic *nic = netdev_priv(dev); int ret; if (cmd != SIOCWMIOCTL) return -EOPNOTSUPP; switch (req->cmd) { case SIOCG_DATA: case SIOCS_DATA: if (req->data_id >= SIOC_DATA_MAX) { netdev_err(dev, "%s error: data-index(%d) is invalid!!\n", __func__, req->data_id); return -EOPNOTSUPP; } if (req->cmd == SIOCG_DATA) { ret = gdm_wimax_ioctl_get_data( &req->data, &nic->sdk_data[req->data_id]); if (ret < 0) return ret; } else if (req->cmd == SIOCS_DATA) { if (req->data_id == SIOC_DATA_FSM) { /* NOTE: gdm_update_fsm should be called * before gdm_wimax_ioctl_set_data is called. 
*/ gdm_update_fsm(dev, (struct fsm_s *)req->data.buf); } ret = gdm_wimax_ioctl_set_data( &nic->sdk_data[req->data_id], &req->data); if (ret < 0) return ret; } break; default: netdev_err(dev, "%s: %x unknown ioctl\n", __func__, cmd); return -EOPNOTSUPP; } return 0; } static void gdm_wimax_prepare_device(struct net_device *dev) { struct nic *nic = netdev_priv(dev); u16 buf[32 / sizeof(u16)]; struct hci_s *hci = (struct hci_s *)buf; u16 len = 0; u32 val = 0; __be32 val_be32; /* GetInformation mac address */ len = 0; hci->cmd_evt = cpu_to_be16(WIMAX_GET_INFO); hci->data[len++] = TLV_T(T_MAC_ADDRESS); hci->length = cpu_to_be16(len); gdm_wimax_send(nic, hci, HCI_HEADER_SIZE+len); val = T_CAPABILITY_WIMAX | T_CAPABILITY_MULTI_CS; #if defined(CONFIG_WIMAX_GDM72XX_QOS) val |= T_CAPABILITY_QOS; #endif #if defined(CONFIG_WIMAX_GDM72XX_WIMAX2) val |= T_CAPABILITY_AGGREGATION; #endif /* Set capability */ len = 0; hci->cmd_evt = cpu_to_be16(WIMAX_SET_INFO); hci->data[len++] = TLV_T(T_CAPABILITY); hci->data[len++] = TLV_L(T_CAPABILITY); val_be32 = cpu_to_be32(val); memcpy(&hci->data[len], &val_be32, TLV_L(T_CAPABILITY)); len += TLV_L(T_CAPABILITY); hci->length = cpu_to_be16(len); gdm_wimax_send(nic, hci, HCI_HEADER_SIZE+len); netdev_info(dev, "GDM WiMax Set CAPABILITY: 0x%08X\n", val); } static int gdm_wimax_hci_get_tlv(u8 *buf, u8 *T, u16 *L, u8 **V) { #define __U82U16(b) ((u16)((u8 *)(b))[0] | ((u16)((u8 *)(b))[1] << 8)) int next_pos; *T = buf[0]; if (buf[1] == 0x82) { *L = be16_to_cpu(__U82U16(&buf[2])); next_pos = 1/*type*/+3/*len*/; } else { *L = buf[1]; next_pos = 1/*type*/+1/*len*/; } *V = &buf[next_pos]; next_pos += *L/*length of val*/; return next_pos; } static int gdm_wimax_get_prepared_info(struct net_device *dev, char *buf, int len) { u8 T, *V; u16 L; u16 cmd_evt, cmd_len; int pos = HCI_HEADER_SIZE; cmd_evt = be16_to_cpup((const __be16 *)&buf[0]); cmd_len = be16_to_cpup((const __be16 *)&buf[2]); if (len < cmd_len + HCI_HEADER_SIZE) { netdev_err(dev, "%s: invalid 
length [%d/%d]\n", __func__, cmd_len + HCI_HEADER_SIZE, len); return -1; } if (cmd_evt == WIMAX_GET_INFO_RESULT) { if (cmd_len < 2) { netdev_err(dev, "%s: len is too short [%x/%d]\n", __func__, cmd_evt, len); return -1; } pos += gdm_wimax_hci_get_tlv(&buf[pos], &T, &L, &V); if (T == TLV_T(T_MAC_ADDRESS)) { if (L != dev->addr_len) { netdev_err(dev, "%s Invalid inofrmation result T/L [%x/%d]\n", __func__, T, L); return -1; } netdev_info(dev, "MAC change [%pM]->[%pM]\n", dev->dev_addr, V); memcpy(dev->dev_addr, V, dev->addr_len); return 1; } } gdm_wimax_event_send(dev, buf, len); return 0; } static void gdm_wimax_netif_rx(struct net_device *dev, char *buf, int len) { struct sk_buff *skb; int ret; skb = dev_alloc_skb(len + 2); if (!skb) return; skb_reserve(skb, 2); dev->stats.rx_packets++; dev->stats.rx_bytes += len; memcpy(skb_put(skb, len), buf, len); skb->dev = dev; skb->protocol = eth_type_trans(skb, dev); /* what will happen? */ ret = in_interrupt() ? netif_rx(skb) : netif_rx_ni(skb); if (ret == NET_RX_DROP) netdev_err(dev, "%s skb dropped\n", __func__); } static void gdm_wimax_transmit_aggr_pkt(struct net_device *dev, char *buf, int len) { #define HCI_PADDING_BYTE 4 #define HCI_RESERVED_BYTE 4 struct hci_s *hci; int length; while (len > 0) { hci = (struct hci_s *)buf; if (hci->cmd_evt != cpu_to_be16(WIMAX_RX_SDU)) { netdev_err(dev, "Wrong cmd_evt(0x%04X)\n", be16_to_cpu(hci->cmd_evt)); break; } length = be16_to_cpu(hci->length); gdm_wimax_netif_rx(dev, hci->data, length); if (length & 0x3) { /* Add padding size */ length += HCI_PADDING_BYTE - (length & 0x3); } length += HCI_HEADER_SIZE + HCI_RESERVED_BYTE; len -= length; buf += length; } } static void gdm_wimax_transmit_pkt(struct net_device *dev, char *buf, int len) { #if defined(CONFIG_WIMAX_GDM72XX_QOS) struct nic *nic = netdev_priv(dev); #endif u16 cmd_evt, cmd_len; /* This code is added for certain rx packet to be ignored. 
*/ if (len == 0) return; cmd_evt = be16_to_cpup((const __be16 *)&buf[0]); cmd_len = be16_to_cpup((const __be16 *)&buf[2]); if (len < cmd_len + HCI_HEADER_SIZE) { if (len) netdev_err(dev, "%s: invalid length [%d/%d]\n", __func__, cmd_len + HCI_HEADER_SIZE, len); return; } switch (cmd_evt) { case WIMAX_RX_SDU_AGGR: gdm_wimax_transmit_aggr_pkt(dev, &buf[HCI_HEADER_SIZE], cmd_len); break; case WIMAX_RX_SDU: gdm_wimax_netif_rx(dev, &buf[HCI_HEADER_SIZE], cmd_len); break; #if defined(CONFIG_WIMAX_GDM72XX_QOS) case WIMAX_EVT_MODEM_REPORT: gdm_recv_qos_hci_packet(nic, buf, len); break; #endif case WIMAX_SDU_TX_FLOW: if (buf[4] == 0) { if (!netif_queue_stopped(dev)) netif_stop_queue(dev); } else if (buf[4] == 1) { if (netif_queue_stopped(dev)) netif_wake_queue(dev); } break; default: gdm_wimax_event_send(dev, buf, len); break; } } static void rx_complete(void *arg, void *data, int len) { struct nic *nic = arg; gdm_wimax_transmit_pkt(nic->netdev, data, len); gdm_wimax_rcv_with_cb(nic, rx_complete, nic); } static void prepare_rx_complete(void *arg, void *data, int len) { struct nic *nic = arg; int ret; ret = gdm_wimax_get_prepared_info(nic->netdev, data, len); if (ret == 1) { gdm_wimax_rcv_with_cb(nic, rx_complete, nic); } else { if (ret < 0) netdev_err(nic->netdev, "get_prepared_info failed(%d)\n", ret); gdm_wimax_rcv_with_cb(nic, prepare_rx_complete, nic); } } static void start_rx_proc(struct nic *nic) { gdm_wimax_rcv_with_cb(nic, prepare_rx_complete, nic); } static struct net_device_ops gdm_netdev_ops = { .ndo_open = gdm_wimax_open, .ndo_stop = gdm_wimax_close, .ndo_set_config = gdm_wimax_set_config, .ndo_start_xmit = gdm_wimax_tx, .ndo_set_mac_address = gdm_wimax_set_mac_addr, .ndo_do_ioctl = gdm_wimax_ioctl, }; int register_wimax_device(struct phy_dev *phy_dev, struct device *pdev) { struct nic *nic = NULL; struct net_device *dev; int ret; dev = alloc_netdev(sizeof(*nic), "wm%d", NET_NAME_UNKNOWN, ether_setup); if (!dev) { pr_err("alloc_etherdev failed\n"); return 
-ENOMEM; } SET_NETDEV_DEV(dev, pdev); dev->mtu = 1400; dev->netdev_ops = &gdm_netdev_ops; dev->flags &= ~IFF_MULTICAST; memcpy(dev->dev_addr, gdm_wimax_macaddr, sizeof(gdm_wimax_macaddr)); nic = netdev_priv(dev); nic->netdev = dev; nic->phy_dev = phy_dev; phy_dev->netdev = dev; /* event socket init */ ret = gdm_wimax_event_init(); if (ret < 0) { pr_err("Cannot create event.\n"); goto cleanup; } ret = register_netdev(dev); if (ret) goto cleanup; netif_carrier_off(dev); #ifdef CONFIG_WIMAX_GDM72XX_QOS gdm_qos_init(nic); #endif start_rx_proc(nic); /* Prepare WiMax device */ gdm_wimax_prepare_device(dev); return 0; cleanup: pr_err("register_netdev failed\n"); free_netdev(dev); return ret; } void unregister_wimax_device(struct phy_dev *phy_dev) { struct nic *nic = netdev_priv(phy_dev->netdev); struct fsm_s *fsm = (struct fsm_s *)nic->sdk_data[SIOC_DATA_FSM].buf; if (fsm) fsm->m_status = M_INIT; unregister_netdev(nic->netdev); gdm_wimax_event_exit(); #if defined(CONFIG_WIMAX_GDM72XX_QOS) gdm_qos_release_list(nic); #endif gdm_wimax_cleanup_ioctl(phy_dev->netdev); free_netdev(nic->netdev); }
gpl-2.0
sonicxml/tuna-feather-kernel
arch/mips/lantiq/clk.c
228
3098
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com> * Copyright (C) 2010 John Crispin <blogic@openwrt.org> */ #include <linux/io.h> #include <linux/export.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/err.h> #include <linux/list.h> #include <asm/time.h> #include <asm/irq.h> #include <asm/div64.h> #include <lantiq_soc.h> #include "clk.h" #include "prom.h" /* lantiq socs have 3 static clocks */ static struct clk cpu_clk_generic[3]; void clkdev_add_static(unsigned long cpu, unsigned long fpi, unsigned long io) { cpu_clk_generic[0].rate = cpu; cpu_clk_generic[1].rate = fpi; cpu_clk_generic[2].rate = io; } struct clk *clk_get_cpu(void) { return &cpu_clk_generic[0]; } struct clk *clk_get_fpi(void) { return &cpu_clk_generic[1]; } EXPORT_SYMBOL_GPL(clk_get_fpi); struct clk *clk_get_io(void) { return &cpu_clk_generic[2]; } static inline int clk_good(struct clk *clk) { return clk && !IS_ERR(clk); } unsigned long clk_get_rate(struct clk *clk) { if (unlikely(!clk_good(clk))) return 0; if (clk->rate != 0) return clk->rate; if (clk->get_rate != NULL) return clk->get_rate(); return 0; } EXPORT_SYMBOL(clk_get_rate); int clk_set_rate(struct clk *clk, unsigned long rate) { if (unlikely(!clk_good(clk))) return 0; if (clk->rates && *clk->rates) { unsigned long *r = clk->rates; while (*r && (*r != rate)) r++; if (!*r) { pr_err("clk %s.%s: trying to set invalid rate %ld\n", clk->cl.dev_id, clk->cl.con_id, rate); return -1; } } clk->rate = rate; return 0; } EXPORT_SYMBOL(clk_set_rate); int clk_enable(struct clk *clk) { if (unlikely(!clk_good(clk))) return -1; if (clk->enable) return clk->enable(clk); return -1; } EXPORT_SYMBOL(clk_enable); void clk_disable(struct clk *clk) { if 
(unlikely(!clk_good(clk))) return; if (clk->disable) clk->disable(clk); } EXPORT_SYMBOL(clk_disable); int clk_activate(struct clk *clk) { if (unlikely(!clk_good(clk))) return -1; if (clk->activate) return clk->activate(clk); return -1; } EXPORT_SYMBOL(clk_activate); void clk_deactivate(struct clk *clk) { if (unlikely(!clk_good(clk))) return; if (clk->deactivate) clk->deactivate(clk); } EXPORT_SYMBOL(clk_deactivate); struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) { return NULL; } static inline u32 get_counter_resolution(void) { u32 res; __asm__ __volatile__( ".set push\n" ".set mips32r2\n" "rdhwr %0, $3\n" ".set pop\n" : "=&r" (res) : /* no input */ : "memory"); return res; } void __init plat_time_init(void) { struct clk *clk; ltq_soc_init(); clk = clk_get_cpu(); mips_hpt_frequency = clk_get_rate(clk) / get_counter_resolution(); write_c0_compare(read_c0_count()); pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000); clk_put(clk); }
gpl-2.0
mdr78/Linux-3.8.7-galileo
drivers/usb/gadget/configfs.c
228
25008
#include <linux/configfs.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/usb/composite.h> #include <linux/usb/gadget_configfs.h> #include "configfs.h" int check_user_usb_string(const char *name, struct usb_gadget_strings *stringtab_dev) { unsigned primary_lang; unsigned sub_lang; u16 num; int ret; ret = kstrtou16(name, 0, &num); if (ret) return ret; primary_lang = num & 0x3ff; sub_lang = num >> 10; /* simple sanity check for valid langid */ switch (primary_lang) { case 0: case 0x62 ... 0xfe: case 0x100 ... 0x3ff: return -EINVAL; } if (!sub_lang) return -EINVAL; stringtab_dev->language = num; return 0; } #define MAX_NAME_LEN 40 #define MAX_USB_STRING_LANGS 2 struct gadget_info { struct config_group group; struct config_group functions_group; struct config_group configs_group; struct config_group strings_group; struct config_group *default_groups[4]; struct mutex lock; struct usb_gadget_strings *gstrings[MAX_USB_STRING_LANGS + 1]; struct list_head string_list; struct list_head available_func; const char *udc_name; #ifdef CONFIG_USB_OTG struct usb_otg_descriptor otg; #endif struct usb_composite_driver composite; struct usb_composite_dev cdev; }; struct config_usb_cfg { struct config_group group; struct config_group strings_group; struct config_group *default_groups[2]; struct list_head string_list; struct usb_configuration c; struct list_head func_list; struct usb_gadget_strings *gstrings[MAX_USB_STRING_LANGS + 1]; }; struct gadget_strings { struct usb_gadget_strings stringtab_dev; struct usb_string strings[USB_GADGET_FIRST_AVAIL_IDX]; char *manufacturer; char *product; char *serialnumber; struct config_group group; struct list_head list; }; struct gadget_config_name { struct usb_gadget_strings stringtab_dev; struct usb_string strings; char *configuration; struct config_group group; struct list_head list; }; static int usb_string_copy(const char *s, char **s_copy) { int ret; char *str; char *copy = *s_copy; ret = 
strlen(s); if (ret > 126) return -EOVERFLOW; str = kstrdup(s, GFP_KERNEL); if (!str) return -ENOMEM; if (str[ret - 1] == '\n') str[ret - 1] = '\0'; kfree(copy); *s_copy = str; return 0; } CONFIGFS_ATTR_STRUCT(gadget_info); CONFIGFS_ATTR_STRUCT(config_usb_cfg); #define GI_DEVICE_DESC_ITEM_ATTR(name) \ static struct gadget_info_attribute gadget_cdev_desc_##name = \ __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, \ gadget_dev_desc_##name##_show, \ gadget_dev_desc_##name##_store) #define GI_DEVICE_DESC_SIMPLE_R_u8(__name) \ static ssize_t gadget_dev_desc_##__name##_show(struct gadget_info *gi, \ char *page) \ { \ return sprintf(page, "0x%02x\n", gi->cdev.desc.__name); \ } #define GI_DEVICE_DESC_SIMPLE_R_u16(__name) \ static ssize_t gadget_dev_desc_##__name##_show(struct gadget_info *gi, \ char *page) \ { \ return sprintf(page, "0x%04x\n", le16_to_cpup(&gi->cdev.desc.__name)); \ } #define GI_DEVICE_DESC_SIMPLE_W_u8(_name) \ static ssize_t gadget_dev_desc_##_name##_store(struct gadget_info *gi, \ const char *page, size_t len) \ { \ u8 val; \ int ret; \ ret = kstrtou8(page, 0, &val); \ if (ret) \ return ret; \ gi->cdev.desc._name = val; \ return len; \ } #define GI_DEVICE_DESC_SIMPLE_W_u16(_name) \ static ssize_t gadget_dev_desc_##_name##_store(struct gadget_info *gi, \ const char *page, size_t len) \ { \ u16 val; \ int ret; \ ret = kstrtou16(page, 0, &val); \ if (ret) \ return ret; \ gi->cdev.desc._name = cpu_to_le16p(&val); \ return len; \ } #define GI_DEVICE_DESC_SIMPLE_RW(_name, _type) \ GI_DEVICE_DESC_SIMPLE_R_##_type(_name) \ GI_DEVICE_DESC_SIMPLE_W_##_type(_name) GI_DEVICE_DESC_SIMPLE_R_u16(bcdUSB); GI_DEVICE_DESC_SIMPLE_RW(bDeviceClass, u8); GI_DEVICE_DESC_SIMPLE_RW(bDeviceSubClass, u8); GI_DEVICE_DESC_SIMPLE_RW(bDeviceProtocol, u8); GI_DEVICE_DESC_SIMPLE_RW(bMaxPacketSize0, u8); GI_DEVICE_DESC_SIMPLE_RW(idVendor, u16); GI_DEVICE_DESC_SIMPLE_RW(idProduct, u16); GI_DEVICE_DESC_SIMPLE_R_u16(bcdDevice); static ssize_t is_valid_bcd(u16 bcd_val) { if ((bcd_val & 0xf) > 9) 
return -EINVAL; if (((bcd_val >> 4) & 0xf) > 9) return -EINVAL; if (((bcd_val >> 8) & 0xf) > 9) return -EINVAL; if (((bcd_val >> 12) & 0xf) > 9) return -EINVAL; return 0; } static ssize_t gadget_dev_desc_bcdDevice_store(struct gadget_info *gi, const char *page, size_t len) { u16 bcdDevice; int ret; ret = kstrtou16(page, 0, &bcdDevice); if (ret) return ret; ret = is_valid_bcd(bcdDevice); if (ret) return ret; gi->cdev.desc.bcdDevice = cpu_to_le16(bcdDevice); return len; } static ssize_t gadget_dev_desc_bcdUSB_store(struct gadget_info *gi, const char *page, size_t len) { u16 bcdUSB; int ret; ret = kstrtou16(page, 0, &bcdUSB); if (ret) return ret; ret = is_valid_bcd(bcdUSB); if (ret) return ret; gi->cdev.desc.bcdUSB = cpu_to_le16(bcdUSB); return len; } static ssize_t gadget_dev_desc_UDC_show(struct gadget_info *gi, char *page) { return sprintf(page, "%s\n", gi->udc_name ?: ""); } static int unregister_gadget(struct gadget_info *gi) { int ret; if (!gi->udc_name) return -ENODEV; ret = usb_gadget_unregister_driver(&gi->composite.gadget_driver); if (ret) return ret; kfree(gi->udc_name); gi->udc_name = NULL; return 0; } static ssize_t gadget_dev_desc_UDC_store(struct gadget_info *gi, const char *page, size_t len) { char *name; int ret; name = kstrdup(page, GFP_KERNEL); if (!name) return -ENOMEM; if (name[len - 1] == '\n') name[len - 1] = '\0'; mutex_lock(&gi->lock); if (!strlen(name)) { ret = unregister_gadget(gi); if (ret) goto err; } else { if (gi->udc_name) { ret = -EBUSY; goto err; } ret = udc_attach_driver(name, &gi->composite.gadget_driver); if (ret) goto err; gi->udc_name = name; } mutex_unlock(&gi->lock); return len; err: kfree(name); mutex_unlock(&gi->lock); return ret; } GI_DEVICE_DESC_ITEM_ATTR(bDeviceClass); GI_DEVICE_DESC_ITEM_ATTR(bDeviceSubClass); GI_DEVICE_DESC_ITEM_ATTR(bDeviceProtocol); GI_DEVICE_DESC_ITEM_ATTR(bMaxPacketSize0); GI_DEVICE_DESC_ITEM_ATTR(idVendor); GI_DEVICE_DESC_ITEM_ATTR(idProduct); GI_DEVICE_DESC_ITEM_ATTR(bcdDevice); 
GI_DEVICE_DESC_ITEM_ATTR(bcdUSB); GI_DEVICE_DESC_ITEM_ATTR(UDC); static struct configfs_attribute *gadget_root_attrs[] = { &gadget_cdev_desc_bDeviceClass.attr, &gadget_cdev_desc_bDeviceSubClass.attr, &gadget_cdev_desc_bDeviceProtocol.attr, &gadget_cdev_desc_bMaxPacketSize0.attr, &gadget_cdev_desc_idVendor.attr, &gadget_cdev_desc_idProduct.attr, &gadget_cdev_desc_bcdDevice.attr, &gadget_cdev_desc_bcdUSB.attr, &gadget_cdev_desc_UDC.attr, NULL, }; static inline struct gadget_info *to_gadget_info(struct config_item *item) { return container_of(to_config_group(item), struct gadget_info, group); } static inline struct gadget_strings *to_gadget_strings(struct config_item *item) { return container_of(to_config_group(item), struct gadget_strings, group); } static inline struct gadget_config_name *to_gadget_config_name( struct config_item *item) { return container_of(to_config_group(item), struct gadget_config_name, group); } static inline struct config_usb_cfg *to_config_usb_cfg(struct config_item *item) { return container_of(to_config_group(item), struct config_usb_cfg, group); } static inline struct usb_function_instance *to_usb_function_instance( struct config_item *item) { return container_of(to_config_group(item), struct usb_function_instance, group); } static void gadget_info_attr_release(struct config_item *item) { struct gadget_info *gi = to_gadget_info(item); WARN_ON(!list_empty(&gi->cdev.configs)); WARN_ON(!list_empty(&gi->string_list)); WARN_ON(!list_empty(&gi->available_func)); kfree(gi->composite.gadget_driver.function); kfree(gi); } CONFIGFS_ATTR_OPS(gadget_info); static struct configfs_item_operations gadget_root_item_ops = { .release = gadget_info_attr_release, .show_attribute = gadget_info_attr_show, .store_attribute = gadget_info_attr_store, }; static void gadget_config_attr_release(struct config_item *item) { struct config_usb_cfg *cfg = to_config_usb_cfg(item); WARN_ON(!list_empty(&cfg->c.functions)); list_del(&cfg->c.list); kfree(cfg->c.label); 
kfree(cfg); } static int config_usb_cfg_link( struct config_item *usb_cfg_ci, struct config_item *usb_func_ci) { struct config_usb_cfg *cfg = to_config_usb_cfg(usb_cfg_ci); struct usb_composite_dev *cdev = cfg->c.cdev; struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev); struct config_group *group = to_config_group(usb_func_ci); struct usb_function_instance *fi = container_of(group, struct usb_function_instance, group); struct usb_function_instance *a_fi; struct usb_function *f; int ret; mutex_lock(&gi->lock); /* * Make sure this function is from within our _this_ gadget and not * from another gadget or a random directory. * Also a function instance can only be linked once. */ list_for_each_entry(a_fi, &gi->available_func, cfs_list) { if (a_fi == fi) break; } if (a_fi != fi) { ret = -EINVAL; goto out; } list_for_each_entry(f, &cfg->func_list, list) { if (f->fi == fi) { ret = -EEXIST; goto out; } } f = usb_get_function(fi); if (IS_ERR(f)) { ret = PTR_ERR(f); goto out; } /* stash the function until we bind it to the gadget */ list_add_tail(&f->list, &cfg->func_list); ret = 0; out: mutex_unlock(&gi->lock); return ret; } static int config_usb_cfg_unlink( struct config_item *usb_cfg_ci, struct config_item *usb_func_ci) { struct config_usb_cfg *cfg = to_config_usb_cfg(usb_cfg_ci); struct usb_composite_dev *cdev = cfg->c.cdev; struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev); struct config_group *group = to_config_group(usb_func_ci); struct usb_function_instance *fi = container_of(group, struct usb_function_instance, group); struct usb_function *f; /* * ideally I would like to forbid to unlink functions while a gadget is * bound to an UDC. Since this isn't possible at the moment, we simply * force an unbind, the function is available here and then we can * remove the function. 
*/ mutex_lock(&gi->lock); if (gi->udc_name) unregister_gadget(gi); WARN_ON(gi->udc_name); list_for_each_entry(f, &cfg->func_list, list) { if (f->fi == fi) { list_del(&f->list); usb_put_function(f); mutex_unlock(&gi->lock); return 0; } } mutex_unlock(&gi->lock); WARN(1, "Unable to locate function to unbind\n"); return 0; } CONFIGFS_ATTR_OPS(config_usb_cfg); static struct configfs_item_operations gadget_config_item_ops = { .release = gadget_config_attr_release, .show_attribute = config_usb_cfg_attr_show, .store_attribute = config_usb_cfg_attr_store, .allow_link = config_usb_cfg_link, .drop_link = config_usb_cfg_unlink, }; static ssize_t gadget_config_desc_MaxPower_show(struct config_usb_cfg *cfg, char *page) { return sprintf(page, "%u\n", cfg->c.MaxPower); } static ssize_t gadget_config_desc_MaxPower_store(struct config_usb_cfg *cfg, const char *page, size_t len) { u16 val; int ret; ret = kstrtou16(page, 0, &val); if (ret) return ret; if (DIV_ROUND_UP(val, 8) > 0xff) return -ERANGE; cfg->c.MaxPower = val; return len; } static ssize_t gadget_config_desc_bmAttributes_show(struct config_usb_cfg *cfg, char *page) { return sprintf(page, "0x%02x\n", cfg->c.bmAttributes); } static ssize_t gadget_config_desc_bmAttributes_store(struct config_usb_cfg *cfg, const char *page, size_t len) { u8 val; int ret; ret = kstrtou8(page, 0, &val); if (ret) return ret; if (!(val & USB_CONFIG_ATT_ONE)) return -EINVAL; if (val & ~(USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER | USB_CONFIG_ATT_WAKEUP)) return -EINVAL; cfg->c.bmAttributes = val; return len; } #define CFG_CONFIG_DESC_ITEM_ATTR(name) \ static struct config_usb_cfg_attribute gadget_usb_cfg_##name = \ __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, \ gadget_config_desc_##name##_show, \ gadget_config_desc_##name##_store) CFG_CONFIG_DESC_ITEM_ATTR(MaxPower); CFG_CONFIG_DESC_ITEM_ATTR(bmAttributes); static struct configfs_attribute *gadget_config_attrs[] = { &gadget_usb_cfg_MaxPower.attr, &gadget_usb_cfg_bmAttributes.attr, NULL, }; static 
struct config_item_type gadget_config_type = { .ct_item_ops = &gadget_config_item_ops, .ct_attrs = gadget_config_attrs, .ct_owner = THIS_MODULE, }; static struct config_item_type gadget_root_type = { .ct_item_ops = &gadget_root_item_ops, .ct_attrs = gadget_root_attrs, .ct_owner = THIS_MODULE, }; static void composite_init_dev(struct usb_composite_dev *cdev) { spin_lock_init(&cdev->lock); INIT_LIST_HEAD(&cdev->configs); INIT_LIST_HEAD(&cdev->gstrings); } static struct config_group *function_make( struct config_group *group, const char *name) { struct gadget_info *gi; struct usb_function_instance *fi; char buf[MAX_NAME_LEN]; char *func_name; char *instance_name; int ret; ret = snprintf(buf, MAX_NAME_LEN, "%s", name); if (ret >= MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); func_name = buf; instance_name = strchr(func_name, '.'); if (!instance_name) { pr_err("Unable to locate . in FUNC.INSTANCE\n"); return ERR_PTR(-EINVAL); } *instance_name = '\0'; instance_name++; fi = usb_get_function_instance(func_name); if (IS_ERR(fi)) return ERR_CAST(fi); ret = config_item_set_name(&fi->group.cg_item, name); if (ret) { usb_put_function_instance(fi); return ERR_PTR(ret); } if (fi->set_inst_name) { ret = fi->set_inst_name(fi, instance_name); if (ret) { usb_put_function_instance(fi); return ERR_PTR(ret); } } gi = container_of(group, struct gadget_info, functions_group); mutex_lock(&gi->lock); list_add_tail(&fi->cfs_list, &gi->available_func); mutex_unlock(&gi->lock); return &fi->group; } static void function_drop( struct config_group *group, struct config_item *item) { struct usb_function_instance *fi = to_usb_function_instance(item); struct gadget_info *gi; gi = container_of(group, struct gadget_info, functions_group); mutex_lock(&gi->lock); list_del(&fi->cfs_list); mutex_unlock(&gi->lock); config_item_put(item); } static struct configfs_group_operations functions_ops = { .make_group = &function_make, .drop_item = &function_drop, }; static struct config_item_type functions_type = { 
.ct_group_ops = &functions_ops, .ct_owner = THIS_MODULE, }; CONFIGFS_ATTR_STRUCT(gadget_config_name); GS_STRINGS_RW(gadget_config_name, configuration); static struct configfs_attribute *gadget_config_name_langid_attrs[] = { &gadget_config_name_configuration.attr, NULL, }; static void gadget_config_name_attr_release(struct config_item *item) { struct gadget_config_name *cn = to_gadget_config_name(item); kfree(cn->configuration); list_del(&cn->list); kfree(cn); } USB_CONFIG_STRING_RW_OPS(gadget_config_name); USB_CONFIG_STRINGS_LANG(gadget_config_name, config_usb_cfg); static struct config_group *config_desc_make( struct config_group *group, const char *name) { struct gadget_info *gi; struct config_usb_cfg *cfg; char buf[MAX_NAME_LEN]; char *num_str; u8 num; int ret; gi = container_of(group, struct gadget_info, configs_group); ret = snprintf(buf, MAX_NAME_LEN, "%s", name); if (ret >= MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); num_str = strchr(buf, '.'); if (!num_str) { pr_err("Unable to locate . 
in name.bConfigurationValue\n"); return ERR_PTR(-EINVAL); } *num_str = '\0'; num_str++; if (!strlen(buf)) return ERR_PTR(-EINVAL); ret = kstrtou8(num_str, 0, &num); if (ret) return ERR_PTR(ret); cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); if (!cfg) return ERR_PTR(-ENOMEM); cfg->c.label = kstrdup(buf, GFP_KERNEL); if (!cfg->c.label) { ret = -ENOMEM; goto err; } cfg->c.bConfigurationValue = num; cfg->c.MaxPower = CONFIG_USB_GADGET_VBUS_DRAW; cfg->c.bmAttributes = USB_CONFIG_ATT_ONE; INIT_LIST_HEAD(&cfg->string_list); INIT_LIST_HEAD(&cfg->func_list); cfg->group.default_groups = cfg->default_groups; cfg->default_groups[0] = &cfg->strings_group; config_group_init_type_name(&cfg->group, name, &gadget_config_type); config_group_init_type_name(&cfg->strings_group, "strings", &gadget_config_name_strings_type); ret = usb_add_config_only(&gi->cdev, &cfg->c); if (ret) goto err; return &cfg->group; err: kfree(cfg->c.label); kfree(cfg); return ERR_PTR(ret); } static void config_desc_drop( struct config_group *group, struct config_item *item) { config_item_put(item); } static struct configfs_group_operations config_desc_ops = { .make_group = &config_desc_make, .drop_item = &config_desc_drop, }; static struct config_item_type config_desc_type = { .ct_group_ops = &config_desc_ops, .ct_owner = THIS_MODULE, }; CONFIGFS_ATTR_STRUCT(gadget_strings); GS_STRINGS_RW(gadget_strings, manufacturer); GS_STRINGS_RW(gadget_strings, product); GS_STRINGS_RW(gadget_strings, serialnumber); static struct configfs_attribute *gadget_strings_langid_attrs[] = { &gadget_strings_manufacturer.attr, &gadget_strings_product.attr, &gadget_strings_serialnumber.attr, NULL, }; static void gadget_strings_attr_release(struct config_item *item) { struct gadget_strings *gs = to_gadget_strings(item); kfree(gs->manufacturer); kfree(gs->product); kfree(gs->serialnumber); list_del(&gs->list); kfree(gs); } USB_CONFIG_STRING_RW_OPS(gadget_strings); USB_CONFIG_STRINGS_LANG(gadget_strings, gadget_info); static int 
configfs_do_nothing(struct usb_composite_dev *cdev) { WARN_ON(1); return -EINVAL; } int composite_dev_prepare(struct usb_composite_driver *composite, struct usb_composite_dev *dev); static void purge_configs_funcs(struct gadget_info *gi) { struct usb_configuration *c; list_for_each_entry(c, &gi->cdev.configs, list) { struct usb_function *f, *tmp; struct config_usb_cfg *cfg; cfg = container_of(c, struct config_usb_cfg, c); list_for_each_entry_safe(f, tmp, &c->functions, list) { list_move_tail(&f->list, &cfg->func_list); if (f->unbind) { dev_err(&gi->cdev.gadget->dev, "unbind function" " '%s'/%p\n", f->name, f); f->unbind(c, f); } } c->next_interface_id = 0; c->superspeed = 0; c->highspeed = 0; c->fullspeed = 0; } } static int configfs_composite_bind(struct usb_gadget *gadget, struct usb_gadget_driver *gdriver) { struct usb_composite_driver *composite = to_cdriver(gdriver); struct gadget_info *gi = container_of(composite, struct gadget_info, composite); struct usb_composite_dev *cdev = &gi->cdev; struct usb_configuration *c; struct usb_string *s; unsigned i; int ret; /* the gi->lock is hold by the caller */ cdev->gadget = gadget; set_gadget_data(gadget, cdev); ret = composite_dev_prepare(composite, cdev); if (ret) return ret; /* and now the gadget bind */ ret = -EINVAL; if (list_empty(&gi->cdev.configs)) { pr_err("Need atleast one configuration in %s.\n", gi->composite.name); goto err_comp_cleanup; } list_for_each_entry(c, &gi->cdev.configs, list) { struct config_usb_cfg *cfg; cfg = container_of(c, struct config_usb_cfg, c); if (list_empty(&cfg->func_list)) { pr_err("Config %s/%d of %s needs atleast one function.\n", c->label, c->bConfigurationValue, gi->composite.name); goto err_comp_cleanup; } } /* init all strings */ if (!list_empty(&gi->string_list)) { struct gadget_strings *gs; i = 0; list_for_each_entry(gs, &gi->string_list, list) { gi->gstrings[i] = &gs->stringtab_dev; gs->stringtab_dev.strings = gs->strings; gs->strings[USB_GADGET_MANUFACTURER_IDX].s = 
gs->manufacturer; gs->strings[USB_GADGET_PRODUCT_IDX].s = gs->product; gs->strings[USB_GADGET_SERIAL_IDX].s = gs->serialnumber; i++; } gi->gstrings[i] = NULL; s = usb_gstrings_attach(&gi->cdev, gi->gstrings, USB_GADGET_FIRST_AVAIL_IDX); if (IS_ERR(s)) { ret = PTR_ERR(s); goto err_comp_cleanup; } gi->cdev.desc.iManufacturer = s[USB_GADGET_MANUFACTURER_IDX].id; gi->cdev.desc.iProduct = s[USB_GADGET_PRODUCT_IDX].id; gi->cdev.desc.iSerialNumber = s[USB_GADGET_SERIAL_IDX].id; } /* Go through all configs, attach all functions */ list_for_each_entry(c, &gi->cdev.configs, list) { struct config_usb_cfg *cfg; struct usb_function *f; struct usb_function *tmp; struct gadget_config_name *cn; cfg = container_of(c, struct config_usb_cfg, c); if (!list_empty(&cfg->string_list)) { i = 0; list_for_each_entry(cn, &cfg->string_list, list) { cfg->gstrings[i] = &cn->stringtab_dev; cn->stringtab_dev.strings = &cn->strings; cn->strings.s = cn->configuration; i++; } cfg->gstrings[i] = NULL; s = usb_gstrings_attach(&gi->cdev, cfg->gstrings, 1); if (IS_ERR(s)) { ret = PTR_ERR(s); goto err_comp_cleanup; } c->iConfiguration = s[0].id; } list_for_each_entry_safe(f, tmp, &cfg->func_list, list) { list_del(&f->list); ret = usb_add_function(c, f); if (ret) { list_add(&f->list, &cfg->func_list); goto err_purge_funcs; } } usb_ep_autoconfig_reset(cdev->gadget); } usb_ep_autoconfig_reset(cdev->gadget); return 0; err_purge_funcs: purge_configs_funcs(gi); err_comp_cleanup: composite_dev_cleanup(cdev); return ret; } static void configfs_composite_unbind(struct usb_gadget *gadget) { struct usb_composite_dev *cdev; struct gadget_info *gi; /* the gi->lock is hold by the caller */ cdev = get_gadget_data(gadget); gi = container_of(cdev, struct gadget_info, cdev); purge_configs_funcs(gi); composite_dev_cleanup(cdev); usb_ep_autoconfig_reset(cdev->gadget); cdev->gadget = NULL; set_gadget_data(gadget, NULL); } static const struct usb_gadget_driver configfs_driver_template = { .bind = configfs_composite_bind, 
.unbind = configfs_composite_unbind, .setup = composite_setup, .disconnect = composite_disconnect, .max_speed = USB_SPEED_SUPER, .driver = { .owner = THIS_MODULE, .name = "configfs-gadget", }, }; static struct config_group *gadgets_make( struct config_group *group, const char *name) { struct gadget_info *gi; gi = kzalloc(sizeof(*gi), GFP_KERNEL); if (!gi) return ERR_PTR(-ENOMEM); gi->group.default_groups = gi->default_groups; gi->group.default_groups[0] = &gi->functions_group; gi->group.default_groups[1] = &gi->configs_group; gi->group.default_groups[2] = &gi->strings_group; config_group_init_type_name(&gi->functions_group, "functions", &functions_type); config_group_init_type_name(&gi->configs_group, "configs", &config_desc_type); config_group_init_type_name(&gi->strings_group, "strings", &gadget_strings_strings_type); gi->composite.bind = configfs_do_nothing; gi->composite.unbind = configfs_do_nothing; gi->composite.suspend = NULL; gi->composite.resume = NULL; gi->composite.max_speed = USB_SPEED_SUPER; mutex_init(&gi->lock); INIT_LIST_HEAD(&gi->string_list); INIT_LIST_HEAD(&gi->available_func); composite_init_dev(&gi->cdev); gi->cdev.desc.bLength = USB_DT_DEVICE_SIZE; gi->cdev.desc.bDescriptorType = USB_DT_DEVICE; gi->cdev.desc.bcdDevice = cpu_to_le16(get_default_bcdDevice()); gi->composite.gadget_driver = configfs_driver_template; gi->composite.gadget_driver.function = kstrdup(name, GFP_KERNEL); gi->composite.name = gi->composite.gadget_driver.function; if (!gi->composite.gadget_driver.function) goto err; #ifdef CONFIG_USB_OTG gi->otg.bLength = sizeof(struct usb_otg_descriptor); gi->otg.bDescriptorType = USB_DT_OTG; gi->otg.bmAttributes = USB_OTG_SRP | USB_OTG_HNP; #endif config_group_init_type_name(&gi->group, name, &gadget_root_type); return &gi->group; err: kfree(gi); return ERR_PTR(-ENOMEM); } static void gadgets_drop(struct config_group *group, struct config_item *item) { config_item_put(item); } static struct configfs_group_operations gadgets_ops = { 
.make_group = &gadgets_make, .drop_item = &gadgets_drop, }; static struct config_item_type gadgets_type = { .ct_group_ops = &gadgets_ops, .ct_owner = THIS_MODULE, }; static struct configfs_subsystem gadget_subsys = { .su_group = { .cg_item = { .ci_namebuf = "usb_gadget", .ci_type = &gadgets_type, }, }, .su_mutex = __MUTEX_INITIALIZER(gadget_subsys.su_mutex), }; void unregister_gadget_item(struct config_item *item) { struct gadget_info *gi = to_gadget_info(item); unregister_gadget(gi); } EXPORT_SYMBOL(unregister_gadget_item); static int __init gadget_cfs_init(void) { int ret; config_group_init(&gadget_subsys.su_group); ret = configfs_register_subsystem(&gadget_subsys); return ret; } module_init(gadget_cfs_init); static void __exit gadget_cfs_exit(void) { configfs_unregister_subsystem(&gadget_subsys); } module_exit(gadget_cfs_exit);
gpl-2.0
careyli/linux-3.16.2
drivers/input/touchscreen/tps6507x-ts.c
996
7700
/* * Touchscreen driver for the tps6507x chip. * * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com) * * Credits: * * Using code from tsc2007, MtekVision Co., Ltd. * * For licencing details see kernel-base/COPYING * * TPS65070, TPS65073, TPS650731, and TPS650732 support * 10 bit touch screen interface. */ #include <linux/module.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/input-polldev.h> #include <linux/platform_device.h> #include <linux/mfd/tps6507x.h> #include <linux/input/tps6507x-ts.h> #include <linux/delay.h> #define TSC_DEFAULT_POLL_PERIOD 30 /* ms */ #define TPS_DEFAULT_MIN_PRESSURE 0x30 #define MAX_10BIT ((1 << 10) - 1) #define TPS6507X_ADCONFIG_CONVERT_TS (TPS6507X_ADCONFIG_AD_ENABLE | \ TPS6507X_ADCONFIG_START_CONVERSION | \ TPS6507X_ADCONFIG_INPUT_REAL_TSC) #define TPS6507X_ADCONFIG_POWER_DOWN_TS (TPS6507X_ADCONFIG_INPUT_REAL_TSC) struct ts_event { u16 x; u16 y; u16 pressure; }; struct tps6507x_ts { struct device *dev; struct input_polled_dev *poll_dev; struct tps6507x_dev *mfd; char phys[32]; struct ts_event tc; u16 min_pressure; bool pendown; }; static int tps6507x_read_u8(struct tps6507x_ts *tsc, u8 reg, u8 *data) { int err; err = tsc->mfd->read_dev(tsc->mfd, reg, 1, data); if (err) return err; return 0; } static int tps6507x_write_u8(struct tps6507x_ts *tsc, u8 reg, u8 data) { return tsc->mfd->write_dev(tsc->mfd, reg, 1, &data); } static s32 tps6507x_adc_conversion(struct tps6507x_ts *tsc, u8 tsc_mode, u16 *value) { s32 ret; u8 adc_status; u8 result; /* Route input signal to A/D converter */ ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE, tsc_mode); if (ret) { dev_err(tsc->dev, "TSC mode read failed\n"); goto err; } /* Start A/D conversion */ ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG, TPS6507X_ADCONFIG_CONVERT_TS); if (ret) { dev_err(tsc->dev, "ADC config write failed\n"); return ret; } do { ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADCONFIG, &adc_status); if (ret) { 
dev_err(tsc->dev, "ADC config read failed\n"); goto err; } } while (adc_status & TPS6507X_ADCONFIG_START_CONVERSION); ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_2, &result); if (ret) { dev_err(tsc->dev, "ADC result 2 read failed\n"); goto err; } *value = (result & TPS6507X_REG_ADRESULT_2_MASK) << 8; ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_1, &result); if (ret) { dev_err(tsc->dev, "ADC result 1 read failed\n"); goto err; } *value |= result; dev_dbg(tsc->dev, "TSC channel %d = 0x%X\n", tsc_mode, *value); err: return ret; } /* Need to call tps6507x_adc_standby() after using A/D converter for the * touch screen interrupt to work properly. */ static s32 tps6507x_adc_standby(struct tps6507x_ts *tsc) { s32 ret; s32 loops = 0; u8 val; ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG, TPS6507X_ADCONFIG_INPUT_TSC); if (ret) return ret; ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE, TPS6507X_TSCMODE_STANDBY); if (ret) return ret; ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val); if (ret) return ret; while (val & TPS6507X_REG_TSC_INT) { mdelay(10); ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val); if (ret) return ret; loops++; } return ret; } static void tps6507x_ts_poll(struct input_polled_dev *poll_dev) { struct tps6507x_ts *tsc = poll_dev->private; struct input_dev *input_dev = poll_dev->input; bool pendown; s32 ret; ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_PRESSURE, &tsc->tc.pressure); if (ret) goto done; pendown = tsc->tc.pressure > tsc->min_pressure; if (unlikely(!pendown && tsc->pendown)) { dev_dbg(tsc->dev, "UP\n"); input_report_key(input_dev, BTN_TOUCH, 0); input_report_abs(input_dev, ABS_PRESSURE, 0); input_sync(input_dev); tsc->pendown = false; } if (pendown) { if (!tsc->pendown) { dev_dbg(tsc->dev, "DOWN\n"); input_report_key(input_dev, BTN_TOUCH, 1); } else dev_dbg(tsc->dev, "still down\n"); ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_X_POSITION, &tsc->tc.x); if (ret) goto done; ret = tps6507x_adc_conversion(tsc, 
TPS6507X_TSCMODE_Y_POSITION, &tsc->tc.y); if (ret) goto done; input_report_abs(input_dev, ABS_X, tsc->tc.x); input_report_abs(input_dev, ABS_Y, tsc->tc.y); input_report_abs(input_dev, ABS_PRESSURE, tsc->tc.pressure); input_sync(input_dev); tsc->pendown = true; } done: tps6507x_adc_standby(tsc); } static int tps6507x_ts_probe(struct platform_device *pdev) { struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); const struct tps6507x_board *tps_board; const struct touchscreen_init_data *init_data; struct tps6507x_ts *tsc; struct input_polled_dev *poll_dev; struct input_dev *input_dev; int error; /* * tps_board points to pmic related constants * coming from the board-evm file. */ tps_board = dev_get_platdata(tps6507x_dev->dev); if (!tps_board) { dev_err(tps6507x_dev->dev, "Could not find tps6507x platform data\n"); return -ENODEV; } /* * init_data points to array of regulator_init structures * coming from the board-evm file. */ init_data = tps_board->tps6507x_ts_init_data; tsc = kzalloc(sizeof(struct tps6507x_ts), GFP_KERNEL); if (!tsc) { dev_err(tps6507x_dev->dev, "failed to allocate driver data\n"); return -ENOMEM; } tsc->mfd = tps6507x_dev; tsc->dev = tps6507x_dev->dev; tsc->min_pressure = init_data ? init_data->min_pressure : TPS_DEFAULT_MIN_PRESSURE; snprintf(tsc->phys, sizeof(tsc->phys), "%s/input0", dev_name(tsc->dev)); poll_dev = input_allocate_polled_device(); if (!poll_dev) { dev_err(tsc->dev, "Failed to allocate polled input device.\n"); error = -ENOMEM; goto err_free_mem; } tsc->poll_dev = poll_dev; poll_dev->private = tsc; poll_dev->poll = tps6507x_ts_poll; poll_dev->poll_interval = init_data ? 
init_data->poll_period : TSC_DEFAULT_POLL_PERIOD; input_dev = poll_dev->input; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, 0, MAX_10BIT, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, MAX_10BIT, 0, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_10BIT, 0, 0); input_dev->name = "TPS6507x Touchscreen"; input_dev->phys = tsc->phys; input_dev->dev.parent = tsc->dev; input_dev->id.bustype = BUS_I2C; if (init_data) { input_dev->id.vendor = init_data->vendor; input_dev->id.product = init_data->product; input_dev->id.version = init_data->version; } error = tps6507x_adc_standby(tsc); if (error) goto err_free_polled_dev; error = input_register_polled_device(poll_dev); if (error) goto err_free_polled_dev; platform_set_drvdata(pdev, tsc); return 0; err_free_polled_dev: input_free_polled_device(poll_dev); err_free_mem: kfree(tsc); return error; } static int tps6507x_ts_remove(struct platform_device *pdev) { struct tps6507x_ts *tsc = platform_get_drvdata(pdev); struct input_polled_dev *poll_dev = tsc->poll_dev; input_unregister_polled_device(poll_dev); input_free_polled_device(poll_dev); kfree(tsc); return 0; } static struct platform_driver tps6507x_ts_driver = { .driver = { .name = "tps6507x-ts", .owner = THIS_MODULE, }, .probe = tps6507x_ts_probe, .remove = tps6507x_ts_remove, }; module_platform_driver(tps6507x_ts_driver); MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>"); MODULE_DESCRIPTION("TPS6507x - TouchScreen driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:tps6507x-ts");
gpl-2.0
x942/GuardianKernel-Tuna
arch/arm/mach-pxa/pxa3xx.c
2276
11511
/* * linux/arch/arm/mach-pxa/pxa3xx.c * * code specific to pxa3xx aka Monahans * * Copyright (C) 2006 Marvell International Ltd. * * 2007-09-02: eric miao <eric.miao@marvell.com> * initial version * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/syscore_ops.h> #include <linux/i2c/pxa-i2c.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include <mach/gpio.h> #include <mach/pxa3xx-regs.h> #include <mach/reset.h> #include <mach/ohci.h> #include <mach/pm.h> #include <mach/dma.h> #include <mach/regs-intc.h> #include <mach/smemc.h> #include "generic.h" #include "devices.h" #include "clock.h" #define PECR_IE(n) ((1 << ((n) * 2)) << 28) #define PECR_IS(n) ((1 << ((n) * 2)) << 29) static DEFINE_PXA3_CKEN(pxa3xx_ffuart, FFUART, 14857000, 1); static DEFINE_PXA3_CKEN(pxa3xx_btuart, BTUART, 14857000, 1); static DEFINE_PXA3_CKEN(pxa3xx_stuart, STUART, 14857000, 1); static DEFINE_PXA3_CKEN(pxa3xx_i2c, I2C, 32842000, 0); static DEFINE_PXA3_CKEN(pxa3xx_udc, UDC, 48000000, 5); static DEFINE_PXA3_CKEN(pxa3xx_usbh, USBH, 48000000, 0); static DEFINE_PXA3_CKEN(pxa3xx_u2d, USB2, 48000000, 0); static DEFINE_PXA3_CKEN(pxa3xx_keypad, KEYPAD, 32768, 0); static DEFINE_PXA3_CKEN(pxa3xx_ssp1, SSP1, 13000000, 0); static DEFINE_PXA3_CKEN(pxa3xx_ssp2, SSP2, 13000000, 0); static DEFINE_PXA3_CKEN(pxa3xx_ssp3, SSP3, 13000000, 0); static DEFINE_PXA3_CKEN(pxa3xx_ssp4, SSP4, 13000000, 0); static DEFINE_PXA3_CKEN(pxa3xx_pwm0, PWM0, 13000000, 0); static DEFINE_PXA3_CKEN(pxa3xx_pwm1, PWM1, 13000000, 0); static DEFINE_PXA3_CKEN(pxa3xx_mmc1, MMC1, 19500000, 0); static DEFINE_PXA3_CKEN(pxa3xx_mmc2, MMC2, 19500000, 0); static DEFINE_CK(pxa3xx_lcd, LCD, &clk_pxa3xx_hsio_ops); 
static DEFINE_CK(pxa3xx_smemc, SMC, &clk_pxa3xx_smemc_ops);
static DEFINE_CK(pxa3xx_camera, CAMERA, &clk_pxa3xx_hsio_ops);
static DEFINE_CK(pxa3xx_ac97, AC97, &clk_pxa3xx_ac97_ops);
static DEFINE_CLK(pxa3xx_pout, &clk_pxa3xx_pout_ops, 13000000, 70);

/* clkdev lookup table: maps each clock to a device name and/or connection id */
static struct clk_lookup pxa3xx_clkregs[] = {
	INIT_CLKREG(&clk_pxa3xx_pout, NULL, "CLK_POUT"),
	/* Power I2C clock is always on */
	INIT_CLKREG(&clk_dummy, "pxa3xx-pwri2c.1", NULL),
	INIT_CLKREG(&clk_pxa3xx_lcd, "pxa2xx-fb", NULL),
	INIT_CLKREG(&clk_pxa3xx_camera, NULL, "CAMCLK"),
	INIT_CLKREG(&clk_pxa3xx_ac97, NULL, "AC97CLK"),
	INIT_CLKREG(&clk_pxa3xx_ffuart, "pxa2xx-uart.0", NULL),
	INIT_CLKREG(&clk_pxa3xx_btuart, "pxa2xx-uart.1", NULL),
	INIT_CLKREG(&clk_pxa3xx_stuart, "pxa2xx-uart.2", NULL),
	INIT_CLKREG(&clk_pxa3xx_stuart, "pxa2xx-ir", "UARTCLK"),
	INIT_CLKREG(&clk_pxa3xx_i2c, "pxa2xx-i2c.0", NULL),
	INIT_CLKREG(&clk_pxa3xx_udc, "pxa27x-udc", NULL),
	INIT_CLKREG(&clk_pxa3xx_usbh, "pxa27x-ohci", NULL),
	INIT_CLKREG(&clk_pxa3xx_u2d, "pxa3xx-u2d", NULL),
	INIT_CLKREG(&clk_pxa3xx_keypad, "pxa27x-keypad", NULL),
	INIT_CLKREG(&clk_pxa3xx_ssp1, "pxa27x-ssp.0", NULL),
	INIT_CLKREG(&clk_pxa3xx_ssp2, "pxa27x-ssp.1", NULL),
	INIT_CLKREG(&clk_pxa3xx_ssp3, "pxa27x-ssp.2", NULL),
	INIT_CLKREG(&clk_pxa3xx_ssp4, "pxa27x-ssp.3", NULL),
	INIT_CLKREG(&clk_pxa3xx_pwm0, "pxa27x-pwm.0", NULL),
	INIT_CLKREG(&clk_pxa3xx_pwm1, "pxa27x-pwm.1", NULL),
	INIT_CLKREG(&clk_pxa3xx_mmc1, "pxa2xx-mci.0", NULL),
	INIT_CLKREG(&clk_pxa3xx_mmc2, "pxa2xx-mci.1", NULL),
	INIT_CLKREG(&clk_pxa3xx_smemc, "pxa2xx-pcmcia", NULL),
};

#ifdef CONFIG_PM

#define ISRAM_START	0x5c000000
#define ISRAM_SIZE	SZ_256K

/* ioremap()ed window onto the internal SRAM; set up by pxa3xx_init_pm() */
static void __iomem *sram;
/* accumulated mask of enabled wakeup sources, built by pxa3xx_set_wake() */
static unsigned long wakeup_src;

/*
 * Enter a standby mode (S0D1C2 or S0D2C2). Upon wakeup, the dynamic
 * memory controller has to be reinitialised, so we place some code
 * in the SRAM to perform this function.
 *
 * We disable FIQs across the standby - otherwise, we might receive a
 * FIQ while the SDRAM is unavailable.
 */
static void pxa3xx_cpu_standby(unsigned int pwrmode)
{
	extern const char pm_enter_standby_start[], pm_enter_standby_end[];
	void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);

	/* copy the standby-entry stub into SRAM, then call it there */
	memcpy_toio(sram + 0x8000, pm_enter_standby_start,
		    pm_enter_standby_end - pm_enter_standby_start);

	/* clear stale D2 wakeup status, then enable the selected sources */
	AD2D0SR = ~0;
	AD2D1SR = ~0;
	AD2D0ER = wakeup_src;
	AD2D1ER = 0;
	/* read-modify-write clears the write-1-to-clear status bits */
	ASCR = ASCR;
	ARSR = ARSR;

	local_fiq_disable();
	fn(pwrmode);
	local_fiq_enable();

	/* disable the wakeup sources again on the way out */
	AD2D0ER = 0;
	AD2D1ER = 0;
}

/*
 * NOTE: currently, the OBM (OEM Boot Module) binary comes along with
 * PXA3xx development kits assumes that the resuming process continues
 * with the address stored within the first 4 bytes of SDRAM. The PSPR
 * register is used privately by BootROM and OBM, and _must_ be set to
 * 0x5c014000 for the moment.
 */
static void pxa3xx_cpu_pm_suspend(void)
{
	/* first word of SDRAM holds the resume address; save/restore it */
	volatile unsigned long *p = (volatile void *)0xc0000000;
	unsigned long saved_data = *p;

	extern void pxa3xx_cpu_suspend(long);

	/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
	CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
	CKENB |= 1 << (CKEN_HSIO2 & 0x1f);

	/* clear and setup wakeup source */
	AD3SR = ~0;
	AD3ER = wakeup_src;
	ASCR = ASCR;
	ARSR = ARSR;

	PCFR |= (1u << 13);			/* L1_DIS */
	PCFR &= ~((1u << 12) | (1u << 1));	/* L0_EN | SL_ROD */

	PSPR = 0x5c014000;

	/* overwrite with the resume address */
	*p = virt_to_phys(cpu_resume);

	pxa3xx_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET);

	*p = saved_data;

	AD3ER = 0;
}

/*
 * Dispatch a suspend request to standby (S0D2C2) or full suspend.
 * Refuses to enter a low-power state when no wakeup source is enabled,
 * since the system could never be woken up again.
 */
static void pxa3xx_cpu_pm_enter(suspend_state_t state)
{
	/*
	 * Don't sleep if no wakeup sources are defined
	 */
	if (wakeup_src == 0) {
		printk(KERN_ERR "Not suspending: no wakeup sources\n");
		return;
	}

	switch (state) {
	case PM_SUSPEND_STANDBY:
		pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
		break;

	case PM_SUSPEND_MEM:
		pxa3xx_cpu_pm_suspend();
		break;
	}
}

/* only "standby" and "mem" sleep states are supported on PXA3xx */
static int pxa3xx_cpu_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
}

static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
	.valid		= pxa3xx_cpu_pm_valid,
	.enter		= pxa3xx_cpu_pm_enter,
};

/*
 * Map the internal SRAM, configure it to survive low-power modes, and
 * register the PXA3xx suspend/standby callbacks.  Standby/suspend is
 * silently disabled if the SRAM cannot be mapped.
 */
static void __init pxa3xx_init_pm(void)
{
	sram = ioremap(ISRAM_START, ISRAM_SIZE);
	if (!sram) {
		printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
		return;
	}

	/*
	 * Since we copy wakeup code into the SRAM, we need to ensure
	 * that it is preserved over the low power modes.  Note: bit 8
	 * is undocumented in the developer manual, but must be set.
	 */
	AD1R |= ADXR_L2 | ADXR_R0;
	AD2R |= ADXR_L2 | ADXR_R0;
	AD3R |= ADXR_L2 | ADXR_R0;

	/*
	 * Clear the resume enable registers.
	 */
	AD1D0ER = 0;
	AD2D0ER = 0;
	AD2D1ER = 0;
	AD3ER = 0;

	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
}

/*
 * irq_set_wake() backend: translate an IRQ number into its ADxER wakeup
 * bit and record it in (or remove it from) wakeup_src.  Returns -EINVAL
 * for IRQs that cannot act as wakeup sources.
 */
static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
{
	unsigned long flags, mask = 0;

	switch (d->irq) {
	case IRQ_SSP3:
		mask = ADXER_MFP_WSSP3;
		break;
	case IRQ_MSL:
		mask = ADXER_WMSL0;
		break;
	case IRQ_USBH2:
	case IRQ_USBH1:
		mask = ADXER_WUSBH;
		break;
	case IRQ_KEYPAD:
		mask = ADXER_WKP;
		break;
	case IRQ_AC97:
		mask = ADXER_MFP_WAC97;
		break;
	case IRQ_USIM:
		mask = ADXER_WUSIM0;
		break;
	case IRQ_SSP2:
		mask = ADXER_MFP_WSSP2;
		break;
	case IRQ_I2C:
		mask = ADXER_MFP_WI2C;
		break;
	case IRQ_STUART:
		mask = ADXER_MFP_WUART3;
		break;
	case IRQ_BTUART:
		mask = ADXER_MFP_WUART2;
		break;
	case IRQ_FFUART:
		mask = ADXER_MFP_WUART1;
		break;
	case IRQ_MMC:
		mask = ADXER_MFP_WMMC1;
		break;
	case IRQ_SSP:
		mask = ADXER_MFP_WSSP1;
		break;
	case IRQ_RTCAlrm:
		mask = ADXER_WRTC;
		break;
	case IRQ_SSP4:
		mask = ADXER_MFP_WSSP4;
		break;
	case IRQ_TSI:
		mask = ADXER_WTSI;
		break;
	case IRQ_USIM2:
		mask = ADXER_WUSIM1;
		break;
	case IRQ_MMC2:
		mask = ADXER_MFP_WMMC2;
		break;
	case IRQ_NAND:
		mask = ADXER_MFP_WFLASH;
		break;
	case IRQ_USB2:
		mask = ADXER_WUSB2;
		break;
	case IRQ_WAKEUP0:
		mask = ADXER_WEXTWAKE0;
		break;
	case IRQ_WAKEUP1:
		mask = ADXER_WEXTWAKE1;
		break;
	case IRQ_MMC3:
		mask = ADXER_MFP_GEN12;
		break;
	default:
		return -EINVAL;
	}

	local_irq_save(flags);
	if (on)
		wakeup_src |= mask;
	else
		wakeup_src &= ~mask;
	local_irq_restore(flags);

	return 0;
}
#else
static inline void pxa3xx_init_pm(void) {}
#define pxa3xx_set_wake	NULL
#endif
/* acknowledge an external wakeup interrupt by writing its PECR status bit */
static void pxa_ack_ext_wakeup(struct irq_data *d)
{
	PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
}

/* mask the wakeup IRQ both in the interrupt controller and in PECR */
static void pxa_mask_ext_wakeup(struct irq_data *d)
{
	ICMR2 &= ~(1 << ((d->irq - PXA_IRQ(0)) & 0x1f));
	PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
}

/* unmask the wakeup IRQ both in the interrupt controller and in PECR */
static void pxa_unmask_ext_wakeup(struct irq_data *d)
{
	ICMR2 |= 1 << ((d->irq - PXA_IRQ(0)) & 0x1f);
	PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
}

/*
 * Select the trigger edge(s) for an external wakeup pin via PWER.
 * NOTE(review): rising-edge bits sit at (irq - IRQ_WAKEUP0), falling-edge
 * bits two positions higher; bits are only ever set here, never cleared.
 */
static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
{
	if (flow_type & IRQ_TYPE_EDGE_RISING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0);

	if (flow_type & IRQ_TYPE_EDGE_FALLING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);

	return 0;
}

static struct irq_chip pxa_ext_wakeup_chip = {
	.name		= "WAKEUP",
	.irq_ack	= pxa_ack_ext_wakeup,
	.irq_mask	= pxa_mask_ext_wakeup,
	.irq_unmask	= pxa_unmask_ext_wakeup,
	.irq_set_type	= pxa_set_ext_wakeup_type,
};

/*
 * Hook the two external wakeup pins (IRQ_WAKEUP0/1) up to the wakeup
 * irq_chip and install 'fn' as the chip's set_wake handler.
 */
static void __init pxa_init_ext_wakeup_irq(set_wake_t fn)
{
	int irq;

	for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
		irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
					 handle_edge_irq);
		set_irq_flags(irq, IRQF_VALID);
	}

	pxa_ext_wakeup_chip.irq_set_wake = fn;
}

/*
 * Interrupt setup for PXA3xx: grant coprocessor-6 access (CP15 control),
 * then initialise the 56 on-chip IRQs, the external wakeup IRQs and the
 * GPIO interrupt layer.
 */
void __init pxa3xx_init_irq(void)
{
	/* enable CP6 access */
	u32 value;
	__asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n": "=r"(value));
	value |= (1 << 6);
	__asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value));

	pxa_init_irq(56, pxa3xx_set_wake);
	pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
	pxa_init_gpio(IRQ_GPIO_2_x, 2, 127, NULL);
}

/* static I/O mapping for the memory controller (everything else is ioremap'd) */
static struct map_desc pxa3xx_io_desc[] __initdata = {
	{	/* Mem Ctl */
		.virtual	= SMEMC_VIRT,
		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
		.length		= 0x00200000,
		.type		= MT_DEVICE
	}
};

void __init pxa3xx_map_io(void)
{
	pxa_map_io();
	iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc));
	pxa3xx_get_clk_frequency_khz(1);
}

/*
 * device registration specific to PXA3xx.
 */
void __init pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
	pxa_register_device(&pxa3xx_device_i2c_power, info);
}

/* on-chip devices registered unconditionally by pxa3xx_init() */
static struct platform_device *devices[] __initdata = {
	&pxa27x_device_udc,
	&pxa_device_pmu,
	&pxa_device_i2s,
	&pxa_device_asoc_ssp1,
	&pxa_device_asoc_ssp2,
	&pxa_device_asoc_ssp3,
	&pxa_device_asoc_ssp4,
	&pxa_device_asoc_platform,
	&sa1100_device_rtc,
	&pxa_device_rtc,
	&pxa27x_device_ssp1,
	&pxa27x_device_ssp2,
	&pxa27x_device_ssp3,
	&pxa3xx_device_ssp4,
	&pxa27x_device_pwm0,
	&pxa27x_device_pwm1,
};

/*
 * Core PXA3xx bring-up: record the reset status, register clocks, DMA,
 * power management, syscore ops and the on-chip platform devices.
 * Runs at postcore initcall time; does nothing on non-PXA3xx hardware.
 */
static int __init pxa3xx_init(void)
{
	int ret = 0;

	if (cpu_is_pxa3xx()) {

		reset_status = ARSR;

		/*
		 * clear RDH bit every time after reset
		 *
		 * Note: the last 3 bits DxS are write-1-to-clear so carefully
		 * preserve them here in case they will be referenced later
		 */
		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

		clkdev_add_table(pxa3xx_clkregs, ARRAY_SIZE(pxa3xx_clkregs));

		if ((ret = pxa_init_dma(IRQ_DMA, 32)))
			return ret;

		pxa3xx_init_pm();

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa3xx_mfp_syscore_ops);
		register_syscore_ops(&pxa_gpio_syscore_ops);
		register_syscore_ops(&pxa3xx_clock_syscore_ops);

		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
	}

	return ret;
}

postcore_initcall(pxa3xx_init);
gpl-2.0
PatrikKT/android_kernel_htc_a31ul
drivers/tty/serial/8250/8250_em.c
2276
5006
/*
 * Renesas Emma Mobile 8250 driver
 *
 * Copyright (C) 2012 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/slab.h>

#include "8250.h"

/*
 * The EM UART is 8250-compatible but maps the divisor latches at their
 * own 32-bit register slots instead of overlaying RX/IER; use private
 * pseudo register numbers for them.
 */
#define UART_DLL_EM 9
#define UART_DLM_EM 10

/* per-port driver state: the functional clock and the 8250 line number */
struct serial8250_em_priv {
	struct clk *sclk;
	int line;
};

/*
 * Register write accessor.  The EM block spaces registers 4 bytes apart;
 * most registers above the data register additionally sit one slot
 * higher than their 8250 number (the "(+1)" group below).
 */
static void serial8250_em_serial_out(struct uart_port *port, int off, int val)
{
	switch (off) {
	case UART_IER: /* IER @ 0x04 */
		val &= 0x0f;		/* only 4 valid bits - not Xscale */
		/* fall-through */
	case UART_DLL_EM: /* DLL @ 0x24 (+9) */
	case UART_DLM_EM: /* DLM @ 0x28 (+9) */
		writel(val, port->membase + (off << 2));
		break;
	case UART_FCR: /* FCR @ 0x0c (+1) */
	case UART_LCR: /* LCR @ 0x10 (+1) */
	case UART_MCR: /* MCR @ 0x14 (+1) */
	case UART_SCR: /* SCR @ 0x20 (+1) */
		writel(val, port->membase + ((off + 1) << 2));
		break;
	case UART_TX: /* TX @ 0x00 */
		writeb(val, port->membase);
		break;
	}
}

/*
 * Register read accessor, mirror image of serial8250_em_serial_out().
 * Unknown register numbers read back as 0.
 */
static unsigned int serial8250_em_serial_in(struct uart_port *port, int off)
{
	switch (off) {
	case UART_RX: /* RX @ 0x00 */
		return readb(port->membase);
	case UART_IER: /* IER @ 0x04 */
	case UART_IIR: /* IIR @ 0x08 */
	case UART_DLL_EM: /* DLL @ 0x24 (+9) */
	case UART_DLM_EM: /* DLM @ 0x28 (+9) */
		return readl(port->membase + (off << 2));
	case UART_MCR: /* MCR @ 0x14 (+1) */
	case UART_LSR: /* LSR @ 0x18 (+1) */
	case UART_MSR: /* MSR @ 0x1c (+1) */
	case UART_SCR: /* SCR @ 0x20 (+1) */
		return readl(port->membase + ((off + 1) << 2));
	}
	return 0;
}

/* read the 16-bit baud divisor from the dedicated DLL/DLM registers */
static int serial8250_em_serial_dl_read(struct uart_8250_port *up)
{
	return serial_in(up, UART_DLL_EM) | serial_in(up, UART_DLM_EM) << 8;
}

/* write the 16-bit baud divisor, low byte then high byte */
static void serial8250_em_serial_dl_write(struct uart_8250_port *up, int value)
{
	serial_out(up, UART_DLL_EM, value & 0xff);
	serial_out(up, UART_DLM_EM, (value >> 8) & 0xff);
}

/*
 * Bind one EM UART instance: claim its clock, describe the port to the
 * 8250 core with our accessors, and remember the assigned line number.
 */
static int serial8250_em_probe(struct platform_device *pdev)
{
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct resource *irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	struct serial8250_em_priv *priv;
	struct uart_8250_port up;
	int ret = -EINVAL;

	if (!mem || !irqres) {
		dev_err(&pdev->dev, "missing registers or irq\n");
		goto out;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "unable to allocate private data\n");
		ret = -ENOMEM;
		goto out;
	}

	priv->sclk = clk_get(&pdev->dev, "sclk");
	if (IS_ERR(priv->sclk)) {
		dev_err(&pdev->dev, "unable to get clock\n");
		ret = PTR_ERR(priv->sclk);
		goto out_free;
	}

	/* the clock must be running before its rate can be queried */
	clk_enable(priv->sclk);

	memset(&up, 0, sizeof(up));
	up.port.mapbase = mem->start;
	up.port.irq = irqres->start;
	up.port.type = PORT_UNKNOWN;
	up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_IOREMAP;
	up.port.dev = &pdev->dev;
	up.port.private_data = priv;
	up.port.uartclk = clk_get_rate(priv->sclk);
	up.port.iotype = UPIO_MEM32;
	up.port.serial_in = serial8250_em_serial_in;
	up.port.serial_out = serial8250_em_serial_out;
	up.dl_read = serial8250_em_serial_dl_read;
	up.dl_write = serial8250_em_serial_dl_write;

	ret = serial8250_register_8250_port(&up);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register 8250 port\n");
		goto out_unclk;
	}

	priv->line = ret;
	platform_set_drvdata(pdev, priv);
	return 0;

out_unclk:
	clk_disable(priv->sclk);
	clk_put(priv->sclk);
out_free:
	kfree(priv);
out:
	return ret;
}

/* tear down in reverse order of probe */
static int serial8250_em_remove(struct platform_device *pdev)
{
	struct serial8250_em_priv *priv = platform_get_drvdata(pdev);

	serial8250_unregister_port(priv->line);
	clk_disable(priv->sclk);
	clk_put(priv->sclk);
	kfree(priv);
	return 0;
}

static const struct of_device_id serial8250_em_dt_ids[] = {
	{ .compatible = "renesas,em-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, serial8250_em_dt_ids);

static struct platform_driver serial8250_em_platform_driver = {
	.driver = {
		.name		= "serial8250-em",
		.of_match_table	= serial8250_em_dt_ids,
		.owner		= THIS_MODULE,
	},
	.probe			= serial8250_em_probe,
	.remove			= serial8250_em_remove,
};

module_platform_driver(serial8250_em_platform_driver);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas Emma Mobile 8250 Driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
CyanogenMod/lge-kernel-star
drivers/scsi/sun3_NCR5380.c
2532
95185
/* sun3_NCR5380.c -- adapted from atari_NCR5380.c for the sun3 by Sam Creasey. */ /* * NCR 5380 generic driver routines. These should make it *trivial* * to implement 5380 SCSI drivers under Linux with a non-trantor * architecture. * * Note that these routines also work with NR53c400 family chips. * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 666-5836 * * DISTRIBUTION RELEASE 6. * * For more information, please consult * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * ++roman: To port the 5380 driver to the Atari, I had to do some changes in * this file, too: * * - Some of the debug statements were incorrect (undefined variables and the * like). I fixed that. * * - In information_transfer(), I think a #ifdef was wrong. Looking at the * possible DMA transfer size should also happen for REAL_DMA. I added this * in the #if statement. * * - When using real DMA, information_transfer() should return in a DATAOUT * phase after starting the DMA. It has nothing more to do. * * - The interrupt service routine should run main after end of DMA, too (not * only after RESELECTION interrupts). Additionally, it should _not_ test * for more interrupts after running main, since a DMA process may have * been started and interrupts are turned on now. The new int could happen * inside the execution of NCR5380_intr(), leading to recursive * calls. * * - I've added a function merge_contiguous_buffers() that tries to * merge scatter-gather buffers that are located at contiguous * physical addresses and can be processed with the same DMA setup. * Since most scatter-gather operations work on a page (4K) of * 4 buffers (1K), in more than 90% of all cases three interrupts and * DMA setup actions are saved. 
* * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA * and USLEEP, because these were messing up readability and will never be * needed for Atari SCSI. * * - I've revised the NCR5380_main() calling scheme (relax the 'main_running' * stuff), and 'main' is executed in a bottom half if awoken by an * interrupt. * * - The code was quite cluttered up by "#if (NDEBUG & NDEBUG_*) printk..." * constructs. In my eyes, this made the source rather unreadable, so I * finally replaced that by the *_PRINTK() macros. * */ #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_spi.h> /* * Further development / testing that should be done : * 1. Test linked command handling code after Eric is ready with * the high level code. */ #if (NDEBUG & NDEBUG_LISTS) #define LIST(x,y) \ { printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); \ if ((x)==(y)) udelay(5); } #define REMOVE(w,x,y,z) \ { printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, \ (void*)(w), (void*)(x), (void*)(y), (void*)(z)); \ if ((x)==(y)) udelay(5); } #else #define LIST(x,y) #define REMOVE(w,x,y,z) #endif #ifndef notyet #undef LINKED #endif /* * Design * Issues : * * The other Linux SCSI drivers were written when Linux was Intel PC-only, * and specifically for each board rather than each chip. This makes their * adaptation to platforms like the Mac (Some of which use NCR5380's) * more difficult than it has to be. * * Also, many of the SCSI drivers were written before the command queuing * routines were implemented, meaning their implementations of queued * commands were hacked on rather than designed in from the start. * * When I designed the Linux SCSI drivers I figured that * while having two different SCSI boards in a system might be useful * for debugging things, two of the same type wouldn't be used. * Well, I was wrong and a number of users have mailed me about running * multiple high-performance SCSI boards in a server. 
* * Finally, when I get questions from users, I have no idea what * revision of my driver they are running. * * This driver attempts to address these problems : * This is a generic 5380 driver. To use it on a different platform, * one simply writes appropriate system specific macros (ie, data * transfer - some PC's will use the I/O bus, 68K's must use * memory mapped) and drops this file in their 'C' wrapper. * * As far as command queueing, two queues are maintained for * each 5380 in the system - commands that haven't been issued yet, * and commands that are currently executing. This means that an * unlimited number of commands may be queued, letting * more commands propagate from the higher driver levels giving higher * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, * allowing multiple commands to propagate all the way to a SCSI-II device * while a command is already executing. * * To solve the multiple-boards-in-the-same-system problem, * there is a separate instance structure for each instance * of a 5380 in the system. So, multiple NCR5380 drivers will * be able to coexist with appropriate changes to the high level * SCSI code. * * A NCR5380_PUBLIC_REVISION macro is provided, with the release * number (updated for each public release) printed by the * NCR5380_print_options command, which should be called from the * wrapper detect function, so that I know what release of the driver * users are using. * * Issues specific to the NCR5380 : * * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead * piece of hardware that requires you to sit in a loop polling for * the REQ signal as long as you are connected. Some devices are * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect * while doing long seek operations. * * The workaround for this is to keep track of devices that have * disconnected. 
If the device hasn't disconnected, for commands that * should disconnect, we do something like * * while (!REQ is asserted) { sleep for N usecs; poll for M usecs } * * Some tweaking of N and M needs to be done. An algorithm based * on "time to data" would give the best results as long as short time * to datas (ie, on the same track) were considered, however these * broken devices are the exception rather than the rule and I'd rather * spend my time optimizing for the normal case. * * Architecture : * * At the heart of the design is a coroutine, NCR5380_main, * which is started when not running by the interrupt handler, * timer, and queue command function. It attempts to establish * I_T_L or I_T_L_Q nexuses by removing the commands from the * issue queue and calling NCR5380_select() if a nexus * is not established. * * Once a nexus is established, the NCR5380_information_transfer() * phase goes through the various phases as instructed by the target. * if the target goes into MSG IN and sends a DISCONNECT message, * the command structure is placed into the per instance disconnected * queue, and NCR5380_main tries to find more work. If USLEEP * was defined, and the target is idle for too long, the system * will try to sleep. * * If a command has disconnected, eventually an interrupt will trigger, * calling NCR5380_intr() which will in turn call NCR5380_reselect * to reestablish a nexus. This will run main if necessary. * * On command termination, the done function will be called as * appropriate. * * SCSI pointers are maintained in the SCp field of SCSI command * structures, being initialized after the command is connected * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. * Note that in violation of the standard, an implicit SAVE POINTERS operation * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS. */ /* * Using this file : * This file a skeleton Linux SCSI driver for the NCR 5380 series * of chips. 
To use it, you write an architecture specific functions * and macros and include this file in your driver. * * These macros control options : * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically * for commands that return with a CHECK CONDITION status. * * LINKED - if defined, linked commands are supported. * * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. * * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible * * These macros MUST be defined : * * NCR5380_read(register) - read from the specified register * * NCR5380_write(register, value) - write to the specific register * * Either real DMA *or* pseudo DMA may be implemented * REAL functions : * NCR5380_REAL_DMA should be defined if real DMA is to be used. * Note that the DMA setup functions should return the number of bytes * that they were able to program the controller for. * * Also note that generic i386/PC versions of these macros are * available as NCR5380_i386_dma_write_setup, * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. * * NCR5380_dma_write_setup(instance, src, count) - initialize * NCR5380_dma_read_setup(instance, dst, count) - initialize * NCR5380_dma_residual(instance); - residual count * * PSEUDO functions : * NCR5380_pwrite(instance, src, count) * NCR5380_pread(instance, dst, count); * * If nothing specific to this implementation needs doing (ie, with external * hardware), you must also define * * NCR5380_queue_command * NCR5380_reset * NCR5380_abort * NCR5380_proc_info * * to be the global entry points into the specific driver, ie * #define NCR5380_queue_command t128_queue_command. * * If this is not done, the routines will be defined as static functions * with the NCR5380* names and the user must provide a globally * accessible wrapper function. * * The generic driver is initialized by calling NCR5380_init(instance), * after setting the appropriate host specific fields and ID. 
If the * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, * possible) function may be used. Before the specific driver initialization * code finishes, NCR5380_print_options should be called. */ static struct Scsi_Host *first_instance = NULL; static struct scsi_host_template *the_template = NULL; /* Macros ease life... :-) */ #define SETUP_HOSTDATA(in) \ struct NCR5380_hostdata *hostdata = \ (struct NCR5380_hostdata *)(in)->hostdata #define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata) #define NEXT(cmd) (*(struct scsi_cmnd **)&((cmd)->host_scribble)) #define NEXTADDR(cmd) ((struct scsi_cmnd **)&((cmd)->host_scribble)) #define HOSTNO instance->host_no #define H_NO(cmd) (cmd)->device->host->host_no #define SGADDR(buffer) (void *)(((unsigned long)sg_virt(((buffer))))) #ifdef SUPPORT_TAGS /* * Functions for handling tagged queuing * ===================================== * * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes: * * Using consecutive numbers for the tags is no good idea in my eyes. There * could be wrong re-usings if the counter (8 bit!) wraps and some early * command has been preempted for a long time. My solution: a bitfield for * remembering used tags. * * There's also the problem that each target has a certain queue size, but we * cannot know it in advance :-( We just see a QUEUE_FULL status being * returned. So, in this case, the driver internal queue size assumption is * reduced to the number of active tags if QUEUE_FULL is returned by the * target. The command is returned to the mid-level, but with status changed * to BUSY, since --as I've seen-- the mid-level can't handle QUEUE_FULL * correctly. * * We're also not allowed running tagged commands as long as an untagged * command is active. And REQUEST SENSE commands after a contingent allegiance * condition _must_ be untagged. 
To keep track whether an untagged command has * been issued, the host->busy array is still employed, as it is without * support for tagged queuing. * * One could suspect that there are possible race conditions between * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(), * which already guaranteed to be running at most once. It is also the only * place where tags/LUNs are allocated. So no other allocation can slip * between that pair, there could only happen a reselection, which can free a * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes * important: the tag bit must be cleared before 'nr_allocated' is decreased. */ /* -1 for TAG_NONE is not possible with unsigned char cmd->tag */ #undef TAG_NONE #define TAG_NONE 0xff /* For the m68k, the number of bits in 'allocated' must be a multiple of 32! */ #if (MAX_TAGS % 32) != 0 #error "MAX_TAGS must be a multiple of 32!" #endif typedef struct { char allocated[MAX_TAGS/8]; int nr_allocated; int queue_size; } TAG_ALLOC; static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */ static void __init init_tags( void ) { int target, lun; TAG_ALLOC *ta; if (!setup_use_tagged_queuing) return; for( target = 0; target < 8; ++target ) { for( lun = 0; lun < 8; ++lun ) { ta = &TagAlloc[target][lun]; memset( &ta->allocated, 0, MAX_TAGS/8 ); ta->nr_allocated = 0; /* At the beginning, assume the maximum queue size we could * support (MAX_TAGS). This value will be decreased if the target * returns QUEUE_FULL status. */ ta->queue_size = MAX_TAGS; } } } /* Check if we can issue a command to this LUN: First see if the LUN is marked * busy by an untagged command. If the command should use tagged queuing, also * check that there is a free tag and the target's queue won't overflow. This * function should be called with interrupts disabled to avoid race * conditions. 
*/ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) { SETUP_HOSTDATA(cmd->device->host); if (hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)) return( 1 ); if (!should_be_tagged || !setup_use_tagged_queuing || !cmd->device->tagged_supported) return( 0 ); if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) { TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); return( 1 ); } return( 0 ); } /* Allocate a tag for a command (there are no checks anymore, check_lun_busy() * must be called before!), or reserve the LUN in 'busy' if the command is * untagged. */ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) { SETUP_HOSTDATA(cmd->device->host); /* If we or the target don't support tagged queuing, allocate the LUN for * an untagged command. */ if (!should_be_tagged || !setup_use_tagged_queuing || !cmd->device->tagged_supported) { cmd->tag = TAG_NONE; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged " "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); } else { TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS ); set_bit( cmd->tag, &ta->allocated ); ta->nr_allocated++; TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d " "(now %d tags in use)\n", H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun, ta->nr_allocated ); } } /* Mark the tag of command 'cmd' as free, or in case of an untagged command, * unlock the LUN. 
*/ static void cmd_free_tag(struct scsi_cmnd *cmd) { SETUP_HOSTDATA(cmd->device->host); if (cmd->tag == TAG_NONE) { hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); } else if (cmd->tag >= MAX_TAGS) { printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", H_NO(cmd), cmd->tag ); } else { TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; clear_bit( cmd->tag, &ta->allocated ); ta->nr_allocated--; TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n", H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun ); } } static void free_all_tags( void ) { int target, lun; TAG_ALLOC *ta; if (!setup_use_tagged_queuing) return; for( target = 0; target < 8; ++target ) { for( lun = 0; lun < 8; ++lun ) { ta = &TagAlloc[target][lun]; memset( &ta->allocated, 0, MAX_TAGS/8 ); ta->nr_allocated = 0; } } } #endif /* SUPPORT_TAGS */ /* * Function: void merge_contiguous_buffers(struct scsi_cmnd *cmd) * * Purpose: Try to merge several scatter-gather requests into one DMA * transfer. This is possible if the scatter buffers lie on * physical contiguous addresses. * * Parameters: struct scsi_cmnd *cmd * The command to work on. The first scatter buffer's data are * assumed to be already transferred into ptr/this_residual. 
 */

static void merge_contiguous_buffers(struct scsi_cmnd *cmd)
{
	unsigned long endaddr;
#if (NDEBUG & NDEBUG_MERGING)
	unsigned long oldlen = cmd->SCp.this_residual;
	int cnt = 1;
#endif

	/* Walk forward while the next SG entry starts exactly at the
	 * physical address one past the current run's end, folding each
	 * such entry into ptr/this_residual. */
	for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
	     cmd->SCp.buffers_residual &&
	     virt_to_phys(SGADDR(&(cmd->SCp.buffer[1]))) == endaddr; ) {

		MER_PRINTK("VTOP(%p) == %08lx -> merging\n",
			   SGADDR(&(cmd->SCp.buffer[1])), endaddr);
#if (NDEBUG & NDEBUG_MERGING)
		++cnt;
#endif
		++cmd->SCp.buffer;
		--cmd->SCp.buffers_residual;
		cmd->SCp.this_residual += cmd->SCp.buffer->length;
		endaddr += cmd->SCp.buffer->length;
	}
#if (NDEBUG & NDEBUG_MERGING)
	if (oldlen != cmd->SCp.this_residual)
		MER_PRINTK("merged %d buffers from %p, new length %08x\n",
			   cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
#endif
}

/*
 * Function : void initialize_SCp(struct scsi_cmnd *cmd)
 *
 * Purpose : initialize the saved data pointers for cmd to point to the
 *	start of the buffer.
 *
 * Inputs : cmd - struct scsi_cmnd structure to have pointers reset.
 */

static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
{
	/*
	 * Initialize the Scsi Pointer field so that all of the commands in the
	 * various queues are valid.
	 */

	if (scsi_bufflen(cmd)) {
		cmd->SCp.buffer = scsi_sglist(cmd);
		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
		cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
		cmd->SCp.this_residual = cmd->SCp.buffer->length;

		/* ++roman: Try to merge some scatter-buffers if they are at
		 * contiguous physical addresses.
		 */
		/* NOTE(review): merging is disabled here (call commented
		 * out) — presumably it caused problems on this platform;
		 * confirm before re-enabling. */
//		merge_contiguous_buffers( cmd );
	} else {
		/* no data phase for this command */
		cmd->SCp.buffer = NULL;
		cmd->SCp.buffers_residual = 0;
		cmd->SCp.ptr = NULL;
		cmd->SCp.this_residual = 0;
	}
}

#include <linux/delay.h>

/* Debug helpers: tables mapping 5380 register bits to printable names.
 * NOTE(review): the guard below is hardwired to 1, so the real versions
 * are always compiled in and the #else dummies are dead code. */
#if 1
static struct {
	unsigned char mask;
	const char *name;
} signals[] = {
	{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
	{ SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" },
	{ SR_IO, "IO" }, { SR_SEL, "SEL" }, {0, NULL}
}, basrs[] = {
	{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}
}, icrs[] = {
	{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
	{ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
	{ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
	{0, NULL}
}, mrs[] = {
	{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
	{MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"},
	{MR_ENABLE_PAR_INTR, "MODE PARITY INTR"},
	{MR_ENABLE_EOP_INTR,"MODE EOP INTR"},
	{MR_MONITOR_BSY, "MODE MONITOR BSY"}, {MR_DMA_MODE, "MODE DMA"},
	{MR_ARBITRATE, "MODE ARBITRATION"}, {0, NULL}
};

/*
 * Function : void NCR5380_print(struct Scsi_Host *instance)
 *
 * Purpose : print the SCSI bus signals for debugging purposes
 *
 * Input : instance - which NCR5380
 */

static void NCR5380_print(struct Scsi_Host *instance)
{
	unsigned char status, data, basr, mr, icr, i;
	unsigned long flags;

	/* snapshot all registers atomically so the dump is consistent */
	local_irq_save(flags);
	data = NCR5380_read(CURRENT_SCSI_DATA_REG);
	status = NCR5380_read(STATUS_REG);
	mr = NCR5380_read(MODE_REG);
	icr = NCR5380_read(INITIATOR_COMMAND_REG);
	basr = NCR5380_read(BUS_AND_STATUS_REG);
	local_irq_restore(flags);
	printk("STATUS_REG: %02x ", status);
	for (i = 0; signals[i].mask ; ++i)
		if (status & signals[i].mask)
			printk(",%s", signals[i].name);
	printk("\nBASR: %02x ", basr);
	for (i = 0; basrs[i].mask ; ++i)
		if (basr & basrs[i].mask)
			printk(",%s", basrs[i].name);
	printk("\nICR: %02x ", icr);
	for (i = 0; icrs[i].mask; ++i)
		if (icr & icrs[i].mask)
			printk(",%s", icrs[i].name);
	printk("\nMODE: %02x ", mr);
	for (i = 0; mrs[i].mask; ++i)
		if (mr & mrs[i].mask)
			printk(",%s", mrs[i].name);
	printk("\n");
}

/* bus-phase code -> name table; PHASE_UNKNOWN is the sentinel entry */
static struct {
	unsigned char value;
	const char *name;
} phases[] = {
	{PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"},
	{PHASE_CMDOUT, "CMDOUT"}, {PHASE_STATIN, "STATIN"},
	{PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
	{PHASE_UNKNOWN, "UNKNOWN"}
};

/*
 * Function : void NCR5380_print_phase(struct Scsi_Host *instance)
 *
 * Purpose : print the current SCSI phase for debugging purposes
 *
 * Input : instance - which NCR5380
 */

static void NCR5380_print_phase(struct Scsi_Host *instance)
{
	unsigned char status;
	int i;

	status = NCR5380_read(STATUS_REG);
	if (!(status & SR_REQ))
		/* phase bits are only valid while the target asserts REQ */
		printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO);
	else {
		for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
		     (phases[i].value != (status & PHASE_MASK)); ++i)
			;
		printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name);
	}
}

#else /* !NDEBUG */

/* dummies... */
__inline__ void NCR5380_print(struct Scsi_Host *instance)
{ };
__inline__ void NCR5380_print_phase(struct Scsi_Host *instance)
{ };

#endif

/*
 * ++roman: New scheme of calling NCR5380_main()
 *
 * If we're not in an interrupt, we can call our main directly, it cannot be
 * already running. Else, we queue it on a task queue, if not 'main_running'
 * tells us that a lower level is already executing it. This way,
 * 'main_running' needs not be protected in a special way.
 *
 * queue_main() is a utility function for putting our main onto the task
 * queue, if main_running is false. It should be called only from a
 * interrupt or bottom half.
 */

#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

/* set while NCR5380_main() is executing; see the scheme described above */
static volatile int main_running = 0;
static DECLARE_WORK(NCR5380_tqueue, NCR5380_main);

/* Schedule NCR5380_main() to run after interrupt processing, unless it
 * is already running (in which case it will pick up new work itself). */
static __inline__ void queue_main(void)
{
	if (!main_running) {
		/* If in interrupt and NCR5380_main() not already running,
		   queue it on the 'immediate' task queue, to be processed
		   immediately after the current interrupt processing has
		   finished. */
		schedule_work(&NCR5380_tqueue);
	}
	/* else: nothing to do: the running NCR5380_main() will pick up
	   any newly queued command. */
}

/* One-time global initialisation; currently only prints a debug line. */
static inline void NCR5380_all_init (void)
{
	static int done = 0;
	if (!done) {
		INI_PRINTK("scsi : NCR5380_all_init()\n");
		done = 1;
	}
}

/*
 * Function : void NCR58380_print_options (struct Scsi_Host *instance)
 *
 * Purpose : called by probe code indicating the NCR5380 driver
 *	options that were selected.
 *
 * Inputs : instance, pointer to this instance. Unused.
 */

static void __init NCR5380_print_options (struct Scsi_Host *instance)
{
	printk(" generic options"
#ifdef AUTOSENSE
	       " AUTOSENSE"
#endif
#ifdef REAL_DMA
	       " REAL DMA"
#endif
#ifdef PARITY
	       " PARITY"
#endif
#ifdef SUPPORT_TAGS
	       " SCSI-2 TAGGED QUEUING"
#endif
	       );
	printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
}

/*
 * Function : void NCR5380_print_status (struct Scsi_Host *instance)
 *
 * Purpose : print commands in the various queues, called from
 *	NCR5380_abort and NCR5380_debug to aid debugging.
 *
 * Inputs : instance, pointer to this instance.
 */

static void NCR5380_print_status (struct Scsi_Host *instance)
{
	char *pr_bfr;
	char *start;
	int len;

	NCR_PRINT(NDEBUG_ANY);
	NCR_PRINT_PHASE(NDEBUG_ANY);

	/* reuse the /proc formatter to render the queues into a page */
	pr_bfr = (char *) __get_free_page(GFP_ATOMIC);
	if (!pr_bfr) {
		printk("NCR5380_print_status: no memory for print buffer\n");
		return;
	}
	len = NCR5380_proc_info(instance, pr_bfr, &start, 0, PAGE_SIZE, 0);
	pr_bfr[len] = 0;
	printk("\n%s\n", pr_bfr);
	free_page((unsigned long) pr_bfr);
}

/******************************************/
/*
 * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED]
 *
 * *buffer: I/O buffer
 * **start: if inout == FALSE pointer into buffer where user read should start
 * offset: current offset
 * length: length of buffer
 * hostno: Scsi_Host host_no
 * inout: TRUE - user is writing; FALSE - user is reading
 *
 * Return the number of bytes read from or written
 */

/* Bounded append into the /proc buffer; the 20-byte slop leaves room
 * for expanded format arguments (crude, but matches historic usage). */
#undef SPRINTF
#define SPRINTF(fmt,args...) \
	do { if (pos + strlen(fmt) + 20 /* slop */ < buffer + length) \
		pos += sprintf(pos, fmt , ## args); } while(0)

static char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer,
			      int length);

static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer,
			     char **start, off_t offset, int length, int inout)
{
	char *pos = buffer;
	struct NCR5380_hostdata *hostdata;
	struct scsi_cmnd *ptr;
	unsigned long flags;
	off_t begin = 0;
/* Track how much of the requested offset has been consumed; rewind the
 * write cursor so only the still-wanted portion accumulates. */
#define check_offset()					\
	do {						\
		if (pos - buffer < offset - begin) {	\
			begin += pos - buffer;		\
			pos = buffer;			\
		}					\
	} while (0)

	hostdata = (struct NCR5380_hostdata *)instance->hostdata;

	if (inout) { /* Has data been written to the file ? */
		return(-ENOSYS); /* Currently this is a no-op */
	}
	SPRINTF("NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
	check_offset();
	/* irqs off: the queues must not change while we walk them */
	local_irq_save(flags);
	SPRINTF("NCR5380: coroutine is%s running.\n",
		main_running ? "" : "n't");
	check_offset();
	if (!hostdata->connected)
		SPRINTF("scsi%d: no currently connected command\n", HOSTNO);
	else
		pos = lprint_Scsi_Cmnd ((struct scsi_cmnd *) hostdata->connected,
					pos, buffer, length);
	SPRINTF("scsi%d: issue_queue\n", HOSTNO);
	check_offset();
	for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr;
	     ptr = NEXT(ptr)) {
		pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length);
		check_offset();
	}

	SPRINTF("scsi%d: disconnected_queue\n", HOSTNO);
	check_offset();
	for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
	     ptr = NEXT(ptr)) {
		pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length);
		check_offset();
	}

	local_irq_restore(flags);
	*start = buffer + (offset - begin);
	if (pos - buffer < offset - begin)
		return 0;
	else if (pos - buffer - (offset - begin) < length)
		return pos - buffer - (offset - begin);
	return length;
}

/* Format one command (target/lun and CDB bytes) into the /proc buffer;
 * returns the advanced write position. */
static char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer,
			      int length)
{
	int i, s;
	unsigned char *command;
	SPRINTF("scsi%d: destination target %d, lun %d\n",
		H_NO(cmd), cmd->device->id, cmd->device->lun);
	SPRINTF(" command = ");
	command = cmd->cmnd;
	SPRINTF("%2d (0x%02x)", command[0], command[0]);
	for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
		SPRINTF(" %02x", command[i]);
	SPRINTF("\n");
	return pos;
}

/*
 * Function : void NCR5380_init (struct Scsi_Host *instance)
 *
 * Purpose : initializes *instance and corresponding 5380 chip.
 *
 * Inputs : instance - instantiation of the 5380 driver.
 *
 * Notes : I assume that the host, hostno, and id bits have been
 *	set correctly. I don't care about the irq and other fields.
 *
 */

static int NCR5380_init (struct Scsi_Host *instance, int flags)
{
	int i;
	SETUP_HOSTDATA(instance);

	NCR5380_all_init();

	hostdata->aborted = 0;
	hostdata->id_mask = 1 << instance->this_id;
	/* mask of all SCSI IDs with higher arbitration priority than ours */
	hostdata->id_higher_mask = 0;
	for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
		if (i > hostdata->id_mask)
			hostdata->id_higher_mask |= i;
	for (i = 0; i < 8; ++i)
		hostdata->busy[i] = 0;
#ifdef SUPPORT_TAGS
	init_tags();
#endif
#if defined (REAL_DMA)
	hostdata->dma_len = 0;
#endif
	hostdata->targets_present = 0;
	hostdata->connected = NULL;
	hostdata->issue_queue = NULL;
	hostdata->disconnected_queue = NULL;
	hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT;

	if (!the_template) {
		the_template = instance->hostt;
		first_instance = instance;
	}

#ifndef AUTOSENSE
	if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1))
		printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n"
		       " without AUTOSENSE option, contingent allegiance conditions may\n"
		       " be incorrectly cleared.\n", HOSTNO);
#endif /* def AUTOSENSE */

	/* put the chip into a known quiescent state */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(TARGET_COMMAND_REG, 0);
	NCR5380_write(SELECT_ENABLE_REG, 0);

	return 0;
}

/*
 * Function : int NCR5380_queue_command (struct scsi_cmnd *cmd,
 *	void (*done)(struct scsi_cmnd *))
 *
 * Purpose : enqueues a SCSI command
 *
 * Inputs : cmd - SCSI command, done - function called on completion, with
 *	a pointer to the command descriptor.
 *
 * Returns : 0
 *
 * Side effects :
 *	cmd is added to the per instance issue_queue, with minor
 *	twiddling done to the host specific fields of cmd. If the
 *	main coroutine is not running, it is restarted.
 *
 */

/* Only make static if a wrapper function is used */
static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
				     void (*done)(struct scsi_cmnd *))
{
	SETUP_HOSTDATA(cmd->device->host);
	struct scsi_cmnd *tmp;
	unsigned long flags;

#if (NDEBUG & NDEBUG_NO_WRITE)
	/* debug build: refuse writes outright so test media stay intact */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
	case WRITE_10:
		printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n",
		       H_NO(cmd));
		cmd->result = (DID_ERROR << 16);
		done(cmd);
		return 0;
	}
#endif /* (NDEBUG & NDEBUG_NO_WRITE) */

#ifdef NCR5380_STATS
# if 0
	if (!hostdata->connected && !hostdata->issue_queue &&
	    !hostdata->disconnected_queue) {
		hostdata->timebase = jiffies;
	}
# endif
# ifdef NCR5380_STAT_LIMIT
	if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
# endif
		/* start-of-command accounting; matched by collect_stats()
		 * at completion time */
		switch (cmd->cmnd[0]) {
		case WRITE:
		case WRITE_6:
		case WRITE_10:
			hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
			hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
			hostdata->pendingw++;
			break;
		case READ:
		case READ_6:
		case READ_10:
			hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
			hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
			hostdata->pendingr++;
			break;
		}
#endif

	/*
	 * We use the host_scribble field as a pointer to the next command
	 * in a queue
	 */

	NEXT(cmd) = NULL;
	cmd->scsi_done = done;

	cmd->result = 0;

	/*
	 * Insert the cmd into the issue queue. Note that REQUEST SENSE
	 * commands are added to the head of the queue since any command will
	 * clear the contingent allegiance condition that exists and the
	 * sense data is only guaranteed to be valid while the condition exists.
	 */

	local_irq_save(flags);
	/* ++guenther: now that the issue queue is being set up, we can lock ST-DMA.
	 * Otherwise a running NCR5380_main may steal the lock.
	 * Lock before actually inserting due to fairness reasons explained in
	 * atari_scsi.c. If we insert first, then it's impossible for this driver
	 * to release the lock.
	 * Stop timer for this command while waiting for the lock, or timeouts
	 * may happen (and they really do), and it's no good if the command doesn't
	 * appear in any of the queues.
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because also a timer int can trigger an abort or reset, which would
	 * alter queues and touch the lock.
	 */
	if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
		/* empty queue or REQUEST SENSE: insert at the head */
		LIST(cmd, hostdata->issue_queue);
		NEXT(cmd) = hostdata->issue_queue;
		hostdata->issue_queue = cmd;
	} else {
		/* otherwise append at the tail */
		for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
		     NEXT(tmp); tmp = NEXT(tmp))
			;
		LIST(cmd, tmp);
		NEXT(tmp) = cmd;
	}

	local_irq_restore(flags);

	QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd),
		  (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");

	/* If queue_command() is called from an interrupt (real one or bottom
	 * half), we let queue_main() do the job of taking care about main. If it
	 * is already running, this is a no-op, else main will be queued.
	 *
	 * If we're not in an interrupt, we can call NCR5380_main()
	 * unconditionally, because it cannot be already running.
	 */
	/* NOTE(review): the second test reads the interrupt-priority bits
	 * out of the saved m68k status register in 'flags' — presumably
	 * catching "interrupts masked" contexts that in_interrupt() does
	 * not; confirm against the m68k SR layout. */
	if (in_interrupt() || ((flags >> 8) & 7) >= 6)
		queue_main();
	else
		NCR5380_main(NULL);
	return 0;
}

static DEF_SCSI_QCMD(NCR5380_queue_command)

/*
 * Function : NCR5380_main (void)
 *
 * Purpose : NCR5380_main is a coroutine that runs as long as more work can
 *	be done on the NCR5380 host adapters in a system. Both
 *	NCR5380_queue_command() and NCR5380_intr() will try to start it
 *	in case it is not running.
 *
 * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should
 *	reenable them. This prevents reentrancy and kernel stack overflow.
 */

static void NCR5380_main (struct work_struct *bl)
{
	struct scsi_cmnd *tmp, *prev;
	struct Scsi_Host *instance = first_instance;
	struct NCR5380_hostdata *hostdata = HOSTDATA(instance);
	int done;
	unsigned long flags;

	/*
	 * We run (with interrupts disabled) until we're sure that none of
	 * the host adapters have anything that can be done, at which point
	 * we set main_running to 0 and exit.
	 *
	 * Interrupts are enabled before doing various other internal
	 * instructions, after we've decided that we need to run through
	 * the loop again.
	 *
	 * this should prevent any race conditions.
	 *
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because also a timer int can trigger an abort or reset, which can
	 * alter queues and touch the Falcon lock.
	 */

	/* Tell int handlers main() is now already executing. Note that no
	   races are possible here. If an int comes in before 'main_running'
	   is set here, and queues/executes main via the task queue, it
	   doesn't do any harm, just this instance of main won't find any
	   work left to do. */
	if (main_running)
		return;
	main_running = 1;

	local_save_flags(flags);
	do {
		local_irq_disable(); /* Freeze request queues */
		done = 1;

		if (!hostdata->connected) {
			MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO );
			/*
			 * Search through the issue_queue for a command destined
			 * for a target that's not busy.
			 */
#if (NDEBUG & NDEBUG_LISTS)
			/* debug: detect a self-referential (looping) queue */
			for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL;
			     tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp))
				;
			if ((tmp == prev) && tmp) printk(" LOOP\n");/* else printk("\n");*/
#endif
			for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
			     prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) {
#if (NDEBUG & NDEBUG_LISTS)
				if (prev != tmp)
					printk("MAIN tmp=%p target=%d busy=%d lun=%d\n",
					       tmp, tmp->target, hostdata->busy[tmp->target],
					       tmp->lun);
#endif
				/* When we find one, remove it from the issue queue.
				 */
				/* ++guenther: possible race with Falcon locking */
				if (
#ifdef SUPPORT_TAGS
				    !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE)
#else
				    !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))
#endif
				    ) {
					/* ++guenther: just to be sure, this must be atomic */
					local_irq_disable();
					if (prev) {
						REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
						NEXT(prev) = NEXT(tmp);
					} else {
						REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
						hostdata->issue_queue = NEXT(tmp);
					}
					NEXT(tmp) = NULL;

					/* reenable interrupts after finding one */
					local_irq_restore(flags);

					/*
					 * Attempt to establish an I_T_L nexus here.
					 * On success, instance->hostdata->connected is set.
					 * On failure, we must add the command back to the
					 * issue queue so we can keep trying.
					 */
					MAIN_PRINTK("scsi%d: main(): command for target %d "
						    "lun %d removed from issue_queue\n",
						    HOSTNO, tmp->target, tmp->lun);
					/*
					 * REQUEST SENSE commands are issued without tagged
					 * queueing, even on SCSI-II devices because the
					 * contingent allegiance condition exists for the
					 * entire unit.
					 */
					/* ++roman: ...and the standard also requires that
					 * REQUEST SENSE command are untagged.
					 */

#ifdef SUPPORT_TAGS
					cmd_get_tag( tmp, tmp->cmnd[0] != REQUEST_SENSE );
#endif
					if (!NCR5380_select(instance, tmp,
					    (tmp->cmnd[0] == REQUEST_SENSE) ?
					    TAG_NONE : TAG_NEXT)) {
						/* nexus established (or target absent
						 * and command already completed) */
						break;
					} else {
						/* select failed: requeue at the head and
						 * release the tag we just took */
						local_irq_disable();
						LIST(tmp, hostdata->issue_queue);
						NEXT(tmp) = hostdata->issue_queue;
						hostdata->issue_queue = tmp;
#ifdef SUPPORT_TAGS
						cmd_free_tag( tmp );
#endif
						local_irq_restore(flags);
						MAIN_PRINTK("scsi%d: main(): select() failed, "
							    "returned to issue_queue\n", HOSTNO);
						if (hostdata->connected)
							break;
					}
				} /* if target/lun/target queue is not busy */
			} /* for issue_queue */
		} /* if (!hostdata->connected) */

		if (hostdata->connected
#ifdef REAL_DMA
		    && !hostdata->dma_len
#endif
		    ) {
			local_irq_restore(flags);
			MAIN_PRINTK("scsi%d: main: performing information transfer\n",
				    HOSTNO);
			NCR5380_information_transfer(instance);
			MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO);
			done = 0;
		}
	} while (!done);

	/* Better allow ints _after_ 'main_running' has been cleared, else
	   an interrupt could believe we'll pick up the work it left for
	   us, but we won't see it anymore here... */
	main_running = 0;
	local_irq_restore(flags);
}

#ifdef REAL_DMA
/*
 * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
 *
 * Purpose : Called by interrupt handler when DMA finishes or a phase
 *	mismatch occurs (which would finish the DMA transfer).
 *
 * Inputs : instance - this instance of the NCR5380.
 *
 */

static void NCR5380_dma_complete( struct Scsi_Host *instance )
{
	SETUP_HOSTDATA(instance);
	int transfered;
	unsigned char **data;
	volatile int *count;

	if (!hostdata->connected) {
		printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
		       "no connected cmd\n", HOSTNO);
		return;
	}

	DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
		   HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
		   NCR5380_read(STATUS_REG));

	/* Sun3 DMA engine shutdown; a non-zero return means the UDC byte
	 * counter overran — a condition this driver cannot recover from. */
	if((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
		printk("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", HOSTNO);
		printk("please e-mail sammy@sammy.net with a description of how this\n");
		printk("error was produced.\n");
		BUG();
	}

	/* make sure we're not stuck in a data phase */
	if((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
	   (BASR_PHASE_MATCH | BASR_ACK)) {
		printk("scsi%d: BASR %02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG));
		printk("scsi%d: bus stuck in data phase -- probably a single byte "
		       "overrun!\n", HOSTNO);
		printk("not prepared for this error!\n");
		printk("please e-mail sammy@sammy.net with a description of how this\n");
		printk("error was produced.\n");
		BUG();
	}

	/* clear any latched interrupt and take the chip out of DMA mode */
	(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

	/* advance the command's data pointers past what DMA moved */
	transfered = hostdata->dma_len - NCR5380_dma_residual(instance);
	hostdata->dma_len = 0;

	data = (unsigned char **) &(hostdata->connected->SCp.ptr);
	count = &(hostdata->connected->SCp.this_residual);
	*data += transfered;
	*count -= transfered;
}
#endif /* REAL_DMA */

/*
 * Function : void NCR5380_intr (int irq)
 *
 * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
 *	from the disconnected queue, and restarting NCR5380_main()
 *	as required.
 *
 * Inputs : int irq, irq that caused this interrupt.
 *
 */

static irqreturn_t NCR5380_intr (int irq, void *dev_id)
{
	struct Scsi_Host *instance = first_instance;
	int done = 1, handled = 0;
	unsigned char basr;

	INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO);

	/* Look for pending interrupts */
	basr = NCR5380_read(BUS_AND_STATUS_REG);
	INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr);
	/* dispatch to appropriate routine if found and done=0 */
	if (basr & BASR_IRQ) {
		NCR_PRINT(NDEBUG_INTR);
		if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
			/* SEL+IO: a target is reselecting us */
			done = 0;
//			ENABLE_IRQ();
			INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO);
			NCR5380_reselect(instance);
			(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		}
		else if (basr & BASR_PARITY_ERROR) {
			INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO);
			/* reading this register clears the latched condition */
			(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		}
		else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
			INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO);
			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		}
		else {
			/*
			 * The rest of the interrupt conditions can occur only during a
			 * DMA transfer
			 */

#if defined(REAL_DMA)
			/*
			 * We should only get PHASE MISMATCH and EOP interrupts if we have
			 * DMA enabled, so do a sanity check based on the current setting
			 * of the MODE register.
			 */

			if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) &&
			    ((basr & BASR_END_DMA_TRANSFER) ||
			     !(basr & BASR_PHASE_MATCH))) {

				INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
				NCR5380_dma_complete( instance );
				done = 0;
//				ENABLE_IRQ();
			} else
#endif /* REAL_DMA */
			{
/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
				if (basr & BASR_PHASE_MATCH)
					INT_PRINTK("scsi%d: unknown interrupt, "
						   "BASR 0x%x, MR 0x%x, SR 0x%x\n",
						   HOSTNO, basr, NCR5380_read(MODE_REG),
						   NCR5380_read(STATUS_REG));
				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#ifdef SUN3_SCSI_VME
				dregs->csr |= CSR_DMA_ENABLE;
#endif
			}
		} /* if !(SELECTION || PARITY) */
		handled = 1;
	} /* BASR & IRQ */
	else {
		/* spurious: our IRQ fired but the chip shows no cause */
		printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, "
		       "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr,
		       NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
		(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#ifdef SUN3_SCSI_VME
		dregs->csr |= CSR_DMA_ENABLE;
#endif
	}

	if (!done) {
		INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO);
		/* Put a call to NCR5380_main() on the queue...
		 */
		queue_main();
	}
	return IRQ_RETVAL(handled);
}

#ifdef NCR5380_STATS
/* Completion-side accounting: closes the books opened when the command
 * was queued (see NCR5380_queue_command_lck). */
static void collect_stats(struct NCR5380_hostdata *hostdata,
			  struct scsi_cmnd *cmd)
{
# ifdef NCR5380_STAT_LIMIT
	if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
# endif
		switch (cmd->cmnd[0]) {
		case WRITE:
		case WRITE_6:
		case WRITE_10:
			hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
			/*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
			hostdata->pendingw--;
			break;
		case READ:
		case READ_6:
		case READ_10:
			hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
			/*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
			hostdata->pendingr--;
			break;
		}
}
#endif

/*
 * Function : int NCR5380_select(struct Scsi_Host *instance,
 *	struct scsi_cmnd *cmd, int tag);
 *
 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
 *	including ARBITRATION, SELECTION, and initial message out for
 *	IDENTIFY and queue messages.
 *
 * Inputs : instance - instantiation of the 5380 driver on which this
 *	target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for
 *	new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
 *	the command that is presently connected.
 *
 * Returns : -1 if selection could not execute for some reason,
 *	0 if selection succeeded or failed because the target
 *	did not respond.
 *
 * Side effects :
 *	If bus busy, arbitration failed, etc, NCR5380_select() will exit
 *		with registers as they should have been on entry - ie
 *		SELECT_ENABLE will be set appropriately, the NCR5380
 *		will cease to drive any SCSI bus signals.
 *
 *	If successful : I_T_L or I_T_L_Q nexus will be established,
 *		instance->connected will be set to cmd.
 *		SELECT interrupt will be disabled.
 *
 *	If failed (no target) : cmd->scsi_done() will be called, and the
 *		cmd->result host byte set to DID_BAD_TARGET.
 */

static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
			  int tag)
{
	SETUP_HOSTDATA(instance);
	unsigned char tmp[3], phase;
	unsigned char *data;
	int len;
	unsigned long timeout;
	unsigned long flags;

	hostdata->restart_select = 0;
	NCR_PRINT(NDEBUG_ARBITRATION);
	ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO,
		   instance->this_id);

	/*
	 * Set the phase bits to 0, otherwise the NCR5380 won't drive the
	 * data bus during SELECTION.
	 */

	local_irq_save(flags);
	if (hostdata->connected) {
		/* a reselection beat us to the bus */
		local_irq_restore(flags);
		return -1;
	}
	NCR5380_write(TARGET_COMMAND_REG, 0);

	/*
	 * Start arbitration.
	 */

	NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
	NCR5380_write(MODE_REG, MR_ARBITRATE);

	local_irq_restore(flags);

	/* Wait for arbitration logic to complete */
#if NCR_TIMEOUT
	{
		unsigned long timeout = jiffies + 2*NCR_TIMEOUT;

		while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) &&
		       time_before(jiffies, timeout) && !hostdata->connected)
			;
		if (time_after_eq(jiffies, timeout)) {
			printk("scsi : arbitration timeout at %d\n", __LINE__);
			NCR5380_write(MODE_REG, MR_BASE);
			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
			return -1;
		}
	}
#else /* NCR_TIMEOUT */
	while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) &&
	       !hostdata->connected)
		;
#endif

	ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO);

	if (hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		return -1;
	}
	/*
	 * The arbitration delay is 2.2us, but this is a minimum and there is
	 * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
	 * the integral nature of udelay().
	 *
	 */

	udelay(3);

	/* Check for lost arbitration */
	/* ICR is sampled twice around the data-bus check to narrow the
	 * window in which a higher-priority ID could slip past us. */
	if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
	    (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
	    (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
	    hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
			   HOSTNO);
		return -1;
	}

	/* after/during arbitration, BSY should be asserted.
	   IBM DPES-31080 Version S31Q works now */
	/* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL |
		      ICR_ASSERT_BSY ) ;

	if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
	    hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
			   HOSTNO);
		return -1;
	}

	/*
	 * Again, bus clear + bus settle time is 1.2us, however, this is
	 * a minimum so we'll udelay ceil(1.2)
	 */

#ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY
	/* ++roman: But some targets (see above :-) seem to need a bit more... */
	udelay(15);
#else
	udelay(2);
#endif

	if (hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		return -1;
	}

	ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO);

	/*
	 * Now that we have won arbitration, start Selection process, asserting
	 * the host and target ID's on the SCSI bus.
	 */

	NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id)));

	/*
	 * Raise ATN while SEL is true before BSY goes false from arbitration,
	 * since this is the only way to guarantee that we'll get a MESSAGE OUT
	 * phase immediately after selection.
	 */

	NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY |
		      ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL ));
	NCR5380_write(MODE_REG, MR_BASE);

	/*
	 * Reselect interrupts must be turned off prior to the dropping of BSY,
	 * otherwise we will trigger an interrupt.
	 */

	if (hostdata->connected) {
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		return -1;
	}

	NCR5380_write(SELECT_ENABLE_REG, 0);

	/*
	 * The initiator shall then wait at least two deskew delays and release
	 * the BSY signal.
	 */
	udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */

	/* Reset BSY */
	NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA |
		      ICR_ASSERT_ATN | ICR_ASSERT_SEL));

	/*
	 * Something weird happens when we cease to drive BSY - looks
	 * like the board/chip is letting us do another read before the
	 * appropriate propagation delay has expired, and we're confusing
	 * a BSY signal from ourselves as the target's response to SELECTION.
	 *
	 * A small delay (the 'C++' frontend breaks the pipeline with an
	 * unnecessary jump, making it work on my 386-33/Trantor T128, the
	 * tighter 'C' code breaks and requires this) solves the problem -
	 * the 1 us delay is arbitrary, and only used because this delay will
	 * be the same on other platforms and since it works here, it should
	 * work there.
	 *
	 * wingel suggests that this could be due to failing to wait
	 * one deskew delay.
	 */

	udelay(1);

	SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);

	/*
	 * The SCSI specification calls for a 250 ms timeout for the actual
	 * selection.
	 */

	timeout = jiffies + 25;

	/*
	 * XXX very interesting - we're seeing a bounce where the BSY we
	 * asserted is being reflected / still asserted (propagation delay?)
	 * and it's detecting as true. Sigh.
	 */

#if 0
	/* ++roman: If a target conformed to the SCSI standard, it wouldn't assert
	 * IO while SEL is true. But again, there are some disks out the in the
	 * world that do that nevertheless. (Somebody claimed that this announces
	 * reselection capability of the target.) So we better skip that test and
	 * only wait for BSY... (Famous german words: Der Klügere gibt nach :-)
	 * [English: "the wiser one gives in"] */

	while (time_before(jiffies, timeout) &&
	       !(NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO)))
		;

	if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		NCR5380_reselect(instance);
		printk (KERN_ERR "scsi%d: reselection after won arbitration?\n",
			HOSTNO);
		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		return -1;
	}
#else
	while (time_before(jiffies, timeout) &&
	       !(NCR5380_read(STATUS_REG) & SR_BSY))
		;
#endif

	/*
	 * No less than two deskew delays after the initiator detects the
	 * BSY signal is true, it shall release the SEL signal and may
	 * change the DATA BUS. -wingel
	 */

	udelay(1);

	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);

	if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
		/* selection timed out: target did not answer */
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		if (hostdata->targets_present & (1 << cmd->device->id)) {
			/* target answered before — treat silence as a
			 * transient error rather than "no device" */
			printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
			if (hostdata->restart_select)
				printk(KERN_NOTICE "\trestart select\n");
			NCR_PRINT(NDEBUG_ANY);
			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
			return -1;
		}
		cmd->result = DID_BAD_TARGET << 16;
#ifdef NCR5380_STATS
		collect_stats(hostdata, cmd);
#endif
#ifdef SUPPORT_TAGS
		cmd_free_tag( cmd );
#endif
		cmd->scsi_done(cmd);
		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO);
		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		return 0;
	}

	hostdata->targets_present |= (1 << cmd->device->id);

	/*
	 * Since we followed the SCSI spec, and raised ATN while SEL
	 * was true but before BSY was false during selection, the information
	 * transfer phase should be a MESSAGE OUT phase so that we can send the
	 * IDENTIFY message.
	 *
	 * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
	 * message (2 bytes) with a tag ID that we increment with every command
	 * until it wraps back to 0.
	 *
	 * XXX - it turns out that there are some broken SCSI-II devices,
	 * which claim to support tagged queuing but fail when more than
	 * some number of commands are issued at once.
	 */

	/* Wait for start of REQ/ACK handshake */
	while (!(NCR5380_read(STATUS_REG) & SR_REQ))
		;

	SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
		   HOSTNO, cmd->device->id);
	tmp[0] = IDENTIFY(1, cmd->device->lun);

#ifdef SUPPORT_TAGS
	if (cmd->tag != TAG_NONE) {
		tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG;
		tmp[2] = cmd->tag;
		len = 3;
	} else
		len = 1;
#else
	len = 1;
	cmd->tag=0;
#endif /* SUPPORT_TAGS */

	/* Send message(s) */
	data = tmp;
	phase = PHASE_MSGOUT;
	NCR5380_transfer_pio(instance, &phase, &len, &data);
	SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO);
	/* XXX need to handle errors here */
	hostdata->connected = cmd;
#ifndef SUPPORT_TAGS
	hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
#endif
#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_INTR;
#endif

	initialize_SCp(cmd);

	return 0;
}

/*
 * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
 *	unsigned char *phase, int *count, unsigned char **data)
 *
 * Purpose : transfers data in given phase using polled I/O
 *
 * Inputs : instance - instance of driver, *phase - pointer to
 *	what phase is expected, *count - pointer to number of
 *	bytes to transfer, **data - pointer to data pointer.
 *
 * Returns : -1 when different phase is entered without transferring
 *	maximum number of bytes, 0 if all bytes are transferred or exit
 *	is in same phase.
 *
 * Also, *phase, *count, *data are modified in place.
 *
 * XXX Note : handling for bus free may be useful.
 */

/*
 * Note : this code is not as quick as it could be, however it
 * IS 100% reliable, and for the actual data transfer where speed
 * counts, we will always do a pseudo DMA or DMA transfer.
*/

/*
 * NCR5380_transfer_pio: move *count bytes over the bus in phase *phase,
 * one polled REQ/ACK handshake per byte.  *phase, *count and *data are
 * updated in place; returns 0 on full transfer (or same-phase exit),
 * -1 if the target switched phase early.
 */
static int NCR5380_transfer_pio(struct Scsi_Host *instance,
				unsigned char *phase, int *count,
				unsigned char **data)
{
	register unsigned char p = *phase, tmp;
	register int c = *count;
	register unsigned char *d = *data;

	/*
	 * The NCR5380 chip will only drive the SCSI bus when the
	 * phase specified in the appropriate bits of the TARGET COMMAND
	 * REGISTER match the STATUS REGISTER
	 */
	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));

	do {
		/*
		 * Wait for assertion of REQ, after which the phase bits will be
		 * valid
		 */
		while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));

		HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO);

		/* Check for phase mismatch */
		if ((tmp & PHASE_MASK) != p) {
			PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO);
			NCR_PRINT_PHASE(NDEBUG_PIO);
			break;
		}

		/* Do actual transfer from SCSI bus to / from memory */
		if (!(p & SR_IO))
			NCR5380_write(OUTPUT_DATA_REG, *d);
		else
			*d = NCR5380_read(CURRENT_SCSI_DATA_REG);

		++d;

		/*
		 * The SCSI standard suggests that in MSGOUT phase, the initiator
		 * should drop ATN on the last byte of the message phase
		 * after REQ has been asserted for the handshake but before
		 * the initiator raises ACK.
		 */
		if (!(p & SR_IO)) {
			if (!((p & SR_MSG) && c > 1)) {
				/* last MSGOUT byte (or non-message phase): ACK without ATN */
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_DATA);
				NCR_PRINT(NDEBUG_PIO);
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ACK);
			} else {
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN);
				NCR_PRINT(NDEBUG_PIO);
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
			}
		} else {
			NCR_PRINT(NDEBUG_PIO);
			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
		}

		/* wait for the target to drop REQ, completing this handshake */
		while (NCR5380_read(STATUS_REG) & SR_REQ);

		HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO);

		/*
		 * We have several special cases to consider during REQ/ACK handshaking :
		 * 1.  We were in MSGOUT phase, and we are on the last byte of the
		 *     message.  ATN must be dropped as ACK is dropped.
		 *
		 * 2.  We are in a MSGIN phase, and we are on the last byte of the
		 *     message.  We must exit with ACK asserted, so that the calling
		 *     code may raise ATN before dropping ACK to reject the message.
		 *
		 * 3.  ACK and ATN are clear and the target may proceed as normal.
		 */
		if (!(p == PHASE_MSGIN && c == 1)) {
			if (p == PHASE_MSGOUT && c > 1)
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_ATN);
			else
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		}
	} while (--c);

	PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c);

	*count = c;
	*data = d;
	tmp = NCR5380_read(STATUS_REG);
	/* The phase read from the bus is valid if either REQ is (already)
	 * asserted or if ACK hasn't been released yet. The latter is the case if
	 * we're in MSGIN and all wanted bytes have been received.
	 */
	if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0))
		*phase = tmp & PHASE_MASK;
	else
		*phase = PHASE_UNKNOWN;

	if (!c || (*phase == p))
		return 0;
	else
		return -1;
}

/*
 * Function : int do_abort (struct Scsi_Host *host)
 *
 * Purpose : abort the currently established nexus.  Should only be
 *	called from a routine which can drop into a
 *	(NOTE(review): sentence truncated in the original source --
 *	presumably "... which can drop into a BUS FREE state"; confirm.)
 *
 * Returns : 0 on success, -1 on failure.
 */
static int do_abort(struct Scsi_Host *host)
{
	unsigned char tmp, *msgptr, phase;
	int len;

	/* Request message out phase */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);

	/*
	 * Wait for the target to indicate a valid phase by asserting
	 * REQ.  Once this happens, we'll have either a MSGOUT phase
	 * and can immediately send the ABORT message, or we'll have some
	 * other phase and will have to source/sink data.
	 *
	 * We really don't care what value was on the bus or what value
	 * the target sees, so we just handshake.
	 */
	while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));

	if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
		/* sink one byte of whatever phase this is, keeping ATN asserted */
		NCR5380_write(INITIATOR_COMMAND_REG,
			      ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
		while (NCR5380_read(STATUS_REG) & SR_REQ);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
	}

	tmp = ABORT;
	msgptr = &tmp;
	len = 1;
	phase = PHASE_MSGOUT;
	NCR5380_transfer_pio(host, &phase, &len, &msgptr);

	/*
	 * If we got here, and the command completed successfully,
	 * we're about to go into bus free state.
	 */
	return len ? -1 : 0;
}

#if defined(REAL_DMA)
/*
 * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
 *	unsigned char *phase, int *count, unsigned char **data)
 *
 * Purpose : transfers data in given phase using either real
 *	or pseudo DMA.
 *
 * Inputs : instance - instance of driver, *phase - pointer to
 *	what phase is expected, *count - pointer to number of
 *	bytes to transfer, **data - pointer to data pointer.
 *
 * Returns : -1 when different phase is entered without transferring
 *	maximum number of bytes, 0 if all bytes or transferred or exit
 *	is in same phase.
 *
 * Also, *phase, *count, *data are modified in place.
 *
 */
static int NCR5380_transfer_dma(struct Scsi_Host *instance,
				unsigned char *phase, int *count,
				unsigned char **data)
{
	SETUP_HOSTDATA(instance);
	register int c = *count;
	register unsigned char p = *phase;
	unsigned long flags;

	/* sanity check */
	if (!sun3_dma_setup_done) {
		printk("scsi%d: transfer_dma without setup!\n", HOSTNO);
		BUG();
	}
	hostdata->dma_len = c;

	DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n",
		   HOSTNO, (p & SR_IO) ? "reading" : "writing",
		   c, (p & SR_IO) ? "to" : "from", *data);

	/* netbsd turns off ints here, why not be safe and do it too */
	local_irq_save(flags);

	/* send start chain */
	sun3scsi_dma_start(c, *data);

	/* program the 5380 for DMA in the matching direction; the
	 * register write order below is the chip's required setup sequence */
	if (p & SR_IO) {
		NCR5380_write(TARGET_COMMAND_REG, 1);
		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		NCR5380_write(INITIATOR_COMMAND_REG, 0);
		NCR5380_write(MODE_REG,
			      (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
	} else {
		NCR5380_write(TARGET_COMMAND_REG, 0);
		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
		NCR5380_write(MODE_REG,
			      (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
		NCR5380_write(START_DMA_SEND_REG, 0);
	}

#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_DMA_ENABLE;
#endif

	local_irq_restore(flags);

	sun3_dma_active = 1;
	return 0;
}
#endif /* defined(REAL_DMA) */

/*
 * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
 *
 * Purpose : run through the various SCSI phases and do as the target
 *	directs us to.  Operates on the currently connected command,
 *	instance->connected.
 *
 * Inputs : instance, instance for which we are doing commands
 *
 * Side effects : SCSI things happen, the disconnected queue will be
 *	modified if a command disconnects, *instance->connected will
 *	change.
 *
 * XXX Note : we need to watch for bus free or a reset condition here
 *	to recover from an unexpected bus free condition.
*/

static void NCR5380_information_transfer(struct Scsi_Host *instance)
{
	SETUP_HOSTDATA(instance);
	unsigned long flags;
	unsigned char msgout = NOP;
	int sink = 0;	/* when set, source/sink one handshake then re-evaluate */
	int len;
#if defined(REAL_DMA)
	int transfersize;
#endif
	unsigned char *data;
	unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
	struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected;

#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_INTR;
#endif

	while (1) {
		tmp = NCR5380_read(STATUS_REG);
		/* We only have a valid SCSI phase when REQ is asserted */
		if (tmp & SR_REQ) {
			phase = (tmp & PHASE_MASK);
			if (phase != old_phase) {
				old_phase = phase;
				NCR_PRINT_PHASE(NDEBUG_INFORMATION);
			}

			/* about to enter command out: pre-arm DMA for the
			 * data phase that will follow, while we know the
			 * remaining buffer */
			if (phase == PHASE_CMDOUT) {
				void *d;
				unsigned long count;

				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
					count = cmd->SCp.buffer->length;
					d = SGADDR(cmd->SCp.buffer);
				} else {
					count = cmd->SCp.this_residual;
					d = cmd->SCp.ptr;
				}
#ifdef REAL_DMA
				/* this command setup for dma yet? */
				if ((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done != cmd)) {
					if (cmd->request->cmd_type == REQ_TYPE_FS) {
						sun3scsi_dma_setup(d, count,
								   rq_data_dir(cmd->request));
						sun3_dma_setup_done = cmd;
					}
				}
#endif
#ifdef SUN3_SCSI_VME
				dregs->csr |= CSR_INTR;
#endif
			}

			/* discard one byte of any non-MSGOUT phase while we
			 * are trying to force the target into message out */
			if (sink && (phase != PHASE_MSGOUT)) {
				NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
				while (NCR5380_read(STATUS_REG) & SR_REQ);
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_ATN);
				sink = 0;
				continue;
			}

			switch (phase) {
			case PHASE_DATAOUT:
#if (NDEBUG & NDEBUG_NO_DATAOUT)
				printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT "
				       "aborted\n", HOSTNO);
				sink = 1;
				do_abort(instance);
				cmd->result = DID_ERROR << 16;
				cmd->scsi_done(cmd);
				return;
#endif
				/* fallthrough -- DATAOUT and DATAIN share the code below */
			case PHASE_DATAIN:
				/*
				 * If there is no room left in the current buffer in the
				 * scatter-gather list, move onto the next one.
				 */
				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
					++cmd->SCp.buffer;
					--cmd->SCp.buffers_residual;
					cmd->SCp.this_residual = cmd->SCp.buffer->length;
					cmd->SCp.ptr = SGADDR(cmd->SCp.buffer);

					/* ++roman: Try to merge some scatter-buffers if
					 * they are at contiguous physical addresses.
					 */
					// merge_contiguous_buffers( cmd );
					INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
						   HOSTNO, cmd->SCp.this_residual,
						   cmd->SCp.buffers_residual);
				}

				/*
				 * The preferred transfer method is going to be
				 * PSEUDO-DMA for systems that are strictly PIO,
				 * since we can let the hardware do the handshaking.
				 *
				 * For this to work, we need to know the transfersize
				 * ahead of time, since the pseudo-DMA code will sit
				 * in an unconditional loop.
				 */

				/* ++roman: I suggest, this should be
				 *   #if def(REAL_DMA)
				 * instead of leaving REAL_DMA out.
				 */
#if defined(REAL_DMA)
//				if (!cmd->device->borken &&
				if ((transfersize =
				     NCR5380_dma_xfer_len(instance, cmd, phase)) > SUN3_DMA_MINSIZE) {
					len = transfersize;
					cmd->SCp.phase = phase;

					if (NCR5380_transfer_dma(instance, &phase, &len,
								 (unsigned char **) &cmd->SCp.ptr)) {
						/*
						 * If the watchdog timer fires, all future
						 * accesses to this device will use the
						 * polled-IO.
						 */
						printk(KERN_NOTICE "scsi%d: switching target %d "
						       "lun %d to slow handshake\n", HOSTNO,
						       cmd->device->id, cmd->device->lun);
						cmd->device->borken = 1;
						NCR5380_write(INITIATOR_COMMAND_REG,
							      ICR_BASE | ICR_ASSERT_ATN);
						sink = 1;
						do_abort(instance);
						cmd->result = DID_ERROR << 16;
						cmd->scsi_done(cmd);
						/* XXX - need to source or sink data here, as appropriate */
					} else {
#ifdef REAL_DMA
						/* ++roman: When using real DMA,
						 * information_transfer() should return after
						 * starting DMA since it has nothing more to
						 * do.
						 */
						return;
#else
						cmd->SCp.this_residual -= transfersize - len;
#endif
					}
				} else
#endif /* defined(REAL_DMA) */
					NCR5380_transfer_pio(instance, &phase,
							     (int *) &cmd->SCp.this_residual,
							     (unsigned char **) &cmd->SCp.ptr);
#ifdef REAL_DMA
				/* if we had intended to dma that command clear it */
				if (sun3_dma_setup_done == cmd)
					sun3_dma_setup_done = NULL;
#endif
				break;
			case PHASE_MSGIN:
				len = 1;
				data = &tmp;
				NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				cmd->SCp.Message = tmp;

				switch (tmp) {
				/*
				 * Linking lets us reduce the time required to get the
				 * next command out to the device, hopefully this will
				 * mean we don't waste another revolution due to the delays
				 * required by ARBITRATION and another SELECTION.
				 *
				 * In the current implementation proposal, low level drivers
				 * merely have to start the next command, pointed to by
				 * next_link, done() is called as with unlinked commands.
				 */
#ifdef LINKED
				case LINKED_CMD_COMPLETE:
				case LINKED_FLG_CMD_COMPLETE:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

					LNK_PRINTK("scsi%d: target %d lun %d linked command "
						   "complete.\n", HOSTNO, cmd->device->id,
						   cmd->device->lun);

					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					/*
					 * Sanity check : A linked command should only terminate
					 * with one of these messages if there are more linked
					 * commands available.
					 */
					if (!cmd->next_link) {
						printk(KERN_NOTICE "scsi%d: target %d lun %d "
						       "linked command complete, no next_link\n",
						       HOSTNO, cmd->device->id, cmd->device->lun);
						sink = 1;
						do_abort(instance);
						return;
					}

					initialize_SCp(cmd->next_link);
					/* The next command is still part of this process; copy it
					 * and don't free it!
					 */
					cmd->next_link->tag = cmd->tag;
					cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
					LNK_PRINTK("scsi%d: target %d lun %d linked request "
						   "done, calling scsi_done().\n",
						   HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef NCR5380_STATS
					collect_stats(hostdata, cmd);
#endif
					cmd->scsi_done(cmd);
					cmd = hostdata->connected;
					break;
#endif /* def LINKED */
				case ABORT:
				case COMMAND_COMPLETE:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					hostdata->connected = NULL;
					QU_PRINTK("scsi%d: command for target %d, lun %d "
						  "completed\n", HOSTNO, cmd->device->id,
						  cmd->device->lun);
#ifdef SUPPORT_TAGS
					cmd_free_tag( cmd );
					if (status_byte(cmd->SCp.Status) == QUEUE_FULL) {
						/* Turn a QUEUE FULL status into BUSY, I think the
						 * mid level cannot handle QUEUE FULL :-( (The
						 * command is retried after BUSY). Also update our
						 * queue size to the number of currently issued
						 * commands now.
						 */
						/* ++Andreas: the mid level code knows about
						   QUEUE_FULL now. */
						TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
						TAG_PRINTK("scsi%d: target %d lun %d returned "
							   "QUEUE_FULL after %d commands\n",
							   HOSTNO, cmd->device->id, cmd->device->lun,
							   ta->nr_allocated);
						if (ta->queue_size > ta->nr_allocated)
							ta->nr_allocated = ta->queue_size;
					}
#else
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);

					/*
					 * I'm not sure what the correct thing to do here is :
					 *
					 * If the command that just executed is NOT a request
					 * sense, the obvious thing to do is to set the result
					 * code to the values of the stored parameters.
					 *
					 * If it was a REQUEST SENSE command, we need some way to
					 * differentiate between the failure code of the original
					 * and the failure code of the REQUEST sense - the obvious
					 * case is success, where we fall through and leave the
					 * result code unchanged.
					 *
					 * The non-obvious place is where the REQUEST SENSE failed
					 */
					if (cmd->cmnd[0] != REQUEST_SENSE)
						cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
					else if (status_byte(cmd->SCp.Status) != GOOD)
						cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);

#ifdef AUTOSENSE
					if ((cmd->cmnd[0] == REQUEST_SENSE) &&
					    hostdata->ses.cmd_len) {
						scsi_eh_restore_cmnd(cmd, &hostdata->ses);
						hostdata->ses.cmd_len = 0;
					}

					if ((cmd->cmnd[0] != REQUEST_SENSE) &&
					    (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
						scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
						ASEN_PRINTK("scsi%d: performing request sense\n",
							    HOSTNO);
						/* this is initialized from initialize_SCp
						cmd->SCp.buffer = NULL;
						cmd->SCp.buffers_residual = 0;
						*/

						local_irq_save(flags);
						LIST(cmd, hostdata->issue_queue);
						NEXT(cmd) = hostdata->issue_queue;
						hostdata->issue_queue = (struct scsi_cmnd *) cmd;
						local_irq_restore(flags);
						QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
							  "issue queue\n", H_NO(cmd));
					} else
#endif /* def AUTOSENSE */
					{
#ifdef NCR5380_STATS
						collect_stats(hostdata, cmd);
#endif
						cmd->scsi_done(cmd);
					}

					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					/*
					 * Restore phase bits to 0 so an interrupted selection,
					 * arbitration can resume.
					 */
					NCR5380_write(TARGET_COMMAND_REG, 0);

					/* spin until the bus goes free (or we are reselected) */
					while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
						barrier();
					return;
				case MESSAGE_REJECT:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					switch (hostdata->last_message) {
					case HEAD_OF_QUEUE_TAG:
					case ORDERED_QUEUE_TAG:
					case SIMPLE_QUEUE_TAG:
						/* The target obviously doesn't support tagged
						 * queuing, even though it announced this ability in
						 * its INQUIRY data ?!? (maybe only this LUN?) Ok,
						 * clear 'tagged_supported' and lock the LUN, since
						 * the command is treated as untagged further on.
						 */
						cmd->device->tagged_supported = 0;
						hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
						cmd->tag = TAG_NONE;
						TAG_PRINTK("scsi%d: target %d lun %d rejected "
							   "QUEUE_TAG message; tagged queuing "
							   "disabled\n",
							   HOSTNO, cmd->device->id, cmd->device->lun);
						break;
					}
					break;
				case DISCONNECT:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					local_irq_save(flags);
					cmd->device->disconnect = 1;
					LIST(cmd, hostdata->disconnected_queue);
					NEXT(cmd) = hostdata->disconnected_queue;
					hostdata->connected = NULL;
					hostdata->disconnected_queue = cmd;
					local_irq_restore(flags);
					QU_PRINTK("scsi%d: command for target %d lun %d was "
						  "moved from connected to the "
						  "disconnected_queue\n", HOSTNO,
						  cmd->device->id, cmd->device->lun);
					/*
					 * Restore phase bits to 0 so an interrupted selection,
					 * arbitration can resume.
					 */
					NCR5380_write(TARGET_COMMAND_REG, 0);

					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					/* Wait for bus free to avoid nasty timeouts */
					while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
						barrier();
#ifdef SUN3_SCSI_VME
					dregs->csr |= CSR_DMA_ENABLE;
#endif
					return;
				/*
				 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
				 * operation, in violation of the SCSI spec so we can safely
				 * ignore SAVE/RESTORE pointers calls.
				 *
				 * Unfortunately, some disks violate the SCSI spec and
				 * don't issue the required SAVE_POINTERS message before
				 * disconnecting, and we have to break spec to remain
				 * compatible.
				 */
				case SAVE_POINTERS:
				case RESTORE_POINTERS:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					break;
				case EXTENDED_MESSAGE:
					/*
					 * Extended messages are sent in the following format :
					 * Byte
					 * 0		EXTENDED_MESSAGE == 1
					 * 1		length (includes one byte for code, doesn't
					 *		include first two bytes)
					 * 2		code
					 * 3..length+1	arguments
					 *
					 * Start the extended message buffer with the EXTENDED_MESSAGE
					 * byte, since spi_print_msg() wants the whole thing.
					 */
					extended_msg[0] = EXTENDED_MESSAGE;
					/* Accept first byte by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

					EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO);

					len = 2;
					data = extended_msg + 1;
					phase = PHASE_MSGIN;
					NCR5380_transfer_pio(instance, &phase, &len, &data);
					EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO,
						   (int)extended_msg[1], (int)extended_msg[2]);

					if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) {
						/* Accept third byte by clearing ACK */
						NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
						len = extended_msg[1] - 1;
						data = extended_msg + 3;
						phase = PHASE_MSGIN;

						NCR5380_transfer_pio(instance, &phase, &len, &data);
						EXT_PRINTK("scsi%d: message received, residual %d\n",
							   HOSTNO, len);

						switch (extended_msg[2]) {
						case EXTENDED_SDTR:
						case EXTENDED_WDTR:
						case EXTENDED_MODIFY_DATA_POINTER:
						case EXTENDED_EXTENDED_IDENTIFY:
							/* none of these are supported:
							 * tmp = 0 makes the default case
							 * below send MESSAGE_REJECT */
							tmp = 0;
						}
					} else if (len) {
						printk(KERN_NOTICE "scsi%d: error receiving "
						       "extended message\n", HOSTNO);
						tmp = 0;
					} else {
						printk(KERN_NOTICE "scsi%d: extended message "
						       "code %02x length %d is too long\n",
						       HOSTNO, extended_msg[2], extended_msg[1]);
						tmp = 0;
					}
					/* Fall through to reject message */

				/*
				 * If we get something weird that we aren't expecting,
				 * reject it.
				 */
				default:
					if (!tmp) {
						printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO);
						spi_print_msg(extended_msg);
						printk("\n");
					} else if (tmp != EXTENDED_MESSAGE)
						printk(KERN_DEBUG "scsi%d: rejecting unknown "
						       "message %02x from target %d, lun %d\n",
						       HOSTNO, tmp, cmd->device->id, cmd->device->lun);
					else
						printk(KERN_DEBUG "scsi%d: rejecting unknown "
						       "extended message "
						       "code %02x, length %d from target %d, lun %d\n",
						       HOSTNO, extended_msg[1], extended_msg[0],
						       cmd->device->id, cmd->device->lun);

					msgout = MESSAGE_REJECT;
					NCR5380_write(INITIATOR_COMMAND_REG,
						      ICR_BASE | ICR_ASSERT_ATN);
					break;
				} /* switch (tmp) */
				break;
			case PHASE_MSGOUT:
				len = 1;
				data = &msgout;
				hostdata->last_message = msgout;
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				if (msgout == ABORT) {
#ifdef SUPPORT_TAGS
					cmd_free_tag( cmd );
#else
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
					hostdata->connected = NULL;
					cmd->result = DID_ERROR << 16;
#ifdef NCR5380_STATS
					collect_stats(hostdata, cmd);
#endif
					cmd->scsi_done(cmd);
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					return;
				}
				msgout = NOP;
				break;
			case PHASE_CMDOUT:
				len = cmd->cmd_len;
				data = cmd->cmnd;
				/*
				 * XXX for performance reasons, on machines with a
				 * PSEUDO-DMA architecture we should probably
				 * use the dma transfer function.
				 */
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				break;
			case PHASE_STATIN:
				len = 1;
				data = &tmp;
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				cmd->SCp.Status = tmp;
				break;
			default:
				printk("scsi%d: unknown phase\n", HOSTNO);
				NCR_PRINT(NDEBUG_ANY);
			} /* switch(phase) */
		} /* if (tmp & SR_REQ) */
	} /* while (1) */
}

/*
 * Function : void NCR5380_reselect (struct Scsi_Host *instance)
 *
 * Purpose : does reselection, initializing the instance->connected
 *	field to point to the struct scsi_cmnd for which the I_T_L or I_T_L_Q
 *	nexus has been reestablished,
 *
 * Inputs : instance - this instance of the NCR5380.
*
*/

/* it might eventually prove necessary to do a dma setup on
   reselection, but it doesn't seem to be needed now -- sam */

static void NCR5380_reselect(struct Scsi_Host *instance)
{
	SETUP_HOSTDATA(instance);
	unsigned char target_mask;
	unsigned char lun;
#ifdef SUPPORT_TAGS
	unsigned char tag;
#endif
	unsigned char msg[3];
	struct scsi_cmnd *tmp = NULL, *prev;
/*	unsigned long flags; */

	/*
	 * Disable arbitration, etc. since the host adapter obviously
	 * lost, and tell an interrupted NCR5380_select() to restart.
	 */
	NCR5380_write(MODE_REG, MR_BASE);
	hostdata->restart_select = 1;

	target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);

	RSL_PRINTK("scsi%d: reselect\n", HOSTNO);

	/*
	 * At this point, we have detected that our SCSI ID is on the bus,
	 * SEL is true and BSY was false for at least one bus settle delay
	 * (400 ns).
	 *
	 * We must assert BSY ourselves, until the target drops the SEL
	 * signal.
	 */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);

	while (NCR5380_read(STATUS_REG) & SR_SEL);
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

	/*
	 * Wait for target to go into MSGIN.
	 */
	while (!(NCR5380_read(STATUS_REG) & SR_REQ));

#if 1
	// acknowledge toggle to MSGIN
	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN));

	// peek at the byte without really hitting the bus
	msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG);
#endif

	/* the first reselection message must be IDENTIFY (top bit set) */
	if (!(msg[0] & 0x80)) {
		printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO);
		spi_print_msg(msg);
		do_abort(instance);
		return;
	}
	lun = (msg[0] & 0x07);

	/*
	 * Find the command corresponding to the I_T_L or I_T_L_Q nexus we
	 * just reestablished, and remove it from the disconnected queue.
	 */
	/* NOTE(review): under SUPPORT_TAGS, 'tag' is compared below but is
	 * only assigned further down (after this search), so the comparison
	 * reads an uninitialized value; the SUPPORT_TAGS section near the end
	 * also uses 'phase', 'len' and 'data', which are not declared in this
	 * function.  This config path looks broken/dead -- verify before
	 * building with SUPPORT_TAGS.
	 */
	for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL;
	     tmp; prev = tmp, tmp = NEXT(tmp)) {
		if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun)
#ifdef SUPPORT_TAGS
		    && (tag == tmp->tag)
#endif
		    ) {
			if (prev) {
				REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
				NEXT(prev) = NEXT(tmp);
			} else {
				REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp));
				hostdata->disconnected_queue = NEXT(tmp);
			}
			NEXT(tmp) = NULL;
			break;
		}
	}

	if (!tmp) {
		printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d "
#ifdef SUPPORT_TAGS
		       "tag %d "
#endif
		       "not in disconnected_queue.\n", HOSTNO, target_mask, lun
#ifdef SUPPORT_TAGS
		       , tag
#endif
		       );
		/*
		 * Since we have an established nexus that we can't do anything
		 * with, we must abort it.
		 */
		do_abort(instance);
		return;
	}

#if 1
	/* engage dma setup for the command we just saw */
	{
		void *d;
		unsigned long count;

		if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
			count = tmp->SCp.buffer->length;
			d = SGADDR(tmp->SCp.buffer);
		} else {
			count = tmp->SCp.this_residual;
			d = tmp->SCp.ptr;
		}
#ifdef REAL_DMA
		/* setup this command for dma if not already */
		if ((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done != tmp)) {
			sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request));
			sun3_dma_setup_done = tmp;
		}
#endif
	}
#endif

	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
	/* Accept message by clearing ACK */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

#ifdef SUPPORT_TAGS
	/* If the phase is still MSGIN, the target wants to send some more
	 * messages.  In case it supports tagged queuing, this is probably a
	 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
	 */
	tag = TAG_NONE;
	if (phase == PHASE_MSGIN && setup_use_tagged_queuing) {
		/* Accept previous IDENTIFY message by clearing ACK */
		NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
		len = 2;
		data = msg + 1;
		if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
		    msg[1] == SIMPLE_QUEUE_TAG)
			tag = msg[2];
		TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at "
			   "reselection\n", HOSTNO, target_mask, lun, tag);
	}
#endif

	hostdata->connected = tmp;
	/* NOTE(review): tmp->target / tmp->lun / tmp->tag are stale pre-
	 * scsi_cmnd field names; this only compiles while RSL_PRINTK expands
	 * to nothing (debugging disabled). */
	RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
		   HOSTNO, tmp->target, tmp->lun, tmp->tag);
}

/*
 * Function : int NCR5380_abort(struct scsi_cmnd *cmd)
 *
 * Purpose : abort a command
 *
 * Inputs : cmd - the struct scsi_cmnd to abort, code - code to set the
 * 	host byte of the result field to, if zero DID_ABORTED is
 *	used.
 *
 * Returns : 0 - success, -1 on failure.
 *
 * XXX - there is no way to abort the command that is currently
 *	connected, you have to wait for it to complete.  If this is
 *	a problem, we could implement longjmp() / setjmp(), setjmp()
 *	called where the loop started in NCR5380_main().
 */
static int NCR5380_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *instance = cmd->device->host;
	SETUP_HOSTDATA(instance);
	struct scsi_cmnd *tmp, **prev;
	unsigned long flags;

	printk(KERN_NOTICE "scsi%d: aborting command\n", HOSTNO);
	scsi_print_command(cmd);

	NCR5380_print_status(instance);

	local_irq_save(flags);

	ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
		    NCR5380_read(BUS_AND_STATUS_REG),
		    NCR5380_read(STATUS_REG));

#if 1
	/*
	 * Case 1 : If the command is the currently executing command,
	 * we'll set the aborted flag and return control so that
	 * information transfer routine can exit cleanly.
	 */
	if (hostdata->connected == cmd) {

		ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO);
		/*
		 * We should perform BSY checking, and make sure we haven't slipped
		 * into BUS FREE.
		 */

		/* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */
		/*
		 * Since we can't change phases until we've completed the current
		 * handshake, we have to source or sink a byte of data if the current
		 * phase is not MSGOUT.
		 */

		/*
		 * Return control to the executing NCR drive so we can clear the
		 * aborted flag and get back into our main loop.
		 */

		if (do_abort(instance) == 0) {
			hostdata->aborted = 1;
			hostdata->connected = NULL;
			cmd->result = DID_ABORT << 16;
#ifdef SUPPORT_TAGS
			cmd_free_tag( cmd );
#else
			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
			local_irq_restore(flags);
			cmd->scsi_done(cmd);
			return SCSI_ABORT_SUCCESS;
		} else {
/*			local_irq_restore(flags); */
			printk("scsi%d: abort of connected command failed!\n", HOSTNO);
			return SCSI_ABORT_ERROR;
		}
	}
#endif

	/*
	 * Case 2 : If the command hasn't been issued yet, we simply remove it
	 * 	    from the issue queue.
	 */
	for (prev = (struct scsi_cmnd **) &(hostdata->issue_queue),
	     tmp = (struct scsi_cmnd *) hostdata->issue_queue;
	     tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp))
		if (cmd == tmp) {
			REMOVE(5, *prev, tmp, NEXT(tmp));
			(*prev) = NEXT(tmp);
			NEXT(tmp) = NULL;
			tmp->result = DID_ABORT << 16;
			local_irq_restore(flags);
			ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
				    HOSTNO);
			/* Tagged queuing note: no tag to free here, hasn't been assigned
			 * yet... */
			tmp->scsi_done(tmp);
			return SCSI_ABORT_SUCCESS;
		}

	/*
	 * Case 3 : If any commands are connected, we're going to fail the abort
	 *	    and let the high level SCSI driver retry at a later time or
	 *	    issue a reset.
	 *
	 *	    Timeouts, and therefore aborted commands, will be highly unlikely
	 *          and handling them cleanly in this situation would make the common
	 *	    case of noresets less efficient, and would pollute our code.  So,
	 *	    we fail.
	 */
	if (hostdata->connected) {
		local_irq_restore(flags);
		ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO);
		return SCSI_ABORT_SNOOZE;
	}

	/*
	 * Case 4: If the command is currently disconnected from the bus, and
	 * 	there are no connected commands, we reconnect the I_T_L or
	 *	I_T_L_Q nexus associated with it, go into message out, and send
	 *      an abort message.
	 *
	 *	This case is especially ugly. In order to reestablish the nexus, we
	 *	need to call NCR5380_select().  The easiest way to implement this
	 *	function was to abort if the bus was busy, and let the interrupt
	 *	handler triggered on the SEL for reselect take care of lost arbitrations
	 *	where necessary, meaning interrupts need to be enabled.
	 *
	 *	When interrupts are enabled, the queues may change - so we
	 *	can't remove it from the disconnected queue before selecting it
	 *	because that could cause a failure in hashing the nexus if that
	 *	device reselected.
	 *
	 *	Since the queues may change, we can't use the pointers from when we
	 *	first locate it.
	 *
	 *	So, we must first locate the command, and if NCR5380_select()
	 *	succeeds, then issue the abort, relocate the command and remove
	 *	it from the disconnected queue.
	 */
	for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp;
	     tmp = NEXT(tmp))
		if (cmd == tmp) {
			local_irq_restore(flags);
			ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO);

			if (NCR5380_select(instance, cmd, (int) cmd->tag))
				return SCSI_ABORT_BUSY;

			ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO);

			do_abort(instance);

			local_irq_save(flags);
			for (prev = (struct scsi_cmnd **) &(hostdata->disconnected_queue),
			     tmp = (struct scsi_cmnd *) hostdata->disconnected_queue;
			     tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp))
				if (cmd == tmp) {
					REMOVE(5, *prev, tmp, NEXT(tmp));
					*prev = NEXT(tmp);
					NEXT(tmp) = NULL;
					tmp->result = DID_ABORT << 16;
					/* We must unlock the tag/LUN immediately here, since the
					 * target goes to BUS FREE and doesn't send us another
					 * message (COMMAND_COMPLETE or the like)
					 */
#ifdef SUPPORT_TAGS
					cmd_free_tag( tmp );
#else
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
					local_irq_restore(flags);
					tmp->scsi_done(tmp);
					return SCSI_ABORT_SUCCESS;
				}
		}

	/*
	 * Case 5 : If we reached this point, the command was not found in any of
	 * 	    the queues.
	 *
	 * We probably reached this point because of an unlikely race condition
	 * between the command completing successfully and the abortion code,
	 * so we won't panic, but we will notify the user in case something really
	 * broke.
	 */
	local_irq_restore(flags);
	printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);

	return SCSI_ABORT_NOT_RUNNING;
}

/*
 * Function : int NCR5380_bus_reset(struct scsi_cmnd *cmd)
 *
 * Purpose : reset the SCSI bus.
*
 * Returns : SCSI_RESET_WAKEUP
 *
 */

/*
 * Hard-reset the SCSI bus: assert RST, re-initialise the 5380's registers,
 * and (in the compiled-in #if 1 path) explicitly complete every command
 * this host instance was tracking with DID_RESET so the mid-level code
 * gives up on them instead of waiting for timeouts.
 */
static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
{
	SETUP_HOSTDATA(cmd->device->host);
	int i;
	unsigned long flags;
#if 1
	/* Aborted commands are unhooked from the host queues into these
	 * locals first, so scsi_done() re-entry cannot corrupt the queues. */
	struct scsi_cmnd *connected, *disconnected_queue;
#endif

	NCR5380_print_status (cmd->device->host);

	/* get in phase */
	NCR5380_write( TARGET_COMMAND_REG,
		       PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) ));
	/* assert RST */
	NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST );
	udelay (40);
	/* reset NCR registers */
	NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
	NCR5380_write( MODE_REG, MR_BASE );
	NCR5380_write( TARGET_COMMAND_REG, 0 );
	NCR5380_write( SELECT_ENABLE_REG, 0 );
	/* ++roman: reset the interrupt condition! otherwise no further
	 * interrupts get through ... */
	(void)NCR5380_read( RESET_PARITY_INTERRUPT_REG );

#if 1	/* XXX Should now be done by midlevel code, but it's broken XXX */
	/* XXX see below XXX */

	/* MSch: old-style reset: actually abort all command processing here */

	/* After the reset, there are no more connected or disconnected commands
	 * and no busy units; to avoid problems with re-inserting the commands
	 * into the issue_queue (via scsi_done()), the aborted commands are
	 * remembered in local variables first.
	 */
	local_irq_save(flags);
	connected = (struct scsi_cmnd *)hostdata->connected;
	hostdata->connected = NULL;
	disconnected_queue = (struct scsi_cmnd *)hostdata->disconnected_queue;
	hostdata->disconnected_queue = NULL;
#ifdef SUPPORT_TAGS
	free_all_tags();
#endif
	/* 8 possible target IDs on a narrow bus */
	for( i = 0; i < 8; ++i )
		hostdata->busy[i] = 0;
#ifdef REAL_DMA
	hostdata->dma_len = 0;
#endif
	local_irq_restore(flags);

	/* In order to tell the mid-level code which commands were aborted,
	 * set the command status to DID_RESET and call scsi_done() !!!
	 * This ultimately aborts processing of these commands in the mid-level.
	 */
	if ((cmd = connected)) {
		ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
		cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
		cmd->scsi_done( cmd );
	}

	for (i = 0; (cmd = disconnected_queue); ++i) {
		disconnected_queue = NEXT(cmd);
		NEXT(cmd) = NULL;
		cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
		cmd->scsi_done( cmd );
	}
	if (i > 0)
		ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i);

	/* since all commands have been explicitly terminated, we need to tell
	 * the midlevel code that the reset was SUCCESSFUL, and there is no
	 * need to 'wake up' the commands by a request_sense
	 */
	return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
#else /* 1 */

	/* MSch: new-style reset handling: let the mid-level do what it can */

	/* ++guenther: MID-LEVEL IS STILL BROKEN.
	 * Mid-level is supposed to requeue all commands that were active on the
	 * various low-level queues. In fact it does this, but that's not enough
	 * because all these commands are subject to timeout. And if a timeout
	 * happens for any removed command, *_abort() is called but all queues
	 * are now empty. Abort then gives up the falcon lock, which is fatal,
	 * since the mid-level will queue more commands and must have the lock
	 * (it's all happening inside timer interrupt handler!!).
	 * Even worse, abort will return NOT_RUNNING for all those commands not
	 * on any queue, so they won't be retried ...
	 *
	 * Conclusion: either scsi.c disables timeout for all resetted commands
	 * immediately, or we lose!  As of linux-2.0.20 it doesn't.
	 */

	/* After the reset, there are no more connected or disconnected commands
	 * and no busy units; so clear the low-level status here to avoid
	 * conflicts when the mid-level code tries to wake up the affected
	 * commands!
	 */
	if (hostdata->issue_queue)
		ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
	if (hostdata->connected)
		ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
	if (hostdata->disconnected_queue)
		ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));

	local_irq_save(flags);
	hostdata->issue_queue = NULL;
	hostdata->connected = NULL;
	hostdata->disconnected_queue = NULL;
#ifdef SUPPORT_TAGS
	free_all_tags();
#endif
	for( i = 0; i < 8; ++i )
		hostdata->busy[i] = 0;
#ifdef REAL_DMA
	hostdata->dma_len = 0;
#endif
	local_irq_restore(flags);

	/* we did not completely reset all commands, so a wakeup is required */
	return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET;
#endif /* 1 */
}

/* Local Variables: */
/* tab-width: 8 */
/* End: */
gpl-2.0
eMarco/kernel_samsung_tuna
drivers/media/radio/radio-timb.c
2532
6631
/*
 * radio-timb.c Timberdale FPGA Radio driver
 * Copyright (c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/version.h>
#include <linux/io.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <media/timb_radio.h>

#define DRIVER_NAME "timb-radio"

/* Per-device state: the radio itself has no registers here; tuner and DSP
 * operations are delegated to the two v4l2 sub-devices below. */
struct timbradio {
	struct timb_radio_platform_data	pdata;
	struct v4l2_subdev	*sd_tuner;	/* handles tuner/frequency ioctls */
	struct v4l2_subdev	*sd_dsp;	/* handles control (volume etc.) ioctls */
	struct video_device	video_dev;
	struct v4l2_device	v4l2_dev;
	struct mutex	lock;		/* serializes ioctls (video_dev.lock) */
};

/* Fill in static capability info for VIDIOC_QUERYCAP. */
static int timbradio_vidioc_querycap(struct file *file, void  *priv,
	struct v4l2_capability *v)
{
	strlcpy(v->driver, DRIVER_NAME, sizeof(v->driver));
	strlcpy(v->card, "Timberdale Radio", sizeof(v->card));
	snprintf(v->bus_info, sizeof(v->bus_info), "platform:"DRIVER_NAME);
	v->version = KERNEL_VERSION(0, 0, 1);
	v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
	return 0;
}

/* Tuner get/set: forwarded verbatim to the tuner sub-device. */
static int timbradio_vidioc_g_tuner(struct file *file, void *priv,
	struct v4l2_tuner *v)
{
	struct timbradio *tr = video_drvdata(file);
	return v4l2_subdev_call(tr->sd_tuner, tuner, g_tuner, v);
}

static int timbradio_vidioc_s_tuner(struct file *file, void *priv,
	struct v4l2_tuner *v)
{
	struct timbradio *tr = video_drvdata(file);
	return v4l2_subdev_call(tr->sd_tuner, tuner, s_tuner, v);
}

/* Single fixed input: only index 0 exists. */
static int timbradio_vidioc_g_input(struct file *filp, void *priv,
	unsigned int *i)
{
	*i = 0;
	return 0;
}

static int timbradio_vidioc_s_input(struct file *filp, void *priv,
	unsigned int i)
{
	return i ? -EINVAL : 0;
}

/* Single fixed audio input ("Radio", stereo-capable). */
static int timbradio_vidioc_g_audio(struct file *file, void *priv,
	struct v4l2_audio *a)
{
	a->index = 0;
	strlcpy(a->name, "Radio", sizeof(a->name));
	a->capability = V4L2_AUDCAP_STEREO;
	return 0;
}

static int timbradio_vidioc_s_audio(struct file *file, void *priv,
	struct v4l2_audio *a)
{
	return a->index ? -EINVAL : 0;
}

/* Frequency get/set: forwarded to the tuner sub-device. */
static int timbradio_vidioc_s_frequency(struct file *file, void *priv,
	struct v4l2_frequency *f)
{
	struct timbradio *tr = video_drvdata(file);
	return v4l2_subdev_call(tr->sd_tuner, tuner, s_frequency, f);
}

static int timbradio_vidioc_g_frequency(struct file *file, void *priv,
	struct v4l2_frequency *f)
{
	struct timbradio *tr = video_drvdata(file);
	return v4l2_subdev_call(tr->sd_tuner, tuner, g_frequency, f);
}

/* Controls (e.g. audio processing) live on the DSP sub-device. */
static int timbradio_vidioc_queryctrl(struct file *file, void *priv,
	struct v4l2_queryctrl *qc)
{
	struct timbradio *tr = video_drvdata(file);
	return v4l2_subdev_call(tr->sd_dsp, core, queryctrl, qc);
}

static int timbradio_vidioc_g_ctrl(struct file *file, void *priv,
	struct v4l2_control *ctrl)
{
	struct timbradio *tr = video_drvdata(file);
	return v4l2_subdev_call(tr->sd_dsp, core, g_ctrl, ctrl);
}

static int timbradio_vidioc_s_ctrl(struct file *file, void *priv,
	struct v4l2_control *ctrl)
{
	struct timbradio *tr = video_drvdata(file);
	return v4l2_subdev_call(tr->sd_dsp, core, s_ctrl, ctrl);
}

static const struct v4l2_ioctl_ops timbradio_ioctl_ops = {
	.vidioc_querycap	= timbradio_vidioc_querycap,
	.vidioc_g_tuner		= timbradio_vidioc_g_tuner,
	.vidioc_s_tuner		= timbradio_vidioc_s_tuner,
	.vidioc_g_frequency	= timbradio_vidioc_g_frequency,
	.vidioc_s_frequency	= timbradio_vidioc_s_frequency,
	.vidioc_g_input		= timbradio_vidioc_g_input,
	.vidioc_s_input		= timbradio_vidioc_s_input,
	.vidioc_g_audio		= timbradio_vidioc_g_audio,
	.vidioc_s_audio		= timbradio_vidioc_s_audio,
	.vidioc_queryctrl	= timbradio_vidioc_queryctrl,
	.vidioc_g_ctrl		= timbradio_vidioc_g_ctrl,
	.vidioc_s_ctrl		= timbradio_vidioc_s_ctrl
};

static const struct v4l2_file_operations timbradio_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
};

/*
 * Probe: allocate device state, register the v4l2 device and the radio
 * video node.  On any failure, unwinds in reverse order via gotos and
 * logs the error code.
 */
static int __devinit timbradio_probe(struct platform_device *pdev)
{
	struct timb_radio_platform_data *pdata = pdev->dev.platform_data;
	struct timbradio *tr;
	int err;

	if (!pdata) {
		dev_err(&pdev->dev, "Platform data missing\n");
		err = -EINVAL;
		goto err;
	}

	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr) {
		err = -ENOMEM;
		goto err;
	}
	tr->pdata = *pdata;
	mutex_init(&tr->lock);

	strlcpy(tr->video_dev.name, "Timberdale Radio",
		sizeof(tr->video_dev.name));
	tr->video_dev.fops = &timbradio_fops;
	tr->video_dev.ioctl_ops = &timbradio_ioctl_ops;
	/* video_dev is embedded in tr, so no separate release is needed */
	tr->video_dev.release = video_device_release_empty;
	tr->video_dev.minor = -1;
	tr->video_dev.lock = &tr->lock;

	strlcpy(tr->v4l2_dev.name, DRIVER_NAME, sizeof(tr->v4l2_dev.name));
	/* NOTE(review): registered with a NULL parent device — confirm this
	 * is intentional for this platform setup */
	err = v4l2_device_register(NULL, &tr->v4l2_dev);
	if (err)
		goto err_v4l2_dev;

	tr->video_dev.v4l2_dev = &tr->v4l2_dev;

	err = video_register_device(&tr->video_dev, VFL_TYPE_RADIO, -1);
	if (err) {
		dev_err(&pdev->dev, "Error reg video\n");
		goto err_video_req;
	}

	video_set_drvdata(&tr->video_dev, tr);
	platform_set_drvdata(pdev, tr);
	return 0;

err_video_req:
	video_device_release_empty(&tr->video_dev);
	v4l2_device_unregister(&tr->v4l2_dev);
err_v4l2_dev:
	kfree(tr);
err:
	dev_err(&pdev->dev, "Failed to register: %d\n", err);
	return err;
}

/* Remove: tear down in reverse order of probe. */
static int __devexit timbradio_remove(struct platform_device *pdev)
{
	struct timbradio *tr = platform_get_drvdata(pdev);

	video_unregister_device(&tr->video_dev);
	video_device_release_empty(&tr->video_dev);
	v4l2_device_unregister(&tr->v4l2_dev);
	kfree(tr);
	return 0;
}

static struct platform_driver timbradio_platform_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
	.probe		= timbradio_probe,
	.remove		= timbradio_remove,
};

/*--------------------------------------------------------------------------*/

static int __init timbradio_init(void)
{
	return platform_driver_register(&timbradio_platform_driver);
}

static void __exit timbradio_exit(void)
{
	platform_driver_unregister(&timbradio_platform_driver);
}

module_init(timbradio_init);
module_exit(timbradio_exit);

MODULE_DESCRIPTION("Timberdale Radio driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:"DRIVER_NAME);
gpl-2.0
MassStash/htc_m9_kernel_sense_5.0.2
drivers/media/dvb-frontends/it913x-fe.c
2788
24826
/* * Driver for it913x-fe Frontend * * with support for on chip it9137 integral tuner * * Copyright (C) 2011 Malcolm Priestley (tvboxspy@gmail.com) * IT9137 Copyright (C) ITE Tech Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.= */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include "dvb_frontend.h" #include "it913x-fe.h" #include "it913x-fe-priv.h" static int it913x_debug; module_param_named(debug, it913x_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))."); #define dprintk(level, args...) do { \ if (level & it913x_debug) \ printk(KERN_DEBUG "it913x-fe: " args); \ } while (0) #define deb_info(args...) dprintk(0x01, args) #define debug_data_snipet(level, name, p) \ dprintk(level, name" (%02x%02x%02x%02x%02x%02x%02x%02x)", \ *p, *(p+1), *(p+2), *(p+3), *(p+4), \ *(p+5), *(p+6), *(p+7)); #define info(format, arg...) 
\ printk(KERN_INFO "it913x-fe: " format "\n" , ## arg) struct it913x_fe_state { struct dvb_frontend frontend; struct i2c_adapter *i2c_adap; struct ite_config *config; u8 i2c_addr; u32 frequency; fe_modulation_t constellation; fe_transmit_mode_t transmission_mode; u8 priority; u32 crystalFrequency; u32 adcFrequency; u8 tuner_type; struct adctable *table; fe_status_t it913x_status; u16 tun_xtal; u8 tun_fdiv; u8 tun_clk_mode; u32 tun_fn_min; u32 ucblocks; }; static int it913x_read_reg(struct it913x_fe_state *state, u32 reg, u8 *data, u8 count) { int ret; u8 pro = PRO_DMOD; /* All reads from demodulator */ u8 b[4]; struct i2c_msg msg[2] = { { .addr = state->i2c_addr + (pro << 1), .flags = 0, .buf = b, .len = sizeof(b) }, { .addr = state->i2c_addr + (pro << 1), .flags = I2C_M_RD, .buf = data, .len = count } }; b[0] = (u8) reg >> 24; b[1] = (u8)(reg >> 16) & 0xff; b[2] = (u8)(reg >> 8) & 0xff; b[3] = (u8) reg & 0xff; ret = i2c_transfer(state->i2c_adap, msg, 2); return ret; } static int it913x_read_reg_u8(struct it913x_fe_state *state, u32 reg) { int ret; u8 b[1]; ret = it913x_read_reg(state, reg, &b[0], sizeof(b)); return (ret < 0) ? 
-ENODEV : b[0]; } static int it913x_write(struct it913x_fe_state *state, u8 pro, u32 reg, u8 buf[], u8 count) { u8 b[256]; struct i2c_msg msg[1] = { { .addr = state->i2c_addr + (pro << 1), .flags = 0, .buf = b, .len = count + 4 } }; int ret; b[0] = (u8) reg >> 24; b[1] = (u8)(reg >> 16) & 0xff; b[2] = (u8)(reg >> 8) & 0xff; b[3] = (u8) reg & 0xff; memcpy(&b[4], buf, count); ret = i2c_transfer(state->i2c_adap, msg, 1); if (ret < 0) return -EIO; return 0; } static int it913x_write_reg(struct it913x_fe_state *state, u8 pro, u32 reg, u32 data) { int ret; u8 b[4]; u8 s; b[0] = data >> 24; b[1] = (data >> 16) & 0xff; b[2] = (data >> 8) & 0xff; b[3] = data & 0xff; /* expand write as needed */ if (data < 0x100) s = 3; else if (data < 0x1000) s = 2; else if (data < 0x100000) s = 1; else s = 0; ret = it913x_write(state, pro, reg, &b[s], sizeof(b) - s); return ret; } static int it913x_fe_script_loader(struct it913x_fe_state *state, struct it913xset *loadscript) { int ret, i; if (loadscript == NULL) return -EINVAL; for (i = 0; i < 1000; ++i) { if (loadscript[i].pro == 0xff) break; ret = it913x_write(state, loadscript[i].pro, loadscript[i].address, loadscript[i].reg, loadscript[i].count); if (ret < 0) return -ENODEV; } return 0; } static int it913x_init_tuner(struct it913x_fe_state *state) { int ret, i, reg; u8 val, nv_val; u8 nv[] = {48, 32, 24, 16, 12, 8, 6, 4, 2}; u8 b[2]; reg = it913x_read_reg_u8(state, 0xec86); switch (reg) { case 0: state->tun_clk_mode = reg; state->tun_xtal = 2000; state->tun_fdiv = 3; val = 16; break; case -ENODEV: return -ENODEV; case 1: default: state->tun_clk_mode = reg; state->tun_xtal = 640; state->tun_fdiv = 1; val = 6; break; } reg = it913x_read_reg_u8(state, 0xed03); if (reg < 0) return -ENODEV; else if (reg < ARRAY_SIZE(nv)) nv_val = nv[reg]; else nv_val = 2; for (i = 0; i < 50; i++) { ret = it913x_read_reg(state, 0xed23, &b[0], sizeof(b)); reg = (b[1] << 8) + b[0]; if (reg > 0) break; if (ret < 0) return -ENODEV; udelay(2000); } 
state->tun_fn_min = state->tun_xtal * reg; state->tun_fn_min /= (state->tun_fdiv * nv_val); deb_info("Tuner fn_min %d", state->tun_fn_min); if (state->config->chip_ver > 1) msleep(50); else { for (i = 0; i < 50; i++) { reg = it913x_read_reg_u8(state, 0xec82); if (reg > 0) break; if (reg < 0) return -ENODEV; udelay(2000); } } return it913x_write_reg(state, PRO_DMOD, 0xed81, val); } static int it9137_set_tuner(struct it913x_fe_state *state, u32 bandwidth, u32 frequency_m) { struct it913xset *set_tuner = set_it9137_template; int ret, reg; u32 frequency = frequency_m / 1000; u32 freq, temp_f, tmp; u16 iqik_m_cal; u16 n_div; u8 n; u8 l_band; u8 lna_band; u8 bw; if (state->config->firmware_ver == 1) set_tuner = set_it9135_template; else set_tuner = set_it9137_template; deb_info("Tuner Frequency %d Bandwidth %d", frequency, bandwidth); if (frequency >= 51000 && frequency <= 440000) { l_band = 0; lna_band = 0; } else if (frequency > 440000 && frequency <= 484000) { l_band = 1; lna_band = 1; } else if (frequency > 484000 && frequency <= 533000) { l_band = 1; lna_band = 2; } else if (frequency > 533000 && frequency <= 587000) { l_band = 1; lna_band = 3; } else if (frequency > 587000 && frequency <= 645000) { l_band = 1; lna_band = 4; } else if (frequency > 645000 && frequency <= 710000) { l_band = 1; lna_band = 5; } else if (frequency > 710000 && frequency <= 782000) { l_band = 1; lna_band = 6; } else if (frequency > 782000 && frequency <= 860000) { l_band = 1; lna_band = 7; } else if (frequency > 1450000 && frequency <= 1492000) { l_band = 1; lna_band = 0; } else if (frequency > 1660000 && frequency <= 1685000) { l_band = 1; lna_band = 1; } else return -EINVAL; set_tuner[0].reg[0] = lna_band; switch (bandwidth) { case 5000000: bw = 0; break; case 6000000: bw = 2; break; case 7000000: bw = 4; break; default: case 8000000: bw = 6; break; } set_tuner[1].reg[0] = bw; set_tuner[2].reg[0] = 0xa0 | (l_band << 3); if (frequency > 53000 && frequency <= 74000) { n_div = 48; n = 0; } 
else if (frequency > 74000 && frequency <= 111000) { n_div = 32; n = 1; } else if (frequency > 111000 && frequency <= 148000) { n_div = 24; n = 2; } else if (frequency > 148000 && frequency <= 222000) { n_div = 16; n = 3; } else if (frequency > 222000 && frequency <= 296000) { n_div = 12; n = 4; } else if (frequency > 296000 && frequency <= 445000) { n_div = 8; n = 5; } else if (frequency > 445000 && frequency <= state->tun_fn_min) { n_div = 6; n = 6; } else if (frequency > state->tun_fn_min && frequency <= 950000) { n_div = 4; n = 7; } else if (frequency > 1450000 && frequency <= 1680000) { n_div = 2; n = 0; } else return -EINVAL; reg = it913x_read_reg_u8(state, 0xed81); iqik_m_cal = (u16)reg * n_div; if (reg < 0x20) { if (state->tun_clk_mode == 0) iqik_m_cal = (iqik_m_cal * 9) >> 5; else iqik_m_cal >>= 1; } else { iqik_m_cal = 0x40 - iqik_m_cal; if (state->tun_clk_mode == 0) iqik_m_cal = ~((iqik_m_cal * 9) >> 5); else iqik_m_cal = ~(iqik_m_cal >> 1); } temp_f = frequency * (u32)n_div * (u32)state->tun_fdiv; freq = temp_f / state->tun_xtal; tmp = freq * state->tun_xtal; if ((temp_f - tmp) >= (state->tun_xtal >> 1)) freq++; freq += (u32) n << 13; /* Frequency OMEGA_IQIK_M_CAL_MID*/ temp_f = freq + (u32)iqik_m_cal; set_tuner[3].reg[0] = temp_f & 0xff; set_tuner[4].reg[0] = (temp_f >> 8) & 0xff; deb_info("High Frequency = %04x", temp_f); /* Lower frequency */ set_tuner[5].reg[0] = freq & 0xff; set_tuner[6].reg[0] = (freq >> 8) & 0xff; deb_info("low Frequency = %04x", freq); ret = it913x_fe_script_loader(state, set_tuner); return (ret < 0) ? 
-ENODEV : 0; } static int it913x_fe_select_bw(struct it913x_fe_state *state, u32 bandwidth, u32 adcFrequency) { int ret, i; u8 buffer[256]; u32 coeff[8]; u16 bfsfcw_fftinx_ratio; u16 fftinx_bfsfcw_ratio; u8 count; u8 bw; u8 adcmultiplier; deb_info("Bandwidth %d Adc %d", bandwidth, adcFrequency); switch (bandwidth) { case 5000000: bw = 3; break; case 6000000: bw = 0; break; case 7000000: bw = 1; break; default: case 8000000: bw = 2; break; } ret = it913x_write_reg(state, PRO_DMOD, REG_BW, bw); if (state->table == NULL) return -EINVAL; /* In write order */ coeff[0] = state->table[bw].coeff_1_2048; coeff[1] = state->table[bw].coeff_2_2k; coeff[2] = state->table[bw].coeff_1_8191; coeff[3] = state->table[bw].coeff_1_8192; coeff[4] = state->table[bw].coeff_1_8193; coeff[5] = state->table[bw].coeff_2_8k; coeff[6] = state->table[bw].coeff_1_4096; coeff[7] = state->table[bw].coeff_2_4k; bfsfcw_fftinx_ratio = state->table[bw].bfsfcw_fftinx_ratio; fftinx_bfsfcw_ratio = state->table[bw].fftinx_bfsfcw_ratio; /* ADC multiplier */ ret = it913x_read_reg_u8(state, ADC_X_2); if (ret < 0) return -EINVAL; adcmultiplier = ret; count = 0; /* Build Buffer for COEFF Registers */ for (i = 0; i < 8; i++) { if (adcmultiplier == 1) coeff[i] /= 2; buffer[count++] = (coeff[i] >> 24) & 0x3; buffer[count++] = (coeff[i] >> 16) & 0xff; buffer[count++] = (coeff[i] >> 8) & 0xff; buffer[count++] = coeff[i] & 0xff; } /* bfsfcw_fftinx_ratio register 0x21-0x22 */ buffer[count++] = bfsfcw_fftinx_ratio & 0xff; buffer[count++] = (bfsfcw_fftinx_ratio >> 8) & 0xff; /* fftinx_bfsfcw_ratio register 0x23-0x24 */ buffer[count++] = fftinx_bfsfcw_ratio & 0xff; buffer[count++] = (fftinx_bfsfcw_ratio >> 8) & 0xff; /* start at COEFF_1_2048 and write through to fftinx_bfsfcw_ratio*/ ret = it913x_write(state, PRO_DMOD, COEFF_1_2048, buffer, count); for (i = 0; i < 42; i += 8) debug_data_snipet(0x1, "Buffer", &buffer[i]); return ret; } static int it913x_fe_read_status(struct dvb_frontend *fe, fe_status_t *status) { 
struct it913x_fe_state *state = fe->demodulator_priv; int ret, i; fe_status_t old_status = state->it913x_status; *status = 0; if (state->it913x_status == 0) { ret = it913x_read_reg_u8(state, EMPTY_CHANNEL_STATUS); if (ret == 0x1) { *status |= FE_HAS_SIGNAL; for (i = 0; i < 40; i++) { ret = it913x_read_reg_u8(state, MP2IF_SYNC_LK); if (ret == 0x1) break; msleep(25); } if (ret == 0x1) *status |= FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC; state->it913x_status = *status; } } if (state->it913x_status & FE_HAS_SYNC) { ret = it913x_read_reg_u8(state, TPSD_LOCK); if (ret == 0x1) *status |= FE_HAS_LOCK | state->it913x_status; else state->it913x_status = 0; if (old_status != state->it913x_status) ret = it913x_write_reg(state, PRO_LINK, GPIOH3_O, ret); } return 0; } /* FEC values based on fe_code_rate_t non supported values 0*/ int it913x_qpsk_pval[] = {0, -93, -91, -90, 0, -89, -88}; int it913x_16qam_pval[] = {0, -87, -85, -84, 0, -83, -82}; int it913x_64qam_pval[] = {0, -82, -80, -78, 0, -77, -76}; static int it913x_get_signal_strength(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct it913x_fe_state *state = fe->demodulator_priv; u8 code_rate; int ret, temp; u8 lna_gain_os; ret = it913x_read_reg_u8(state, VAR_P_INBAND); if (ret < 0) return ret; /* VHF/UHF gain offset */ if (state->frequency < 300000000) lna_gain_os = 7; else lna_gain_os = 14; temp = (ret - 100) - lna_gain_os; if (state->priority == PRIORITY_HIGH) code_rate = p->code_rate_HP; else code_rate = p->code_rate_LP; if (code_rate >= ARRAY_SIZE(it913x_qpsk_pval)) return -EINVAL; deb_info("Reg VAR_P_INBAND:%d Calc Offset Value:%d", ret, temp); /* Apply FEC offset values*/ switch (p->modulation) { case QPSK: temp -= it913x_qpsk_pval[code_rate]; break; case QAM_16: temp -= it913x_16qam_pval[code_rate]; break; case QAM_64: temp -= it913x_64qam_pval[code_rate]; break; default: return -EINVAL; } if (temp < -15) ret = 0; else if ((-15 <= temp) && (temp < 0)) ret = (2 * 
(temp + 15)) / 3; else if ((0 <= temp) && (temp < 20)) ret = 4 * temp + 10; else if ((20 <= temp) && (temp < 35)) ret = (2 * (temp - 20)) / 3 + 90; else if (temp >= 35) ret = 100; deb_info("Signal Strength :%d", ret); return ret; } static int it913x_fe_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct it913x_fe_state *state = fe->demodulator_priv; int ret = 0; if (state->config->read_slevel) { if (state->it913x_status & FE_HAS_SIGNAL) ret = it913x_read_reg_u8(state, SIGNAL_LEVEL); } else ret = it913x_get_signal_strength(fe); if (ret >= 0) *strength = (u16)((u32)ret * 0xffff / 0x64); return (ret < 0) ? -ENODEV : 0; } static int it913x_fe_read_snr(struct dvb_frontend *fe, u16 *snr) { struct it913x_fe_state *state = fe->demodulator_priv; int ret; u8 reg[3]; u32 snr_val, snr_min, snr_max; u32 temp; ret = it913x_read_reg(state, 0x2c, reg, sizeof(reg)); snr_val = (u32)(reg[2] << 16) | (reg[1] << 8) | reg[0]; ret |= it913x_read_reg(state, 0xf78b, reg, 1); if (reg[0]) snr_val /= reg[0]; if (state->transmission_mode == TRANSMISSION_MODE_2K) snr_val *= 4; else if (state->transmission_mode == TRANSMISSION_MODE_4K) snr_val *= 2; if (state->constellation == QPSK) { snr_min = 0xb4711; snr_max = 0x191451; } else if (state->constellation == QAM_16) { snr_min = 0x4f0d5; snr_max = 0xc7925; } else if (state->constellation == QAM_64) { snr_min = 0x256d0; snr_max = 0x626be; } else return -EINVAL; if (snr_val < snr_min) *snr = 0; else if (snr_val < snr_max) { temp = (snr_val - snr_min) >> 5; temp *= 0xffff; temp /= (snr_max - snr_min) >> 5; *snr = (u16)temp; } else *snr = 0xffff; return (ret < 0) ? 
-ENODEV : 0; } static int it913x_fe_read_ber(struct dvb_frontend *fe, u32 *ber) { struct it913x_fe_state *state = fe->demodulator_priv; u8 reg[5]; /* Read Aborted Packets and Pre-Viterbi error rate 5 bytes */ it913x_read_reg(state, RSD_ABORT_PKT_LSB, reg, sizeof(reg)); state->ucblocks += (u32)(reg[1] << 8) | reg[0]; *ber = (u32)(reg[4] << 16) | (reg[3] << 8) | reg[2]; return 0; } static int it913x_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct it913x_fe_state *state = fe->demodulator_priv; int ret; u8 reg[2]; /* Aborted Packets */ ret = it913x_read_reg(state, RSD_ABORT_PKT_LSB, reg, sizeof(reg)); state->ucblocks += (u32)(reg[1] << 8) | reg[0]; *ucblocks = state->ucblocks; return ret; } static int it913x_fe_get_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct it913x_fe_state *state = fe->demodulator_priv; u8 reg[8]; it913x_read_reg(state, REG_TPSD_TX_MODE, reg, sizeof(reg)); if (reg[3] < 3) p->modulation = fe_con[reg[3]]; if (reg[0] < 3) p->transmission_mode = fe_mode[reg[0]]; if (reg[1] < 4) p->guard_interval = fe_gi[reg[1]]; if (reg[2] < 4) p->hierarchy = fe_hi[reg[2]]; state->priority = reg[5]; p->code_rate_HP = (reg[6] < 6) ? fe_code[reg[6]] : FEC_NONE; p->code_rate_LP = (reg[7] < 6) ? 
fe_code[reg[7]] : FEC_NONE; /* Update internal state to reflect the autodetected props */ state->constellation = p->modulation; state->transmission_mode = p->transmission_mode; return 0; } static int it913x_fe_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct it913x_fe_state *state = fe->demodulator_priv; int i; u8 empty_ch, last_ch; state->it913x_status = 0; /* Set bw*/ it913x_fe_select_bw(state, p->bandwidth_hz, state->adcFrequency); /* Training Mode Off */ it913x_write_reg(state, PRO_LINK, TRAINING_MODE, 0x0); /* Clear Empty Channel */ it913x_write_reg(state, PRO_DMOD, EMPTY_CHANNEL_STATUS, 0x0); /* Clear bits */ it913x_write_reg(state, PRO_DMOD, MP2IF_SYNC_LK, 0x0); /* LED on */ it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x1); /* Select Band*/ if ((p->frequency >= 51000000) && (p->frequency <= 230000000)) i = 0; else if ((p->frequency >= 350000000) && (p->frequency <= 900000000)) i = 1; else if ((p->frequency >= 1450000000) && (p->frequency <= 1680000000)) i = 2; else return -EOPNOTSUPP; it913x_write_reg(state, PRO_DMOD, FREE_BAND, i); deb_info("Frontend Set Tuner Type %02x", state->tuner_type); switch (state->tuner_type) { case IT9135_38: case IT9135_51: case IT9135_52: case IT9135_60: case IT9135_61: case IT9135_62: it9137_set_tuner(state, p->bandwidth_hz, p->frequency); break; default: if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } break; } /* LED off */ it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x0); /* Trigger ofsm */ it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x0); last_ch = 2; for (i = 0; i < 40; ++i) { empty_ch = it913x_read_reg_u8(state, EMPTY_CHANNEL_STATUS); if (last_ch == 1 && empty_ch == 1) break; if (last_ch == 2 && empty_ch == 2) return 0; last_ch = empty_ch; msleep(25); } for (i = 0; i < 40; ++i) { if (it913x_read_reg_u8(state, D_TPSD_LOCK) == 1) break; msleep(25); } state->frequency = p->frequency; 
return 0; } static int it913x_fe_suspend(struct it913x_fe_state *state) { int ret, i; u8 b; ret = it913x_write_reg(state, PRO_DMOD, SUSPEND_FLAG, 0x1); ret |= it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x0); for (i = 0; i < 128; i++) { ret = it913x_read_reg(state, SUSPEND_FLAG, &b, 1); if (ret < 0) return -ENODEV; if (b == 0) break; } ret |= it913x_write_reg(state, PRO_DMOD, AFE_MEM0, 0x8); /* Turn LED off */ ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x0); ret |= it913x_fe_script_loader(state, it9137_tuner_off); return (ret < 0) ? -ENODEV : 0; } /* Power sequence */ /* Power Up Tuner on -> Frontend suspend off -> Tuner clk on */ /* Power Down Frontend suspend on -> Tuner clk off -> Tuner off */ static int it913x_fe_sleep(struct dvb_frontend *fe) { struct it913x_fe_state *state = fe->demodulator_priv; return it913x_fe_suspend(state); } static u32 compute_div(u32 a, u32 b, u32 x) { u32 res = 0; u32 c = 0; u32 i = 0; if (a > b) { c = a / b; a = a - c * b; } for (i = 0; i < x; i++) { if (a >= b) { res += 1; a -= b; } a <<= 1; res <<= 1; } res = (c << x) + res; return res; } static int it913x_fe_start(struct it913x_fe_state *state) { struct it913xset *set_lna; struct it913xset *set_mode; int ret; u8 adf = (state->config->adf & 0xf); u32 adc, xtal; u8 b[4]; if (state->config->chip_ver == 1) ret = it913x_init_tuner(state); info("ADF table value :%02x", adf); if (adf < 10) { state->crystalFrequency = fe_clockTable[adf].xtal ; state->table = fe_clockTable[adf].table; state->adcFrequency = state->table->adcFrequency; adc = compute_div(state->adcFrequency, 1000000ul, 19ul); xtal = compute_div(state->crystalFrequency, 1000000ul, 19ul); } else return -EINVAL; /* Set LED indicator on GPIOH3 */ ret = it913x_write_reg(state, PRO_LINK, GPIOH3_EN, 0x1); ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_ON, 0x1); ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x1); ret |= it913x_write_reg(state, PRO_LINK, 0xf641, state->tuner_type); ret |= it913x_write_reg(state, 
PRO_DMOD, 0xf5ca, 0x01); ret |= it913x_write_reg(state, PRO_DMOD, 0xf715, 0x01); b[0] = xtal & 0xff; b[1] = (xtal >> 8) & 0xff; b[2] = (xtal >> 16) & 0xff; b[3] = (xtal >> 24); ret |= it913x_write(state, PRO_DMOD, XTAL_CLK, b , 4); b[0] = adc & 0xff; b[1] = (adc >> 8) & 0xff; b[2] = (adc >> 16) & 0xff; ret |= it913x_write(state, PRO_DMOD, ADC_FREQ, b, 3); if (state->config->adc_x2) ret |= it913x_write_reg(state, PRO_DMOD, ADC_X_2, 0x01); b[0] = 0; b[1] = 0; b[2] = 0; ret |= it913x_write(state, PRO_DMOD, 0x0029, b, 3); info("Crystal Frequency :%d Adc Frequency :%d ADC X2: %02x", state->crystalFrequency, state->adcFrequency, state->config->adc_x2); deb_info("Xtal value :%04x Adc value :%04x", xtal, adc); if (ret < 0) return -ENODEV; /* v1 or v2 tuner script */ if (state->config->chip_ver > 1) ret = it913x_fe_script_loader(state, it9135_v2); else ret = it913x_fe_script_loader(state, it9135_v1); if (ret < 0) return ret; /* LNA Scripts */ switch (state->tuner_type) { case IT9135_51: set_lna = it9135_51; break; case IT9135_52: set_lna = it9135_52; break; case IT9135_60: set_lna = it9135_60; break; case IT9135_61: set_lna = it9135_61; break; case IT9135_62: set_lna = it9135_62; break; case IT9135_38: default: set_lna = it9135_38; } info("Tuner LNA type :%02x", state->tuner_type); ret = it913x_fe_script_loader(state, set_lna); if (ret < 0) return ret; if (state->config->chip_ver == 2) { ret = it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x1); ret |= it913x_write_reg(state, PRO_LINK, PADODPU, 0x0); ret |= it913x_write_reg(state, PRO_LINK, AGC_O_D, 0x0); ret |= it913x_init_tuner(state); } if (ret < 0) return -ENODEV; /* Always solo frontend */ set_mode = set_solo_fe; ret |= it913x_fe_script_loader(state, set_mode); ret |= it913x_fe_suspend(state); return (ret < 0) ? 
		-ENODEV : 0;
}

/*
 * it913x_fe_init - DVB frontend .init callback
 *
 * Powers up the tuner (register 0xec40), runs the init_1 register
 * script and clears AFE_MEM0 / 0xfba8.  Error codes from the writes
 * are OR-accumulated; returns 0 on success or -ENODEV if any write
 * failed.
 */
static int it913x_fe_init(struct dvb_frontend *fe)
{
	struct it913x_fe_state *state = fe->demodulator_priv;
	int ret = 0;
	/* Power Up Tuner - common all versions */
	ret = it913x_write_reg(state, PRO_DMOD, 0xec40, 0x1);
	ret |= it913x_fe_script_loader(state, init_1);
	ret |= it913x_write_reg(state, PRO_DMOD, AFE_MEM0, 0x0);
	ret |= it913x_write_reg(state, PRO_DMOD, 0xfba8, 0x0);
	return (ret < 0) ? -ENODEV : 0;
}

/* DVB frontend .release callback: free the demod private state. */
static void it913x_fe_release(struct dvb_frontend *fe)
{
	struct it913x_fe_state *state = fe->demodulator_priv;
	kfree(state);
}

/* Forward declaration; the ops table is defined below. */
static struct dvb_frontend_ops it913x_fe_ofdm_ops;

/*
 * it913x_fe_attach - allocate and initialize a frontend instance
 * @i2c_adap:	I2C adapter the demodulator is attached to
 * @i2c_addr:	demodulator I2C address
 * @config:	chip configuration; must be non-NULL
 *
 * Selects the tuner type from config->tuner_id_0 (unknown IDs fall
 * back to IT9135_38), starts the frontend hardware and wires up the
 * dvb_frontend ops.  Returns the frontend on success, NULL on any
 * failure (allocation, missing config, or it913x_fe_start() error).
 * The returned frontend owns the state; it is freed via .release.
 */
struct dvb_frontend *it913x_fe_attach(struct i2c_adapter *i2c_adap,
		u8 i2c_addr, struct ite_config *config)
{
	struct it913x_fe_state *state = NULL;
	int ret;

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct it913x_fe_state), GFP_KERNEL);
	if (state == NULL)
		return NULL;
	if (config == NULL)
		goto error;

	state->i2c_adap = i2c_adap;
	state->i2c_addr = i2c_addr;
	state->config = config;

	/* Pick the LNA/tuner variant; anything unrecognized is
	 * treated as the IT9135_38 default. */
	switch (state->config->tuner_id_0) {
	case IT9135_51:
	case IT9135_52:
	case IT9135_60:
	case IT9135_61:
	case IT9135_62:
		state->tuner_type = state->config->tuner_id_0;
		break;
	default:
	case IT9135_38:
		state->tuner_type = IT9135_38;
	}

	ret = it913x_fe_start(state);
	if (ret < 0)
		goto error;

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &it913x_fe_ofdm_ops,
			sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;

	return &state->frontend;
error:
	kfree(state);
	return NULL;
}
EXPORT_SYMBOL(it913x_fe_attach);

/* DVB-T frontend operations and capability advertisement. */
static struct dvb_frontend_ops it913x_fe_ofdm_ops = {
	.delsys = { SYS_DVBT },
	.info = {
		.name			= "it913x-fe DVB-T",
		.frequency_min		= 51000000,
		.frequency_max		= 1680000000,
		.frequency_stepsize	= 62500,
		.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
			FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO |
			FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
			FE_CAN_TRANSMISSION_MODE_AUTO |
			FE_CAN_GUARD_INTERVAL_AUTO |
			FE_CAN_HIERARCHY_AUTO,
	},
	.release = it913x_fe_release,
	.init = it913x_fe_init,
	.sleep = it913x_fe_sleep,
	.set_frontend = it913x_fe_set_frontend,
	.get_frontend = it913x_fe_get_frontend,
	.read_status = it913x_fe_read_status,
	.read_signal_strength = it913x_fe_read_signal_strength,
	.read_snr = it913x_fe_read_snr,
	.read_ber = it913x_fe_read_ber,
	.read_ucblocks = it913x_fe_read_ucblocks,
};

MODULE_DESCRIPTION("it913x Frontend and it9137 tuner");
MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
MODULE_VERSION("1.15");
MODULE_LICENSE("GPL");
gpl-2.0
rqmok/android_kernel_htc_msm7x30
drivers/net/wimax/i2400m/sdio.c
3044
17303
/* * Intel Wireless WiMAX Connection 2400m * Linux driver model glue for the SDIO device, reset & fw upload * * * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> * Dirk Brandewie <dirk.j.brandewie@intel.com> * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * Yanir Lubetkin <yanirx.lubetkin@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * See i2400m-sdio.h for a general description of this driver. * * This file implements driver model glue, and hook ups for the * generic driver to implement the bus-specific functions (device * communication setup/tear down, firmware upload and resetting). 
 *
 * ROADMAP
 *
 * i2400m_probe()
 *   alloc_netdev()
 *     i2400ms_netdev_setup()
 *       i2400ms_init()
 *       i2400m_netdev_setup()
 *   i2400ms_enable_function()
 *   i2400m_setup()
 *
 * i2400m_remove()
 *   i2400m_release()
 *   free_netdev(net_dev)
 *
 * i2400ms_bus_reset()            Called by i2400m_reset
 *   __i2400ms_reset()
 *     __i2400ms_send_barker()
 */

#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include "i2400m-sdio.h"
#include <linux/wimax/i2400m.h>

/* Debug submodule this file logs under (see sdio-debug-levels.h) */
#define D_SUBMODULE main
#include "sdio-debug-levels.h"

/* IOE WiMAX function timeout in seconds */
static int ioe_timeout = 2;
module_param(ioe_timeout, int, 0);

/* Space-separated NAME:VALUE debug levels, parsed at module load */
static char i2400ms_debug_params[128];
module_param_string(debug, i2400ms_debug_params,
		    sizeof(i2400ms_debug_params), 0644);
MODULE_PARM_DESC(debug,
		 "String of space-separated NAME:VALUE pairs, where NAMEs "
		 "are the different debug submodules and VALUE are the "
		 "initial debug value to set.");

/* Our firmware file name list */
static const char *i2400ms_bus_fw_names[] = {
#define I2400MS_FW_FILE_NAME "i2400m-fw-sdio-1.3.sbcf"
	I2400MS_FW_FILE_NAME,
	NULL
};

/* Address/value pokes handed to the generic driver through
 * i2400m->bus_bm_pokes_table (set in i2400ms_probe()). */
static const struct i2400m_poke_table i2400ms_pokes[] = {
	I2400M_FW_POKE(0x6BE260, 0x00000088),
	I2400M_FW_POKE(0x080550, 0x00000005),
	I2400M_FW_POKE(0xAE0000, 0x00000000),
	I2400M_FW_POKE(0x000000, 0x00000000), /* MUST be 0 terminated or bad
					       * things will happen */
};

/*
 * Enable the SDIO function
 *
 * Tries to enable the SDIO function; might fail if it is still not
 * ready (in some hardware, the SDIO WiMAX function is only enabled
 * when we ask it to explicitly doing). Tries until a timeout is
 * reached.
 *
 * The @maxtries argument indicates how many times (at most) it should
 * be tried to enable the function. 0 means forever. This acts along
 * with the timeout (ie: it'll stop trying as soon as the maximum
 * number of tries is reached _or_ as soon as the timeout is reached).
* * The reverse of this is...sdio_disable_function() * * Returns: 0 if the SDIO function was enabled, < 0 errno code on * error (-ENODEV when it was unable to enable the function). */ static int i2400ms_enable_function(struct i2400ms *i2400ms, unsigned maxtries) { struct sdio_func *func = i2400ms->func; u64 timeout; int err; struct device *dev = &func->dev; unsigned tries = 0; d_fnstart(3, dev, "(func %p)\n", func); /* Setup timeout (FIXME: This needs to read the CIS table to * get a real timeout) and then wait for the device to signal * it is ready */ timeout = get_jiffies_64() + ioe_timeout * HZ; err = -ENODEV; while (err != 0 && time_before64(get_jiffies_64(), timeout)) { sdio_claim_host(func); /* * There is a sillicon bug on the IWMC3200, where the * IOE timeout will cause problems on Moorestown * platforms (system hang). We explicitly overwrite * func->enable_timeout here to work around the issue. */ if (i2400ms->iwmc3200) func->enable_timeout = IWMC3200_IOR_TIMEOUT; err = sdio_enable_func(func); if (0 == err) { sdio_release_host(func); d_printf(2, dev, "SDIO function enabled\n"); goto function_enabled; } d_printf(2, dev, "SDIO function failed to enable: %d\n", err); sdio_release_host(func); if (maxtries > 0 && ++tries >= maxtries) { err = -ETIME; break; } msleep(I2400MS_INIT_SLEEP_INTERVAL); } /* If timed out, device is not there yet -- get -ENODEV so * the device driver core will retry later on. */ if (err == -ETIME) { dev_err(dev, "Can't enable WiMAX function; " " has the function been enabled?\n"); err = -ENODEV; } function_enabled: d_fnend(3, dev, "(func %p) = %d\n", func, err); return err; } /* * Setup minimal device communication infrastructure needed to at * least be able to update the firmware. * * Note the ugly trick: if we are in the probe path * (i2400ms->debugfs_dentry == NULL), we only retry function * enablement one, to avoid racing with the iwmc3200 top controller. 
*/ static int i2400ms_bus_setup(struct i2400m *i2400m) { int result; struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m); struct device *dev = i2400m_dev(i2400m); struct sdio_func *func = i2400ms->func; int retries; sdio_claim_host(func); result = sdio_set_block_size(func, I2400MS_BLK_SIZE); sdio_release_host(func); if (result < 0) { dev_err(dev, "Failed to set block size: %d\n", result); goto error_set_blk_size; } if (i2400ms->iwmc3200 && i2400ms->debugfs_dentry == NULL) retries = 1; else retries = 0; result = i2400ms_enable_function(i2400ms, retries); if (result < 0) { dev_err(dev, "Cannot enable SDIO function: %d\n", result); goto error_func_enable; } result = i2400ms_tx_setup(i2400ms); if (result < 0) goto error_tx_setup; result = i2400ms_rx_setup(i2400ms); if (result < 0) goto error_rx_setup; return 0; error_rx_setup: i2400ms_tx_release(i2400ms); error_tx_setup: sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); error_func_enable: error_set_blk_size: return result; } /* * Tear down minimal device communication infrastructure needed to at * least be able to update the firmware. */ static void i2400ms_bus_release(struct i2400m *i2400m) { struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m); struct sdio_func *func = i2400ms->func; i2400ms_rx_release(i2400ms); i2400ms_tx_release(i2400ms); sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); } /* * Setup driver resources needed to communicate with the device * * The fw needs some time to settle, and it was just uploaded, * so give it a break first. I'd prefer to just wait for the device to * send something, but seems the poking we do to enable SDIO stuff * interferes with it, so just give it a break before starting... 
*/ static int i2400ms_bus_dev_start(struct i2400m *i2400m) { struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m); struct sdio_func *func = i2400ms->func; struct device *dev = &func->dev; d_fnstart(3, dev, "(i2400m %p)\n", i2400m); msleep(200); d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, 0); return 0; } /* * Sends a barker buffer to the device * * This helper will allocate a kmalloced buffer and use it to transmit * (then free it). Reason for this is that the SDIO host controller * expects alignment (unknown exactly which) which the stack won't * really provide and certain arches/host-controller combinations * cannot use stack/vmalloc/text areas for DMA transfers. */ static int __i2400ms_send_barker(struct i2400ms *i2400ms, const __le32 *barker, size_t barker_size) { int ret; struct sdio_func *func = i2400ms->func; struct device *dev = &func->dev; void *buffer; ret = -ENOMEM; buffer = kmalloc(I2400MS_BLK_SIZE, GFP_KERNEL); if (buffer == NULL) goto error_kzalloc; memcpy(buffer, barker, barker_size); sdio_claim_host(func); ret = sdio_memcpy_toio(func, 0, buffer, I2400MS_BLK_SIZE); sdio_release_host(func); if (ret < 0) d_printf(0, dev, "E: barker error: %d\n", ret); kfree(buffer); error_kzalloc: return ret; } /* * Reset a device at different levels (warm, cold or bus) * * @i2400ms: device descriptor * @reset_type: soft, warm or bus reset (I2400M_RT_WARM/SOFT/BUS) * * FIXME: not tested -- need to confirm expected effects * * Warm and cold resets get an SDIO reset if they fail (unimplemented) * * Warm reset: * * The device will be fully reset internally, but won't be * disconnected from the bus (so no reenumeration will * happen). Firmware upload will be necessary. * * The device will send a reboot barker that will trigger the driver * to reinitialize the state via __i2400m_dev_reset_handle. * * * Cold and bus reset: * * The device will be fully reset internally, disconnected from the * bus an a reenumeration will happen. 
 * Firmware upload will be
 * necessary. Thus, we don't do any locking or struct
 * reinitialization, as we are going to be fully disconnected and
 * reenumerated.
 *
 * Note we need to return -ENODEV if a warm reset was requested and we
 * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset()
 * and wimax_dev->op_reset.
 *
 * WARNING: no driver state saved/fixed
 */
static int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
{
	int result = 0;
	struct i2400ms *i2400ms =
		container_of(i2400m, struct i2400ms, i2400m);
	struct device *dev = i2400m_dev(i2400m);
	/* The barker is the same 32-bit pattern repeated four times */
	static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
		cpu_to_le32(I2400M_WARM_RESET_BARKER),
		cpu_to_le32(I2400M_WARM_RESET_BARKER),
		cpu_to_le32(I2400M_WARM_RESET_BARKER),
		cpu_to_le32(I2400M_WARM_RESET_BARKER),
	};
	static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
		cpu_to_le32(I2400M_COLD_RESET_BARKER),
		cpu_to_le32(I2400M_COLD_RESET_BARKER),
		cpu_to_le32(I2400M_COLD_RESET_BARKER),
		cpu_to_le32(I2400M_COLD_RESET_BARKER),
	};

	if (rt == I2400M_RT_WARM)
		result = __i2400ms_send_barker(i2400ms, i2400m_WARM_BOOT_BARKER,
					       sizeof(i2400m_WARM_BOOT_BARKER));
	else if (rt == I2400M_RT_COLD)
		result = __i2400ms_send_barker(i2400ms, i2400m_COLD_BOOT_BARKER,
					       sizeof(i2400m_COLD_BOOT_BARKER));
	else if (rt == I2400M_RT_BUS) {
do_bus_reset:
		/* Full teardown/re-setup of the SDIO side */
		i2400ms_bus_release(i2400m);
		/* Wait for the device to settle */
		msleep(40);
		result = i2400ms_bus_setup(i2400m);
	} else
		BUG();
	/* Failed warm/cold reset falls back to a bus reset; rt is
	 * rewritten so the fallback runs at most once. */
	if (result < 0 && rt != I2400M_RT_BUS) {
		dev_err(dev, "%s reset failed (%d); trying SDIO reset\n",
			rt == I2400M_RT_WARM ? "warm" : "cold", result);
		rt = I2400M_RT_BUS;
		goto do_bus_reset;
	}
	return result;
}

/*
 * netdev constructor callback: initialize the SDIO glue and then the
 * generic i2400m netdev parts (invoked from alloc_netdev() in the
 * probe path).
 */
static void i2400ms_netdev_setup(struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct i2400ms *i2400ms =
		container_of(i2400m, struct i2400ms, i2400m);
	i2400ms_init(i2400ms);
	i2400m_netdev_setup(net_dev);
}

/*
 * Debug levels control; see debug.h
 */
struct d_level D_LEVEL[] = {
	D_SUBMODULE_DEFINE(main),
	D_SUBMODULE_DEFINE(tx),
	D_SUBMODULE_DEFINE(rx),
	D_SUBMODULE_DEFINE(fw),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);

/* Register one debug-level knob under @parent; jumps to the caller's
 * 'error' label on failure (expects a local 'result' variable). */
#define __debugfs_register(prefix, name, parent)			\
do {									\
	result = d_level_register_debugfs(prefix, name, parent);	\
	if (result < 0)							\
		goto error;						\
} while (0)

/* Create the i2400m-sdio debugfs dir and the per-submodule debug
 * level files; a kernel without debugfs support is not an error. */
static int i2400ms_debugfs_add(struct i2400ms *i2400ms)
{
	int result;
	struct dentry *dentry = i2400ms->i2400m.wimax_dev.debugfs_dentry;

	dentry = debugfs_create_dir("i2400m-sdio", dentry);
	result = PTR_ERR(dentry);
	if (IS_ERR(dentry)) {
		if (result == -ENODEV)
			result = 0;	/* No debugfs support */
		goto error;
	}
	i2400ms->debugfs_dentry = dentry;
	__debugfs_register("dl_", main, dentry);
	__debugfs_register("dl_", tx, dentry);
	__debugfs_register("dl_", rx, dentry);
	__debugfs_register("dl_", fw, dentry);
	return 0;

error:
	debugfs_remove_recursive(i2400ms->debugfs_dentry);
	i2400ms->debugfs_dentry = NULL;
	return result;
}

static struct device_type i2400ms_type = {
	.name	= "wimax",
};

/*
 * Probe a i2400m interface and register it
 *
 * @func: SDIO function
 * @id: SDIO device ID
 * @returns: 0 if ok, < 0 errno code on error.
 *
 * Alloc a net device, initialize the bus-specific details and then
 * calls the bus-generic initialization routine. That will register
 * the wimax and netdev devices, upload the firmware [using
 * _bus_bm_*()], call _bus_dev_start() to finalize the setup of the
 * communication with the device and then will start to talk to it to
 * finish setting it up.
 *
 * Initialization is tricky; some instances of the hw are packed with
 * others in a way that requires a third driver that enables the WiMAX
 * function. In those cases, we can't enable the SDIO function and
 * we'll return with -ENODEV. When the driver that enables the WiMAX
 * function does its thing, it has to do a bus_rescan_devices() on the
 * SDIO bus so this driver is called again to enumerate the WiMAX
 * function.
 */
static int i2400ms_probe(struct sdio_func *func,
			 const struct sdio_device_id *id)
{
	int result;
	struct net_device *net_dev;
	struct device *dev = &func->dev;
	struct i2400m *i2400m;
	struct i2400ms *i2400ms;

	/* Allocate instance [calls i2400m_netdev_setup() on it]. */
	result = -ENOMEM;
	net_dev = alloc_netdev(sizeof(*i2400ms), "wmx%d",
			       i2400ms_netdev_setup);
	if (net_dev == NULL) {
		dev_err(dev, "no memory for network device instance\n");
		goto error_alloc_netdev;
	}
	SET_NETDEV_DEV(net_dev, dev);
	SET_NETDEV_DEVTYPE(net_dev, &i2400ms_type);
	i2400m = net_dev_to_i2400m(net_dev);
	i2400ms = container_of(i2400m, struct i2400ms, i2400m);
	i2400m->wimax_dev.net_dev = net_dev;
	i2400ms->func = func;
	sdio_set_drvdata(func, i2400ms);

	/* Fill in the bus-specific parameters and callbacks the
	 * generic i2400m driver will use. */
	i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
	/*
	 * Room required in the TX queue for SDIO message to accommodate
	 * a smallest payload while allocating header space is 224 bytes,
	 * which is the smallest message size(the block size 256 bytes)
	 * minus the smallest message header size(32 bytes).
	 */
	i2400m->bus_tx_room_min = I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2;
	i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
	i2400m->bus_setup = i2400ms_bus_setup;
	i2400m->bus_dev_start = i2400ms_bus_dev_start;
	i2400m->bus_dev_stop = NULL;
	i2400m->bus_release = i2400ms_bus_release;
	i2400m->bus_tx_kick = i2400ms_bus_tx_kick;
	i2400m->bus_reset = i2400ms_bus_reset;
	/* The iwmc3200-wimax sometimes requires the driver to try
	 * hard when we paint it into a corner. */
	i2400m->bus_bm_retries = I2400M_SDIO_BOOT_RETRIES;
	i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send;
	i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack;
	i2400m->bus_fw_names = i2400ms_bus_fw_names;
	i2400m->bus_bm_mac_addr_impaired = 1;
	i2400m->bus_bm_pokes_table = &i2400ms_pokes[0];

	/* Flag IWMC3200 parts; they need silicon-bug workarounds in
	 * i2400ms_enable_function() and i2400ms_bus_setup(). */
	switch (func->device) {
	case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX:
	case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5:
		i2400ms->iwmc3200 = 1;
		break;
	default:
		i2400ms->iwmc3200 = 0;
	}

	result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT);
	if (result < 0) {
		dev_err(dev, "cannot setup device: %d\n", result);
		goto error_setup;
	}

	result = i2400ms_debugfs_add(i2400ms);
	if (result < 0) {
		dev_err(dev, "cannot create SDIO debugfs: %d\n", result);
		goto error_debugfs_add;
	}
	return 0;

error_debugfs_add:
	i2400m_release(i2400m);
error_setup:
	sdio_set_drvdata(func, NULL);
	free_netdev(net_dev);
error_alloc_netdev:
	return result;
}

/* Undo i2400ms_probe(): remove debugfs entries, release the generic
 * driver state and free the net device. */
static void i2400ms_remove(struct sdio_func *func)
{
	struct device *dev = &func->dev;
	struct i2400ms *i2400ms = sdio_get_drvdata(func);
	struct i2400m *i2400m = &i2400ms->i2400m;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;

	d_fnstart(3, dev, "SDIO func %p\n", func);
	debugfs_remove_recursive(i2400ms->debugfs_dentry);
	i2400ms->debugfs_dentry = NULL;
	i2400m_release(i2400m);
	sdio_set_drvdata(func, NULL);
	free_netdev(net_dev);
	d_fnend(3, dev, "SDIO func %p\n", func);
}

static const struct sdio_device_id i2400ms_sdio_ids[] = {
	/* Intel: i2400m WiMAX (iwmc3200) over SDIO */
	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
		      SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX) },
	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
		      SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5) },
	{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, i2400ms_sdio_ids);

static struct sdio_driver i2400m_sdio_driver = {
	.name		= KBUILD_MODNAME,
	.probe		= i2400ms_probe,
	.remove		= i2400ms_remove,
	.id_table	= i2400ms_sdio_ids,
};

static int __init i2400ms_driver_init(void)
{
	/* Parse the module's debug= parameter into D_LEVEL before
	 * registering with the SDIO core. */
	d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400ms_debug_params,
		       "i2400m_sdio.debug");
	return sdio_register_driver(&i2400m_sdio_driver);
}
module_init(i2400ms_driver_init);

static void __exit i2400ms_driver_exit(void)
{
	sdio_unregister_driver(&i2400m_sdio_driver);
}
module_exit(i2400ms_driver_exit);

MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
MODULE_DESCRIPTION("Intel 2400M WiMAX networking for SDIO");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(I2400MS_FW_FILE_NAME);
gpl-2.0
rcstar6696/kernel
sound/aoa/soundbus/i2sbus/pcm.c
3300
28043
/* * i2sbus driver -- pcm routines * * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> * * GPL v2, can be found in COPYING. */ #include <asm/io.h> #include <linux/delay.h> #include <linux/slab.h> #include <sound/core.h> #include <asm/macio.h> #include <linux/pci.h> #include <linux/module.h> #include "../soundbus.h" #include "i2sbus.h" static inline void get_pcm_info(struct i2sbus_dev *i2sdev, int in, struct pcm_info **pi, struct pcm_info **other) { if (in) { if (pi) *pi = &i2sdev->in; if (other) *other = &i2sdev->out; } else { if (pi) *pi = &i2sdev->out; if (other) *other = &i2sdev->in; } } static int clock_and_divisors(int mclk, int sclk, int rate, int *out) { /* sclk must be derived from mclk! */ if (mclk % sclk) return -1; /* derive sclk register value */ if (i2s_sf_sclkdiv(mclk / sclk, out)) return -1; if (I2S_CLOCK_SPEED_18MHz % (rate * mclk) == 0) { if (!i2s_sf_mclkdiv(I2S_CLOCK_SPEED_18MHz / (rate * mclk), out)) { *out |= I2S_SF_CLOCK_SOURCE_18MHz; return 0; } } if (I2S_CLOCK_SPEED_45MHz % (rate * mclk) == 0) { if (!i2s_sf_mclkdiv(I2S_CLOCK_SPEED_45MHz / (rate * mclk), out)) { *out |= I2S_SF_CLOCK_SOURCE_45MHz; return 0; } } if (I2S_CLOCK_SPEED_49MHz % (rate * mclk) == 0) { if (!i2s_sf_mclkdiv(I2S_CLOCK_SPEED_49MHz / (rate * mclk), out)) { *out |= I2S_SF_CLOCK_SOURCE_49MHz; return 0; } } return -1; } #define CHECK_RATE(rate) \ do { if (rates & SNDRV_PCM_RATE_ ##rate) { \ int dummy; \ if (clock_and_divisors(sysclock_factor, \ bus_factor, rate, &dummy)) \ rates &= ~SNDRV_PCM_RATE_ ##rate; \ } } while (0) static int i2sbus_pcm_open(struct i2sbus_dev *i2sdev, int in) { struct pcm_info *pi, *other; struct soundbus_dev *sdev; int masks_inited = 0, err; struct codec_info_item *cii, *rev; struct snd_pcm_hardware *hw; u64 formats = 0; unsigned int rates = 0; struct transfer_info v; int result = 0; int bus_factor = 0, sysclock_factor = 0; int found_this; mutex_lock(&i2sdev->lock); get_pcm_info(i2sdev, in, &pi, &other); hw = &pi->substream->runtime->hw; sdev = 
&i2sdev->sound; if (pi->active) { /* alsa messed up */ result = -EBUSY; goto out_unlock; } /* we now need to assign the hw */ list_for_each_entry(cii, &sdev->codec_list, list) { struct transfer_info *ti = cii->codec->transfers; bus_factor = cii->codec->bus_factor; sysclock_factor = cii->codec->sysclock_factor; while (ti->formats && ti->rates) { v = *ti; if (ti->transfer_in == in && cii->codec->usable(cii, ti, &v)) { if (masks_inited) { formats &= v.formats; rates &= v.rates; } else { formats = v.formats; rates = v.rates; masks_inited = 1; } } ti++; } } if (!masks_inited || !bus_factor || !sysclock_factor) { result = -ENODEV; goto out_unlock; } /* bus dependent stuff */ hw->info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_JOINT_DUPLEX; CHECK_RATE(5512); CHECK_RATE(8000); CHECK_RATE(11025); CHECK_RATE(16000); CHECK_RATE(22050); CHECK_RATE(32000); CHECK_RATE(44100); CHECK_RATE(48000); CHECK_RATE(64000); CHECK_RATE(88200); CHECK_RATE(96000); CHECK_RATE(176400); CHECK_RATE(192000); hw->rates = rates; /* well. the codec might want 24 bits only, and we'll * ever only transfer 24 bits, but they are top-aligned! * So for alsa, we claim that we're doing full 32 bit * while in reality we'll ignore the lower 8 bits of * that when doing playback (they're transferred as 0 * as far as I know, no codecs we have are 32-bit capable * so I can't really test) and when doing recording we'll * always have those lower 8 bits recorded as 0 */ if (formats & SNDRV_PCM_FMTBIT_S24_BE) formats |= SNDRV_PCM_FMTBIT_S32_BE; if (formats & SNDRV_PCM_FMTBIT_U24_BE) formats |= SNDRV_PCM_FMTBIT_U32_BE; /* now mask off what we can support. I suppose we could * also support S24_3LE and some similar formats, but I * doubt there's a codec that would be able to use that, * so we don't support it here. 
*/ hw->formats = formats & (SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_U16_BE | SNDRV_PCM_FMTBIT_S32_BE | SNDRV_PCM_FMTBIT_U32_BE); /* we need to set the highest and lowest rate possible. * These are the highest and lowest rates alsa can * support properly in its bitfield. * Below, we'll use that to restrict to the rate * currently in use (if any). */ hw->rate_min = 5512; hw->rate_max = 192000; /* if the other stream is active, then we can only * support what it is currently using. * FIXME: I lied. This comment is wrong. We can support * anything that works with the same serial format, ie. * when recording 24 bit sound we can well play 16 bit * sound at the same time iff using the same transfer mode. */ if (other->active) { /* FIXME: is this guaranteed by the alsa api? */ hw->formats &= pcm_format_to_bits(i2sdev->format); /* see above, restrict rates to the one we already have */ hw->rate_min = i2sdev->rate; hw->rate_max = i2sdev->rate; } hw->channels_min = 2; hw->channels_max = 2; /* these are somewhat arbitrary */ hw->buffer_bytes_max = 131072; hw->period_bytes_min = 256; hw->period_bytes_max = 16384; hw->periods_min = 3; hw->periods_max = MAX_DBDMA_COMMANDS; err = snd_pcm_hw_constraint_integer(pi->substream->runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) { result = err; goto out_unlock; } list_for_each_entry(cii, &sdev->codec_list, list) { if (cii->codec->open) { err = cii->codec->open(cii, pi->substream); if (err) { result = err; /* unwind */ found_this = 0; list_for_each_entry_reverse(rev, &sdev->codec_list, list) { if (found_this && rev->codec->close) { rev->codec->close(rev, pi->substream); } if (rev == cii) found_this = 1; } goto out_unlock; } } } out_unlock: mutex_unlock(&i2sdev->lock); return result; } #undef CHECK_RATE static int i2sbus_pcm_close(struct i2sbus_dev *i2sdev, int in) { struct codec_info_item *cii; struct pcm_info *pi; int err = 0, tmp; mutex_lock(&i2sdev->lock); get_pcm_info(i2sdev, in, &pi, NULL); list_for_each_entry(cii, 
&i2sdev->sound.codec_list, list) { if (cii->codec->close) { tmp = cii->codec->close(cii, pi->substream); if (tmp) err = tmp; } } pi->substream = NULL; pi->active = 0; mutex_unlock(&i2sdev->lock); return err; } static void i2sbus_wait_for_stop(struct i2sbus_dev *i2sdev, struct pcm_info *pi) { unsigned long flags; struct completion done; long timeout; spin_lock_irqsave(&i2sdev->low_lock, flags); if (pi->dbdma_ring.stopping) { init_completion(&done); pi->stop_completion = &done; spin_unlock_irqrestore(&i2sdev->low_lock, flags); timeout = wait_for_completion_timeout(&done, HZ); spin_lock_irqsave(&i2sdev->low_lock, flags); pi->stop_completion = NULL; if (timeout == 0) { /* timeout expired, stop dbdma forcefully */ printk(KERN_ERR "i2sbus_wait_for_stop: timed out\n"); /* make sure RUN, PAUSE and S0 bits are cleared */ out_le32(&pi->dbdma->control, (RUN | PAUSE | 1) << 16); pi->dbdma_ring.stopping = 0; timeout = 10; while (in_le32(&pi->dbdma->status) & ACTIVE) { if (--timeout <= 0) break; udelay(1); } } } spin_unlock_irqrestore(&i2sdev->low_lock, flags); } #ifdef CONFIG_PM void i2sbus_wait_for_stop_both(struct i2sbus_dev *i2sdev) { struct pcm_info *pi; get_pcm_info(i2sdev, 0, &pi, NULL); i2sbus_wait_for_stop(i2sdev, pi); get_pcm_info(i2sdev, 1, &pi, NULL); i2sbus_wait_for_stop(i2sdev, pi); } #endif static int i2sbus_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); } static inline int i2sbus_hw_free(struct snd_pcm_substream *substream, int in) { struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream); struct pcm_info *pi; get_pcm_info(i2sdev, in, &pi, NULL); if (pi->dbdma_ring.stopping) i2sbus_wait_for_stop(i2sdev, pi); snd_pcm_lib_free_pages(substream); return 0; } static int i2sbus_playback_hw_free(struct snd_pcm_substream *substream) { return i2sbus_hw_free(substream, 0); } static int i2sbus_record_hw_free(struct snd_pcm_substream *substream) { return 
i2sbus_hw_free(substream, 1); } static int i2sbus_pcm_prepare(struct i2sbus_dev *i2sdev, int in) { /* whee. Hard work now. The user has selected a bitrate * and bit format, so now we have to program our * I2S controller appropriately. */ struct snd_pcm_runtime *runtime; struct dbdma_cmd *command; int i, periodsize, nperiods; dma_addr_t offset; struct bus_info bi; struct codec_info_item *cii; int sfr = 0; /* serial format register */ int dws = 0; /* data word sizes reg */ int input_16bit; struct pcm_info *pi, *other; int cnt; int result = 0; unsigned int cmd, stopaddr; mutex_lock(&i2sdev->lock); get_pcm_info(i2sdev, in, &pi, &other); if (pi->dbdma_ring.running) { result = -EBUSY; goto out_unlock; } if (pi->dbdma_ring.stopping) i2sbus_wait_for_stop(i2sdev, pi); if (!pi->substream || !pi->substream->runtime) { result = -EINVAL; goto out_unlock; } runtime = pi->substream->runtime; pi->active = 1; if (other->active && ((i2sdev->format != runtime->format) || (i2sdev->rate != runtime->rate))) { result = -EINVAL; goto out_unlock; } i2sdev->format = runtime->format; i2sdev->rate = runtime->rate; periodsize = snd_pcm_lib_period_bytes(pi->substream); nperiods = pi->substream->runtime->periods; pi->current_period = 0; /* generate dbdma command ring first */ command = pi->dbdma_ring.cmds; memset(command, 0, (nperiods + 2) * sizeof(struct dbdma_cmd)); /* commands to DMA to/from the ring */ /* * For input, we need to do a graceful stop; if we abort * the DMA, we end up with leftover bytes that corrupt * the next recording. To do this we set the S0 status * bit and wait for the DMA controller to stop. Each * command has a branch condition to * make it branch to a stop command if S0 is set. * On input we also need to wait for the S7 bit to be * set before turning off the DMA controller. * In fact we do the graceful stop for output as well. */ offset = runtime->dma_addr; cmd = (in? 
INPUT_MORE: OUTPUT_MORE) | BR_IFSET | INTR_ALWAYS; stopaddr = pi->dbdma_ring.bus_cmd_start + (nperiods + 1) * sizeof(struct dbdma_cmd); for (i = 0; i < nperiods; i++, command++, offset += periodsize) { command->command = cpu_to_le16(cmd); command->cmd_dep = cpu_to_le32(stopaddr); command->phy_addr = cpu_to_le32(offset); command->req_count = cpu_to_le16(periodsize); } /* branch back to beginning of ring */ command->command = cpu_to_le16(DBDMA_NOP | BR_ALWAYS); command->cmd_dep = cpu_to_le32(pi->dbdma_ring.bus_cmd_start); command++; /* set stop command */ command->command = cpu_to_le16(DBDMA_STOP); /* ok, let's set the serial format and stuff */ switch (runtime->format) { /* 16 bit formats */ case SNDRV_PCM_FORMAT_S16_BE: case SNDRV_PCM_FORMAT_U16_BE: /* FIXME: if we add different bus factors we need to * do more here!! */ bi.bus_factor = 0; list_for_each_entry(cii, &i2sdev->sound.codec_list, list) { bi.bus_factor = cii->codec->bus_factor; break; } if (!bi.bus_factor) { result = -ENODEV; goto out_unlock; } input_16bit = 1; break; case SNDRV_PCM_FORMAT_S32_BE: case SNDRV_PCM_FORMAT_U32_BE: /* force 64x bus speed, otherwise the data cannot be * transferred quickly enough! */ bi.bus_factor = 64; input_16bit = 0; break; default: result = -EINVAL; goto out_unlock; } /* we assume all sysclocks are the same! 
*/ list_for_each_entry(cii, &i2sdev->sound.codec_list, list) { bi.sysclock_factor = cii->codec->sysclock_factor; break; } if (clock_and_divisors(bi.sysclock_factor, bi.bus_factor, runtime->rate, &sfr) < 0) { result = -EINVAL; goto out_unlock; } switch (bi.bus_factor) { case 32: sfr |= I2S_SF_SERIAL_FORMAT_I2S_32X; break; case 64: sfr |= I2S_SF_SERIAL_FORMAT_I2S_64X; break; } /* FIXME: THIS ASSUMES MASTER ALL THE TIME */ sfr |= I2S_SF_SCLK_MASTER; list_for_each_entry(cii, &i2sdev->sound.codec_list, list) { int err = 0; if (cii->codec->prepare) err = cii->codec->prepare(cii, &bi, pi->substream); if (err) { result = err; goto out_unlock; } } /* codecs are fine with it, so set our clocks */ if (input_16bit) dws = (2 << I2S_DWS_NUM_CHANNELS_IN_SHIFT) | (2 << I2S_DWS_NUM_CHANNELS_OUT_SHIFT) | I2S_DWS_DATA_IN_16BIT | I2S_DWS_DATA_OUT_16BIT; else dws = (2 << I2S_DWS_NUM_CHANNELS_IN_SHIFT) | (2 << I2S_DWS_NUM_CHANNELS_OUT_SHIFT) | I2S_DWS_DATA_IN_24BIT | I2S_DWS_DATA_OUT_24BIT; /* early exit if already programmed correctly */ /* not locking these is fine since we touch them only in this function */ if (in_le32(&i2sdev->intfregs->serial_format) == sfr && in_le32(&i2sdev->intfregs->data_word_sizes) == dws) goto out_unlock; /* let's notify the codecs about clocks going away. * For now we only do mastering on the i2s cell... */ list_for_each_entry(cii, &i2sdev->sound.codec_list, list) if (cii->codec->switch_clock) cii->codec->switch_clock(cii, CLOCK_SWITCH_PREPARE_SLAVE); i2sbus_control_enable(i2sdev->control, i2sdev); i2sbus_control_cell(i2sdev->control, i2sdev, 1); out_le32(&i2sdev->intfregs->intr_ctl, I2S_PENDING_CLOCKS_STOPPED); i2sbus_control_clock(i2sdev->control, i2sdev, 0); msleep(1); /* wait for clock stopped. This can apparently take a while... 
*/ cnt = 100; while (cnt-- && !(in_le32(&i2sdev->intfregs->intr_ctl) & I2S_PENDING_CLOCKS_STOPPED)) { msleep(5); } out_le32(&i2sdev->intfregs->intr_ctl, I2S_PENDING_CLOCKS_STOPPED); /* not locking these is fine since we touch them only in this function */ out_le32(&i2sdev->intfregs->serial_format, sfr); out_le32(&i2sdev->intfregs->data_word_sizes, dws); i2sbus_control_enable(i2sdev->control, i2sdev); i2sbus_control_cell(i2sdev->control, i2sdev, 1); i2sbus_control_clock(i2sdev->control, i2sdev, 1); msleep(1); list_for_each_entry(cii, &i2sdev->sound.codec_list, list) if (cii->codec->switch_clock) cii->codec->switch_clock(cii, CLOCK_SWITCH_SLAVE); out_unlock: mutex_unlock(&i2sdev->lock); return result; } #ifdef CONFIG_PM void i2sbus_pcm_prepare_both(struct i2sbus_dev *i2sdev) { i2sbus_pcm_prepare(i2sdev, 0); i2sbus_pcm_prepare(i2sdev, 1); } #endif static int i2sbus_pcm_trigger(struct i2sbus_dev *i2sdev, int in, int cmd) { struct codec_info_item *cii; struct pcm_info *pi; int result = 0; unsigned long flags; spin_lock_irqsave(&i2sdev->low_lock, flags); get_pcm_info(i2sdev, in, &pi, NULL); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: if (pi->dbdma_ring.running) { result = -EALREADY; goto out_unlock; } list_for_each_entry(cii, &i2sdev->sound.codec_list, list) if (cii->codec->start) cii->codec->start(cii, pi->substream); pi->dbdma_ring.running = 1; if (pi->dbdma_ring.stopping) { /* Clear the S0 bit, then see if we stopped yet */ out_le32(&pi->dbdma->control, 1 << 16); if (in_le32(&pi->dbdma->status) & ACTIVE) { /* possible race here? 
*/ udelay(10); if (in_le32(&pi->dbdma->status) & ACTIVE) { pi->dbdma_ring.stopping = 0; goto out_unlock; /* keep running */ } } } /* make sure RUN, PAUSE and S0 bits are cleared */ out_le32(&pi->dbdma->control, (RUN | PAUSE | 1) << 16); /* set branch condition select register */ out_le32(&pi->dbdma->br_sel, (1 << 16) | 1); /* write dma command buffer address to the dbdma chip */ out_le32(&pi->dbdma->cmdptr, pi->dbdma_ring.bus_cmd_start); /* initialize the frame count and current period */ pi->current_period = 0; pi->frame_count = in_le32(&i2sdev->intfregs->frame_count); /* set the DMA controller running */ out_le32(&pi->dbdma->control, (RUN << 16) | RUN); /* off you go! */ break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: if (!pi->dbdma_ring.running) { result = -EALREADY; goto out_unlock; } pi->dbdma_ring.running = 0; /* Set the S0 bit to make the DMA branch to the stop cmd */ out_le32(&pi->dbdma->control, (1 << 16) | 1); pi->dbdma_ring.stopping = 1; list_for_each_entry(cii, &i2sdev->sound.codec_list, list) if (cii->codec->stop) cii->codec->stop(cii, pi->substream); break; default: result = -EINVAL; goto out_unlock; } out_unlock: spin_unlock_irqrestore(&i2sdev->low_lock, flags); return result; } static snd_pcm_uframes_t i2sbus_pcm_pointer(struct i2sbus_dev *i2sdev, int in) { struct pcm_info *pi; u32 fc; get_pcm_info(i2sdev, in, &pi, NULL); fc = in_le32(&i2sdev->intfregs->frame_count); fc = fc - pi->frame_count; if (fc >= pi->substream->runtime->buffer_size) fc %= pi->substream->runtime->buffer_size; return fc; } static inline void handle_interrupt(struct i2sbus_dev *i2sdev, int in) { struct pcm_info *pi; u32 fc, nframes; u32 status; int timeout, i; int dma_stopped = 0; struct snd_pcm_runtime *runtime; spin_lock(&i2sdev->low_lock); get_pcm_info(i2sdev, in, &pi, NULL); if (!pi->dbdma_ring.running && !pi->dbdma_ring.stopping) goto out_unlock; i = pi->current_period; runtime = pi->substream->runtime; while (pi->dbdma_ring.cmds[i].xfer_status) { if 
(le16_to_cpu(pi->dbdma_ring.cmds[i].xfer_status) & BT) /* * BT is the branch taken bit. If it took a branch * it is because we set the S0 bit to make it * branch to the stop command. */ dma_stopped = 1; pi->dbdma_ring.cmds[i].xfer_status = 0; if (++i >= runtime->periods) { i = 0; pi->frame_count += runtime->buffer_size; } pi->current_period = i; /* * Check the frame count. The DMA tends to get a bit * ahead of the frame counter, which confuses the core. */ fc = in_le32(&i2sdev->intfregs->frame_count); nframes = i * runtime->period_size; if (fc < pi->frame_count + nframes) pi->frame_count = fc - nframes; } if (dma_stopped) { timeout = 1000; for (;;) { status = in_le32(&pi->dbdma->status); if (!(status & ACTIVE) && (!in || (status & 0x80))) break; if (--timeout <= 0) { printk(KERN_ERR "i2sbus: timed out " "waiting for DMA to stop!\n"); break; } udelay(1); } /* Turn off DMA controller, clear S0 bit */ out_le32(&pi->dbdma->control, (RUN | PAUSE | 1) << 16); pi->dbdma_ring.stopping = 0; if (pi->stop_completion) complete(pi->stop_completion); } if (!pi->dbdma_ring.running) goto out_unlock; spin_unlock(&i2sdev->low_lock); /* may call _trigger again, hence needs to be unlocked */ snd_pcm_period_elapsed(pi->substream); return; out_unlock: spin_unlock(&i2sdev->low_lock); } irqreturn_t i2sbus_tx_intr(int irq, void *devid) { handle_interrupt((struct i2sbus_dev *)devid, 0); return IRQ_HANDLED; } irqreturn_t i2sbus_rx_intr(int irq, void *devid) { handle_interrupt((struct i2sbus_dev *)devid, 1); return IRQ_HANDLED; } static int i2sbus_playback_open(struct snd_pcm_substream *substream) { struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream); if (!i2sdev) return -EINVAL; i2sdev->out.substream = substream; return i2sbus_pcm_open(i2sdev, 0); } static int i2sbus_playback_close(struct snd_pcm_substream *substream) { struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream); int err; if (!i2sdev) return -EINVAL; if (i2sdev->out.substream != substream) return -EINVAL; err = 
i2sbus_pcm_close(i2sdev, 0); if (!err) i2sdev->out.substream = NULL; return err; } static int i2sbus_playback_prepare(struct snd_pcm_substream *substream) { struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream); if (!i2sdev) return -EINVAL; if (i2sdev->out.substream != substream) return -EINVAL; return i2sbus_pcm_prepare(i2sdev, 0); } static int i2sbus_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream); if (!i2sdev) return -EINVAL; if (i2sdev->out.substream != substream) return -EINVAL; return i2sbus_pcm_trigger(i2sdev, 0, cmd); } static snd_pcm_uframes_t i2sbus_playback_pointer(struct snd_pcm_substream *substream) { struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream); if (!i2sdev) return -EINVAL; if (i2sdev->out.substream != substream) return 0; return i2sbus_pcm_pointer(i2sdev, 0); } static struct snd_pcm_ops i2sbus_playback_ops = { .open = i2sbus_playback_open, .close = i2sbus_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = i2sbus_hw_params, .hw_free = i2sbus_playback_hw_free, .prepare = i2sbus_playback_prepare, .trigger = i2sbus_playback_trigger, .pointer = i2sbus_playback_pointer, }; static int i2sbus_record_open(struct snd_pcm_substream *substream) { struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream); if (!i2sdev) return -EINVAL; i2sdev->in.substream = substream; return i2sbus_pcm_open(i2sdev, 1); } static int i2sbus_record_close(struct snd_pcm_substream *substream) { struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream); int err; if (!i2sdev) return -EINVAL; if (i2sdev->in.substream != substream) return -EINVAL; err = i2sbus_pcm_close(i2sdev, 1); if (!err) i2sdev->in.substream = NULL; return err; } static int i2sbus_record_prepare(struct snd_pcm_substream *substream) { struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream); if (!i2sdev) return -EINVAL; if (i2sdev->in.substream != substream) return -EINVAL; return 
i2sbus_pcm_prepare(i2sdev, 1); } static int i2sbus_record_trigger(struct snd_pcm_substream *substream, int cmd) { struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream); if (!i2sdev) return -EINVAL; if (i2sdev->in.substream != substream) return -EINVAL; return i2sbus_pcm_trigger(i2sdev, 1, cmd); } static snd_pcm_uframes_t i2sbus_record_pointer(struct snd_pcm_substream *substream) { struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream); if (!i2sdev) return -EINVAL; if (i2sdev->in.substream != substream) return 0; return i2sbus_pcm_pointer(i2sdev, 1); } static struct snd_pcm_ops i2sbus_record_ops = { .open = i2sbus_record_open, .close = i2sbus_record_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = i2sbus_hw_params, .hw_free = i2sbus_record_hw_free, .prepare = i2sbus_record_prepare, .trigger = i2sbus_record_trigger, .pointer = i2sbus_record_pointer, }; static void i2sbus_private_free(struct snd_pcm *pcm) { struct i2sbus_dev *i2sdev = snd_pcm_chip(pcm); struct codec_info_item *p, *tmp; i2sdev->sound.pcm = NULL; i2sdev->out.created = 0; i2sdev->in.created = 0; list_for_each_entry_safe(p, tmp, &i2sdev->sound.codec_list, list) { printk(KERN_ERR "i2sbus: a codec didn't unregister!\n"); list_del(&p->list); module_put(p->codec->owner); kfree(p); } soundbus_dev_put(&i2sdev->sound); module_put(THIS_MODULE); } int i2sbus_attach_codec(struct soundbus_dev *dev, struct snd_card *card, struct codec_info *ci, void *data) { int err, in = 0, out = 0; struct transfer_info *tmp; struct i2sbus_dev *i2sdev = soundbus_dev_to_i2sbus_dev(dev); struct codec_info_item *cii; if (!dev->pcmname || dev->pcmid == -1) { printk(KERN_ERR "i2sbus: pcm name and id must be set!\n"); return -EINVAL; } list_for_each_entry(cii, &dev->codec_list, list) { if (cii->codec_data == data) return -EALREADY; } if (!ci->transfers || !ci->transfers->formats || !ci->transfers->rates || !ci->usable) return -EINVAL; /* we currently code the i2s transfer on the clock, and support only * 32 and 64 */ if 
(ci->bus_factor != 32 && ci->bus_factor != 64) return -EINVAL; /* If you want to fix this, you need to keep track of what transport infos * are to be used, which codecs they belong to, and then fix all the * sysclock/busclock stuff above to depend on which is usable */ list_for_each_entry(cii, &dev->codec_list, list) { if (cii->codec->sysclock_factor != ci->sysclock_factor) { printk(KERN_DEBUG "cannot yet handle multiple different sysclocks!\n"); return -EINVAL; } if (cii->codec->bus_factor != ci->bus_factor) { printk(KERN_DEBUG "cannot yet handle multiple different bus clocks!\n"); return -EINVAL; } } tmp = ci->transfers; while (tmp->formats && tmp->rates) { if (tmp->transfer_in) in = 1; else out = 1; tmp++; } cii = kzalloc(sizeof(struct codec_info_item), GFP_KERNEL); if (!cii) { printk(KERN_DEBUG "i2sbus: failed to allocate cii\n"); return -ENOMEM; } /* use the private data to point to the codec info */ cii->sdev = soundbus_dev_get(dev); cii->codec = ci; cii->codec_data = data; if (!cii->sdev) { printk(KERN_DEBUG "i2sbus: failed to get soundbus dev reference\n"); err = -ENODEV; goto out_free_cii; } if (!try_module_get(THIS_MODULE)) { printk(KERN_DEBUG "i2sbus: failed to get module reference!\n"); err = -EBUSY; goto out_put_sdev; } if (!try_module_get(ci->owner)) { printk(KERN_DEBUG "i2sbus: failed to get module reference to codec owner!\n"); err = -EBUSY; goto out_put_this_module; } if (!dev->pcm) { err = snd_pcm_new(card, dev->pcmname, dev->pcmid, 0, 0, &dev->pcm); if (err) { printk(KERN_DEBUG "i2sbus: failed to create pcm\n"); goto out_put_ci_module; } dev->pcm->dev = &dev->ofdev.dev; } /* ALSA yet again sucks. * If it is ever fixed, remove this line. See below. */ out = in = 1; if (!i2sdev->out.created && out) { if (dev->pcm->card != card) { /* eh? 
*/ printk(KERN_ERR "Can't attach same bus to different cards!\n"); err = -EINVAL; goto out_put_ci_module; } err = snd_pcm_new_stream(dev->pcm, SNDRV_PCM_STREAM_PLAYBACK, 1); if (err) goto out_put_ci_module; snd_pcm_set_ops(dev->pcm, SNDRV_PCM_STREAM_PLAYBACK, &i2sbus_playback_ops); i2sdev->out.created = 1; } if (!i2sdev->in.created && in) { if (dev->pcm->card != card) { printk(KERN_ERR "Can't attach same bus to different cards!\n"); err = -EINVAL; goto out_put_ci_module; } err = snd_pcm_new_stream(dev->pcm, SNDRV_PCM_STREAM_CAPTURE, 1); if (err) goto out_put_ci_module; snd_pcm_set_ops(dev->pcm, SNDRV_PCM_STREAM_CAPTURE, &i2sbus_record_ops); i2sdev->in.created = 1; } /* so we have to register the pcm after adding any substream * to it because alsa doesn't create the devices for the * substreams when we add them later. * Therefore, force in and out on both busses (above) and * register the pcm now instead of just after creating it. */ err = snd_device_register(card, dev->pcm); if (err) { printk(KERN_ERR "i2sbus: error registering new pcm\n"); goto out_put_ci_module; } /* no errors any more, so let's add this to our list */ list_add(&cii->list, &dev->codec_list); dev->pcm->private_data = i2sdev; dev->pcm->private_free = i2sbus_private_free; /* well, we really should support scatter/gather DMA */ snd_pcm_lib_preallocate_pages_for_all( dev->pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(macio_get_pci_dev(i2sdev->macio)), 64 * 1024, 64 * 1024); return 0; out_put_ci_module: module_put(ci->owner); out_put_this_module: module_put(THIS_MODULE); out_put_sdev: soundbus_dev_put(dev); out_free_cii: kfree(cii); return err; } void i2sbus_detach_codec(struct soundbus_dev *dev, void *data) { struct codec_info_item *cii = NULL, *i; list_for_each_entry(i, &dev->codec_list, list) { if (i->codec_data == data) { cii = i; break; } } if (cii) { list_del(&cii->list); module_put(cii->codec->owner); kfree(cii); } /* no more codecs, but still a pcm? 
*/ if (list_empty(&dev->codec_list) && dev->pcm) { /* the actual cleanup is done by the callback above! */ snd_device_free(dev->pcm->card, dev->pcm); } }
gpl-2.0
NamanArora/android_kernel_sony_flamingo
drivers/input/misc/gpio_event.c
4068
6094
/* drivers/input/misc/gpio_event.c * * Copyright (C) 2007 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/input.h> #include <linux/gpio_event.h> #include <linux/hrtimer.h> #include <linux/platform_device.h> #include <linux/slab.h> struct gpio_event { struct gpio_event_input_devs *input_devs; const struct gpio_event_platform_data *info; void *state[0]; }; static int gpio_input_event( struct input_dev *dev, unsigned int type, unsigned int code, int value) { int i; int devnr; int ret = 0; int tmp_ret; struct gpio_event_info **ii; struct gpio_event *ip = input_get_drvdata(dev); for (devnr = 0; devnr < ip->input_devs->count; devnr++) if (ip->input_devs->dev[devnr] == dev) break; if (devnr == ip->input_devs->count) { pr_err("gpio_input_event: unknown device %p\n", dev); return -EIO; } for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) { if ((*ii)->event) { tmp_ret = (*ii)->event(ip->input_devs, *ii, &ip->state[i], devnr, type, code, value); if (tmp_ret) ret = tmp_ret; } } return ret; } static int gpio_event_call_all_func(struct gpio_event *ip, int func) { int i; int ret; struct gpio_event_info **ii; if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) { ii = ip->info->info; for (i = 0; i < ip->info->info_count; i++, ii++) { if ((*ii)->func == NULL) { ret = -ENODEV; pr_err("gpio_event_probe: Incomplete pdata, " "no function\n"); goto err_no_func; } if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend) continue; ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i], func); if (ret) { 
pr_err("gpio_event_probe: function failed\n"); goto err_func_failed; } } return 0; } ret = 0; i = ip->info->info_count; ii = ip->info->info + i; while (i > 0) { i--; ii--; if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend) continue; (*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1); err_func_failed: err_no_func: ; } return ret; } static void __maybe_unused gpio_event_suspend(struct gpio_event *ip) { gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND); if (ip->info->power) ip->info->power(ip->info, 0); } static void __maybe_unused gpio_event_resume(struct gpio_event *ip) { if (ip->info->power) ip->info->power(ip->info, 1); gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME); } static int gpio_event_probe(struct platform_device *pdev) { int err; struct gpio_event *ip; struct gpio_event_platform_data *event_info; int dev_count = 1; int i; int registered = 0; event_info = pdev->dev.platform_data; if (event_info == NULL) { pr_err("gpio_event_probe: No pdata\n"); return -ENODEV; } if ((!event_info->name && !event_info->names[0]) || !event_info->info || !event_info->info_count) { pr_err("gpio_event_probe: Incomplete pdata\n"); return -ENODEV; } if (!event_info->name) while (event_info->names[dev_count]) dev_count++; ip = kzalloc(sizeof(*ip) + sizeof(ip->state[0]) * event_info->info_count + sizeof(*ip->input_devs) + sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL); if (ip == NULL) { err = -ENOMEM; pr_err("gpio_event_probe: Failed to allocate private data\n"); goto err_kp_alloc_failed; } ip->input_devs = (void*)&ip->state[event_info->info_count]; platform_set_drvdata(pdev, ip); for (i = 0; i < dev_count; i++) { struct input_dev *input_dev = input_allocate_device(); if (input_dev == NULL) { err = -ENOMEM; pr_err("gpio_event_probe: " "Failed to allocate input device\n"); goto err_input_dev_alloc_failed; } input_set_drvdata(input_dev, ip); input_dev->name = event_info->name ? 
event_info->name : event_info->names[i]; input_dev->event = gpio_input_event; ip->input_devs->dev[i] = input_dev; } ip->input_devs->count = dev_count; ip->info = event_info; if (event_info->power) ip->info->power(ip->info, 1); err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT); if (err) goto err_call_all_func_failed; for (i = 0; i < dev_count; i++) { err = input_register_device(ip->input_devs->dev[i]); if (err) { pr_err("gpio_event_probe: Unable to register %s " "input device\n", ip->input_devs->dev[i]->name); goto err_input_register_device_failed; } registered++; } return 0; err_input_register_device_failed: gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT); err_call_all_func_failed: if (event_info->power) ip->info->power(ip->info, 0); for (i = 0; i < registered; i++) input_unregister_device(ip->input_devs->dev[i]); for (i = dev_count - 1; i >= registered; i--) { input_free_device(ip->input_devs->dev[i]); err_input_dev_alloc_failed: ; } kfree(ip); err_kp_alloc_failed: return err; } static int gpio_event_remove(struct platform_device *pdev) { struct gpio_event *ip = platform_get_drvdata(pdev); int i; gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT); if (ip->info->power) ip->info->power(ip->info, 0); for (i = 0; i < ip->input_devs->count; i++) input_unregister_device(ip->input_devs->dev[i]); kfree(ip); return 0; } static struct platform_driver gpio_event_driver = { .probe = gpio_event_probe, .remove = gpio_event_remove, .driver = { .name = GPIO_EVENT_DEV_NAME, }, }; static int __devinit gpio_event_init(void) { return platform_driver_register(&gpio_event_driver); } static void __exit gpio_event_exit(void) { platform_driver_unregister(&gpio_event_driver); } module_init(gpio_event_init); module_exit(gpio_event_exit); MODULE_DESCRIPTION("GPIO Event Driver"); MODULE_LICENSE("GPL");
gpl-2.0
AnguisCaptor/PwnKernel_Hammerhead
drivers/net/ethernet/ti/davinci_mdio.c
4836
11280
/* * DaVinci MDIO Module driver * * Copyright (C) 2010 Texas Instruments. * * Shamelessly ripped out of davinci_emac.c, original copyrights follow: * * Copyright (C) 2009 Texas Instruments. * * --------------------------------------------------------------------------- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * --------------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/phy.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/davinci_emac.h> /* * This timeout definition is a worst-case ultra defensive measure against * unexpected controller lock ups. Ideally, we should never ever hit this * scenario in practice. 
*/ #define MDIO_TIMEOUT 100 /* msecs */ #define PHY_REG_MASK 0x1f #define PHY_ID_MASK 0x1f #define DEF_OUT_FREQ 2200000 /* 2.2 MHz */ struct davinci_mdio_regs { u32 version; u32 control; #define CONTROL_IDLE BIT(31) #define CONTROL_ENABLE BIT(30) #define CONTROL_MAX_DIV (0xffff) u32 alive; u32 link; u32 linkintraw; u32 linkintmasked; u32 __reserved_0[2]; u32 userintraw; u32 userintmasked; u32 userintmaskset; u32 userintmaskclr; u32 __reserved_1[20]; struct { u32 access; #define USERACCESS_GO BIT(31) #define USERACCESS_WRITE BIT(30) #define USERACCESS_ACK BIT(29) #define USERACCESS_READ (0) #define USERACCESS_DATA (0xffff) u32 physel; } user[0]; }; struct mdio_platform_data default_pdata = { .bus_freq = DEF_OUT_FREQ, }; struct davinci_mdio_data { struct mdio_platform_data pdata; struct davinci_mdio_regs __iomem *regs; spinlock_t lock; struct clk *clk; struct device *dev; struct mii_bus *bus; bool suspended; unsigned long access_time; /* jiffies */ }; static void __davinci_mdio_reset(struct davinci_mdio_data *data) { u32 mdio_in, div, mdio_out_khz, access_time; mdio_in = clk_get_rate(data->clk); div = (mdio_in / data->pdata.bus_freq) - 1; if (div > CONTROL_MAX_DIV) div = CONTROL_MAX_DIV; /* set enable and clock divider */ __raw_writel(div | CONTROL_ENABLE, &data->regs->control); /* * One mdio transaction consists of: * 32 bits of preamble * 32 bits of transferred data * 24 bits of bus yield (not needed unless shared?) */ mdio_out_khz = mdio_in / (1000 * (div + 1)); access_time = (88 * 1000) / mdio_out_khz; /* * In the worst case, we could be kicking off a user-access immediately * after the mdio bus scan state-machine triggered its own read. If * so, our request could get deferred by one access cycle. We * defensively allow for 4 access cycles. 
*/ data->access_time = usecs_to_jiffies(access_time * 4); if (!data->access_time) data->access_time = 1; } static int davinci_mdio_reset(struct mii_bus *bus) { struct davinci_mdio_data *data = bus->priv; u32 phy_mask, ver; __davinci_mdio_reset(data); /* wait for scan logic to settle */ msleep(PHY_MAX_ADDR * data->access_time); /* dump hardware version info */ ver = __raw_readl(&data->regs->version); dev_info(data->dev, "davinci mdio revision %d.%d\n", (ver >> 8) & 0xff, ver & 0xff); /* get phy mask from the alive register */ phy_mask = __raw_readl(&data->regs->alive); if (phy_mask) { /* restrict mdio bus to live phys only */ dev_info(data->dev, "detected phy mask %x\n", ~phy_mask); phy_mask = ~phy_mask; } else { /* desperately scan all phys */ dev_warn(data->dev, "no live phy, scanning all\n"); phy_mask = 0; } data->bus->phy_mask = phy_mask; return 0; } /* wait until hardware is ready for another user access */ static inline int wait_for_user_access(struct davinci_mdio_data *data) { struct davinci_mdio_regs __iomem *regs = data->regs; unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); u32 reg; while (time_after(timeout, jiffies)) { reg = __raw_readl(&regs->user[0].access); if ((reg & USERACCESS_GO) == 0) return 0; reg = __raw_readl(&regs->control); if ((reg & CONTROL_IDLE) == 0) continue; /* * An emac soft_reset may have clobbered the mdio controller's * state machine. 
We need to reset and retry the current * operation */ dev_warn(data->dev, "resetting idled controller\n"); __davinci_mdio_reset(data); return -EAGAIN; } reg = __raw_readl(&regs->user[0].access); if ((reg & USERACCESS_GO) == 0) return 0; dev_err(data->dev, "timed out waiting for user access\n"); return -ETIMEDOUT; } /* wait until hardware state machine is idle */ static inline int wait_for_idle(struct davinci_mdio_data *data) { struct davinci_mdio_regs __iomem *regs = data->regs; unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); while (time_after(timeout, jiffies)) { if (__raw_readl(&regs->control) & CONTROL_IDLE) return 0; } dev_err(data->dev, "timed out waiting for idle\n"); return -ETIMEDOUT; } static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) { struct davinci_mdio_data *data = bus->priv; u32 reg; int ret; if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) return -EINVAL; spin_lock(&data->lock); if (data->suspended) { spin_unlock(&data->lock); return -ENODEV; } reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) | (phy_id << 16)); while (1) { ret = wait_for_user_access(data); if (ret == -EAGAIN) continue; if (ret < 0) break; __raw_writel(reg, &data->regs->user[0].access); ret = wait_for_user_access(data); if (ret == -EAGAIN) continue; if (ret < 0) break; reg = __raw_readl(&data->regs->user[0].access); ret = (reg & USERACCESS_ACK) ? 
(reg & USERACCESS_DATA) : -EIO; break; } spin_unlock(&data->lock); return ret; } static int davinci_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg, u16 phy_data) { struct davinci_mdio_data *data = bus->priv; u32 reg; int ret; if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) return -EINVAL; spin_lock(&data->lock); if (data->suspended) { spin_unlock(&data->lock); return -ENODEV; } reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) | (phy_id << 16) | (phy_data & USERACCESS_DATA)); while (1) { ret = wait_for_user_access(data); if (ret == -EAGAIN) continue; if (ret < 0) break; __raw_writel(reg, &data->regs->user[0].access); ret = wait_for_user_access(data); if (ret == -EAGAIN) continue; break; } spin_unlock(&data->lock); return 0; } static int __devinit davinci_mdio_probe(struct platform_device *pdev) { struct mdio_platform_data *pdata = pdev->dev.platform_data; struct device *dev = &pdev->dev; struct davinci_mdio_data *data; struct resource *res; struct phy_device *phy; int ret, addr; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { dev_err(dev, "failed to alloc device data\n"); return -ENOMEM; } data->pdata = pdata ? 
(*pdata) : default_pdata; data->bus = mdiobus_alloc(); if (!data->bus) { dev_err(dev, "failed to alloc mii bus\n"); ret = -ENOMEM; goto bail_out; } data->bus->name = dev_name(dev); data->bus->read = davinci_mdio_read, data->bus->write = davinci_mdio_write, data->bus->reset = davinci_mdio_reset, data->bus->parent = dev; data->bus->priv = data; snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); data->clk = clk_get(dev, NULL); if (IS_ERR(data->clk)) { dev_err(dev, "failed to get device clock\n"); ret = PTR_ERR(data->clk); data->clk = NULL; goto bail_out; } clk_enable(data->clk); dev_set_drvdata(dev, data); data->dev = dev; spin_lock_init(&data->lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "could not find register map resource\n"); ret = -ENOENT; goto bail_out; } res = devm_request_mem_region(dev, res->start, resource_size(res), dev_name(dev)); if (!res) { dev_err(dev, "could not allocate register map resource\n"); ret = -ENXIO; goto bail_out; } data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); if (!data->regs) { dev_err(dev, "could not map mdio registers\n"); ret = -ENOMEM; goto bail_out; } /* register the mii bus */ ret = mdiobus_register(data->bus); if (ret) goto bail_out; /* scan and dump the bus */ for (addr = 0; addr < PHY_MAX_ADDR; addr++) { phy = data->bus->phy_map[addr]; if (phy) { dev_info(dev, "phy[%d]: device %s, driver %s\n", phy->addr, dev_name(&phy->dev), phy->drv ? 
phy->drv->name : "unknown"); } } return 0; bail_out: if (data->bus) mdiobus_free(data->bus); if (data->clk) { clk_disable(data->clk); clk_put(data->clk); } kfree(data); return ret; } static int __devexit davinci_mdio_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct davinci_mdio_data *data = dev_get_drvdata(dev); if (data->bus) mdiobus_free(data->bus); if (data->clk) { clk_disable(data->clk); clk_put(data->clk); } dev_set_drvdata(dev, NULL); kfree(data); return 0; } static int davinci_mdio_suspend(struct device *dev) { struct davinci_mdio_data *data = dev_get_drvdata(dev); u32 ctrl; spin_lock(&data->lock); /* shutdown the scan state machine */ ctrl = __raw_readl(&data->regs->control); ctrl &= ~CONTROL_ENABLE; __raw_writel(ctrl, &data->regs->control); wait_for_idle(data); if (data->clk) clk_disable(data->clk); data->suspended = true; spin_unlock(&data->lock); return 0; } static int davinci_mdio_resume(struct device *dev) { struct davinci_mdio_data *data = dev_get_drvdata(dev); u32 ctrl; spin_lock(&data->lock); if (data->clk) clk_enable(data->clk); /* restart the scan state machine */ ctrl = __raw_readl(&data->regs->control); ctrl |= CONTROL_ENABLE; __raw_writel(ctrl, &data->regs->control); data->suspended = false; spin_unlock(&data->lock); return 0; } static const struct dev_pm_ops davinci_mdio_pm_ops = { .suspend = davinci_mdio_suspend, .resume = davinci_mdio_resume, }; static struct platform_driver davinci_mdio_driver = { .driver = { .name = "davinci_mdio", .owner = THIS_MODULE, .pm = &davinci_mdio_pm_ops, }, .probe = davinci_mdio_probe, .remove = __devexit_p(davinci_mdio_remove), }; static int __init davinci_mdio_init(void) { return platform_driver_register(&davinci_mdio_driver); } device_initcall(davinci_mdio_init); static void __exit davinci_mdio_exit(void) { platform_driver_unregister(&davinci_mdio_driver); } module_exit(davinci_mdio_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DaVinci MDIO driver");
gpl-2.0
RoyFerry/mecha-2.6.35-gb-mr
drivers/base/transport_class.c
4836
9551
/* * transport_class.c - implementation of generic transport classes * using attribute_containers * * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> * * This file is licensed under GPLv2 * * The basic idea here is to allow any "device controller" (which * would most often be a Host Bus Adapter to use the services of one * or more tranport classes for performing transport specific * services. Transport specific services are things that the generic * command layer doesn't want to know about (speed settings, line * condidtioning, etc), but which the user might be interested in. * Thus, the HBA's use the routines exported by the transport classes * to perform these functions. The transport classes export certain * values to the user via sysfs using attribute containers. * * Note: because not every HBA will care about every transport * attribute, there's a many to one relationship that goes like this: * * transport class<-----attribute container<----class device * * Usually the attribute container is per-HBA, but the design doesn't * mandate that. Although most of the services will be specific to * the actual external storage connection used by the HBA, the generic * transport class is framed entirely in terms of generic devices to * allow it to be used by any physical HBA in the system. */ #include <linux/attribute_container.h> #include <linux/transport_class.h> /** * transport_class_register - register an initial transport class * * @tclass: a pointer to the transport class structure to be initialised * * The transport class contains an embedded class which is used to * identify it. The caller should initialise this structure with * zeros and then generic class must have been initialised with the * actual transport class unique name. There's a macro * DECLARE_TRANSPORT_CLASS() to do this (declared classes still must * be registered). * * Returns 0 on success or error on failure. 
*/ int transport_class_register(struct transport_class *tclass) { return class_register(&tclass->class); } EXPORT_SYMBOL_GPL(transport_class_register); /** * transport_class_unregister - unregister a previously registered class * * @tclass: The transport class to unregister * * Must be called prior to deallocating the memory for the transport * class. */ void transport_class_unregister(struct transport_class *tclass) { class_unregister(&tclass->class); } EXPORT_SYMBOL_GPL(transport_class_unregister); static int anon_transport_dummy_function(struct transport_container *tc, struct device *dev, struct device *cdev) { /* do nothing */ return 0; } /** * anon_transport_class_register - register an anonymous class * * @atc: The anon transport class to register * * The anonymous transport class contains both a transport class and a * container. The idea of an anonymous class is that it never * actually has any device attributes associated with it (and thus * saves on container storage). So it can only be used for triggering * events. Use prezero and then use DECLARE_ANON_TRANSPORT_CLASS() to * initialise the anon transport class storage. */ int anon_transport_class_register(struct anon_transport_class *atc) { int error; atc->container.class = &atc->tclass.class; attribute_container_set_no_classdevs(&atc->container); error = attribute_container_register(&atc->container); if (error) return error; atc->tclass.setup = anon_transport_dummy_function; atc->tclass.remove = anon_transport_dummy_function; return 0; } EXPORT_SYMBOL_GPL(anon_transport_class_register); /** * anon_transport_class_unregister - unregister an anon class * * @atc: Pointer to the anon transport class to unregister * * Must be called prior to deallocating the memory for the anon * transport class. 
 */
void anon_transport_class_unregister(struct anon_transport_class *atc)
{
	/* Failure to unregister means the container is still in use;
	 * that is a programming error, so treat it as fatal. */
	if (unlikely(attribute_container_unregister(&atc->container)))
		BUG();
}
EXPORT_SYMBOL_GPL(anon_transport_class_unregister);

/* Per-container trigger callback: run the transport class's setup()
 * hook, if one was provided, for a newly declared device. */
static int transport_setup_classdev(struct attribute_container *cont,
				    struct device *dev,
				    struct device *classdev)
{
	struct transport_class *tclass = class_to_transport_class(cont->class);
	struct transport_container *tcont = attribute_container_to_transport_container(cont);

	if (tclass->setup)
		tclass->setup(tcont, dev, classdev);
	return 0;
}

/**
 * transport_setup_device - declare a new dev for transport class association but don't make it visible yet.
 * @dev: the generic device representing the entity being added
 *
 * Usually, dev represents some component in the HBA system (either
 * the HBA itself or a device remote across the HBA bus). This
 * routine is simply a trigger point to see if any set of transport
 * classes wishes to associate with the added device. This allocates
 * storage for the class device and initialises it, but does not yet
 * add it to the system or add attributes to it (you do this with
 * transport_add_device). If you have no need for a separate setup
 * and add operations, use transport_register_device (see
 * transport_class.h).
 */
void transport_setup_device(struct device *dev)
{
	attribute_container_add_device(dev, transport_setup_classdev);
}
EXPORT_SYMBOL_GPL(transport_setup_device);

/* Per-container trigger callback: make the class device visible in
 * sysfs and, if the transport container supplies a statistics
 * attribute group, create that group under the class device. */
static int transport_add_class_device(struct attribute_container *cont,
				      struct device *dev,
				      struct device *classdev)
{
	int error = attribute_container_add_class_device(classdev);
	struct transport_container *tcont =
		attribute_container_to_transport_container(cont);

	if (!error && tcont->statistics)
		error = sysfs_create_group(&classdev->kobj, tcont->statistics);

	return error;
}

/**
 * transport_add_device - declare a new dev for transport class association
 *
 * @dev: the generic device representing the entity being added
 *
 * Usually, dev represents some component in the HBA system (either
 * the HBA itself or a device remote across the HBA bus). This
 * routine is simply a trigger point used to add the device to the
 * system and register attributes for it.
 */
void transport_add_device(struct device *dev)
{
	attribute_container_device_trigger(dev, transport_add_class_device);
}
EXPORT_SYMBOL_GPL(transport_add_device);

/* Per-container trigger callback: run the transport class's
 * configure() hook, if one was provided. */
static int transport_configure(struct attribute_container *cont,
			       struct device *dev,
			       struct device *cdev)
{
	struct transport_class *tclass = class_to_transport_class(cont->class);
	struct transport_container *tcont = attribute_container_to_transport_container(cont);

	if (tclass->configure)
		tclass->configure(tcont, dev, cdev);
	return 0;
}

/**
 * transport_configure_device - configure an already set up device
 *
 * @dev: generic device representing device to be configured
 *
 * The idea of configure is simply to provide a point within the setup
 * process to allow the transport class to extract information from a
 * device after it has been setup. This is used in SCSI because we
 * have to have a setup device to begin using the HBA, but after we
 * send the initial inquiry, we use configure to extract the device
 * parameters. The device need not have been added to be configured.
 */
void transport_configure_device(struct device *dev)
{
	attribute_container_device_trigger(dev, transport_configure);
}
EXPORT_SYMBOL_GPL(transport_configure_device);

/* Per-container trigger callback: run the class's remove() hook, then
 * (unless the class is an anonymous one, marked by the dummy remove
 * function) tear down the statistics group and delete the class
 * device from sysfs. */
static int transport_remove_classdev(struct attribute_container *cont,
				     struct device *dev,
				     struct device *classdev)
{
	struct transport_container *tcont =
		attribute_container_to_transport_container(cont);
	struct transport_class *tclass = class_to_transport_class(cont->class);

	if (tclass->remove)
		tclass->remove(tcont, dev, classdev);

	if (tclass->remove != anon_transport_dummy_function) {
		if (tcont->statistics)
			sysfs_remove_group(&classdev->kobj, tcont->statistics);
		attribute_container_class_device_del(classdev);
	}

	return 0;
}

/**
 * transport_remove_device - remove the visibility of a device
 *
 * @dev: generic device to remove
 *
 * This call removes the visibility of the device (to the user from
 * sysfs), but does not destroy it. To eliminate a device entirely
 * you must also call transport_destroy_device. If you don't need to
 * do remove and destroy as separate operations, use
 * transport_unregister_device() (see transport_class.h) which will
 * perform both calls for you.
 */
void transport_remove_device(struct device *dev)
{
	attribute_container_device_trigger(dev, transport_remove_classdev);
}
EXPORT_SYMBOL_GPL(transport_remove_device);

/* Per-container trigger callback: drop the reference taken when the
 * class device was set up (skipped for anonymous transport classes,
 * which never created a visible class device). */
static void transport_destroy_classdev(struct attribute_container *cont,
				      struct device *dev,
				      struct device *classdev)
{
	struct transport_class *tclass = class_to_transport_class(cont->class);

	if (tclass->remove != anon_transport_dummy_function)
		put_device(classdev);
}

/**
 * transport_destroy_device - destroy a removed device
 *
 * @dev: device to eliminate from the transport class.
 *
 * This call triggers the elimination of storage associated with the
 * transport classdev. Note: all it really does is relinquish a
 * reference to the classdev. The memory will not be freed until the
 * last reference goes to zero. Note also that the classdev retains a
 * reference count on dev, so dev too will remain for as long as the
 * transport class device remains around.
 */
void transport_destroy_device(struct device *dev)
{
	attribute_container_remove_device(dev, transport_destroy_classdev);
}
EXPORT_SYMBOL_GPL(transport_destroy_device);
gpl-2.0
whoi-acomms/linux
net/irda/timer.c
5092
6440
/*********************************************************************
 *
 * Filename:      timer.c
 * Version:
 * Description:   Helpers to start/stop the IrLAP and IrLMP protocol
 *                timers, plus the IrLAP timer expiry callbacks.
 * Status:        Experimental.
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Sat Aug 16 00:59:29 1997
 * Modified at:   Wed Dec  8 12:50:34 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
 *     All Rights Reserved.
 *     Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Dag Brattli nor University of Tromsø admit liability nor
 *     provide warranty for any of this software. This material is
 *     provided "AS-IS" and at no charge.
 *
 ********************************************************************/

#include <linux/delay.h>

#include <net/irda/timer.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
#include <net/irda/irlap.h>
#include <net/irda/irlmp.h>

extern int sysctl_slot_timeout;

/* Expiry callbacks; each simply feeds the matching event into the
 * IrLAP state machine (see below). */
static void irlap_slot_timer_expired(void* data);
static void irlap_query_timer_expired(void* data);
static void irlap_final_timer_expired(void* data);
static void irlap_wd_timer_expired(void* data);
static void irlap_backoff_timer_expired(void* data);
static void irlap_media_busy_expired(void* data);

void irlap_start_slot_timer(struct irlap_cb *self, int timeout)
{
	irda_start_timer(&self->slot_timer, timeout, (void *) self,
			 irlap_slot_timer_expired);
}

void irlap_start_query_timer(struct irlap_cb *self, int S, int s)
{
	int timeout;

	/* Calculate when the peer discovery should end. Normally, we
	 * get the end-of-discovery frame, so this is just in case
	 * we miss it.
	 * Basically, we multiply the number of remaining slots by our
	 * slot time, plus add some extra time to properly receive the last
	 * discovery packet (which is longer due to extra discovery info),
	 * to avoid interfering with incoming connection requests and
	 * to accommodate devices that perform discovery slower than us.
	 * Jean II */
	timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s)
		   + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT);

	/* Set or re-set the timer. We reset the timer for each received
	 * discovery query, which allows us to automatically adjust to
	 * the speed of the peer discovery (faster or slower). Jean II */
	irda_start_timer( &self->query_timer, timeout, (void *) self,
			  irlap_query_timer_expired);
}

void irlap_start_final_timer(struct irlap_cb *self, int timeout)
{
	irda_start_timer(&self->final_timer, timeout, (void *) self,
			 irlap_final_timer_expired);
}

void irlap_start_wd_timer(struct irlap_cb *self, int timeout)
{
	irda_start_timer(&self->wd_timer, timeout, (void *) self,
			 irlap_wd_timer_expired);
}

void irlap_start_backoff_timer(struct irlap_cb *self, int timeout)
{
	irda_start_timer(&self->backoff_timer, timeout, (void *) self,
			 irlap_backoff_timer_expired);
}

void irlap_start_mbusy_timer(struct irlap_cb *self, int timeout)
{
	irda_start_timer(&self->media_busy_timer, timeout,
			 (void *) self, irlap_media_busy_expired);
}

void irlap_stop_mbusy_timer(struct irlap_cb *self)
{
	/* If timer is activated, kill it! */
	del_timer(&self->media_busy_timer);

	/* If we are in NDM, there is a bunch of events in LAP that
	 * may be pending due to the media_busy condition, such as
	 * CONNECT_REQUEST and SEND_UI_FRAME. If we don't generate
	 * an event, they will wait forever...
	 * Jean II */
	if (self->state == LAP_NDM)
		irlap_do_event(self, MEDIA_BUSY_TIMER_EXPIRED, NULL, NULL);
}

void irlmp_start_watchdog_timer(struct lsap_cb *self, int timeout)
{
	irda_start_timer(&self->watchdog_timer, timeout, (void *) self,
			 irlmp_watchdog_timer_expired);
}

void irlmp_start_discovery_timer(struct irlmp_cb *self, int timeout)
{
	irda_start_timer(&self->discovery_timer, timeout, (void *) self,
			 irlmp_discovery_timer_expired);
}

void irlmp_start_idle_timer(struct lap_cb *self, int timeout)
{
	irda_start_timer(&self->idle_timer, timeout, (void *) self,
			 irlmp_idle_timer_expired);
}

void irlmp_stop_idle_timer(struct lap_cb *self)
{
	/* If timer is activated, kill it! */
	del_timer(&self->idle_timer);
}

/*
 * Function irlap_slot_timer_expired (data)
 *
 *    IrLAP slot timer has expired
 *
 */
static void irlap_slot_timer_expired(void *data)
{
	struct irlap_cb *self = (struct irlap_cb *) data;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	irlap_do_event(self, SLOT_TIMER_EXPIRED, NULL, NULL);
}

/*
 * Function irlap_query_timer_expired (data)
 *
 *    IrLAP query timer has expired
 *
 */
static void irlap_query_timer_expired(void *data)
{
	struct irlap_cb *self = (struct irlap_cb *) data;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	irlap_do_event(self, QUERY_TIMER_EXPIRED, NULL, NULL);
}

/*
 * Function irlap_final_timer_expired (data)
 *
 *    IrLAP final timer has expired
 *
 */
static void irlap_final_timer_expired(void *data)
{
	struct irlap_cb *self = (struct irlap_cb *) data;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	irlap_do_event(self, FINAL_TIMER_EXPIRED, NULL, NULL);
}

/*
 * Function irlap_wd_timer_expired (data)
 *
 *    IrLAP watchdog timer has expired
 *
 */
static void irlap_wd_timer_expired(void *data)
{
	struct irlap_cb *self = (struct irlap_cb *) data;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	irlap_do_event(self, WD_TIMER_EXPIRED, NULL, NULL);
}

/*
 * Function irlap_backoff_timer_expired (data)
 *
 *    IrLAP backoff timer has expired
 *
 */
static void irlap_backoff_timer_expired(void *data)
{
	struct irlap_cb *self = (struct irlap_cb *) data;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	irlap_do_event(self, BACKOFF_TIMER_EXPIRED, NULL, NULL);
}

/*
 * Function irlap_media_busy_expired (data)
 *
 *    The "media busy" grace period has elapsed; clear the busy flag.
 *
 */
static void irlap_media_busy_expired(void *data)
{
	struct irlap_cb *self = (struct irlap_cb *) data;

	IRDA_ASSERT(self != NULL, return;);

	irda_device_set_media_busy(self->netdev, FALSE);
	/* Note : the LAP event will be sent in irlap_stop_mbusy_timer(),
	 * to catch other cases where the flag is cleared (for example
	 * after a discovery) - Jean II */
}
gpl-2.0
robcore/machinex_kernel
drivers/gpu/drm/gma500/mid_bios.c
5348
8331
/**************************************************************************
 * Copyright (c) 2011, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/

/* TODO
 *	- Split functions by vbt type
 *	- Make them all take drm_device
 *	- Check ioremap failures
 */

#include <drm/drmP.h>
#include <drm/drm.h>
#include "gma_drm.h"
#include "psb_drv.h"
#include "mid_bios.h"

/*
 * mid_get_fuse_settings - read display fuse configuration
 * @dev: DRM device
 *
 * Reads the fuse values through the host bridge (B0:D0:F0) message bus
 * (index register 0xD0, data register 0xD4) and derives the internal
 * display type (LVDS vs MIPI) and the core clock frequency (SKU).
 */
static void mid_get_fuse_settings(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
	uint32_t fuse_value = 0;
	uint32_t fuse_value_tmp = 0;

#define FB_REG06 0xD0810600
#define FB_MIPI_DISABLE  (1 << 11)
#define FB_REG09 0xD0810900	/* was accidentally defined twice */
#define FB_SKU_MASK  0x7000
#define FB_SKU_SHIFT 12
#define FB_SKU_100 0
#define FB_SKU_100L 1
#define FB_SKU_83 2
	if (pci_root == NULL) {
		WARN_ON(1);
		return;
	}

	pci_write_config_dword(pci_root, 0xD0, FB_REG06);
	pci_read_config_dword(pci_root, 0xD4, &fuse_value);

	/* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
	if (IS_MRST(dev))
		dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;

	DRM_INFO("internal display is %s\n",
		 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");

	/* Prevent runtime suspend at start*/
	if (dev_priv->iLVDS_enable) {
		dev_priv->is_lvds_on = true;
		dev_priv->is_mipi_on = false;
	} else {
		dev_priv->is_mipi_on = true;
		dev_priv->is_lvds_on = false;
	}

	dev_priv->video_device_fuse = fuse_value;

	pci_write_config_dword(pci_root, 0xD0, FB_REG09);
	pci_read_config_dword(pci_root, 0xD4, &fuse_value);

	dev_dbg(dev->dev, "SKU values is 0x%x.\n", fuse_value);
	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;

	dev_priv->fuse_reg_value = fuse_value;

	switch (fuse_value_tmp) {
	case FB_SKU_100:
		dev_priv->core_freq = 200;
		break;
	case FB_SKU_100L:
		dev_priv->core_freq = 100;
		break;
	case FB_SKU_83:
		dev_priv->core_freq = 166;
		break;
	default:
		dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
								fuse_value_tmp);
		dev_priv->core_freq = 0;
	}
	dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
	pci_dev_put(pci_root);
}

/*
 * Get the revison ID, B0:D2:F0;0x08
 */
static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
{
	uint32_t platform_rev_id = 0;
	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));

	if (pci_gfx_root == NULL) {
		WARN_ON(1);
		return;
	}
	pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
	dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
	pci_dev_put(pci_gfx_root);
	dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
					dev_priv->platform_rev_id);
}

/*
 * mid_get_vbt_data - fetch and decode the platform VBT/GCT
 * @dev_priv: device private
 *
 * Reads the physical address of the firmware-provided video BIOS table
 * from B0:D2:F0 config offset 0xFC, validates the "$GCT" signature and
 * copies the boot panel timings into dev_priv->gct_data. Supports GCT
 * header revisions 0, 1 and 0x10; on any failure vbt->size is set to 0
 * so callers can tell no VBT data is available.
 */
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
	u32 addr;
	u16 new_size;
	u8 *vbt_virtual;
	u8 bpi;
	u8 number_desc = 0;
	struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
	struct gct_r10_timing_info ti;
	void *pGCT;
	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));

	/* Be consistent with the sibling helpers: the graphics root may
	 * be absent, in which case there is no VBT to read. */
	if (pci_gfx_root == NULL) {
		WARN_ON(1);
		vbt->size = 0;
		return;
	}

	/* Get the address of the platform config vbt, B0:D2:F0;0xFC */
	pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
	pci_dev_put(pci_gfx_root);

	dev_dbg(dev->dev, "drm platform config address is %x\n", addr);

	/* check for platform config address == 0. */
	/* this means fw doesn't support vbt */

	if (addr == 0) {
		vbt->size = 0;
		return;
	}

	/* get the virtual address of the vbt */
	vbt_virtual = ioremap(addr, sizeof(*vbt));
	if (vbt_virtual == NULL) {
		vbt->size = 0;
		return;
	}

	memcpy(vbt, vbt_virtual, sizeof(*vbt));
	iounmap(vbt_virtual); /* Free virtual address space */

	/* No matching signature don't process the data */
	if (memcmp(vbt->signature, "$GCT", 4)) {
		vbt->size = 0;
		return;
	}

	dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);

	switch (vbt->revision) {
	case 0:
		vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
					vbt->size - sizeof(*vbt) + 4);
		pGCT = vbt->oaktrail_gct;
		bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
		dev_priv->gct_data.bpi = bpi;
		dev_priv->gct_data.pt =
			((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
		memcpy(&dev_priv->gct_data.DTD,
			&((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
				sizeof(struct oaktrail_timing_info));
		dev_priv->gct_data.Panel_Port_Control =
		  ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
			((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
		break;
	case 1:
		vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
					vbt->size - sizeof(*vbt) + 4);
		pGCT = vbt->oaktrail_gct;
		bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
		dev_priv->gct_data.bpi = bpi;
		dev_priv->gct_data.pt =
			((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
		memcpy(&dev_priv->gct_data.DTD,
			&((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
				sizeof(struct oaktrail_timing_info));
		dev_priv->gct_data.Panel_Port_Control =
		  ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
			((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
		break;
	case 0x10:
		/*header definition changed from rev 01 (v2) to rev 10h. */
		/*so, some values have changed location*/
		new_size = vbt->checksum; /*checksum contains lo size byte*/
		/*LSB of oaktrail_gct contains hi size byte*/
		new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;

		vbt->checksum = vbt->size; /*size contains the checksum*/
		if (new_size > 0xff)
			vbt->size = 0xff; /*restrict size to 255*/
		else
			vbt->size = new_size;

		/* number of descriptors defined in the GCT */
		number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
		bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
		vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
				GCT_R10_DISPLAY_DESC_SIZE * number_desc);
		pGCT = vbt->oaktrail_gct;
		pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
		dev_priv->gct_data.bpi = bpi; /*save boot panel id*/

		/*copy the GCT display timings into a temp structure*/
		memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));

		/*now copy the temp struct into the dev_priv->gct_data*/
		dp_ti->pixel_clock = ti.pixel_clock;
		dp_ti->hactive_hi = ti.hactive_hi;
		dp_ti->hactive_lo = ti.hactive_lo;
		dp_ti->hblank_hi = ti.hblank_hi;
		dp_ti->hblank_lo = ti.hblank_lo;
		dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
		dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
		dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
		dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
		dp_ti->vactive_hi = ti.vactive_hi;
		dp_ti->vactive_lo = ti.vactive_lo;
		dp_ti->vblank_hi = ti.vblank_hi;
		dp_ti->vblank_lo = ti.vblank_lo;
		dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
		dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
		dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
		dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;

		/* Move the MIPI_Display_Descriptor data from GCT to dev priv */
		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
							*((u8 *)pGCT + 0x0d);
		dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
						(*((u8 *)pGCT + 0x0e)) << 8;
		break;
	default:
		dev_err(dev->dev, "Unknown revision of GCT!\n");
		vbt->size = 0;
	}
}

/*
 * mid_chip_setup - one-time chip probing for MID devices
 * @dev: DRM device
 *
 * Gathers fuse settings, VBT data and the PCI revision ID. Always
 * returns 0 (the individual steps degrade gracefully on failure).
 */
int mid_chip_setup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	mid_get_fuse_settings(dev);
	mid_get_vbt_data(dev_priv);
	mid_get_pci_revID(dev_priv);
	return 0;
}
gpl-2.0
HeydayGuan/linux-3.6.7
drivers/ide/alim15x3.c
9188
15264
/*
 * Copyright (C) 1998-2000 Michel Aubry, Maintainer
 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
 * Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer
 *
 * Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org)
 * May be copied or modified under the terms of the GNU General Public License
 * Copyright (C) 2002 Alan Cox
 * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
 * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
 * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
 *
 * (U)DMA capable version of ali 1533/1543(C), 1535(D)
 *
 **********************************************************************
 * 9/7/99 --Parts from the above author are included and need to be
 * converted into standard interface, once I finish the thought.
 *
 * Recent changes
 *	Don't use LBA48 mode on ALi <= 0xC4
 *	Don't poke 0x79 with a non ALi northbridge
 *	Don't flip undefined bits on newer chipsets (fix Fujitsu laptop hang)
 *	Allow UDMA6 on revisions > 0xC4
 *
 * Documentation
 *	Chipset documentation available under NDA only
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/io.h>

#define DRV_NAME "alim15x3"

/*
 * ALi devices are not plug in. Otherwise these static values would
 * need to go. They ought to go away anyway
 */

static u8 m5229_revision;	/* revision of the M5229 IDE function */
static u8 chip_is_1543c_e;	/* set if the southbridge is a 1543C-E */
static struct pci_dev *isa_dev;	/* cached M1533 ISA bridge, if present */

/*
 * ali_fifo_control - set the per-drive FIFO mode
 *
 * Programs the 4-bit FIFO control nibble for @drive in config
 * register 0x54/0x55 (one byte per channel, one nibble per drive).
 */
static void ali_fifo_control(ide_hwif_t *hwif, ide_drive_t *drive, int on)
{
	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	int pio_fifo = 0x54 + hwif->channel;
	u8 fifo;
	int shift = 4 * (drive->dn & 1);

	pci_read_config_byte(pdev, pio_fifo, &fifo);
	fifo &= ~(0x0F << shift);
	fifo |= (on << shift);
	pci_write_config_byte(pdev, pio_fifo, fifo);
}

/*
 * ali_program_timings - write PIO/UDMA timings to the chip
 *
 * Always programs the UDMA nibble for the drive; if @t is non-NULL
 * also clamps and writes the command/data timing registers for the
 * channel. Values are clamped to the register field widths.
 */
static void ali_program_timings(ide_hwif_t *hwif, ide_drive_t *drive,
				struct ide_timing *t, u8 ultra)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	int port = hwif->channel ? 0x5c : 0x58;
	int udmat = 0x56 + hwif->channel;
	u8 unit = drive->dn & 1, udma;
	int shift = 4 * unit;

	/* Set up the UDMA */
	pci_read_config_byte(dev, udmat, &udma);
	udma &= ~(0x0F << shift);
	udma |= ultra << shift;
	pci_write_config_byte(dev, udmat, udma);

	if (t == NULL)
		return;

	t->setup = clamp_val(t->setup, 1, 8) & 7;
	t->act8b = clamp_val(t->act8b, 1, 8) & 7;
	t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
	t->active = clamp_val(t->active, 1, 8) & 7;
	t->recover = clamp_val(t->recover, 1, 16) & 15;

	pci_write_config_byte(dev, port, t->setup);
	pci_write_config_byte(dev, port + 1, (t->act8b << 4) | t->rec8b);
	pci_write_config_byte(dev, port + unit + 2,
			      (t->active << 4) | t->recover);
}

/**
 *	ali_set_pio_mode	-	set host controller for PIO mode
 *	@hwif: port
 *	@drive: drive
 *
 *	Program the controller for the given PIO mode.
 */
static void ali_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	ide_drive_t *pair = ide_get_pair_dev(drive);
	int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
	unsigned long T =  1000000 / bus_speed; /* PCI clock based */
	struct ide_timing t;

	ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
	if (pair) {
		struct ide_timing p;

		/* The two drives on a channel share timing registers,
		 * so merge with the pair's PIO (and DMA) requirements. */
		ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
		ide_timing_merge(&p, &t, &t,
			IDE_TIMING_SETUP | IDE_TIMING_8BIT);
		if (pair->dma_mode) {
			ide_timing_compute(pair, pair->dma_mode, &p, T, 1);
			ide_timing_merge(&p, &t, &t,
				IDE_TIMING_SETUP | IDE_TIMING_8BIT);
		}
	}

	/*
	 * PIO mode => ATA FIFO on, ATAPI FIFO off
	 */
	ali_fifo_control(hwif, drive, (drive->media == ide_disk) ? 0x05 : 0x00);

	ali_program_timings(hwif, drive, &t, 0);
}

/**
 *	ali_udma_filter		-	compute UDMA mask
 *	@drive: IDE device
 *
 *	Return available UDMA modes.
 *
 *	The actual rules for the ALi are:
 *		No UDMA on revisions <= 0x20
 *		Disk only for revisions < 0xC2
 *		Not WDC drives on M1543C-E (?)
 */
static u8 ali_udma_filter(ide_drive_t *drive)
{
	if (m5229_revision > 0x20 && m5229_revision < 0xC2) {
		if (drive->media != ide_disk)
			return 0;
		if (chip_is_1543c_e &&
		    strstr((char *)&drive->id[ATA_ID_PROD], "WDC "))
			return 0;
	}

	return drive->hwif->ultra_mask;
}

/**
 *	ali_set_dma_mode	-	set host controller for DMA mode
 *	@hwif: port
 *	@drive: drive
 *
 *	Configure the hardware for the desired IDE transfer mode.
 */
static void ali_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
	struct pci_dev *dev	= to_pci_dev(hwif->dev);
	ide_drive_t *pair	= ide_get_pair_dev(drive);
	int bus_speed		= ide_pci_clk ? ide_pci_clk : 33;
	unsigned long T		=  1000000 / bus_speed; /* PCI clock based */
	const u8 speed		= drive->dma_mode;
	u8 tmpbyte		= 0x00;
	struct ide_timing t;

	if (speed < XFER_UDMA_0) {
		/* MWDMA/SWDMA: program shared channel timings, merged
		 * with the pair drive's requirements (see PIO path). */
		ide_timing_compute(drive, drive->dma_mode, &t, T, 1);
		if (pair) {
			struct ide_timing p;

			ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
			ide_timing_merge(&p, &t, &t,
				IDE_TIMING_SETUP | IDE_TIMING_8BIT);
			if (pair->dma_mode) {
				ide_timing_compute(pair, pair->dma_mode,
						&p, T, 1);
				ide_timing_merge(&p, &t, &t,
					IDE_TIMING_SETUP | IDE_TIMING_8BIT);
			}
		}
		ali_program_timings(hwif, drive, &t, 0);
	} else {
		ali_program_timings(hwif, drive, NULL,
				udma_timing[speed - XFER_UDMA_0]);
		if (speed >= XFER_UDMA_3) {
			/* UDMA66+ needs the UDMA66 enable bit set */
			pci_read_config_byte(dev, 0x4b, &tmpbyte);
			tmpbyte |= 1;
			pci_write_config_byte(dev, 0x4b, tmpbyte);
		}
	}
}

/**
 *	ali_dma_check	-	DMA check
 *	@drive:	target device
 *	@cmd: command
 *
 *	Returns 1 if the DMA cannot be performed, zero on success.
 */
static int ali_dma_check(ide_drive_t *drive, struct ide_cmd *cmd)
{
	if (m5229_revision < 0xC2 && drive->media != ide_disk) {
		if (cmd->tf_flags & IDE_TFLAG_WRITE)
			return 1;	/* try PIO instead of DMA */
	}
	return 0;
}

/**
 *	init_chipset_ali15x3	-	Initialise an ALi IDE controller
 *	@dev: PCI device
 *
 *	This function initializes the ALI IDE controller and where
 *	appropriate also sets up the 1533 southbridge.
 */
static int init_chipset_ali15x3(struct pci_dev *dev)
{
	unsigned long flags;
	u8 tmpbyte;
	struct pci_dev *north = pci_get_slot(dev->bus, PCI_DEVFN(0,0));

	m5229_revision = dev->revision;

	isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);

	local_irq_save(flags);

	if (m5229_revision < 0xC2) {
		/*
		 * revision 0x20 (1543-E, 1543-F)
		 * revision 0xC0, 0xC1 (1543C-C, 1543C-D, 1543C-E)
		 * clear CD-ROM DMA write bit, m5229, 0x4b, bit 7
		 */
		pci_read_config_byte(dev, 0x4b, &tmpbyte);
		/*
		 * clear bit 7
		 */
		pci_write_config_byte(dev, 0x4b, tmpbyte & 0x7F);
		/*
		 * check m1533, 0x5e, bit 1~4 == 1001 => & 00011110 = 00010010
		 */
		if (m5229_revision >= 0x20 && isa_dev) {
			pci_read_config_byte(isa_dev, 0x5e, &tmpbyte);
			chip_is_1543c_e = ((tmpbyte & 0x1e) == 0x12) ? 1: 0;
		}
		goto out;
	}

	/*
	 * 1543C-B?, 1535, 1535D, 1553
	 * Note 1: not all "motherboard" support this detection
	 * Note 2: if no udma 66 device, the detection may "error".
	 *         but in this case, we will not set the device to
	 *         ultra 66, the detection result is not important
	 */

	/*
	 * enable "Cable Detection", m5229, 0x4b, bit3
	 */
	pci_read_config_byte(dev, 0x4b, &tmpbyte);
	pci_write_config_byte(dev, 0x4b, tmpbyte | 0x08);

	/*
	 * We should only tune the 1533 enable if we are using an ALi
	 * North bridge. We might have no north found on some zany
	 * box without a device at 0:0.0. The ALi bridge will be at
	 * 0:0.0 so if we didn't find one we know what is cooking.
	 */
	if (north && north->vendor != PCI_VENDOR_ID_AL)
		goto out;

	if (m5229_revision < 0xC5 && isa_dev) {
		/*
		 * set south-bridge's enable bit, m1533, 0x79
		 */
		pci_read_config_byte(isa_dev, 0x79, &tmpbyte);
		if (m5229_revision == 0xC2) {
			/*
			 * 1543C-B0 (m1533, 0x79, bit 2)
			 */
			pci_write_config_byte(isa_dev, 0x79, tmpbyte | 0x04);
		} else if (m5229_revision >= 0xC3) {
			/*
			 * 1553/1535 (m1533, 0x79, bit 1)
			 */
			pci_write_config_byte(isa_dev, 0x79, tmpbyte | 0x02);
		}
	}

out:
	/*
	 * CD_ROM DMA on (m5229, 0x53, bit0)
	 *      Enable this bit even if we want to use PIO.
	 * PIO FIFO off (m5229, 0x53, bit1)
	 *      The hardware will use 0x54h and 0x55h to control PIO FIFO.
	 *	(Not on later devices it seems)
	 *
	 *	0x53 changes meaning on later revs - we must no touch
	 *	bit 1 on them.  Need to check if 0x20 is the right break.
	 */
	if (m5229_revision >= 0x20) {
		pci_read_config_byte(dev, 0x53, &tmpbyte);

		if (m5229_revision <= 0x20)
			tmpbyte = (tmpbyte & (~0x02)) | 0x01;
		else if (m5229_revision == 0xc7 || m5229_revision == 0xc8)
			tmpbyte |= 0x03;
		else
			tmpbyte |= 0x01;

		pci_write_config_byte(dev, 0x53, tmpbyte);
	}
	pci_dev_put(north);
	pci_dev_put(isa_dev);
	local_irq_restore(flags);
	return 0;
}

/*
 *	Cable special cases
 */

static const struct dmi_system_id cable_dmi_table[] = {
	{
		.ident = "HP Pavilion N5430",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
			DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
		},
	},
	{
		.ident = "Toshiba Satellite S1800-814",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"),
		},
	},
	{ }
};

/* Boards whose cable detect logic is absent or unreliable and which
 * are known to use short, 80-wire-capable cables anyway. */
static int ali_cable_override(struct pci_dev *pdev)
{
	/* Fujitsu P2000 */
	if (pdev->subsystem_vendor == 0x10CF &&
	    pdev->subsystem_device == 0x10AF)
		return 1;

	/* Mitac 8317 (Winbook-A) and relatives */
	if (pdev->subsystem_vendor == 0x1071 &&
	    pdev->subsystem_device == 0x8317)
		return 1;

	/* Systems by DMI */
	if (dmi_check_system(cable_dmi_table))
		return 1;

	return 0;
}

/**
 *	ali_cable_detect	-	cable detection
 *	@hwif: IDE interface
 *
 *	This checks if the controller and the cable are capable
 *	of UDMA66 transfers. It doesn't check the drives.
 */
static u8 ali_cable_detect(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	u8 cbl = ATA_CBL_PATA40, tmpbyte;

	if (m5229_revision >= 0xC2) {
		/*
		 * m5229 80-pin cable detection (from Host View)
		 *
		 * 0x4a bit0 is 0 => primary channel has 80-pin
		 * 0x4a bit1 is 0 => secondary channel has 80-pin
		 *
		 * Certain laptops use short but suitable cables
		 * and don't implement the detect logic.
		 */
		if (ali_cable_override(dev))
			cbl = ATA_CBL_PATA40_SHORT;
		else {
			pci_read_config_byte(dev, 0x4a, &tmpbyte);
			if ((tmpbyte & (1 << hwif->channel)) == 0)
				cbl = ATA_CBL_PATA80;
		}
	}

	return cbl;
}

#ifndef CONFIG_SPARC64
/**
 *	init_hwif_ali15x3	-	Initialize the ALI IDE x86 stuff
 *	@hwif: interface to configure
 *
 *	Obtain the IRQ tables for an ALi based IDE solution on the PC
 *	class platforms. This part of the code isn't applicable to the
 *	Sparc systems.
 */
static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
{
	u8 ideic, inmir;
	s8 irq_routing_table[] = { -1,  9, 3, 10, 4,  5, 7,  6,
				      1, 11, 0, 12, 0, 14, 0, 15 };
	int irq = -1;

	if (isa_dev) {
		/*
		 * read IDE interface control
		 */
		pci_read_config_byte(isa_dev, 0x58, &ideic);

		/* bit0, bit1 */
		ideic = ideic & 0x03;

		/* get IRQ for IDE Controller */
		if ((hwif->channel && ideic == 0x03) ||
		    (!hwif->channel && !ideic)) {
			/*
			 * get SIRQ1 routing table
			 */
			pci_read_config_byte(isa_dev, 0x44, &inmir);
			inmir = inmir & 0x0f;
			irq = irq_routing_table[inmir];
		} else if (hwif->channel && !(ideic & 0x01)) {
			/*
			 * get SIRQ2 routing table
			 */
			pci_read_config_byte(isa_dev, 0x75, &inmir);
			inmir = inmir & 0x0f;
			irq = irq_routing_table[inmir];
		}
		if(irq >= 0)
			hwif->irq = irq;
	}
}
#else
#define init_hwif_ali15x3 NULL
#endif /* CONFIG_SPARC64 */

/**
 *	init_dma_ali15x3	-	set up DMA on ALi15x3
 *	@hwif: IDE interface
 *	@d: IDE port info
 *
 *	Set up the DMA functionality on the ALi 15x3.
 */
static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
				      const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long base = ide_pci_dma_base(hwif, d);

	if (base == 0)
		return -1;

	hwif->dma_base = base;

	if (ide_pci_check_simplex(hwif, d) < 0)
		return -1;

	if (ide_pci_set_master(dev, d->name) < 0)
		return -1;

	if (!hwif->channel)
		outb(inb(base + 2) & 0x60, base + 2);

	printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx\n",
	       hwif->name, base, base + 7);

	if (ide_allocate_dma_engine(hwif))
		return -1;

	return 0;
}

static const struct ide_port_ops ali_port_ops = {
	.set_pio_mode		= ali_set_pio_mode,
	.set_dma_mode		= ali_set_dma_mode,
	.udma_filter		= ali_udma_filter,
	.cable_detect		= ali_cable_detect,
};

static const struct ide_dma_ops ali_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_start		= ide_dma_start,
	.dma_end		= ide_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_check		= ali_dma_check,
	.dma_timer_expiry	= ide_dma_sff_timer_expiry,
	.dma_sff_read_status	= ide_dma_sff_read_status,
};

static const struct ide_port_info ali15x3_chipset __devinitdata = {
	.name		= DRV_NAME,
	.init_chipset	= init_chipset_ali15x3,
	.init_hwif	= init_hwif_ali15x3,
	.init_dma	= init_dma_ali15x3,
	.port_ops	= &ali_port_ops,
	.dma_ops	= &sff_dma_ops,
	.pio_mask	= ATA_PIO5,
	.swdma_mask	= ATA_SWDMA2,
	.mwdma_mask	= ATA_MWDMA2,
};

/**
 *	alim15x3_init_one	-	set up an ALi15x3 IDE controller
 *	@dev: PCI device to set up
 *
 *	Perform the actual set up for an ALi15x3 that has been found by the
 *	hot plug layer.
 */
static int __devinit alim15x3_init_one(struct pci_dev *dev,
				       const struct pci_device_id *id)
{
	struct ide_port_info d = ali15x3_chipset;
	u8 rev = dev->revision, idx = id->driver_data;

	/* don't use LBA48 DMA on ALi devices before rev 0xC5 */
	if (rev <= 0xC4)
		d.host_flags |= IDE_HFLAG_NO_LBA48_DMA;

	if (rev >= 0x20) {
		if (rev == 0x20)
			d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;

		/* UDMA capability grows with the chip revision */
		if (rev < 0xC2)
			d.udma_mask = ATA_UDMA2;
		else if (rev == 0xC2 || rev == 0xC3)
			d.udma_mask = ATA_UDMA4;
		else if (rev == 0xC4)
			d.udma_mask = ATA_UDMA5;
		else
			d.udma_mask = ATA_UDMA6;

		d.dma_ops = &ali_dma_ops;
	} else {
		d.host_flags |= IDE_HFLAG_NO_DMA;

		d.mwdma_mask = d.swdma_mask = 0;
	}

	if (idx == 0)
		d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;

	return ide_pci_init_one(dev, &d, NULL);
}

static const struct pci_device_id alim15x3_pci_tbl[] = {
	{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), 0 },
	{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), 1 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, alim15x3_pci_tbl);

static struct pci_driver alim15x3_pci_driver = {
	.name		= "ALI15x3_IDE",
	.id_table	= alim15x3_pci_tbl,
	.probe		= alim15x3_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};

static int __init ali15x3_ide_init(void)
{
	return ide_pci_register_driver(&alim15x3_pci_driver);
}

static void __exit ali15x3_ide_exit(void)
{
	pci_unregister_driver(&alim15x3_pci_driver);
}

module_init(ali15x3_ide_init);
module_exit(ali15x3_ide_exit);

MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE");
MODULE_LICENSE("GPL");
gpl-2.0
djvoleur/V_920P_BOF7
arch/mips/fw/cfe/cfe_api.c
11236
11211
/* * Copyright (C) 2000, 2001, 2002 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* * * Broadcom Common Firmware Environment (CFE) * * This module contains device function stubs (small routines to * call the standard "iocb" interface entry point to CFE). * There should be one routine here per iocb function call. * * Authors: Mitch Lichtenberg, Chris Demetriou */ #include <asm/fw/cfe/cfe_api.h> #include "cfe_api_int.h" /* Cast from a native pointer to a cfe_xptr_t and back. */ #define XPTR_FROM_NATIVE(n) ((cfe_xptr_t) (intptr_t) (n)) #define NATIVE_FROM_XPTR(x) ((void *) (intptr_t) (x)) int cfe_iocb_dispatch(struct cfe_xiocb *xiocb); /* * Declare the dispatch function with args of "intptr_t". * This makes sure whatever model we're compiling in * puts the pointers in a single register. For example, * combining -mlong64 and -mips1 or -mips2 would lead to * trouble, since the handle and IOCB pointer will be * passed in two registers each, and CFE expects one. 
*/ static int (*cfe_dispfunc) (intptr_t handle, intptr_t xiocb); static u64 cfe_handle; int cfe_init(u64 handle, u64 ept) { cfe_dispfunc = NATIVE_FROM_XPTR(ept); cfe_handle = handle; return 0; } int cfe_iocb_dispatch(struct cfe_xiocb * xiocb) { if (!cfe_dispfunc) return -1; return (*cfe_dispfunc) ((intptr_t) cfe_handle, (intptr_t) xiocb); } int cfe_close(int handle) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_DEV_CLOSE; xiocb.xiocb_status = 0; xiocb.xiocb_handle = handle; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = 0; cfe_iocb_dispatch(&xiocb); return xiocb.xiocb_status; } int cfe_cpu_start(int cpu, void (*fn) (void), long sp, long gp, long a1) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_FW_CPUCTL; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_cpuctl); xiocb.plist.xiocb_cpuctl.cpu_number = cpu; xiocb.plist.xiocb_cpuctl.cpu_command = CFE_CPU_CMD_START; xiocb.plist.xiocb_cpuctl.gp_val = gp; xiocb.plist.xiocb_cpuctl.sp_val = sp; xiocb.plist.xiocb_cpuctl.a1_val = a1; xiocb.plist.xiocb_cpuctl.start_addr = (long) fn; cfe_iocb_dispatch(&xiocb); return xiocb.xiocb_status; } int cfe_cpu_stop(int cpu) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_FW_CPUCTL; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_cpuctl); xiocb.plist.xiocb_cpuctl.cpu_number = cpu; xiocb.plist.xiocb_cpuctl.cpu_command = CFE_CPU_CMD_STOP; cfe_iocb_dispatch(&xiocb); return xiocb.xiocb_status; } int cfe_enumenv(int idx, char *name, int namelen, char *val, int vallen) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_ENV_SET; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_envbuf); xiocb.plist.xiocb_envbuf.enum_idx = idx; xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name); xiocb.plist.xiocb_envbuf.name_length = namelen; xiocb.plist.xiocb_envbuf.val_ptr = XPTR_FROM_NATIVE(val); 
xiocb.plist.xiocb_envbuf.val_length = vallen; cfe_iocb_dispatch(&xiocb); return xiocb.xiocb_status; } int cfe_enummem(int idx, int flags, u64 *start, u64 *length, u64 *type) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_FW_MEMENUM; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = flags; xiocb.xiocb_psize = sizeof(struct xiocb_meminfo); xiocb.plist.xiocb_meminfo.mi_idx = idx; cfe_iocb_dispatch(&xiocb); if (xiocb.xiocb_status < 0) return xiocb.xiocb_status; *start = xiocb.plist.xiocb_meminfo.mi_addr; *length = xiocb.plist.xiocb_meminfo.mi_size; *type = xiocb.plist.xiocb_meminfo.mi_type; return 0; } int cfe_exit(int warm, int status) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_FW_RESTART; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = warm ? CFE_FLG_WARMSTART : 0; xiocb.xiocb_psize = sizeof(struct xiocb_exitstat); xiocb.plist.xiocb_exitstat.status = status; cfe_iocb_dispatch(&xiocb); return xiocb.xiocb_status; } int cfe_flushcache(int flg) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_FW_FLUSHCACHE; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = flg; xiocb.xiocb_psize = 0; cfe_iocb_dispatch(&xiocb); return xiocb.xiocb_status; } int cfe_getdevinfo(char *name) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_DEV_GETINFO; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_buffer); xiocb.plist.xiocb_buffer.buf_offset = 0; xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(name); xiocb.plist.xiocb_buffer.buf_length = strlen(name); cfe_iocb_dispatch(&xiocb); if (xiocb.xiocb_status < 0) return xiocb.xiocb_status; return xiocb.plist.xiocb_buffer.buf_ioctlcmd; } int cfe_getenv(char *name, char *dest, int destlen) { struct cfe_xiocb xiocb; *dest = 0; xiocb.xiocb_fcode = CFE_CMD_ENV_GET; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_envbuf); 
xiocb.plist.xiocb_envbuf.enum_idx = 0; xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name); xiocb.plist.xiocb_envbuf.name_length = strlen(name); xiocb.plist.xiocb_envbuf.val_ptr = XPTR_FROM_NATIVE(dest); xiocb.plist.xiocb_envbuf.val_length = destlen; cfe_iocb_dispatch(&xiocb); return xiocb.xiocb_status; } int cfe_getfwinfo(cfe_fwinfo_t * info) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_FW_GETINFO; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_fwinfo); cfe_iocb_dispatch(&xiocb); if (xiocb.xiocb_status < 0) return xiocb.xiocb_status; info->fwi_version = xiocb.plist.xiocb_fwinfo.fwi_version; info->fwi_totalmem = xiocb.plist.xiocb_fwinfo.fwi_totalmem; info->fwi_flags = xiocb.plist.xiocb_fwinfo.fwi_flags; info->fwi_boardid = xiocb.plist.xiocb_fwinfo.fwi_boardid; info->fwi_bootarea_va = xiocb.plist.xiocb_fwinfo.fwi_bootarea_va; info->fwi_bootarea_pa = xiocb.plist.xiocb_fwinfo.fwi_bootarea_pa; info->fwi_bootarea_size = xiocb.plist.xiocb_fwinfo.fwi_bootarea_size; #if 0 info->fwi_reserved1 = xiocb.plist.xiocb_fwinfo.fwi_reserved1; info->fwi_reserved2 = xiocb.plist.xiocb_fwinfo.fwi_reserved2; info->fwi_reserved3 = xiocb.plist.xiocb_fwinfo.fwi_reserved3; #endif return 0; } int cfe_getstdhandle(int flg) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_DEV_GETHANDLE; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = flg; xiocb.xiocb_psize = 0; cfe_iocb_dispatch(&xiocb); if (xiocb.xiocb_status < 0) return xiocb.xiocb_status; return xiocb.xiocb_handle; } int64_t cfe_getticks(void) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_FW_GETTIME; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_time); xiocb.plist.xiocb_time.ticks = 0; cfe_iocb_dispatch(&xiocb); return xiocb.plist.xiocb_time.ticks; } int cfe_inpstat(int handle) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_DEV_INPSTAT; xiocb.xiocb_status = 0; 
xiocb.xiocb_handle = handle; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_inpstat); xiocb.plist.xiocb_inpstat.inp_status = 0; cfe_iocb_dispatch(&xiocb); if (xiocb.xiocb_status < 0) return xiocb.xiocb_status; return xiocb.plist.xiocb_inpstat.inp_status; } int cfe_ioctl(int handle, unsigned int ioctlnum, unsigned char *buffer, int length, int *retlen, u64 offset) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_DEV_IOCTL; xiocb.xiocb_status = 0; xiocb.xiocb_handle = handle; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_buffer); xiocb.plist.xiocb_buffer.buf_offset = offset; xiocb.plist.xiocb_buffer.buf_ioctlcmd = ioctlnum; xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(buffer); xiocb.plist.xiocb_buffer.buf_length = length; cfe_iocb_dispatch(&xiocb); if (retlen) *retlen = xiocb.plist.xiocb_buffer.buf_retlen; return xiocb.xiocb_status; } int cfe_open(char *name) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_DEV_OPEN; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_buffer); xiocb.plist.xiocb_buffer.buf_offset = 0; xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(name); xiocb.plist.xiocb_buffer.buf_length = strlen(name); cfe_iocb_dispatch(&xiocb); if (xiocb.xiocb_status < 0) return xiocb.xiocb_status; return xiocb.xiocb_handle; } int cfe_read(int handle, unsigned char *buffer, int length) { return cfe_readblk(handle, 0, buffer, length); } int cfe_readblk(int handle, s64 offset, unsigned char *buffer, int length) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_DEV_READ; xiocb.xiocb_status = 0; xiocb.xiocb_handle = handle; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_buffer); xiocb.plist.xiocb_buffer.buf_offset = offset; xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(buffer); xiocb.plist.xiocb_buffer.buf_length = length; cfe_iocb_dispatch(&xiocb); if (xiocb.xiocb_status < 0) return xiocb.xiocb_status; return 
xiocb.plist.xiocb_buffer.buf_retlen; } int cfe_setenv(char *name, char *val) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_ENV_SET; xiocb.xiocb_status = 0; xiocb.xiocb_handle = 0; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_envbuf); xiocb.plist.xiocb_envbuf.enum_idx = 0; xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name); xiocb.plist.xiocb_envbuf.name_length = strlen(name); xiocb.plist.xiocb_envbuf.val_ptr = XPTR_FROM_NATIVE(val); xiocb.plist.xiocb_envbuf.val_length = strlen(val); cfe_iocb_dispatch(&xiocb); return xiocb.xiocb_status; } int cfe_write(int handle, unsigned char *buffer, int length) { return cfe_writeblk(handle, 0, buffer, length); } int cfe_writeblk(int handle, s64 offset, unsigned char *buffer, int length) { struct cfe_xiocb xiocb; xiocb.xiocb_fcode = CFE_CMD_DEV_WRITE; xiocb.xiocb_status = 0; xiocb.xiocb_handle = handle; xiocb.xiocb_flags = 0; xiocb.xiocb_psize = sizeof(struct xiocb_buffer); xiocb.plist.xiocb_buffer.buf_offset = offset; xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(buffer); xiocb.plist.xiocb_buffer.buf_length = length; cfe_iocb_dispatch(&xiocb); if (xiocb.xiocb_status < 0) return xiocb.xiocb_status; return xiocb.plist.xiocb_buffer.buf_retlen; }
gpl-2.0
TRKP/android_kernel_samsung_i9300
arch/arm/mach-integrator/lm.c
12516
2132
/* * linux/arch/arm/mach-integrator/lm.c * * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/slab.h> #include <mach/lm.h> #define to_lm_device(d) container_of(d, struct lm_device, dev) #define to_lm_driver(d) container_of(d, struct lm_driver, drv) static int lm_match(struct device *dev, struct device_driver *drv) { return 1; } static int lm_bus_probe(struct device *dev) { struct lm_device *lmdev = to_lm_device(dev); struct lm_driver *lmdrv = to_lm_driver(dev->driver); return lmdrv->probe(lmdev); } static int lm_bus_remove(struct device *dev) { struct lm_device *lmdev = to_lm_device(dev); struct lm_driver *lmdrv = to_lm_driver(dev->driver); if (lmdrv->remove) lmdrv->remove(lmdev); return 0; } static struct bus_type lm_bustype = { .name = "logicmodule", .match = lm_match, .probe = lm_bus_probe, .remove = lm_bus_remove, // .suspend = lm_bus_suspend, // .resume = lm_bus_resume, }; static int __init lm_init(void) { return bus_register(&lm_bustype); } postcore_initcall(lm_init); int lm_driver_register(struct lm_driver *drv) { drv->drv.bus = &lm_bustype; return driver_register(&drv->drv); } void lm_driver_unregister(struct lm_driver *drv) { driver_unregister(&drv->drv); } static void lm_device_release(struct device *dev) { struct lm_device *d = to_lm_device(dev); kfree(d); } int lm_device_register(struct lm_device *dev) { int ret; dev->dev.release = lm_device_release; dev->dev.bus = &lm_bustype; ret = dev_set_name(&dev->dev, "lm%d", dev->id); if (ret) return ret; dev->resource.name = dev_name(&dev->dev); ret = request_resource(&iomem_resource, &dev->resource); if (ret == 0) { ret = device_register(&dev->dev); if (ret) release_resource(&dev->resource); } return ret; } 
EXPORT_SYMBOL(lm_driver_register); EXPORT_SYMBOL(lm_driver_unregister);
gpl-2.0
x456/kernel
fs/afs/cache.c
12772
11040
/* AFS caching stuff * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include "internal.h" static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen); static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static enum fscache_checkaux afs_vlocation_cache_check_aux( void *cookie_netfs_data, const void *buffer, uint16_t buflen); static uint16_t afs_volume_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static uint16_t afs_vnode_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static void afs_vnode_cache_get_attr(const void *cookie_netfs_data, uint64_t *size); static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen); static void afs_vnode_cache_now_uncached(void *cookie_netfs_data); struct fscache_netfs afs_cache_netfs = { .name = "afs", .version = 0, }; struct fscache_cookie_def afs_cell_cache_index_def = { .name = "AFS.cell", .type = FSCACHE_COOKIE_TYPE_INDEX, .get_key = afs_cell_cache_get_key, .get_aux = afs_cell_cache_get_aux, .check_aux = afs_cell_cache_check_aux, }; struct fscache_cookie_def 
afs_vlocation_cache_index_def = { .name = "AFS.vldb", .type = FSCACHE_COOKIE_TYPE_INDEX, .get_key = afs_vlocation_cache_get_key, .get_aux = afs_vlocation_cache_get_aux, .check_aux = afs_vlocation_cache_check_aux, }; struct fscache_cookie_def afs_volume_cache_index_def = { .name = "AFS.volume", .type = FSCACHE_COOKIE_TYPE_INDEX, .get_key = afs_volume_cache_get_key, }; struct fscache_cookie_def afs_vnode_cache_index_def = { .name = "AFS.vnode", .type = FSCACHE_COOKIE_TYPE_DATAFILE, .get_key = afs_vnode_cache_get_key, .get_attr = afs_vnode_cache_get_attr, .get_aux = afs_vnode_cache_get_aux, .check_aux = afs_vnode_cache_check_aux, .now_uncached = afs_vnode_cache_now_uncached, }; /* * set the key for the index entry */ static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_cell *cell = cookie_netfs_data; uint16_t klen; _enter("%p,%p,%u", cell, buffer, bufmax); klen = strlen(cell->name); if (klen > bufmax) return 0; memcpy(buffer, cell->name, klen); return klen; } /* * provide new auxiliary cache data */ static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_cell *cell = cookie_netfs_data; uint16_t dlen; _enter("%p,%p,%u", cell, buffer, bufmax); dlen = cell->vl_naddrs * sizeof(cell->vl_addrs[0]); dlen = min(dlen, bufmax); dlen &= ~(sizeof(cell->vl_addrs[0]) - 1); memcpy(buffer, cell->vl_addrs, dlen); return dlen; } /* * check that the auxiliary data indicates that the entry is still valid */ static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen) { _leave(" = OKAY"); return FSCACHE_CHECKAUX_OKAY; } /*****************************************************************************/ /* * set the key for the index entry */ static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_vlocation *vlocation = cookie_netfs_data; 
uint16_t klen; _enter("{%s},%p,%u", vlocation->vldb.name, buffer, bufmax); klen = strnlen(vlocation->vldb.name, sizeof(vlocation->vldb.name)); if (klen > bufmax) return 0; memcpy(buffer, vlocation->vldb.name, klen); _leave(" = %u", klen); return klen; } /* * provide new auxiliary cache data */ static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_vlocation *vlocation = cookie_netfs_data; uint16_t dlen; _enter("{%s},%p,%u", vlocation->vldb.name, buffer, bufmax); dlen = sizeof(struct afs_cache_vlocation); dlen -= offsetof(struct afs_cache_vlocation, nservers); if (dlen > bufmax) return 0; memcpy(buffer, (uint8_t *)&vlocation->vldb.nservers, dlen); _leave(" = %u", dlen); return dlen; } /* * check that the auxiliary data indicates that the entry is still valid */ static enum fscache_checkaux afs_vlocation_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen) { const struct afs_cache_vlocation *cvldb; struct afs_vlocation *vlocation = cookie_netfs_data; uint16_t dlen; _enter("{%s},%p,%u", vlocation->vldb.name, buffer, buflen); /* check the size of the data is what we're expecting */ dlen = sizeof(struct afs_cache_vlocation); dlen -= offsetof(struct afs_cache_vlocation, nservers); if (dlen != buflen) return FSCACHE_CHECKAUX_OBSOLETE; cvldb = container_of(buffer, struct afs_cache_vlocation, nservers); /* if what's on disk is more valid than what's in memory, then use the * VL record from the cache */ if (!vlocation->valid || vlocation->vldb.rtime == cvldb->rtime) { memcpy((uint8_t *)&vlocation->vldb.nservers, buffer, dlen); vlocation->valid = 1; _leave(" = SUCCESS [c->m]"); return FSCACHE_CHECKAUX_OKAY; } /* need to update the cache if the cached info differs */ if (memcmp(&vlocation->vldb, buffer, dlen) != 0) { /* delete if the volume IDs for this name differ */ if (memcmp(&vlocation->vldb.vid, &cvldb->vid, sizeof(cvldb->vid)) != 0 ) { _leave(" = OBSOLETE"); return 
FSCACHE_CHECKAUX_OBSOLETE; } _leave(" = UPDATE"); return FSCACHE_CHECKAUX_NEEDS_UPDATE; } _leave(" = OKAY"); return FSCACHE_CHECKAUX_OKAY; } /*****************************************************************************/ /* * set the key for the volume index entry */ static uint16_t afs_volume_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_volume *volume = cookie_netfs_data; uint16_t klen; _enter("{%u},%p,%u", volume->type, buffer, bufmax); klen = sizeof(volume->type); if (klen > bufmax) return 0; memcpy(buffer, &volume->type, sizeof(volume->type)); _leave(" = %u", klen); return klen; } /*****************************************************************************/ /* * set the key for the index entry */ static uint16_t afs_vnode_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_vnode *vnode = cookie_netfs_data; uint16_t klen; _enter("{%x,%x,%llx},%p,%u", vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version, buffer, bufmax); klen = sizeof(vnode->fid.vnode); if (klen > bufmax) return 0; memcpy(buffer, &vnode->fid.vnode, sizeof(vnode->fid.vnode)); _leave(" = %u", klen); return klen; } /* * provide updated file attributes */ static void afs_vnode_cache_get_attr(const void *cookie_netfs_data, uint64_t *size) { const struct afs_vnode *vnode = cookie_netfs_data; _enter("{%x,%x,%llx},", vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version); *size = vnode->status.size; } /* * provide new auxiliary cache data */ static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_vnode *vnode = cookie_netfs_data; uint16_t dlen; _enter("{%x,%x,%Lx},%p,%u", vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version, buffer, bufmax); dlen = sizeof(vnode->fid.unique) + sizeof(vnode->status.data_version); if (dlen > bufmax) return 0; memcpy(buffer, &vnode->fid.unique, sizeof(vnode->fid.unique)); buffer += 
sizeof(vnode->fid.unique); memcpy(buffer, &vnode->status.data_version, sizeof(vnode->status.data_version)); _leave(" = %u", dlen); return dlen; } /* * check that the auxiliary data indicates that the entry is still valid */ static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen) { struct afs_vnode *vnode = cookie_netfs_data; uint16_t dlen; _enter("{%x,%x,%llx},%p,%u", vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version, buffer, buflen); /* check the size of the data is what we're expecting */ dlen = sizeof(vnode->fid.unique) + sizeof(vnode->status.data_version); if (dlen != buflen) { _leave(" = OBSOLETE [len %hx != %hx]", dlen, buflen); return FSCACHE_CHECKAUX_OBSOLETE; } if (memcmp(buffer, &vnode->fid.unique, sizeof(vnode->fid.unique) ) != 0) { unsigned unique; memcpy(&unique, buffer, sizeof(unique)); _leave(" = OBSOLETE [uniq %x != %x]", unique, vnode->fid.unique); return FSCACHE_CHECKAUX_OBSOLETE; } if (memcmp(buffer + sizeof(vnode->fid.unique), &vnode->status.data_version, sizeof(vnode->status.data_version) ) != 0) { afs_dataversion_t version; memcpy(&version, buffer + sizeof(vnode->fid.unique), sizeof(version)); _leave(" = OBSOLETE [vers %llx != %llx]", version, vnode->status.data_version); return FSCACHE_CHECKAUX_OBSOLETE; } _leave(" = SUCCESS"); return FSCACHE_CHECKAUX_OKAY; } /* * indication the cookie is no longer uncached * - this function is called when the backing store currently caching a cookie * is removed * - the netfs should use this to clean up any markers indicating cached pages * - this is mandatory for any object that may have data */ static void afs_vnode_cache_now_uncached(void *cookie_netfs_data) { struct afs_vnode *vnode = cookie_netfs_data; struct pagevec pvec; pgoff_t first; int loop, nr_pages; _enter("{%x,%x,%Lx}", vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version); pagevec_init(&pvec, 0); first = 0; for (;;) { /* grab a bunch of pages to clean */ 
nr_pages = pagevec_lookup(&pvec, vnode->vfs_inode.i_mapping, first, PAGEVEC_SIZE - pagevec_count(&pvec)); if (!nr_pages) break; for (loop = 0; loop < nr_pages; loop++) ClearPageFsCache(pvec.pages[loop]); first = pvec.pages[nr_pages - 1]->index + 1; pvec.nr = nr_pages; pagevec_release(&pvec); cond_resched(); } _leave(""); }
gpl-2.0
dwindsor/linux-next
drivers/spi/spi-cadence.c
229
21405
/* * Cadence SPI controller driver (master mode only) * * Copyright (C) 2008 - 2014 Xilinx, Inc. * * based on Blackfin On-Chip SPI Driver (spi_bfin5xx.c) * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/spi/spi.h> /* Name of this driver */ #define CDNS_SPI_NAME "cdns-spi" /* Register offset definitions */ #define CDNS_SPI_CR 0x00 /* Configuration Register, RW */ #define CDNS_SPI_ISR 0x04 /* Interrupt Status Register, RO */ #define CDNS_SPI_IER 0x08 /* Interrupt Enable Register, WO */ #define CDNS_SPI_IDR 0x0c /* Interrupt Disable Register, WO */ #define CDNS_SPI_IMR 0x10 /* Interrupt Enabled Mask Register, RO */ #define CDNS_SPI_ER 0x14 /* Enable/Disable Register, RW */ #define CDNS_SPI_DR 0x18 /* Delay Register, RW */ #define CDNS_SPI_TXD 0x1C /* Data Transmit Register, WO */ #define CDNS_SPI_RXD 0x20 /* Data Receive Register, RO */ #define CDNS_SPI_SICR 0x24 /* Slave Idle Count Register, RW */ #define CDNS_SPI_THLD 0x28 /* Transmit FIFO Watermark Register,RW */ #define SPI_AUTOSUSPEND_TIMEOUT 3000 /* * SPI Configuration Register bit Masks * * This register contains various control bits that affect the operation * of the SPI controller */ #define CDNS_SPI_CR_MANSTRT 0x00010000 /* Manual TX Start */ #define CDNS_SPI_CR_CPHA 0x00000004 /* Clock Phase Control */ #define CDNS_SPI_CR_CPOL 0x00000002 /* Clock Polarity Control */ #define CDNS_SPI_CR_SSCTRL 0x00003C00 /* Slave Select Mask */ #define CDNS_SPI_CR_PERI_SEL 0x00000200 /* Peripheral Select Decode */ #define CDNS_SPI_CR_BAUD_DIV 0x00000038 /* 
Baud Rate Divisor Mask */ #define CDNS_SPI_CR_MSTREN 0x00000001 /* Master Enable Mask */ #define CDNS_SPI_CR_MANSTRTEN 0x00008000 /* Manual TX Enable Mask */ #define CDNS_SPI_CR_SSFORCE 0x00004000 /* Manual SS Enable Mask */ #define CDNS_SPI_CR_BAUD_DIV_4 0x00000008 /* Default Baud Div Mask */ #define CDNS_SPI_CR_DEFAULT (CDNS_SPI_CR_MSTREN | \ CDNS_SPI_CR_SSCTRL | \ CDNS_SPI_CR_SSFORCE | \ CDNS_SPI_CR_BAUD_DIV_4) /* * SPI Configuration Register - Baud rate and slave select * * These are the values used in the calculation of baud rate divisor and * setting the slave select. */ #define CDNS_SPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */ #define CDNS_SPI_BAUD_DIV_MIN 1 /* Baud rate divisor minimum */ #define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */ #define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */ #define CDNS_SPI_SS0 0x1 /* Slave Select zero */ /* * SPI Interrupt Registers bit Masks * * All the four interrupt registers (Status/Mask/Enable/Disable) have the same * bit definitions. 
*/ #define CDNS_SPI_IXR_TXOW 0x00000004 /* SPI TX FIFO Overwater */ #define CDNS_SPI_IXR_MODF 0x00000002 /* SPI Mode Fault */ #define CDNS_SPI_IXR_RXNEMTY 0x00000010 /* SPI RX FIFO Not Empty */ #define CDNS_SPI_IXR_DEFAULT (CDNS_SPI_IXR_TXOW | \ CDNS_SPI_IXR_MODF) #define CDNS_SPI_IXR_TXFULL 0x00000008 /* SPI TX Full */ #define CDNS_SPI_IXR_ALL 0x0000007F /* SPI all interrupts */ /* * SPI Enable Register bit Masks * * This register is used to enable or disable the SPI controller */ #define CDNS_SPI_ER_ENABLE 0x00000001 /* SPI Enable Bit Mask */ #define CDNS_SPI_ER_DISABLE 0x0 /* SPI Disable Bit Mask */ /* SPI FIFO depth in bytes */ #define CDNS_SPI_FIFO_DEPTH 128 /* Default number of chip select lines */ #define CDNS_SPI_DEFAULT_NUM_CS 4 /** * struct cdns_spi - This definition defines spi driver instance * @regs: Virtual address of the SPI controller registers * @ref_clk: Pointer to the peripheral clock * @pclk: Pointer to the APB clock * @speed_hz: Current SPI bus clock speed in Hz * @txbuf: Pointer to the TX buffer * @rxbuf: Pointer to the RX buffer * @tx_bytes: Number of bytes left to transfer * @rx_bytes: Number of bytes requested * @dev_busy: Device busy flag * @is_decoded_cs: Flag for decoder property set or not */ struct cdns_spi { void __iomem *regs; struct clk *ref_clk; struct clk *pclk; u32 speed_hz; const u8 *txbuf; u8 *rxbuf; int tx_bytes; int rx_bytes; u8 dev_busy; u32 is_decoded_cs; }; /* Macros for the SPI controller read/write */ static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset) { return readl_relaxed(xspi->regs + offset); } static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val) { writel_relaxed(val, xspi->regs + offset); } /** * cdns_spi_init_hw - Initialize the hardware and configure the SPI controller * @xspi: Pointer to the cdns_spi structure * * On reset the SPI controller is configured to be in master mode, baud rate * divisor is set to 4, threshold value for TX FIFO not full interrupt is set * to 1 and 
size of the word to be transferred as 8 bit. * This function initializes the SPI controller to disable and clear all the * interrupts, enable manual slave select and manual start, deselect all the * chip select lines, and enable the SPI controller. */ static void cdns_spi_init_hw(struct cdns_spi *xspi) { u32 ctrl_reg = CDNS_SPI_CR_DEFAULT; if (xspi->is_decoded_cs) ctrl_reg |= CDNS_SPI_CR_PERI_SEL; cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_ALL); /* Clear the RX FIFO */ while (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_RXNEMTY) cdns_spi_read(xspi, CDNS_SPI_RXD); cdns_spi_write(xspi, CDNS_SPI_ISR, CDNS_SPI_IXR_ALL); cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg); cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE); } /** * cdns_spi_chipselect - Select or deselect the chip select line * @spi: Pointer to the spi_device structure * @is_high: Select(0) or deselect (1) the chip select line */ static void cdns_spi_chipselect(struct spi_device *spi, bool is_high) { struct cdns_spi *xspi = spi_master_get_devdata(spi->master); u32 ctrl_reg; ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); if (is_high) { /* Deselect the slave */ ctrl_reg |= CDNS_SPI_CR_SSCTRL; } else { /* Select the slave */ ctrl_reg &= ~CDNS_SPI_CR_SSCTRL; if (!(xspi->is_decoded_cs)) ctrl_reg |= ((~(CDNS_SPI_SS0 << spi->chip_select)) << CDNS_SPI_SS_SHIFT) & CDNS_SPI_CR_SSCTRL; else ctrl_reg |= (spi->chip_select << CDNS_SPI_SS_SHIFT) & CDNS_SPI_CR_SSCTRL; } cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg); } /** * cdns_spi_config_clock_mode - Sets clock polarity and phase * @spi: Pointer to the spi_device structure * * Sets the requested clock polarity and phase. 
*/ static void cdns_spi_config_clock_mode(struct spi_device *spi) { struct cdns_spi *xspi = spi_master_get_devdata(spi->master); u32 ctrl_reg, new_ctrl_reg; new_ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); ctrl_reg = new_ctrl_reg; /* Set the SPI clock phase and clock polarity */ new_ctrl_reg &= ~(CDNS_SPI_CR_CPHA | CDNS_SPI_CR_CPOL); if (spi->mode & SPI_CPHA) new_ctrl_reg |= CDNS_SPI_CR_CPHA; if (spi->mode & SPI_CPOL) new_ctrl_reg |= CDNS_SPI_CR_CPOL; if (new_ctrl_reg != ctrl_reg) { /* * Just writing the CR register does not seem to apply the clock * setting changes. This is problematic when changing the clock * polarity as it will cause the SPI slave to see spurious clock * transitions. To workaround the issue toggle the ER register. */ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); cdns_spi_write(xspi, CDNS_SPI_CR, new_ctrl_reg); cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE); } } /** * cdns_spi_config_clock_freq - Sets clock frequency * @spi: Pointer to the spi_device structure * @transfer: Pointer to the spi_transfer structure which provides * information about next transfer setup parameters * * Sets the requested clock frequency. * Note: If the requested frequency is not an exact match with what can be * obtained using the prescalar value the driver sets the clock frequency which * is lower than the requested frequency (maximum lower) for the transfer. If * the requested frequency is higher or lower than that is supported by the SPI * controller the driver will set the highest or lowest frequency supported by * controller. 
*/ static void cdns_spi_config_clock_freq(struct spi_device *spi, struct spi_transfer *transfer) { struct cdns_spi *xspi = spi_master_get_devdata(spi->master); u32 ctrl_reg, baud_rate_val; unsigned long frequency; frequency = clk_get_rate(xspi->ref_clk); ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); /* Set the clock frequency */ if (xspi->speed_hz != transfer->speed_hz) { /* first valid value is 1 */ baud_rate_val = CDNS_SPI_BAUD_DIV_MIN; while ((baud_rate_val < CDNS_SPI_BAUD_DIV_MAX) && (frequency / (2 << baud_rate_val)) > transfer->speed_hz) baud_rate_val++; ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV; ctrl_reg |= baud_rate_val << CDNS_SPI_BAUD_DIV_SHIFT; xspi->speed_hz = frequency / (2 << baud_rate_val); } cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg); } /** * cdns_spi_setup_transfer - Configure SPI controller for specified transfer * @spi: Pointer to the spi_device structure * @transfer: Pointer to the spi_transfer structure which provides * information about next transfer setup parameters * * Sets the operational mode of SPI controller for the next SPI transfer and * sets the requested clock frequency. 
* * Return: Always 0 */ static int cdns_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *transfer) { struct cdns_spi *xspi = spi_master_get_devdata(spi->master); cdns_spi_config_clock_freq(spi, transfer); dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u clock speed\n", __func__, spi->mode, spi->bits_per_word, xspi->speed_hz); return 0; } /** * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible * @xspi: Pointer to the cdns_spi structure */ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi) { unsigned long trans_cnt = 0; while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) && (xspi->tx_bytes > 0)) { if (xspi->txbuf) cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); else cdns_spi_write(xspi, CDNS_SPI_TXD, 0); xspi->tx_bytes--; trans_cnt++; } } /** * cdns_spi_irq - Interrupt service routine of the SPI controller * @irq: IRQ number * @dev_id: Pointer to the xspi structure * * This function handles TX empty and Mode Fault interrupts only. * On TX empty interrupt this function reads the received data from RX FIFO and * fills the TX FIFO if there is any data remaining to be transferred. * On Mode Fault interrupt this function indicates that transfer is completed, * the SPI subsystem will identify the error as the remaining bytes to be * transferred is non-zero. * * Return: IRQ_HANDLED when handled; IRQ_NONE otherwise. 
 */
static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct cdns_spi *xspi = spi_master_get_devdata(master);
	u32 intr_status, status;

	status = IRQ_NONE;
	intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR);
	/* Acknowledge exactly the status bits we just sampled */
	cdns_spi_write(xspi, CDNS_SPI_ISR, intr_status);

	if (intr_status & CDNS_SPI_IXR_MODF) {
		/* Indicate that transfer is completed, the SPI subsystem will
		 * identify the error as the remaining bytes to be
		 * transferred is non-zero
		 */
		cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_DEFAULT);
		spi_finalize_current_transfer(master);
		status = IRQ_HANDLED;
	} else if (intr_status & CDNS_SPI_IXR_TXOW) {
		unsigned long trans_cnt;

		/*
		 * Bytes already pushed to TX minus bytes still to send is
		 * the number of bytes now waiting in the RX FIFO.
		 */
		trans_cnt = xspi->rx_bytes - xspi->tx_bytes;

		/* Read out the data from the RX FIFO */
		while (trans_cnt) {
			u8 data;

			data = cdns_spi_read(xspi, CDNS_SPI_RXD);
			if (xspi->rxbuf)
				*xspi->rxbuf++ = data;

			xspi->rx_bytes--;
			trans_cnt--;
		}

		if (xspi->tx_bytes) {
			/* There is more data to send */
			cdns_spi_fill_tx_fifo(xspi);
		} else {
			/* Transfer is completed */
			cdns_spi_write(xspi, CDNS_SPI_IDR,
				       CDNS_SPI_IXR_DEFAULT);
			spi_finalize_current_transfer(master);
		}
		status = IRQ_HANDLED;
	}

	return status;
}

/* Program clock polarity/phase for the slave addressed by this message */
static int cdns_prepare_message(struct spi_master *master,
				struct spi_message *msg)
{
	cdns_spi_config_clock_mode(msg->spi);
	return 0;
}

/**
 * cdns_transfer_one - Initiates the SPI transfer
 * @master:	Pointer to spi_master structure
 * @spi:	Pointer to the spi_device structure
 * @transfer:	Pointer to the spi_transfer structure which provides
 *		information about next transfer parameters
 *
 * This function fills the TX FIFO, starts the SPI transfer and
 * returns a positive transfer count so that core will wait for completion.
 *
 * Return:	Number of bytes transferred in the last transfer
 */
static int cdns_transfer_one(struct spi_master *master,
			     struct spi_device *spi,
			     struct spi_transfer *transfer)
{
	struct cdns_spi *xspi = spi_master_get_devdata(master);

	xspi->txbuf = transfer->tx_buf;
	xspi->rxbuf = transfer->rx_buf;
	xspi->tx_bytes = transfer->len;
	xspi->rx_bytes = transfer->len;

	cdns_spi_setup_transfer(spi, transfer);

	/* Prime the FIFO; the TX-empty interrupt drives the rest */
	cdns_spi_fill_tx_fifo(xspi);

	cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
	return transfer->len;
}

/**
 * cdns_prepare_transfer_hardware - Prepares hardware for transfer.
 * @master:	Pointer to the spi_master structure which provides
 *		information about the controller.
 *
 * This function enables SPI master controller.
 *
 * Return:	0 always
 */
static int cdns_prepare_transfer_hardware(struct spi_master *master)
{
	struct cdns_spi *xspi = spi_master_get_devdata(master);

	cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);

	return 0;
}

/**
 * cdns_unprepare_transfer_hardware - Relaxes hardware after transfer
 * @master:	Pointer to the spi_master structure which provides
 *		information about the controller.
 *
 * This function disables the SPI master controller.
 *
 * Return:	0 always
 */
static int cdns_unprepare_transfer_hardware(struct spi_master *master)
{
	struct cdns_spi *xspi = spi_master_get_devdata(master);

	cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);

	return 0;
}

/**
 * cdns_spi_probe - Probe method for the SPI driver
 * @pdev:	Pointer to the platform_device structure
 *
 * This function initializes the driver data structures and the hardware.
* * Return: 0 on success and error value on error */ static int cdns_spi_probe(struct platform_device *pdev) { int ret = 0, irq; struct spi_master *master; struct cdns_spi *xspi; struct resource *res; u32 num_cs; master = spi_alloc_master(&pdev->dev, sizeof(*xspi)); if (!master) return -ENOMEM; xspi = spi_master_get_devdata(master); master->dev.of_node = pdev->dev.of_node; platform_set_drvdata(pdev, master); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); xspi->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(xspi->regs)) { ret = PTR_ERR(xspi->regs); goto remove_master; } xspi->pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(xspi->pclk)) { dev_err(&pdev->dev, "pclk clock not found.\n"); ret = PTR_ERR(xspi->pclk); goto remove_master; } xspi->ref_clk = devm_clk_get(&pdev->dev, "ref_clk"); if (IS_ERR(xspi->ref_clk)) { dev_err(&pdev->dev, "ref_clk clock not found.\n"); ret = PTR_ERR(xspi->ref_clk); goto remove_master; } ret = clk_prepare_enable(xspi->pclk); if (ret) { dev_err(&pdev->dev, "Unable to enable APB clock.\n"); goto remove_master; } ret = clk_prepare_enable(xspi->ref_clk); if (ret) { dev_err(&pdev->dev, "Unable to enable device clock.\n"); goto clk_dis_apb; } pm_runtime_enable(&pdev->dev); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); pm_runtime_set_active(&pdev->dev); ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs); if (ret < 0) master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS; else master->num_chipselect = num_cs; ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs", &xspi->is_decoded_cs); if (ret < 0) xspi->is_decoded_cs = 0; /* SPI controller initializations */ cdns_spi_init_hw(xspi); pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); irq = platform_get_irq(pdev, 0); if (irq <= 0) { ret = -ENXIO; dev_err(&pdev->dev, "irq number is invalid\n"); goto clk_dis_all; } ret = devm_request_irq(&pdev->dev, irq, cdns_spi_irq, 0, 
pdev->name, master); if (ret != 0) { ret = -ENXIO; dev_err(&pdev->dev, "request_irq failed\n"); goto clk_dis_all; } master->prepare_transfer_hardware = cdns_prepare_transfer_hardware; master->prepare_message = cdns_prepare_message; master->transfer_one = cdns_transfer_one; master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware; master->set_cs = cdns_spi_chipselect; master->auto_runtime_pm = true; master->mode_bits = SPI_CPOL | SPI_CPHA; /* Set to default valid value */ master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4; xspi->speed_hz = master->max_speed_hz; master->bits_per_word_mask = SPI_BPW_MASK(8); ret = spi_register_master(master); if (ret) { dev_err(&pdev->dev, "spi_register_master failed\n"); goto clk_dis_all; } return ret; clk_dis_all: pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); clk_disable_unprepare(xspi->ref_clk); clk_dis_apb: clk_disable_unprepare(xspi->pclk); remove_master: spi_master_put(master); return ret; } /** * cdns_spi_remove - Remove method for the SPI driver * @pdev: Pointer to the platform_device structure * * This function is called if a device is physically removed from the system or * if the driver module is being unloaded. It frees all resources allocated to * the device. 
* * Return: 0 on success and error value on error */ static int cdns_spi_remove(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); struct cdns_spi *xspi = spi_master_get_devdata(master); cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); clk_disable_unprepare(xspi->ref_clk); clk_disable_unprepare(xspi->pclk); pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); spi_unregister_master(master); return 0; } /** * cdns_spi_suspend - Suspend method for the SPI driver * @dev: Address of the platform_device structure * * This function disables the SPI controller and * changes the driver state to "suspend" * * Return: 0 on success and error value on error */ static int __maybe_unused cdns_spi_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct spi_master *master = platform_get_drvdata(pdev); return spi_master_suspend(master); } /** * cdns_spi_resume - Resume method for the SPI driver * @dev: Address of the platform_device structure * * This function changes the driver state to "ready" * * Return: 0 on success and error value on error */ static int __maybe_unused cdns_spi_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct spi_master *master = platform_get_drvdata(pdev); return spi_master_resume(master); } /** * cdns_spi_runtime_resume - Runtime resume method for the SPI driver * @dev: Address of the platform_device structure * * This function enables the clocks * * Return: 0 on success and error value on error */ static int __maybe_unused cnds_runtime_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct cdns_spi *xspi = spi_master_get_devdata(master); int ret; ret = clk_prepare_enable(xspi->pclk); if (ret) { dev_err(dev, "Cannot enable APB clock.\n"); return ret; } ret = clk_prepare_enable(xspi->ref_clk); if (ret) { dev_err(dev, "Cannot enable device clock.\n"); clk_disable(xspi->pclk); return ret; } 
return 0; } /** * cdns_spi_runtime_suspend - Runtime suspend method for the SPI driver * @dev: Address of the platform_device structure * * This function disables the clocks * * Return: Always 0 */ static int __maybe_unused cnds_runtime_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct cdns_spi *xspi = spi_master_get_devdata(master); clk_disable_unprepare(xspi->ref_clk); clk_disable_unprepare(xspi->pclk); return 0; } static const struct dev_pm_ops cdns_spi_dev_pm_ops = { SET_RUNTIME_PM_OPS(cnds_runtime_suspend, cnds_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(cdns_spi_suspend, cdns_spi_resume) }; static const struct of_device_id cdns_spi_of_match[] = { { .compatible = "xlnx,zynq-spi-r1p6" }, { .compatible = "cdns,spi-r1p6" }, { /* end of table */ } }; MODULE_DEVICE_TABLE(of, cdns_spi_of_match); /* cdns_spi_driver - This structure defines the SPI subsystem platform driver */ static struct platform_driver cdns_spi_driver = { .probe = cdns_spi_probe, .remove = cdns_spi_remove, .driver = { .name = CDNS_SPI_NAME, .of_match_table = cdns_spi_of_match, .pm = &cdns_spi_dev_pm_ops, }, }; module_platform_driver(cdns_spi_driver); MODULE_AUTHOR("Xilinx, Inc."); MODULE_DESCRIPTION("Cadence SPI driver"); MODULE_LICENSE("GPL");
gpl-2.0
jwhitham/ppc_linux
drivers/input/keyboard/samsung-keypad.c
229
15570
/*
 * Samsung keypad driver
 *
 * Copyright (C) 2010 Samsung Electronics Co.Ltd
 * Author: Joonyoung Shim <jy0922.shim@samsung.com>
 * Author: Donghwa Lee <dh09.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/input/samsung-keypad.h>

/* Keypad interface register offsets */
#define SAMSUNG_KEYIFCON			0x00
#define SAMSUNG_KEYIFSTSCLR			0x04
#define SAMSUNG_KEYIFCOL			0x08
#define SAMSUNG_KEYIFROW			0x0c
#define SAMSUNG_KEYIFFC				0x10

/* SAMSUNG_KEYIFCON */
#define SAMSUNG_KEYIFCON_INT_F_EN		(1 << 0)
#define SAMSUNG_KEYIFCON_INT_R_EN		(1 << 1)
#define SAMSUNG_KEYIFCON_DF_EN			(1 << 2)
#define SAMSUNG_KEYIFCON_FC_EN			(1 << 3)
#define SAMSUNG_KEYIFCON_WAKEUPEN		(1 << 4)

/* SAMSUNG_KEYIFSTSCLR */
#define SAMSUNG_KEYIFSTSCLR_P_INT_MASK		(0xff << 0)
#define SAMSUNG_KEYIFSTSCLR_R_INT_MASK		(0xff << 8)
#define SAMSUNG_KEYIFSTSCLR_R_INT_OFFSET	8
#define S5PV210_KEYIFSTSCLR_P_INT_MASK		(0x3fff << 0)
#define S5PV210_KEYIFSTSCLR_R_INT_MASK		(0x3fff << 16)
#define S5PV210_KEYIFSTSCLR_R_INT_OFFSET	16

/* SAMSUNG_KEYIFCOL */
#define SAMSUNG_KEYIFCOL_MASK			(0xff << 0)
#define S5PV210_KEYIFCOLEN_MASK			(0xff << 8)

/* SAMSUNG_KEYIFROW */
#define SAMSUNG_KEYIFROW_MASK			(0xff << 0)
#define S5PV210_KEYIFROW_MASK			(0x3fff << 0)

/* SAMSUNG_KEYIFFC */
#define SAMSUNG_KEYIFFC_MASK			(0x3ff << 0)

enum samsung_keypad_type {
	KEYPAD_TYPE_SAMSUNG,
	KEYPAD_TYPE_S5PV210,
};

struct samsung_keypad {
	struct input_dev *input_dev;
	struct platform_device *pdev;
	struct clk *clk;
	void __iomem *base;
	wait_queue_head_t wait;		/* woken by _stop() to end IRQ polling */
	bool stopped;			/* true when the device is closed */
	bool wake_enabled;
	int irq;
	enum samsung_keypad_type type;
	unsigned int row_shift;		/* log2(cols), for MATRIX_SCAN_CODE */
	unsigned int rows;
	unsigned int cols;
	unsigned int row_state[SAMSUNG_MAX_COLS];
	unsigned short keycodes[];	/* flexible keymap, sized in probe */
};

/*
 * Drive each column low in turn and sample the row lines; a pressed key
 * reads back as a 0 bit, so the result is inverted into row_state[].
 */
static void samsung_keypad_scan(struct samsung_keypad *keypad,
				unsigned int *row_state)
{
	unsigned int col;
	unsigned int val;

	for (col = 0; col < keypad->cols; col++) {
		if (keypad->type == KEYPAD_TYPE_S5PV210) {
			/* S5PV210 keeps its column-enable bits at [15:8] */
			val = S5PV210_KEYIFCOLEN_MASK;
			val &= ~(1 << col) << 8;
		} else {
			val = SAMSUNG_KEYIFCOL_MASK;
			val &= ~(1 << col);
		}

		writel(val, keypad->base + SAMSUNG_KEYIFCOL);
		mdelay(1);

		val = readl(keypad->base + SAMSUNG_KEYIFROW);
		row_state[col] = ~val & ((1 << keypad->rows) - 1);
	}

	/* KEYIFCOL reg clear */
	writel(0, keypad->base + SAMSUNG_KEYIFCOL);
}

/*
 * Diff the new scan against the cached state and emit press/release
 * events for every changed key.  Returns true while any key is down,
 * which keeps the IRQ thread polling.
 */
static bool samsung_keypad_report(struct samsung_keypad *keypad,
				  unsigned int *row_state)
{
	struct input_dev *input_dev = keypad->input_dev;
	unsigned int changed;
	unsigned int pressed;
	unsigned int key_down = 0;
	unsigned int val;
	unsigned int col, row;

	for (col = 0; col < keypad->cols; col++) {
		changed = row_state[col] ^ keypad->row_state[col];
		key_down |= row_state[col];
		if (!changed)
			continue;

		for (row = 0; row < keypad->rows; row++) {
			if (!(changed & (1 << row)))
				continue;

			pressed = row_state[col] & (1 << row);

			dev_dbg(&keypad->input_dev->dev,
				"key %s, row: %d, col: %d\n",
				pressed ? "pressed" : "released", row, col);

			val = MATRIX_SCAN_CODE(row, col, keypad->row_shift);

			input_event(input_dev, EV_MSC, MSC_SCAN, val);
			input_report_key(input_dev,
					keypad->keycodes[val], pressed);
		}
		input_sync(keypad->input_dev);
	}

	memcpy(keypad->row_state, row_state, sizeof(keypad->row_state));

	return key_down;
}

/*
 * Threaded IRQ handler: clears the interrupt, then scans/reports in a
 * loop (with a 50 ms wait) for as long as any key remains pressed,
 * since releases do not reliably raise a fresh interrupt.
 */
static irqreturn_t samsung_keypad_irq(int irq, void *dev_id)
{
	struct samsung_keypad *keypad = dev_id;
	unsigned int row_state[SAMSUNG_MAX_COLS];
	unsigned int val;
	bool key_down;

	pm_runtime_get_sync(&keypad->pdev->dev);

	do {
		/*
		 * NOTE(review): val is read but never used afterwards;
		 * presumably the read is only needed to latch status
		 * before the write-to-clear below - confirm against the
		 * keypad IP documentation before removing it.
		 */
		val = readl(keypad->base + SAMSUNG_KEYIFSTSCLR);
		/* Clear interrupt. */
		writel(~0x0, keypad->base + SAMSUNG_KEYIFSTSCLR);

		samsung_keypad_scan(keypad, row_state);

		key_down = samsung_keypad_report(keypad, row_state);
		if (key_down)
			wait_event_timeout(keypad->wait, keypad->stopped,
					   msecs_to_jiffies(50));

	} while (key_down && !keypad->stopped);

	pm_runtime_put(&keypad->pdev->dev);

	return IRQ_HANDLED;
}

/* Enable scanning: ungate the clock and unmask rise/fall interrupts. */
static void samsung_keypad_start(struct samsung_keypad *keypad)
{
	unsigned int val;

	pm_runtime_get_sync(&keypad->pdev->dev);

	/* Tell IRQ thread that it may poll the device. */
	keypad->stopped = false;

	clk_enable(keypad->clk);

	/* Enable interrupt bits. */
	val = readl(keypad->base + SAMSUNG_KEYIFCON);
	val |= SAMSUNG_KEYIFCON_INT_F_EN | SAMSUNG_KEYIFCON_INT_R_EN;
	writel(val, keypad->base + SAMSUNG_KEYIFCON);

	/* KEYIFCOL reg clear. */
	writel(0, keypad->base + SAMSUNG_KEYIFCOL);

	pm_runtime_put(&keypad->pdev->dev);
}

/* Quiesce the device and make sure the IRQ thread has stopped polling. */
static void samsung_keypad_stop(struct samsung_keypad *keypad)
{
	unsigned int val;

	pm_runtime_get_sync(&keypad->pdev->dev);

	/* Signal IRQ thread to stop polling and disable the handler. */
	keypad->stopped = true;
	wake_up(&keypad->wait);
	disable_irq(keypad->irq);

	/* Clear interrupt. */
	writel(~0x0, keypad->base + SAMSUNG_KEYIFSTSCLR);

	/* Disable interrupt bits. */
	val = readl(keypad->base + SAMSUNG_KEYIFCON);
	val &= ~(SAMSUNG_KEYIFCON_INT_F_EN | SAMSUNG_KEYIFCON_INT_R_EN);
	writel(val, keypad->base + SAMSUNG_KEYIFCON);

	clk_disable(keypad->clk);

	/*
	 * Now that chip should not generate interrupts we can safely
	 * re-enable the handler.
	 */
	enable_irq(keypad->irq);

	pm_runtime_put(&keypad->pdev->dev);
}

/* input_dev open callback */
static int samsung_keypad_open(struct input_dev *input_dev)
{
	struct samsung_keypad *keypad = input_get_drvdata(input_dev);

	samsung_keypad_start(keypad);

	return 0;
}

/* input_dev close callback */
static void samsung_keypad_close(struct input_dev *input_dev)
{
	struct samsung_keypad *keypad = input_get_drvdata(input_dev);

	samsung_keypad_stop(keypad);
}

#ifdef CONFIG_OF
/*
 * Build platform data from the device tree.  All allocations are
 * devm-managed; probe() frees them again once the keymap has been
 * copied into the input core.  Returns NULL on any parse failure.
 */
static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
				struct device *dev)
{
	struct samsung_keypad_platdata *pdata;
	struct matrix_keymap_data *keymap_data;
	uint32_t *keymap, num_rows = 0, num_cols = 0;
	struct device_node *np = dev->of_node, *key_np;
	unsigned int key_count;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for platform data\n");
		return NULL;
	}

	of_property_read_u32(np, "samsung,keypad-num-rows", &num_rows);
	of_property_read_u32(np, "samsung,keypad-num-columns", &num_cols);
	if (!num_rows || !num_cols) {
		dev_err(dev, "number of keypad rows/columns not specified\n");
		return NULL;
	}
	pdata->rows = num_rows;
	pdata->cols = num_cols;

	keymap_data = devm_kzalloc(dev, sizeof(*keymap_data), GFP_KERNEL);
	if (!keymap_data) {
		dev_err(dev, "could not allocate memory for keymap data\n");
		return NULL;
	}
	pdata->keymap_data = keymap_data;

	/*
	 * NOTE(review): key_count may be 0 here (no child nodes), which
	 * makes this a zero-length allocation - consider rejecting DTs
	 * without key entries.
	 */
	key_count = of_get_child_count(np);
	keymap_data->keymap_size = key_count;
	keymap = devm_kzalloc(dev, sizeof(uint32_t) * key_count, GFP_KERNEL);
	if (!keymap) {
		dev_err(dev, "could not allocate memory for keymap\n");
		return NULL;
	}
	keymap_data->keymap = keymap;

	for_each_child_of_node(np, key_np) {
		u32 row, col, key_code;
		of_property_read_u32(key_np, "keypad,row", &row);
		of_property_read_u32(key_np, "keypad,column", &col);
		of_property_read_u32(key_np, "linux,code", &key_code);
		*keymap++ = KEY(row, col, key_code);
	}

	if (of_get_property(np, "linux,input-no-autorepeat", NULL))
		pdata->no_autorepeat = true;

	if (of_get_property(np, "linux,input-wakeup", NULL))
		pdata->wakeup = true;

	return pdata;
}
#else
static struct samsung_keypad_platdata *samsung_keypad_parse_dt(struct device *dev)
{
	return NULL;
}
#endif

static int samsung_keypad_probe(struct platform_device *pdev)
{
	const struct samsung_keypad_platdata *pdata;
	const struct matrix_keymap_data *keymap_data;
	struct samsung_keypad *keypad;
	struct resource *res;
	struct input_dev *input_dev;
	unsigned int row_shift;
	unsigned int keymap_size;
	int error;

	if (pdev->dev.of_node)
		pdata = samsung_keypad_parse_dt(&pdev->dev);
	else
		pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -EINVAL;
	}

	keymap_data = pdata->keymap_data;
	if (!keymap_data) {
		dev_err(&pdev->dev, "no keymap data defined\n");
		return -EINVAL;
	}

	if (!pdata->rows || pdata->rows > SAMSUNG_MAX_ROWS)
		return -EINVAL;

	if (!pdata->cols || pdata->cols > SAMSUNG_MAX_COLS)
		return -EINVAL;

	/* initialize the gpio */
	if (pdata->cfg_gpio)
		pdata->cfg_gpio(pdata->rows, pdata->cols);

	row_shift = get_count_order(pdata->cols);
	keymap_size = (pdata->rows << row_shift) * sizeof(keypad->keycodes[0]);

	keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad) + keymap_size,
			      GFP_KERNEL);
	input_dev = devm_input_allocate_device(&pdev->dev);
	if (!keypad || !input_dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	keypad->base = devm_ioremap(&pdev->dev, res->start,
				    resource_size(res));
	if (!keypad->base)
		return -EBUSY;

	keypad->clk = devm_clk_get(&pdev->dev, "keypad");
	if (IS_ERR(keypad->clk)) {
		dev_err(&pdev->dev, "failed to get keypad clk\n");
		return PTR_ERR(keypad->clk);
	}

	error = clk_prepare(keypad->clk);
	if (error) {
		dev_err(&pdev->dev, "keypad clock prepare failed\n");
		return error;
	}

	keypad->input_dev = input_dev;
	keypad->pdev = pdev;
	keypad->row_shift = row_shift;
	keypad->rows = pdata->rows;
	keypad->cols = pdata->cols;
	keypad->stopped = true;
	init_waitqueue_head(&keypad->wait);

	/*
	 * NOTE(review): of_device_is_compatible() returns a boolean that
	 * is assigned to the enum here; this relies on
	 * KEYPAD_TYPE_S5PV210 == 1 - confirm before reordering the enum.
	 */
	if (pdev->dev.of_node)
		keypad->type = of_device_is_compatible(pdev->dev.of_node,
					"samsung,s5pv210-keypad");
	else
		keypad->type = platform_get_device_id(pdev)->driver_data;

	input_dev->name = pdev->name;
	input_dev->id.bustype = BUS_HOST;
	input_dev->dev.parent = &pdev->dev;

	input_dev->open = samsung_keypad_open;
	input_dev->close = samsung_keypad_close;

	error = matrix_keypad_build_keymap(keymap_data, NULL,
					   pdata->rows, pdata->cols,
					   keypad->keycodes, input_dev);
	if (error) {
		dev_err(&pdev->dev, "failed to build keymap\n");
		goto err_unprepare_clk;
	}

	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
	if (!pdata->no_autorepeat)
		__set_bit(EV_REP, input_dev->evbit);

	input_set_drvdata(input_dev, keypad);

	keypad->irq = platform_get_irq(pdev, 0);
	if (keypad->irq < 0) {
		error = keypad->irq;
		goto err_unprepare_clk;
	}

	error = devm_request_threaded_irq(&pdev->dev, keypad->irq, NULL,
					  samsung_keypad_irq, IRQF_ONESHOT,
					  dev_name(&pdev->dev), keypad);
	if (error) {
		dev_err(&pdev->dev, "failed to register keypad interrupt\n");
		goto err_unprepare_clk;
	}

	device_init_wakeup(&pdev->dev, pdata->wakeup);
	platform_set_drvdata(pdev, keypad);
	pm_runtime_enable(&pdev->dev);

	error = input_register_device(keypad->input_dev);
	if (error)
		goto err_disable_runtime_pm;

	/* The DT-derived platform data was copied by the input core */
	if (pdev->dev.of_node) {
		devm_kfree(&pdev->dev, (void *)pdata->keymap_data->keymap);
		devm_kfree(&pdev->dev, (void *)pdata->keymap_data);
		devm_kfree(&pdev->dev, (void *)pdata);
	}
	return 0;

err_disable_runtime_pm:
	pm_runtime_disable(&pdev->dev);
	device_init_wakeup(&pdev->dev, 0);
err_unprepare_clk:
	clk_unprepare(keypad->clk);
	return error;
}

static int samsung_keypad_remove(struct platform_device *pdev)
{
	struct samsung_keypad *keypad = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	device_init_wakeup(&pdev->dev, 0);

	input_unregister_device(keypad->input_dev);

	clk_unprepare(keypad->clk);

	return 0;
}

#ifdef CONFIG_PM_RUNTIME
/* Gate the clock while idle; arm the wakeup bit in case we sleep deeper. */
static int samsung_keypad_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct samsung_keypad *keypad = platform_get_drvdata(pdev);
	unsigned int val;
	int error;

	if (keypad->stopped)
		return 0;

	/* This may fail on some SoCs due to lack of controller support */
	error = enable_irq_wake(keypad->irq);
	if (!error)
		keypad->wake_enabled = true;

	val = readl(keypad->base + SAMSUNG_KEYIFCON);
	val |= SAMSUNG_KEYIFCON_WAKEUPEN;
	writel(val, keypad->base + SAMSUNG_KEYIFCON);

	clk_disable(keypad->clk);

	return 0;
}

static int samsung_keypad_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct samsung_keypad *keypad = platform_get_drvdata(pdev);
	unsigned int val;

	if (keypad->stopped)
		return 0;

	clk_enable(keypad->clk);

	val = readl(keypad->base + SAMSUNG_KEYIFCON);
	val &= ~SAMSUNG_KEYIFCON_WAKEUPEN;
	writel(val, keypad->base + SAMSUNG_KEYIFCON);

	if (keypad->wake_enabled)
		disable_irq_wake(keypad->irq);

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
/* Set or clear the hardware wakeup enable bit around system sleep. */
static void samsung_keypad_toggle_wakeup(struct samsung_keypad *keypad,
					 bool enable)
{
	unsigned int val;

	clk_enable(keypad->clk);

	val = readl(keypad->base + SAMSUNG_KEYIFCON);
	if (enable) {
		val |= SAMSUNG_KEYIFCON_WAKEUPEN;
		if (device_may_wakeup(&keypad->pdev->dev))
			enable_irq_wake(keypad->irq);
	} else {
		val &= ~SAMSUNG_KEYIFCON_WAKEUPEN;
		if (device_may_wakeup(&keypad->pdev->dev))
			disable_irq_wake(keypad->irq);
	}
	writel(val, keypad->base + SAMSUNG_KEYIFCON);

	clk_disable(keypad->clk);
}

static int samsung_keypad_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct samsung_keypad *keypad = platform_get_drvdata(pdev);
	struct input_dev *input_dev = keypad->input_dev;

	mutex_lock(&input_dev->mutex);

	if (input_dev->users)
		samsung_keypad_stop(keypad);

	samsung_keypad_toggle_wakeup(keypad, true);

	mutex_unlock(&input_dev->mutex);

	return 0;
}

static int samsung_keypad_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct samsung_keypad *keypad = platform_get_drvdata(pdev);
	struct input_dev *input_dev = keypad->input_dev;

	mutex_lock(&input_dev->mutex);

	samsung_keypad_toggle_wakeup(keypad, false);

	if (input_dev->users)
		samsung_keypad_start(keypad);

	mutex_unlock(&input_dev->mutex);

	return 0;
}
#endif

static const struct dev_pm_ops samsung_keypad_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(samsung_keypad_suspend, samsung_keypad_resume)
	SET_RUNTIME_PM_OPS(samsung_keypad_runtime_suspend,
			   samsung_keypad_runtime_resume, NULL)
};

#ifdef CONFIG_OF
static const struct of_device_id samsung_keypad_dt_match[] = {
	{ .compatible = "samsung,s3c6410-keypad" },
	{ .compatible = "samsung,s5pv210-keypad" },
	{},
};
MODULE_DEVICE_TABLE(of, samsung_keypad_dt_match);
#endif

static struct platform_device_id samsung_keypad_driver_ids[] = {
	{
		.name		= "samsung-keypad",
		.driver_data	= KEYPAD_TYPE_SAMSUNG,
	}, {
		.name		= "s5pv210-keypad",
		.driver_data	= KEYPAD_TYPE_S5PV210,
	},
	{ },
};
MODULE_DEVICE_TABLE(platform, samsung_keypad_driver_ids);

static struct platform_driver samsung_keypad_driver = {
	.probe		= samsung_keypad_probe,
	.remove		= samsung_keypad_remove,
	.driver		= {
		.name	= "samsung-keypad",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(samsung_keypad_dt_match),
		.pm	= &samsung_keypad_pm_ops,
	},
	.id_table	= samsung_keypad_driver_ids,
};
module_platform_driver(samsung_keypad_driver);

MODULE_DESCRIPTION("Samsung keypad driver");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
MODULE_LICENSE("GPL");
gpl-2.0
sultanqasim/android_kernel_alcatel_alto45
mm/percpu.c
997
58448
/* * mm/percpu.c - percpu memory allocator * * Copyright (C) 2009 SUSE Linux Products GmbH * Copyright (C) 2009 Tejun Heo <tj@kernel.org> * * This file is released under the GPLv2. * * This is percpu allocator which can handle both static and dynamic * areas. Percpu areas are allocated in chunks. Each chunk is * consisted of boot-time determined number of units and the first * chunk is used for static percpu variables in the kernel image * (special boot time alloc/init handling necessary as these areas * need to be brought up before allocation services are running). * Unit grows as necessary and all units grow or shrink in unison. * When a chunk is filled up, another chunk is allocated. * * c0 c1 c2 * ------------------- ------------------- ------------ * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u * ------------------- ...... ------------------- .... ------------ * * Allocation is done in offset-size areas of single unit space. Ie, * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0, * c1:u1, c1:u2 and c1:u3. On UMA, units corresponds directly to * cpus. On NUMA, the mapping can be non-linear and even sparse. * Percpu access can be done by configuring percpu base registers * according to cpu to unit mapping and pcpu_unit_size. * * There are usually many small percpu allocations many of them being * as small as 4 bytes. The allocator organizes chunks into lists * according to free size and tries to allocate from the fullest one. * Each chunk keeps the maximum contiguous area size hint which is * guaranteed to be equal to or larger than the maximum contiguous * area in the chunk. This helps the allocator not to iterate the * chunk maps unnecessarily. * * Allocation state in each chunk is kept using an array of integers * on chunk->map. A positive value in the map represents a free * region and negative allocated. Allocation inside a chunk is done * by scanning this map sequentially and serving the first matching * entry. 
This is mostly copied from the percpu_modalloc() allocator. * Chunks can be determined from the address using the index field * in the page struct. The index field contains a pointer to the chunk. * * To use this allocator, arch code should do the followings. * * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate * regular address to percpu pointer and back if they need to be * different from the default * * - use pcpu_setup_first_chunk() during percpu area initialization to * setup the first chunk containing the kernel static percpu area */ #include <linux/bitmap.h> #include <linux/bootmem.h> #include <linux/err.h> #include <linux/list.h> #include <linux/log2.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/percpu.h> #include <linux/pfn.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/kmemleak.h> #include <asm/cacheflush.h> #include <asm/sections.h> #include <asm/tlbflush.h> #include <asm/io.h> #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ #ifdef CONFIG_SMP /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ #ifndef __addr_to_pcpu_ptr #define __addr_to_pcpu_ptr(addr) \ (void __percpu *)((unsigned long)(addr) - \ (unsigned long)pcpu_base_addr + \ (unsigned long)__per_cpu_start) #endif #ifndef __pcpu_ptr_to_addr #define __pcpu_ptr_to_addr(ptr) \ (void __force *)((unsigned long)(ptr) + \ (unsigned long)pcpu_base_addr - \ (unsigned long)__per_cpu_start) #endif #else /* CONFIG_SMP */ /* on UP, it's always identity mapped */ #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr) #endif /* CONFIG_SMP */ struct pcpu_chunk { struct list_head list; /* linked to pcpu_slot lists */ int free_size; /* free bytes in the chunk */ int contig_hint; /* max contiguous size hint */ void 
*base_addr; /* base address of this chunk */ int map_used; /* # of map entries used */ int map_alloc; /* # of map entries allocated */ int *map; /* allocation map */ void *data; /* chunk data */ bool immutable; /* no [de]population allowed */ unsigned long populated[]; /* populated bitmap */ }; static int pcpu_unit_pages __read_mostly; static int pcpu_unit_size __read_mostly; static int pcpu_nr_units __read_mostly; static int pcpu_atom_size __read_mostly; static int pcpu_nr_slots __read_mostly; static size_t pcpu_chunk_struct_size __read_mostly; /* cpus with the lowest and highest unit addresses */ static unsigned int pcpu_low_unit_cpu __read_mostly; static unsigned int pcpu_high_unit_cpu __read_mostly; /* the address of the first chunk which starts with the kernel static area */ void *pcpu_base_addr __read_mostly; EXPORT_SYMBOL_GPL(pcpu_base_addr); static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */ const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */ /* group information, used for vm allocation */ static int pcpu_nr_groups __read_mostly; static const unsigned long *pcpu_group_offsets __read_mostly; static const size_t *pcpu_group_sizes __read_mostly; /* * The first chunk which always exists. Note that unlike other * chunks, this one can be allocated and mapped in several different * ways and thus often doesn't live in the vmalloc area. */ static struct pcpu_chunk *pcpu_first_chunk; /* * Optional reserved chunk. This chunk reserves part of the first * chunk and serves it for reserved allocations. The amount of * reserved offset is in pcpu_reserved_chunk_limit. When reserved * area doesn't exist, the following variables contain NULL and 0 * respectively. */ static struct pcpu_chunk *pcpu_reserved_chunk; static int pcpu_reserved_chunk_limit; /* * Synchronization rules. * * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former * protects allocation/reclaim paths, chunks, populated bitmap and * vmalloc mapping. 
 * The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 * general, percpu memory can't be allocated with irq off but
 * irqsave/restore are still used in alloc path so that it can be used
 * from early init path - sched_init() specifically.
 *
 * Free path accesses and alters only the index data structures, so it
 * can be safely called from atomic context.  When memory needs to be
 * returned to the system, free path schedules reclaim_work which
 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 * reclaimed, release both locks and frees the chunks.  Note that it's
 * necessary to grab both locks to remove a chunk from circulation as
 * allocation path might be referencing the chunk with only
 * pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

/* true iff @addr lies inside the first chunk's unit0 address range */
static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

/* true iff @addr lies inside the reserved region of the first chunk */
static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_reserved_chunk_limit;
}

/* map a free-size in bytes to a chunk-list slot index (log2 bucketing) */
static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

/* like __pcpu_size_to_slot() but fully free chunks go on the last slot */
static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

/*
 * Slot for @chunk given its current free space.  Chunks which can't
 * serve even a sizeof(int) allocation go to slot 0.
 */
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

/* global page index of @page_idx'th page of @cpu's unit */
static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

/* address of @page_idx'th page of @cpu's unit in @chunk */
static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

/* advance (*rs, *re) to the next unpopulated page region before @end */
static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

/* advance (*rs, *re) to the next populated page region before @end */
static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	/* slab must be up; early-boot callers should use bootmem instead */
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 * @size selects between kfree() and vfree() and therefore must match
 * the size passed at allocation time.
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	/* the reserved chunk is never circulated on the slot lists */
	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.  Two free entries are required as a
 * single allocation may split an existing block into up to three.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	/* grow in powers of two from the default size */
	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	/* allocate outside the lock; may sleep */
	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	/* someone else may have extended the map while we slept */
	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	/*
	 * Area map encoding: map[i] > 0 is a free block of that many
	 * bytes, map[i] < 0 is an allocated block of -map[i] bytes.
	 * @off tracks the byte offset of block @i within the unit.
	 */
	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				/* absorb head into the allocated block */
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
*/ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) { int oslot = pcpu_chunk_slot(chunk); int i, off; for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) if (off == freeme) break; BUG_ON(off != freeme); BUG_ON(chunk->map[i] > 0); chunk->map[i] = -chunk->map[i]; chunk->free_size += chunk->map[i]; /* merge with previous? */ if (i > 0 && chunk->map[i - 1] >= 0) { chunk->map[i - 1] += chunk->map[i]; chunk->map_used--; memmove(&chunk->map[i], &chunk->map[i + 1], (chunk->map_used - i) * sizeof(chunk->map[0])); i--; } /* merge with next? */ if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) { chunk->map[i] += chunk->map[i + 1]; chunk->map_used--; memmove(&chunk->map[i + 1], &chunk->map[i + 2], (chunk->map_used - (i + 1)) * sizeof(chunk->map[0])); } chunk->contig_hint = max(chunk->map[i], chunk->contig_hint); pcpu_chunk_relocate(chunk, oslot); } static struct pcpu_chunk *pcpu_alloc_chunk(void) { struct pcpu_chunk *chunk; chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size); if (!chunk) return NULL; chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); if (!chunk->map) { kfree(chunk); return NULL; } chunk->map_alloc = PCPU_DFL_MAP_ALLOC; chunk->map[chunk->map_used++] = pcpu_unit_size; INIT_LIST_HEAD(&chunk->list); chunk->free_size = pcpu_unit_size; chunk->contig_hint = pcpu_unit_size; return chunk; } static void pcpu_free_chunk(struct pcpu_chunk *chunk) { if (!chunk) return; pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0])); pcpu_mem_free(chunk, pcpu_chunk_struct_size); } /* * Chunk management implementation. * * To allow different implementations, chunk alloc/free and * [de]population are implemented in a separate file which is pulled * into this file and compiled together. The following functions * should be implemented. 
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to physical address
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

/* pick the backing implementation: kernel-memory or vmalloc based */
#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, new_alloc;
	unsigned long flags;
	void __percpu *ptr;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		/*
		 * Extension drops pcpu_lock, so retest the need in a
		 * loop until the map is large enough.
		 */
		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail_unlock_mutex;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail_unlock_mutex;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irqrestore(&pcpu_lock, flags);

	chunk = pcpu_create_chunk();
	if (!chunk) {
		err = "failed to allocate new chunk";
		goto fail_unlock_mutex;
	}

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_free_area(chunk, off);
		err = "failed to populate";
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size);
	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.
 * Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	/* both locks needed to remove chunks from circulation */
	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	/* actual teardown happens outside pcpu_lock; it may sleep */
	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		pcpu_destroy_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there are more than one fully free chunks, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.
 * Module static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		if ((void *)addr >= start && (void *)addr < start + static_size)
			return true;
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * percpu allocator has special setup for the first chunk, which currently
 * supports either embedding in linear address space or vmalloc mapping,
 * and, from the second one, the backing allocator (currently either vm or
 * km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into the
 * first chunk.  But the current code reflects better how percpu allocator
 * actually works, and the verification can discover both bugs in percpu
 * allocator itself and per_cpu_ptr_to_phys() callers.  So we keep current
 * code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
				     pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		/* first chunk may be linearly mapped or vmalloc mapped */
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	/* header + group array, aligned so cpu_map can follow directly */
	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	free_bootmem(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	/* compute decimal widths for group and cpu numbers */
	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				printk(KERN_CONT "\n");
				printk("%spcpu-alloc: ", lvl);
			}
			printk(KERN_CONT "[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					printk(KERN_CONT "%0*d ", cpu_width,
					       gi->cpu_map[unit]);
				else
					printk(KERN_CONT "%s ", empty_str);
		}
	}
	printk(KERN_CONT "\n");
}

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
* * @ai contains all information necessary to initialize the first * chunk and prime the dynamic percpu allocator. * * @ai->static_size is the size of static percpu area. * * @ai->reserved_size, if non-zero, specifies the amount of bytes to * reserve after the static area in the first chunk. This reserves * the first chunk such that it's available only through reserved * percpu allocation. This is primarily used to serve module percpu * static areas on architectures where the addressing model has * limited offset range for symbol relocations to guarantee module * percpu symbols fall inside the relocatable range. * * @ai->dyn_size determines the number of bytes available for dynamic * allocation in the first chunk. The area between @ai->static_size + * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. * * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE * and equal to or larger than @ai->static_size + @ai->reserved_size + * @ai->dyn_size. * * @ai->atom_size is the allocation atom size and used as alignment * for vm areas. * * @ai->alloc_size is the allocation size and always multiple of * @ai->atom_size. This is larger than @ai->atom_size if * @ai->unit_size is larger than @ai->atom_size. * * @ai->nr_groups and @ai->groups describe virtual memory layout of * percpu areas. Units which should be colocated are put into the * same group. Dynamic VM areas will be allocated according to these * groupings. If @ai->nr_groups is zero, a single group containing * all units is assumed. * * The caller should have mapped the first chunk at @base_addr and * copied static data to each unit. * * If the first chunk ends up with both reserved and dynamic areas, it * is served by two chunks - one to serve the core static and reserved * areas and the other for the dynamic area. They share the same vm * and page map but uses different area allocation map to stay away * from each other. 
The latter chunk is circulated in the chunk slots * and available for dynamic allocation like any other chunks. * * RETURNS: * 0 on success, -errno on failure. */ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, void *base_addr) { static char cpus_buf[4096] __initdata; static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; size_t dyn_size = ai->dyn_size; size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; struct pcpu_chunk *schunk, *dchunk = NULL; unsigned long *group_offsets; size_t *group_sizes; unsigned long *unit_off; unsigned int cpu; int *unit_map; int group, unit, i; cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask); #define PCPU_SETUP_BUG_ON(cond) do { \ if (unlikely(cond)) { \ pr_emerg("PERCPU: failed to initialize, %s", #cond); \ pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \ pcpu_dump_alloc_info(KERN_EMERG, ai); \ BUG(); \ } \ } while (0) /* sanity checks */ PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); #ifdef CONFIG_SMP PCPU_SETUP_BUG_ON(!ai->static_size); PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK); #endif PCPU_SETUP_BUG_ON(!base_addr); PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK); PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); /* process group information and build config tables accordingly */ group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0])); group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0])); unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0])); unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0])); for (cpu = 0; cpu < nr_cpu_ids; cpu++) unit_map[cpu] = UINT_MAX; pcpu_low_unit_cpu = NR_CPUS; pcpu_high_unit_cpu = NR_CPUS; for (group = 0, unit = 0; group < 
ai->nr_groups; group++, unit += i) { const struct pcpu_group_info *gi = &ai->groups[group]; group_offsets[group] = gi->base_offset; group_sizes[group] = gi->nr_units * ai->unit_size; for (i = 0; i < gi->nr_units; i++) { cpu = gi->cpu_map[i]; if (cpu == NR_CPUS) continue; PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids); PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); unit_map[cpu] = unit + i; unit_off[cpu] = gi->base_offset + i * ai->unit_size; /* determine low/high unit_cpu */ if (pcpu_low_unit_cpu == NR_CPUS || unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) pcpu_low_unit_cpu = cpu; if (pcpu_high_unit_cpu == NR_CPUS || unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) pcpu_high_unit_cpu = cpu; } } pcpu_nr_units = unit; for_each_possible_cpu(cpu) PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); /* we're done parsing the input, undefine BUG macro and dump config */ #undef PCPU_SETUP_BUG_ON pcpu_dump_alloc_info(KERN_DEBUG, ai); pcpu_nr_groups = ai->nr_groups; pcpu_group_offsets = group_offsets; pcpu_group_sizes = group_sizes; pcpu_unit_map = unit_map; pcpu_unit_offsets = unit_off; /* determine basic parameters */ pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; pcpu_atom_size = ai->atom_size; pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); /* * Allocate chunk slots. The additional last slot is for * empty chunks. */ pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0])); for (i = 0; i < pcpu_nr_slots; i++) INIT_LIST_HEAD(&pcpu_slot[i]); /* * Initialize static chunk. If reserved_size is zero, the * static chunk covers static area + dynamic allocation area * in the first chunk. If reserved_size is not zero, it * covers static area + reserved area (mostly used for module * static percpu allocation). 
*/ schunk = alloc_bootmem(pcpu_chunk_struct_size); INIT_LIST_HEAD(&schunk->list); schunk->base_addr = base_addr; schunk->map = smap; schunk->map_alloc = ARRAY_SIZE(smap); schunk->immutable = true; bitmap_fill(schunk->populated, pcpu_unit_pages); if (ai->reserved_size) { schunk->free_size = ai->reserved_size; pcpu_reserved_chunk = schunk; pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size; } else { schunk->free_size = dyn_size; dyn_size = 0; /* dynamic area covered */ } schunk->contig_hint = schunk->free_size; schunk->map[schunk->map_used++] = -ai->static_size; if (schunk->free_size) schunk->map[schunk->map_used++] = schunk->free_size; /* init dynamic chunk if necessary */ if (dyn_size) { dchunk = alloc_bootmem(pcpu_chunk_struct_size); INIT_LIST_HEAD(&dchunk->list); dchunk->base_addr = base_addr; dchunk->map = dmap; dchunk->map_alloc = ARRAY_SIZE(dmap); dchunk->immutable = true; bitmap_fill(dchunk->populated, pcpu_unit_pages); dchunk->contig_hint = dchunk->free_size = dyn_size; dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit; dchunk->map[dchunk->map_used++] = dchunk->free_size; } /* link the first chunk in */ pcpu_first_chunk = dchunk ?: schunk; pcpu_chunk_relocate(pcpu_first_chunk, -1); /* we're done */ pcpu_base_addr = base_addr; return 0; } #ifdef CONFIG_SMP const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { [PCPU_FC_AUTO] = "auto", [PCPU_FC_EMBED] = "embed", [PCPU_FC_PAGE] = "page", }; enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; static int __init percpu_alloc_setup(char *str) { if (!str) return -EINVAL; if (0) /* nada */; #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK else if (!strcmp(str, "embed")) pcpu_chosen_fc = PCPU_FC_EMBED; #endif #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK else if (!strcmp(str, "page")) pcpu_chosen_fc = PCPU_FC_PAGE; #endif else pr_warning("PERCPU: unknown allocator %s specified\n", str); return 0; } early_param("percpu_alloc", percpu_alloc_setup); /* * pcpu_embed_first_chunk() is used by the 
 * generic percpu setup.  Build it if needed by the arch config or the
 * generic setup is going to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	/*
	 * upa must divide alloc_size evenly and keep each unit
	 * page-aligned; start from the densest packing and back off.
	 */
	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

	/*
	 * Group cpus according to their proximity.  Two cpus share a
	 * group only if the distance between them is LOCAL_DISTANCE in
	 * both directions (or no distance callback was given).
	 */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	/* best_upa is always set: upa==1 passes the wastage check above */
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);

	/* carve the single cpu_map array into per-group slices */
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */

#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size, max_distance;
	int group, i, rc;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = alloc_bootmem_nopanic(areas_size);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		/* find the first cpu actually mapped into this group */
		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_free(ptr);
		areas[group] = ptr;

		/* base is the lowest group address seen so far */
		base = min(ptr, base);
	}

	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	max_distance = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
		max_distance = max_t(size_t, max_distance,
				     ai->groups[group].base_offset);
	}
	max_distance += ai->unit_size;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
			   "space 0x%lx\n", max_distance,
			   (unsigned long)(VMALLOC_END - VMALLOC_START));
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free;
#endif
	}

	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	/* areas[group] is NULL-safe only up to the failing group; the
	 * loop frees every group that was successfully allocated before
	 * the failure as well (free of the failed slot never runs since
	 * the goto fires before areas[group] is assigned). */
	for (group = 0; group < ai->nr_groups; group++)
		free_fn(areas[group],
			ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		free_bootmem(__pa(areas), areas_size);
	return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped
first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;

	/* human-readable page size for the pr_info/pr_warning below */
	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	/* page-by-page mapping cannot exploit NUMA locality: one group */
	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = alloc_bootmem(pages_size);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++)
		for (i = 0; i < unit_pages; i++) {
			unsigned int cpu = ai->groups[0].cpu_map[unit];
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate %s page "
					   "for cpu%u\n", psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_free(ptr);
			pages[j++] = virt_to_page(ptr);
		}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
		unit_pages, psize_str, vm.addr, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

/* default first-chunk page allocator: plain bootmem, may return NULL */
static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
}

/* default first-chunk page free, counterpart of pcpu_dfl_fc_alloc() */
static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	/* record per-cpu offsets relative to the static percpu image */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else /* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	/* single group, single unit covering cpu 0 */
	ai = pcpu_alloc_alloc_info(1, 1);
	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_free(fc);

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	if (pcpu_setup_first_chunk(ai, fc) < 0)
		panic("Failed to initialize percpu areas.");
}

#endif /* CONFIG_SMP */

/*
 * First and reserved chunks are initialized with temporary allocation
 * map in initdata so that they can be used before slab is online.
 * This function is called after slab is brought up and replaces those
 * with properly allocated maps.
 */
void __init percpu_init_late(void)
{
	struct pcpu_chunk *target_chunks[] =
		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int i;

	for (i = 0; (chunk = target_chunks[i]); i++) {
		int *map;
		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);

		BUILD_BUG_ON(size > PAGE_SIZE);

		map = pcpu_mem_zalloc(size);
		BUG_ON(!map);

		/* swap in the new map under pcpu_lock so concurrent
		 * allocators never observe a half-copied map */
		spin_lock_irqsave(&pcpu_lock, flags);
		memcpy(map, chunk->map, size);
		chunk->map = map;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}
}
gpl-2.0
meimz/linux
arch/arm/mach-prima2/common.c
1253
1522
/*
 * Defines machines for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/sizes.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include "common.h"

/* Late-init hook shared by the machine descriptors below. */
static void __init sirfsoc_init_late(void)
{
	sirfsoc_pm_init();
}

#ifdef CONFIG_ARCH_ATLAS6
static const char *const atlas6_dt_match[] __initconst = {
	"sirf,atlas6",
	NULL
};

DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
	/* Maintainer: Barry Song <baohua.song@csr.com> */
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.init_late	= sirfsoc_init_late,
	.dt_compat	= atlas6_dt_match,
MACHINE_END
#endif

#ifdef CONFIG_ARCH_PRIMA2
static const char *const prima2_dt_match[] __initconst = {
	"sirf,prima2",
	NULL
};

DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
	/* Maintainer: Barry Song <baohua.song@csr.com> */
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	/* PRIMA2 restricts DMA-able memory to the first 256MB */
	.dma_zone_size	= SZ_256M,
	.init_late	= sirfsoc_init_late,
	.dt_compat	= prima2_dt_match,
MACHINE_END
#endif

#ifdef CONFIG_ARCH_ATLAS7
static const char *const atlas7_dt_match[] __initconst = {
	"sirf,atlas7",
	NULL
};

DT_MACHINE_START(ATLAS7_DT, "Generic ATLAS7 (Flattened Device Tree)")
	/* Maintainer: Barry Song <baohua.song@csr.com> */
	.smp		= smp_ops(sirfsoc_smp_ops),
	.dt_compat	= atlas7_dt_match,
MACHINE_END
#endif
gpl-2.0
hwlzc/3.4.50
drivers/net/can/pch_can.c
1509
33494
/* * Copyright (C) 1999 - 2010 Intel Corporation. * Copyright (C) 2010 LAPIS SEMICONDUCTOR CO., LTD. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #define PCH_CTRL_INIT BIT(0) /* The INIT bit of CANCONT register. */ #define PCH_CTRL_IE BIT(1) /* The IE bit of CAN control register */ #define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1)) #define PCH_CTRL_CCE BIT(6) #define PCH_CTRL_OPT BIT(7) /* The OPT bit of CANCONT register. */ #define PCH_OPT_SILENT BIT(3) /* The Silent bit of CANOPT reg. */ #define PCH_OPT_LBACK BIT(4) /* The LoopBack bit of CANOPT reg. 
*/ #define PCH_CMASK_RX_TX_SET 0x00f3 #define PCH_CMASK_RX_TX_GET 0x0073 #define PCH_CMASK_ALL 0xff #define PCH_CMASK_NEWDAT BIT(2) #define PCH_CMASK_CLRINTPND BIT(3) #define PCH_CMASK_CTRL BIT(4) #define PCH_CMASK_ARB BIT(5) #define PCH_CMASK_MASK BIT(6) #define PCH_CMASK_RDWR BIT(7) #define PCH_IF_MCONT_NEWDAT BIT(15) #define PCH_IF_MCONT_MSGLOST BIT(14) #define PCH_IF_MCONT_INTPND BIT(13) #define PCH_IF_MCONT_UMASK BIT(12) #define PCH_IF_MCONT_TXIE BIT(11) #define PCH_IF_MCONT_RXIE BIT(10) #define PCH_IF_MCONT_RMTEN BIT(9) #define PCH_IF_MCONT_TXRQXT BIT(8) #define PCH_IF_MCONT_EOB BIT(7) #define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3)) #define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15)) #define PCH_ID2_DIR BIT(13) #define PCH_ID2_XTD BIT(14) #define PCH_ID_MSGVAL BIT(15) #define PCH_IF_CREQ_BUSY BIT(15) #define PCH_STATUS_INT 0x8000 #define PCH_RP 0x00008000 #define PCH_REC 0x00007f00 #define PCH_TEC 0x000000ff #define PCH_TX_OK BIT(3) #define PCH_RX_OK BIT(4) #define PCH_EPASSIV BIT(5) #define PCH_EWARN BIT(6) #define PCH_BUS_OFF BIT(7) /* bit position of certain controller bits. */ #define PCH_BIT_BRP_SHIFT 0 #define PCH_BIT_SJW_SHIFT 6 #define PCH_BIT_TSEG1_SHIFT 8 #define PCH_BIT_TSEG2_SHIFT 12 #define PCH_BIT_BRPE_BRPE_SHIFT 6 #define PCH_MSK_BITT_BRP 0x3f #define PCH_MSK_BRPE_BRPE 0x3c0 #define PCH_MSK_CTRL_IE_SIE_EIE 0x07 #define PCH_COUNTER_LIMIT 10 #define PCH_CAN_CLK 50000000 /* 50MHz */ /* * Define the number of message object. * PCH CAN communications are done via Message RAM. * The Message RAM consists of 32 message objects. 
*/ #define PCH_RX_OBJ_NUM 26 #define PCH_TX_OBJ_NUM 6 #define PCH_RX_OBJ_START 1 #define PCH_RX_OBJ_END PCH_RX_OBJ_NUM #define PCH_TX_OBJ_START (PCH_RX_OBJ_END + 1) #define PCH_TX_OBJ_END (PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM) #define PCH_FIFO_THRESH 16 /* TxRqst2 show status of MsgObjNo.17~32 */ #define PCH_TREQ2_TX_MASK (((1 << PCH_TX_OBJ_NUM) - 1) <<\ (PCH_RX_OBJ_END - 16)) enum pch_ifreg { PCH_RX_IFREG, PCH_TX_IFREG, }; enum pch_can_err { PCH_STUF_ERR = 1, PCH_FORM_ERR, PCH_ACK_ERR, PCH_BIT1_ERR, PCH_BIT0_ERR, PCH_CRC_ERR, PCH_LEC_ALL, }; enum pch_can_mode { PCH_CAN_ENABLE, PCH_CAN_DISABLE, PCH_CAN_ALL, PCH_CAN_NONE, PCH_CAN_STOP, PCH_CAN_RUN, }; struct pch_can_if_regs { u32 creq; u32 cmask; u32 mask1; u32 mask2; u32 id1; u32 id2; u32 mcont; u32 data[4]; u32 rsv[13]; }; struct pch_can_regs { u32 cont; u32 stat; u32 errc; u32 bitt; u32 intr; u32 opt; u32 brpe; u32 reserve; struct pch_can_if_regs ifregs[2]; /* [0]=if1 [1]=if2 */ u32 reserve1[8]; u32 treq1; u32 treq2; u32 reserve2[6]; u32 data1; u32 data2; u32 reserve3[6]; u32 canipend1; u32 canipend2; u32 reserve4[6]; u32 canmval1; u32 canmval2; u32 reserve5[37]; u32 srst; }; struct pch_can_priv { struct can_priv can; struct pci_dev *dev; u32 tx_enable[PCH_TX_OBJ_END]; u32 rx_enable[PCH_TX_OBJ_END]; u32 rx_link[PCH_TX_OBJ_END]; u32 int_enables; struct net_device *ndev; struct pch_can_regs __iomem *regs; struct napi_struct napi; int tx_obj; /* Point next Tx Obj index */ int use_msi; }; static struct can_bittiming_const pch_can_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 2, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 1024, /* 6bit + extended 4bit */ .brp_inc = 1, }; static DEFINE_PCI_DEVICE_TABLE(pch_pci_tbl) = { {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,}, {0,} }; MODULE_DEVICE_TABLE(pci, pch_pci_tbl); static inline void pch_can_bit_set(void __iomem *addr, u32 mask) { iowrite32(ioread32(addr) | mask, addr); } static inline void pch_can_bit_clear(void 
__iomem *addr, u32 mask) { iowrite32(ioread32(addr) & ~mask, addr); } static void pch_can_set_run_mode(struct pch_can_priv *priv, enum pch_can_mode mode) { switch (mode) { case PCH_CAN_RUN: pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT); break; case PCH_CAN_STOP: pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT); break; default: netdev_err(priv->ndev, "%s -> Invalid Mode.\n", __func__); break; } } static void pch_can_set_optmode(struct pch_can_priv *priv) { u32 reg_val = ioread32(&priv->regs->opt); if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) reg_val |= PCH_OPT_SILENT; if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) reg_val |= PCH_OPT_LBACK; pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT); iowrite32(reg_val, &priv->regs->opt); } static void pch_can_rw_msg_obj(void __iomem *creq_addr, u32 num) { int counter = PCH_COUNTER_LIMIT; u32 ifx_creq; iowrite32(num, creq_addr); while (counter) { ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY; if (!ifx_creq) break; counter--; udelay(1); } if (!counter) pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__); } static void pch_can_set_int_enables(struct pch_can_priv *priv, enum pch_can_mode interrupt_no) { switch (interrupt_no) { case PCH_CAN_DISABLE: pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE); break; case PCH_CAN_ALL: pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE); break; case PCH_CAN_NONE: pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE); break; default: netdev_err(priv->ndev, "Invalid interrupt number.\n"); break; } } static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num, int set, enum pch_ifreg dir) { u32 ie; if (dir) ie = PCH_IF_MCONT_TXIE; else ie = PCH_IF_MCONT_RXIE; /* Reading the Msg buffer from Message RAM to IF1/2 registers. 
*/ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask); pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num); /* Setting the IF1/2MASK1 register to access MsgVal and RxIE bits */ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL, &priv->regs->ifregs[dir].cmask); if (set) { /* Setting the MsgVal and RxIE/TxIE bits */ pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie); pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL); } else { /* Clearing the MsgVal and RxIE/TxIE bits */ pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie); pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL); } pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num); } static void pch_can_set_rx_all(struct pch_can_priv *priv, int set) { int i; /* Traversing to obtain the object configured as receivers. */ for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG); } static void pch_can_set_tx_all(struct pch_can_priv *priv, int set) { int i; /* Traversing to obtain the object configured as transmit object. 
*/ for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG); } static u32 pch_can_int_pending(struct pch_can_priv *priv) { return ioread32(&priv->regs->intr) & 0xffff; } static void pch_can_clear_if_buffers(struct pch_can_priv *priv) { int i; /* Msg Obj ID (1~32) */ for (i = PCH_RX_OBJ_START; i <= PCH_TX_OBJ_END; i++) { iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask); iowrite32(0xffff, &priv->regs->ifregs[0].mask1); iowrite32(0xffff, &priv->regs->ifregs[0].mask2); iowrite32(0x0, &priv->regs->ifregs[0].id1); iowrite32(0x0, &priv->regs->ifregs[0].id2); iowrite32(0x0, &priv->regs->ifregs[0].mcont); iowrite32(0x0, &priv->regs->ifregs[0].data[0]); iowrite32(0x0, &priv->regs->ifregs[0].data[1]); iowrite32(0x0, &priv->regs->ifregs[0].data[2]); iowrite32(0x0, &priv->regs->ifregs[0].data[3]); iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB | PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask); pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i); } } static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv) { int i; for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) { iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask); pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i); iowrite32(0x0, &priv->regs->ifregs[0].id1); iowrite32(0x0, &priv->regs->ifregs[0].id2); pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_UMASK); /* In case FIFO mode, Last EoB of Rx Obj must be 1 */ if (i == PCH_RX_OBJ_END) pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB); else pch_can_bit_clear(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB); iowrite32(0, &priv->regs->ifregs[0].mask1); pch_can_bit_clear(&priv->regs->ifregs[0].mask2, 0x1fff | PCH_MASK2_MDIR_MXTD); /* Setting CMASK for writing */ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB | PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask); pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i); } for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) { 
		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);

		/* Resetting DIR bit for reception */
		iowrite32(0x0, &priv->regs->ifregs[1].id1);
		iowrite32(PCH_ID2_DIR, &priv->regs->ifregs[1].id2);

		/* Setting EOB bit for transmitter */
		iowrite32(PCH_IF_MCONT_EOB | PCH_IF_MCONT_UMASK,
			  &priv->regs->ifregs[1].mcont);

		iowrite32(0, &priv->regs->ifregs[1].mask1);
		pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);

		/* Setting CMASK for writing */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
			  PCH_CMASK_CTRL, &priv->regs->ifregs[1].cmask);

		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
	}
}

/* Bring the controller to a stopped, fully configured state with
 * interrupts enabled; run mode is entered later by pch_can_start(). */
static void pch_can_init(struct pch_can_priv *priv)
{
	/* Stopping the CAN device. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Clearing all the message object buffers. */
	pch_can_clear_if_buffers(priv);

	/* Configuring the respective message object as either rx/tx object. */
	pch_can_config_rx_tx_buffers(priv);

	/* Enabling the interrupts. */
	pch_can_set_int_enables(priv, PCH_CAN_ALL);
}

/* Quiesce the controller: stop it, mask interrupts, invalidate all
 * receive and transmit message objects. */
static void pch_can_release(struct pch_can_priv *priv)
{
	/* Stopping the CAN device. */
	pch_can_set_run_mode(priv, PCH_CAN_STOP);

	/* Disabling the interrupts. */
	pch_can_set_int_enables(priv, PCH_CAN_NONE);

	/* Disabling all the receive object. */
	pch_can_set_rx_all(priv, 0);

	/* Disabling all the transmit object. */
	pch_can_set_tx_all(priv, 0);
}

/* This function clears interrupt(s) from the CAN device.
 * @mask is the message object number (1..PCH_TX_OBJ_END) whose
 * pending interrupt is acknowledged. */
static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
{
	/* Clear interrupt for a receive object (handled through IF1). */
	if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
		/* Setting CMASK for clearing the reception interrupts. */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
			  &priv->regs->ifregs[0].cmask);

		/* Clearing the Dir bit. */
		pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);

		/* Clearing NewDat & IntPnd */
		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
				  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);

		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, mask);
	} else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
		/*
		 * Setting CMASK for clearing interrupts for frame transmission.
		 */
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
			  &priv->regs->ifregs[1].cmask);

		/* Resetting the ID registers. */
		pch_can_bit_set(&priv->regs->ifregs[1].id2,
			       PCH_ID2_DIR | (0x7ff << 2));
		iowrite32(0x0, &priv->regs->ifregs[1].id1);

		/* Clearing NewDat, TxRqst & IntPnd */
		pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
				  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
				  PCH_IF_MCONT_TXRQXT);
		pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, mask);
	}
}

/* Pulse the software-reset register to reset the controller core. */
static void pch_can_reset(struct pch_can_priv *priv)
{
	/* write to sw reset register */
	iowrite32(1, &priv->regs->srst);
	iowrite32(0, &priv->regs->srst);
}

/* Translate a controller status word into a SocketCAN error frame and
 * update the driver's CAN state machine. */
static void pch_can_error(struct net_device *ndev, u32 status)
{
	struct sk_buff *skb;
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct can_frame *cf;
	u32 errc, lec;
	struct net_device_stats *stats = &(priv->ndev->stats);
	enum can_state state = priv->can.state;

	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb)
		return;

	if (status & PCH_BUS_OFF) {
		pch_can_set_tx_all(priv, 0);
		pch_can_set_rx_all(priv, 0);
		state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(ndev);
	}

	errc = ioread32(&priv->regs->errc);
	/* Warning interrupt. */
	if (status & PCH_EWARN) {
		state = CAN_STATE_ERROR_WARNING;
		priv->can.can_stats.error_warning++;
		cf->can_id |= CAN_ERR_CRTL;
		if (((errc & PCH_REC) >> 8) > 96)
			cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
		if ((errc & PCH_TEC) > 96)
			cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
		netdev_dbg(ndev,
			"%s -> Error Counter is more than 96.\n", __func__);
	}
	/* Error passive interrupt.
	 */
	if (status & PCH_EPASSIV) {
		priv->can.can_stats.error_passive++;
		state = CAN_STATE_ERROR_PASSIVE;
		cf->can_id |= CAN_ERR_CRTL;
		if (errc & PCH_RP)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if ((errc & PCH_TEC) > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
		netdev_dbg(ndev,
			"%s -> CAN controller is ERROR PASSIVE .\n", __func__);
	}

	/* Map the last-error-code field to a SocketCAN protocol error. */
	lec = status & PCH_LEC_ALL;
	switch (lec) {
	case PCH_STUF_ERR:
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_FORM_ERR:
		cf->data[2] |= CAN_ERR_PROT_FORM;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_ACK_ERR:
		cf->can_id |= CAN_ERR_ACK;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_BIT1_ERR:
	case PCH_BIT0_ERR:
		cf->data[2] |= CAN_ERR_PROT_BIT;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_CRC_ERR:
		cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
			       CAN_ERR_PROT_LOC_CRC_DEL;
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		break;
	case PCH_LEC_ALL: /* Written by CPU. No error status */
		break;
	}

	cf->data[6] = errc & PCH_TEC;
	cf->data[7] = (errc & PCH_REC) >> 8;

	priv->can.state = state;
	netif_receive_skb(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}

/* Hard IRQ handler: mask controller interrupts and defer to NAPI. */
static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct pch_can_priv *priv = netdev_priv(ndev);

	if (!pch_can_int_pending(priv))
		return IRQ_NONE;

	pch_can_set_int_enables(priv, PCH_CAN_NONE);
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

/* Acknowledge a received object depending on its position relative to
 * the FIFO threshold: below it only IntPnd is cleared (NewDat is kept
 * so the FIFO chain stays intact), at it the whole batch below is
 * acknowledged, above it the object is acknowledged individually. */
static void pch_fifo_thresh(struct pch_can_priv *priv, int obj_id)
{
	if (obj_id < PCH_FIFO_THRESH) {
		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
			  PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);

		/* Clearing the Dir bit. */
		pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);

		/* Clearing IntPnd only; NewDat is left set here. */
		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
				  PCH_IF_MCONT_INTPND);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
	} else if (obj_id > PCH_FIFO_THRESH) {
		pch_can_int_clr(priv, obj_id);
	} else if (obj_id == PCH_FIFO_THRESH) {
		int cnt;
		for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
			pch_can_int_clr(priv, cnt + 1);
	}
}

/* Report an overwritten message object as an RX overflow error frame. */
static void pch_can_rx_msg_lost(struct net_device *ndev, int obj_id)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &(priv->ndev->stats);
	struct sk_buff *skb;
	struct can_frame *cf;

	netdev_dbg(priv->ndev, "Msg Obj is overwritten.\n");
	pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
			  PCH_IF_MCONT_MSGLOST);
	iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
		  &priv->regs->ifregs[0].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);

	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb)
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
	stats->rx_over_errors++;
	stats->rx_errors++;

	netif_receive_skb(skb);
}

/* Drain received frames starting at message object @obj_num, limited
 * by the NAPI @quota.  Returns the number of packets processed. */
static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
{
	u32 reg;
	canid_t id;
	int rcv_pkts = 0;
	struct sk_buff *skb;
	struct can_frame *cf;
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &(priv->ndev->stats);
	int i;
	u32 id2;
	u16 data_reg;

	do {
		/* Reading the message object from the Message RAM */
		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
		pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_num);

		/* Reading the MCONT register. */
		reg = ioread32(&priv->regs->ifregs[0].mcont);

		/* EOB marks the end of the RX FIFO chain - stop there. */
		if (reg & PCH_IF_MCONT_EOB)
			break;

		/* If MsgLost bit set. */
		if (reg & PCH_IF_MCONT_MSGLOST) {
			pch_can_rx_msg_lost(ndev, obj_num);
			rcv_pkts++;
			quota--;
			obj_num++;
			continue;
		} else if (!(reg & PCH_IF_MCONT_NEWDAT)) {
			/* nothing new in this object, try the next one */
			obj_num++;
			continue;
		}

		skb = alloc_can_skb(priv->ndev, &cf);
		if (!skb) {
			netdev_err(ndev, "alloc_can_skb Failed\n");
			return rcv_pkts;
		}

		/* Get Received data */
		id2 = ioread32(&priv->regs->ifregs[0].id2);
		if (id2 & PCH_ID2_XTD) {
			/* extended frame: 29-bit id split over id1/id2 */
			id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
			id |= (((id2) & 0x1fff) << 16);
			cf->can_id = id | CAN_EFF_FLAG;
		} else {
			id = (id2 >> 2) & CAN_SFF_MASK;
			cf->can_id = id;
		}

		if (id2 & PCH_ID2_DIR)
			cf->can_id |= CAN_RTR_FLAG;

		cf->can_dlc = get_can_dlc((ioread32(&priv->regs->
						    ifregs[0].mcont)) & 0xF);

		/* data registers are 16 bits wide, little-endian */
		for (i = 0; i < cf->can_dlc; i += 2) {
			data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
			cf->data[i] = data_reg;
			cf->data[i + 1] = data_reg >> 8;
		}

		netif_receive_skb(skb);
		rcv_pkts++;
		stats->rx_packets++;
		quota--;
		stats->rx_bytes += cf->can_dlc;

		pch_fifo_thresh(priv, obj_num);
		obj_num++;
	} while (quota > 0);

	return rcv_pkts;
}

/* Complete a transmitted frame: free the echo skb, acknowledge the
 * object and account the statistics. */
static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
{
	struct pch_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &(priv->ndev->stats);
	u32 dlc;

	can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
	iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
		  &priv->regs->ifregs[1].cmask);
	pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
	dlc = get_can_dlc(ioread32(&priv->regs->ifregs[1].mcont) &
			  PCH_IF_MCONT_DLC);
	stats->tx_bytes += dlc;
	stats->tx_packets++;
	/* the last TX object completing means the queue can run again */
	if (int_stat == PCH_TX_OBJ_END)
		netif_wake_queue(ndev);
}

/* NAPI poll: dispatch status/error, RX and TX-complete interrupts. */
static int pch_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct pch_can_priv *priv = netdev_priv(ndev);
	u32 int_stat;
	u32 reg_stat;
	int quota_save = quota;

	int_stat = pch_can_int_pending(priv);
	if (!int_stat)
		goto end;

	if (int_stat == PCH_STATUS_INT) {
		reg_stat = ioread32(&priv->regs->stat);

		if ((reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) &&
		    ((reg_stat &
PCH_LEC_ALL) != PCH_LEC_ALL)) { pch_can_error(ndev, reg_stat); quota--; } if (reg_stat & (PCH_TX_OK | PCH_RX_OK)) pch_can_bit_clear(&priv->regs->stat, reg_stat & (PCH_TX_OK | PCH_RX_OK)); int_stat = pch_can_int_pending(priv); } if (quota == 0) goto end; if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) { quota -= pch_can_rx_normal(ndev, int_stat, quota); } else if ((int_stat >= PCH_TX_OBJ_START) && (int_stat <= PCH_TX_OBJ_END)) { /* Handle transmission interrupt */ pch_can_tx_complete(ndev, int_stat); } end: napi_complete(napi); pch_can_set_int_enables(priv, PCH_CAN_ALL); return quota_save - quota; } static int pch_set_bittiming(struct net_device *ndev) { struct pch_can_priv *priv = netdev_priv(ndev); const struct can_bittiming *bt = &priv->can.bittiming; u32 canbit; u32 bepe; /* Setting the CCE bit for accessing the Can Timing register. */ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE); canbit = (bt->brp - 1) & PCH_MSK_BITT_BRP; canbit |= (bt->sjw - 1) << PCH_BIT_SJW_SHIFT; canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1_SHIFT; canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2_SHIFT; bepe = ((bt->brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT; iowrite32(canbit, &priv->regs->bitt); iowrite32(bepe, &priv->regs->brpe); pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE); return 0; } static void pch_can_start(struct net_device *ndev) { struct pch_can_priv *priv = netdev_priv(ndev); if (priv->can.state != CAN_STATE_STOPPED) pch_can_reset(priv); pch_set_bittiming(ndev); pch_can_set_optmode(priv); pch_can_set_tx_all(priv, 1); pch_can_set_rx_all(priv, 1); /* Setting the CAN to run mode. 
*/ pch_can_set_run_mode(priv, PCH_CAN_RUN); priv->can.state = CAN_STATE_ERROR_ACTIVE; return; } static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode) { int ret = 0; switch (mode) { case CAN_MODE_START: pch_can_start(ndev); netif_wake_queue(ndev); break; default: ret = -EOPNOTSUPP; break; } return ret; } static int pch_can_open(struct net_device *ndev) { struct pch_can_priv *priv = netdev_priv(ndev); int retval; /* Regstering the interrupt. */ retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED, ndev->name, ndev); if (retval) { netdev_err(ndev, "request_irq failed.\n"); goto req_irq_err; } /* Open common can device */ retval = open_candev(ndev); if (retval) { netdev_err(ndev, "open_candev() failed %d\n", retval); goto err_open_candev; } pch_can_init(priv); pch_can_start(ndev); napi_enable(&priv->napi); netif_start_queue(ndev); return 0; err_open_candev: free_irq(priv->dev->irq, ndev); req_irq_err: pch_can_release(priv); return retval; } static int pch_close(struct net_device *ndev) { struct pch_can_priv *priv = netdev_priv(ndev); netif_stop_queue(ndev); napi_disable(&priv->napi); pch_can_release(priv); free_irq(priv->dev->irq, ndev); close_candev(ndev); priv->can.state = CAN_STATE_STOPPED; return 0; } static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev) { struct pch_can_priv *priv = netdev_priv(ndev); struct can_frame *cf = (struct can_frame *)skb->data; int tx_obj_no; int i; u32 id2; if (can_dropped_invalid_skb(ndev, skb)) return NETDEV_TX_OK; tx_obj_no = priv->tx_obj; if (priv->tx_obj == PCH_TX_OBJ_END) { if (ioread32(&priv->regs->treq2) & PCH_TREQ2_TX_MASK) netif_stop_queue(ndev); priv->tx_obj = PCH_TX_OBJ_START; } else { priv->tx_obj++; } /* Setting the CMASK register. */ pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL); /* If ID extended is set. 
*/ if (cf->can_id & CAN_EFF_FLAG) { iowrite32(cf->can_id & 0xffff, &priv->regs->ifregs[1].id1); id2 = ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD; } else { iowrite32(0, &priv->regs->ifregs[1].id1); id2 = (cf->can_id & CAN_SFF_MASK) << 2; } id2 |= PCH_ID_MSGVAL; /* If remote frame has to be transmitted.. */ if (!(cf->can_id & CAN_RTR_FLAG)) id2 |= PCH_ID2_DIR; iowrite32(id2, &priv->regs->ifregs[1].id2); /* Copy data to register */ for (i = 0; i < cf->can_dlc; i += 2) { iowrite16(cf->data[i] | (cf->data[i + 1] << 8), &priv->regs->ifregs[1].data[i / 2]); } can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1); /* Set the size of the data. Update if2_mcont */ iowrite32(cf->can_dlc | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT | PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont); pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no); return NETDEV_TX_OK; } static const struct net_device_ops pch_can_netdev_ops = { .ndo_open = pch_can_open, .ndo_stop = pch_close, .ndo_start_xmit = pch_xmit, }; static void __devexit pch_can_remove(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct pch_can_priv *priv = netdev_priv(ndev); unregister_candev(priv->ndev); if (priv->use_msi) pci_disable_msi(priv->dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); pch_can_reset(priv); pci_iounmap(pdev, priv->regs); free_candev(priv->ndev); } #ifdef CONFIG_PM static void pch_can_set_int_custom(struct pch_can_priv *priv) { /* Clearing the IE, SIE and EIE bits of Can control register. */ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE); /* Appropriately setting them. */ pch_can_bit_set(&priv->regs->cont, ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1)); } /* This function retrieves interrupt enabled for the CAN device. */ static u32 pch_can_get_int_enables(struct pch_can_priv *priv) { /* Obtaining the status of IE, SIE and EIE interrupt bits. 
*/ return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1; } static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num, enum pch_ifreg dir) { u32 ie, enable; if (dir) ie = PCH_IF_MCONT_RXIE; else ie = PCH_IF_MCONT_TXIE; iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask); pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num); if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) && ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie)) enable = 1; else enable = 0; return enable; } static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num, int set) { iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask); pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num); iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask); if (set) pch_can_bit_clear(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB); else pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB); pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num); } static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num) { u32 link; iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask); pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num); if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB) link = 0; else link = 1; return link; } static int pch_can_get_buffer_status(struct pch_can_priv *priv) { return (ioread32(&priv->regs->treq1) & 0xffff) | (ioread32(&priv->regs->treq2) << 16); } static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state) { int i; int retval; u32 buf_stat; /* Variable for reading the transmit buffer status. */ int counter = PCH_COUNTER_LIMIT; struct net_device *dev = pci_get_drvdata(pdev); struct pch_can_priv *priv = netdev_priv(dev); /* Stop the CAN controller */ pch_can_set_run_mode(priv, PCH_CAN_STOP); /* Indicate that we are aboutto/in suspend */ priv->can.state = CAN_STATE_STOPPED; /* Waiting for all transmission to complete. 
*/ while (counter) { buf_stat = pch_can_get_buffer_status(priv); if (!buf_stat) break; counter--; udelay(1); } if (!counter) dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__); /* Save interrupt configuration and then disable them */ priv->int_enables = pch_can_get_int_enables(priv); pch_can_set_int_enables(priv, PCH_CAN_DISABLE); /* Save Tx buffer enable state */ for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) priv->tx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i, PCH_TX_IFREG); /* Disable all Transmit buffers */ pch_can_set_tx_all(priv, 0); /* Save Rx buffer enable state */ for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) { priv->rx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i, PCH_RX_IFREG); priv->rx_link[i - 1] = pch_can_get_rx_buffer_link(priv, i); } /* Disable all Receive buffers */ pch_can_set_rx_all(priv, 0); retval = pci_save_state(pdev); if (retval) { dev_err(&pdev->dev, "pci_save_state failed.\n"); } else { pci_enable_wake(pdev, PCI_D3hot, 0); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); } return retval; } static int pch_can_resume(struct pci_dev *pdev) { int i; int retval; struct net_device *dev = pci_get_drvdata(pdev); struct pch_can_priv *priv = netdev_priv(dev); pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); retval = pci_enable_device(pdev); if (retval) { dev_err(&pdev->dev, "pci_enable_device failed.\n"); return retval; } pci_enable_wake(pdev, PCI_D3hot, 0); priv->can.state = CAN_STATE_ERROR_ACTIVE; /* Disabling all interrupts. */ pch_can_set_int_enables(priv, PCH_CAN_DISABLE); /* Setting the CAN device in Stop Mode. */ pch_can_set_run_mode(priv, PCH_CAN_STOP); /* Configuring the transmit and receive buffers. */ pch_can_config_rx_tx_buffers(priv); /* Restore the CAN state */ pch_set_bittiming(dev); /* Listen/Active */ pch_can_set_optmode(priv); /* Enabling the transmit buffer. 
*/ for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) pch_can_set_rxtx(priv, i, priv->tx_enable[i - 1], PCH_TX_IFREG); /* Configuring the receive buffer and enabling them. */ for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) { /* Restore buffer link */ pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i - 1]); /* Restore buffer enables */ pch_can_set_rxtx(priv, i, priv->rx_enable[i - 1], PCH_RX_IFREG); } /* Enable CAN Interrupts */ pch_can_set_int_custom(priv); /* Restore Run Mode */ pch_can_set_run_mode(priv, PCH_CAN_RUN); return retval; } #else #define pch_can_suspend NULL #define pch_can_resume NULL #endif static int pch_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct pch_can_priv *priv = netdev_priv(dev); u32 errc = ioread32(&priv->regs->errc); bec->txerr = errc & PCH_TEC; bec->rxerr = (errc & PCH_REC) >> 8; return 0; } static int __devinit pch_can_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct net_device *ndev; struct pch_can_priv *priv; int rc; void __iomem *addr; rc = pci_enable_device(pdev); if (rc) { dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc); goto probe_exit_endev; } rc = pci_request_regions(pdev, KBUILD_MODNAME); if (rc) { dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc); goto probe_exit_pcireq; } addr = pci_iomap(pdev, 1, 0); if (!addr) { rc = -EIO; dev_err(&pdev->dev, "Failed pci_iomap\n"); goto probe_exit_ipmap; } ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END); if (!ndev) { rc = -ENOMEM; dev_err(&pdev->dev, "Failed alloc_candev\n"); goto probe_exit_alloc_candev; } priv = netdev_priv(ndev); priv->ndev = ndev; priv->regs = addr; priv->dev = pdev; priv->can.bittiming_const = &pch_can_bittiming_const; priv->can.do_set_mode = pch_can_do_set_mode; priv->can.do_get_berr_counter = pch_can_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_LOOPBACK; priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj */ 
ndev->irq = pdev->irq; ndev->flags |= IFF_ECHO; pci_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->netdev_ops = &pch_can_netdev_ops; priv->can.clock.freq = PCH_CAN_CLK; /* Hz */ netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END); rc = pci_enable_msi(priv->dev); if (rc) { netdev_err(ndev, "PCH CAN opened without MSI\n"); priv->use_msi = 0; } else { netdev_err(ndev, "PCH CAN opened with MSI\n"); pci_set_master(pdev); priv->use_msi = 1; } rc = register_candev(ndev); if (rc) { dev_err(&pdev->dev, "Failed register_candev %d\n", rc); goto probe_exit_reg_candev; } return 0; probe_exit_reg_candev: if (priv->use_msi) pci_disable_msi(priv->dev); free_candev(ndev); probe_exit_alloc_candev: pci_iounmap(pdev, addr); probe_exit_ipmap: pci_release_regions(pdev); probe_exit_pcireq: pci_disable_device(pdev); probe_exit_endev: return rc; } static struct pci_driver pch_can_pci_driver = { .name = "pch_can", .id_table = pch_pci_tbl, .probe = pch_can_probe, .remove = __devexit_p(pch_can_remove), .suspend = pch_can_suspend, .resume = pch_can_resume, }; static int __init pch_can_pci_init(void) { return pci_register_driver(&pch_can_pci_driver); } module_init(pch_can_pci_init); static void __exit pch_can_pci_exit(void) { pci_unregister_driver(&pch_can_pci_driver); } module_exit(pch_can_pci_exit); MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver"); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.94");
gpl-2.0
IADcodes/cubebone_kernel_shw-m250s
drivers/power/power_supply_sysfs.c
1509
8049
/* * Sysfs interface for the universal power supply monitor class * * Copyright © 2007 David Woodhouse <dwmw2@infradead.org> * Copyright © 2007 Anton Vorontsov <cbou@mail.ru> * Copyright © 2004 Szabolcs Gyurko * Copyright © 2003 Ian Molton <spyro@f2s.com> * * Modified: 2004, Oct Szabolcs Gyurko * * You may use this code as per GPL version 2 */ #include <linux/ctype.h> #include <linux/power_supply.h> #include <linux/slab.h> #include "power_supply.h" /* * This is because the name "current" breaks the device attr macro. * The "current" word resolves to "(get_current())" so instead of * "current" "(get_current())" appears in the sysfs. * * The source of this definition is the device.h which calls __ATTR * macro in sysfs.h which calls the __stringify macro. * * Only modification that the name is not tried to be resolved * (as a macro let's say). */ #define POWER_SUPPLY_ATTR(_name) \ { \ .attr = { .name = #_name }, \ .show = power_supply_show_property, \ .store = power_supply_store_property, \ } static struct device_attribute power_supply_attrs[]; static ssize_t power_supply_show_property(struct device *dev, struct device_attribute *attr, char *buf) { static char *type_text[] = { "Battery", "UPS", "Mains", "USB", "USB_DCP", "USB_CDP", "USB_ACA" }; static char *status_text[] = { "Unknown", "Charging", "Discharging", "Not charging", "Full" }; static char *charge_type[] = { "Unknown", "N/A", "Trickle", "Fast" }; static char *health_text[] = { "Unknown", "Good", "Overheat", "Dead", "Over voltage", "Unspecified failure", "Cold", }; static char *technology_text[] = { "Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd", "LiMn" }; static char *capacity_level_text[] = { "Unknown", "Critical", "Low", "Normal", "High", "Full" }; ssize_t ret = 0; struct power_supply *psy = dev_get_drvdata(dev); const ptrdiff_t off = attr - power_supply_attrs; union power_supply_propval value; if (off == POWER_SUPPLY_PROP_TYPE) value.intval = psy->type; else ret = psy->get_property(psy, off, 
&value); if (ret < 0) { if (ret == -ENODATA) dev_dbg(dev, "driver has no data for `%s' property\n", attr->attr.name); else if (ret != -ENODEV) dev_err(dev, "driver failed to report `%s' property\n", attr->attr.name); return ret; } if (off == POWER_SUPPLY_PROP_STATUS) return sprintf(buf, "%s\n", status_text[value.intval]); else if (off == POWER_SUPPLY_PROP_CHARGE_TYPE) return sprintf(buf, "%s\n", charge_type[value.intval]); else if (off == POWER_SUPPLY_PROP_HEALTH) return sprintf(buf, "%s\n", health_text[value.intval]); else if (off == POWER_SUPPLY_PROP_TECHNOLOGY) return sprintf(buf, "%s\n", technology_text[value.intval]); else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL) return sprintf(buf, "%s\n", capacity_level_text[value.intval]); else if (off == POWER_SUPPLY_PROP_TYPE) return sprintf(buf, "%s\n", type_text[value.intval]); else if (off >= POWER_SUPPLY_PROP_MODEL_NAME) return sprintf(buf, "%s\n", value.strval); return sprintf(buf, "%d\n", value.intval); } static ssize_t power_supply_store_property(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { ssize_t ret; struct power_supply *psy = dev_get_drvdata(dev); const ptrdiff_t off = attr - power_supply_attrs; union power_supply_propval value; long long_val; /* TODO: support other types than int */ ret = strict_strtol(buf, 10, &long_val); if (ret < 0) return ret; value.intval = long_val; ret = psy->set_property(psy, off, &value); if (ret < 0) return ret; return count; } /* Must be in the same order as POWER_SUPPLY_PROP_* */ static struct device_attribute power_supply_attrs[] = { /* Properties of type `int' */ POWER_SUPPLY_ATTR(status), POWER_SUPPLY_ATTR(charge_type), POWER_SUPPLY_ATTR(health), POWER_SUPPLY_ATTR(present), POWER_SUPPLY_ATTR(online), POWER_SUPPLY_ATTR(technology), POWER_SUPPLY_ATTR(cycle_count), POWER_SUPPLY_ATTR(voltage_max), POWER_SUPPLY_ATTR(voltage_min), POWER_SUPPLY_ATTR(voltage_max_design), POWER_SUPPLY_ATTR(voltage_min_design), POWER_SUPPLY_ATTR(voltage_now), 
POWER_SUPPLY_ATTR(voltage_avg), POWER_SUPPLY_ATTR(current_max), POWER_SUPPLY_ATTR(current_now), POWER_SUPPLY_ATTR(current_avg), POWER_SUPPLY_ATTR(power_now), POWER_SUPPLY_ATTR(power_avg), POWER_SUPPLY_ATTR(charge_full_design), POWER_SUPPLY_ATTR(charge_empty_design), POWER_SUPPLY_ATTR(charge_full), POWER_SUPPLY_ATTR(charge_empty), POWER_SUPPLY_ATTR(charge_now), POWER_SUPPLY_ATTR(charge_avg), POWER_SUPPLY_ATTR(charge_counter), POWER_SUPPLY_ATTR(energy_full_design), POWER_SUPPLY_ATTR(energy_empty_design), POWER_SUPPLY_ATTR(energy_full), POWER_SUPPLY_ATTR(energy_empty), POWER_SUPPLY_ATTR(energy_now), POWER_SUPPLY_ATTR(energy_avg), POWER_SUPPLY_ATTR(capacity), POWER_SUPPLY_ATTR(capacity_level), POWER_SUPPLY_ATTR(temp), POWER_SUPPLY_ATTR(temp_ambient), POWER_SUPPLY_ATTR(time_to_empty_now), POWER_SUPPLY_ATTR(time_to_empty_avg), POWER_SUPPLY_ATTR(time_to_full_now), POWER_SUPPLY_ATTR(time_to_full_avg), POWER_SUPPLY_ATTR(type), /* Properties of type `const char *' */ POWER_SUPPLY_ATTR(model_name), POWER_SUPPLY_ATTR(manufacturer), POWER_SUPPLY_ATTR(serial_number), }; static struct attribute * __power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1]; static mode_t power_supply_attr_is_visible(struct kobject *kobj, struct attribute *attr, int attrno) { struct device *dev = container_of(kobj, struct device, kobj); struct power_supply *psy = dev_get_drvdata(dev); mode_t mode = S_IRUSR | S_IRGRP | S_IROTH; int i; if (attrno == POWER_SUPPLY_PROP_TYPE) return mode; for (i = 0; i < psy->num_properties; i++) { int property = psy->properties[i]; if (property == attrno) { if (psy->property_is_writeable && psy->property_is_writeable(psy, property) > 0) mode |= S_IWUSR; return mode; } } return 0; } static struct attribute_group power_supply_attr_group = { .attrs = __power_supply_attrs, .is_visible = power_supply_attr_is_visible, }; static const struct attribute_group *power_supply_attr_groups[] = { &power_supply_attr_group, NULL, }; void power_supply_init_attrs(struct device_type 
*dev_type) { int i; dev_type->groups = power_supply_attr_groups; for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++) __power_supply_attrs[i] = &power_supply_attrs[i].attr; } static char *kstruprdup(const char *str, gfp_t gfp) { char *ret, *ustr; ustr = ret = kmalloc(strlen(str) + 1, gfp); if (!ret) return NULL; while (*str) *ustr++ = toupper(*str++); *ustr = 0; return ret; } int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) { struct power_supply *psy = dev_get_drvdata(dev); int ret = 0, j; char *prop_buf; char *attrname; dev_dbg(dev, "uevent\n"); if (!psy || !psy->dev) { dev_dbg(dev, "No power supply yet\n"); return ret; } dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->name); ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->name); if (ret) return ret; prop_buf = (char *)get_zeroed_page(GFP_KERNEL); if (!prop_buf) return -ENOMEM; for (j = 0; j < psy->num_properties; j++) { struct device_attribute *attr; char *line; attr = &power_supply_attrs[psy->properties[j]]; ret = power_supply_show_property(dev, attr, prop_buf); if (ret == -ENODEV || ret == -ENODATA) { /* When a battery is absent, we expect -ENODEV. Don't abort; send the uevent with at least the the PRESENT=0 property */ ret = 0; continue; } if (ret < 0) goto out; line = strchr(prop_buf, '\n'); if (line) *line = 0; attrname = kstruprdup(attr->attr.name, GFP_KERNEL); if (!attrname) { ret = -ENOMEM; goto out; } dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf); ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf); kfree(attrname); if (ret) goto out; } out: free_page((unsigned long)prop_buf); return ret; }
gpl-2.0
wilebeast/skylinux
arch/mips/math-emu/dp_flong.c
1765
1912
/* IEEE754 floating point arithmetic * double precision: common utilities */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * http://www.algor.co.uk * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754dp.h" ieee754dp ieee754dp_flong(s64 x) { u64 xm; int xe; int xs; CLEARCX; if (x == 0) return ieee754dp_zero(0); if (x == 1 || x == -1) return ieee754dp_one(x < 0); if (x == 10 || x == -10) return ieee754dp_ten(x < 0); xs = (x < 0); if (xs) { if (x == (1ULL << 63)) xm = (1ULL << 63); /* max neg can't be safely negated */ else xm = -x; } else { xm = x; } /* normalize */ xe = DP_MBITS + 3; if (xm >> (DP_MBITS + 1 + 3)) { /* shunt out overflow bits */ while (xm >> (DP_MBITS + 1 + 3)) { XDPSRSX1(); } } else { /* normalize in grs extended double precision */ while ((xm >> (DP_MBITS + 3)) == 0) { xm <<= 1; xe--; } } DPNORMRET1(xs, xe, xm, "dp_flong", x); } ieee754dp ieee754dp_fulong(u64 u) { if ((s64) u < 0) return ieee754dp_add(ieee754dp_1e63(), ieee754dp_flong(u & ~(1ULL << 63))); return ieee754dp_flong(u); }
gpl-2.0
Split-Screen/android_kernel_motorola_msm8916
drivers/net/ethernet/mellanox/mlx4/mr.c
2277
24132
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/init.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/vmalloc.h> #include <linux/mlx4/cmd.h> #include "mlx4.h" #include "icm.h" static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order) { int o; int m; u32 seg; spin_lock(&buddy->lock); for (o = order; o <= buddy->max_order; ++o) if (buddy->num_free[o]) { m = 1 << (buddy->max_order - o); seg = find_first_bit(buddy->bits[o], m); if (seg < m) goto found; } spin_unlock(&buddy->lock); return -1; found: clear_bit(seg, buddy->bits[o]); --buddy->num_free[o]; while (o > order) { --o; seg <<= 1; set_bit(seg ^ 1, buddy->bits[o]); ++buddy->num_free[o]; } spin_unlock(&buddy->lock); seg <<= order; return seg; } static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order) { seg >>= order; spin_lock(&buddy->lock); while (test_bit(seg ^ 1, buddy->bits[order])) { clear_bit(seg ^ 1, buddy->bits[order]); --buddy->num_free[order]; seg >>= 1; ++order; } set_bit(seg, buddy->bits[order]); ++buddy->num_free[order]; spin_unlock(&buddy->lock); } static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) { int i, s; buddy->max_order = max_order; spin_lock_init(&buddy->lock); buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *), GFP_KERNEL); buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, GFP_KERNEL); if (!buddy->bits || !buddy->num_free) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { s = BITS_TO_LONGS(1 << (buddy->max_order - i)); buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); if (!buddy->bits[i]) { buddy->bits[i] = vzalloc(s * sizeof(long)); if (!buddy->bits[i]) goto err_out_free; } } set_bit(0, buddy->bits[buddy->max_order]); buddy->num_free[buddy->max_order] = 1; return 0; err_out_free: for (i = 0; i <= buddy->max_order; ++i) if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i])) vfree(buddy->bits[i]); else kfree(buddy->bits[i]); err_out: 
kfree(buddy->bits); kfree(buddy->num_free); return -ENOMEM; } static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) { int i; for (i = 0; i <= buddy->max_order; ++i) if (is_vmalloc_addr(buddy->bits[i])) vfree(buddy->bits[i]); else kfree(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); } u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) { struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; u32 seg; int seg_order; u32 offset; seg_order = max_t(int, order - log_mtts_per_seg, 0); seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order); if (seg == -1) return -1; offset = seg * (1 << log_mtts_per_seg); if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset, offset + (1 << order) - 1)) { mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order); return -1; } return offset; } static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) { u64 in_param = 0; u64 out_param; int err; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, order); err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT, RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) return -1; return get_param_l(&out_param); } return __mlx4_alloc_mtt_range(dev, order); } int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, struct mlx4_mtt *mtt) { int i; if (!npages) { mtt->order = -1; mtt->page_shift = MLX4_ICM_PAGE_SHIFT; return 0; } else mtt->page_shift = page_shift; for (mtt->order = 0, i = 1; i < npages; i <<= 1) ++mtt->order; mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order); if (mtt->offset == -1) return -ENOMEM; return 0; } EXPORT_SYMBOL_GPL(mlx4_mtt_init); void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) { u32 first_seg; int seg_order; struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; seg_order = max_t(int, order - log_mtts_per_seg, 0); first_seg = offset / (1 << log_mtts_per_seg); mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order); mlx4_table_put_range(dev, 
&mr_table->mtt_table, offset, offset + (1 << order) - 1); } static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) { u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, offset); set_param_h(&in_param, order); err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) mlx4_warn(dev, "Failed to free mtt range at:" "%d order:%d\n", offset, order); return; } __mlx4_free_mtt_range(dev, offset, order); } void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) { if (mtt->order < 0) return; mlx4_free_mtt_range(dev, mtt->offset, mtt->order); } EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) { return (u64) mtt->offset * dev->caps.mtt_entry_sz; } EXPORT_SYMBOL_GPL(mlx4_mtt_addr); static u32 hw_index_to_key(u32 ind) { return (ind >> 24) | (ind << 8); } static u32 key_to_hw_index(u32 key) { return (key << 24) | (key >> 8); } static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int mpt_index) { return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); } static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int mpt_index) { return mlx4_cmd_box(dev, 0, mailbox ? 
mailbox->dma : 0, mpt_index, !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); } static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) { mr->iova = iova; mr->size = size; mr->pd = pd; mr->access = access; mr->enabled = MLX4_MPT_DISABLED; mr->key = hw_index_to_key(mridx); return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); } static int mlx4_WRITE_MTT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int num_entries) { return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } int __mlx4_mpt_reserve(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); } static int mlx4_mpt_reserve(struct mlx4_dev *dev) { u64 out_param; if (mlx4_is_mfunc(dev)) { if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) return -1; return get_param_l(&out_param); } return __mlx4_mpt_reserve(dev); } void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index) { struct mlx4_priv *priv = mlx4_priv(dev); mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); } static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) { u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, index); if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) mlx4_warn(dev, "Failed to release mr index:%d\n", index); return; } __mlx4_mpt_release(dev, index); } int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) { struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; return mlx4_table_get(dev, &mr_table->dmpt_table, index); } static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) { u64 param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&param, index); return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM, 
MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } return __mlx4_mpt_alloc_icm(dev, index); } void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) { struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; mlx4_table_put(dev, &mr_table->dmpt_table, index); } static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) { u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, index); if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) mlx4_warn(dev, "Failed to free icm of mr index:%d\n", index); return; } return __mlx4_mpt_free_icm(dev, index); } int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) { u32 index; int err; index = mlx4_mpt_reserve(dev); if (index == -1) return -ENOMEM; err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, access, npages, page_shift, mr); if (err) mlx4_mpt_release(dev, index); return err; } EXPORT_SYMBOL_GPL(mlx4_mr_alloc); static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) { int err; if (mr->enabled == MLX4_MPT_EN_HW) { err = mlx4_HW2SW_MPT(dev, NULL, key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); if (err) { mlx4_warn(dev, "HW2SW_MPT failed (%d),", err); mlx4_warn(dev, "MR has MWs bound to it.\n"); return err; } mr->enabled = MLX4_MPT_EN_SW; } mlx4_mtt_cleanup(dev, &mr->mtt); return 0; } int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) { int ret; ret = mlx4_mr_free_reserved(dev, mr); if (ret) return ret; if (mr->enabled) mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key)); mlx4_mpt_release(dev, key_to_hw_index(mr->key)); return 0; } EXPORT_SYMBOL_GPL(mlx4_mr_free); int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) { struct mlx4_cmd_mailbox *mailbox; struct mlx4_mpt_entry *mpt_entry; int err; err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key)); if (err) return err; mailbox = mlx4_alloc_cmd_mailbox(dev); if 
(IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto err_table; } mpt_entry = mailbox->buf; memset(mpt_entry, 0, sizeof *mpt_entry); mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO | MLX4_MPT_FLAG_REGION | mr->access); mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key)); mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV); mpt_entry->start = cpu_to_be64(mr->iova); mpt_entry->length = cpu_to_be64(mr->size); mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); if (mr->mtt.order < 0) { mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); mpt_entry->mtt_addr = 0; } else { mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt)); } if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { /* fast register MR in free state */ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | MLX4_MPT_PD_FLAG_RAE); mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); } else { mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); } err = mlx4_SW2HW_MPT(dev, mailbox, key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); if (err) { mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); goto err_cmd; } mr->enabled = MLX4_MPT_EN_HW; mlx4_free_cmd_mailbox(dev, mailbox); return 0; err_cmd: mlx4_free_cmd_mailbox(dev, mailbox); err_table: mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key)); return err; } EXPORT_SYMBOL_GPL(mlx4_mr_enable); static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) { struct mlx4_priv *priv = mlx4_priv(dev); __be64 *mtts; dma_addr_t dma_handle; int i; mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset + start_index, &dma_handle); if (!mtts) return -ENOMEM; dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE); for (i = 0; i < npages; ++i) mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); dma_sync_single_for_device(&dev->pdev->dev, dma_handle, npages * sizeof (u64), 
DMA_TO_DEVICE); return 0; } int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) { int err = 0; int chunk; int mtts_per_page; int max_mtts_first_page; /* compute how may mtts fit in the first page */ mtts_per_page = PAGE_SIZE / sizeof(u64); max_mtts_first_page = mtts_per_page - (mtt->offset + start_index) % mtts_per_page; chunk = min_t(int, max_mtts_first_page, npages); while (npages > 0) { err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); if (err) return err; npages -= chunk; start_index += chunk; page_list += chunk; chunk = min_t(int, mtts_per_page, npages); } return err; } int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) { struct mlx4_cmd_mailbox *mailbox = NULL; __be64 *inbox = NULL; int chunk; int err = 0; int i; if (mtt->order < 0) return -EINVAL; if (mlx4_is_mfunc(dev)) { mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); inbox = mailbox->buf; while (npages > 0) { chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2, npages); inbox[0] = cpu_to_be64(mtt->offset + start_index); inbox[1] = 0; for (i = 0; i < chunk; ++i) inbox[i + 2] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); err = mlx4_WRITE_MTT(dev, mailbox, chunk); if (err) { mlx4_free_cmd_mailbox(dev, mailbox); return err; } npages -= chunk; start_index += chunk; page_list += chunk; } mlx4_free_cmd_mailbox(dev, mailbox); return err; } return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list); } EXPORT_SYMBOL_GPL(mlx4_write_mtt); int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_buf *buf) { u64 *page_list; int err; int i; page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL); if (!page_list) return -ENOMEM; for (i = 0; i < buf->npages; ++i) if (buf->nbufs == 1) page_list[i] = buf->direct.map + (i << buf->page_shift); else page_list[i] = buf->page_list[i].map; err = mlx4_write_mtt(dev, 
mtt, 0, buf->npages, page_list); kfree(page_list); return err; } EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, struct mlx4_mw *mw) { u32 index; if ((type == MLX4_MW_TYPE_1 && !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) || (type == MLX4_MW_TYPE_2 && !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN))) return -ENOTSUPP; index = mlx4_mpt_reserve(dev); if (index == -1) return -ENOMEM; mw->key = hw_index_to_key(index); mw->pd = pd; mw->type = type; mw->enabled = MLX4_MPT_DISABLED; return 0; } EXPORT_SYMBOL_GPL(mlx4_mw_alloc); int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw) { struct mlx4_cmd_mailbox *mailbox; struct mlx4_mpt_entry *mpt_entry; int err; err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key)); if (err) return err; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto err_table; } mpt_entry = mailbox->buf; memset(mpt_entry, 0, sizeof(*mpt_entry)); /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned * off, thus creating a memory window and not a memory region. 
*/ mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key)); mpt_entry->pd_flags = cpu_to_be32(mw->pd); if (mw->type == MLX4_MW_TYPE_2) { mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP); mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV); } err = mlx4_SW2HW_MPT(dev, mailbox, key_to_hw_index(mw->key) & (dev->caps.num_mpts - 1)); if (err) { mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); goto err_cmd; } mw->enabled = MLX4_MPT_EN_HW; mlx4_free_cmd_mailbox(dev, mailbox); return 0; err_cmd: mlx4_free_cmd_mailbox(dev, mailbox); err_table: mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key)); return err; } EXPORT_SYMBOL_GPL(mlx4_mw_enable); void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw) { int err; if (mw->enabled == MLX4_MPT_EN_HW) { err = mlx4_HW2SW_MPT(dev, NULL, key_to_hw_index(mw->key) & (dev->caps.num_mpts - 1)); if (err) mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err); mw->enabled = MLX4_MPT_EN_SW; } if (mw->enabled) mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key)); mlx4_mpt_release(dev, key_to_hw_index(mw->key)); } EXPORT_SYMBOL_GPL(mlx4_mw_free); int mlx4_init_mr_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_mr_table *mr_table = &priv->mr_table; int err; if (!is_power_of_2(dev->caps.num_mpts)) return -EINVAL; /* Nothing to do for slaves - all MR handling is forwarded * to the master */ if (mlx4_is_slave(dev)) return 0; err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, ~0, dev->caps.reserved_mrws, 0); if (err) return err; err = mlx4_buddy_init(&mr_table->mtt_buddy, ilog2((u32)dev->caps.num_mtts / (1 << log_mtts_per_seg))); if (err) goto err_buddy; if (dev->caps.reserved_mtts) { priv->reserved_mtts = mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)); if (priv->reserved_mtts < 0) { mlx4_warn(dev, "MTT table of order %u is too small.\n", mr_table->mtt_buddy.max_order); err = -ENOMEM; goto err_reserve_mtts; } } return 0; 
err_reserve_mtts: mlx4_buddy_cleanup(&mr_table->mtt_buddy); err_buddy: mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); return err; } void mlx4_cleanup_mr_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_mr_table *mr_table = &priv->mr_table; if (mlx4_is_slave(dev)) return; if (priv->reserved_mtts >= 0) mlx4_free_mtt_range(dev, priv->reserved_mtts, fls(dev->caps.reserved_mtts - 1)); mlx4_buddy_cleanup(&mr_table->mtt_buddy); mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); } static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova) { int i, page_mask; if (npages > fmr->max_pages) return -EINVAL; page_mask = (1 << fmr->page_shift) - 1; /* We are getting page lists, so va must be page aligned. */ if (iova & page_mask) return -EINVAL; /* Trust the user not to pass misaligned data in page_list */ if (0) for (i = 0; i < npages; ++i) { if (page_list[i] & ~page_mask) return -EINVAL; } if (fmr->maps >= fmr->max_maps) return -EINVAL; return 0; } int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova, u32 *lkey, u32 *rkey) { u32 key; int i, err; err = mlx4_check_fmr(fmr, page_list, npages, iova); if (err) return err; ++fmr->maps; key = key_to_hw_index(fmr->mr.key); key += dev->caps.num_mpts; *lkey = *rkey = fmr->mr.key = hw_index_to_key(key); *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; /* Make sure MPT status is visible before writing MTT entries */ wmb(); dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, npages * sizeof(u64), DMA_TO_DEVICE); for (i = 0; i < npages; ++i) fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle, npages * sizeof(u64), DMA_TO_DEVICE); fmr->mpt->key = cpu_to_be32(key); fmr->mpt->lkey = cpu_to_be32(key); fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift)); fmr->mpt->start = cpu_to_be64(iova); /* Make MTT entries are visible before setting MPT status */ 
wmb(); *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW; /* Make sure MPT status is visible before consumer can use FMR */ wmb(); return 0; } EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr); int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, int max_maps, u8 page_shift, struct mlx4_fmr *fmr) { struct mlx4_priv *priv = mlx4_priv(dev); int err = -ENOMEM; if (max_maps > dev->caps.max_fmr_maps) return -EINVAL; if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) return -EINVAL; /* All MTTs must fit in the same page */ if (max_pages * sizeof *fmr->mtts > PAGE_SIZE) return -EINVAL; fmr->page_shift = page_shift; fmr->max_pages = max_pages; fmr->max_maps = max_maps; fmr->maps = 0; err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages, page_shift, &fmr->mr); if (err) return err; fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, fmr->mr.mtt.offset, &fmr->dma_handle); if (!fmr->mtts) { err = -ENOMEM; goto err_free; } return 0; err_free: (void) mlx4_mr_free(dev, &fmr->mr); return err; } EXPORT_SYMBOL_GPL(mlx4_fmr_alloc); int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) { struct mlx4_priv *priv = mlx4_priv(dev); int err; err = mlx4_mr_enable(dev, &fmr->mr); if (err) return err; fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table, key_to_hw_index(fmr->mr.key), NULL); if (!fmr->mpt) return -ENOMEM; return 0; } EXPORT_SYMBOL_GPL(mlx4_fmr_enable); void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u32 *lkey, u32 *rkey) { struct mlx4_cmd_mailbox *mailbox; int err; if (!fmr->maps) return; fmr->maps = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox" " failed (%d)\n", err); return; } err = mlx4_HW2SW_MPT(dev, NULL, key_to_hw_index(fmr->mr.key) & (dev->caps.num_mpts - 1)); mlx4_free_cmd_mailbox(dev, mailbox); if (err) { printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err); return; } fmr->mr.enabled = MLX4_MPT_EN_SW; } 
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) { int ret; if (fmr->maps) return -EBUSY; ret = mlx4_mr_free(dev, &fmr->mr); if (ret) return ret; fmr->mr.enabled = MLX4_MPT_DISABLED; return 0; } EXPORT_SYMBOL_GPL(mlx4_fmr_free); int mlx4_SYNC_TPT(struct mlx4_dev *dev) { return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, MLX4_CMD_NATIVE); } EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
gpl-2.0
rubiojr/surface3-kernel
fs/cifs/fscache.c
2277
6947
/* * fs/cifs/fscache.c - CIFS filesystem cache interface * * Copyright (c) 2010 Novell, Inc. * Author(s): Suresh Jayaraman <sjayaraman@suse.de> * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "fscache.h" #include "cifsglob.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) { server->fscache = fscache_acquire_cookie(cifs_fscache_netfs.primary_index, &cifs_fscache_server_index_def, server, true); cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, server, server->fscache); } void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) { cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, server, server->fscache); fscache_relinquish_cookie(server->fscache, 0); server->fscache = NULL; } void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) { struct TCP_Server_Info *server = tcon->ses->server; tcon->fscache = fscache_acquire_cookie(server->fscache, &cifs_fscache_super_index_def, tcon, true); cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, server->fscache, tcon->fscache); } void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) { cifs_dbg(FYI, "%s: (0x%p)\n", __func__, tcon->fscache); fscache_relinquish_cookie(tcon->fscache, 0); tcon->fscache = NULL; } static void cifs_fscache_enable_inode_cookie(struct inode *inode) { 
struct cifsInodeInfo *cifsi = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); if (cifsi->fscache) return; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) { cifsi->fscache = fscache_acquire_cookie(tcon->fscache, &cifs_fscache_inode_object_def, cifsi, true); cifs_dbg(FYI, "%s: got FH cookie (0x%p/0x%p)\n", __func__, tcon->fscache, cifsi->fscache); } } void cifs_fscache_release_inode_cookie(struct inode *inode) { struct cifsInodeInfo *cifsi = CIFS_I(inode); if (cifsi->fscache) { cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache); fscache_relinquish_cookie(cifsi->fscache, 0); cifsi->fscache = NULL; } } static void cifs_fscache_disable_inode_cookie(struct inode *inode) { struct cifsInodeInfo *cifsi = CIFS_I(inode); if (cifsi->fscache) { cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache); fscache_uncache_all_inode_pages(cifsi->fscache, inode); fscache_relinquish_cookie(cifsi->fscache, 1); cifsi->fscache = NULL; } } void cifs_fscache_set_inode_cookie(struct inode *inode, struct file *filp) { if ((filp->f_flags & O_ACCMODE) != O_RDONLY) cifs_fscache_disable_inode_cookie(inode); else cifs_fscache_enable_inode_cookie(inode); } void cifs_fscache_reset_inode_cookie(struct inode *inode) { struct cifsInodeInfo *cifsi = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct fscache_cookie *old = cifsi->fscache; if (cifsi->fscache) { /* retire the current fscache cache and get a new one */ fscache_relinquish_cookie(cifsi->fscache, 1); cifsi->fscache = fscache_acquire_cookie( cifs_sb_master_tcon(cifs_sb)->fscache, &cifs_fscache_inode_object_def, cifsi, true); cifs_dbg(FYI, "%s: new cookie 0x%p oldcookie 0x%p\n", __func__, cifsi->fscache, old); } } int cifs_fscache_release_page(struct page *page, gfp_t gfp) { if (PageFsCache(page)) { struct inode *inode = page->mapping->host; struct cifsInodeInfo *cifsi = CIFS_I(inode); cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, page, 
cifsi->fscache); if (!fscache_maybe_release_page(cifsi->fscache, page, gfp)) return 0; } return 1; } static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx, int error) { cifs_dbg(FYI, "%s: (0x%p/%d)\n", __func__, page, error); if (!error) SetPageUptodate(page); unlock_page(page); } /* * Retrieve a page from FS-Cache */ int __cifs_readpage_from_fscache(struct inode *inode, struct page *page) { int ret; cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n", __func__, CIFS_I(inode)->fscache, page, inode); ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page, cifs_readpage_from_fscache_complete, NULL, GFP_KERNEL); switch (ret) { case 0: /* page found in fscache, read submitted */ cifs_dbg(FYI, "%s: submitted\n", __func__); return ret; case -ENOBUFS: /* page won't be cached */ case -ENODATA: /* page not in cache */ cifs_dbg(FYI, "%s: %d\n", __func__, ret); return 1; default: cifs_dbg(VFS, "unknown error ret = %d\n", ret); } return ret; } /* * Retrieve a set of pages from FS-Cache */ int __cifs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) { int ret; cifs_dbg(FYI, "%s: (0x%p/%u/0x%p)\n", __func__, CIFS_I(inode)->fscache, *nr_pages, inode); ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping, pages, nr_pages, cifs_readpage_from_fscache_complete, NULL, mapping_gfp_mask(mapping)); switch (ret) { case 0: /* read submitted to the cache for all pages */ cifs_dbg(FYI, "%s: submitted\n", __func__); return ret; case -ENOBUFS: /* some pages are not cached and can't be */ case -ENODATA: /* some pages are not cached */ cifs_dbg(FYI, "%s: no page\n", __func__); return 1; default: cifs_dbg(FYI, "unknown error ret = %d\n", ret); } return ret; } void __cifs_readpage_to_fscache(struct inode *inode, struct page *page) { int ret; cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n", __func__, CIFS_I(inode)->fscache, page, inode); ret = fscache_write_page(CIFS_I(inode)->fscache, page, 
GFP_KERNEL); if (ret != 0) fscache_uncache_page(CIFS_I(inode)->fscache, page); } void __cifs_fscache_readpages_cancel(struct inode *inode, struct list_head *pages) { cifs_dbg(FYI, "%s: (fsc: %p, i: %p)\n", __func__, CIFS_I(inode)->fscache, inode); fscache_readpages_cancel(CIFS_I(inode)->fscache, pages); } void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode) { struct cifsInodeInfo *cifsi = CIFS_I(inode); struct fscache_cookie *cookie = cifsi->fscache; cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, page, cookie); fscache_wait_on_page_write(cookie, page); fscache_uncache_page(cookie, page); }
gpl-2.0
Galland/rk3x_kernel_3.0.36
drivers/scsi/bfa/bfa_fcs.c
2533
33940
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * bfa_fcs.c BFA FCS main */ #include "bfad_drv.h" #include "bfa_fcs.h" #include "bfa_fcbuild.h" BFA_TRC_FILE(FCS, FCS); /* * FCS sub-modules */ struct bfa_fcs_mod_s { void (*attach) (struct bfa_fcs_s *fcs); void (*modinit) (struct bfa_fcs_s *fcs); void (*modexit) (struct bfa_fcs_s *fcs); }; #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } static struct bfa_fcs_mod_s fcs_modules[] = { { bfa_fcs_port_attach, NULL, NULL }, { bfa_fcs_uf_attach, NULL, NULL }, { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit, bfa_fcs_fabric_modexit }, }; /* * fcs_api BFA FCS API */ static void bfa_fcs_exit_comp(void *fcs_cbarg) { struct bfa_fcs_s *fcs = fcs_cbarg; struct bfad_s *bfad = fcs->bfad; complete(&bfad->comp); } /* * fcs_api BFA FCS API */ /* * fcs attach -- called once to initialize data structures at driver attach time */ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, bfa_boolean_t min_cfg) { int i; struct bfa_fcs_mod_s *mod; fcs->bfa = bfa; fcs->bfad = bfad; fcs->min_cfg = min_cfg; bfa->fcs = BFA_TRUE; fcbuild_init(); for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { mod = &fcs_modules[i]; if (mod->attach) mod->attach(fcs); } } /* * fcs initialization, called once after bfa initialization is complete */ void bfa_fcs_init(struct bfa_fcs_s *fcs) { int i, npbc_vports; struct bfa_fcs_mod_s *mod; struct 
bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS]; for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { mod = &fcs_modules[i]; if (mod->modinit) mod->modinit(fcs); } /* Initialize pbc vports */ if (!fcs->min_cfg) { npbc_vports = bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports); for (i = 0; i < npbc_vports; i++) bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]); } } /* * brief * FCS driver details initialization. * * param[in] fcs FCS instance * param[in] driver_info Driver Details * * return None */ void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, struct bfa_fcs_driver_info_s *driver_info) { fcs->driver_info = *driver_info; bfa_fcs_fabric_psymb_init(&fcs->fabric); } /* * brief * FCS instance cleanup and exit. * * param[in] fcs FCS instance * return None */ void bfa_fcs_exit(struct bfa_fcs_s *fcs) { struct bfa_fcs_mod_s *mod; int nmods, i; bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]); for (i = 0; i < nmods; i++) { mod = &fcs_modules[i]; if (mod->modexit) { bfa_wc_up(&fcs->wc); mod->modexit(fcs); } } bfa_wc_wait(&fcs->wc); } /* * Fabric module implementation. 
*/ #define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */ #define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ #define bfa_fcs_fabric_set_opertype(__fabric) do { \ if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \ == BFA_PORT_TOPOLOGY_P2P) \ (__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \ else \ (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \ } while (0) /* * forward declarations */ static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_delay(void *cbarg); static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_delete_comp(void *cbarg); static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len); static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len); static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t status, u32 rsp_len, u32 resid_len, struct fchs_s *rspfchs); static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s 
*fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); /* * Beginning state before fabric creation. */ static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CREATE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); bfa_fcs_fabric_init(fabric); bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg); break; case BFA_FCS_FABRIC_SM_LINK_UP: case BFA_FCS_FABRIC_SM_LINK_DOWN: break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Beginning state before fabric creation. */ static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_START: if (bfa_fcport_is_linkup(fabric->fcs->bfa)) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); } else bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); break; case BFA_FCS_FABRIC_SM_LINK_UP: case BFA_FCS_FABRIC_SM_LINK_DOWN: break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); bfa_wc_down(&fabric->fcs->wc); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Link is down, awaiting LINK UP event from port. This is also the * first state at fabric creation. 
*/ static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_UP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); break; case BFA_FCS_FABRIC_SM_RETRY_OP: break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * FLOGI is in progress, awaiting FLOGI reply. */ static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CONT_OP: bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; if (fabric->auth_reqd && fabric->is_auth) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth); bfa_trc(fabric->fcs, event); } else { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_notify_online(fabric); } break; case BFA_FCS_FABRIC_SM_RETRY_OP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry); bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer, bfa_fcs_fabric_delay, fabric, BFA_FCS_FABRIC_RETRY_DELAY); break; case BFA_FCS_FABRIC_SM_LOOPBACK: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_set_opertype(fabric); break; case BFA_FCS_FABRIC_SM_NO_FABRIC: fabric->fab_type = BFA_FCS_FABRIC_N2N; bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); bfa_fcs_fabric_notify_online(fabric); bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, 
bfa_fcs_fabric_sm_deleting); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_DELAYED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_timer_stop(&fabric->delay_timer); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_timer_stop(&fabric->delay_timer); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Authentication is in progress, awaiting authentication results. */ static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_AUTH_FAILED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_notify_online(fabric); break; case BFA_FCS_FABRIC_SM_PERF_EVFP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Authentication failed */ void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); 
bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Port is in loopback mode. */ void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * There is no attached fabric - private loop or NPort-to-NPort topology. */ static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; case BFA_FCS_FABRIC_SM_NO_FABRIC: bfa_trc(fabric->fcs, fabric->bb_credit); bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Fabric is online - normal operating state. 
*/
void
bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
			 enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
		bfa_fcs_fabric_notify_offline(fabric);
		break;

	case BFA_FCS_FABRIC_SM_DELETE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
		bfa_fcs_fabric_delete(fabric);
		break;

	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
		break;

	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
		/* Already online -- nothing more to do. */
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}

/*
 * Exchanging virtual fabric parameters.
 */
static void
bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
		       enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_CONT_OP:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done);
		break;

	case BFA_FCS_FABRIC_SM_ISOLATE:
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}

/*
 * EVFP exchange complete and VFT tagging is enabled.
 * Terminal state: events are only traced, none are handled.
 */
static void
bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
			    enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);
}

/*
 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
 * Logs the mismatch for the administrator; no recovery is attempted here.
 */
static void
bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
			   enum bfa_fcs_fabric_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad;
	char	pwwn_ptr[BFA_STRING_32];

	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);
	wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn);

	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"Port is isolated due to VF_ID mismatch. "
		"PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.",
		pwwn_ptr, fabric->fcs->port_vfid,
		fabric->event_arg.swp_vfid);
}

/*
 * Fabric is being deleted, awaiting vport delete completions.
 */
static void
bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
			   enum bfa_fcs_fabric_event event)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, event);

	switch (event) {
	case BFA_FCS_FABRIC_SM_DELCOMP:
		/* Last vport gone -- release the FCS-wide wait counter. */
		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
		bfa_wc_down(&fabric->fcs->wc);
		break;

	case BFA_FCS_FABRIC_SM_LINK_UP:
		break;

	case BFA_FCS_FABRIC_SM_LINK_DOWN:
		bfa_fcs_fabric_notify_offline(fabric);
		break;

	default:
		bfa_sm_fault(fabric->fcs, event);
	}
}

/*
 * fcs_fabric_private fabric private functions
 */

/*
 * Seed the base port configuration from the IOC attributes
 * (default role and the adapter's node/port WWNs).
 */
static void
bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
{
	struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;

	port_cfg->roles = BFA_LPORT_ROLE_FCP_IM;
	port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn;
	port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn;
}

/*
 * Port Symbolic Name Creation for base port.
*/
void
bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
{
	struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
	char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
	struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;

	bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);

	/*
	 * Symbolic name is "<model> | <drv ver> | <hostname> | <os> [| patch]",
	 * built up field-by-field and explicitly NUL-terminated at the end.
	 *
	 * NOTE(review): the strncat() calls pass a per-field *source* length,
	 * not the remaining space in sym_name -- safe only if the field sizes
	 * sum below BFA_SYMNAME_MAXLEN.  TODO confirm against the SZ macros.
	 */
	/* Model name/number */
	strncpy((char *)&port_cfg->sym_name, model,
		BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
	strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));

	/* Driver Version */
	strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
		BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
	strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));

	/* Host machine name */
	strncat((char *)&port_cfg->sym_name,
		(char *)driver_info->host_machine_name,
		BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
	strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));

	/*
	 * Host OS Info :
	 * If OS Patch Info is not there, do not truncate any bytes from the
	 * OS name string and instead copy the entire OS info string (64 bytes).
	 */
	if (driver_info->host_os_patch[0] == '\0') {
		strncat((char *)&port_cfg->sym_name,
			(char *)driver_info->host_os_name,
			BFA_FCS_OS_STR_LEN);
		strncat((char *)&port_cfg->sym_name,
			BFA_FCS_PORT_SYMBNAME_SEPARATOR,
			sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
	} else {
		strncat((char *)&port_cfg->sym_name,
			(char *)driver_info->host_os_name,
			BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
		strncat((char *)&port_cfg->sym_name,
			BFA_FCS_PORT_SYMBNAME_SEPARATOR,
			sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));

		/* Append host OS Patch Info */
		strncat((char *)&port_cfg->sym_name,
			(char *)driver_info->host_os_patch,
			BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
	}

	/* null terminate */
	port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
}

/*
 * bfa lps login completion callback
 *
 * Classifies the FLOGI result: accepted responses fall through to record
 * BB credit / fabric name and raise CONT_OP or NO_FABRIC; every error path
 * bumps a counter and raises RETRY_OP.
 */
void
bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
{
	struct bfa_fcs_fabric_s *fabric = uarg;

	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_trc(fabric->fcs, status);

	switch (status) {
	case BFA_STATUS_OK:
		fabric->stats.flogi_accepts++;
		break;

	case BFA_STATUS_INVALID_MAC:
		/* Only for CNA */
		fabric->stats.flogi_acc_err++;
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);

		return;

	case BFA_STATUS_EPROTOCOL:
		switch (fabric->lps->ext_status) {
		case BFA_EPROTO_BAD_ACCEPT:
			fabric->stats.flogi_acc_err++;
			break;

		case BFA_EPROTO_UNKNOWN_RSP:
			fabric->stats.flogi_unknown_rsp++;
			break;

		default:
			break;
		}
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);

		return;

	case BFA_STATUS_FABRIC_RJT:
		fabric->stats.flogi_rejects++;
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
		return;

	default:
		fabric->stats.flogi_rsp_err++;
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
		return;
	}

	fabric->bb_credit = fabric->lps->pr_bbcred;
	bfa_trc(fabric->fcs, fabric->bb_credit);

	if (!(fabric->lps->brcd_switch))
		fabric->fabric_name = fabric->lps->pr_nwwn;

	/*
	 * Check port type. It should be 1 = F-port.
	 */
	if (fabric->lps->fport) {
		fabric->bport.pid = fabric->lps->lp_pid;
		fabric->is_npiv = fabric->lps->npiv_en;
		fabric->is_auth = fabric->lps->auth_req;
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
	} else {
		/*
		 * Nport-2-Nport direct attached
		 */
		fabric->bport.port_topo.pn2n.rem_port_wwn =
			fabric->lps->pr_pwwn;
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
	}

	bfa_trc(fabric->fcs, fabric->bport.pid);
	bfa_trc(fabric->fcs, fabric->is_npiv);
	bfa_trc(fabric->fcs, fabric->is_auth);
}

/*
 * Allocate and send FLOGI.
 */
static void
bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
{
	struct bfa_s		*bfa = fabric->fcs->bfa;
	struct bfa_lport_cfg_s	*pcfg = &fabric->bport.port_cfg;
	u8			alpa = 0;

	/* In loop topology the assigned ALPA is carried in the FLOGI. */
	if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
		alpa = bfa_fcport_get_myalpa(bfa);

	bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
		      pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);

	fabric->stats.flogi_sent++;
}

/*
 * Propagate the online event to the base port first, then to all vports.
 */
static void
bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
{
	struct bfa_fcs_vport_s *vport;
	struct list_head	      *qe, *qen;

	bfa_trc(fabric->fcs, fabric->fabric_name);

	bfa_fcs_fabric_set_opertype(fabric);
	fabric->stats.fabric_onlines++;

	/*
	 * notify online event to base and then virtual ports
	 */
	bfa_fcs_lport_online(&fabric->bport);

	list_for_each_safe(qe, qen, &fabric->vport_q) {
		vport = (struct bfa_fcs_vport_s *) qe;
		bfa_fcs_vport_online(vport);
	}
}

/*
 * Propagate the offline event to vports first, then the base port,
 * and forget the learned fabric identity.
 */
static void
bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
{
	struct bfa_fcs_vport_s *vport;
	struct list_head	      *qe, *qen;

	bfa_trc(fabric->fcs, fabric->fabric_name);
	fabric->stats.fabric_offlines++;

	/*
	 * notify offline event first to vports and then base port.
	 */
	list_for_each_safe(qe, qen, &fabric->vport_q) {
		vport = (struct bfa_fcs_vport_s *) qe;
		bfa_fcs_vport_offline(vport);
	}

	bfa_fcs_lport_offline(&fabric->bport);

	fabric->fabric_name = 0;
	fabric->fabric_ip_addr[0] = 0;
}

/*
 * FLOGI retry timer callback -- feeds the DELAYED event into the SM.
 */
static void
bfa_fcs_fabric_delay(void *cbarg)
{
	struct bfa_fcs_fabric_s *fabric = cbarg;

	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
}

/*
 * Delete all vports and wait for vport delete completions.
 */
static void
bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric)
{
	struct bfa_fcs_vport_s *vport;
	struct list_head	      *qe, *qen;

	list_for_each_safe(qe, qen, &fabric->vport_q) {
		vport = (struct bfa_fcs_vport_s *) qe;
		bfa_fcs_vport_fcs_delete(vport);
	}

	bfa_fcs_lport_delete(&fabric->bport);
	bfa_wc_wait(&fabric->wc);
}

/*
 * Wait-counter completion -- all vports (and the base port) are gone.
 */
static void
bfa_fcs_fabric_delete_comp(void *cbarg)
{
	struct bfa_fcs_fabric_s *fabric = cbarg;

	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
}

/*
 *  fcs_fabric_public fabric public functions
 */

/*
 * Attach time initialization.
 */
void
bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
{
	struct bfa_fcs_fabric_s *fabric;

	fabric = &fcs->fabric;
	memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));

	/*
	 * Initialize base fabric.
	 */
	fabric->fcs = fcs;
	INIT_LIST_HEAD(&fabric->vport_q);
	INIT_LIST_HEAD(&fabric->vf_q);
	fabric->lps = bfa_lps_alloc(fcs->bfa);
	WARN_ON(!fabric->lps);

	/*
	 * Initialize fabric delete completion handler. Fabric deletion is
	 * complete when the last vport delete is complete.
	 */
	bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
	bfa_wc_up(&fabric->wc); /* For the base port */

	bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
	bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
}

/*
 * Module init -- kicks the fabric SM out of uninit.
 */
void
bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
{
	bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
	bfa_trc(fcs, 0);
}

/*
 * Module cleanup
 */
void
bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
{
	struct bfa_fcs_fabric_s *fabric;

	bfa_trc(fcs, 0);

	/*
	 * Cleanup base fabric.
	 */
	fabric = &fcs->fabric;
	bfa_lps_delete(fabric->lps);
	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
}

/*
 * Fabric module start -- kick starts FCS actions
 */
void
bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
{
	struct bfa_fcs_fabric_s *fabric;

	bfa_trc(fcs, 0);
	fabric = &fcs->fabric;
	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
}

/*
 *   Link up notification from BFA physical port module.
 */
void
bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
}

/*
 *   Link down notification from BFA physical port module.
 */
void
bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
{
	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
}

/*
 *   A child vport is being created in the fabric.
 *
 *   Call from vport module at vport creation. A list of base port and vports
 *   belonging to a fabric is maintained to propagate link events.
 *
 *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
 *   param[in] vport  - Vport being created.
 *
 *   @return None (always succeeds)
 */
void
bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
			struct bfa_fcs_vport_s *vport)
{
	/*
	 * - add vport to fabric's vport_q
	 */
	bfa_trc(fabric->fcs, fabric->vf_id);

	list_add_tail(&vport->qe, &fabric->vport_q);
	fabric->num_vports++;
	bfa_wc_up(&fabric->wc);
}

/*
 *   A child vport is being deleted from fabric.
 *
 *   Vport is being deleted.
*/
void
bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
			struct bfa_fcs_vport_s *vport)
{
	list_del(&vport->qe);
	fabric->num_vports--;
	bfa_wc_down(&fabric->wc);
}

/*
 *   Lookup for a vport within a fabric given its pwwn
 */
struct bfa_fcs_vport_s *
bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
{
	struct bfa_fcs_vport_s *vport;
	struct list_head	      *qe;

	list_for_each(qe, &fabric->vport_q) {
		vport = (struct bfa_fcs_vport_s *) qe;
		if (bfa_fcs_lport_get_pwwn(&vport->lport) == pwwn)
			return vport;
	}

	return NULL;
}

/*
 * Get OUI of the attached switch.
 *
 * Note : Use of this function should be avoided as much as possible.
 *        This function should be used only if there is any requirement
 *        to check for FOS version below 6.3.
 *        To check if the attached fabric is a brocade fabric, use
 *        bfa_lps_is_brcd_fabric() which works for FOS versions 6.3
 *        or above only.
 */
u16
bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
{
	wwn_t fab_nwwn;
	u8 *tmp;
	u16 oui;

	fab_nwwn = fabric->lps->pr_nwwn;

	/* OUI is taken from bytes 3-4 of the fabric node WWN. */
	tmp = (u8 *)&fab_nwwn;
	oui = (tmp[3] << 8) | tmp[4];

	return oui;
}

/*
 * Unsolicited frame receive handling.
 *
 * Routes a received frame to the base port, a matching vport, or the
 * fabric's own FLOGI/EVFP handling based on destination/source PID.
 */
void
bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
		       u16 len)
{
	u32	pid = fchs->d_id;
	struct bfa_fcs_vport_s *vport;
	struct list_head	      *qe;
	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
	struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd;

	bfa_trc(fabric->fcs, len);
	bfa_trc(fabric->fcs, pid);

	/*
	 * Look for our own FLOGI frames being looped back. This means an
	 * external loopback cable is in place. Our own FLOGI frames are
	 * sometimes looped back when switch port gets temporarily bypassed.
	 *
	 * NOTE(review): this check uses bfa_ntoh3b() while the next one uses
	 * bfa_hton3b() on the same constant -- presumably the two macros are
	 * symmetric byte-swaps; verify in the bfa headers.
	 */
	if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) &&
	    (els_cmd->els_code == FC_ELS_FLOGI) &&
	    (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) {
		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
		return;
	}

	/*
	 * FLOGI/EVFP exchanges should be consumed by base fabric.
	 */
	if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) {
		bfa_trc(fabric->fcs, pid);
		bfa_fcs_fabric_process_uf(fabric, fchs, len);
		return;
	}

	if (fabric->bport.pid == pid) {
		/*
		 * All authentication frames should be routed to auth
		 */
		bfa_trc(fabric->fcs, els_cmd->els_code);
		if (els_cmd->els_code == FC_ELS_AUTH) {
			bfa_trc(fabric->fcs, els_cmd->els_code);
			return;
		}

		bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs));
		bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
		return;
	}

	/*
	 * look for a matching local port ID
	 */
	list_for_each(qe, &fabric->vport_q) {
		vport = (struct bfa_fcs_vport_s *) qe;
		if (vport->lport.pid == pid) {
			bfa_fcs_lport_uf_recv(&vport->lport, fchs, len);
			return;
		}
	}

	/* No PID match -- fall back to the base port. */
	bfa_trc(fabric->fcs, els_cmd->els_code);
	bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
}

/*
 * Unsolicited frames to be processed by fabric.
 */
static void
bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
			  u16 len)
{
	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);

	bfa_trc(fabric->fcs, els_cmd->els_code);

	switch (els_cmd->els_code) {
	case FC_ELS_FLOGI:
		bfa_fcs_fabric_process_flogi(fabric, fchs, len);
		break;

	default:
		/*
		 * need to generate a LS_RJT
		 */
		break;
	}
}

/*
 *	Process	incoming FLOGI
 *
 * Handles a FLOGI from a directly-attached N-port: records the peer's
 * BB credit and port WWN, replies with a FLOGI ACC, and raises NO_FABRIC.
 */
static void
bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
			     struct fchs_s *fchs, u16 len)
{
	struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1);
	struct bfa_fcs_lport_s *bport = &fabric->bport;

	bfa_trc(fabric->fcs, fchs->s_id);

	fabric->stats.flogi_rcvd++;
	/*
	 * Check port type. It should be 0 = n-port.
	 */
	if (flogi->csp.port_type) {
		/*
		 * @todo: may need to send a LS_RJT
		 */
		bfa_trc(fabric->fcs, flogi->port_name);
		fabric->stats.flogi_rejected++;
		return;
	}

	fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
	bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
	bport->port_topo.pn2n.reply_oxid = fchs->ox_id;

	/*
	 * Send a Flogi Acc
	 */
	bfa_fcs_fabric_send_flogi_acc(fabric);
	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
}

/*
 * Build and transmit a FLOGI ACC for N-port-to-N-port attachment.
 */
static void
bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
{
	struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
	struct bfa_fcs_lport_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n;
	struct bfa_s	  *bfa = fabric->fcs->bfa;
	struct bfa_fcxp_s *fcxp;
	u16	reqlen;
	struct fchs_s	fchs;

	fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
	/*
	 * Do not expect this failure -- expect remote node to retry
	 */
	if (!fcxp)
		return;

	reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				    bfa_hton3b(FC_FABRIC_PORT),
				    n2n_port->reply_oxid, pcfg->pwwn,
				    pcfg->nwwn,
				    bfa_fcport_get_maxfrsize(bfa),
				    bfa_fcport_get_rx_bbcredit(bfa));

	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag,
		      BFA_FALSE, FC_CLASS_3,
		      reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
		      FC_MAX_PDUSZ, 0);
}

/*
 *   Flogi Acc completion callback.
 */
static void
bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
			     bfa_status_t status, u32 rsp_len,
			     u32 resid_len, struct fchs_s *rspfchs)
{
	struct bfa_fcs_fabric_s *fabric = cbarg;

	bfa_trc(fabric->fcs, status);
}

/*
 *
 * @param[in] fabric - fabric
 * @param[in] wwn_t - new fabric name
 *
 * @return - none
 */
void
bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
			       wwn_t fabric_name)
{
	struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad;
	char	pwwn_ptr[BFA_STRING_32];
	char	fwwn_ptr[BFA_STRING_32];

	bfa_trc(fabric->fcs, fabric_name);

	if (fabric->fabric_name == 0) {
		/*
		 * With BRCD switches, we don't get Fabric Name in FLOGI.
		 * Don't generate a fabric name change event in this case.
		 */
		fabric->fabric_name = fabric_name;
	} else {
		/* Name change -- log old port / new fabric identity. */
		fabric->fabric_name = fabric_name;
		wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport));
		wwn2str(fwwn_ptr,
			bfa_fcs_lport_get_fabric_name(&fabric->bport));
		BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
			"Base port WWN = %s Fabric WWN = %s\n",
			pwwn_ptr, fwwn_ptr);
	}
}

/*
 *	Returns FCS vf structure for a given vf_id.
 *
 *	param[in]	vf_id - VF_ID
 *
 *	return
 *	If lookup succeeds, retuns fcs vf object, otherwise returns NULL
 */
bfa_fcs_vf_t   *
bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
{
	bfa_trc(fcs, vf_id);
	/* Only the base fabric (FC_VF_ID_NULL) is supported here. */
	if (vf_id == FC_VF_ID_NULL)
		return &fcs->fabric;

	return NULL;
}

/*
 * BFA FCS PPORT ( physical port)
 */

/*
 * Translates BFA physical-port link events into fabric SM events.
 */
static void
bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event)
{
	struct bfa_fcs_s      *fcs = cbarg;

	bfa_trc(fcs, event);

	switch (event) {
	case BFA_PORT_LINKUP:
		bfa_fcs_fabric_link_up(&fcs->fabric);
		break;

	case BFA_PORT_LINKDOWN:
		bfa_fcs_fabric_link_down(&fcs->fabric);
		break;

	default:
		WARN_ON(1);
	}
}

/*
 * Register the FCS link-event handler with the physical port module.
 */
void
bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
{
	bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
}

/*
 * BFA FCS UF ( Unsolicited Frames)
 */

/*
 * BFA callback for unsolicited frame receive handler.
 *
 * @param[in]		cbarg		callback arg for receive handler
 * @param[in]		uf		unsolicited frame descriptor
 *
 * @return None
 */
static void
bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
{
	struct bfa_fcs_s	*fcs = (struct bfa_fcs_s *) cbarg;
	struct fchs_s	*fchs = bfa_uf_get_frmbuf(uf);
	u16	len = bfa_uf_get_frmlen(uf);
	struct fc_vft_s *vft;
	struct bfa_fcs_fabric_s *fabric;

	/*
	 * check for VFT header
	 */
	if (fchs->routing == FC_RTG_EXT_HDR &&
	    fchs->cat_info == FC_CAT_VFT_HDR) {
		bfa_stats(fcs, uf.tagged);
		vft = bfa_uf_get_frmbuf(uf);
		if (fcs->port_vfid == vft->vf_id)
			fabric = &fcs->fabric;
		else
			fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);

		/*
		 * drop frame if vfid is unknown
		 */
		if (!fabric) {
			WARN_ON(1);
			bfa_stats(fcs, uf.vfid_unknown);
			bfa_uf_free(uf);
			return;
		}

		/*
		 * skip vft header
		 */
		fchs = (struct fchs_s *) (vft + 1);
		len -= sizeof(struct fc_vft_s);

		bfa_trc(fcs, vft->vf_id);
	} else {
		bfa_stats(fcs, uf.untagged);
		fabric = &fcs->fabric;
	}

	bfa_trc(fcs, ((u32 *) fchs)[0]);
	bfa_trc(fcs, ((u32 *) fchs)[1]);
	bfa_trc(fcs, ((u32 *) fchs)[2]);
	bfa_trc(fcs, ((u32 *) fchs)[3]);
	bfa_trc(fcs, ((u32 *) fchs)[4]);
	bfa_trc(fcs, ((u32 *) fchs)[5]);
	bfa_trc(fcs, len);

	bfa_fcs_fabric_uf_recv(fabric, fchs, len);
	bfa_uf_free(uf);
}

/*
 * Register the FCS unsolicited-frame handler with the BFA UF module.
 */
void
bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
{
	bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
}
gpl-2.0
thicklizard/Komodo1
drivers/scsi/bfa/bfa_core.c
2533
30970
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_ctreg.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcpim,
	NULL
};

/*
 * Message handlers for various modules.
 * Indexed by BFI message class; unclaimed classes route to
 * bfa_isr_unhandled (which WARNs).
 */
static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itnim_isr,		/* BFI_MC_ITNIM */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};

/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC   */
	NULL,		/* BFI_MC_DIAG  */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE   */
	NULL,		/* BFI_MC_PORT  */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};

/*
 * Claim DMA memory for the port module and attach it to the IOC.
 */
static void
bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
{
	struct bfa_port_s	*port = &bfa->modules.port;
	u32			dm_len;
	u8			*dm_kva;
	u64			dm_pa;

	dm_len = bfa_port_meminfo();
	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa  = bfa_meminfo_dma_phys(mi);

	memset(port, 0, sizeof(struct bfa_port_s));
	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, dm_kva, dm_pa);

	/* Advance the meminfo cursors past the claimed region. */
	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
}

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
};

#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256

#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16

/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;

/*
 * BFA Interrupt handling functions
 */

/*
 * Re-drive requests that were queued waiting for space in request queue qid.
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}

/*
 * Catch-all MSI-X vector handler -- falls back to INTx-style polling.
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}

/*
 * Legacy (INTx) interrupt handler: services RME completion queues, then CPE
 * queues, then any remaining error/mailbox bits.  Returns BFA_FALSE if the
 * interrupt was not ours.
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_RME_Q0 << queue))
			bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_CPE_Q0 << queue))
			bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}

/*
 * Unmask the interrupt sources owned by this PCI function and install
 * the MSI-X handlers.  Queue ownership depends on the PCI function number.
 */
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 intr_unmask;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_install(bfa);
	intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
		       __HFN_INT_LL_HALT);

	if (pci_func == 0)
		intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
				__HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
				__HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
				__HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
				__HFN_INT_MBOX_LPU0);
	else
		intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
				__HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
				__HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
				__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
				__HFN_INT_MBOX_LPU1);

	writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
	writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~intr_unmask;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

/*
 * Mask all interrupts and remove the MSI-X handlers.
 */
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}

/*
 * Request-queue interrupt: ack the queue and resume any waiters.
 */
void
bfa_msix_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

/*
 * Default handler for message classes with no registered ISR --
 * traces the offending header and WARNs.
 */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}

/*
 * Response-queue interrupt: drain completions from CI to PI, dispatching
 * each message by class, then update the consumer index.
 */
void
bfa_msix_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	if (bfa->rme_process) {
		while (ci != pi) {
			m = bfa_rspq_elem(bfa, qid, ci);
			bfa_isrs[m->mhdr.msg_class] (bfa, m);
			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
		}
	}

	/*
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	mmiowb();

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

/*
 * LPU error / mailbox interrupt handler.
 */
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
		bfa_ioc_mbox_isr(&bfa->ioc);

	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);

	if (intr) {
		if (intr & __HFN_INT_LL_HALT) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (intr & __HFN_INT_ERR_PSS) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrups are shared so driver's interrupt handler is
			 * still called even though it is already masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			curr_value &= __PSS_ERR_STATUS_SET;
			writel(curr_value,
				bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}

/*
 * BFA IOC FC related functions
 */

/*
 *  BFA IOC private functions
 */

/*
 * Accumulate DMA memory needed for the CQs (queue bodies + shadow CI/PI).
 */
static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	int i, per_reqq_sz, per_rspq_sz;

	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	/*
	 * Calculate CQ size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		*dm_len = *dm_len + per_reqq_sz;
		*dm_len = *dm_len + per_rspq_sz;
	}

	/*
	 * Calculate Shadow CI/PI size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++)
		*dm_len += (2 * BFA_CACHELINE_SZ);
}

/*
 * Accumulate DMA memory needed for the firmware config request/response pages.
 */
static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	*dm_len +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	*dm_len +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
}

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}

/*
 * Select the chip-specific (Catapult vs. Crossbow) hardware callbacks
 * and initialize IOCFC bookkeeping.
 */
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}

/*
 * Carve up the pre-sized DMA region: IOC attributes, CQs, shadow CI/PI,
 * config request/response pages, and optional firmware-trace debug memory.
 * Must mirror the sizing done by bfa_iocfc_cqs_sz()/bfa_iocfc_fw_cfg_sz().
 */
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8	       *dm_kva;
	u64	dm_pa;
	int		i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
	int		dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
	dm_pa  += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

	dm_kva +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
	dm_pa +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);

	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}

/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int		i;

	bfa->rme_process = BFA_TRUE;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int		i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);
}

/*
 * Init completion callback -- reports OK/FAILED to the Linux shim
 * depending on whether firmware configuration finished.
 */
static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s	*bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else {
		if (bfa->iocfc.cfgdone)
			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
	}
}

/*
 * Stop completion callback -- wakes the waiter in the Linux shim.
 */
static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s  *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
	else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

/*
 * Disable completion callback -- wakes the waiter in the Linux shim.
 */
static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s  *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s		*iocfc	 = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s	*cfgrsp	 = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s	*fwcfg	 = &cfgrsp->fwcfg;

	/* num_cqs is a byte-wide field -- no endian swap needed
	 * (self-assignment kept to match the swap pattern below). */
	fwcfg->num_cqs	      = fwcfg->num_cqs;
	fwcfg->num_ioim_reqs  = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs  = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);

	iocfc->cfgdone = BFA_TRUE;

	/*
	 * Configuration is complete - initialize/start submodules
	 */
	bfa_fcport_init(bfa);

	if (iocfc->action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
	else
		bfa_iocfc_start_submod(bfa);
}

/*
 * Reset all CQ consumer/producer indices to zero.
 */
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int		q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}

/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s	*bfa = bfa_arg;

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}

/*
 * Notify sub-modules of hardware failure.
*/ static void bfa_iocfc_hbfail_cbfn(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; bfa->rme_process = BFA_FALSE; bfa_isr_disable(bfa); bfa_iocfc_disable_submod(bfa); if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, bfa); } /* * Actions on chip-reset completion. */ static void bfa_iocfc_reset_cbfn(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; bfa_iocfc_reset_queues(bfa); bfa_isr_enable(bfa); } /* * Query IOC memory requirement information. */ void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, u32 *dm_len) { /* dma memory for IOC */ *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); bfa_iocfc_fw_cfg_sz(cfg, dm_len); bfa_iocfc_cqs_sz(cfg, dm_len); *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0; } /* * Query IOC memory requirement information. */ void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) { int i; struct bfa_ioc_s *ioc = &bfa->ioc; bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn; bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn; bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn; bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn; ioc->trcmod = bfa->trcmod; bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); /* * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC. */ if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC) bfa_ioc_set_fcmode(&bfa->ioc); bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC); bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); bfa_iocfc_mem_claim(bfa, cfg, meminfo); INIT_LIST_HEAD(&bfa->timer_mod.timer_q); INIT_LIST_HEAD(&bfa->comp_q); for (i = 0; i < BFI_IOC_MAX_CQS; i++) INIT_LIST_HEAD(&bfa->reqq_waitq[i]); } /* * Query IOC memory requirement information. 
*/ void bfa_iocfc_init(struct bfa_s *bfa) { bfa->iocfc.action = BFA_IOCFC_ACT_INIT; bfa_ioc_enable(&bfa->ioc); } /* * IOC start called from bfa_start(). Called to start IOC operations * at driver instantiation for this instance. */ void bfa_iocfc_start(struct bfa_s *bfa) { if (bfa->iocfc.cfgdone) bfa_iocfc_start_submod(bfa); } /* * IOC stop called from bfa_stop(). Called only when driver is unloaded * for this instance. */ void bfa_iocfc_stop(struct bfa_s *bfa) { bfa->iocfc.action = BFA_IOCFC_ACT_STOP; bfa->rme_process = BFA_FALSE; bfa_ioc_disable(&bfa->ioc); } void bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m) { struct bfa_s *bfa = bfaarg; struct bfa_iocfc_s *iocfc = &bfa->iocfc; union bfi_iocfc_i2h_msg_u *msg; msg = (union bfi_iocfc_i2h_msg_u *) m; bfa_trc(bfa, msg->mh.msg_id); switch (msg->mh.msg_id) { case BFI_IOCFC_I2H_CFG_REPLY: iocfc->cfg_reply = &msg->cfg_reply; bfa_iocfc_cfgrsp(bfa); break; case BFI_IOCFC_I2H_UPDATEQ_RSP: iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); break; default: WARN_ON(1); } } void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce; attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ? be16_to_cpu(iocfc->cfginfo->intr_attr.delay) : be16_to_cpu(iocfc->cfgrsp->intr_attr.delay); attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ? 
be16_to_cpu(iocfc->cfginfo->intr_attr.latency) : be16_to_cpu(iocfc->cfgrsp->intr_attr.latency); attr->config = iocfc->cfg; } bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_set_intr_req_s *m; iocfc->cfginfo->intr_attr.coalesce = attr->coalesce; iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay); iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency); if (!bfa_iocfc_is_operational(bfa)) return BFA_STATUS_OK; m = bfa_reqq_next(bfa, BFA_REQQ_IOC); if (!m) return BFA_STATUS_DEVBUSY; bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, bfa_lpuid(bfa)); m->coalesce = iocfc->cfginfo->intr_attr.coalesce; m->delay = iocfc->cfginfo->intr_attr.delay; m->latency = iocfc->cfginfo->intr_attr.latency; bfa_trc(bfa, attr->delay); bfa_trc(bfa, attr->latency); bfa_reqq_produce(bfa, BFA_REQQ_IOC); return BFA_STATUS_OK; } void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); } /* * Enable IOC after it is disabled. */ void bfa_iocfc_enable(struct bfa_s *bfa) { bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, "IOC Enable"); bfa_ioc_enable(&bfa->ioc); } void bfa_iocfc_disable(struct bfa_s *bfa) { bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, "IOC Disable"); bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE; bfa->rme_process = BFA_FALSE; bfa_ioc_disable(&bfa->ioc); } bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa) { return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; } /* * Return boot target port wwns -- read from boot information in flash. 
*/ void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; int i; if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) { bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns); *nwwns = cfgrsp->pbc_cfg.nbluns; for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++) wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn; return; } *nwwns = cfgrsp->bootwwns.nwwns; memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn)); } int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport)); return cfgrsp->pbc_cfg.nvports; } /* * Use this function query the memory requirement of the BFA library. * This function needs to be called before bfa_attach() to get the * memory required of the BFA layer for a given driver configuration. * * This call will fail, if the cap is out of range compared to pre-defined * values within the BFA library * * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate * its configuration in this structure. * The default values for struct bfa_iocfc_cfg_s can be * fetched using bfa_cfg_get_default() API. * * If cap's boundary check fails, the library will use * the default bfa_cap_t values (and log a warning msg). * * @param[out] meminfo - pointer to bfa_meminfo_t. This content * indicates the memory type (see bfa_mem_type_t) and * amount of memory required. * * Driver should allocate the memory, populate the * starting address for each block and provide the same * structure as input parameter to bfa_attach() call. 
* * @return void * * Special Considerations: @note */ void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) { int i; u32 km_len = 0, dm_len = 0; WARN_ON((cfg == NULL) || (meminfo == NULL)); memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type = BFA_MEM_TYPE_KVA; meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type = BFA_MEM_TYPE_DMA; bfa_iocfc_meminfo(cfg, &km_len, &dm_len); for (i = 0; hal_mods[i]; i++) hal_mods[i]->meminfo(cfg, &km_len, &dm_len); dm_len += bfa_port_meminfo(); meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len; meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; } /* * Use this function to do attach the driver instance with the BFA * library. This function will not trigger any HW initialization * process (which will be done in bfa_init() call) * * This call will fail, if the cap is out of range compared to * pre-defined values within the BFA library * * @param[out] bfa Pointer to bfa_t. * @param[in] bfad Opaque handle back to the driver's IOC structure * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure * that was used in bfa_cfg_get_meminfo(). * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should * use the bfa_cfg_get_meminfo() call to * find the memory blocks required, allocate the * required memory and provide the starting addresses. 
* @param[in] pcidev pointer to struct bfa_pcidev_s * * @return * void * * Special Considerations: * * @note * */ void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) { int i; struct bfa_mem_elem_s *melem; bfa->fcs = BFA_FALSE; WARN_ON((cfg == NULL) || (meminfo == NULL)); /* * initialize all memory pointers for iterative allocation */ for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { melem = meminfo->meminfo + i; melem->kva_curp = melem->kva; melem->dma_curp = melem->dma; } bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev); for (i = 0; hal_mods[i]; i++) hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev); bfa_com_port_attach(bfa, meminfo); } /* * Use this function to delete a BFA IOC. IOC should be stopped (by * calling bfa_stop()) before this function call. * * @param[in] bfa - pointer to bfa_t. * * @return * void * * Special Considerations: * * @note */ void bfa_detach(struct bfa_s *bfa) { int i; for (i = 0; hal_mods[i]; i++) hal_mods[i]->detach(bfa); bfa_ioc_detach(&bfa->ioc); } void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q) { INIT_LIST_HEAD(comp_q); list_splice_tail_init(&bfa->comp_q, comp_q); } void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) { struct list_head *qe; struct list_head *qen; struct bfa_cb_qe_s *hcb_qe; list_for_each_safe(qe, qen, comp_q) { hcb_qe = (struct bfa_cb_qe_s *) qe; hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); } } void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) { struct list_head *qe; struct bfa_cb_qe_s *hcb_qe; while (!list_empty(comp_q)) { bfa_q_deq(comp_q, &qe); hcb_qe = (struct bfa_cb_qe_s *) qe; hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE); } } /* * Return the list of PCI vendor/device id lists supported by this * BFA instance. 
*/ void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) { static struct bfa_pciid_s __pciids[] = { {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P}, {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P}, {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT}, {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, }; *npciids = sizeof(__pciids) / sizeof(__pciids[0]); *pciids = __pciids; } /* * Use this function query the default struct bfa_iocfc_cfg_s value (compiled * into BFA layer). The OS driver can then turn back and overwrite entries that * have been configured by the user. * * @param[in] cfg - pointer to bfa_ioc_cfg_t * * @return * void * * Special Considerations: * note */ void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) { cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS; cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS; cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS; cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS; cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS; cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS; cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS; cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS; cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS; cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS; cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS; cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS; cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS; cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF; cfg->drvcfg.ioc_recover = BFA_FALSE; cfg->drvcfg.delay_comp = BFA_FALSE; } void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) { bfa_cfg_get_default(cfg); cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN; cfg->fwcfg.num_uf_bufs = BFA_UF_MIN; cfg->fwcfg.num_rports = BFA_RPORT_MIN; cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; cfg->drvcfg.min_cfg = BFA_TRUE; }
gpl-2.0
jdkoreclipse/android_kernel_htc_msm8960
arch/arm/mach-pxa/icontrol.c
2533
4842
/*
 * linux/arch/arm/mach-pxa/icontrol.c
 *
 * Support for the iControl and SafeTcam platforms from TMT Services
 * using the Embedian MXM-8x10 Computer on Module
 *
 * Copyright (C) 2009 TMT Services & Supplies (Pty) Ltd.
 *
 * 2010-01-21 Hennie van der Merve <hvdmerwe@tmtservies.co.za>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>

#include <mach/pxa320.h>
#include <mach/mxm8x10.h>

#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/can/platform/mcp251x.h>

#include "generic.h"

/* GPIOs driven as SPI chip-selects for the four MCP251x CAN controllers */
#define ICONTROL_MCP251x_nCS1	(15)
#define ICONTROL_MCP251x_nCS2	(16)
#define ICONTROL_MCP251x_nCS3	(17)
#define ICONTROL_MCP251x_nCS4	(24)

/* GPIOs wired to the MCP251x interrupt outputs */
#define ICONTROL_MCP251x_nIRQ1	(74)
#define ICONTROL_MCP251x_nIRQ2	(75)
#define ICONTROL_MCP251x_nIRQ3	(76)
#define ICONTROL_MCP251x_nIRQ4	(77)

/*
 * Per-chip PXA SSP transfer tuning. The four descriptors are identical
 * except for the GPIO used as chip-select.
 */
static struct pxa2xx_spi_chip mcp251x_chip_info1 = {
	.tx_threshold   = 8,
	.rx_threshold   = 128,
	.dma_burst_size = 8,
	.timeout        = 235,
	.gpio_cs        = ICONTROL_MCP251x_nCS1
};

static struct pxa2xx_spi_chip mcp251x_chip_info2 = {
	.tx_threshold   = 8,
	.rx_threshold   = 128,
	.dma_burst_size = 8,
	.timeout        = 235,
	.gpio_cs        = ICONTROL_MCP251x_nCS2
};

static struct pxa2xx_spi_chip mcp251x_chip_info3 = {
	.tx_threshold   = 8,
	.rx_threshold   = 128,
	.dma_burst_size = 8,
	.timeout        = 235,
	.gpio_cs        = ICONTROL_MCP251x_nCS3
};

static struct pxa2xx_spi_chip mcp251x_chip_info4 = {
	.tx_threshold   = 8,
	.rx_threshold   = 128,
	.dma_burst_size = 8,
	.timeout        = 235,
	.gpio_cs        = ICONTROL_MCP251x_nCS4
};

/*
 * Platform data shared by all four CAN controllers.
 * NOTE(review): 16E6 is a floating-point literal assigned to an integer
 * field (16 MHz crystal); it converts correctly but an integer literal
 * would be the conventional spelling -- confirm before changing.
 */
static struct mcp251x_platform_data mcp251x_info = {
	.oscillator_frequency = 16E6,
	.board_specific_setup = NULL,
	.power_enable         = NULL,
	.transceiver_enable   = NULL
};

/*
 * Four MCP2515 CAN controllers: two on SSP3 (spi bus 3) and two on
 * SSP4 (spi bus 4), chip-selects 0/1 on each bus.
 * NOTE(review): gpio_to_irq() is used in a static initializer; this
 * relies on it resolving to a compile-time constant on this platform --
 * confirm against the PXA gpio header.
 */
static struct spi_board_info mcp251x_board_info[] = {
	{
		.modalias        = "mcp2515",
		.max_speed_hz    = 6500000,
		.bus_num         = 3,
		.chip_select     = 0,
		.platform_data   = &mcp251x_info,
		.controller_data = &mcp251x_chip_info1,
		.irq             = gpio_to_irq(ICONTROL_MCP251x_nIRQ1)
	},
	{
		.modalias        = "mcp2515",
		.max_speed_hz    = 6500000,
		.bus_num         = 3,
		.chip_select     = 1,
		.platform_data   = &mcp251x_info,
		.controller_data = &mcp251x_chip_info2,
		.irq             = gpio_to_irq(ICONTROL_MCP251x_nIRQ2)
	},
	{
		.modalias        = "mcp2515",
		.max_speed_hz    = 6500000,
		.bus_num         = 4,
		.chip_select     = 0,
		.platform_data   = &mcp251x_info,
		.controller_data = &mcp251x_chip_info3,
		.irq             = gpio_to_irq(ICONTROL_MCP251x_nIRQ3)
	},
	{
		.modalias        = "mcp2515",
		.max_speed_hz    = 6500000,
		.bus_num         = 4,
		.chip_select     = 1,
		.platform_data   = &mcp251x_info,
		.controller_data = &mcp251x_chip_info4,
		.irq             = gpio_to_irq(ICONTROL_MCP251x_nIRQ4)
	}
};

/* SSP3/SSP4 master configuration: two chip-selects each, DMA enabled. */
static struct pxa2xx_spi_master pxa_ssp3_spi_master_info = {
	.clock_enable   = CKEN_SSP3,
	.num_chipselect = 2,
	.enable_dma     = 1
};

static struct pxa2xx_spi_master pxa_ssp4_spi_master_info = {
	.clock_enable   = CKEN_SSP4,
	.num_chipselect = 2,
	.enable_dma     = 1
};

struct platform_device pxa_spi_ssp3 = {
	.name = "pxa2xx-spi",
	.id   = 3,
	.dev  = {
		.platform_data = &pxa_ssp3_spi_master_info,
	}
};

struct platform_device pxa_spi_ssp4 = {
	.name = "pxa2xx-spi",
	.id   = 4,
	.dev  = {
		.platform_data = &pxa_ssp4_spi_master_info,
	}
};

static struct platform_device *icontrol_spi_devices[] __initdata = {
	&pxa_spi_ssp3,
	&pxa_spi_ssp4,
};

/* Multi-function pin setup for the CAN subsystem. */
static mfp_cfg_t mfp_can_cfg[] __initdata = {
	/* CAN CS lines */
	GPIO15_GPIO,
	GPIO16_GPIO,
	GPIO17_GPIO,
	GPIO24_GPIO,
	/* SPI (SSP3) lines */
	GPIO89_SSP3_SCLK,
	GPIO91_SSP3_TXD,
	GPIO92_SSP3_RXD,
	/* SPI (SSP4) lines */
	GPIO93_SSP4_SCLK,
	GPIO95_SSP4_TXD,
	GPIO96_SSP4_RXD,
	/* CAN nIRQ lines */
	GPIO74_GPIO | MFP_LPM_EDGE_RISE,
	GPIO75_GPIO | MFP_LPM_EDGE_RISE,
	GPIO76_GPIO | MFP_LPM_EDGE_RISE,
	GPIO77_GPIO | MFP_LPM_EDGE_RISE
};

/*
 * Configure the CAN-related pins, register the two SPI masters, then
 * register the four MCP2515 chips that hang off them.
 */
static void __init icontrol_can_init(void)
{
	pxa3xx_mfp_config(ARRAY_AND_SIZE(mfp_can_cfg));
	platform_add_devices(ARRAY_AND_SIZE(icontrol_spi_devices));
	spi_register_board_info(ARRAY_AND_SIZE(mcp251x_board_info));
}

/* Board init: bring up the MXM-8x10 CoM basics, then the CAN stack. */
static void __init icontrol_init(void)
{
	mxm_8x10_barebones_init();
	mxm_8x10_usb_host_init();
	mxm_8x10_mmc_init();

	icontrol_can_init();
}

MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
	.boot_params	= 0xa0000100,
	.map_io		= pxa3xx_map_io,
	.init_irq	= pxa3xx_init_irq,
	.timer		= &pxa_timer,
	.init_machine	= icontrol_init
MACHINE_END
gpl-2.0
ProfessorX/CIS508
Codes/linux-3.2.54/arch/x86/kernel/kdebugfs.c
3301
4121
/*
 * Architecture specific debugfs files
 *
 * Copyright (C) 2007, Intel Corp.
 *	Huang Ying <ying.huang@intel.com>
 *
 * This file is released under the GPLv2.
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/setup.h>

/* Root of the arch-specific debugfs hierarchy ("x86"); exported for other arch code. */
struct dentry *arch_debugfs_dir;
EXPORT_SYMBOL(arch_debugfs_dir);

#ifdef CONFIG_DEBUG_BOOT_PARAMS
/* Book-keeping for one boot-time setup_data blob exposed through debugfs. */
struct setup_data_node {
	u64 paddr;	/* physical address of the setup_data header */
	u32 type;	/* copy of setup_data.type, taken at init time */
	u32 len;	/* copy of setup_data.len (payload size in bytes) */
};

/*
 * Read handler for a setup_data "data" file: copies a window of the
 * blob's payload (which lives in physical memory, possibly highmem)
 * out to userspace.
 */
static ssize_t setup_data_read(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct setup_data_node *node = file->private_data;
	unsigned long remain;
	loff_t pos = *ppos;
	struct page *pg;
	void *p;
	u64 pa;

	if (pos < 0)
		return -EINVAL;

	/* EOF past the end of the payload. */
	if (pos >= node->len)
		return 0;

	/* Clamp the read so it never runs past the payload. */
	if (count > node->len - pos)
		count = node->len - pos;

	/* Payload starts immediately after the setup_data header. */
	pa = node->paddr + sizeof(struct setup_data) + pos;

	/*
	 * NOTE(review): only the page holding the LAST byte of the window
	 * is tested for highmem; this assumes the whole [pa, pa + count)
	 * range is on the same side of the lowmem boundary -- confirm.
	 */
	pg = pfn_to_page((pa + count - 1) >> PAGE_SHIFT);
	if (PageHighMem(pg)) {
		p = ioremap_cache(pa, count);
		if (!p)
			return -ENXIO;
	} else
		p = __va(pa);	/* lowmem is permanently mapped */

	remain = copy_to_user(user_buf, p, count);

	if (PageHighMem(pg))
		iounmap(p);

	if (remain)
		return -EFAULT;

	*ppos = pos + count;

	return count;
}

/* Stash the node (set up as i_private at creation) for the read handler. */
static int setup_data_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;

	return 0;
}

static const struct file_operations fops_setup_data = {
	.read		= setup_data_read,
	.open		= setup_data_open,
	.llseek		= default_llseek,
};

/*
 * Create one numbered debugfs directory ("0", "1", ...) containing a
 * "type" register view and a "data" file for a single setup_data blob.
 * On failure, tears down whatever it created and returns -ENOMEM.
 */
static int __init
create_setup_data_node(struct dentry *parent, int no,
		       struct setup_data_node *node)
{
	struct dentry *d, *type, *data;
	char buf[16];

	sprintf(buf, "%d", no);
	d = debugfs_create_dir(buf, parent);
	if (!d)
		return -ENOMEM;
	type = debugfs_create_x32("type", S_IRUGO, d, &node->type);
	if (!type)
		goto err_dir;
	data = debugfs_create_file("data", S_IRUGO, d, node, &fops_setup_data);
	if (!data)
		goto err_type;

	return 0;

err_type:
	debugfs_remove(type);
err_dir:
	debugfs_remove(d);
	return -ENOMEM;
}

/*
 * Walk the boot-time setup_data singly-linked list (physical pointers,
 * starting at boot_params.hdr.setup_data) and create one debugfs node
 * per entry under "setup_data/".
 *
 * NOTE(review): each 'node' allocation stays referenced by its debugfs
 * file for the lifetime of the fs and is never freed; on a mid-loop
 * error, previously created children (and their nodes) are left behind
 * because debugfs_remove() is not recursive -- confirm intent.
 */
static int __init
create_setup_data_nodes(struct dentry *parent)
{
	struct setup_data_node *node;
	struct setup_data *data;
	int error = -ENOMEM;
	struct dentry *d;
	struct page *pg;
	u64 pa_data;
	int no = 0;

	d = debugfs_create_dir("setup_data", parent);
	if (!d)
		return -ENOMEM;

	pa_data = boot_params.hdr.setup_data;

	while (pa_data) {
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			goto err_dir;

		/* Map the header (not the payload) if it lives in highmem. */
		pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
		if (PageHighMem(pg)) {
			data = ioremap_cache(pa_data, sizeof(*data));
			if (!data) {
				kfree(node);
				error = -ENXIO;
				goto err_dir;
			}
		} else
			data = __va(pa_data);

		node->paddr = pa_data;
		node->type = data->type;
		node->len = data->len;
		error = create_setup_data_node(d, no, node);
		/* Advance before unmapping: 'next' is read from the header. */
		pa_data = data->next;

		if (PageHighMem(pg))
			iounmap(data);
		if (error)
			goto err_dir;
		no++;
	}

	return 0;

err_dir:
	debugfs_remove(d);
	return error;
}

/* Exposes the whole boot_params struct verbatim as "boot_params/data". */
static struct debugfs_blob_wrapper boot_params_blob = {
	.data		= &boot_params,
	.size		= sizeof(boot_params),
};

/*
 * Build the "boot_params" debugfs tree: a version register, the raw
 * boot_params blob, and one subtree per setup_data entry. Unwinds in
 * reverse creation order on failure.
 */
static int __init boot_params_kdebugfs_init(void)
{
	struct dentry *dbp, *version, *data;
	int error = -ENOMEM;

	dbp = debugfs_create_dir("boot_params", NULL);
	if (!dbp)
		return -ENOMEM;

	version = debugfs_create_x16("version", S_IRUGO, dbp,
				     &boot_params.hdr.version);
	if (!version)
		goto err_dir;

	data = debugfs_create_blob("data", S_IRUGO, dbp,
				   &boot_params_blob);
	if (!data)
		goto err_version;

	error = create_setup_data_nodes(dbp);
	if (error)
		goto err_data;

	return 0;

err_data:
	debugfs_remove(data);
err_version:
	debugfs_remove(version);
err_dir:
	debugfs_remove(dbp);
	return error;
}
#endif /* CONFIG_DEBUG_BOOT_PARAMS */

/* Create the arch root ("x86") and, if configured, the boot_params tree. */
static int __init arch_kdebugfs_init(void)
{
	int error = 0;

	arch_debugfs_dir = debugfs_create_dir("x86", NULL);
	if (!arch_debugfs_dir)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_BOOT_PARAMS
	error = boot_params_kdebugfs_init();
#endif

	return error;
}
arch_initcall(arch_kdebugfs_init);
gpl-2.0
meefik/tinykernel-flo
arch/powerpc/platforms/pseries/eeh_pseries.c
3301
16297
/* * The file intends to implement the platform dependent EEH operations on pseries. * Actually, the pseries platform is built based on RTAS heavily. That means the * pseries platform dependent EEH operations will be built on RTAS calls. The functions * are devired from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has * been done. * * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011. * Copyright IBM Corporation 2001, 2005, 2006 * Copyright Dave Engebretsen & Todd Inglett 2001 * Copyright Linas Vepstas 2005, 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/atomic.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/init.h> #include <linux/list.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/rbtree.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <asm/eeh.h> #include <asm/eeh_event.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/ppc-pci.h> #include <asm/rtas.h> /* RTAS tokens */ static int ibm_set_eeh_option; static int ibm_set_slot_reset; static int ibm_read_slot_reset_state; static int ibm_read_slot_reset_state2; static int ibm_slot_error_detail; static int ibm_get_config_addr_info; static int ibm_get_config_addr_info2; static int ibm_configure_bridge; static int ibm_configure_pe; /* * Buffer for reporting slot-error-detail rtas calls. Its here * in BSS, and not dynamically alloced, so that it ends up in * RMO where RTAS can access it. */ static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX]; static DEFINE_SPINLOCK(slot_errbuf_lock); static int eeh_error_buf_size; /** * pseries_eeh_init - EEH platform dependent initialization * * EEH platform dependent initialization on pseries. 
*/ static int pseries_eeh_init(void) { /* figure out EEH RTAS function call tokens */ ibm_set_eeh_option = rtas_token("ibm,set-eeh-option"); ibm_set_slot_reset = rtas_token("ibm,set-slot-reset"); ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2"); ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state"); ibm_slot_error_detail = rtas_token("ibm,slot-error-detail"); ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); ibm_configure_pe = rtas_token("ibm,configure-pe"); ibm_configure_bridge = rtas_token ("ibm,configure-bridge"); /* necessary sanity check */ if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) { pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n", __func__); return -EINVAL; } else if (ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE) { pr_warning("%s: RTAS service <ibm, set-slot-reset> invalid\n", __func__); return -EINVAL; } else if (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE && ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) { pr_warning("%s: RTAS service <ibm,read-slot-reset-state2> and " "<ibm,read-slot-reset-state> invalid\n", __func__); return -EINVAL; } else if (ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE) { pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n", __func__); return -EINVAL; } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE && ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) { pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and " "<ibm,get-config-addr-info> invalid\n", __func__); return -EINVAL; } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) { pr_warning("%s: RTAS service <ibm,configure-pe> and " "<ibm,configure-bridge> invalid\n", __func__); return -EINVAL; } /* Initialize error log lock and size */ spin_lock_init(&slot_errbuf_lock); eeh_error_buf_size = rtas_token("rtas-error-log-max"); if (eeh_error_buf_size == 
RTAS_UNKNOWN_SERVICE) { pr_warning("%s: unknown EEH error log size\n", __func__); eeh_error_buf_size = 1024; } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { pr_warning("%s: EEH error log size %d exceeds the maximal %d\n", __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX); eeh_error_buf_size = RTAS_ERROR_LOG_MAX; } return 0; } /** * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable * @dn: device node * @option: operation to be issued * * The function is used to control the EEH functionality globally. * Currently, following options are support according to PAPR: * Enable EEH, Disable EEH, Enable MMIO and Enable DMA */ static int pseries_eeh_set_option(struct device_node *dn, int option) { int ret = 0; struct eeh_dev *edev; const u32 *reg; int config_addr; edev = of_node_to_eeh_dev(dn); /* * When we're enabling or disabling EEH functioality on * the particular PE, the PE config address is possibly * unavailable. Therefore, we have to figure it out from * the FDT node. */ switch (option) { case EEH_OPT_DISABLE: case EEH_OPT_ENABLE: reg = of_get_property(dn, "reg", NULL); config_addr = reg[0]; break; case EEH_OPT_THAW_MMIO: case EEH_OPT_THAW_DMA: config_addr = edev->config_addr; if (edev->pe_config_addr) config_addr = edev->pe_config_addr; break; default: pr_err("%s: Invalid option %d\n", __func__, option); return -EINVAL; } ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL, config_addr, BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid), option); return ret; } /** * pseries_eeh_get_pe_addr - Retrieve PE address * @dn: device node * * Retrieve the assocated PE address. Actually, there're 2 RTAS * function calls dedicated for the purpose. We need implement * it through the new function and then the old one. Besides, * you should make sure the config address is figured out from * FDT node before calling the function. * * It's notable that zero'ed return value means invalid PE config * address. 
*/ static int pseries_eeh_get_pe_addr(struct device_node *dn) { struct eeh_dev *edev; int ret = 0; int rets[3]; edev = of_node_to_eeh_dev(dn); if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) { /* * First of all, we need to make sure there has one PE * associated with the device. Otherwise, PE address is * meaningless. */ ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, edev->config_addr, BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid), 1); if (ret || (rets[0] == 0)) return 0; /* Retrieve the associated PE config address */ ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, edev->config_addr, BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid), 0); if (ret) { pr_warning("%s: Failed to get PE address for %s\n", __func__, dn->full_name); return 0; } return rets[0]; } if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) { ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets, edev->config_addr, BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid), 0); if (ret) { pr_warning("%s: Failed to get PE address for %s\n", __func__, dn->full_name); return 0; } return rets[0]; } return ret; } /** * pseries_eeh_get_state - Retrieve PE state * @dn: PE associated device node * @state: return value * * Retrieve the state of the specified PE. On RTAS compliant * pseries platform, there already has one dedicated RTAS function * for the purpose. It's notable that the associated PE config address * might be ready when calling the function. Therefore, endeavour to * use the PE config address if possible. Further more, there're 2 * RTAS calls for the purpose, we need to try the new one and back * to the old one if the new one couldn't work properly. 
*/ static int pseries_eeh_get_state(struct device_node *dn, int *state) { struct eeh_dev *edev; int config_addr; int ret; int rets[4]; int result; /* Figure out PE config address if possible */ edev = of_node_to_eeh_dev(dn); config_addr = edev->config_addr; if (edev->pe_config_addr) config_addr = edev->pe_config_addr; if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) { ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets, config_addr, BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid)); } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) { /* Fake PE unavailable info */ rets[2] = 0; ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets, config_addr, BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid)); } else { return EEH_STATE_NOT_SUPPORT; } if (ret) return ret; /* Parse the result out */ result = 0; if (rets[1]) { switch(rets[0]) { case 0: result &= ~EEH_STATE_RESET_ACTIVE; result |= EEH_STATE_MMIO_ACTIVE; result |= EEH_STATE_DMA_ACTIVE; break; case 1: result |= EEH_STATE_RESET_ACTIVE; result |= EEH_STATE_MMIO_ACTIVE; result |= EEH_STATE_DMA_ACTIVE; break; case 2: result &= ~EEH_STATE_RESET_ACTIVE; result &= ~EEH_STATE_MMIO_ACTIVE; result &= ~EEH_STATE_DMA_ACTIVE; break; case 4: result &= ~EEH_STATE_RESET_ACTIVE; result &= ~EEH_STATE_MMIO_ACTIVE; result &= ~EEH_STATE_DMA_ACTIVE; result |= EEH_STATE_MMIO_ENABLED; break; case 5: if (rets[2]) { if (state) *state = rets[2]; result = EEH_STATE_UNAVAILABLE; } else { result = EEH_STATE_NOT_SUPPORT; } default: result = EEH_STATE_NOT_SUPPORT; } } else { result = EEH_STATE_NOT_SUPPORT; } return result; } /** * pseries_eeh_reset - Reset the specified PE * @dn: PE associated device node * @option: reset option * * Reset the specified PE */ static int pseries_eeh_reset(struct device_node *dn, int option) { struct eeh_dev *edev; int config_addr; int ret; /* Figure out PE address */ edev = of_node_to_eeh_dev(dn); config_addr = edev->config_addr; if (edev->pe_config_addr) config_addr = edev->pe_config_addr; /* 
Reset PE through RTAS call */ ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, config_addr, BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid), option); /* If fundamental-reset not supported, try hot-reset */ if (option == EEH_RESET_FUNDAMENTAL && ret == -8) { ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, config_addr, BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid), EEH_RESET_HOT); } return ret; } /** * pseries_eeh_wait_state - Wait for PE state * @dn: PE associated device node * @max_wait: maximal period in microsecond * * Wait for the state of associated PE. It might take some time * to retrieve the PE's state. */ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait) { int ret; int mwait; /* * According to PAPR, the state of PE might be temporarily * unavailable. Under the circumstance, we have to wait * for indicated time determined by firmware. The maximal * wait time is 5 minutes, which is acquired from the original * EEH implementation. Also, the original implementation * also defined the minimal wait time as 1 second. */ #define EEH_STATE_MIN_WAIT_TIME (1000) #define EEH_STATE_MAX_WAIT_TIME (300 * 1000) while (1) { ret = pseries_eeh_get_state(dn, &mwait); /* * If the PE's state is temporarily unavailable, * we have to wait for the specified time. Otherwise, * the PE's state will be returned immediately. 
*/ if (ret != EEH_STATE_UNAVAILABLE) return ret; if (max_wait <= 0) { pr_warning("%s: Timeout when getting PE's state (%d)\n", __func__, max_wait); return EEH_STATE_NOT_SUPPORT; } if (mwait <= 0) { pr_warning("%s: Firmware returned bad wait value %d\n", __func__, mwait); mwait = EEH_STATE_MIN_WAIT_TIME; } else if (mwait > EEH_STATE_MAX_WAIT_TIME) { pr_warning("%s: Firmware returned too long wait value %d\n", __func__, mwait); mwait = EEH_STATE_MAX_WAIT_TIME; } max_wait -= mwait; msleep(mwait); } return EEH_STATE_NOT_SUPPORT; } /** * pseries_eeh_get_log - Retrieve error log * @dn: device node * @severity: temporary or permanent error log * @drv_log: driver log to be combined with retrieved error log * @len: length of driver log * * Retrieve the temporary or permanent error from the PE. * Actually, the error will be retrieved through the dedicated * RTAS call. */ static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_log, unsigned long len) { struct eeh_dev *edev; int config_addr; unsigned long flags; int ret; edev = of_node_to_eeh_dev(dn); spin_lock_irqsave(&slot_errbuf_lock, flags); memset(slot_errbuf, 0, eeh_error_buf_size); /* Figure out the PE address */ config_addr = edev->config_addr; if (edev->pe_config_addr) config_addr = edev->pe_config_addr; ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr, BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid), virt_to_phys(drv_log), len, virt_to_phys(slot_errbuf), eeh_error_buf_size, severity); if (!ret) log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0); spin_unlock_irqrestore(&slot_errbuf_lock, flags); return ret; } /** * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE * @dn: PE associated device node * * The function will be called to reconfigure the bridges included * in the specified PE so that the mulfunctional PE would be recovered * again. 
*/
static int pseries_eeh_configure_bridge(struct device_node *dn)
{
	struct eeh_dev *edev;
	int config_addr;
	int ret;

	/* Figure out the PE address */
	edev = of_node_to_eeh_dev(dn);
	config_addr = edev->config_addr;
	if (edev->pe_config_addr)
		config_addr = edev->pe_config_addr;

	/* Use new configure-pe function, if supported */
	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
				config_addr, BUID_HI(edev->phb->buid),
				BUID_LO(edev->phb->buid));
	} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
				config_addr, BUID_HI(edev->phb->buid),
				BUID_LO(edev->phb->buid));
	} else {
		/* Neither RTAS service is available */
		return -EFAULT;
	}

	if (ret)
		pr_warning("%s: Unable to configure bridge %d for %s\n",
			__func__, ret, dn->full_name);

	return ret;
}

/**
 * pseries_eeh_read_config - Read PCI config space
 * @dn: device node
 * @where: PCI address
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device
 */
static int pseries_eeh_read_config(struct device_node *dn, int where,
				   int size, u32 *val)
{
	struct pci_dn *pdn;

	pdn = PCI_DN(dn);

	/* Thin wrapper: config-space access goes through RTAS */
	return rtas_read_config(pdn, where, size, val);
}

/**
 * pseries_eeh_write_config - Write PCI config space
 * @dn: device node
 * @where: PCI address
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device
 */
static int pseries_eeh_write_config(struct device_node *dn, int where,
				    int size, u32 val)
{
	struct pci_dn *pdn;

	pdn = PCI_DN(dn);

	/* Thin wrapper: config-space access goes through RTAS */
	return rtas_write_config(pdn, where, size, val);
}

/* Platform-dependent EEH operations registered with the EEH core */
static struct eeh_ops pseries_eeh_ops = {
	.name			= "pseries",
	.init			= pseries_eeh_init,
	.set_option		= pseries_eeh_set_option,
	.get_pe_addr		= pseries_eeh_get_pe_addr,
	.get_state		= pseries_eeh_get_state,
	.reset			= pseries_eeh_reset,
	.wait_state		= pseries_eeh_wait_state,
	.get_log		= pseries_eeh_get_log,
	.configure_bridge	= pseries_eeh_configure_bridge,
	.read_config		= pseries_eeh_read_config,
	.write_config		= pseries_eeh_write_config
};

/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on pseries platform. This function should be
 * called before any EEH related functions.
 */
int __init eeh_pseries_init(void)
{
	return eeh_ops_register(&pseries_eeh_ops);
}
gpl-2.0
W4TCH0UT/zz_lettuce
kernel/configs.c
4325
2853
/* * kernel/configs.c * Echo the kernel .config file used to build the kernel * * Copyright (C) 2002 Khalid Aziz <khalid_aziz@hp.com> * Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net> * Copyright (C) 2002 Al Stone <ahs3@fc.hp.com> * Copyright (C) 2002 Hewlett-Packard Company * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <asm/uaccess.h> /**************************************************/ /* the actual current config file */ /* * Define kernel_config_data and kernel_config_data_size, which contains the * wrapped and compressed configuration file. 
The file is first compressed * with gzip and then bounded by two eight byte magic numbers to allow * extraction from a binary kernel image: * * IKCFG_ST * <image> * IKCFG_ED */ #define MAGIC_START "IKCFG_ST" #define MAGIC_END "IKCFG_ED" #include "config_data.h" #define MAGIC_SIZE (sizeof(MAGIC_START) - 1) #define kernel_config_data_size \ (sizeof(kernel_config_data) - 1 - MAGIC_SIZE * 2) #ifdef CONFIG_IKCONFIG_PROC static ssize_t ikconfig_read_current(struct file *file, char __user *buf, size_t len, loff_t * offset) { return simple_read_from_buffer(buf, len, offset, kernel_config_data + MAGIC_SIZE, kernel_config_data_size); } static const struct file_operations ikconfig_file_ops = { .owner = THIS_MODULE, .read = ikconfig_read_current, .llseek = default_llseek, }; static int __init ikconfig_init(void) { struct proc_dir_entry *entry; /* create the current config file */ entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL, &ikconfig_file_ops); if (!entry) return -ENOMEM; proc_set_size(entry, kernel_config_data_size); return 0; } static void __exit ikconfig_cleanup(void) { remove_proc_entry("config.gz", NULL); } module_init(ikconfig_init); module_exit(ikconfig_cleanup); #endif /* CONFIG_IKCONFIG_PROC */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Randy Dunlap"); MODULE_DESCRIPTION("Echo the kernel .config file used to build the kernel");
gpl-2.0
garyd9/linux_kernel_sgh-i317
drivers/gpu/drm/drm_cache.c
4581
2879
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

#if defined(CONFIG_X86)
/* Flush one page from the CPU cache, one cache line at a time. */
static void
drm_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	if (unlikely(page == NULL))
		return;

	/* NOTE(review): KM_USER0 kmap slots are from the pre-3.4 kmap API */
	page_virtual = kmap_atomic(page, KM_USER0);
	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);
	kunmap_atomic(page_virtual, KM_USER0);
}

/* clflush every page in the array, fenced by memory barriers. */
static void drm_cache_flush_clflush(struct page *pages[],
				    unsigned long num_pages)
{
	unsigned long i;

	/* mb() before/after order the clflushes against surrounding accesses */
	mb();
	for (i = 0; i < num_pages; i++)
		drm_clflush_page(*pages++);
	mb();
}

/* IPI handler: write back and invalidate the whole cache on each CPU. */
static void
drm_clflush_ipi_handler(void *null)
{
	wbinvd();
}
#endif

void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	/* Prefer the cheap per-line clflush when the CPU supports it */
	if (cpu_has_clflush) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	/* Fallback: full cache writeback+invalidate on every CPU via IPI */
	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");

#elif defined(__powerpc__)
	unsigned long i;
	for (i = 0; i < num_pages; i++) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page, KM_USER0);
		flush_dcache_range((unsigned long)page_virtual,
				   (unsigned long)page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual, KM_USER0);
	}
#else
	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
gpl-2.0
spock1104/android_kernel_zte_nex
drivers/base/node.c
4837
18033
/*
 * Basic Node interface support
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/swap.h>
#include <linux/slab.h>

/* The sysfs bus under which per-node devices appear (/sys/devices/system/node) */
static struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};

/*
 * Shared implementation for the cpumap/cpulist attributes:
 * type == 0 -> hex mask format, type != 0 -> human-readable list format.
 */
static ssize_t node_read_cpumap(struct device *dev, int type, char *buf)
{
	struct node *node_dev = to_node(dev);
	const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
	int len;

	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));

	len = type ?
		cpulist_scnprintf(buf, PAGE_SIZE-2, mask) :
		cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
	buf[len++] = '\n';
	buf[len] = '\0';
	return len;
}

static inline ssize_t node_read_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, 0, buf);
}
static inline ssize_t node_read_cpulist(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, 1, buf);
}

static DEVICE_ATTR(cpumap,  S_IRUGO, node_read_cpumask, NULL);
static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);

/* Convert pages to kB for the meminfo-style output below */
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int n;
	int nid = dev->id;
	struct sysinfo i;

	si_meminfo_node(&i, nid);

	/* Format string order must stay in lockstep with the argument list */
	n = sprintf(buf,
		       "Node %d MemTotal: %8lu kB\n"
		       "Node %d MemFree: %8lu kB\n"
		       "Node %d MemUsed: %8lu kB\n"
		       "Node %d Active: %8lu kB\n"
		       "Node %d Inactive: %8lu kB\n"
		       "Node %d Active(anon): %8lu kB\n"
		       "Node %d Inactive(anon): %8lu kB\n"
		       "Node %d Active(file): %8lu kB\n"
		       "Node %d Inactive(file): %8lu kB\n"
		       "Node %d Unevictable: %8lu kB\n"
		       "Node %d Mlocked: %8lu kB\n",
		       nid, K(i.totalram),
		       nid, K(i.freeram),
		       nid, K(i.totalram - i.freeram),
		       nid, K(node_page_state(nid, NR_ACTIVE_ANON) +
				node_page_state(nid, NR_ACTIVE_FILE)),
		       nid, K(node_page_state(nid, NR_INACTIVE_ANON) +
				node_page_state(nid, NR_INACTIVE_FILE)),
		       nid, K(node_page_state(nid, NR_ACTIVE_ANON)),
		       nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
		       nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
		       nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
		       nid, K(node_page_state(nid, NR_UNEVICTABLE)),
		       nid, K(node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	n += sprintf(buf + n,
		       "Node %d HighTotal: %8lu kB\n"
		       "Node %d HighFree: %8lu kB\n"
		       "Node %d LowTotal: %8lu kB\n"
		       "Node %d LowFree: %8lu kB\n",
		       nid, K(i.totalhigh),
		       nid, K(i.freehigh),
		       nid, K(i.totalram - i.totalhigh),
		       nid, K(i.freeram - i.freehigh));
#endif
	n += sprintf(buf + n,
		       "Node %d Dirty: %8lu kB\n"
		       "Node %d Writeback: %8lu kB\n"
		       "Node %d FilePages: %8lu kB\n"
		       "Node %d Mapped: %8lu kB\n"
		       "Node %d AnonPages: %8lu kB\n"
		       "Node %d Shmem: %8lu kB\n"
		       "Node %d KernelStack: %8lu kB\n"
		       "Node %d PageTables: %8lu kB\n"
		       "Node %d NFS_Unstable: %8lu kB\n"
		       "Node %d Bounce: %8lu kB\n"
		       "Node %d WritebackTmp: %8lu kB\n"
		       "Node %d Slab: %8lu kB\n"
		       "Node %d SReclaimable: %8lu kB\n"
		       "Node %d SUnreclaim: %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       "Node %d AnonHugePages: %8lu kB\n"
#endif
			,
		       nid, K(node_page_state(nid, NR_FILE_DIRTY)),
		       nid, K(node_page_state(nid, NR_WRITEBACK)),
		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       /* With THP, anon pages include the huge-page tails */
		       nid, K(node_page_state(nid, NR_ANON_PAGES)
			+ node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
			HPAGE_PMD_NR),
#else
		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
#endif
		       nid, K(node_page_state(nid, NR_SHMEM)),
		       nid, node_page_state(nid, NR_KERNEL_STACK) *
				THREAD_SIZE / 1024,
		       nid, K(node_page_state(nid, NR_PAGETABLE)),
		       nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
		       nid, K(node_page_state(nid, NR_BOUNCE)),
		       nid, K(node_page_state(nid, NR_WRITEBACK_TEMP)),
		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
				node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
			, nid,
			K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
			HPAGE_PMD_NR));
#else
		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
#endif
	n += hugetlb_report_node_meminfo(nid, buf + n);
	return n;
}

#undef K
static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);

/* Per-node NUMA allocation statistics (hits, misses, interleave, ...) */
static ssize_t node_read_numastat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf,
		       "numa_hit %lu\n"
		       "numa_miss %lu\n"
		       "numa_foreign %lu\n"
		       "interleave_hit %lu\n"
		       "local_node %lu\n"
		       "other_node %lu\n",
		       node_page_state(dev->id, NUMA_HIT),
		       node_page_state(dev->id, NUMA_MISS),
		       node_page_state(dev->id, NUMA_FOREIGN),
		       node_page_state(dev->id, NUMA_INTERLEAVE_HIT),
		       node_page_state(dev->id, NUMA_LOCAL),
		       node_page_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);

/* Dump every per-zone vmstat counter for this node, one per line */
static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int i;
	int n = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
			     node_page_state(nid, i));

	return n;
}
static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);

/* NUMA distance from this node to every online node */
static ssize_t node_read_distance(struct device *dev,
			struct device_attribute *attr, char * buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i)
		len += sprintf(buf + len, "%s%d", i ? " " : "",
			       node_distance(nid, i));

	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);

#ifdef CONFIG_HUGETLBFS
/*
 * hugetlbfs per node attributes registration interface:
 * When/if hugetlb[fs] subsystem initializes [sometime after this module],
 * it will register its per node attributes for all online nodes with
 * memory. It will also call register_hugetlbfs_with_node(), below, to
 * register its attribute registration functions with this node driver.
 * Once these hooks have been initialized, the node driver will call into
 * the hugetlb module to [un]register attributes for hot-plugged nodes.
 */
static node_registration_func_t __hugetlb_register_node;
static node_registration_func_t __hugetlb_unregister_node;

/* Returns true if the hugetlb hook fired (node has memory and hook is set) */
static inline bool hugetlb_register_node(struct node *node)
{
	if (__hugetlb_register_node &&
			node_state(node->dev.id, N_HIGH_MEMORY)) {
		__hugetlb_register_node(node);
		return true;
	}
	return false;
}

static inline void hugetlb_unregister_node(struct node *node)
{
	if (__hugetlb_unregister_node)
		__hugetlb_unregister_node(node);
}

/* Called by the hugetlb subsystem to install its per-node hooks */
void register_hugetlbfs_with_node(node_registration_func_t doregister,
				  node_registration_func_t unregister)
{
	__hugetlb_register_node   = doregister;
	__hugetlb_unregister_node = unregister;
}
#else
static inline void hugetlb_register_node(struct node *node) {}

static inline void hugetlb_unregister_node(struct node *node) {}
#endif


/*
 * register_node - Setup a sysfs device for a node.
 * @num - Node number to use when creating the device.
 *
 * Initialize and register the node device.
 */
int register_node(struct node *node, int num, struct node *parent)
{
	int error;

	node->dev.id = num;
	node->dev.bus = &node_subsys;
	error = device_register(&node->dev);

	if (!error){
		device_create_file(&node->dev, &dev_attr_cpumap);
		device_create_file(&node->dev, &dev_attr_cpulist);
		device_create_file(&node->dev, &dev_attr_meminfo);
		device_create_file(&node->dev, &dev_attr_numastat);
		device_create_file(&node->dev, &dev_attr_distance);
		device_create_file(&node->dev, &dev_attr_vmstat);

		scan_unevictable_register_node(node);

		hugetlb_register_node(node);

		compaction_register_node(node);
	}

	return error;
}

/**
 * unregister_node - unregister a node device
 * @node: node going away
 *
 * Unregisters a node device @node.  All the devices on the node must be
 * unregistered before calling this function.
 */
void unregister_node(struct node *node)
{
	device_remove_file(&node->dev, &dev_attr_cpumap);
	device_remove_file(&node->dev, &dev_attr_cpulist);
	device_remove_file(&node->dev, &dev_attr_meminfo);
	device_remove_file(&node->dev, &dev_attr_numastat);
	device_remove_file(&node->dev, &dev_attr_distance);
	device_remove_file(&node->dev, &dev_attr_vmstat);

	scan_unevictable_unregister_node(node);
	hugetlb_unregister_node(node);		/* no-op, if memoryless node */

	device_unregister(&node->dev);
}

struct node node_devices[MAX_NUMNODES];

/*
 * register cpu under node
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	/* Bidirectional symlinks: node -> cpu and cpu -> node */
	ret = sysfs_create_link(&node_devices[nid].dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid].dev.kobj,
				 kobject_name(&node_devices[nid].dev.kobj));
}

int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid].dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid].dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
/* A page's lru.next is only non-NULL once the page struct was initialized */
#define page_initialized(page)  (page->lru.next)

static int get_nid_for_pfn(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid_within(pfn))
		return -1;
	page = pfn_to_page(pfn);
	if (!page_initialized(page))
		return -1;
	return pfn_to_nid(pfn);
}

/* register memory section under specified node if it spans that node */
int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
{
	int ret;
	unsigned long pfn, sect_start_pfn, sect_end_pfn;

	if (!mem_blk)
		return -EFAULT;
	if (!node_online(nid))
		return 0;

	sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
	sect_end_pfn += PAGES_PER_SECTION - 1;
	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
		int page_nid;

		page_nid = get_nid_for_pfn(pfn);
		if (page_nid < 0)
			continue;
		if (page_nid != nid)
			continue;
		/* First pfn on this node found: link both ways and stop */
		ret = sysfs_create_link_nowarn(&node_devices[nid].dev.kobj,
					&mem_blk->dev.kobj,
					kobject_name(&mem_blk->dev.kobj));
		if (ret)
			return ret;

		return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				&node_devices[nid].dev.kobj,
				kobject_name(&node_devices[nid].dev.kobj));
	}
	/* mem section does not span the specified node */
	return 0;
}

/* unregister memory section under all nodes that it spans */
int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
				    unsigned long phys_index)
{
	NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
	unsigned long pfn, sect_start_pfn, sect_end_pfn;

	if (!mem_blk) {
		NODEMASK_FREE(unlinked_nodes);
		return -EFAULT;
	}
	if (!unlinked_nodes)
		return -ENOMEM;
	nodes_clear(*unlinked_nodes);

	sect_start_pfn = section_nr_to_pfn(phys_index);
	sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
		int nid;

		nid = get_nid_for_pfn(pfn);
		if (nid < 0)
			continue;
		if (!node_online(nid))
			continue;
		/* Unlink each spanned node at most once */
		if (node_test_and_set(nid, *unlinked_nodes))
			continue;
		sysfs_remove_link(&node_devices[nid].dev.kobj,
			 kobject_name(&mem_blk->dev.kobj));
		sysfs_remove_link(&mem_blk->dev.kobj,
			 kobject_name(&node_devices[nid].dev.kobj));
	}
	NODEMASK_FREE(unlinked_nodes);
	return 0;
}

static int link_mem_sections(int nid)
{
	unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
	unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages;
	unsigned long pfn;
	struct memory_block *mem_blk = NULL;
	int err = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *mem_sect;
		int ret;

		if (!present_section_nr(section_nr))
			continue;
		mem_sect = __nr_to_section(section_nr);

		/* same memblock ? */
		if (mem_blk)
			if ((section_nr >= mem_blk->start_section_nr) &&
			    (section_nr <= mem_blk->end_section_nr))
				continue;

		mem_blk = find_memory_block_hinted(mem_sect, mem_blk);

		ret = register_mem_sect_under_node(mem_blk, nid);
		/* Remember the first failure but keep linking the rest */
		if (!err)
			err = ret;

		/* discard ref obtained in find_memory_block() */
	}

	if (mem_blk)
		kobject_put(&mem_blk->dev.kobj);
	return err;
}

#ifdef CONFIG_HUGETLBFS
/*
 * Handle per node hstate attribute [un]registration on transistions
 * to/from memoryless state.
 */
static void node_hugetlb_work(struct work_struct *work)
{
	struct node *node = container_of(work, struct node, node_work);

	/*
	 * We only get here when a node transitions to/from memoryless state.
	 * We can detect which transition occurred by examining whether the
	 * node has memory now.  hugetlb_register_node() already check this
	 * so we try to register the attributes.  If that fails, then the
	 * node has transitioned to memoryless, try to unregister the
	 * attributes.
	 */
	if (!hugetlb_register_node(node))
		hugetlb_unregister_node(node);
}

static void init_node_hugetlb_work(int nid)
{
	INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
}

/* Memory-hotplug notifier: kick off the hugetlb work on online/offline */
static int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int nid = mnb->status_change_nid;

	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		/*
		 * offload per node hstate [un]registration to a work thread
		 * when transitioning to/from memoryless state.
		 */
		if (nid != NUMA_NO_NODE)
			schedule_work(&node_devices[nid].node_work);
		break;

	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
	default:
		break;
	}

	return NOTIFY_OK;
}
#endif	/* CONFIG_HUGETLBFS */
#else	/* !CONFIG_MEMORY_HOTPLUG_SPARSE */

static int link_mem_sections(int nid) { return 0; }
#endif	/* CONFIG_MEMORY_HOTPLUG_SPARSE */

#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
    !defined(CONFIG_HUGETLBFS)
static inline int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	return NOTIFY_OK;
}

static void init_node_hugetlb_work(int nid) { }

#endif

int register_one_node(int nid)
{
	int error = 0;
	int cpu;

	if (node_online(nid)) {
		int p_node = parent_node(nid);
		struct node *parent = NULL;

		if (p_node != nid)
			parent = &node_devices[p_node];

		error = register_node(&node_devices[nid], nid, parent);

		/* link cpu under this node */
		for_each_present_cpu(cpu) {
			if (cpu_to_node(cpu) == nid)
				register_cpu_under_node(cpu, nid);
		}

		/*
		 * link memory sections under this node
		 * NOTE(review): this overwrites any error returned by
		 * register_node() above — confirm that is intended.
		 */
		error = link_mem_sections(nid);

		/* initialize work queue for memory hot plug */
		init_node_hugetlb_work(nid);
	}

	return error;

}

void unregister_one_node(int nid)
{
	unregister_node(&node_devices[nid]);
}

/*
 * node states attributes
 */

/* Print the nodemask of one node_states[] entry as a node list */
static ssize_t print_nodes_state(enum node_states state, char *buf)
{
	int n;

	n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
	if (n > 0 && PAGE_SIZE > n + 1) {
		*(buf + n++) = '\n';
		*(buf + n++) = '\0';
	}
	return n;
}

struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);
	return print_nodes_state(na->state, buf);
}

#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }

static struct node_attr node_state_attr[] = {
	_NODE_ATTR(possible, N_POSSIBLE),
	_NODE_ATTR(online, N_ONLINE),
	_NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
	_NODE_ATTR(has_cpu, N_CPU),
#ifdef CONFIG_HIGHMEM
	_NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
};

/* Must mirror node_state_attr[] entry-for-entry (checked at build time) */
static struct attribute *node_state_attrs[] = {
	&node_state_attr[0].attr.attr,
	&node_state_attr[1].attr.attr,
	&node_state_attr[2].attr.attr,
	&node_state_attr[3].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[4].attr.attr,
#endif
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

#define NODE_CALLBACK_PRI	2	/* lower than SLAB */
static int __init register_node_type(void)
{
	int ret;

 	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
 	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (!ret) {
		hotplug_memory_notifier(node_memory_callback,
					NODE_CALLBACK_PRI);
	}

	/*
	 * Note:  we're not going to unregister the node class if we fail
	 * to register the node state class attribute files.
	 */
	return ret;
}
postcore_initcall(register_node_type);
gpl-2.0
wangxingchao/s3c6410
net/wireless/ethtool.c
8421
2109
#include <linux/utsname.h> #include <net/cfg80211.h> #include "core.h" #include "ethtool.h" static void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct wireless_dev *wdev = dev->ieee80211_ptr; strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name, sizeof(info->driver)); strlcpy(info->version, init_utsname()->release, sizeof(info->version)); if (wdev->wiphy->fw_version[0]) strncpy(info->fw_version, wdev->wiphy->fw_version, sizeof(info->fw_version)); else strncpy(info->fw_version, "N/A", sizeof(info->fw_version)); strlcpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)), sizeof(info->bus_info)); } static int cfg80211_get_regs_len(struct net_device *dev) { /* For now, return 0... */ return 0; } static void cfg80211_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *data) { struct wireless_dev *wdev = dev->ieee80211_ptr; regs->version = wdev->wiphy->hw_version; regs->len = 0; } static void cfg80211_get_ringparam(struct net_device *dev, struct ethtool_ringparam *rp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); memset(rp, 0, sizeof(*rp)); if (rdev->ops->get_ringparam) rdev->ops->get_ringparam(wdev->wiphy, &rp->tx_pending, &rp->tx_max_pending, &rp->rx_pending, &rp->rx_max_pending); } static int cfg80211_set_ringparam(struct net_device *dev, struct ethtool_ringparam *rp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0) return -EINVAL; if (rdev->ops->set_ringparam) return rdev->ops->set_ringparam(wdev->wiphy, rp->tx_pending, rp->rx_pending); return -ENOTSUPP; } const struct ethtool_ops cfg80211_ethtool_ops = { .get_drvinfo = cfg80211_get_drvinfo, .get_regs_len = cfg80211_get_regs_len, .get_regs = cfg80211_get_regs, .get_link = ethtool_op_get_link, .get_ringparam = cfg80211_get_ringparam, .set_ringparam = 
cfg80211_set_ringparam, };
gpl-2.0
mupuf/linux-nouveau-pm
arch/mn10300/kernel/profile.c
12261
1396
/* MN10300 Profiling setup
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

/*
 * initialise the profiling if enabled
 * - using with gdbstub will give anomalous results
 * - can't be used with gdbstub if running at IRQ priority 0
 */
static __init int profile_init(void)
{
	u16 tmp;

	/* Nothing to do unless the core allocated a profile buffer */
	if (!prof_buffer)
		return 0;

	/* use timer 11 to drive the profiling interrupts */
	set_intr_stub(EXCEP_IRQ_LEVEL0, profile_handler);

	/* set IRQ priority at which to run */
	set_intr_level(TM11IRQ, GxICR_LEVEL_0);

	/* set up timer 11
	 * - source: (IOCLK 33MHz)*2 = 66MHz
	 * - frequency: (33330000*2) / 8 / 20625 = 202Hz
	 */
	TM11BR = 20625 - 1;
	TM11MD = TM8MD_SRC_IOCLK_8;
	/* pulse the init-counter bit to load the base register */
	TM11MD |= TM8MD_INIT_COUNTER;
	TM11MD &= ~TM8MD_INIT_COUNTER;
	TM11MD |= TM8MD_COUNT_ENABLE;

	TM11ICR |= GxICR_ENABLE;
	/* dummy read-back of the interrupt control register; presumably
	 * flushes the write before interrupts fire — do not remove */
	tmp = TM11ICR;

	printk(KERN_INFO "Profiling initiated on timer 11, priority 0, %uHz\n",
	       MN10300_IOCLK / 8 / (TM11BR + 1));
	printk(KERN_INFO "Profile histogram stored %p-%p\n",
	       prof_buffer, (u8 *)(prof_buffer + prof_len) - 1);

	return 0;
}

__initcall(profile_init);
gpl-2.0
cristianhristea/linux_kernel
drivers/acpi/acpica/nspredef.c
230
35421
/****************************************************************************** * * Module Name: nspredef - Validation of ACPI predefined methods and objects * $Revision: 1.1 $ * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #define ACPI_CREATE_PREDEFINED_TABLE #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "acpredef.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nspredef") /******************************************************************************* * * This module validates predefined ACPI objects that appear in the namespace, * at the time they are evaluated (via acpi_evaluate_object). The purpose of this * validation is to detect problems with BIOS-exposed predefined ACPI objects * before the results are returned to the ACPI-related drivers. * * There are several areas that are validated: * * 1) The number of input arguments as defined by the method/object in the * ASL is validated against the ACPI specification. * 2) The type of the return object (if any) is validated against the ACPI * specification. * 3) For returned package objects, the count of package elements is * validated, as well as the type of each package element. Nested * packages are supported. * * For any problems found, a warning message is issued. 
* ******************************************************************************/ /* Local prototypes */ static acpi_status acpi_ns_check_package(struct acpi_predefined_data *data, union acpi_operand_object **return_object_ptr); static acpi_status acpi_ns_check_package_list(struct acpi_predefined_data *data, const union acpi_predefined_info *package, union acpi_operand_object **elements, u32 count); static acpi_status acpi_ns_check_package_elements(struct acpi_predefined_data *data, union acpi_operand_object **elements, u8 type1, u32 count1, u8 type2, u32 count2, u32 start_index); static acpi_status acpi_ns_check_object_type(struct acpi_predefined_data *data, union acpi_operand_object **return_object_ptr, u32 expected_btypes, u32 package_index); static acpi_status acpi_ns_check_reference(struct acpi_predefined_data *data, union acpi_operand_object *return_object); static void acpi_ns_get_expected_types(char *buffer, u32 expected_btypes); /* * Names for the types that can be returned by the predefined objects. * Used for warning messages. Must be in the same order as the ACPI_RTYPEs */ static const char *acpi_rtype_names[] = { "/Integer", "/String", "/Buffer", "/Package", "/Reference", }; /******************************************************************************* * * FUNCTION: acpi_ns_check_predefined_names * * PARAMETERS: node - Namespace node for the method/object * user_param_count - Number of parameters actually passed * return_status - Status from the object evaluation * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status * * DESCRIPTION: Check an ACPI name for a match in the predefined name list. 
* ******************************************************************************/ acpi_status acpi_ns_check_predefined_names(struct acpi_namespace_node *node, u32 user_param_count, acpi_status return_status, union acpi_operand_object **return_object_ptr) { union acpi_operand_object *return_object = *return_object_ptr; acpi_status status = AE_OK; const union acpi_predefined_info *predefined; char *pathname; struct acpi_predefined_data *data; /* Match the name for this method/object against the predefined list */ predefined = acpi_ns_check_for_predefined_name(node); /* Get the full pathname to the object, for use in warning messages */ pathname = acpi_ns_get_external_pathname(node); if (!pathname) { return AE_OK; /* Could not get pathname, ignore */ } /* * Check that the parameter count for this method matches the ASL * definition. For predefined names, ensure that both the caller and * the method itself are in accordance with the ACPI specification. */ acpi_ns_check_parameter_count(pathname, node, user_param_count, predefined); /* If not a predefined name, we cannot validate the return object */ if (!predefined) { goto cleanup; } /* * If the method failed or did not actually return an object, we cannot * validate the return object */ if ((return_status != AE_OK) && (return_status != AE_CTRL_RETURN_VALUE)) { goto cleanup; } /* * If there is no return value, check if we require a return value for * this predefined name. Either one return value is expected, or none, * for both methods and other objects. * * Exit now if there is no return object. Warning if one was expected. */ if (!return_object) { if ((predefined->info.expected_btypes) && (!(predefined->info.expected_btypes & ACPI_RTYPE_NONE))) { ACPI_WARN_PREDEFINED((AE_INFO, pathname, ACPI_WARN_ALWAYS, "Missing expected return value")); status = AE_AML_NO_RETURN_VALUE; } goto cleanup; } /* * Return value validation and possible repair. 
* * 1) Don't perform return value validation/repair if this feature * has been disabled via a global option. * * 2) We have a return value, but if one wasn't expected, just exit, * this is not a problem. For example, if the "Implicit Return" * feature is enabled, methods will always return a value. * * 3) If the return value can be of any type, then we cannot perform * any validation, just exit. */ if (acpi_gbl_disable_auto_repair || (!predefined->info.expected_btypes) || (predefined->info.expected_btypes == ACPI_RTYPE_ALL)) { goto cleanup; } /* Create the parameter data block for object validation */ data = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_predefined_data)); if (!data) { goto cleanup; } data->predefined = predefined; data->node = node; data->node_flags = node->flags; data->pathname = pathname; /* * Check that the type of the main return object is what is expected * for this predefined name */ status = acpi_ns_check_object_type(data, return_object_ptr, predefined->info.expected_btypes, ACPI_NOT_PACKAGE_ELEMENT); if (ACPI_FAILURE(status)) { goto exit; } /* * For returned Package objects, check the type of all sub-objects. * Note: Package may have been newly created by call above. */ if ((*return_object_ptr)->common.type == ACPI_TYPE_PACKAGE) { data->parent_package = *return_object_ptr; status = acpi_ns_check_package(data, return_object_ptr); if (ACPI_FAILURE(status)) { goto exit; } } /* * The return object was OK, or it was successfully repaired above. * Now make some additional checks such as verifying that package * objects are sorted correctly (if required) or buffer objects have * the correct data width (bytes vs. dwords). These repairs are * performed on a per-name basis, i.e., the code is specific to * particular predefined names. 
*/ status = acpi_ns_complex_repairs(data, node, status, return_object_ptr); exit: /* * If the object validation failed or if we successfully repaired one * or more objects, mark the parent node to suppress further warning * messages during the next evaluation of the same method/object. */ if (ACPI_FAILURE(status) || (data->flags & ACPI_OBJECT_REPAIRED)) { node->flags |= ANOBJ_EVALUATED; } ACPI_FREE(data); cleanup: ACPI_FREE(pathname); return (status); } /******************************************************************************* * * FUNCTION: acpi_ns_check_parameter_count * * PARAMETERS: pathname - Full pathname to the node (for error msgs) * node - Namespace node for the method/object * user_param_count - Number of args passed in by the caller * predefined - Pointer to entry in predefined name table * * RETURN: None * * DESCRIPTION: Check that the declared (in ASL/AML) parameter count for a * predefined name is what is expected (i.e., what is defined in * the ACPI specification for this predefined name.) * ******************************************************************************/ void acpi_ns_check_parameter_count(char *pathname, struct acpi_namespace_node *node, u32 user_param_count, const union acpi_predefined_info *predefined) { u32 param_count; u32 required_params_current; u32 required_params_old; /* Methods have 0-7 parameters. All other types have zero. */ param_count = 0; if (node->type == ACPI_TYPE_METHOD) { param_count = node->object->method.param_count; } if (!predefined) { /* * Check the parameter count for non-predefined methods/objects. * * Warning if too few or too many arguments have been passed by the * caller. An incorrect number of arguments may not cause the method * to fail. However, the method will fail if there are too few * arguments and the method attempts to use one of the missing ones. 
*/ if (user_param_count < param_count) { ACPI_WARN_PREDEFINED((AE_INFO, pathname, ACPI_WARN_ALWAYS, "Insufficient arguments - needs %u, found %u", param_count, user_param_count)); } else if (user_param_count > param_count) { ACPI_WARN_PREDEFINED((AE_INFO, pathname, ACPI_WARN_ALWAYS, "Excess arguments - needs %u, found %u", param_count, user_param_count)); } return; } /* * Validate the user-supplied parameter count. * Allow two different legal argument counts (_SCP, etc.) */ required_params_current = predefined->info.param_count & 0x0F; required_params_old = predefined->info.param_count >> 4; if (user_param_count != ACPI_UINT32_MAX) { if ((user_param_count != required_params_current) && (user_param_count != required_params_old)) { ACPI_WARN_PREDEFINED((AE_INFO, pathname, ACPI_WARN_ALWAYS, "Parameter count mismatch - " "caller passed %u, ACPI requires %u", user_param_count, required_params_current)); } } /* * Check that the ASL-defined parameter count is what is expected for * this predefined name (parameter count as defined by the ACPI * specification) */ if ((param_count != required_params_current) && (param_count != required_params_old)) { ACPI_WARN_PREDEFINED((AE_INFO, pathname, node->flags, "Parameter count mismatch - ASL declared %u, ACPI requires %u", param_count, required_params_current)); } } /******************************************************************************* * * FUNCTION: acpi_ns_check_for_predefined_name * * PARAMETERS: node - Namespace node for the method/object * * RETURN: Pointer to entry in predefined table. NULL indicates not found. * * DESCRIPTION: Check an object name against the predefined object list. 
* ******************************************************************************/

const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
								    acpi_namespace_node
								    *node)
{
	const union acpi_predefined_info *this_name;

	/* Quick check for a predefined name, first character must be underscore */

	if (node->name.ascii[0] != '_') {
		return (NULL);
	}

	/* Search info table for a predefined method/object name */

	/* NOTE(review): predefined_names appears to be the table generated
	 * via ACPI_CREATE_PREDEFINED_TABLE / acpredef.h included at the top
	 * of this file — confirm against acpredef.h. */
	this_name = predefined_names;
	while (this_name->info.name[0]) {
		if (ACPI_COMPARE_NAME(node->name.ascii, this_name->info.name)) {
			return (this_name);
		}

		/*
		 * Skip next entry in the table if this name returns a Package
		 * (next entry contains the package info)
		 */
		if (this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) {
			this_name++;
		}

		this_name++;
	}

	return (NULL);		/* Not found */
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_check_package
 *
 * PARAMETERS:  data              - Pointer to validation data structure
 *              return_object_ptr - Pointer to the object returned from the
 *                                  evaluation of a method or object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Check a returned package object for the correct count and
 *              correct type of all sub-objects.
* ******************************************************************************/ static acpi_status acpi_ns_check_package(struct acpi_predefined_data *data, union acpi_operand_object **return_object_ptr) { union acpi_operand_object *return_object = *return_object_ptr; const union acpi_predefined_info *package; union acpi_operand_object **elements; acpi_status status = AE_OK; u32 expected_count; u32 count; u32 i; ACPI_FUNCTION_NAME(ns_check_package); /* The package info for this name is in the next table entry */ package = data->predefined + 1; ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s Validating return Package of Type %X, Count %X\n", data->pathname, package->ret_info.type, return_object->package.count)); /* * For variable-length Packages, we can safely remove all embedded * and trailing NULL package elements */ acpi_ns_remove_null_elements(data, package->ret_info.type, return_object); /* Extract package count and elements array */ elements = return_object->package.elements; count = return_object->package.count; /* The package must have at least one element, else invalid */ if (!count) { ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, "Return Package has no elements (empty)")); return (AE_AML_OPERAND_VALUE); } /* * Decode the type of the expected package contents * * PTYPE1 packages contain no subpackages * PTYPE2 packages contain sub-packages */ switch (package->ret_info.type) { case ACPI_PTYPE1_FIXED: /* * The package count is fixed and there are no sub-packages * * If package is too small, exit. 
* If package is larger than expected, issue warning but continue */ expected_count = package->ret_info.count1 + package->ret_info.count2; if (count < expected_count) { goto package_too_small; } else if (count > expected_count) { ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, "%s: Return Package is larger than needed - " "found %u, expected %u\n", data->pathname, count, expected_count)); } /* Validate all elements of the returned package */ status = acpi_ns_check_package_elements(data, elements, package->ret_info. object_type1, package->ret_info. count1, package->ret_info. object_type2, package->ret_info. count2, 0); break; case ACPI_PTYPE1_VAR: /* * The package count is variable, there are no sub-packages, and all * elements must be of the same type */ for (i = 0; i < count; i++) { status = acpi_ns_check_object_type(data, elements, package->ret_info. object_type1, i); if (ACPI_FAILURE(status)) { return (status); } elements++; } break; case ACPI_PTYPE1_OPTION: /* * The package count is variable, there are no sub-packages. There are * a fixed number of required elements, and a variable number of * optional elements. * * Check if package is at least as large as the minimum required */ expected_count = package->ret_info3.count; if (count < expected_count) { goto package_too_small; } /* Variable number of sub-objects */ for (i = 0; i < count; i++) { if (i < package->ret_info3.count) { /* These are the required package elements (0, 1, or 2) */ status = acpi_ns_check_object_type(data, elements, package-> ret_info3. object_type[i], i); if (ACPI_FAILURE(status)) { return (status); } } else { /* These are the optional package elements */ status = acpi_ns_check_object_type(data, elements, package-> ret_info3. 
tail_object_type, i); if (ACPI_FAILURE(status)) { return (status); } } elements++; } break; case ACPI_PTYPE2_REV_FIXED: /* First element is the (Integer) revision */ status = acpi_ns_check_object_type(data, elements, ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { return (status); } elements++; count--; /* Examine the sub-packages */ status = acpi_ns_check_package_list(data, package, elements, count); break; case ACPI_PTYPE2_PKG_COUNT: /* First element is the (Integer) count of sub-packages to follow */ status = acpi_ns_check_object_type(data, elements, ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { return (status); } /* * Count cannot be larger than the parent package length, but allow it * to be smaller. The >= accounts for the Integer above. */ expected_count = (u32) (*elements)->integer.value; if (expected_count >= count) { goto package_too_small; } count = expected_count; elements++; /* Examine the sub-packages */ status = acpi_ns_check_package_list(data, package, elements, count); break; case ACPI_PTYPE2: case ACPI_PTYPE2_FIXED: case ACPI_PTYPE2_MIN: case ACPI_PTYPE2_COUNT: case ACPI_PTYPE2_FIX_VAR: /* * These types all return a single Package that consists of a * variable number of sub-Packages. * * First, ensure that the first element is a sub-Package. If not, * the BIOS may have incorrectly returned the object as a single * package instead of a Package of Packages (a common error if * there is only one entry). We may be able to repair this by * wrapping the returned Package with a new outer Package. 
*/ if (*elements && ((*elements)->common.type != ACPI_TYPE_PACKAGE)) { /* Create the new outer package and populate it */ status = acpi_ns_wrap_with_package(data, return_object, return_object_ptr); if (ACPI_FAILURE(status)) { return (status); } /* Update locals to point to the new package (of 1 element) */ return_object = *return_object_ptr; elements = return_object->package.elements; count = 1; } /* Examine the sub-packages */ status = acpi_ns_check_package_list(data, package, elements, count); break; default: /* Should not get here if predefined info table is correct */ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, "Invalid internal return type in table entry: %X", package->ret_info.type)); return (AE_AML_INTERNAL); } return (status); package_too_small: /* Error exit for the case with an incorrect package count */ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, "Return Package is too small - found %u elements, expected %u", count, expected_count)); return (AE_AML_OPERAND_VALUE); } /******************************************************************************* * * FUNCTION: acpi_ns_check_package_list * * PARAMETERS: data - Pointer to validation data structure * package - Pointer to package-specific info for method * elements - Element list of parent package. All elements * of this list should be of type Package. * count - Count of subpackages * * RETURN: Status * * DESCRIPTION: Examine a list of subpackages * ******************************************************************************/ static acpi_status acpi_ns_check_package_list(struct acpi_predefined_data *data, const union acpi_predefined_info *package, union acpi_operand_object **elements, u32 count) { union acpi_operand_object *sub_package; union acpi_operand_object **sub_elements; acpi_status status; u32 expected_count; u32 i; u32 j; /* * Validate each sub-Package in the parent Package * * NOTE: assumes list of sub-packages contains no NULL elements. 
* Any NULL elements should have been removed by earlier call * to acpi_ns_remove_null_elements. */ for (i = 0; i < count; i++) { sub_package = *elements; sub_elements = sub_package->package.elements; data->parent_package = sub_package; /* Each sub-object must be of type Package */ status = acpi_ns_check_object_type(data, &sub_package, ACPI_RTYPE_PACKAGE, i); if (ACPI_FAILURE(status)) { return (status); } /* Examine the different types of expected sub-packages */ data->parent_package = sub_package; switch (package->ret_info.type) { case ACPI_PTYPE2: case ACPI_PTYPE2_PKG_COUNT: case ACPI_PTYPE2_REV_FIXED: /* Each subpackage has a fixed number of elements */ expected_count = package->ret_info.count1 + package->ret_info.count2; if (sub_package->package.count < expected_count) { goto package_too_small; } status = acpi_ns_check_package_elements(data, sub_elements, package->ret_info. object_type1, package->ret_info. count1, package->ret_info. object_type2, package->ret_info. count2, 0); if (ACPI_FAILURE(status)) { return (status); } break; case ACPI_PTYPE2_FIX_VAR: /* * Each subpackage has a fixed number of elements and an * optional element */ expected_count = package->ret_info.count1 + package->ret_info.count2; if (sub_package->package.count < expected_count) { goto package_too_small; } status = acpi_ns_check_package_elements(data, sub_elements, package->ret_info. object_type1, package->ret_info. count1, package->ret_info. object_type2, sub_package->package. count - package->ret_info. count1, 0); if (ACPI_FAILURE(status)) { return (status); } break; case ACPI_PTYPE2_FIXED: /* Each sub-package has a fixed length */ expected_count = package->ret_info2.count; if (sub_package->package.count < expected_count) { goto package_too_small; } /* Check the type of each sub-package element */ for (j = 0; j < expected_count; j++) { status = acpi_ns_check_object_type(data, &sub_elements[j], package-> ret_info2. 
object_type[j], j); if (ACPI_FAILURE(status)) { return (status); } } break; case ACPI_PTYPE2_MIN: /* Each sub-package has a variable but minimum length */ expected_count = package->ret_info.count1; if (sub_package->package.count < expected_count) { goto package_too_small; } /* Check the type of each sub-package element */ status = acpi_ns_check_package_elements(data, sub_elements, package->ret_info. object_type1, sub_package->package. count, 0, 0, 0); if (ACPI_FAILURE(status)) { return (status); } break; case ACPI_PTYPE2_COUNT: /* * First element is the (Integer) count of elements, including * the count field (the ACPI name is num_elements) */ status = acpi_ns_check_object_type(data, sub_elements, ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { return (status); } /* * Make sure package is large enough for the Count and is * is as large as the minimum size */ expected_count = (u32)(*sub_elements)->integer.value; if (sub_package->package.count < expected_count) { goto package_too_small; } if (sub_package->package.count < package->ret_info.count1) { expected_count = package->ret_info.count1; goto package_too_small; } if (expected_count == 0) { /* * Either the num_entries element was originally zero or it was * a NULL element and repaired to an Integer of value zero. * In either case, repair it by setting num_entries to be the * actual size of the subpackage. */ expected_count = sub_package->package.count; (*sub_elements)->integer.value = expected_count; } /* Check the type of each sub-package element */ status = acpi_ns_check_package_elements(data, (sub_elements + 1), package->ret_info. 
object_type1, (expected_count - 1), 0, 0, 1); if (ACPI_FAILURE(status)) { return (status); } break; default: /* Should not get here, type was validated by caller */ return (AE_AML_INTERNAL); } elements++; } return (AE_OK); package_too_small: /* The sub-package count was smaller than required */ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, "Return Sub-Package[%u] is too small - found %u elements, expected %u", i, sub_package->package.count, expected_count)); return (AE_AML_OPERAND_VALUE); } /******************************************************************************* * * FUNCTION: acpi_ns_check_package_elements * * PARAMETERS: data - Pointer to validation data structure * elements - Pointer to the package elements array * type1 - Object type for first group * count1 - Count for first group * type2 - Object type for second group * count2 - Count for second group * start_index - Start of the first group of elements * * RETURN: Status * * DESCRIPTION: Check that all elements of a package are of the correct object * type. Supports up to two groups of different object types. * ******************************************************************************/ static acpi_status acpi_ns_check_package_elements(struct acpi_predefined_data *data, union acpi_operand_object **elements, u8 type1, u32 count1, u8 type2, u32 count2, u32 start_index) { union acpi_operand_object **this_element = elements; acpi_status status; u32 i; /* * Up to two groups of package elements are supported by the data * structure. All elements in each group must be of the same type. * The second group can have a count of zero. 
*/ for (i = 0; i < count1; i++) { status = acpi_ns_check_object_type(data, this_element, type1, i + start_index); if (ACPI_FAILURE(status)) { return (status); } this_element++; } for (i = 0; i < count2; i++) { status = acpi_ns_check_object_type(data, this_element, type2, (i + count1 + start_index)); if (ACPI_FAILURE(status)) { return (status); } this_element++; } return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_check_object_type * * PARAMETERS: data - Pointer to validation data structure * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * expected_btypes - Bitmap of expected return type(s) * package_index - Index of object within parent package (if * applicable - ACPI_NOT_PACKAGE_ELEMENT * otherwise) * * RETURN: Status * * DESCRIPTION: Check the type of the return object against the expected object * type(s). Use of Btype allows multiple expected object types. * ******************************************************************************/ static acpi_status acpi_ns_check_object_type(struct acpi_predefined_data *data, union acpi_operand_object **return_object_ptr, u32 expected_btypes, u32 package_index) { union acpi_operand_object *return_object = *return_object_ptr; acpi_status status = AE_OK; u32 return_btype; char type_buffer[48]; /* Room for 5 types */ /* * If we get a NULL return_object here, it is a NULL package element. * Since all extraneous NULL package elements were removed earlier by a * call to acpi_ns_remove_null_elements, this is an unexpected NULL element. * We will attempt to repair it. 
*/
	if (!return_object) {
		/* NULL element inside a package: try the repair machinery
		 * before declaring it an error. */
		status = acpi_ns_repair_null_element(data, expected_btypes,
						     package_index,
						     return_object_ptr);
		if (ACPI_SUCCESS(status)) {
			return (AE_OK);	/* Repair was successful */
		}
		goto type_error_exit;
	}

	/* A Namespace node should not get here, but make sure */

	if (ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) {
		ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
				      "Invalid return type - Found a Namespace node [%4.4s] type %s",
				      return_object->node.name.ascii,
				      acpi_ut_get_type_name(return_object->node.
							    type)));
		return (AE_AML_OPERAND_TYPE);
	}

	/*
	 * Convert the object type (ACPI_TYPE_xxx) to a bitmapped object type.
	 * The bitmapped type allows multiple possible return types.
	 *
	 * Note, the cases below must handle all of the possible types returned
	 * from all of the predefined names (including elements of returned
	 * packages)
	 */
	switch (return_object->common.type) {
	case ACPI_TYPE_INTEGER:
		return_btype = ACPI_RTYPE_INTEGER;
		break;

	case ACPI_TYPE_BUFFER:
		return_btype = ACPI_RTYPE_BUFFER;
		break;

	case ACPI_TYPE_STRING:
		return_btype = ACPI_RTYPE_STRING;
		break;

	case ACPI_TYPE_PACKAGE:
		return_btype = ACPI_RTYPE_PACKAGE;
		break;

	case ACPI_TYPE_LOCAL_REFERENCE:
		return_btype = ACPI_RTYPE_REFERENCE;
		break;

	default:
		/* Not one of the supported objects, must be incorrect */
		goto type_error_exit;
	}

	/* Is the object one of the expected types?
	 */
	if (return_btype & expected_btypes) {

		/* For reference objects, check that the reference type is correct */

		if (return_object->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
			status = acpi_ns_check_reference(data, return_object);
		}

		return (status);
	}

	/* Type mismatch -- attempt repair of the returned object */

	status = acpi_ns_repair_object(data, expected_btypes,
				       package_index, return_object_ptr);
	if (ACPI_SUCCESS(status)) {
		return (AE_OK);	/* Repair was successful */
	}

type_error_exit:

	/* Create a string with all expected types for this predefined object */

	acpi_ns_get_expected_types(type_buffer, expected_btypes);

	if (package_index == ACPI_NOT_PACKAGE_ELEMENT) {
		ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
				      "Return type mismatch - found %s, expected %s",
				      acpi_ut_get_object_type_name
				      (return_object), type_buffer));
	} else {
		ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
				      "Return Package type mismatch at index %u - "
				      "found %s, expected %s", package_index,
				      acpi_ut_get_object_type_name
				      (return_object), type_buffer));
	}

	return (AE_AML_OPERAND_TYPE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_check_reference
 *
 * PARAMETERS:  data            - Pointer to validation data structure
 *              return_object   - Object returned from the evaluation of a
 *                                method or object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Check a returned reference object for the correct reference
 *              type. The only reference type that can be returned from a
 *              predefined method is a named reference. All others are invalid.
 *
 ******************************************************************************/

static acpi_status
acpi_ns_check_reference(struct acpi_predefined_data *data,
			union acpi_operand_object *return_object)
{

	/*
	 * Check the reference object for the correct reference type (opcode).
* The only type of reference that can be converted to an union acpi_object is
	 * a reference to a named object (reference class: NAME)
	 */
	if (return_object->reference.class == ACPI_REFCLASS_NAME) {
		return (AE_OK);
	}

	/* Any other reference class returned by a predefined name is
	 * invalid: warn (filtered by node_flags) and report an
	 * operand-type error. */
	ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
			      "Return type mismatch - unexpected reference object type [%s] %2.2X",
			      acpi_ut_get_reference_name(return_object),
			      return_object->reference.class));

	return (AE_AML_OPERAND_TYPE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_get_expected_types
 *
 * PARAMETERS:  buffer          - Pointer to where the string is returned
 *              expected_btypes - Bitmap of expected return type(s)
 *
 * RETURN:      Buffer is populated with type names.
 *
 * DESCRIPTION: Translate the expected types bitmap into a string of ascii
 *              names of expected types, for use in warning messages.
 *
 ******************************************************************************/

static void acpi_ns_get_expected_types(char *buffer, u32 expected_btypes)
{
	u32 this_rtype;
	u32 i;
	u32 j;

	/* j==1 skips the leading '/' of the first appended name (the
	 * acpi_rtype_names entries are "/Integer", "/String", ...), so the
	 * result reads "Integer/String" rather than "/Integer/String". */
	j = 1;
	buffer[0] = 0;
	this_rtype = ACPI_RTYPE_INTEGER;

	for (i = 0; i < ACPI_NUM_RTYPES; i++) {

		/* If one of the expected types, concatenate the name of this type */

		if (expected_btypes & this_rtype) {
			ACPI_STRCAT(buffer, &acpi_rtype_names[i][j]);
			j = 0;	/* Use name separator from now on */
		}

		this_rtype <<= 1;	/* Next Rtype */
	}
}
gpl-2.0
HCDRJacob/u8800-2.6.32
drivers/char/pcmcia/ipwireless/hardware.c
742
46438
/* * IPWireless 3G PCMCIA Network Driver * * Original code * by Stephen Blackheath <stephen@blacksapphire.com>, * Ben Martel <benm@symmetric.co.nz> * * Copyrighted as follows: * Copyright (C) 2004 by Symmetric Systems Ltd (NZ) * * Various driver changes and rewrites, port to new kernels * Copyright (C) 2006-2007 Jiri Kosina * * Misc code cleanups and updates * Copyright (C) 2007 David Sterba */ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/slab.h> #include "hardware.h" #include "setup_protocol.h" #include "network.h" #include "main.h" static void ipw_send_setup_packet(struct ipw_hardware *hw); static void handle_received_SETUP_packet(struct ipw_hardware *ipw, unsigned int address, const unsigned char *data, int len, int is_last); static void ipwireless_setup_timer(unsigned long data); static void handle_received_CTRL_packet(struct ipw_hardware *hw, unsigned int channel_idx, const unsigned char *data, int len); /*#define TIMING_DIAGNOSTICS*/ #ifdef TIMING_DIAGNOSTICS static struct timing_stats { unsigned long last_report_time; unsigned long read_time; unsigned long write_time; unsigned long read_bytes; unsigned long write_bytes; unsigned long start_time; }; static void start_timing(void) { timing_stats.start_time = jiffies; } static void end_read_timing(unsigned length) { timing_stats.read_time += (jiffies - start_time); timing_stats.read_bytes += length + 2; report_timing(); } static void end_write_timing(unsigned length) { timing_stats.write_time += (jiffies - start_time); timing_stats.write_bytes += length + 2; report_timing(); } static void report_timing(void) { unsigned long since = jiffies - timing_stats.last_report_time; /* If it's been more than one second... 
*/ if (since >= HZ) { int first = (timing_stats.last_report_time == 0); timing_stats.last_report_time = jiffies; if (!first) printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": %u us elapsed - read %lu bytes in %u us, wrote %lu bytes in %u us\n", jiffies_to_usecs(since), timing_stats.read_bytes, jiffies_to_usecs(timing_stats.read_time), timing_stats.write_bytes, jiffies_to_usecs(timing_stats.write_time)); timing_stats.read_time = 0; timing_stats.write_time = 0; timing_stats.read_bytes = 0; timing_stats.write_bytes = 0; } } #else static void start_timing(void) { } static void end_read_timing(unsigned length) { } static void end_write_timing(unsigned length) { } #endif /* Imported IPW definitions */ #define LL_MTU_V1 318 #define LL_MTU_V2 250 #define LL_MTU_MAX (LL_MTU_V1 > LL_MTU_V2 ? LL_MTU_V1 : LL_MTU_V2) #define PRIO_DATA 2 #define PRIO_CTRL 1 #define PRIO_SETUP 0 /* Addresses */ #define ADDR_SETUP_PROT 0 /* Protocol ids */ enum { /* Identifier for the Com Data protocol */ TL_PROTOCOLID_COM_DATA = 0, /* Identifier for the Com Control protocol */ TL_PROTOCOLID_COM_CTRL = 1, /* Identifier for the Setup protocol */ TL_PROTOCOLID_SETUP = 2 }; /* Number of bytes in NL packet header (cannot do * sizeof(nl_packet_header) since it's a bitfield) */ #define NL_FIRST_PACKET_HEADER_SIZE 3 /* Number of bytes in NL packet header (cannot do * sizeof(nl_packet_header) since it's a bitfield) */ #define NL_FOLLOWING_PACKET_HEADER_SIZE 1 struct nl_first_packet_header { unsigned char protocol:3; unsigned char address:3; unsigned char packet_rank:2; unsigned char length_lsb; unsigned char length_msb; }; struct nl_packet_header { unsigned char protocol:3; unsigned char address:3; unsigned char packet_rank:2; }; /* Value of 'packet_rank' above */ #define NL_INTERMEDIATE_PACKET 0x0 #define NL_LAST_PACKET 0x1 #define NL_FIRST_PACKET 0x2 union nl_packet { /* Network packet header of the first packet (a special case) */ struct nl_first_packet_header hdr_first; /* Network packet header of the 
following packets (if any) */ struct nl_packet_header hdr; /* Complete network packet (header + data) */ unsigned char rawpkt[LL_MTU_MAX]; } __attribute__ ((__packed__)); #define HW_VERSION_UNKNOWN -1 #define HW_VERSION_1 1 #define HW_VERSION_2 2 /* IPW I/O ports */ #define IOIER 0x00 /* Interrupt Enable Register */ #define IOIR 0x02 /* Interrupt Source/ACK register */ #define IODCR 0x04 /* Data Control Register */ #define IODRR 0x06 /* Data Read Register */ #define IODWR 0x08 /* Data Write Register */ #define IOESR 0x0A /* Embedded Driver Status Register */ #define IORXR 0x0C /* Rx Fifo Register (Host to Embedded) */ #define IOTXR 0x0E /* Tx Fifo Register (Embedded to Host) */ /* I/O ports and bit definitions for version 1 of the hardware */ /* IER bits*/ #define IER_RXENABLED 0x1 #define IER_TXENABLED 0x2 /* ISR bits */ #define IR_RXINTR 0x1 #define IR_TXINTR 0x2 /* DCR bits */ #define DCR_RXDONE 0x1 #define DCR_TXDONE 0x2 #define DCR_RXRESET 0x4 #define DCR_TXRESET 0x8 /* I/O ports and bit definitions for version 2 of the hardware */ struct MEMCCR { unsigned short reg_config_option; /* PCCOR: Configuration Option Register */ unsigned short reg_config_and_status; /* PCCSR: Configuration and Status Register */ unsigned short reg_pin_replacement; /* PCPRR: Pin Replacemant Register */ unsigned short reg_socket_and_copy; /* PCSCR: Socket and Copy Register */ unsigned short reg_ext_status; /* PCESR: Extendend Status Register */ unsigned short reg_io_base; /* PCIOB: I/O Base Register */ }; struct MEMINFREG { unsigned short memreg_tx_old; /* TX Register (R/W) */ unsigned short pad1; unsigned short memreg_rx_done; /* RXDone Register (R/W) */ unsigned short pad2; unsigned short memreg_rx; /* RX Register (R/W) */ unsigned short pad3; unsigned short memreg_pc_interrupt_ack; /* PC intr Ack Register (W) */ unsigned short pad4; unsigned long memreg_card_present;/* Mask for Host to check (R) for * CARD_PRESENT_VALUE */ unsigned short memreg_tx_new; /* TX2 (new) Register (R/W) 
*/ }; #define CARD_PRESENT_VALUE (0xBEEFCAFEUL) #define MEMTX_TX 0x0001 #define MEMRX_RX 0x0001 #define MEMRX_RX_DONE 0x0001 #define MEMRX_PCINTACKK 0x0001 #define NL_NUM_OF_PRIORITIES 3 #define NL_NUM_OF_PROTOCOLS 3 #define NL_NUM_OF_ADDRESSES NO_OF_IPW_CHANNELS struct ipw_hardware { unsigned int base_port; short hw_version; unsigned short ll_mtu; spinlock_t lock; int initializing; int init_loops; struct timer_list setup_timer; /* Flag if hw is ready to send next packet */ int tx_ready; /* Count of pending packets to be sent */ int tx_queued; struct list_head tx_queue[NL_NUM_OF_PRIORITIES]; int rx_bytes_queued; struct list_head rx_queue; /* Pool of rx_packet structures that are not currently used. */ struct list_head rx_pool; int rx_pool_size; /* True if reception of data is blocked while userspace processes it. */ int blocking_rx; /* True if there is RX data ready on the hardware. */ int rx_ready; unsigned short last_memtx_serial; /* * Newer versions of the V2 card firmware send serial numbers in the * MemTX register. 'serial_number_detected' is set true when we detect * a non-zero serial number (indicating the new firmware). Thereafter, * the driver can safely ignore the Timer Recovery re-sends to avoid * out-of-sync problems. */ int serial_number_detected; struct work_struct work_rx; /* True if we are to send the set-up data to the hardware. */ int to_setup; /* Card has been removed */ int removed; /* Saved irq value when we disable the interrupt. */ int irq; /* True if this driver is shutting down. */ int shutting_down; /* Modem control lines */ unsigned int control_lines[NL_NUM_OF_ADDRESSES]; struct ipw_rx_packet *packet_assembler[NL_NUM_OF_ADDRESSES]; struct tasklet_struct tasklet; /* The handle for the network layer, for the sending of events to it. 
*/ struct ipw_network *network; struct MEMINFREG __iomem *memory_info_regs; struct MEMCCR __iomem *memregs_CCR; void (*reboot_callback) (void *data); void *reboot_callback_data; unsigned short __iomem *memreg_tx; }; /* * Packet info structure for tx packets. * Note: not all the fields defined here are required for all protocols */ struct ipw_tx_packet { struct list_head queue; /* channel idx + 1 */ unsigned char dest_addr; /* SETUP, CTRL or DATA */ unsigned char protocol; /* Length of data block, which starts at the end of this structure */ unsigned short length; /* Sending state */ /* Offset of where we've sent up to so far */ unsigned long offset; /* Count of packet fragments, starting at 0 */ int fragment_count; /* Called after packet is sent and before is freed */ void (*packet_callback) (void *cb_data, unsigned int packet_length); void *callback_data; }; /* Signals from DTE */ #define COMCTRL_RTS 0 #define COMCTRL_DTR 1 /* Signals from DCE */ #define COMCTRL_CTS 2 #define COMCTRL_DCD 3 #define COMCTRL_DSR 4 #define COMCTRL_RI 5 struct ipw_control_packet_body { /* DTE signal or DCE signal */ unsigned char sig_no; /* 0: set signal, 1: clear signal */ unsigned char value; } __attribute__ ((__packed__)); struct ipw_control_packet { struct ipw_tx_packet header; struct ipw_control_packet_body body; }; struct ipw_rx_packet { struct list_head queue; unsigned int capacity; unsigned int length; unsigned int protocol; unsigned int channel_idx; }; static char *data_type(const unsigned char *buf, unsigned length) { struct nl_packet_header *hdr = (struct nl_packet_header *) buf; if (length == 0) return " "; if (hdr->packet_rank & NL_FIRST_PACKET) { switch (hdr->protocol) { case TL_PROTOCOLID_COM_DATA: return "DATA "; case TL_PROTOCOLID_COM_CTRL: return "CTRL "; case TL_PROTOCOLID_SETUP: return "SETUP"; default: return "???? 
"; } } else return " "; } #define DUMP_MAX_BYTES 64 static void dump_data_bytes(const char *type, const unsigned char *data, unsigned length) { char prefix[56]; sprintf(prefix, IPWIRELESS_PCCARD_NAME ": %s %s ", type, data_type(data, length)); print_hex_dump_bytes(prefix, 0, (void *)data, length < DUMP_MAX_BYTES ? length : DUMP_MAX_BYTES); } static void swap_packet_bitfield_to_le(unsigned char *data) { #ifdef __BIG_ENDIAN_BITFIELD unsigned char tmp = *data, ret = 0; /* * transform bits from aa.bbb.ccc to ccc.bbb.aa */ ret |= tmp & 0xc0 >> 6; ret |= tmp & 0x38 >> 1; ret |= tmp & 0x07 << 5; *data = ret & 0xff; #endif } static void swap_packet_bitfield_from_le(unsigned char *data) { #ifdef __BIG_ENDIAN_BITFIELD unsigned char tmp = *data, ret = 0; /* * transform bits from ccc.bbb.aa to aa.bbb.ccc */ ret |= tmp & 0xe0 >> 5; ret |= tmp & 0x1c << 1; ret |= tmp & 0x03 << 6; *data = ret & 0xff; #endif } static void do_send_fragment(struct ipw_hardware *hw, unsigned char *data, unsigned length) { unsigned i; unsigned long flags; start_timing(); BUG_ON(length > hw->ll_mtu); if (ipwireless_debug) dump_data_bytes("send", data, length); spin_lock_irqsave(&hw->lock, flags); hw->tx_ready = 0; swap_packet_bitfield_to_le(data); if (hw->hw_version == HW_VERSION_1) { outw((unsigned short) length, hw->base_port + IODWR); for (i = 0; i < length; i += 2) { unsigned short d = data[i]; __le16 raw_data; if (i + 1 < length) d |= data[i + 1] << 8; raw_data = cpu_to_le16(d); outw(raw_data, hw->base_port + IODWR); } outw(DCR_TXDONE, hw->base_port + IODCR); } else if (hw->hw_version == HW_VERSION_2) { outw((unsigned short) length, hw->base_port); for (i = 0; i < length; i += 2) { unsigned short d = data[i]; __le16 raw_data; if (i + 1 < length) d |= data[i + 1] << 8; raw_data = cpu_to_le16(d); outw(raw_data, hw->base_port); } while ((i & 3) != 2) { outw((unsigned short) 0xDEAD, hw->base_port); i += 2; } writew(MEMRX_RX, &hw->memory_info_regs->memreg_rx); } spin_unlock_irqrestore(&hw->lock, 
flags); end_write_timing(length); } static void do_send_packet(struct ipw_hardware *hw, struct ipw_tx_packet *packet) { unsigned short fragment_data_len; unsigned short data_left = packet->length - packet->offset; unsigned short header_size; union nl_packet pkt; header_size = (packet->fragment_count == 0) ? NL_FIRST_PACKET_HEADER_SIZE : NL_FOLLOWING_PACKET_HEADER_SIZE; fragment_data_len = hw->ll_mtu - header_size; if (data_left < fragment_data_len) fragment_data_len = data_left; /* * hdr_first is now in machine bitfield order, which will be swapped * to le just before it goes to hw */ pkt.hdr_first.protocol = packet->protocol; pkt.hdr_first.address = packet->dest_addr; pkt.hdr_first.packet_rank = 0; /* First packet? */ if (packet->fragment_count == 0) { pkt.hdr_first.packet_rank |= NL_FIRST_PACKET; pkt.hdr_first.length_lsb = (unsigned char) packet->length; pkt.hdr_first.length_msb = (unsigned char) (packet->length >> 8); } memcpy(pkt.rawpkt + header_size, ((unsigned char *) packet) + sizeof(struct ipw_tx_packet) + packet->offset, fragment_data_len); packet->offset += fragment_data_len; packet->fragment_count++; /* Last packet? (May also be first packet.) */ if (packet->offset == packet->length) pkt.hdr_first.packet_rank |= NL_LAST_PACKET; do_send_fragment(hw, pkt.rawpkt, header_size + fragment_data_len); /* If this packet has unsent data, then re-queue it. 
*/ if (packet->offset < packet->length) { /* * Re-queue it at the head of the highest priority queue so * it goes before all other packets */ unsigned long flags; spin_lock_irqsave(&hw->lock, flags); list_add(&packet->queue, &hw->tx_queue[0]); hw->tx_queued++; spin_unlock_irqrestore(&hw->lock, flags); } else { if (packet->packet_callback) packet->packet_callback(packet->callback_data, packet->length); kfree(packet); } } static void ipw_setup_hardware(struct ipw_hardware *hw) { unsigned long flags; spin_lock_irqsave(&hw->lock, flags); if (hw->hw_version == HW_VERSION_1) { /* Reset RX FIFO */ outw(DCR_RXRESET, hw->base_port + IODCR); /* SB: Reset TX FIFO */ outw(DCR_TXRESET, hw->base_port + IODCR); /* Enable TX and RX interrupts. */ outw(IER_TXENABLED | IER_RXENABLED, hw->base_port + IOIER); } else { /* * Set INTRACK bit (bit 0), which means we must explicitly * acknowledge interrupts by clearing bit 2 of reg_config_and_status. */ unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status); csr |= 1; writew(csr, &hw->memregs_CCR->reg_config_and_status); } spin_unlock_irqrestore(&hw->lock, flags); } /* * If 'packet' is NULL, then this function allocates a new packet, setting its * length to 0 and ensuring it has the specified minimum amount of free space. * * If 'packet' is not NULL, then this function enlarges it if it doesn't * have the specified minimum amount of free space. * */ static struct ipw_rx_packet *pool_allocate(struct ipw_hardware *hw, struct ipw_rx_packet *packet, int minimum_free_space) { if (!packet) { unsigned long flags; spin_lock_irqsave(&hw->lock, flags); if (!list_empty(&hw->rx_pool)) { packet = list_first_entry(&hw->rx_pool, struct ipw_rx_packet, queue); hw->rx_pool_size--; spin_unlock_irqrestore(&hw->lock, flags); list_del(&packet->queue); } else { const int min_capacity = ipwireless_ppp_mru(hw->network) + 2; int new_capacity; spin_unlock_irqrestore(&hw->lock, flags); new_capacity = (minimum_free_space > min_capacity ? 
/*
 * Return an rx packet buffer to the small reuse pool, or free it outright
 * once the pool already holds enough spares (more than 6).
 *
 * NOTE(review): the pool fields carry no lock of their own; the visible
 * caller (the rx work loop) invokes this while holding hw->lock - confirm
 * all call sites do the same.
 */
static void pool_free(struct ipw_hardware *hw, struct ipw_rx_packet *packet)
{
	if (hw->rx_pool_size > 6)
		/* Pool is full enough - really release the memory. */
		kfree(packet);
	else {
		hw->rx_pool_size++;
		list_add(&packet->queue, &hw->rx_pool);
	}
}
*/ memcpy((unsigned char *)(*assem) + sizeof(struct ipw_rx_packet) + (*assem)->length, data, length); (*assem)->length += length; if (is_last) { packet = *assem; *assem = NULL; /* Count queued DATA bytes only */ spin_lock_irqsave(&hw->lock, flags); hw->rx_bytes_queued += packet->length; spin_unlock_irqrestore(&hw->lock, flags); } } else { /* If it's a CTRL packet, don't assemble, just queue it. */ packet = pool_allocate(hw, NULL, length); if (!packet) { printk(KERN_ERR IPWIRELESS_PCCARD_NAME ": no memory for incomming ctrl packet, dropped!\n"); return; } packet->protocol = protocol; packet->channel_idx = channel_idx; memcpy((unsigned char *)packet + sizeof(struct ipw_rx_packet), data, length); packet->length = length; } /* * If this is the last packet, then send the assembled packet on to the * network layer. */ if (packet) { spin_lock_irqsave(&hw->lock, flags); list_add_tail(&packet->queue, &hw->rx_queue); /* Block reception of incoming packets if queue is full. */ hw->blocking_rx = (hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE); spin_unlock_irqrestore(&hw->lock, flags); schedule_work(&hw->work_rx); } } /* * Workqueue callback */ static void ipw_receive_data_work(struct work_struct *work_rx) { struct ipw_hardware *hw = container_of(work_rx, struct ipw_hardware, work_rx); unsigned long flags; spin_lock_irqsave(&hw->lock, flags); while (!list_empty(&hw->rx_queue)) { struct ipw_rx_packet *packet = list_first_entry(&hw->rx_queue, struct ipw_rx_packet, queue); if (hw->shutting_down) break; list_del(&packet->queue); /* * Note: ipwireless_network_packet_received must be called in a * process context (i.e. via schedule_work) because the tty * output code can sleep in the tty_flip_buffer_push call. */ if (packet->protocol == TL_PROTOCOLID_COM_DATA) { if (hw->network != NULL) { /* If the network hasn't been disconnected. 
*/ spin_unlock_irqrestore(&hw->lock, flags); /* * This must run unlocked due to tty processing * and mutex locking */ ipwireless_network_packet_received( hw->network, packet->channel_idx, (unsigned char *)packet + sizeof(struct ipw_rx_packet), packet->length); spin_lock_irqsave(&hw->lock, flags); } /* Count queued DATA bytes only */ hw->rx_bytes_queued -= packet->length; } else { /* * This is safe to be called locked, callchain does * not block */ handle_received_CTRL_packet(hw, packet->channel_idx, (unsigned char *)packet + sizeof(struct ipw_rx_packet), packet->length); } pool_free(hw, packet); /* * Unblock reception of incoming packets if queue is no longer * full. */ hw->blocking_rx = hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE; if (hw->shutting_down) break; } spin_unlock_irqrestore(&hw->lock, flags); } static void handle_received_CTRL_packet(struct ipw_hardware *hw, unsigned int channel_idx, const unsigned char *data, int len) { const struct ipw_control_packet_body *body = (const struct ipw_control_packet_body *) data; unsigned int changed_mask; if (len != sizeof(struct ipw_control_packet_body)) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": control packet was %d bytes - wrong size!\n", len); return; } switch (body->sig_no) { case COMCTRL_CTS: changed_mask = IPW_CONTROL_LINE_CTS; break; case COMCTRL_DCD: changed_mask = IPW_CONTROL_LINE_DCD; break; case COMCTRL_DSR: changed_mask = IPW_CONTROL_LINE_DSR; break; case COMCTRL_RI: changed_mask = IPW_CONTROL_LINE_RI; break; default: changed_mask = 0; } if (changed_mask != 0) { if (body->value) hw->control_lines[channel_idx] |= changed_mask; else hw->control_lines[channel_idx] &= ~changed_mask; if (hw->network) ipwireless_network_notify_control_line_change( hw->network, channel_idx, hw->control_lines[channel_idx], changed_mask); } } static void handle_received_packet(struct ipw_hardware *hw, const union nl_packet *packet, unsigned short len) { unsigned int protocol = packet->hdr.protocol; unsigned int address = 
packet->hdr.address; unsigned int header_length; const unsigned char *data; unsigned int data_len; int is_last = packet->hdr.packet_rank & NL_LAST_PACKET; if (packet->hdr.packet_rank & NL_FIRST_PACKET) header_length = NL_FIRST_PACKET_HEADER_SIZE; else header_length = NL_FOLLOWING_PACKET_HEADER_SIZE; data = packet->rawpkt + header_length; data_len = len - header_length; switch (protocol) { case TL_PROTOCOLID_COM_DATA: case TL_PROTOCOLID_COM_CTRL: queue_received_packet(hw, protocol, address, data, data_len, is_last); break; case TL_PROTOCOLID_SETUP: handle_received_SETUP_packet(hw, address, data, data_len, is_last); break; } } static void acknowledge_data_read(struct ipw_hardware *hw) { if (hw->hw_version == HW_VERSION_1) outw(DCR_RXDONE, hw->base_port + IODCR); else writew(MEMRX_PCINTACKK, &hw->memory_info_regs->memreg_pc_interrupt_ack); } /* * Retrieve a packet from the IPW hardware. */ static void do_receive_packet(struct ipw_hardware *hw) { unsigned len; unsigned i; unsigned char pkt[LL_MTU_MAX]; start_timing(); if (hw->hw_version == HW_VERSION_1) { len = inw(hw->base_port + IODRR); if (len > hw->ll_mtu) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": received a packet of %u bytes - longer than the MTU!\n", len); outw(DCR_RXDONE | DCR_RXRESET, hw->base_port + IODCR); return; } for (i = 0; i < len; i += 2) { __le16 raw_data = inw(hw->base_port + IODRR); unsigned short data = le16_to_cpu(raw_data); pkt[i] = (unsigned char) data; pkt[i + 1] = (unsigned char) (data >> 8); } } else { len = inw(hw->base_port); if (len > hw->ll_mtu) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": received a packet of %u bytes - longer than the MTU!\n", len); writew(MEMRX_PCINTACKK, &hw->memory_info_regs->memreg_pc_interrupt_ack); return; } for (i = 0; i < len; i += 2) { __le16 raw_data = inw(hw->base_port); unsigned short data = le16_to_cpu(raw_data); pkt[i] = (unsigned char) data; pkt[i + 1] = (unsigned char) (data >> 8); } while ((i & 3) != 2) { inw(hw->base_port); i += 2; } } 
acknowledge_data_read(hw); swap_packet_bitfield_from_le(pkt); if (ipwireless_debug) dump_data_bytes("recv", pkt, len); handle_received_packet(hw, (union nl_packet *) pkt, len); end_read_timing(len); } static int get_current_packet_priority(struct ipw_hardware *hw) { /* * If we're initializing, don't send anything of higher priority than * PRIO_SETUP. The network layer therefore need not care about * hardware initialization - any of its stuff will simply be queued * until setup is complete. */ return (hw->to_setup || hw->initializing ? PRIO_SETUP + 1 : NL_NUM_OF_PRIORITIES); } /* * return 1 if something has been received from hw */ static int get_packets_from_hw(struct ipw_hardware *hw) { int received = 0; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); while (hw->rx_ready && !hw->blocking_rx) { received = 1; hw->rx_ready--; spin_unlock_irqrestore(&hw->lock, flags); do_receive_packet(hw); spin_lock_irqsave(&hw->lock, flags); } spin_unlock_irqrestore(&hw->lock, flags); return received; } /* * Send pending packet up to given priority, prioritize SETUP data until * hardware is fully setup. 
* * return 1 if more packets can be sent */ static int send_pending_packet(struct ipw_hardware *hw, int priority_limit) { int more_to_send = 0; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); if (hw->tx_queued && hw->tx_ready) { int priority; struct ipw_tx_packet *packet = NULL; /* Pick a packet */ for (priority = 0; priority < priority_limit; priority++) { if (!list_empty(&hw->tx_queue[priority])) { packet = list_first_entry( &hw->tx_queue[priority], struct ipw_tx_packet, queue); hw->tx_queued--; list_del(&packet->queue); break; } } if (!packet) { hw->tx_queued = 0; spin_unlock_irqrestore(&hw->lock, flags); return 0; } spin_unlock_irqrestore(&hw->lock, flags); /* Send */ do_send_packet(hw, packet); /* Check if more to send */ spin_lock_irqsave(&hw->lock, flags); for (priority = 0; priority < priority_limit; priority++) if (!list_empty(&hw->tx_queue[priority])) { more_to_send = 1; break; } if (!more_to_send) hw->tx_queued = 0; } spin_unlock_irqrestore(&hw->lock, flags); return more_to_send; } /* * Send and receive all queued packets. */ static void ipwireless_do_tasklet(unsigned long hw_) { struct ipw_hardware *hw = (struct ipw_hardware *) hw_; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); if (hw->shutting_down) { spin_unlock_irqrestore(&hw->lock, flags); return; } if (hw->to_setup == 1) { /* * Initial setup data sent to hardware */ hw->to_setup = 2; spin_unlock_irqrestore(&hw->lock, flags); ipw_setup_hardware(hw); ipw_send_setup_packet(hw); send_pending_packet(hw, PRIO_SETUP + 1); get_packets_from_hw(hw); } else { int priority_limit = get_current_packet_priority(hw); int again; spin_unlock_irqrestore(&hw->lock, flags); do { again = send_pending_packet(hw, priority_limit); again |= get_packets_from_hw(hw); } while (again); } } /* * return true if the card is physically present. 
*/ static int is_card_present(struct ipw_hardware *hw) { if (hw->hw_version == HW_VERSION_1) return inw(hw->base_port + IOIR) != 0xFFFF; else return readl(&hw->memory_info_regs->memreg_card_present) == CARD_PRESENT_VALUE; } static irqreturn_t ipwireless_handle_v1_interrupt(int irq, struct ipw_hardware *hw) { unsigned short irqn; irqn = inw(hw->base_port + IOIR); /* Check if card is present */ if (irqn == 0xFFFF) return IRQ_NONE; else if (irqn != 0) { unsigned short ack = 0; unsigned long flags; /* Transmit complete. */ if (irqn & IR_TXINTR) { ack |= IR_TXINTR; spin_lock_irqsave(&hw->lock, flags); hw->tx_ready = 1; spin_unlock_irqrestore(&hw->lock, flags); } /* Received data */ if (irqn & IR_RXINTR) { ack |= IR_RXINTR; spin_lock_irqsave(&hw->lock, flags); hw->rx_ready++; spin_unlock_irqrestore(&hw->lock, flags); } if (ack != 0) { outw(ack, hw->base_port + IOIR); tasklet_schedule(&hw->tasklet); } return IRQ_HANDLED; } return IRQ_NONE; } static void acknowledge_pcmcia_interrupt(struct ipw_hardware *hw) { unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status); csr &= 0xfffd; writew(csr, &hw->memregs_CCR->reg_config_and_status); } static irqreturn_t ipwireless_handle_v2_v3_interrupt(int irq, struct ipw_hardware *hw) { int tx = 0; int rx = 0; int rx_repeat = 0; int try_mem_tx_old; unsigned long flags; do { unsigned short memtx = readw(hw->memreg_tx); unsigned short memtx_serial; unsigned short memrxdone = readw(&hw->memory_info_regs->memreg_rx_done); try_mem_tx_old = 0; /* check whether the interrupt was generated by ipwireless card */ if (!(memtx & MEMTX_TX) && !(memrxdone & MEMRX_RX_DONE)) { /* check if the card uses memreg_tx_old register */ if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) { memtx = readw(&hw->memory_info_regs->memreg_tx_old); if (memtx & MEMTX_TX) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Using memreg_tx_old\n"); hw->memreg_tx = &hw->memory_info_regs->memreg_tx_old; } else { return IRQ_NONE; } } else return IRQ_NONE; } /* * 
See if the card is physically present. Note that while it is * powering up, it appears not to be present. */ if (!is_card_present(hw)) { acknowledge_pcmcia_interrupt(hw); return IRQ_HANDLED; } memtx_serial = memtx & (unsigned short) 0xff00; if (memtx & MEMTX_TX) { writew(memtx_serial, hw->memreg_tx); if (hw->serial_number_detected) { if (memtx_serial != hw->last_memtx_serial) { hw->last_memtx_serial = memtx_serial; spin_lock_irqsave(&hw->lock, flags); hw->rx_ready++; spin_unlock_irqrestore(&hw->lock, flags); rx = 1; } else /* Ignore 'Timer Recovery' duplicates. */ rx_repeat = 1; } else { /* * If a non-zero serial number is seen, then enable * serial number checking. */ if (memtx_serial != 0) { hw->serial_number_detected = 1; printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": memreg_tx serial num detected\n"); spin_lock_irqsave(&hw->lock, flags); hw->rx_ready++; spin_unlock_irqrestore(&hw->lock, flags); } rx = 1; } } if (memrxdone & MEMRX_RX_DONE) { writew(0, &hw->memory_info_regs->memreg_rx_done); spin_lock_irqsave(&hw->lock, flags); hw->tx_ready = 1; spin_unlock_irqrestore(&hw->lock, flags); tx = 1; } if (tx) writew(MEMRX_PCINTACKK, &hw->memory_info_regs->memreg_pc_interrupt_ack); acknowledge_pcmcia_interrupt(hw); if (tx || rx) tasklet_schedule(&hw->tasklet); else if (!rx_repeat) { if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) { if (hw->serial_number_detected) printk(KERN_WARNING IPWIRELESS_PCCARD_NAME ": spurious interrupt - new_tx mode\n"); else { printk(KERN_WARNING IPWIRELESS_PCCARD_NAME ": no valid memreg_tx value - switching to the old memreg_tx\n"); hw->memreg_tx = &hw->memory_info_regs->memreg_tx_old; try_mem_tx_old = 1; } } else printk(KERN_WARNING IPWIRELESS_PCCARD_NAME ": spurious interrupt - old_tx mode\n"); } } while (try_mem_tx_old == 1); return IRQ_HANDLED; } irqreturn_t ipwireless_interrupt(int irq, void *dev_id) { struct ipw_hardware *hw = dev_id; if (hw->hw_version == HW_VERSION_1) return ipwireless_handle_v1_interrupt(irq, hw); else return 
/*
 * Create a data packet.  Uses an atomic (GFP_ATOMIC) allocation, so this
 * is safe from non-sleeping contexts; the previous "non-atomic" comment
 * contradicted the code.  The 'data_size' payload bytes live directly
 * after the ipw_tx_packet header in the same allocation.  Returns NULL
 * on allocation failure.
 */
static void *alloc_data_packet(int data_size,
			unsigned char dest_addr,
			unsigned char protocol)
{
	struct ipw_tx_packet *packet = kzalloc(
			sizeof(struct ipw_tx_packet) + data_size,
			GFP_ATOMIC);

	if (!packet)
		return NULL;

	INIT_LIST_HEAD(&packet->queue);
	packet->dest_addr = dest_addr;
	packet->protocol = protocol;
	packet->length = data_size;

	return packet;
}
packet->callback_data = callback_data; memcpy((unsigned char *) packet + sizeof(struct ipw_tx_packet), data, length); send_packet(hw, PRIO_DATA, packet); return 0; } static int set_control_line(struct ipw_hardware *hw, int prio, unsigned int channel_idx, int line, int state) { struct ipw_control_packet *packet; int protocolid = TL_PROTOCOLID_COM_CTRL; if (prio == PRIO_SETUP) protocolid = TL_PROTOCOLID_SETUP; packet = alloc_ctrl_packet(sizeof(struct ipw_control_packet), (channel_idx + 1), protocolid, line); if (!packet) return -ENOMEM; packet->header.length = sizeof(struct ipw_control_packet_body); packet->body.value = (state == 0 ? 0 : 1); send_packet(hw, prio, &packet->header); return 0; } static int set_DTR(struct ipw_hardware *hw, int priority, unsigned int channel_idx, int state) { if (state != 0) hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_DTR; else hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_DTR; return set_control_line(hw, priority, channel_idx, COMCTRL_DTR, state); } static int set_RTS(struct ipw_hardware *hw, int priority, unsigned int channel_idx, int state) { if (state != 0) hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_RTS; else hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_RTS; return set_control_line(hw, priority, channel_idx, COMCTRL_RTS, state); } int ipwireless_set_DTR(struct ipw_hardware *hw, unsigned int channel_idx, int state) { return set_DTR(hw, PRIO_CTRL, channel_idx, state); } int ipwireless_set_RTS(struct ipw_hardware *hw, unsigned int channel_idx, int state) { return set_RTS(hw, PRIO_CTRL, channel_idx, state); } struct ipw_setup_get_version_query_packet { struct ipw_tx_packet header; struct tl_setup_get_version_qry body; }; struct ipw_setup_config_packet { struct ipw_tx_packet header; struct tl_setup_config_msg body; }; struct ipw_setup_config_done_packet { struct ipw_tx_packet header; struct tl_setup_config_done_msg body; }; struct ipw_setup_open_packet { struct ipw_tx_packet header; struct tl_setup_open_msg 
body; }; struct ipw_setup_info_packet { struct ipw_tx_packet header; struct tl_setup_info_msg body; }; struct ipw_setup_reboot_msg_ack { struct ipw_tx_packet header; struct TlSetupRebootMsgAck body; }; /* This handles the actual initialization of the card */ static void __handle_setup_get_version_rsp(struct ipw_hardware *hw) { struct ipw_setup_config_packet *config_packet; struct ipw_setup_config_done_packet *config_done_packet; struct ipw_setup_open_packet *open_packet; struct ipw_setup_info_packet *info_packet; int port; unsigned int channel_idx; /* generate config packet */ for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) { config_packet = alloc_ctrl_packet( sizeof(struct ipw_setup_config_packet), ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, TL_SETUP_SIGNO_CONFIG_MSG); if (!config_packet) goto exit_nomem; config_packet->header.length = sizeof(struct tl_setup_config_msg); config_packet->body.port_no = port; config_packet->body.prio_data = PRIO_DATA; config_packet->body.prio_ctrl = PRIO_CTRL; send_packet(hw, PRIO_SETUP, &config_packet->header); } config_done_packet = alloc_ctrl_packet( sizeof(struct ipw_setup_config_done_packet), ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, TL_SETUP_SIGNO_CONFIG_DONE_MSG); if (!config_done_packet) goto exit_nomem; config_done_packet->header.length = sizeof(struct tl_setup_config_done_msg); send_packet(hw, PRIO_SETUP, &config_done_packet->header); /* generate open packet */ for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) { open_packet = alloc_ctrl_packet( sizeof(struct ipw_setup_open_packet), ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, TL_SETUP_SIGNO_OPEN_MSG); if (!open_packet) goto exit_nomem; open_packet->header.length = sizeof(struct tl_setup_open_msg); open_packet->body.port_no = port; send_packet(hw, PRIO_SETUP, &open_packet->header); } for (channel_idx = 0; channel_idx < NL_NUM_OF_ADDRESSES; channel_idx++) { int ret; ret = set_DTR(hw, PRIO_SETUP, channel_idx, (hw->control_lines[channel_idx] & IPW_CONTROL_LINE_DTR) != 0); if (ret) { 
printk(KERN_ERR IPWIRELESS_PCCARD_NAME ": error setting DTR (%d)\n", ret); return; } set_RTS(hw, PRIO_SETUP, channel_idx, (hw->control_lines [channel_idx] & IPW_CONTROL_LINE_RTS) != 0); if (ret) { printk(KERN_ERR IPWIRELESS_PCCARD_NAME ": error setting RTS (%d)\n", ret); return; } } /* * For NDIS we assume that we are using sync PPP frames, for COM async. * This driver uses NDIS mode too. We don't bother with translation * from async -> sync PPP. */ info_packet = alloc_ctrl_packet(sizeof(struct ipw_setup_info_packet), ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, TL_SETUP_SIGNO_INFO_MSG); if (!info_packet) goto exit_nomem; info_packet->header.length = sizeof(struct tl_setup_info_msg); info_packet->body.driver_type = NDISWAN_DRIVER; info_packet->body.major_version = NDISWAN_DRIVER_MAJOR_VERSION; info_packet->body.minor_version = NDISWAN_DRIVER_MINOR_VERSION; send_packet(hw, PRIO_SETUP, &info_packet->header); /* Initialization is now complete, so we clear the 'to_setup' flag */ hw->to_setup = 0; return; exit_nomem: printk(KERN_ERR IPWIRELESS_PCCARD_NAME ": not enough memory to alloc control packet\n"); hw->to_setup = -1; } static void handle_setup_get_version_rsp(struct ipw_hardware *hw, unsigned char vers_no) { del_timer(&hw->setup_timer); hw->initializing = 0; printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": card is ready.\n"); if (vers_no == TL_SETUP_VERSION) __handle_setup_get_version_rsp(hw); else printk(KERN_ERR IPWIRELESS_PCCARD_NAME ": invalid hardware version no %u\n", (unsigned int) vers_no); } static void ipw_send_setup_packet(struct ipw_hardware *hw) { struct ipw_setup_get_version_query_packet *ver_packet; ver_packet = alloc_ctrl_packet( sizeof(struct ipw_setup_get_version_query_packet), ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, TL_SETUP_SIGNO_GET_VERSION_QRY); ver_packet->header.length = sizeof(struct tl_setup_get_version_qry); /* * Response is handled in handle_received_SETUP_packet */ send_packet(hw, PRIO_SETUP, &ver_packet->header); } static void 
handle_received_SETUP_packet(struct ipw_hardware *hw, unsigned int address, const unsigned char *data, int len, int is_last) { const union ipw_setup_rx_msg *rx_msg = (const union ipw_setup_rx_msg *) data; if (address != ADDR_SETUP_PROT) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": setup packet has bad address %d\n", address); return; } switch (rx_msg->sig_no) { case TL_SETUP_SIGNO_GET_VERSION_RSP: if (hw->to_setup) handle_setup_get_version_rsp(hw, rx_msg->version_rsp_msg.version); break; case TL_SETUP_SIGNO_OPEN_MSG: if (ipwireless_debug) { unsigned int channel_idx = rx_msg->open_msg.port_no - 1; printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": OPEN_MSG [channel %u] reply received\n", channel_idx); } break; case TL_SETUP_SIGNO_INFO_MSG_ACK: if (ipwireless_debug) printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": card successfully configured as NDISWAN\n"); break; case TL_SETUP_SIGNO_REBOOT_MSG: if (hw->to_setup) printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": Setup not completed - ignoring reboot msg\n"); else { struct ipw_setup_reboot_msg_ack *packet; printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": Acknowledging REBOOT message\n"); packet = alloc_ctrl_packet( sizeof(struct ipw_setup_reboot_msg_ack), ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, TL_SETUP_SIGNO_REBOOT_MSG_ACK); packet->header.length = sizeof(struct TlSetupRebootMsgAck); send_packet(hw, PRIO_SETUP, &packet->header); if (hw->reboot_callback) hw->reboot_callback(hw->reboot_callback_data); } break; default: printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": unknown setup message %u received\n", (unsigned int) rx_msg->sig_no); } } static void do_close_hardware(struct ipw_hardware *hw) { unsigned int irqn; if (hw->hw_version == HW_VERSION_1) { /* Disable TX and RX interrupts. 
*/ outw(0, hw->base_port + IOIER); /* Acknowledge any outstanding interrupt requests */ irqn = inw(hw->base_port + IOIR); if (irqn & IR_TXINTR) outw(IR_TXINTR, hw->base_port + IOIR); if (irqn & IR_RXINTR) outw(IR_RXINTR, hw->base_port + IOIR); synchronize_irq(hw->irq); } } struct ipw_hardware *ipwireless_hardware_create(void) { int i; struct ipw_hardware *hw = kzalloc(sizeof(struct ipw_hardware), GFP_KERNEL); if (!hw) return NULL; hw->irq = -1; hw->initializing = 1; hw->tx_ready = 1; hw->rx_bytes_queued = 0; hw->rx_pool_size = 0; hw->last_memtx_serial = (unsigned short) 0xffff; for (i = 0; i < NL_NUM_OF_PRIORITIES; i++) INIT_LIST_HEAD(&hw->tx_queue[i]); INIT_LIST_HEAD(&hw->rx_queue); INIT_LIST_HEAD(&hw->rx_pool); spin_lock_init(&hw->lock); tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw); INIT_WORK(&hw->work_rx, ipw_receive_data_work); setup_timer(&hw->setup_timer, ipwireless_setup_timer, (unsigned long) hw); return hw; } void ipwireless_init_hardware_v1(struct ipw_hardware *hw, unsigned int base_port, void __iomem *attr_memory, void __iomem *common_memory, int is_v2_card, void (*reboot_callback) (void *data), void *reboot_callback_data) { if (hw->removed) { hw->removed = 0; enable_irq(hw->irq); } hw->base_port = base_port; hw->hw_version = (is_v2_card ? HW_VERSION_2 : HW_VERSION_1); hw->ll_mtu = (hw->hw_version == HW_VERSION_1 ? 
LL_MTU_V1 : LL_MTU_V2); hw->memregs_CCR = (struct MEMCCR __iomem *) ((unsigned short __iomem *) attr_memory + 0x200); hw->memory_info_regs = (struct MEMINFREG __iomem *) common_memory; hw->memreg_tx = &hw->memory_info_regs->memreg_tx_new; hw->reboot_callback = reboot_callback; hw->reboot_callback_data = reboot_callback_data; } void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw) { hw->initializing = 1; hw->init_loops = 0; printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": waiting for card to start up...\n"); ipwireless_setup_timer((unsigned long) hw); } static void ipwireless_setup_timer(unsigned long data) { struct ipw_hardware *hw = (struct ipw_hardware *) data; hw->init_loops++; if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY && hw->hw_version == HW_VERSION_2 && hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": failed to startup using TX2, trying TX\n"); hw->memreg_tx = &hw->memory_info_regs->memreg_tx_old; hw->init_loops = 0; } /* Give up after a certain number of retries */ if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": card failed to start up!\n"); hw->initializing = 0; } else { /* Do not attempt to write to the board if it is not present. */ if (is_card_present(hw)) { unsigned long flags; spin_lock_irqsave(&hw->lock, flags); hw->to_setup = 1; hw->tx_ready = 1; spin_unlock_irqrestore(&hw->lock, flags); tasklet_schedule(&hw->tasklet); } mod_timer(&hw->setup_timer, jiffies + msecs_to_jiffies(TL_SETUP_VERSION_QRY_TMO)); } } /* * Stop any interrupts from executing so that, once this function returns, * other layers of the driver can be sure they won't get any more callbacks. * Thus must be called on a proper process context. */ void ipwireless_stop_interrupts(struct ipw_hardware *hw) { if (!hw->shutting_down) { /* Tell everyone we are going down. 
*/ hw->shutting_down = 1; del_timer(&hw->setup_timer); /* Prevent the hardware from sending any more interrupts */ do_close_hardware(hw); } } void ipwireless_hardware_free(struct ipw_hardware *hw) { int i; struct ipw_rx_packet *rp, *rq; struct ipw_tx_packet *tp, *tq; ipwireless_stop_interrupts(hw); flush_scheduled_work(); for (i = 0; i < NL_NUM_OF_ADDRESSES; i++) if (hw->packet_assembler[i] != NULL) kfree(hw->packet_assembler[i]); for (i = 0; i < NL_NUM_OF_PRIORITIES; i++) list_for_each_entry_safe(tp, tq, &hw->tx_queue[i], queue) { list_del(&tp->queue); kfree(tp); } list_for_each_entry_safe(rp, rq, &hw->rx_queue, queue) { list_del(&rp->queue); kfree(rp); } list_for_each_entry_safe(rp, rq, &hw->rx_pool, queue) { list_del(&rp->queue); kfree(rp); } kfree(hw); } /* * Associate the specified network with this hardware, so it will receive events * from it. */ void ipwireless_associate_network(struct ipw_hardware *hw, struct ipw_network *network) { hw->network = network; }
gpl-2.0
kannu1994/sgs2_kernel
fs/hppfs/hppfs.c
1766
16970
/* * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <linux/ctype.h> #include <linux/dcache.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/statfs.h> #include <linux/types.h> #include <linux/pid_namespace.h> #include <linux/namei.h> #include <asm/uaccess.h> #include "os.h" static struct inode *get_inode(struct super_block *, struct dentry *); struct hppfs_data { struct list_head list; char contents[PAGE_SIZE - sizeof(struct list_head)]; }; struct hppfs_private { struct file *proc_file; int host_fd; loff_t len; struct hppfs_data *contents; }; struct hppfs_inode_info { struct dentry *proc_dentry; struct inode vfs_inode; }; static inline struct hppfs_inode_info *HPPFS_I(struct inode *inode) { return container_of(inode, struct hppfs_inode_info, vfs_inode); } #define HPPFS_SUPER_MAGIC 0xb00000ee static const struct super_operations hppfs_sbops; static int is_pid(struct dentry *dentry) { struct super_block *sb; int i; sb = dentry->d_sb; if (dentry->d_parent != sb->s_root) return 0; for (i = 0; i < dentry->d_name.len; i++) { if (!isdigit(dentry->d_name.name[i])) return 0; } return 1; } static char *dentry_name(struct dentry *dentry, int extra) { struct dentry *parent; char *root, *name; const char *seg_name; int len, seg_len; len = 0; parent = dentry; while (parent->d_parent != parent) { if (is_pid(parent)) len += strlen("pid") + 1; else len += parent->d_name.len + 1; parent = parent->d_parent; } root = "proc"; len += strlen(root); name = kmalloc(len + extra + 1, GFP_KERNEL); if (name == NULL) return NULL; name[len] = '\0'; parent = dentry; while (parent->d_parent != parent) { if (is_pid(parent)) { seg_name = "pid"; seg_len = strlen("pid"); } else { seg_name = parent->d_name.name; seg_len = parent->d_name.len; } len -= seg_len + 1; name[len] = '/'; 
strncpy(&name[len + 1], seg_name, seg_len); parent = parent->d_parent; } strncpy(name, root, strlen(root)); return name; } static int file_removed(struct dentry *dentry, const char *file) { char *host_file; int extra, fd; extra = 0; if (file != NULL) extra += strlen(file) + 1; host_file = dentry_name(dentry, extra + strlen("/remove")); if (host_file == NULL) { printk(KERN_ERR "file_removed : allocation failed\n"); return -ENOMEM; } if (file != NULL) { strcat(host_file, "/"); strcat(host_file, file); } strcat(host_file, "/remove"); fd = os_open_file(host_file, of_read(OPENFLAGS()), 0); kfree(host_file); if (fd > 0) { os_close_file(fd); return 1; } return 0; } static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, struct nameidata *nd) { struct dentry *proc_dentry, *parent; struct qstr *name = &dentry->d_name; struct inode *inode; int err, deleted; deleted = file_removed(dentry, NULL); if (deleted < 0) return ERR_PTR(deleted); else if (deleted) return ERR_PTR(-ENOENT); parent = HPPFS_I(ino)->proc_dentry; mutex_lock(&parent->d_inode->i_mutex); proc_dentry = lookup_one_len(name->name, parent, name->len); mutex_unlock(&parent->d_inode->i_mutex); if (IS_ERR(proc_dentry)) return proc_dentry; err = -ENOMEM; inode = get_inode(ino->i_sb, proc_dentry); if (!inode) goto out; d_add(dentry, inode); return NULL; out: return ERR_PTR(err); } static const struct inode_operations hppfs_file_iops = { }; static ssize_t read_proc(struct file *file, char __user *buf, ssize_t count, loff_t *ppos, int is_user) { ssize_t (*read)(struct file *, char __user *, size_t, loff_t *); ssize_t n; read = file->f_path.dentry->d_inode->i_fop->read; if (!is_user) set_fs(KERNEL_DS); n = (*read)(file, buf, count, &file->f_pos); if (!is_user) set_fs(USER_DS); if (ppos) *ppos = file->f_pos; return n; } static ssize_t hppfs_read_file(int fd, char __user *buf, ssize_t count) { ssize_t n; int cur, err; char *new_buf; n = -ENOMEM; new_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (new_buf == 
NULL) { printk(KERN_ERR "hppfs_read_file : kmalloc failed\n"); goto out; } n = 0; while (count > 0) { cur = min_t(ssize_t, count, PAGE_SIZE); err = os_read_file(fd, new_buf, cur); if (err < 0) { printk(KERN_ERR "hppfs_read : read failed, " "errno = %d\n", err); n = err; goto out_free; } else if (err == 0) break; if (copy_to_user(buf, new_buf, err)) { n = -EFAULT; goto out_free; } n += err; count -= err; } out_free: kfree(new_buf); out: return n; } static ssize_t hppfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct hppfs_private *hppfs = file->private_data; struct hppfs_data *data; loff_t off; int err; if (hppfs->contents != NULL) { int rem; if (*ppos >= hppfs->len) return 0; data = hppfs->contents; off = *ppos; while (off >= sizeof(data->contents)) { data = list_entry(data->list.next, struct hppfs_data, list); off -= sizeof(data->contents); } if (off + count > hppfs->len) count = hppfs->len - off; rem = copy_to_user(buf, &data->contents[off], count); *ppos += count - rem; if (rem > 0) return -EFAULT; } else if (hppfs->host_fd != -1) { err = os_seek_file(hppfs->host_fd, *ppos); if (err) { printk(KERN_ERR "hppfs_read : seek failed, " "errno = %d\n", err); return err; } err = hppfs_read_file(hppfs->host_fd, buf, count); if (err < 0) { printk(KERN_ERR "hppfs_read: read failed: %d\n", err); return err; } count = err; if (count > 0) *ppos += count; } else count = read_proc(hppfs->proc_file, buf, count, ppos, 1); return count; } static ssize_t hppfs_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct hppfs_private *data = file->private_data; struct file *proc_file = data->proc_file; ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *); write = proc_file->f_path.dentry->d_inode->i_fop->write; return (*write)(proc_file, buf, len, ppos); } static int open_host_sock(char *host_file, int *filter_out) { char *end; int fd; end = &host_file[strlen(host_file)]; strcpy(end, "/rw"); *filter_out = 1; fd 
= os_connect_socket(host_file); if (fd > 0) return fd; strcpy(end, "/r"); *filter_out = 0; fd = os_connect_socket(host_file); return fd; } static void free_contents(struct hppfs_data *head) { struct hppfs_data *data; struct list_head *ele, *next; if (head == NULL) return; list_for_each_safe(ele, next, &head->list) { data = list_entry(ele, struct hppfs_data, list); kfree(data); } kfree(head); } static struct hppfs_data *hppfs_get_data(int fd, int filter, struct file *proc_file, struct file *hppfs_file, loff_t *size_out) { struct hppfs_data *data, *new, *head; int n, err; err = -ENOMEM; data = kmalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) { printk(KERN_ERR "hppfs_get_data : head allocation failed\n"); goto failed; } INIT_LIST_HEAD(&data->list); head = data; *size_out = 0; if (filter) { while ((n = read_proc(proc_file, data->contents, sizeof(data->contents), NULL, 0)) > 0) os_write_file(fd, data->contents, n); err = os_shutdown_socket(fd, 0, 1); if (err) { printk(KERN_ERR "hppfs_get_data : failed to shut down " "socket\n"); goto failed_free; } } while (1) { n = os_read_file(fd, data->contents, sizeof(data->contents)); if (n < 0) { err = n; printk(KERN_ERR "hppfs_get_data : read failed, " "errno = %d\n", err); goto failed_free; } else if (n == 0) break; *size_out += n; if (n < sizeof(data->contents)) break; new = kmalloc(sizeof(*data), GFP_KERNEL); if (new == 0) { printk(KERN_ERR "hppfs_get_data : data allocation " "failed\n"); err = -ENOMEM; goto failed_free; } INIT_LIST_HEAD(&new->list); list_add(&new->list, &data->list); data = new; } return head; failed_free: free_contents(head); failed: return ERR_PTR(err); } static struct hppfs_private *hppfs_data(void) { struct hppfs_private *data; data = kmalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) return data; *data = ((struct hppfs_private ) { .host_fd = -1, .len = -1, .contents = NULL } ); return data; } static int file_mode(int fmode) { if (fmode == (FMODE_READ | FMODE_WRITE)) return O_RDWR; if (fmode == 
FMODE_READ) return O_RDONLY; if (fmode == FMODE_WRITE) return O_WRONLY; return 0; } static int hppfs_open(struct inode *inode, struct file *file) { const struct cred *cred = file->f_cred; struct hppfs_private *data; struct vfsmount *proc_mnt; struct dentry *proc_dentry; char *host_file; int err, fd, type, filter; err = -ENOMEM; data = hppfs_data(); if (data == NULL) goto out; host_file = dentry_name(file->f_path.dentry, strlen("/rw")); if (host_file == NULL) goto out_free2; proc_dentry = HPPFS_I(inode)->proc_dentry; proc_mnt = inode->i_sb->s_fs_info; /* XXX This isn't closed anywhere */ data->proc_file = dentry_open(dget(proc_dentry), mntget(proc_mnt), file_mode(file->f_mode), cred); err = PTR_ERR(data->proc_file); if (IS_ERR(data->proc_file)) goto out_free1; type = os_file_type(host_file); if (type == OS_TYPE_FILE) { fd = os_open_file(host_file, of_read(OPENFLAGS()), 0); if (fd >= 0) data->host_fd = fd; else printk(KERN_ERR "hppfs_open : failed to open '%s', " "errno = %d\n", host_file, -fd); data->contents = NULL; } else if (type == OS_TYPE_DIR) { fd = open_host_sock(host_file, &filter); if (fd > 0) { data->contents = hppfs_get_data(fd, filter, data->proc_file, file, &data->len); if (!IS_ERR(data->contents)) data->host_fd = fd; } else printk(KERN_ERR "hppfs_open : failed to open a socket " "in '%s', errno = %d\n", host_file, -fd); } kfree(host_file); file->private_data = data; return 0; out_free1: kfree(host_file); out_free2: free_contents(data->contents); kfree(data); out: return err; } static int hppfs_dir_open(struct inode *inode, struct file *file) { const struct cred *cred = file->f_cred; struct hppfs_private *data; struct vfsmount *proc_mnt; struct dentry *proc_dentry; int err; err = -ENOMEM; data = hppfs_data(); if (data == NULL) goto out; proc_dentry = HPPFS_I(inode)->proc_dentry; proc_mnt = inode->i_sb->s_fs_info; data->proc_file = dentry_open(dget(proc_dentry), mntget(proc_mnt), file_mode(file->f_mode), cred); err = PTR_ERR(data->proc_file); if 
(IS_ERR(data->proc_file)) goto out_free; file->private_data = data; return 0; out_free: kfree(data); out: return err; } static loff_t hppfs_llseek(struct file *file, loff_t off, int where) { struct hppfs_private *data = file->private_data; struct file *proc_file = data->proc_file; loff_t (*llseek)(struct file *, loff_t, int); loff_t ret; llseek = proc_file->f_path.dentry->d_inode->i_fop->llseek; if (llseek != NULL) { ret = (*llseek)(proc_file, off, where); if (ret < 0) return ret; } return default_llseek(file, off, where); } static const struct file_operations hppfs_file_fops = { .owner = NULL, .llseek = hppfs_llseek, .read = hppfs_read, .write = hppfs_write, .open = hppfs_open, }; struct hppfs_dirent { void *vfs_dirent; filldir_t filldir; struct dentry *dentry; }; static int hppfs_filldir(void *d, const char *name, int size, loff_t offset, u64 inode, unsigned int type) { struct hppfs_dirent *dirent = d; if (file_removed(dirent->dentry, name)) return 0; return (*dirent->filldir)(dirent->vfs_dirent, name, size, offset, inode, type); } static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir) { struct hppfs_private *data = file->private_data; struct file *proc_file = data->proc_file; int (*readdir)(struct file *, void *, filldir_t); struct hppfs_dirent dirent = ((struct hppfs_dirent) { .vfs_dirent = ent, .filldir = filldir, .dentry = file->f_path.dentry }); int err; readdir = proc_file->f_path.dentry->d_inode->i_fop->readdir; proc_file->f_pos = file->f_pos; err = (*readdir)(proc_file, &dirent, hppfs_filldir); file->f_pos = proc_file->f_pos; return err; } static int hppfs_fsync(struct file *file, int datasync) { return 0; } static const struct file_operations hppfs_dir_fops = { .owner = NULL, .readdir = hppfs_readdir, .open = hppfs_dir_open, .fsync = hppfs_fsync, .llseek = default_llseek, }; static int hppfs_statfs(struct dentry *dentry, struct kstatfs *sf) { sf->f_blocks = 0; sf->f_bfree = 0; sf->f_bavail = 0; sf->f_files = 0; sf->f_ffree = 0; 
sf->f_type = HPPFS_SUPER_MAGIC; return 0; } static struct inode *hppfs_alloc_inode(struct super_block *sb) { struct hppfs_inode_info *hi; hi = kmalloc(sizeof(*hi), GFP_KERNEL); if (!hi) return NULL; hi->proc_dentry = NULL; inode_init_once(&hi->vfs_inode); return &hi->vfs_inode; } void hppfs_evict_inode(struct inode *ino) { end_writeback(ino); dput(HPPFS_I(ino)->proc_dentry); mntput(ino->i_sb->s_fs_info); } static void hppfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); INIT_LIST_HEAD(&inode->i_dentry); kfree(HPPFS_I(inode)); } static void hppfs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, hppfs_i_callback); } static const struct super_operations hppfs_sbops = { .alloc_inode = hppfs_alloc_inode, .destroy_inode = hppfs_destroy_inode, .evict_inode = hppfs_evict_inode, .statfs = hppfs_statfs, }; static int hppfs_readlink(struct dentry *dentry, char __user *buffer, int buflen) { struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; return proc_dentry->d_inode->i_op->readlink(proc_dentry, buffer, buflen); } static void *hppfs_follow_link(struct dentry *dentry, struct nameidata *nd) { struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; return proc_dentry->d_inode->i_op->follow_link(proc_dentry, nd); } static void hppfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) { struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; if (proc_dentry->d_inode->i_op->put_link) proc_dentry->d_inode->i_op->put_link(proc_dentry, nd, cookie); } static const struct inode_operations hppfs_dir_iops = { .lookup = hppfs_lookup, }; static const struct inode_operations hppfs_link_iops = { .readlink = hppfs_readlink, .follow_link = hppfs_follow_link, .put_link = hppfs_put_link, }; static struct inode *get_inode(struct super_block *sb, struct dentry *dentry) { struct inode *proc_ino = dentry->d_inode; struct inode *inode = new_inode(sb); if (!inode) { dput(dentry); 
return ERR_PTR(-ENOMEM); } if (S_ISDIR(dentry->d_inode->i_mode)) { inode->i_op = &hppfs_dir_iops; inode->i_fop = &hppfs_dir_fops; } else if (S_ISLNK(dentry->d_inode->i_mode)) { inode->i_op = &hppfs_link_iops; inode->i_fop = &hppfs_file_fops; } else { inode->i_op = &hppfs_file_iops; inode->i_fop = &hppfs_file_fops; } HPPFS_I(inode)->proc_dentry = dentry; inode->i_uid = proc_ino->i_uid; inode->i_gid = proc_ino->i_gid; inode->i_atime = proc_ino->i_atime; inode->i_mtime = proc_ino->i_mtime; inode->i_ctime = proc_ino->i_ctime; inode->i_ino = proc_ino->i_ino; inode->i_mode = proc_ino->i_mode; inode->i_nlink = proc_ino->i_nlink; inode->i_size = proc_ino->i_size; inode->i_blocks = proc_ino->i_blocks; return inode; } static int hppfs_fill_super(struct super_block *sb, void *d, int silent) { struct inode *root_inode; struct vfsmount *proc_mnt; int err = -ENOENT; proc_mnt = mntget(current->nsproxy->pid_ns->proc_mnt); if (IS_ERR(proc_mnt)) goto out; sb->s_blocksize = 1024; sb->s_blocksize_bits = 10; sb->s_magic = HPPFS_SUPER_MAGIC; sb->s_op = &hppfs_sbops; sb->s_fs_info = proc_mnt; err = -ENOMEM; root_inode = get_inode(sb, dget(proc_mnt->mnt_sb->s_root)); if (!root_inode) goto out_mntput; sb->s_root = d_alloc_root(root_inode); if (!sb->s_root) goto out_iput; return 0; out_iput: iput(root_inode); out_mntput: mntput(proc_mnt); out: return(err); } static struct dentry *hppfs_read_super(struct file_system_type *type, int flags, const char *dev_name, void *data) { return mount_nodev(type, flags, data, hppfs_fill_super); } static struct file_system_type hppfs_type = { .owner = THIS_MODULE, .name = "hppfs", .mount = hppfs_read_super, .kill_sb = kill_anon_super, .fs_flags = 0, }; static int __init init_hppfs(void) { return register_filesystem(&hppfs_type); } static void __exit exit_hppfs(void) { unregister_filesystem(&hppfs_type); } module_init(init_hppfs) module_exit(exit_hppfs) MODULE_LICENSE("GPL");
gpl-2.0
pacificIT/udoo_kernel_imx
arch/arm/mach-imx/mach-pcm043.c
2278
10808
/* * Copyright (C) 2009 Sascha Hauer, Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/types.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <linux/mtd/plat-ram.h> #include <linux/memory.h> #include <linux/gpio.h> #include <linux/smc911x.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/i2c/at24.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include <mach/common.h> #include <mach/iomux-mx35.h> #include <mach/ulpi.h> #include <mach/audmux.h> #include "devices-imx35.h" static const struct fb_videomode fb_modedb[] = { { /* 240x320 @ 60 Hz */ .name = "Sharp-LQ035Q7", .refresh = 60, .xres = 240, .yres = 320, .pixclock = 185925, .left_margin = 9, .right_margin = 16, .upper_margin = 7, .lower_margin = 9, .hsync_len = 1, .vsync_len = 1, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE | FB_SYNC_CLK_INVERT | FB_SYNC_CLK_IDLE_EN, .vmode = FB_VMODE_NONINTERLACED, .flag = 0, }, { /* 240x320 @ 60 Hz */ .name = "TX090", .refresh = 60, .xres = 240, .yres = 320, .pixclock = 38255, .left_margin = 144, .right_margin = 0, .upper_margin = 7, .lower_margin = 40, .hsync_len = 96, .vsync_len = 1, .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_OE_ACT_HIGH, .vmode = FB_VMODE_NONINTERLACED, .flag = 0, }, }; static const struct ipu_platform_data mx3_ipu_data __initconst = { .irq_base = 
MXC_IPU_IRQ_START, }; static struct mx3fb_platform_data mx3fb_pdata __initdata = { .name = "Sharp-LQ035Q7", .mode = fb_modedb, .num_modes = ARRAY_SIZE(fb_modedb), }; static struct physmap_flash_data pcm043_flash_data = { .width = 2, }; static struct resource pcm043_flash_resource = { .start = 0xa0000000, .end = 0xa1ffffff, .flags = IORESOURCE_MEM, }; static struct platform_device pcm043_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &pcm043_flash_data, }, .resource = &pcm043_flash_resource, .num_resources = 1, }; static const struct imxuart_platform_data uart_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; static const struct imxi2c_platform_data pcm043_i2c0_data __initconst = { .bitrate = 50000, }; static struct at24_platform_data board_eeprom = { .byte_len = 4096, .page_size = 32, .flags = AT24_FLAG_ADDR16, }; static struct i2c_board_info pcm043_i2c_devices[] = { { I2C_BOARD_INFO("at24", 0x52), /* E0=0, E1=1, E2=0 */ .platform_data = &board_eeprom, }, { I2C_BOARD_INFO("pcf8563", 0x51), }, }; static struct platform_device *devices[] __initdata = { &pcm043_flash, }; static iomux_v3_cfg_t pcm043_pads[] = { /* UART1 */ MX35_PAD_CTS1__UART1_CTS, MX35_PAD_RTS1__UART1_RTS, MX35_PAD_TXD1__UART1_TXD_MUX, MX35_PAD_RXD1__UART1_RXD_MUX, /* UART2 */ MX35_PAD_CTS2__UART2_CTS, MX35_PAD_RTS2__UART2_RTS, MX35_PAD_TXD2__UART2_TXD_MUX, MX35_PAD_RXD2__UART2_RXD_MUX, /* FEC */ MX35_PAD_FEC_TX_CLK__FEC_TX_CLK, MX35_PAD_FEC_RX_CLK__FEC_RX_CLK, MX35_PAD_FEC_RX_DV__FEC_RX_DV, MX35_PAD_FEC_COL__FEC_COL, MX35_PAD_FEC_RDATA0__FEC_RDATA_0, MX35_PAD_FEC_TDATA0__FEC_TDATA_0, MX35_PAD_FEC_TX_EN__FEC_TX_EN, MX35_PAD_FEC_MDC__FEC_MDC, MX35_PAD_FEC_MDIO__FEC_MDIO, MX35_PAD_FEC_TX_ERR__FEC_TX_ERR, MX35_PAD_FEC_RX_ERR__FEC_RX_ERR, MX35_PAD_FEC_CRS__FEC_CRS, MX35_PAD_FEC_RDATA1__FEC_RDATA_1, MX35_PAD_FEC_TDATA1__FEC_TDATA_1, MX35_PAD_FEC_RDATA2__FEC_RDATA_2, MX35_PAD_FEC_TDATA2__FEC_TDATA_2, MX35_PAD_FEC_RDATA3__FEC_RDATA_3, MX35_PAD_FEC_TDATA3__FEC_TDATA_3, /* I2C1 */ 
MX35_PAD_I2C1_CLK__I2C1_SCL, MX35_PAD_I2C1_DAT__I2C1_SDA, /* Display */ MX35_PAD_LD0__IPU_DISPB_DAT_0, MX35_PAD_LD1__IPU_DISPB_DAT_1, MX35_PAD_LD2__IPU_DISPB_DAT_2, MX35_PAD_LD3__IPU_DISPB_DAT_3, MX35_PAD_LD4__IPU_DISPB_DAT_4, MX35_PAD_LD5__IPU_DISPB_DAT_5, MX35_PAD_LD6__IPU_DISPB_DAT_6, MX35_PAD_LD7__IPU_DISPB_DAT_7, MX35_PAD_LD8__IPU_DISPB_DAT_8, MX35_PAD_LD9__IPU_DISPB_DAT_9, MX35_PAD_LD10__IPU_DISPB_DAT_10, MX35_PAD_LD11__IPU_DISPB_DAT_11, MX35_PAD_LD12__IPU_DISPB_DAT_12, MX35_PAD_LD13__IPU_DISPB_DAT_13, MX35_PAD_LD14__IPU_DISPB_DAT_14, MX35_PAD_LD15__IPU_DISPB_DAT_15, MX35_PAD_LD16__IPU_DISPB_DAT_16, MX35_PAD_LD17__IPU_DISPB_DAT_17, MX35_PAD_D3_HSYNC__IPU_DISPB_D3_HSYNC, MX35_PAD_D3_FPSHIFT__IPU_DISPB_D3_CLK, MX35_PAD_D3_DRDY__IPU_DISPB_D3_DRDY, MX35_PAD_CONTRAST__IPU_DISPB_CONTR, MX35_PAD_D3_VSYNC__IPU_DISPB_D3_VSYNC, MX35_PAD_D3_REV__IPU_DISPB_D3_REV, MX35_PAD_D3_CLS__IPU_DISPB_D3_CLS, /* gpio */ MX35_PAD_ATA_CS0__GPIO2_6, /* USB host */ MX35_PAD_I2C2_CLK__USB_TOP_USBH2_PWR, MX35_PAD_I2C2_DAT__USB_TOP_USBH2_OC, /* SSI */ MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS, MX35_PAD_STXD4__AUDMUX_AUD4_TXD, MX35_PAD_SRXD4__AUDMUX_AUD4_RXD, MX35_PAD_SCK4__AUDMUX_AUD4_TXC, /* CAN2 */ MX35_PAD_TX5_RX0__CAN2_TXCAN, MX35_PAD_TX4_RX1__CAN2_RXCAN, /* esdhc */ MX35_PAD_SD1_CMD__ESDHC1_CMD, MX35_PAD_SD1_CLK__ESDHC1_CLK, MX35_PAD_SD1_DATA0__ESDHC1_DAT0, MX35_PAD_SD1_DATA1__ESDHC1_DAT1, MX35_PAD_SD1_DATA2__ESDHC1_DAT2, MX35_PAD_SD1_DATA3__ESDHC1_DAT3, MX35_PAD_ATA_DATA10__GPIO2_23, /* WriteProtect */ MX35_PAD_ATA_DATA11__GPIO2_24, /* CardDetect */ }; #define AC97_GPIO_TXFS IMX_GPIO_NR(2, 31) #define AC97_GPIO_TXD IMX_GPIO_NR(2, 28) #define AC97_GPIO_RESET IMX_GPIO_NR(2, 0) #define SD1_GPIO_WP IMX_GPIO_NR(2, 23) #define SD1_GPIO_CD IMX_GPIO_NR(2, 24) static void pcm043_ac97_warm_reset(struct snd_ac97 *ac97) { iomux_v3_cfg_t txfs_gpio = MX35_PAD_STXFS4__GPIO2_31; iomux_v3_cfg_t txfs = MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS; int ret; ret = gpio_request(AC97_GPIO_TXFS, "SSI"); if (ret) { 
printk("failed to get GPIO_TXFS: %d\n", ret); return; } mxc_iomux_v3_setup_pad(txfs_gpio); /* warm reset */ gpio_direction_output(AC97_GPIO_TXFS, 1); udelay(2); gpio_set_value(AC97_GPIO_TXFS, 0); gpio_free(AC97_GPIO_TXFS); mxc_iomux_v3_setup_pad(txfs); } static void pcm043_ac97_cold_reset(struct snd_ac97 *ac97) { iomux_v3_cfg_t txfs_gpio = MX35_PAD_STXFS4__GPIO2_31; iomux_v3_cfg_t txfs = MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS; iomux_v3_cfg_t txd_gpio = MX35_PAD_STXD4__GPIO2_28; iomux_v3_cfg_t txd = MX35_PAD_STXD4__AUDMUX_AUD4_TXD; iomux_v3_cfg_t reset_gpio = MX35_PAD_SD2_CMD__GPIO2_0; int ret; ret = gpio_request(AC97_GPIO_TXFS, "SSI"); if (ret) goto err1; ret = gpio_request(AC97_GPIO_TXD, "SSI"); if (ret) goto err2; ret = gpio_request(AC97_GPIO_RESET, "SSI"); if (ret) goto err3; mxc_iomux_v3_setup_pad(txfs_gpio); mxc_iomux_v3_setup_pad(txd_gpio); mxc_iomux_v3_setup_pad(reset_gpio); gpio_direction_output(AC97_GPIO_TXFS, 0); gpio_direction_output(AC97_GPIO_TXD, 0); /* cold reset */ gpio_direction_output(AC97_GPIO_RESET, 0); udelay(10); gpio_direction_output(AC97_GPIO_RESET, 1); mxc_iomux_v3_setup_pad(txd); mxc_iomux_v3_setup_pad(txfs); gpio_free(AC97_GPIO_RESET); err3: gpio_free(AC97_GPIO_TXD); err2: gpio_free(AC97_GPIO_TXFS); err1: if (ret) printk("%s failed with %d\n", __func__, ret); mdelay(1); } static const struct imx_ssi_platform_data pcm043_ssi_pdata __initconst = { .ac97_reset = pcm043_ac97_cold_reset, .ac97_warm_reset = pcm043_ac97_warm_reset, .flags = IMX_SSI_USE_AC97, }; static const struct mxc_nand_platform_data pcm037_nand_board_info __initconst = { .width = 1, .hw_ecc = 1, }; static int pcm043_otg_init(struct platform_device *pdev) { return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI); } static struct mxc_usbh_platform_data otg_pdata __initdata = { .init = pcm043_otg_init, .portsc = MXC_EHCI_MODE_UTMI, }; static int pcm043_usbh1_init(struct platform_device *pdev) { return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_SINGLE_UNI | 
MXC_EHCI_INTERNAL_PHY | MXC_EHCI_IPPUE_DOWN); } static const struct mxc_usbh_platform_data usbh1_pdata __initconst = { .init = pcm043_usbh1_init, .portsc = MXC_EHCI_MODE_SERIAL, }; static const struct fsl_usb2_platform_data otg_device_pdata __initconst = { .operating_mode = FSL_USB2_DR_DEVICE, .phy_mode = FSL_USB2_PHY_UTMI, }; static int otg_mode_host; static int __init pcm043_otg_mode(char *options) { if (!strcmp(options, "host")) otg_mode_host = 1; else if (!strcmp(options, "device")) otg_mode_host = 0; else pr_info("otg_mode neither \"host\" nor \"device\". " "Defaulting to device\n"); return 0; } __setup("otg_mode=", pcm043_otg_mode); static struct esdhc_platform_data sd1_pdata = { .wp_gpio = SD1_GPIO_WP, .cd_gpio = SD1_GPIO_CD, }; /* * Board specific initialization. */ static void __init pcm043_init(void) { mxc_iomux_v3_setup_multiple_pads(pcm043_pads, ARRAY_SIZE(pcm043_pads)); mxc_audmux_v2_configure_port(3, MXC_AUDMUX_V2_PTCR_SYN | /* 4wire mode */ MXC_AUDMUX_V2_PTCR_TFSEL(0) | MXC_AUDMUX_V2_PTCR_TFSDIR, MXC_AUDMUX_V2_PDCR_RXDSEL(0)); mxc_audmux_v2_configure_port(0, MXC_AUDMUX_V2_PTCR_SYN | /* 4wire mode */ MXC_AUDMUX_V2_PTCR_TCSEL(3) | MXC_AUDMUX_V2_PTCR_TCLKDIR, /* clock is output */ MXC_AUDMUX_V2_PDCR_RXDSEL(3)); imx35_add_fec(NULL); platform_add_devices(devices, ARRAY_SIZE(devices)); imx35_add_imx2_wdt(NULL); imx35_add_imx_uart0(&uart_pdata); imx35_add_mxc_nand(&pcm037_nand_board_info); imx35_add_imx_ssi(0, &pcm043_ssi_pdata); imx35_add_imx_uart1(&uart_pdata); i2c_register_board_info(0, pcm043_i2c_devices, ARRAY_SIZE(pcm043_i2c_devices)); imx35_add_imx_i2c0(&pcm043_i2c0_data); imx35_add_ipu_core(&mx3_ipu_data); imx35_add_mx3_sdc_fb(&mx3fb_pdata); if (otg_mode_host) { otg_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT); if (otg_pdata.otg) imx35_add_mxc_ehci_otg(&otg_pdata); } imx35_add_mxc_ehci_hs(&usbh1_pdata); if (!otg_mode_host) imx35_add_fsl_usb2_udc(&otg_device_pdata); imx35_add_flexcan1(NULL); imx35_add_sdhci_esdhc_imx(0, 
&sd1_pdata); } static void __init pcm043_timer_init(void) { mx35_clocks_init(); } struct sys_timer pcm043_timer = { .init = pcm043_timer_init, }; MACHINE_START(PCM043, "Phytec Phycore pcm043") /* Maintainer: Pengutronix */ .boot_params = MX3x_PHYS_OFFSET + 0x100, .map_io = mx35_map_io, .init_early = imx35_init_early, .init_irq = mx35_init_irq, .timer = &pcm043_timer, .init_machine = pcm043_init, MACHINE_END
gpl-2.0
uarka/linux-next
net/dccp/ccid.c
3046
5374
/* * net/dccp/ccid.c * * An implementation of the DCCP protocol * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * CCID infrastructure * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/slab.h> #include "ccid.h" #include "ccids/lib/tfrc.h" static struct ccid_operations *ccids[] = { &ccid2_ops, #ifdef CONFIG_IP_DCCP_CCID3 &ccid3_ops, #endif }; static struct ccid_operations *ccid_by_number(const u8 id) { int i; for (i = 0; i < ARRAY_SIZE(ccids); i++) if (ccids[i]->ccid_id == id) return ccids[i]; return NULL; } /* check that up to @array_len members in @ccid_array are supported */ bool ccid_support_check(u8 const *ccid_array, u8 array_len) { while (array_len > 0) if (ccid_by_number(ccid_array[--array_len]) == NULL) return false; return true; } /** * ccid_get_builtin_ccids - Populate a list of built-in CCIDs * @ccid_array: pointer to copy into * @array_len: value to return length into * * This function allocates memory - caller must see that it is freed after use. */ int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len) { *ccid_array = kmalloc(ARRAY_SIZE(ccids), gfp_any()); if (*ccid_array == NULL) return -ENOBUFS; for (*array_len = 0; *array_len < ARRAY_SIZE(ccids); *array_len += 1) (*ccid_array)[*array_len] = ccids[*array_len]->ccid_id; return 0; } int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, char __user *optval, int __user *optlen) { u8 *ccid_array, array_len; int err = 0; if (ccid_get_builtin_ccids(&ccid_array, &array_len)) return -ENOBUFS; if (put_user(array_len, optlen)) err = -EFAULT; else if (len > 0 && copy_to_user(optval, ccid_array, len > array_len ? array_len : len)) err = -EFAULT; kfree(ccid_array); return err; } static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...) 
{ struct kmem_cache *slab; va_list args; va_start(args, fmt); vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args); va_end(args); slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, SLAB_HWCACHE_ALIGN, NULL); return slab; } static void ccid_kmem_cache_destroy(struct kmem_cache *slab) { if (slab != NULL) kmem_cache_destroy(slab); } static int ccid_activate(struct ccid_operations *ccid_ops) { int err = -ENOBUFS; ccid_ops->ccid_hc_rx_slab = ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size, ccid_ops->ccid_hc_rx_slab_name, "ccid%u_hc_rx_sock", ccid_ops->ccid_id); if (ccid_ops->ccid_hc_rx_slab == NULL) goto out; ccid_ops->ccid_hc_tx_slab = ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size, ccid_ops->ccid_hc_tx_slab_name, "ccid%u_hc_tx_sock", ccid_ops->ccid_id); if (ccid_ops->ccid_hc_tx_slab == NULL) goto out_free_rx_slab; pr_info("DCCP: Activated CCID %d (%s)\n", ccid_ops->ccid_id, ccid_ops->ccid_name); err = 0; out: return err; out_free_rx_slab: ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); ccid_ops->ccid_hc_rx_slab = NULL; goto out; } static void ccid_deactivate(struct ccid_operations *ccid_ops) { ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab); ccid_ops->ccid_hc_tx_slab = NULL; ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); ccid_ops->ccid_hc_rx_slab = NULL; pr_info("DCCP: Deactivated CCID %d (%s)\n", ccid_ops->ccid_id, ccid_ops->ccid_name); } struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx) { struct ccid_operations *ccid_ops = ccid_by_number(id); struct ccid *ccid = NULL; if (ccid_ops == NULL) goto out; ccid = kmem_cache_alloc(rx ? 
ccid_ops->ccid_hc_rx_slab : ccid_ops->ccid_hc_tx_slab, gfp_any()); if (ccid == NULL) goto out; ccid->ccid_ops = ccid_ops; if (rx) { memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size); if (ccid->ccid_ops->ccid_hc_rx_init != NULL && ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0) goto out_free_ccid; } else { memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size); if (ccid->ccid_ops->ccid_hc_tx_init != NULL && ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0) goto out_free_ccid; } out: return ccid; out_free_ccid: kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab : ccid_ops->ccid_hc_tx_slab, ccid); ccid = NULL; goto out; } void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk) { if (ccid != NULL) { if (ccid->ccid_ops->ccid_hc_rx_exit != NULL) ccid->ccid_ops->ccid_hc_rx_exit(sk); kmem_cache_free(ccid->ccid_ops->ccid_hc_rx_slab, ccid); } } void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk) { if (ccid != NULL) { if (ccid->ccid_ops->ccid_hc_tx_exit != NULL) ccid->ccid_ops->ccid_hc_tx_exit(sk); kmem_cache_free(ccid->ccid_ops->ccid_hc_tx_slab, ccid); } } int __init ccid_initialize_builtins(void) { int i, err = tfrc_lib_init(); if (err) return err; for (i = 0; i < ARRAY_SIZE(ccids); i++) { err = ccid_activate(ccids[i]); if (err) goto unwind_registrations; } return 0; unwind_registrations: while(--i >= 0) ccid_deactivate(ccids[i]); tfrc_lib_exit(); return err; } void ccid_cleanup_builtins(void) { int i; for (i = 0; i < ARRAY_SIZE(ccids); i++) ccid_deactivate(ccids[i]); tfrc_lib_exit(); }
gpl-2.0
FrozenCow/msm
arch/mn10300/mm/fault.c
4070
9908
/* MN10300 MMU Fault handler * * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Modified by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/vt_kern.h> /* For unblank_screen() */ #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/hardirq.h> #include <asm/cpu-regs.h> #include <asm/debugger.h> #include <asm/gdb-stub.h> /* * Unlock any spinlocks which will prevent us from getting the * message out */ void bust_spinlocks(int yes) { if (yes) { oops_in_progress = 1; } else { int loglevel_save = console_loglevel; #ifdef CONFIG_VT unblank_screen(); #endif oops_in_progress = 0; /* * OK, the message is on the console. Now we call printk() * without oops_in_progress set so that printk will give klogd * a poke. Hold onto your hats... */ console_loglevel = 15; /* NMI oopser may have shut the console * up */ printk(" "); console_loglevel = loglevel_save; } } void do_BUG(const char *file, int line) { bust_spinlocks(1); printk(KERN_EMERG "------------[ cut here ]------------\n"); printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line); } #if 0 static void print_pagetable_entries(pgd_t *pgdir, unsigned long address) { pgd_t *pgd; pmd_t *pmd; pte_t *pte; pgd = pgdir + __pgd_offset(address); printk(KERN_DEBUG "pgd entry %p: %016Lx\n", pgd, (long long) pgd_val(*pgd)); if (!pgd_present(*pgd)) { printk(KERN_DEBUG "... 
pgd not present!\n"); return; } pmd = pmd_offset(pgd, address); printk(KERN_DEBUG "pmd entry %p: %016Lx\n", pmd, (long long)pmd_val(*pmd)); if (!pmd_present(*pmd)) { printk(KERN_DEBUG "... pmd not present!\n"); return; } pte = pte_offset(pmd, address); printk(KERN_DEBUG "pte entry %p: %016Lx\n", pte, (long long) pte_val(*pte)); if (!pte_present(*pte)) printk(KERN_DEBUG "... pte not present!\n"); } #endif /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. * * fault_code: * - LSW: either MMUFCR_IFC or MMUFCR_DFC as appropriate * - MSW: 0 if data access, 1 if instruction access * - bit 0: TLB miss flag * - bit 1: initial write * - bit 2: page invalid * - bit 3: protection violation * - bit 4: accessor (0=user 1=kernel) * - bit 5: 0=read 1=write * - bit 6-8: page protection spec * - bit 9: illegal address * - bit 16: 0=data 1=ins * */ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code, unsigned long address) { struct vm_area_struct *vma; struct task_struct *tsk; struct mm_struct *mm; unsigned long page; siginfo_t info; int write, fault; #ifdef CONFIG_GDBSTUB /* handle GDB stub causing a fault */ if (gdbstub_busy) { gdbstub_exception(regs, TBR & TBR_INT_CODE); return; } #endif #if 0 printk(KERN_DEBUG "--- do_page_fault(%p,%s:%04lx,%08lx)\n", regs, fault_code & 0x10000 ? "ins" : "data", fault_code & 0xffff, address); #endif tsk = current; /* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. * * NOTE! We MUST NOT take any locks for this case. We may * be in an interrupt or a critical region, and should * only copy the information from the master page table, * nothing more. 
* * This verifies that the fault happens in kernel space * and that the fault was a page not present (invalid) error */ if (address >= VMALLOC_START && address < VMALLOC_END && (fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR && (fault_code & MMUFCR_xFC_PGINVAL) == MMUFCR_xFC_PGINVAL ) goto vmalloc_fault; mm = tsk->mm; info.si_code = SEGV_MAPERR; /* * If we're in an interrupt or have no user * context, we must not take the fault.. */ if (in_atomic() || !mm) goto no_context; down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) { /* accessing the stack below the stack pointer is always a * bug */ if ((address & PAGE_MASK) + 2 * PAGE_SIZE < regs->sp) { #if 0 printk(KERN_WARNING "[%d] ### Access below stack @%lx (sp=%lx)\n", current->pid, address, regs->sp); printk(KERN_WARNING "vma [%08x - %08x]\n", vma->vm_start, vma->vm_end); show_registers(regs); printk(KERN_WARNING "[%d] ### Code: [%08lx]" " %02x %02x %02x %02x %02x %02x %02x %02x\n", current->pid, regs->pc, ((u8 *) regs->pc)[0], ((u8 *) regs->pc)[1], ((u8 *) regs->pc)[2], ((u8 *) regs->pc)[3], ((u8 *) regs->pc)[4], ((u8 *) regs->pc)[5], ((u8 *) regs->pc)[6], ((u8 *) regs->pc)[7] ); #endif goto bad_area; } } if (expand_stack(vma, address)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. 
*/ good_area: info.si_code = SEGV_ACCERR; write = 0; switch (fault_code & (MMUFCR_xFC_PGINVAL|MMUFCR_xFC_TYPE)) { default: /* 3: write, present */ case MMUFCR_xFC_TYPE_WRITE: #ifdef TEST_VERIFY_AREA if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR) printk(KERN_DEBUG "WP fault at %08lx\n", regs->pc); #endif /* write to absent page */ case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_WRITE: if (!(vma->vm_flags & VM_WRITE)) goto bad_area; write++; break; /* read from protected page */ case MMUFCR_xFC_TYPE_READ: goto bad_area; /* read from absent page present */ case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_READ: if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; break; } /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (fault & VM_FAULT_MAJOR) current->maj_flt++; else current->min_flt++; up_read(&mm->mmap_sem); return; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ bad_area: up_read(&mm->mmap_sem); /* User mode accesses just cause a SIGSEGV */ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) { info.si_signo = SIGSEGV; info.si_errno = 0; /* info.si_code has been set above */ info.si_addr = (void *)address; force_sig_info(SIGSEGV, &info, tsk); return; } no_context: /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) return; /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. 
*/ bust_spinlocks(1); if (address < PAGE_SIZE) printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); else printk(KERN_ALERT "Unable to handle kernel paging request"); printk(" at virtual address %08lx\n", address); printk(" printing pc:\n"); printk(KERN_ALERT "%08lx\n", regs->pc); debugger_intercept(fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR, SIGSEGV, SEGV_ACCERR, regs); page = PTBR; page = ((unsigned long *) __va(page))[address >> 22]; printk(KERN_ALERT "*pde = %08lx\n", page); if (page & 1) { page &= PAGE_MASK; address &= 0x003ff000; page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT]; printk(KERN_ALERT "*pte = %08lx\n", page); } die("Oops", regs, fault_code); do_exit(SIGKILL); /* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. */ out_of_memory: up_read(&mm->mmap_sem); printk(KERN_ALERT "VM: killing process %s\n", tsk->comm); if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) do_exit(SIGKILL); goto no_context; do_sigbus: up_read(&mm->mmap_sem); /* * Send a sigbus, regardless of whether we were in kernel * or user mode. */ info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRERR; info.si_addr = (void *)address; force_sig_info(SIGBUS, &info, tsk); /* Kernel mode? Handle exceptions or die */ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR) goto no_context; return; vmalloc_fault: { /* * Synchronize this task's top level page-table * with the 'reference' page table. * * Do _not_ use "tsk" here. We might be inside * an interrupt in the middle of a task switch.. 
*/ int index = pgd_index(address); pgd_t *pgd, *pgd_k; pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; pte_t *pte_k; pgd_k = init_mm.pgd + index; if (!pgd_present(*pgd_k)) goto no_context; pud_k = pud_offset(pgd_k, address); if (!pud_present(*pud_k)) goto no_context; pmd_k = pmd_offset(pud_k, address); if (!pmd_present(*pmd_k)) goto no_context; pgd = (pgd_t *) PTBR + index; pud = pud_offset(pgd, address); pmd = pmd_offset(pud, address); set_pmd(pmd, *pmd_k); pte_k = pte_offset_kernel(pmd_k, address); if (!pte_present(*pte_k)) goto no_context; return; } }
gpl-2.0
fwmiller/Conserver-Freescale-Linux-U-boot
rpm/BUILD/linux-3.0.35/drivers/s390/scsi/zfcp_sysfs.c
5606
17160
/* * zfcp device driver * * sysfs attributes. * * Copyright IBM Corporation 2008, 2010 */ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/slab.h> #include "zfcp_ext.h" #define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode,\ _show, _store) #define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value) \ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ struct device_attribute *at,\ char *buf) \ { \ struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ \ return sprintf(buf, _format, _value); \ } \ static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ zfcp_sysfs_##_feat##_##_name##_show, NULL); #define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ struct device_attribute *at,\ char *buf) \ { \ struct ccw_device *cdev = to_ccwdev(dev); \ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); \ int i; \ \ if (!adapter) \ return -ENODEV; \ \ i = sprintf(buf, _format, _value); \ zfcp_ccw_adapter_put(adapter); \ return i; \ } \ static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO, \ zfcp_sysfs_adapter_##_name##_show, NULL); ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status)); ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n", (unsigned long long) adapter->peer_wwnn); ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n", (unsigned long long) adapter->peer_wwpn); ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version); ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version); ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE) != 0); ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n", atomic_read(&port->status)); 
ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n", (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) != 0); ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n", (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n", zfcp_unit_sdev_status(unit)); ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n", (zfcp_unit_sdev_status(unit) & ZFCP_STATUS_COMMON_ERP_INUSE) != 0); ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", (zfcp_unit_sdev_status(unit) & ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n", (zfcp_unit_sdev_status(unit) & ZFCP_STATUS_LUN_SHARED) != 0); ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n", (zfcp_unit_sdev_status(unit) & ZFCP_STATUS_LUN_READONLY) != 0); static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) return sprintf(buf, "1\n"); return sprintf(buf, "0\n"); } static ssize_t zfcp_sysfs_port_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); unsigned long val; if (strict_strtoul(buf, 0, &val) || val != 0) return -EINVAL; zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2"); zfcp_erp_wait(port->adapter); return count; } static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO, zfcp_sysfs_port_failed_show, zfcp_sysfs_port_failed_store); static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); struct scsi_device *sdev; unsigned int status, failed = 1; sdev = zfcp_unit_sdev(unit); if (sdev) { status = 
atomic_read(&sdev_to_zfcp(sdev)->status); failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0; scsi_device_put(sdev); } return sprintf(buf, "%d\n", failed); } static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); unsigned long val; struct scsi_device *sdev; if (strict_strtoul(buf, 0, &val) || val != 0) return -EINVAL; sdev = zfcp_unit_sdev(unit); if (sdev) { zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, "syufai2"); zfcp_erp_wait(unit->port->adapter); } else zfcp_unit_scsi_scan(unit); return count; } static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO, zfcp_sysfs_unit_failed_show, zfcp_sysfs_unit_failed_store); static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ccw_device *cdev = to_ccwdev(dev); struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); int i; if (!adapter) return -ENODEV; if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) i = sprintf(buf, "1\n"); else i = sprintf(buf, "0\n"); zfcp_ccw_adapter_put(adapter); return i; } static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ccw_device *cdev = to_ccwdev(dev); struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); unsigned long val; int retval = 0; if (!adapter) return -ENODEV; if (strict_strtoul(buf, 0, &val) || val != 0) { retval = -EINVAL; goto out; } zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, "syafai2"); zfcp_erp_wait(adapter); out: zfcp_ccw_adapter_put(adapter); return retval ? 
retval : (ssize_t) count; } static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO, zfcp_sysfs_adapter_failed_show, zfcp_sysfs_adapter_failed_store); static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ccw_device *cdev = to_ccwdev(dev); struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return -ENODEV; /* sync the user-space- with the kernel-invocation of scan_work */ queue_work(adapter->work_queue, &adapter->scan_work); flush_work(&adapter->scan_work); zfcp_ccw_adapter_put(adapter); return (ssize_t) count; } static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, zfcp_sysfs_port_rescan_store); static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ccw_device *cdev = to_ccwdev(dev); struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); struct zfcp_port *port; u64 wwpn; int retval = -EINVAL; if (!adapter) return -ENODEV; if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) goto out; port = zfcp_get_port_by_wwpn(adapter, wwpn); if (!port) goto out; else retval = 0; write_lock_irq(&adapter->port_list_lock); list_del(&port->list); write_unlock_irq(&adapter->port_list_lock); put_device(&port->dev); zfcp_erp_port_shutdown(port, 0, "syprs_1"); zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); out: zfcp_ccw_adapter_put(adapter); return retval ? 
retval : (ssize_t) count; } static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL, zfcp_sysfs_port_remove_store); static struct attribute *zfcp_adapter_attrs[] = { &dev_attr_adapter_failed.attr, &dev_attr_adapter_in_recovery.attr, &dev_attr_adapter_port_remove.attr, &dev_attr_adapter_port_rescan.attr, &dev_attr_adapter_peer_wwnn.attr, &dev_attr_adapter_peer_wwpn.attr, &dev_attr_adapter_peer_d_id.attr, &dev_attr_adapter_card_version.attr, &dev_attr_adapter_lic_version.attr, &dev_attr_adapter_status.attr, &dev_attr_adapter_hardware_version.attr, NULL }; struct attribute_group zfcp_sysfs_adapter_attrs = { .attrs = zfcp_adapter_attrs, }; static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); u64 fcp_lun; if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) return -EINVAL; if (zfcp_unit_add(port, fcp_lun)) return -EINVAL; return count; } static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); u64 fcp_lun; if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) return -EINVAL; if (zfcp_unit_remove(port, fcp_lun)) return -EINVAL; return count; } static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); static struct attribute *zfcp_port_attrs[] = { &dev_attr_unit_add.attr, &dev_attr_unit_remove.attr, &dev_attr_port_failed.attr, &dev_attr_port_in_recovery.attr, &dev_attr_port_status.attr, &dev_attr_port_access_denied.attr, NULL }; /** * zfcp_sysfs_port_attrs - sysfs attributes for all other ports */ struct attribute_group zfcp_sysfs_port_attrs = { .attrs = zfcp_port_attrs, }; static struct attribute *zfcp_unit_attrs[] = { &dev_attr_unit_failed.attr, &dev_attr_unit_in_recovery.attr, 
&dev_attr_unit_status.attr, &dev_attr_unit_access_denied.attr, &dev_attr_unit_access_shared.attr, &dev_attr_unit_access_readonly.attr, NULL }; struct attribute_group zfcp_sysfs_unit_attrs = { .attrs = zfcp_unit_attrs, }; #define ZFCP_DEFINE_LATENCY_ATTR(_name) \ static ssize_t \ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) { \ struct scsi_device *sdev = to_scsi_device(dev); \ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ struct zfcp_latencies *lat = &zfcp_sdev->latencies; \ struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \ unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \ \ spin_lock_bh(&lat->lock); \ fsum = lat->_name.fabric.sum * adapter->timer_ticks; \ fmin = lat->_name.fabric.min * adapter->timer_ticks; \ fmax = lat->_name.fabric.max * adapter->timer_ticks; \ csum = lat->_name.channel.sum * adapter->timer_ticks; \ cmin = lat->_name.channel.min * adapter->timer_ticks; \ cmax = lat->_name.channel.max * adapter->timer_ticks; \ cc = lat->_name.counter; \ spin_unlock_bh(&lat->lock); \ \ do_div(fsum, 1000); \ do_div(fmin, 1000); \ do_div(fmax, 1000); \ do_div(csum, 1000); \ do_div(cmin, 1000); \ do_div(cmax, 1000); \ \ return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \ fmin, fmax, fsum, cmin, cmax, csum, cc); \ } \ static ssize_t \ zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct scsi_device *sdev = to_scsi_device(dev); \ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ struct zfcp_latencies *lat = &zfcp_sdev->latencies; \ unsigned long flags; \ \ spin_lock_irqsave(&lat->lock, flags); \ lat->_name.fabric.sum = 0; \ lat->_name.fabric.min = 0xFFFFFFFF; \ lat->_name.fabric.max = 0; \ lat->_name.channel.sum = 0; \ lat->_name.channel.min = 0xFFFFFFFF; \ lat->_name.channel.max = 0; \ lat->_name.counter = 0; \ spin_unlock_irqrestore(&lat->lock, flags); \ \ return (ssize_t) count; 
\ } \ static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO, \ zfcp_sysfs_unit_##_name##_latency_show, \ zfcp_sysfs_unit_##_name##_latency_store); ZFCP_DEFINE_LATENCY_ATTR(read); ZFCP_DEFINE_LATENCY_ATTR(write); ZFCP_DEFINE_LATENCY_ATTR(cmd); #define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \ struct device_attribute *attr,\ char *buf) \ { \ struct scsi_device *sdev = to_scsi_device(dev); \ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ struct zfcp_port *port = zfcp_sdev->port; \ \ return sprintf(buf, _format, _value); \ } \ static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", dev_name(&port->adapter->ccw_device->dev)); ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", (unsigned long long) port->wwpn); static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev)); } static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL); struct device_attribute *zfcp_sysfs_sdev_attrs[] = { &dev_attr_fcp_lun, &dev_attr_wwpn, &dev_attr_hba_id, &dev_attr_read_latency, &dev_attr_write_latency, &dev_attr_cmd_latency, NULL }; static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *scsi_host = dev_to_shost(dev); struct fsf_qtcb_bottom_port *qtcb_port; struct zfcp_adapter *adapter; int retval; adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)) return -EOPNOTSUPP; qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL); if (!qtcb_port) return -ENOMEM; retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port); if (!retval) retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util, qtcb_port->cb_util, qtcb_port->a_util); 
kfree(qtcb_port); return retval; } static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL); static int zfcp_sysfs_adapter_ex_config(struct device *dev, struct fsf_statistics_info *stat_inf) { struct Scsi_Host *scsi_host = dev_to_shost(dev); struct fsf_qtcb_bottom_config *qtcb_config; struct zfcp_adapter *adapter; int retval; adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)) return -EOPNOTSUPP; qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config), GFP_KERNEL); if (!qtcb_config) return -ENOMEM; retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config); if (!retval) *stat_inf = qtcb_config->stat_info; kfree(qtcb_config); return retval; } #define ZFCP_SHOST_ATTR(_name, _format, _arg...) \ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ struct device_attribute *attr,\ char *buf) \ { \ struct fsf_statistics_info stat_info; \ int retval; \ \ retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); \ if (retval) \ return retval; \ \ return sprintf(buf, _format, ## _arg); \ } \ static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL); ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n", (unsigned long long) stat_info.input_req, (unsigned long long) stat_info.output_req, (unsigned long long) stat_info.control_req); ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n", (unsigned long long) stat_info.input_mb, (unsigned long long) stat_info.output_mb); ZFCP_SHOST_ATTR(seconds_active, "%llu\n", (unsigned long long) stat_info.seconds_act); static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *scsi_host = class_to_shost(dev); struct zfcp_qdio *qdio = ((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio; u64 util; spin_lock_bh(&qdio->stat_lock); util = qdio->req_q_util; spin_unlock_bh(&qdio->stat_lock); return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full), 
(unsigned long long)util); } static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); struct device_attribute *zfcp_sysfs_shost_attrs[] = { &dev_attr_utilization, &dev_attr_requests, &dev_attr_megabytes, &dev_attr_seconds_active, &dev_attr_queue_full, NULL };
gpl-2.0
NoelMacwan/Kernel-C6806-KOT49H.S2.2052
drivers/rtc/rtc-proc.c
8166
3272
/* * RTC subsystem, proc interface * * Copyright (C) 2005-06 Tower Technologies * Author: Alessandro Zummo <a.zummo@towertech.it> * * based on arch/arm/common/rtctime.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/rtc.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include "rtc-core.h" static int rtc_proc_show(struct seq_file *seq, void *offset) { int err; struct rtc_device *rtc = seq->private; const struct rtc_class_ops *ops = rtc->ops; struct rtc_wkalrm alrm; struct rtc_time tm; err = rtc_read_time(rtc, &tm); if (err == 0) { seq_printf(seq, "rtc_time\t: %02d:%02d:%02d\n" "rtc_date\t: %04d-%02d-%02d\n", tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday); } err = rtc_read_alarm(rtc, &alrm); if (err == 0) { seq_printf(seq, "alrm_time\t: "); if ((unsigned int)alrm.time.tm_hour <= 24) seq_printf(seq, "%02d:", alrm.time.tm_hour); else seq_printf(seq, "**:"); if ((unsigned int)alrm.time.tm_min <= 59) seq_printf(seq, "%02d:", alrm.time.tm_min); else seq_printf(seq, "**:"); if ((unsigned int)alrm.time.tm_sec <= 59) seq_printf(seq, "%02d\n", alrm.time.tm_sec); else seq_printf(seq, "**\n"); seq_printf(seq, "alrm_date\t: "); if ((unsigned int)alrm.time.tm_year <= 200) seq_printf(seq, "%04d-", alrm.time.tm_year + 1900); else seq_printf(seq, "****-"); if ((unsigned int)alrm.time.tm_mon <= 11) seq_printf(seq, "%02d-", alrm.time.tm_mon + 1); else seq_printf(seq, "**-"); if (alrm.time.tm_mday && (unsigned int)alrm.time.tm_mday <= 31) seq_printf(seq, "%02d\n", alrm.time.tm_mday); else seq_printf(seq, "**\n"); seq_printf(seq, "alarm_IRQ\t: %s\n", alrm.enabled ? "yes" : "no"); seq_printf(seq, "alrm_pending\t: %s\n", alrm.pending ? "yes" : "no"); seq_printf(seq, "update IRQ enabled\t: %s\n", (rtc->uie_rtctimer.enabled) ? 
"yes" : "no"); seq_printf(seq, "periodic IRQ enabled\t: %s\n", (rtc->pie_enabled) ? "yes" : "no"); seq_printf(seq, "periodic IRQ frequency\t: %d\n", rtc->irq_freq); seq_printf(seq, "max user IRQ frequency\t: %d\n", rtc->max_user_freq); } seq_printf(seq, "24hr\t\t: yes\n"); if (ops->proc) ops->proc(rtc->dev.parent, seq); return 0; } static int rtc_proc_open(struct inode *inode, struct file *file) { int ret; struct rtc_device *rtc = PDE(inode)->data; if (!try_module_get(THIS_MODULE)) return -ENODEV; ret = single_open(file, rtc_proc_show, rtc); if (ret) module_put(THIS_MODULE); return ret; } static int rtc_proc_release(struct inode *inode, struct file *file) { int res = single_release(inode, file); module_put(THIS_MODULE); return res; } static const struct file_operations rtc_proc_fops = { .open = rtc_proc_open, .read = seq_read, .llseek = seq_lseek, .release = rtc_proc_release, }; void rtc_proc_add_device(struct rtc_device *rtc) { if (rtc->id == 0) proc_create_data("driver/rtc", 0, NULL, &rtc_proc_fops, rtc); } void rtc_proc_del_device(struct rtc_device *rtc) { if (rtc->id == 0) remove_proc_entry("driver/rtc", NULL); }
gpl-2.0
Perferom/android_kernel_samsung_msm7x27
drivers/infiniband/hw/cxgb3/cxio_resource.c
9446
9382
/* * Copyright (c) 2006 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* Crude resource management */ #include <linux/kernel.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/kfifo.h> #include <linux/spinlock.h> #include <linux/errno.h> #include "cxio_resource.h" #include "cxio_hal.h" static struct kfifo rhdl_fifo; static spinlock_t rhdl_fifo_lock; #define RANDOM_SIZE 16 static int __cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t *fifo_lock, u32 nr, u32 skip_low, u32 skip_high, int random) { u32 i, j, entry = 0, idx; u32 random_bytes; u32 rarray[16]; spin_lock_init(fifo_lock); if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL)) return -ENOMEM; for (i = 0; i < skip_low + skip_high; i++) kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32)); if (random) { j = 0; random_bytes = random32(); for (i = 0; i < RANDOM_SIZE; i++) rarray[i] = i + skip_low; for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) { if (j >= RANDOM_SIZE) { j = 0; random_bytes = random32(); } idx = (random_bytes >> (j * 2)) & 0xF; kfifo_in(fifo, (unsigned char *) &rarray[idx], sizeof(u32)); rarray[idx] = i; j++; } for (i = 0; i < RANDOM_SIZE; i++) kfifo_in(fifo, (unsigned char *) &rarray[i], sizeof(u32)); } else for (i = skip_low; i < nr - skip_high; i++) kfifo_in(fifo, (unsigned char *) &i, sizeof(u32)); for (i = 0; i < skip_low + skip_high; i++) if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), fifo_lock) != sizeof(u32)) break; return 0; } static int cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock, u32 nr, u32 skip_low, u32 skip_high) { return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low, skip_high, 0)); } static int cxio_init_resource_fifo_random(struct kfifo *fifo, spinlock_t * fifo_lock, u32 nr, u32 skip_low, u32 skip_high) { return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low, skip_high, 1)); } static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p) { u32 i; spin_lock_init(&rdev_p->rscp->qpid_fifo_lock); if (kfifo_alloc(&rdev_p->rscp->qpid_fifo, T3_MAX_NUM_QP * 
sizeof(u32), GFP_KERNEL)) return -ENOMEM; for (i = 16; i < T3_MAX_NUM_QP; i++) if (!(i & rdev_p->qpmask)) kfifo_in(&rdev_p->rscp->qpid_fifo, (unsigned char *) &i, sizeof(u32)); return 0; } int cxio_hal_init_rhdl_resource(u32 nr_rhdl) { return cxio_init_resource_fifo(&rhdl_fifo, &rhdl_fifo_lock, nr_rhdl, 1, 0); } void cxio_hal_destroy_rhdl_resource(void) { kfifo_free(&rhdl_fifo); } /* nr_* must be power of 2 */ int cxio_hal_init_resource(struct cxio_rdev *rdev_p, u32 nr_tpt, u32 nr_pbl, u32 nr_rqt, u32 nr_qpid, u32 nr_cqid, u32 nr_pdid) { int err = 0; struct cxio_hal_resource *rscp; rscp = kmalloc(sizeof(*rscp), GFP_KERNEL); if (!rscp) return -ENOMEM; rdev_p->rscp = rscp; err = cxio_init_resource_fifo_random(&rscp->tpt_fifo, &rscp->tpt_fifo_lock, nr_tpt, 1, 0); if (err) goto tpt_err; err = cxio_init_qpid_fifo(rdev_p); if (err) goto qpid_err; err = cxio_init_resource_fifo(&rscp->cqid_fifo, &rscp->cqid_fifo_lock, nr_cqid, 1, 0); if (err) goto cqid_err; err = cxio_init_resource_fifo(&rscp->pdid_fifo, &rscp->pdid_fifo_lock, nr_pdid, 1, 0); if (err) goto pdid_err; return 0; pdid_err: kfifo_free(&rscp->cqid_fifo); cqid_err: kfifo_free(&rscp->qpid_fifo); qpid_err: kfifo_free(&rscp->tpt_fifo); tpt_err: return -ENOMEM; } /* * returns 0 if no resource available */ static u32 cxio_hal_get_resource(struct kfifo *fifo, spinlock_t * lock) { u32 entry; if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock)) return entry; else return 0; /* fifo emptry */ } static void cxio_hal_put_resource(struct kfifo *fifo, spinlock_t * lock, u32 entry) { BUG_ON( kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock) == 0); } u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp) { return cxio_hal_get_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock); } void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag) { cxio_hal_put_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock, stag); } u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp) { u32 qpid = 
cxio_hal_get_resource(&rscp->qpid_fifo, &rscp->qpid_fifo_lock); PDBG("%s qpid 0x%x\n", __func__, qpid); return qpid; } void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid) { PDBG("%s qpid 0x%x\n", __func__, qpid); cxio_hal_put_resource(&rscp->qpid_fifo, &rscp->qpid_fifo_lock, qpid); } u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp) { return cxio_hal_get_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock); } void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid) { cxio_hal_put_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock, cqid); } u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp) { return cxio_hal_get_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock); } void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid) { cxio_hal_put_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock, pdid); } void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp) { kfifo_free(&rscp->tpt_fifo); kfifo_free(&rscp->cqid_fifo); kfifo_free(&rscp->qpid_fifo); kfifo_free(&rscp->pdid_fifo); kfree(rscp); } /* * PBL Memory Manager. Uses Linux generic allocator. 
*/ #define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */ u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size) { unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size); PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); return (u32)addr; } void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size) { PDBG("%s addr 0x%x size %d\n", __func__, addr, size); gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size); } int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p) { unsigned pbl_start, pbl_chunk; rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1); if (!rdev_p->pbl_pool) return -ENOMEM; pbl_start = rdev_p->rnic_info.pbl_base; pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1; while (pbl_start < rdev_p->rnic_info.pbl_top) { pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1, pbl_chunk); if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) { PDBG("%s failed to add PBL chunk (%x/%x)\n", __func__, pbl_start, pbl_chunk); if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) { printk(KERN_WARNING MOD "%s: Failed to add all PBL chunks (%x/%x)\n", __func__, pbl_start, rdev_p->rnic_info.pbl_top - pbl_start); return 0; } pbl_chunk >>= 1; } else { PDBG("%s added PBL chunk (%x/%x)\n", __func__, pbl_start, pbl_chunk); pbl_start += pbl_chunk; } } return 0; } void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p) { gen_pool_destroy(rdev_p->pbl_pool); } /* * RQT Memory Manager. Uses Linux generic allocator. 
*/ #define MIN_RQT_SHIFT 10 /* 1KB == mini RQT size (16 entries) */ #define RQT_CHUNK 2*1024*1024 u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size) { unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6); PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); return (u32)addr; } void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size) { PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6); gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6); } int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p) { unsigned long i; rdev_p->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1); if (rdev_p->rqt_pool) for (i = rdev_p->rnic_info.rqt_base; i <= rdev_p->rnic_info.rqt_top - RQT_CHUNK + 1; i += RQT_CHUNK) gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1); return rdev_p->rqt_pool ? 0 : -ENOMEM; } void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p) { gen_pool_destroy(rdev_p->rqt_pool); }
gpl-2.0
olegsvs/android_kernel_archos_persimmon_3_18
drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
487
7447
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include <core/device.h> #include <core/gpuobj.h> #include <subdev/timer.h> #include <subdev/fb.h> #include <subdev/vm.h> #include <subdev/ltc.h> #include <subdev/bar.h> struct nvc0_vmmgr_priv { struct nouveau_vmmgr base; }; /* Map from compressed to corresponding uncompressed storage type. * The value 0xff represents an invalid storage type. 
*/ const u8 nvc0_pte_storage_type_map[256] = { 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */ 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */ 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */ 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */ 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27, 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */ 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */ 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */ 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7, 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3, 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */ 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff }; static void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index, struct nouveau_gpuobj *pgt[2]) { u32 pde[2] = { 0, 0 }; if (pgt[0]) pde[1] = 0x00000001 | (pgt[0]->addr >> 8); if (pgt[1]) pde[0] = 0x00000001 | (pgt[1]->addr >> 8); 
nv_wo32(pgd, (index * 8) + 0, pde[0]); nv_wo32(pgd, (index * 8) + 4, pde[1]); } static inline u64 nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) { phys >>= 8; phys |= 0x00000001; /* present */ if (vma->access & NV_MEM_ACCESS_SYS) phys |= 0x00000002; phys |= ((u64)target << 32); phys |= ((u64)memtype << 36); return phys; } static void nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) { u64 next = 1 << (vma->node->type - 8); phys = nvc0_vm_addr(vma, phys, mem->memtype, 0); pte <<= 3; if (mem->tag) { struct nouveau_ltc *ltc = nouveau_ltc(vma->vm->vmm->base.base.parent); u32 tag = mem->tag->offset + (delta >> 17); phys |= (u64)tag << (32 + 12); next |= (u64)1 << (32 + 12); ltc->tags_clear(ltc, tag, cnt); } while (cnt--) { nv_wo32(pgt, pte + 0, lower_32_bits(phys)); nv_wo32(pgt, pte + 4, upper_32_bits(phys)); phys += next; pte += 8; } } static void nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) { u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 
7 : 5; /* compressed storage types are invalid for system memory */ u32 memtype = nvc0_pte_storage_type_map[mem->memtype & 0xff]; pte <<= 3; while (cnt--) { u64 phys = nvc0_vm_addr(vma, *list++, memtype, target); nv_wo32(pgt, pte + 0, lower_32_bits(phys)); nv_wo32(pgt, pte + 4, upper_32_bits(phys)); pte += 8; } } static void nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) { pte <<= 3; while (cnt--) { nv_wo32(pgt, pte + 0, 0x00000000); nv_wo32(pgt, pte + 4, 0x00000000); pte += 8; } } static void nvc0_vm_flush(struct nouveau_vm *vm) { struct nvc0_vmmgr_priv *priv = (void *)vm->vmm; struct nouveau_bar *bar = nouveau_bar(priv); struct nouveau_vm_pgd *vpgd; u32 type; bar->flush(bar); type = 0x00000001; /* PAGE_ALL */ if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR])) type |= 0x00000004; /* HUB_ONLY */ mutex_lock(&nv_subdev(priv)->mutex); list_for_each_entry(vpgd, &vm->pgd_list, head) { /* looks like maybe a "free flush slots" counter, the * faster you write to 0x100cbc to more it decreases */ if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) { nv_error(priv, "vm timeout 0: 0x%08x %d\n", nv_rd32(priv, 0x100c80), type); } nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8); nv_wr32(priv, 0x100cbc, 0x80000000 | type); /* wait for flush to be queued? 
*/ if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) { nv_error(priv, "vm timeout 1: 0x%08x %d\n", nv_rd32(priv, 0x100c80), type); } } mutex_unlock(&nv_subdev(priv)->mutex); } static int nvc0_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mm_offset, struct nouveau_vm **pvm) { return nouveau_vm_create(vmm, offset, length, mm_offset, 4096, pvm); } static int nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nvc0_vmmgr_priv *priv; int ret; ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv); *pobject = nv_object(priv); if (ret) return ret; priv->base.limit = 1ULL << 40; priv->base.dma_bits = 40; priv->base.pgt_bits = 27 - 12; priv->base.spg_shift = 12; priv->base.lpg_shift = 17; priv->base.create = nvc0_vm_create; priv->base.map_pgt = nvc0_vm_map_pgt; priv->base.map = nvc0_vm_map; priv->base.map_sg = nvc0_vm_map_sg; priv->base.unmap = nvc0_vm_unmap; priv->base.flush = nvc0_vm_flush; return 0; } struct nouveau_oclass nvc0_vmmgr_oclass = { .handle = NV_SUBDEV(VM, 0xc0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvc0_vmmgr_ctor, .dtor = _nouveau_vmmgr_dtor, .init = _nouveau_vmmgr_init, .fini = _nouveau_vmmgr_fini, }, };
gpl-2.0
emwno/android_kernel_N7100
.fr-M0hgqN/drivers/power/smb328_charger.c
487
26308
/* * smb328_charger.c * * Copyright (C) 2011 Samsung Electronics * Ikkeun Kim <iks.kim@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/power/smb328_charger.h> #define DEBUG enum cable_type_t { CABLE_TYPE_NONE = 0, CABLE_TYPE_USB, CABLE_TYPE_AC, CABLE_TYPE_MISC, CABLE_TYPE_OTG, }; #if defined(CONFIG_MACH_Q1_CHN) && defined(CONFIG_SMB328_CHARGER) static bool is_ovp_status; #endif static int smb328_i2c_read(struct i2c_client *client, u8 reg, u8 *data) { int ret = 0; if (!client) return -ENODEV; ret = i2c_smbus_read_byte_data(client, reg); if (ret < 0) return -EIO; *data = ret & 0xff; return *data; } static int smb328_i2c_write(struct i2c_client *client, u8 reg, u8 data) { if (!client) return -ENODEV; return i2c_smbus_write_byte_data(client, reg, data); } static void smb328_test_read(struct i2c_client *client) { struct smb328_chip *chg = i2c_get_clientdata(client); u8 data = 0; u32 addr = 0; for (addr = 0; addr < 0x0c; addr++) { smb328_i2c_read(chg->client, addr, &data); dev_info(&client->dev, "smb328 addr : 0x%02x data : 0x%02x\n", addr, data); } for (addr = 0x30; addr < 0x3D; addr++) { smb328_i2c_read(chg->client, addr, &data); dev_info(&client->dev, "smb328 addr : 0x%02x data : 0x%02x\n", addr, data); } } static void smb328a_charger_function_conrol(struct i2c_client *client); static int smb328_get_charging_status(struct i2c_client *client) { struct smb328_chip *chg = i2c_get_clientdata(client); int status = POWER_SUPPLY_STATUS_UNKNOWN; u8 data_a = 0; u8 data_b = 0; u8 data_c = 0; smb328_i2c_read(chg->client, SMB328A_BATTERY_CHARGING_STATUS_A, &data_a); dev_info(&client->dev, "%s : charging status A(0x%02x)\n", __func__, data_a); smb328_i2c_read(chg->client, SMB328A_BATTERY_CHARGING_STATUS_B, &data_b); dev_info(&client->dev, "%s : charging status B(0x%02x)\n", __func__, data_b); 
smb328_i2c_read(chg->client, SMB328A_BATTERY_CHARGING_STATUS_C, &data_c); dev_info(&client->dev, "%s : charging status C(0x%02x)\n", __func__, data_c); /* check for safety timer in USB charging */ /* If safety timer is activated in USB charging, reset charger */ #if 1 /* write 0xAA in register 0x30 to reset watchdog timer, */ /* it can replace this work-around */ if (chg->is_enable && chg->cable_type == CABLE_TYPE_USB) { if ((data_c & 0x30) == 0x20) { /* safety timer activated */ /* reset charger */ dev_info(&client->dev, "%s : Reset charger, safety timer is activated!\n", __func__); chg->is_enable = false; smb328a_charger_function_conrol(chg->client); chg->is_enable = true; smb328a_charger_function_conrol(chg->client); } } #endif /* At least one charge cycle terminated, */ /* Charge current < Termination Current */ if ((data_c & 0xc0) == 0xc0) { /* top-off by full charging */ status = POWER_SUPPLY_STATUS_FULL; goto charging_status_end; } /* Is enabled ? */ if (data_c & 0x01) { /* check for 0x30 : 'safety timer' (0b01 or 0b10) or */ /* 'waiting to begin charging' (0b11) */ /* check for 0x06 : no charging (0b00) */ if ((data_c & 0x30) || !(data_c & 0x06)) { /* not charging */ status = POWER_SUPPLY_STATUS_NOT_CHARGING; goto charging_status_end; } else { status = POWER_SUPPLY_STATUS_CHARGING; goto charging_status_end; } } else status = POWER_SUPPLY_STATUS_DISCHARGING; charging_status_end: return (int)status; } static int smb328_get_charging_health(struct i2c_client *client) { struct smb328_chip *chg = i2c_get_clientdata(client); int health = POWER_SUPPLY_HEALTH_GOOD; u8 data_a = 0; u8 data_b = 0; u8 data_c = 0; smb328_i2c_read(chg->client, SMB328A_BATTERY_CHARGING_STATUS_A, &data_a); dev_info(&client->dev, "%s : charging status A(0x%02x)\n", __func__, data_a); smb328_i2c_read(chg->client, SMB328A_BATTERY_CHARGING_STATUS_B, &data_b); dev_info(&client->dev, "%s : charging status B(0x%02x)\n", __func__, data_b); smb328_i2c_read(chg->client, 
SMB328A_BATTERY_CHARGING_STATUS_C, &data_c); dev_info(&client->dev, "%s : charging status C(0x%02x)\n", __func__, data_c); /* Is enabled ? */ if (data_c & 0x01) { if (!(data_a & 0x02)) /* Input current is NOT OK */ health = POWER_SUPPLY_HEALTH_OVERVOLTAGE; } #if defined(CONFIG_MACH_Q1_CHN) { u8 data_chn; smb328_i2c_read(chg->client, 0x37, &data_chn); dev_info(&client->dev, "%s : charging interrupt status C(0x%02x)\n", __func__, data_c); if (data_chn & 0x04) health = POWER_SUPPLY_HEALTH_OVERVOLTAGE; if (health == POWER_SUPPLY_HEALTH_OVERVOLTAGE) is_ovp_status = true; else is_ovp_status = false; } #endif return (int)health; } #if defined(CONFIG_MACH_Q1_CHN) static int smb328_is_ovp_status(struct i2c_client *client) { struct smb328_chip *chg = i2c_get_clientdata(client); int status = POWER_SUPPLY_HEALTH_UNKNOWN; u8 data = 0; smb328_i2c_read(chg->client, 0x37, &data); dev_info(&client->dev, "%s : 0x37h(0x%02x)\n", __func__, data); if (data & 0x04) { is_ovp_status = true; status = POWER_SUPPLY_HEALTH_OVERVOLTAGE; } else { is_ovp_status = false; status = POWER_SUPPLY_HEALTH_GOOD; } return (int)status; } #endif static void smb328a_allow_volatile_writes(struct i2c_client *client) { int val, reg; u8 data; reg = SMB328A_COMMAND; val = smb328_i2c_read(client, reg, &data); if ((val >= 0) && !(val & 0x80)) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); data |= (0x1 << 7); if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)data; pr_info("%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } static void smb328a_charger_function_conrol(struct i2c_client *client) { struct smb328_chip *chip = i2c_get_clientdata(client); int val, reg; u8 data, set_data; if (chip->is_otg) { dev_info(&client->dev, "%s : OTG is activated. Ignore command (type:%d, enable:%s)\n", __func__, chip->cable_type, chip->is_enable ? 
"true" : "false"); return; } smb328a_allow_volatile_writes(client); if (!chip->is_enable) { reg = SMB328A_FUNCTION_CONTROL_B; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); if (data != 0x0) { data = 0x0; if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } reg = SMB328A_COMMAND; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); data = 0x98; /* turn off charger */ if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } else { /* reset watchdog timer if it occured */ reg = SMB328A_CLEAR_IRQ; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); data = 0xaa; if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } reg = SMB328A_INPUT_AND_CHARGE_CURRENTS; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); if (chip->cable_type == CABLE_TYPE_AC) { /* fast 1000mA, termination 200mA */ set_data = 0xb7; } else if (chip->cable_type == CABLE_TYPE_MISC) { /* fast 700mA, termination 200mA */ set_data = 0x57; } else { /* fast 500mA, termination 200mA */ set_data = 0x17; } if (data != set_data) { /* this can be changed with top-off setting */ data = set_data; if 
(smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } reg = SMB328A_CURRENT_TERMINATION; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); if (chip->cable_type == CABLE_TYPE_AC) { /* input 1A, threshold 4.25V, AICL enable */ set_data = 0xb0; } else if (chip->cable_type == CABLE_TYPE_MISC) { /* input 700mA, threshold 4.25V, AICL enable */ set_data = 0x50; } else { /* input 450mA, threshold 4.25V, AICL disable */ set_data = 0x14; #if defined(CONFIG_MACH_Q1_CHN) /* turn off pre-bias for ovp */ set_data &= ~(0x10); #endif } if (data != set_data) { data = set_data; if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } reg = SMB328A_FLOAT_VOLTAGE; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); if (data != 0xca) { data = 0xca; /* 4.2V float voltage */ if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } reg = SMB328A_FUNCTION_CONTROL_A1; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); #if 1 if (data != 0xda) { data = 0xda; /* top-off by ADC */ #else if (data != 0x9a) { data = 0x9a; /* top-off by charger */ #endif if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val 
>= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } reg = SMB328A_FUNCTION_CONTROL_A2; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); /* 0x4c -> 0x4e (watchdog timer enabled - SUMMIT) */ if (data != 0x4e) { data = 0x4e; if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } reg = SMB328A_FUNCTION_CONTROL_B; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); if (data != 0x0) { #if defined(CONFIG_MACH_Q1_CHN) data = 0x80; #else data = 0x0; #endif if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } reg = SMB328A_OTG_PWR_AND_LDO_CONTROL; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); set_data = 0xf5; if (chip->cable_type == CABLE_TYPE_AC) set_data = 0xf5; else if (chip->cable_type == CABLE_TYPE_MISC) set_data = 0xf5; else set_data = 0xcd; if (data != set_data) { data = set_data; if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } reg = SMB328A_VARIOUS_CONTROL_FUNCTION_A; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); if (data != 0xf6) { /* this can be changed with top-off setting */ 
data = 0xf6; if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } reg = SMB328A_CELL_TEMPERATURE_MONITOR; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); if (data != 0x0) { data = 0x0; if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } reg = SMB328A_INTERRUPT_SIGNAL_SELECTION; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); if (data != 0x0) { data = 0x0; if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } reg = SMB328A_COMMAND; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); /* turn on charger */ if (chip->cable_type == CABLE_TYPE_AC) data = 0x8c; else if (chip->cable_type == CABLE_TYPE_MISC) data = 0x88; else data = 0x88; /* USB */ if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } } static void smb328a_charger_otg_conrol(struct i2c_client *client) { struct smb328_chip *chip = i2c_get_clientdata(client); int val, reg; u8 data; smb328a_allow_volatile_writes(client); if (chip->is_otg) { reg = SMB328A_FUNCTION_CONTROL_B; val = 
smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); if (data != 0x0) { data = 0x0; if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } /* delay for reset of charger */ mdelay(150); reg = SMB328A_OTG_PWR_AND_LDO_CONTROL; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); data = 0xcd; /* OTG 350mA */ if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } reg = SMB328A_COMMAND; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); data = 0x9a; /* turn on OTG */ if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } else { reg = SMB328A_FUNCTION_CONTROL_B; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : reg (0x%x) = 0x%x\n", __func__, reg, data); if (data != 0x0) { data = 0x0c; /* turn off charger */ if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } /* delay for reset of charger */ mdelay(150); reg = SMB328A_COMMAND; val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : 
reg (0x%x) = 0x%x\n", __func__, reg, data); data = 0x98; /* turn off OTG */ if (smb328_i2c_write(client, reg, data) < 0) pr_err("%s : error!\n", __func__); val = smb328_i2c_read(client, reg, &data); if (val >= 0) { data = (u8)val; dev_info(&client->dev, "%s : => reg (0x%x) = 0x%x\n", __func__, reg, data); } } } } static int smb328_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct smb328_chip *chip = container_of(psy, struct smb328_chip, charger); u8 data; switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = smb328_get_charging_status(chip->client); break; case POWER_SUPPLY_PROP_CHARGE_TYPE: val->intval = chip->cable_type; break; case POWER_SUPPLY_PROP_HEALTH: val->intval = smb328_get_charging_health(chip->client); break; case POWER_SUPPLY_PROP_ONLINE: val->intval = chip->is_enable; break; case POWER_SUPPLY_PROP_CURRENT_NOW: if (chip->is_enable) { smb328_i2c_read(chip->client, SMB328A_INPUT_AND_CHARGE_CURRENTS, &data); switch (data >> 5) { case 0: val->intval = 450; break; case 1: val->intval = 600; break; case 2: val->intval = 700; break; case 3: val->intval = 800; break; case 4: val->intval = 900; break; case 5: val->intval = 1000; break; case 6: val->intval = 1100; break; case 7: val->intval = 1200; break; } } else val->intval = 0; break; #if defined(CONFIG_MACH_Q1_CHN) case POWER_SUPPLY_PROP_VOLTAGE_MAX: val->intval = smb328_is_ovp_status(chip->client); break; #endif default: return -EINVAL; } dev_info(&chip->client->dev, "%s: smb328_get_property (%d,%d)\n", __func__, psp, val->intval); return 0; } static int smb328_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val) { struct smb328_chip *chip = container_of(psy, struct smb328_chip, charger); dev_info(&chip->client->dev, "%s: smb328_set_property (%d,%d)\n", __func__, psp, val->intval); switch (psp) { case POWER_SUPPLY_PROP_STATUS: #if defined(CONFIG_MACH_Q1_CHN) is_ovp_status = false; 
#endif chip->is_enable = (val->intval == POWER_SUPPLY_STATUS_CHARGING); smb328a_charger_function_conrol(chip->client); break; case POWER_SUPPLY_PROP_CHARGE_TYPE: /* only for OTG support */ chip->is_otg = val->intval; smb328a_charger_otg_conrol(chip->client); smb328_test_read(chip->client); break; case POWER_SUPPLY_PROP_HEALTH: break; case POWER_SUPPLY_PROP_ONLINE: chip->is_enable = (bool)val->intval; smb328a_charger_function_conrol(chip->client); break; case POWER_SUPPLY_PROP_CURRENT_NOW: if (val->intval <= 450) chip->cable_type = CABLE_TYPE_USB; else chip->cable_type = CABLE_TYPE_AC; break; default: return -EINVAL; } return 0; } static irqreturn_t smb328_irq_thread(int irq, void *data) { struct smb328_chip *chip = data; int ret = 0; #if defined(CONFIG_MACH_Q1_CHN) && defined(CONFIG_SMB328_CHARGER) u8 data1 = 0; #endif dev_info(&chip->client->dev, "%s: chg_ing IRQ occurred!\n", __func__); #if defined(CONFIG_MACH_Q1_CHN) && defined(CONFIG_SMB328_CHARGER) smb328_i2c_read(chip->client, 0x37, &data1); if (data1 & 0x04) { /* if usbin(usb vbus) is in over-voltage status. 
*/ if (is_ovp_status == false) { is_ovp_status = true; if (chip->pdata->ovp_cb) ret = chip->pdata->ovp_cb(true); dev_info(&chip->client->dev, "$s OVP!!\n"); } } else { if (is_ovp_status == true) { is_ovp_status = false; if (chip->pdata->ovp_cb) ret = chip->pdata->ovp_cb(false); dev_info(&chip->client->dev, "$s ovp status released!!\n"); } } #else if (chip->pdata->topoff_cb) ret = chip->pdata->topoff_cb(); if (ret) { dev_err(&chip->client->dev, "%s: error from topoff_cb(%d)\n", __func__, ret); return IRQ_HANDLED; } #endif return IRQ_HANDLED; } static int smb328_irq_init(struct smb328_chip *chip) { struct i2c_client *client = chip->client; int ret; if (client->irq) { ret = request_threaded_irq(client->irq, NULL, smb328_irq_thread, #if defined(CONFIG_MACH_Q1_CHN) && defined(CONFIG_SMB328_CHARGER) IRQ_TYPE_EDGE_BOTH, #else IRQF_TRIGGER_RISING | IRQF_ONESHOT, #endif "SMB328 charger", chip); if (ret) { dev_err(&client->dev, "failed to reqeust IRQ\n"); return ret; } ret = enable_irq_wake(client->irq); if (ret < 0) dev_err(&client->dev, "failed to enable wakeup src %d\n", ret); } return 0; } static int smb328_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct smb328_chip *chip; int ret = 0; int gpio = 0; u8 data; int i; i = 10; while (1) { if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) goto I2CERROR; if (smb328_i2c_read(client, 0x36, &data) >= 0) /* check HW */ break; I2CERROR: if (!i--) return -EIO; msleep(300); } dev_info(&client->dev, "%s : SMB328 Charger Driver Loading\n", __func__); chip = kzalloc(sizeof(struct smb328_chip), GFP_KERNEL); if (!chip) return -ENOMEM; chip->client = client; chip->pdata = client->dev.platform_data; i2c_set_clientdata(client, chip); if (!chip->pdata) { dev_err(&client->dev, "%s : No platform data supplied\n", __func__); ret = -EINVAL; goto err_pdata; } if (chip->pdata->set_charger_name) chip->pdata->set_charger_name(); chip->is_otg = false; 
chip->is_enable = false; chip->cable_type = CABLE_TYPE_NONE; chip->charger.name = "smb328-charger"; chip->charger.type = POWER_SUPPLY_TYPE_BATTERY; chip->charger.get_property = smb328_get_property; chip->charger.set_property = smb328_set_property; chip->charger.properties = smb328_charger_props; chip->charger.num_properties = ARRAY_SIZE(smb328_charger_props); ret = power_supply_register(&client->dev, &chip->charger); if (ret) { dev_err(&client->dev, "failed: power supply register\n"); kfree(chip); return ret; } /* CHG_EN pin control - active low */ if (chip->pdata->gpio_chg_en) { s3c_gpio_cfgpin(chip->pdata->gpio_chg_en, S3C_GPIO_OUTPUT); s3c_gpio_setpull(chip->pdata->gpio_chg_en, S3C_GPIO_PULL_NONE); gpio = gpio_request(chip->pdata->gpio_chg_en, "CHG_EN"); if (!gpio) { gpio_direction_output(chip->pdata->gpio_chg_en, GPIO_LEVEL_HIGH); gpio_free(chip->pdata->gpio_chg_en); } else dev_err(&client->dev, "faile to request gpio(CHG_EN)\n"); } if (chip->pdata->gpio_otg_en) { s3c_gpio_cfgpin(chip->pdata->gpio_otg_en, S3C_GPIO_OUTPUT); s3c_gpio_setpull(chip->pdata->gpio_otg_en, S3C_GPIO_PULL_NONE); gpio = gpio_request(chip->pdata->gpio_otg_en, "OTG_EN"); if (!gpio) { gpio_direction_output(chip->pdata->gpio_otg_en, GPIO_LEVEL_LOW); gpio_free(chip->pdata->gpio_otg_en); } else dev_err(&client->dev, "faile to request gpio(OTG_EN)\n"); } if (chip->pdata->gpio_ta_nconnected) { s3c_gpio_cfgpin(chip->pdata->gpio_ta_nconnected, S3C_GPIO_INPUT); s3c_gpio_setpull(chip->pdata->gpio_ta_nconnected, S3C_GPIO_PULL_NONE); } if (chip->pdata->gpio_chg_ing) { #if defined(CONFIG_MACH_Q1_CHN) #if defined(CONFIG_MACH_Q1_CHN) && defined(CONFIG_SMB328_CHARGER) /* set external interrupt */ s3c_gpio_cfgpin(chip->pdata->gpio_chg_ing, S3C_GPIO_SFN(0xf)); #endif client->irq = gpio_to_irq(chip->pdata->gpio_chg_ing); ret = smb328_irq_init(chip); if (ret) goto err_pdata; #else s3c_gpio_cfgpin(chip->pdata->gpio_chg_ing, S3C_GPIO_INPUT); s3c_gpio_setpull(chip->pdata->gpio_chg_ing, S3C_GPIO_PULL_NONE); #endif 
} #if defined(CONFIG_MACH_Q1_CHN) && defined(CONFIG_SMB328_CHARGER) is_ovp_status = false; #endif smb328_test_read(client); return 0; err_pdata: kfree(chip); return ret; } static int __devexit smb328_remove(struct i2c_client *client) { struct smb328_chip *chip = i2c_get_clientdata(client); kfree(chip); return 0; } static const struct i2c_device_id smb328_id[] = { {"smb328-charger", 0}, {} }; MODULE_DEVICE_TABLE(i2c, smb328_id); static struct i2c_driver smb328_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = "smb328-charger", }, .probe = smb328_probe, .remove = __devexit_p(smb328_remove), .command = NULL, .id_table = smb328_id, }; static int __init smb328_init(void) { return i2c_add_driver(&smb328_i2c_driver); } static void __exit smb328_exit(void) { i2c_del_driver(&smb328_i2c_driver); } module_init(smb328_init); module_exit(smb328_exit); MODULE_AUTHOR("Ikkeun Kim <iks.kim@samsung.com>"); MODULE_DESCRIPTION("smb328 charger driver"); MODULE_LICENSE("GPL");
gpl-2.0
RossKorolov/android_kernel_samsung_jf
drivers/misc/uid_stat.c
743
4045
/* drivers/misc/uid_stat.c
 *
 * Copyright (C) 2008 - 2009 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/atomic.h>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/uid_stat.h>
#include <net/activity_stats.h>

static DEFINE_SPINLOCK(uid_lock);
static LIST_HEAD(uid_list);
static struct proc_dir_entry *parent;

/* Per-uid TCP traffic counters, exported under /proc/uid_stat/<uid>/ */
struct uid_stat {
	struct list_head link;
	uid_t uid;
	atomic_t tcp_rcv;	/* bytes received, biased by INT_MIN */
	atomic_t tcp_snd;	/* bytes sent, biased by INT_MIN */
};

/* Look up an existing entry; caller must hold uid_lock. */
static struct uid_stat *find_uid_stat(uid_t uid)
{
	struct uid_stat *entry;

	list_for_each_entry(entry, &uid_list, link) {
		if (entry->uid == uid)
			return entry;
	}
	return NULL;
}

/* /proc read handler for tcp_snd: prints the unbiased byte count. */
static int tcp_snd_read_proc(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	int len;
	unsigned int bytes;
	char *p = page;
	struct uid_stat *uid_entry = (struct uid_stat *) data;

	if (!data)
		return 0;

	/* undo the INT_MIN bias applied at creation time */
	bytes = (unsigned int) (atomic_read(&uid_entry->tcp_snd) + INT_MIN);
	p += sprintf(p, "%u\n", bytes);
	len = (p - page) - off;
	*eof = (len <= count) ? 1 : 0;
	*start = page + off;
	return len;
}

/* /proc read handler for tcp_rcv: prints the unbiased byte count. */
static int tcp_rcv_read_proc(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	int len;
	unsigned int bytes;
	char *p = page;
	struct uid_stat *uid_entry = (struct uid_stat *) data;

	if (!data)
		return 0;

	bytes = (unsigned int) (atomic_read(&uid_entry->tcp_rcv) + INT_MIN);
	p += sprintf(p, "%u\n", bytes);
	len = (p - page) - off;
	*eof = (len <= count) ? 1 : 0;
	*start = page + off;
	return len;
}

/* Create a new entry for tracking the specified uid.
 * Called with uid_lock held, hence GFP_ATOMIC. */
static struct uid_stat *create_stat(uid_t uid)
{
	struct uid_stat *new_uid;

	/* Create the uid stat struct and append it to the list. */
	new_uid = kmalloc(sizeof(struct uid_stat), GFP_ATOMIC);
	if (!new_uid)
		return NULL;

	new_uid->uid = uid;
	/* Counters start at INT_MIN, so we can track 4GB of network
	 * traffic. */
	atomic_set(&new_uid->tcp_rcv, INT_MIN);
	atomic_set(&new_uid->tcp_snd, INT_MIN);

	list_add_tail(&new_uid->link, &uid_list);
	return new_uid;
}

/* Publish /proc/uid_stat/<uid>/{tcp_snd,tcp_rcv} for a new entry. */
static void create_stat_proc(struct uid_stat *new_uid)
{
	char uid_s[32];
	struct proc_dir_entry *entry;

	/* FIX: uid_t is unsigned — use %u; snprintf for bounded write */
	snprintf(uid_s, sizeof(uid_s), "%u", new_uid->uid);
	entry = proc_mkdir(uid_s, parent);
	/* FIX: proc_mkdir() failure was ignored; a NULL parent here would
	 * register the files at the /proc root */
	if (!entry) {
		pr_err("uid_stat: failed to create proc dir for uid %u\n",
			new_uid->uid);
		return;
	}

	/* Keep reference to uid_stat so we know what uid to read stats
	 * from. */
	create_proc_read_entry("tcp_snd", S_IRUGO, entry,
		tcp_snd_read_proc, (void *) new_uid);
	create_proc_read_entry("tcp_rcv", S_IRUGO, entry,
		tcp_rcv_read_proc, (void *) new_uid);
}

/* Find an entry, creating and publishing it if it does not exist yet.
 * Returns NULL on allocation failure. */
static struct uid_stat *find_or_create_uid_stat(uid_t uid)
{
	struct uid_stat *entry;
	unsigned long flags;

	spin_lock_irqsave(&uid_lock, flags);
	entry = find_uid_stat(uid);
	if (entry) {
		spin_unlock_irqrestore(&uid_lock, flags);
		return entry;
	}
	entry = create_stat(uid);
	spin_unlock_irqrestore(&uid_lock, flags);
	/* proc registration sleeps, so it happens outside the spinlock */
	if (entry)
		create_stat_proc(entry);
	return entry;
}

/* Account `size` sent bytes to `uid`. Returns 0 on success, -1 on OOM. */
int uid_stat_tcp_snd(uid_t uid, int size)
{
	struct uid_stat *entry;

	activity_stats_update();
	entry = find_or_create_uid_stat(uid);
	if (!entry)
		return -1;
	atomic_add(size, &entry->tcp_snd);
	return 0;
}

/* Account `size` received bytes to `uid`. Returns 0 on success, -1 on OOM. */
int uid_stat_tcp_rcv(uid_t uid, int size)
{
	struct uid_stat *entry;

	activity_stats_update();
	entry = find_or_create_uid_stat(uid);
	if (!entry)
		return -1;
	atomic_add(size, &entry->tcp_rcv);
	return 0;
}

static int __init uid_stat_init(void)
{
	parent = proc_mkdir("uid_stat", NULL);
	if (!parent) {
		pr_err("uid_stat: failed to create proc entry\n");
		/* FIX: return a real errno instead of bare -1 */
		return -ENOMEM;
	}
	return 0;
}

__initcall(uid_stat_init);
gpl-2.0
jdkernel/mecha_aosp_2.6.35
drivers/infiniband/hw/qib/qib_init.c
743
42607
/* * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/idr.h> #include "qib.h" #include "qib_common.h" /* * min buffers we want to have per context, after driver */ #define QIB_MIN_USER_CTXT_BUFCNT 7 #define QLOGIC_IB_R_SOFTWARE_MASK 0xFF #define QLOGIC_IB_R_SOFTWARE_SHIFT 24 #define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62) /* * Number of ctxts we are configured to use (to allow for more pio * buffers per ctxt, etc.) Zero means use chip value. 
*/ ushort qib_cfgctxts; module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO); MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use"); /* * If set, do not write to any regs if avoidable, hack to allow * check for deranged default register values. */ ushort qib_mini_init; module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO); MODULE_PARM_DESC(mini_init, "If set, do minimal diag init"); unsigned qib_n_krcv_queues; module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO); MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port"); /* * qib_wc_pat parameter: * 0 is WC via MTRR * 1 is WC via PAT * If PAT initialization fails, code reverts back to MTRR */ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */ module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); struct workqueue_struct *qib_wq; struct workqueue_struct *qib_cq_wq; static void verify_interrupt(unsigned long); static struct idr qib_unit_table; u32 qib_cpulist_count; unsigned long *qib_cpulist; /* set number of contexts we'll actually use */ void qib_set_ctxtcnt(struct qib_devdata *dd) { if (!qib_cfgctxts) dd->cfgctxts = dd->ctxtcnt; else if (qib_cfgctxts < dd->num_pports) dd->cfgctxts = dd->ctxtcnt; else if (qib_cfgctxts <= dd->ctxtcnt) dd->cfgctxts = qib_cfgctxts; else dd->cfgctxts = dd->ctxtcnt; } /* * Common code for creating the receive context array. */ int qib_create_ctxts(struct qib_devdata *dd) { unsigned i; int ret; /* * Allocate full ctxtcnt array, rather than just cfgctxts, because * cleanup iterates across all possible ctxts. 
*/ dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); if (!dd->rcd) { qib_dev_err(dd, "Unable to allocate ctxtdata array, " "failing\n"); ret = -ENOMEM; goto done; } /* create (one or more) kctxt */ for (i = 0; i < dd->first_user_ctxt; ++i) { struct qib_pportdata *ppd; struct qib_ctxtdata *rcd; if (dd->skip_kctxt_mask & (1 << i)) continue; ppd = dd->pport + (i % dd->num_pports); rcd = qib_create_ctxtdata(ppd, i); if (!rcd) { qib_dev_err(dd, "Unable to allocate ctxtdata" " for Kernel ctxt, failing\n"); ret = -ENOMEM; goto done; } rcd->pkeys[0] = QIB_DEFAULT_P_KEY; rcd->seq_cnt = 1; } ret = 0; done: return ret; } /* * Common code for user and kernel context setup. */ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt) { struct qib_devdata *dd = ppd->dd; struct qib_ctxtdata *rcd; rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); if (rcd) { INIT_LIST_HEAD(&rcd->qp_wait_list); rcd->ppd = ppd; rcd->dd = dd; rcd->cnt = 1; rcd->ctxt = ctxt; dd->rcd[ctxt] = rcd; dd->f_init_ctxt(rcd); /* * To avoid wasting a lot of memory, we allocate 32KB chunks * of physically contiguous memory, advance through it until * used up and then allocate more. Of course, we need * memory to store those extra pointers, now. 32KB seems to * be the most that is "safe" under memory pressure * (creating large files and then copying them over * NFS while doing lots of MPI jobs). The OOM killer can * get invoked, even though we say we can sleep and this can * cause significant system problems.... */ rcd->rcvegrbuf_size = 0x8000; rcd->rcvegrbufs_perchunk = rcd->rcvegrbuf_size / dd->rcvegrbufsize; rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt + rcd->rcvegrbufs_perchunk - 1) / rcd->rcvegrbufs_perchunk; } return rcd; } /* * Common code for initializing the physical port structure. 
*/ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, u8 hw_pidx, u8 port) { ppd->dd = dd; ppd->hw_pidx = hw_pidx; ppd->port = port; /* IB port number, not index */ spin_lock_init(&ppd->sdma_lock); spin_lock_init(&ppd->lflags_lock); init_waitqueue_head(&ppd->state_wait); init_timer(&ppd->symerr_clear_timer); ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup; ppd->symerr_clear_timer.data = (unsigned long)ppd; } static int init_pioavailregs(struct qib_devdata *dd) { int ret, pidx; u64 *status_page; dd->pioavailregs_dma = dma_alloc_coherent( &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys, GFP_KERNEL); if (!dd->pioavailregs_dma) { qib_dev_err(dd, "failed to allocate PIOavail reg area " "in memory\n"); ret = -ENOMEM; goto done; } /* * We really want L2 cache aligned, but for current CPUs of * interest, they are the same. */ status_page = (u64 *) ((char *) dd->pioavailregs_dma + ((2 * L1_CACHE_BYTES + dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES)); /* device status comes first, for backwards compatibility */ dd->devstatusp = status_page; *status_page++ = 0; for (pidx = 0; pidx < dd->num_pports; ++pidx) { dd->pport[pidx].statusp = status_page; *status_page++ = 0; } /* * Setup buffer to hold freeze and other messages, accessible to * apps, following statusp. This is per-unit, not per port. */ dd->freezemsg = (char *) status_page; *dd->freezemsg = 0; /* length of msg buffer is "whatever is left" */ ret = (char *) status_page - (char *) dd->pioavailregs_dma; dd->freezelen = PAGE_SIZE - ret; ret = 0; done: return ret; } /** * init_shadow_tids - allocate the shadow TID array * @dd: the qlogic_ib device * * allocate the shadow TID array, so we can qib_munlock previous * entries. It may make more sense to move the pageshadow to the * ctxt data structure, so we only allocate memory for ctxts actually * in use, since we at 8k per ctxt, now. * We don't want failures here to prevent use of the driver/chip, * so no return value. 
*/ static void init_shadow_tids(struct qib_devdata *dd) { struct page **pages; dma_addr_t *addrs; pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); if (!pages) { qib_dev_err(dd, "failed to allocate shadow page * " "array, no expected sends!\n"); goto bail; } addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); if (!addrs) { qib_dev_err(dd, "failed to allocate shadow dma handle " "array, no expected sends!\n"); goto bail_free; } memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); dd->pageshadow = pages; dd->physshadow = addrs; return; bail_free: vfree(pages); bail: dd->pageshadow = NULL; } /* * Do initialization for device that is only needed on * first detect, not on resets. */ static int loadtime_init(struct qib_devdata *dd) { int ret = 0; if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) & QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) { qib_dev_err(dd, "Driver only handles version %d, " "chip swversion is %d (%llx), failng\n", QIB_CHIP_SWVERSION, (int)(dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) & QLOGIC_IB_R_SOFTWARE_MASK, (unsigned long long) dd->revision); ret = -ENOSYS; goto done; } if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK) qib_devinfo(dd->pcidev, "%s", dd->boardversion); spin_lock_init(&dd->pioavail_lock); spin_lock_init(&dd->sendctrl_lock); spin_lock_init(&dd->uctxt_lock); spin_lock_init(&dd->qib_diag_trans_lock); spin_lock_init(&dd->eep_st_lock); mutex_init(&dd->eep_lock); if (qib_mini_init) goto done; ret = init_pioavailregs(dd); init_shadow_tids(dd); qib_get_eeprom_info(dd); /* setup time (don't start yet) to verify we got interrupt */ init_timer(&dd->intrchk_timer); dd->intrchk_timer.function = verify_interrupt; dd->intrchk_timer.data = (unsigned long) dd; done: return ret; } /** * init_after_reset - re-initialize after a reset * @dd: the qlogic_ib device * * sanity check at least some of the values after reset, and * 
ensure no receive or transmit (explictly, in case reset * failed */ static int init_after_reset(struct qib_devdata *dd) { int i; /* * Ensure chip does no sends or receives, tail updates, or * pioavail updates while we re-initialize. This is mostly * for the driver data structures, not chip registers. */ for (i = 0; i < dd->num_pports; ++i) { /* * ctxt == -1 means "all contexts". Only really safe for * _dis_abling things, as here. */ dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS | QIB_RCVCTRL_INTRAVAIL_DIS | QIB_RCVCTRL_TAILUPD_DIS, -1); /* Redundant across ports for some, but no big deal. */ dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS | QIB_SENDCTRL_AVAIL_DIS); } return 0; } static void enable_chip(struct qib_devdata *dd) { u64 rcvmask; int i; /* * Enable PIO send, and update of PIOavail regs to memory. */ for (i = 0; i < dd->num_pports; ++i) dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB | QIB_SENDCTRL_AVAIL_ENB); /* * Enable kernel ctxts' receive and receive interrupt. * Other ctxts done as user opens and inits them. */ rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB; rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ? QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB; for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { struct qib_ctxtdata *rcd = dd->rcd[i]; if (rcd) dd->f_rcvctrl(rcd->ppd, rcvmask, i); } } static void verify_interrupt(unsigned long opaque) { struct qib_devdata *dd = (struct qib_devdata *) opaque; if (!dd) return; /* being torn down */ /* * If we don't have a lid or any interrupts, let the user know and * don't bother checking again. */ if (dd->int_counter == 0) { if (!dd->f_intr_fallback(dd)) dev_err(&dd->pcidev->dev, "No interrupts detected, " "not usable.\n"); else /* re-arm the timer to see if fallback works */ mod_timer(&dd->intrchk_timer, jiffies + HZ/2); } } static void init_piobuf_state(struct qib_devdata *dd) { int i, pidx; u32 uctxts; /* * Ensure all buffers are free, and fifos empty. 
Buffers * are common, so only do once for port 0. * * After enable and qib_chg_pioavailkernel so we can safely * enable pioavail updates and PIOENABLE. After this, packets * are ready and able to go out. */ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL); for (pidx = 0; pidx < dd->num_pports; ++pidx) dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH); /* * If not all sendbufs are used, add the one to each of the lower * numbered contexts. pbufsctxt and lastctxt_piobuf are * calculated in chip-specific code because it may cause some * chip-specific adjustments to be made. */ uctxts = dd->cfgctxts - dd->first_user_ctxt; dd->ctxts_extrabuf = dd->pbufsctxt ? dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0; /* * Set up the shadow copies of the piobufavail registers, * which we compare against the chip registers for now, and * the in memory DMA'ed copies of the registers. * By now pioavail updates to memory should have occurred, so * copy them into our working/shadow registers; this is in * case something went wrong with abort, but mostly to get the * initial values of the generation bit correct. */ for (i = 0; i < dd->pioavregs; i++) { __le64 tmp; tmp = dd->pioavailregs_dma[i]; /* * Don't need to worry about pioavailkernel here * because we will call qib_chg_pioavailkernel() later * in initialization, to busy out buffers as needed. */ dd->pioavailshadow[i] = le64_to_cpu(tmp); } while (i < ARRAY_SIZE(dd->pioavailshadow)) dd->pioavailshadow[i++] = 0; /* for debugging sanity */ /* after pioavailshadow is setup */ qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k, TXCHK_CHG_TYPE_KERN, NULL); dd->f_initvl15_bufs(dd); } /** * qib_init - do the actual initialization sequence on the chip * @dd: the qlogic_ib device * @reinit: reinitializing, so don't allocate new memory * * Do the actual initialization sequence on the chip. 
This is done * both from the init routine called from the PCI infrastructure, and * when we reset the chip, or detect that it was reset internally, * or it's administratively re-enabled. * * Memory allocation here and in called routines is only done in * the first case (reinit == 0). We have to be careful, because even * without memory allocation, we need to re-write all the chip registers * TIDs, etc. after the reset or enable has completed. */ int qib_init(struct qib_devdata *dd, int reinit) { int ret = 0, pidx, lastfail = 0; u32 portok = 0; unsigned i; struct qib_ctxtdata *rcd; struct qib_pportdata *ppd; unsigned long flags; /* Set linkstate to unknown, so we can watch for a transition. */ for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED | QIBL_LINKDOWN | QIBL_LINKINIT | QIBL_LINKV); spin_unlock_irqrestore(&ppd->lflags_lock, flags); } if (reinit) ret = init_after_reset(dd); else ret = loadtime_init(dd); if (ret) goto done; /* Bypass most chip-init, to get to device creation */ if (qib_mini_init) return 0; ret = dd->f_late_initreg(dd); if (ret) goto done; /* dd->rcd can be NULL if early init failed */ for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { /* * Set up the (kernel) rcvhdr queue and egr TIDs. If doing * re-init, the simplest way to handle this is to free * existing, and re-allocate. * Need to re-create rest of ctxt 0 ctxtdata as well. 
*/ rcd = dd->rcd[i]; if (!rcd) continue; lastfail = qib_create_rcvhdrq(dd, rcd); if (!lastfail) lastfail = qib_setup_eagerbufs(rcd); if (lastfail) { qib_dev_err(dd, "failed to allocate kernel ctxt's " "rcvhdrq and/or egr bufs\n"); continue; } } for (pidx = 0; pidx < dd->num_pports; ++pidx) { int mtu; if (lastfail) ret = lastfail; ppd = dd->pport + pidx; mtu = ib_mtu_enum_to_int(qib_ibmtu); if (mtu == -1) { mtu = QIB_DEFAULT_MTU; qib_ibmtu = 0; /* don't leave invalid value */ } /* set max we can ever have for this driver load */ ppd->init_ibmaxlen = min(mtu > 2048 ? dd->piosize4k : dd->piosize2k, dd->rcvegrbufsize + (dd->rcvhdrentsize << 2)); /* * Have to initialize ibmaxlen, but this will normally * change immediately in qib_set_mtu(). */ ppd->ibmaxlen = ppd->init_ibmaxlen; qib_set_mtu(ppd, mtu); spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags |= QIBL_IB_LINK_DISABLED; spin_unlock_irqrestore(&ppd->lflags_lock, flags); lastfail = dd->f_bringup_serdes(ppd); if (lastfail) { qib_devinfo(dd->pcidev, "Failed to bringup IB port %u\n", ppd->port); lastfail = -ENETDOWN; continue; } /* let link come up, and enable IBC */ spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags &= ~QIBL_IB_LINK_DISABLED; spin_unlock_irqrestore(&ppd->lflags_lock, flags); portok++; } if (!portok) { /* none of the ports initialized */ if (!ret && lastfail) ret = lastfail; else if (!ret) ret = -ENETDOWN; /* but continue on, so we can debug cause */ } enable_chip(dd); init_piobuf_state(dd); done: if (!ret) { /* chip is OK for user apps; mark it as initialized */ for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; /* * Set status even if port serdes is not initialized * so that diags will work. 
*/ *ppd->statusp |= QIB_STATUS_CHIP_PRESENT | QIB_STATUS_INITTED; if (!ppd->link_speed_enabled) continue; if (dd->flags & QIB_HAS_SEND_DMA) ret = qib_setup_sdma(ppd); init_timer(&ppd->hol_timer); ppd->hol_timer.function = qib_hol_event; ppd->hol_timer.data = (unsigned long)ppd; ppd->hol_state = QIB_HOL_UP; } /* now we can enable all interrupts from the chip */ dd->f_set_intr_state(dd, 1); /* * Setup to verify we get an interrupt, and fallback * to an alternate if necessary and possible. */ mod_timer(&dd->intrchk_timer, jiffies + HZ/2); /* start stats retrieval timer */ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); } /* if ret is non-zero, we probably should do some cleanup here... */ return ret; } /* * These next two routines are placeholders in case we don't have per-arch * code for controlling write combining. If explicit control of write * combining is not available, performance will probably be awful. */ int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd) { return -EOPNOTSUPP; } void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd) { } static inline struct qib_devdata *__qib_lookup(int unit) { return idr_find(&qib_unit_table, unit); } struct qib_devdata *qib_lookup(int unit) { struct qib_devdata *dd; unsigned long flags; spin_lock_irqsave(&qib_devs_lock, flags); dd = __qib_lookup(unit); spin_unlock_irqrestore(&qib_devs_lock, flags); return dd; } /* * Stop the timers during unit shutdown, or after an error late * in initialization. 
*/
/* Stop the per-device and per-port timers.  A timer's .data field doubles
 * as an "armed" flag: it is only acted on (and for the device-level timers,
 * cleared) when non-zero, so this is safe to call before full init. */
static void qib_stop_timers(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int pidx;

	if (dd->stats_timer.data) {
		del_timer_sync(&dd->stats_timer);
		dd->stats_timer.data = 0;
	}
	if (dd->intrchk_timer.data) {
		del_timer_sync(&dd->intrchk_timer);
		dd->intrchk_timer.data = 0;
	}
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hol_timer.data)
			del_timer_sync(&ppd->hol_timer);
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
		if (ppd->symerr_clear_timer.data)
			del_timer_sync(&ppd->symerr_clear_timer);
	}
}

/**
 * qib_shutdown_device - shut down a device
 * @dd: the qlogic_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.   It does not free any data structures.
 * Everything it does has to be setup again by qib_init(dd, 1)
 */
static void qib_shutdown_device(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	/* drop all link-state flags and clear user-visible status first */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		spin_lock_irq(&ppd->lflags_lock);
		ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKARMED | QIBL_LINKACTIVE |
				 QIBL_LINKV);
		spin_unlock_irq(&ppd->lflags_lock);
		*ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
	}
	dd->flags &= ~QIB_INITTED;

	/* mask interrupts, but not errors */
	dd->f_set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
				   QIB_RCVCTRL_CTXT_DIS |
				   QIB_RCVCTRL_INTRAVAIL_DIS |
				   QIB_RCVCTRL_PKEY_ENB, -1);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
*/ udelay(20); for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; dd->f_setextled(ppd, 0); /* make sure LEDs are off */ if (dd->flags & QIB_HAS_SEND_DMA) qib_teardown_sdma(ppd); dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS | QIB_SENDCTRL_SEND_DIS); /* * Clear SerdesEnable. * We can't count on interrupts since we are stopping. */ dd->f_quiet_serdes(ppd); } qib_update_eeprom_log(dd); } /** * qib_free_ctxtdata - free a context's allocated data * @dd: the qlogic_ib device * @rcd: the ctxtdata structure * * free up any allocated data for a context * This should not touch anything that would affect a simultaneous * re-allocation of context data, because it is called after qib_mutex * is released (and can be called from reinit as well). * It should never change any chip state, or global driver state. */ void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd) { if (!rcd) return; if (rcd->rcvhdrq) { dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size, rcd->rcvhdrq, rcd->rcvhdrq_phys); rcd->rcvhdrq = NULL; if (rcd->rcvhdrtail_kvaddr) { dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, rcd->rcvhdrtail_kvaddr, rcd->rcvhdrqtailaddr_phys); rcd->rcvhdrtail_kvaddr = NULL; } } if (rcd->rcvegrbuf) { unsigned e; for (e = 0; e < rcd->rcvegrbuf_chunks; e++) { void *base = rcd->rcvegrbuf[e]; size_t size = rcd->rcvegrbuf_size; dma_free_coherent(&dd->pcidev->dev, size, base, rcd->rcvegrbuf_phys[e]); } kfree(rcd->rcvegrbuf); rcd->rcvegrbuf = NULL; kfree(rcd->rcvegrbuf_phys); rcd->rcvegrbuf_phys = NULL; rcd->rcvegrbuf_chunks = 0; } kfree(rcd->tid_pg_list); vfree(rcd->user_event_mask); vfree(rcd->subctxt_uregbase); vfree(rcd->subctxt_rcvegrbuf); vfree(rcd->subctxt_rcvhdr_base); kfree(rcd); } /* * Perform a PIO buffer bandwidth write test, to verify proper system * configuration. Even when all the setup calls work, occasionally * BIOS or other issues can prevent write combining from working, or * can cause other bandwidth problems to the chip. 
* * This test simply writes the same buffer over and over again, and * measures close to the peak bandwidth to the chip (not testing * data bandwidth to the wire). On chips that use an address-based * trigger to send packets to the wire, this is easy. On chips that * use a count to trigger, we want to make sure that the packet doesn't * go out on the wire, or trigger flow control checks. */ static void qib_verify_pioperf(struct qib_devdata *dd) { u32 pbnum, cnt, lcnt; u32 __iomem *piobuf; u32 *addr; u64 msecs, emsecs; piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum); if (!piobuf) { qib_devinfo(dd->pcidev, "No PIObufs for checking perf, skipping\n"); return; } /* * Enough to give us a reasonable test, less than piobuf size, and * likely multiple of store buffer length. */ cnt = 1024; addr = vmalloc(cnt); if (!addr) { qib_devinfo(dd->pcidev, "Couldn't get memory for checking PIO perf," " skipping\n"); goto done; } preempt_disable(); /* we want reasonably accurate elapsed time */ msecs = 1 + jiffies_to_msecs(jiffies); for (lcnt = 0; lcnt < 10000U; lcnt++) { /* wait until we cross msec boundary */ if (jiffies_to_msecs(jiffies) >= msecs) break; udelay(1); } dd->f_set_armlaunch(dd, 0); /* * length 0, no dwords actually sent */ writeq(0, piobuf); qib_flush_wc(); /* * This is only roughly accurate, since even with preempt we * still take interrupts that could take a while. Running for * >= 5 msec seems to get us "close enough" to accurate values. 
*/ msecs = jiffies_to_msecs(jiffies); for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) { qib_pio_copy(piobuf + 64, addr, cnt >> 2); emsecs = jiffies_to_msecs(jiffies) - msecs; } /* 1 GiB/sec, slightly over IB SDR line rate */ if (lcnt < (emsecs * 1024U)) qib_dev_err(dd, "Performance problem: bandwidth to PIO buffers is " "only %u MiB/sec\n", lcnt / (u32) emsecs); preempt_enable(); vfree(addr); done: /* disarm piobuf, so it's available again */ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum)); qib_sendbuf_done(dd, pbnum); dd->f_set_armlaunch(dd, 1); } void qib_free_devdata(struct qib_devdata *dd) { unsigned long flags; spin_lock_irqsave(&qib_devs_lock, flags); idr_remove(&qib_unit_table, dd->unit); list_del(&dd->list); spin_unlock_irqrestore(&qib_devs_lock, flags); ib_dealloc_device(&dd->verbs_dev.ibdev); } /* * Allocate our primary per-unit data structure. Must be done via verbs * allocator, because the verbs cleanup process both does cleanup and * free of the data structure. * "extra" is for chip-specific data. * * Use the idr mechanism to get a unit number for this unit. 
*/ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) { unsigned long flags; struct qib_devdata *dd; int ret; if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) { dd = ERR_PTR(-ENOMEM); goto bail; } dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra); if (!dd) { dd = ERR_PTR(-ENOMEM); goto bail; } spin_lock_irqsave(&qib_devs_lock, flags); ret = idr_get_new(&qib_unit_table, dd, &dd->unit); if (ret >= 0) list_add(&dd->list, &qib_dev_list); spin_unlock_irqrestore(&qib_devs_lock, flags); if (ret < 0) { qib_early_err(&pdev->dev, "Could not allocate unit ID: error %d\n", -ret); ib_dealloc_device(&dd->verbs_dev.ibdev); dd = ERR_PTR(ret); goto bail; } if (!qib_cpulist_count) { u32 count = num_online_cpus(); qib_cpulist = kzalloc(BITS_TO_LONGS(count) * sizeof(long), GFP_KERNEL); if (qib_cpulist) qib_cpulist_count = count; else qib_early_err(&pdev->dev, "Could not alloc cpulist " "info, cpu affinity might be wrong\n"); } bail: return dd; } /* * Called from freeze mode handlers, and from PCI error * reporting code. Should be paranoid about state of * system and data structures. */ void qib_disable_after_error(struct qib_devdata *dd) { if (dd->flags & QIB_INITTED) { u32 pidx; dd->flags &= ~QIB_INITTED; if (dd->pport) for (pidx = 0; pidx < dd->num_pports; ++pidx) { struct qib_pportdata *ppd; ppd = dd->pport + pidx; if (dd->flags & QIB_PRESENT) { qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE); dd->f_setextled(ppd, 0); } *ppd->statusp &= ~QIB_STATUS_IB_READY; } } /* * Mark as having had an error for driver, and also * for /sys and status word mapped to user programs. * This marks unit as not usable, until reset. 
*/ if (dd->devstatusp) *dd->devstatusp |= QIB_STATUS_HWERROR; } static void __devexit qib_remove_one(struct pci_dev *); static int __devinit qib_init_one(struct pci_dev *, const struct pci_device_id *); #define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: " #define PFX QIB_DRV_NAME ": " static const struct pci_device_id qib_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) }, { 0, } }; MODULE_DEVICE_TABLE(pci, qib_pci_tbl); struct pci_driver qib_driver = { .name = QIB_DRV_NAME, .probe = qib_init_one, .remove = __devexit_p(qib_remove_one), .id_table = qib_pci_tbl, .err_handler = &qib_pci_err_handler, }; /* * Do all the generic driver unit- and chip-independent memory * allocation and initialization. */ static int __init qlogic_ib_init(void) { int ret; ret = qib_dev_init(); if (ret) goto bail; /* * We create our own workqueue mainly because we want to be * able to flush it when devices are being removed. We can't * use schedule_work()/flush_scheduled_work() because both * unregister_netdev() and linkwatch_event take the rtnl lock, * so flush_scheduled_work() can deadlock during device * removal. */ qib_wq = create_workqueue("qib"); if (!qib_wq) { ret = -ENOMEM; goto bail_dev; } qib_cq_wq = create_singlethread_workqueue("qib_cq"); if (!qib_cq_wq) { ret = -ENOMEM; goto bail_wq; } /* * These must be called before the driver is registered with * the PCI subsystem. 
*/ idr_init(&qib_unit_table); if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) { printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n"); ret = -ENOMEM; goto bail_cq_wq; } ret = pci_register_driver(&qib_driver); if (ret < 0) { printk(KERN_ERR QIB_DRV_NAME ": Unable to register driver: error %d\n", -ret); goto bail_unit; } /* not fatal if it doesn't work */ if (qib_init_qibfs()) printk(KERN_ERR QIB_DRV_NAME ": Unable to register ipathfs\n"); goto bail; /* all OK */ bail_unit: idr_destroy(&qib_unit_table); bail_cq_wq: destroy_workqueue(qib_cq_wq); bail_wq: destroy_workqueue(qib_wq); bail_dev: qib_dev_cleanup(); bail: return ret; } module_init(qlogic_ib_init); /* * Do the non-unit driver cleanup, memory free, etc. at unload. */ static void __exit qlogic_ib_cleanup(void) { int ret; ret = qib_exit_qibfs(); if (ret) printk(KERN_ERR QIB_DRV_NAME ": " "Unable to cleanup counter filesystem: " "error %d\n", -ret); pci_unregister_driver(&qib_driver); destroy_workqueue(qib_wq); destroy_workqueue(qib_cq_wq); qib_cpulist_count = 0; kfree(qib_cpulist); idr_destroy(&qib_unit_table); qib_dev_cleanup(); } module_exit(qlogic_ib_cleanup); /* this can only be called after a successful initialization */ static void cleanup_device_data(struct qib_devdata *dd) { int ctxt; int pidx; struct qib_ctxtdata **tmp; unsigned long flags; /* users can't do anything more with chip */ for (pidx = 0; pidx < dd->num_pports; ++pidx) if (dd->pport[pidx].statusp) *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT; if (!qib_wc_pat) qib_disable_wc(dd); if (dd->pioavailregs_dma) { dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, (void *) dd->pioavailregs_dma, dd->pioavailregs_phys); dd->pioavailregs_dma = NULL; } if (dd->pageshadow) { struct page **tmpp = dd->pageshadow; dma_addr_t *tmpd = dd->physshadow; int i, cnt = 0; for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) { int ctxt_tidbase = ctxt * dd->rcvtidcnt; int maxtid = ctxt_tidbase + dd->rcvtidcnt; for (i = ctxt_tidbase; i < maxtid; i++) { if (!tmpp[i]) 
continue; pci_unmap_page(dd->pcidev, tmpd[i], PAGE_SIZE, PCI_DMA_FROMDEVICE); qib_release_user_pages(&tmpp[i], 1); tmpp[i] = NULL; cnt++; } } tmpp = dd->pageshadow; dd->pageshadow = NULL; vfree(tmpp); } /* * Free any resources still in use (usually just kernel contexts) * at unload; we do for ctxtcnt, because that's what we allocate. * We acquire lock to be really paranoid that rcd isn't being * accessed from some interrupt-related code (that should not happen, * but best to be sure). */ spin_lock_irqsave(&dd->uctxt_lock, flags); tmp = dd->rcd; dd->rcd = NULL; spin_unlock_irqrestore(&dd->uctxt_lock, flags); for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) { struct qib_ctxtdata *rcd = tmp[ctxt]; tmp[ctxt] = NULL; /* debugging paranoia */ qib_free_ctxtdata(dd, rcd); } kfree(tmp); kfree(dd->boardname); } /* * Clean up on unit shutdown, or error during unit load after * successful initialization. */ static void qib_postinit_cleanup(struct qib_devdata *dd) { /* * Clean up chip-specific stuff. * We check for NULL here, because it's outside * the kregbase check, and we need to call it * after the free_irq. Thus it's possible that * the function pointers were never initialized. */ if (dd->f_cleanup) dd->f_cleanup(dd); qib_pcie_ddcleanup(dd); cleanup_device_data(dd); qib_free_devdata(dd); } static int __devinit qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret, j, pidx, initfail; struct qib_devdata *dd = NULL; ret = qib_pcie_init(pdev, ent); if (ret) goto bail; /* * Do device-specific initialiation, function table setup, dd * allocation, etc. 
*/ switch (ent->device) { case PCI_DEVICE_ID_QLOGIC_IB_6120: #ifdef CONFIG_PCI_MSI dd = qib_init_iba6120_funcs(pdev, ent); #else qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " "work if CONFIG_PCI_MSI is not enabled\n", ent->device); #endif break; case PCI_DEVICE_ID_QLOGIC_IB_7220: dd = qib_init_iba7220_funcs(pdev, ent); break; case PCI_DEVICE_ID_QLOGIC_IB_7322: dd = qib_init_iba7322_funcs(pdev, ent); break; default: qib_early_err(&pdev->dev, "Failing on unknown QLogic " "deviceid 0x%x\n", ent->device); ret = -ENODEV; } if (IS_ERR(dd)) ret = PTR_ERR(dd); if (ret) goto bail; /* error already printed */ /* do the generic initialization */ initfail = qib_init(dd, 0); ret = qib_register_ib_device(dd); /* * Now ready for use. this should be cleared whenever we * detect a reset, or initiate one. If earlier failure, * we still create devices, so diags, etc. can be used * to determine cause of problem. */ if (!qib_mini_init && !initfail && !ret) dd->flags |= QIB_INITTED; j = qib_device_create(dd); if (j) qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j); j = qibfs_add(dd); if (j) qib_dev_err(dd, "Failed filesystem setup for counters: %d\n", -j); if (qib_mini_init || initfail || ret) { qib_stop_timers(dd); flush_scheduled_work(); for (pidx = 0; pidx < dd->num_pports; ++pidx) dd->f_quiet_serdes(dd->pport + pidx); if (qib_mini_init) goto bail; if (!j) { (void) qibfs_remove(dd); qib_device_remove(dd); } if (!ret) qib_unregister_ib_device(dd); qib_postinit_cleanup(dd); if (initfail) ret = initfail; goto bail; } if (!qib_wc_pat) { ret = qib_enable_wc(dd); if (ret) { qib_dev_err(dd, "Write combining not enabled " "(err %d): performance may be poor\n", -ret); ret = 0; } } qib_verify_pioperf(dd); bail: return ret; } static void __devexit qib_remove_one(struct pci_dev *pdev) { struct qib_devdata *dd = pci_get_drvdata(pdev); int ret; /* unregister from IB core */ qib_unregister_ib_device(dd); /* * Disable the IB link, disable interrupts on the device, * clear dma 
engines, etc. */ if (!qib_mini_init) qib_shutdown_device(dd); qib_stop_timers(dd); /* wait until all of our (qsfp) schedule_work() calls complete */ flush_scheduled_work(); ret = qibfs_remove(dd); if (ret) qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n", -ret); qib_device_remove(dd); qib_postinit_cleanup(dd); } /** * qib_create_rcvhdrq - create a receive header queue * @dd: the qlogic_ib device * @rcd: the context data * * This must be contiguous memory (from an i/o perspective), and must be * DMA'able (which means for some systems, it will go through an IOMMU, * or be forced into a low address range). */ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) { unsigned amt; if (!rcd->rcvhdrq) { dma_addr_t phys_hdrqtail; gfp_t gfp_flags; amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize * sizeof(u32), PAGE_SIZE); gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ? GFP_USER : GFP_KERNEL; rcd->rcvhdrq = dma_alloc_coherent( &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys, gfp_flags | __GFP_COMP); if (!rcd->rcvhdrq) { qib_dev_err(dd, "attempt to allocate %d bytes " "for ctxt %u rcvhdrq failed\n", amt, rcd->ctxt); goto bail; } if (rcd->ctxt >= dd->first_user_ctxt) { rcd->user_event_mask = vmalloc_user(PAGE_SIZE); if (!rcd->user_event_mask) goto bail_free_hdrq; } if (!(dd->flags & QIB_NODMA_RTAIL)) { rcd->rcvhdrtail_kvaddr = dma_alloc_coherent( &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, gfp_flags); if (!rcd->rcvhdrtail_kvaddr) goto bail_free; rcd->rcvhdrqtailaddr_phys = phys_hdrqtail; } rcd->rcvhdrq_size = amt; } /* clear for security and sanity on each use */ memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size); if (rcd->rcvhdrtail_kvaddr) memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE); return 0; bail_free: qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u " "rcvhdrqtailaddr failed\n", rcd->ctxt); vfree(rcd->user_event_mask); rcd->user_event_mask = NULL; bail_free_hdrq: dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, rcd->rcvhdrq_phys); rcd->rcvhdrq = 
NULL; bail: return -ENOMEM; } /** * allocate eager buffers, both kernel and user contexts. * @rcd: the context we are setting up. * * Allocate the eager TID buffers and program them into hip. * They are no longer completely contiguous, we do multiple allocation * calls. Otherwise we get the OOM code involved, by asking for too * much per call, with disastrous results on some kernels. */ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd) { struct qib_devdata *dd = rcd->dd; unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff; size_t size; gfp_t gfp_flags; /* * GFP_USER, but without GFP_FS, so buffer cache can be * coalesced (we hope); otherwise, even at order 4, * heavy filesystem activity makes these fail, and we can * use compound pages. */ gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; egrcnt = rcd->rcvegrcnt; egroff = rcd->rcvegr_tid_base; egrsize = dd->rcvegrbufsize; chunk = rcd->rcvegrbuf_chunks; egrperchunk = rcd->rcvegrbufs_perchunk; size = rcd->rcvegrbuf_size; if (!rcd->rcvegrbuf) { rcd->rcvegrbuf = kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]), GFP_KERNEL); if (!rcd->rcvegrbuf) goto bail; } if (!rcd->rcvegrbuf_phys) { rcd->rcvegrbuf_phys = kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]), GFP_KERNEL); if (!rcd->rcvegrbuf_phys) goto bail_rcvegrbuf; } for (e = 0; e < rcd->rcvegrbuf_chunks; e++) { if (rcd->rcvegrbuf[e]) continue; rcd->rcvegrbuf[e] = dma_alloc_coherent(&dd->pcidev->dev, size, &rcd->rcvegrbuf_phys[e], gfp_flags); if (!rcd->rcvegrbuf[e]) goto bail_rcvegrbuf_phys; } rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0]; for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) { dma_addr_t pa = rcd->rcvegrbuf_phys[chunk]; unsigned i; /* clear for security and sanity on each use */ memset(rcd->rcvegrbuf[chunk], 0, size); for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) { dd->f_put_tid(dd, e + egroff + (u64 __iomem *) ((char __iomem *) dd->kregbase + dd->rcvegrbase), RCVHQ_RCV_TYPE_EAGER, pa); pa += egrsize; } cond_resched(); /* don't hog the cpu */ } 
return 0; bail_rcvegrbuf_phys: for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++) dma_free_coherent(&dd->pcidev->dev, size, rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]); kfree(rcd->rcvegrbuf_phys); rcd->rcvegrbuf_phys = NULL; bail_rcvegrbuf: kfree(rcd->rcvegrbuf); rcd->rcvegrbuf = NULL; bail: return -ENOMEM; } /* * Note: Changes to this routine should be mirrored * for the diagnostics routine qib_remap_ioaddr32(). * There is also related code for VL15 buffers in qib_init_7322_variables(). * The teardown code that unmaps is in qib_pcie_ddcleanup() */ int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen) { u64 __iomem *qib_kregbase = NULL; void __iomem *qib_piobase = NULL; u64 __iomem *qib_userbase = NULL; u64 qib_kreglen; u64 qib_pio2koffset = dd->piobufbase & 0xffffffff; u64 qib_pio4koffset = dd->piobufbase >> 32; u64 qib_pio2klen = dd->piobcnt2k * dd->palign; u64 qib_pio4klen = dd->piobcnt4k * dd->align4k; u64 qib_physaddr = dd->physaddr; u64 qib_piolen; u64 qib_userlen = 0; /* * Free the old mapping because the kernel will try to reuse the * old mapping and not create a new mapping with the * write combining attribute. 
*/ iounmap(dd->kregbase); dd->kregbase = NULL; /* * Assumes chip address space looks like: * - kregs + sregs + cregs + uregs (in any order) * - piobufs (2K and 4K bufs in either order) * or: * - kregs + sregs + cregs (in any order) * - piobufs (2K and 4K bufs in either order) * - uregs */ if (dd->piobcnt4k == 0) { qib_kreglen = qib_pio2koffset; qib_piolen = qib_pio2klen; } else if (qib_pio2koffset < qib_pio4koffset) { qib_kreglen = qib_pio2koffset; qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen; } else { qib_kreglen = qib_pio4koffset; qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen; } qib_piolen += vl15buflen; /* Map just the configured ports (not all hw ports) */ if (dd->uregbase > qib_kreglen) qib_userlen = dd->ureg_align * dd->cfgctxts; /* Sanity checks passed, now create the new mappings */ qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen); if (!qib_kregbase) goto bail; qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen); if (!qib_piobase) goto bail_kregbase; if (qib_userlen) { qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase, qib_userlen); if (!qib_userbase) goto bail_piobase; } dd->kregbase = qib_kregbase; dd->kregend = (u64 __iomem *) ((char __iomem *) qib_kregbase + qib_kreglen); dd->piobase = qib_piobase; dd->pio2kbase = (void __iomem *) (((char __iomem *) dd->piobase) + qib_pio2koffset - qib_kreglen); if (dd->piobcnt4k) dd->pio4kbase = (void __iomem *) (((char __iomem *) dd->piobase) + qib_pio4koffset - qib_kreglen); if (qib_userlen) /* ureg will now be accessed relative to dd->userbase */ dd->userbase = qib_userbase; return 0; bail_piobase: iounmap(qib_piobase); bail_kregbase: iounmap(qib_kregbase); bail: return -ENOMEM; }
gpl-2.0
CM-CHT/android_kernel_intel_cherrytrail
drivers/input/touchscreen/wacom_i2c.c
999
6759
/* * Wacom Penabled Driver for I2C * * Copyright (c) 2011 - 2013 Tatsunosuke Tobita, Wacom. * <tobita.tatsunosuke@wacom.co.jp> * * This program is free software; you can redistribute it * and/or modify it under the terms of the GNU General * Public License as published by the Free Software * Foundation; either version of 2 of the License, * or (at your option) any later version. */ #include <linux/module.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <asm/unaligned.h> #define WACOM_CMD_QUERY0 0x04 #define WACOM_CMD_QUERY1 0x00 #define WACOM_CMD_QUERY2 0x33 #define WACOM_CMD_QUERY3 0x02 #define WACOM_CMD_THROW0 0x05 #define WACOM_CMD_THROW1 0x00 #define WACOM_QUERY_SIZE 19 struct wacom_features { int x_max; int y_max; int pressure_max; char fw_version; }; struct wacom_i2c { struct i2c_client *client; struct input_dev *input; u8 data[WACOM_QUERY_SIZE]; bool prox; int tool; }; static int wacom_query_device(struct i2c_client *client, struct wacom_features *features) { int ret; u8 cmd1[] = { WACOM_CMD_QUERY0, WACOM_CMD_QUERY1, WACOM_CMD_QUERY2, WACOM_CMD_QUERY3 }; u8 cmd2[] = { WACOM_CMD_THROW0, WACOM_CMD_THROW1 }; u8 data[WACOM_QUERY_SIZE]; struct i2c_msg msgs[] = { { .addr = client->addr, .flags = 0, .len = sizeof(cmd1), .buf = cmd1, }, { .addr = client->addr, .flags = 0, .len = sizeof(cmd2), .buf = cmd2, }, { .addr = client->addr, .flags = I2C_M_RD, .len = sizeof(data), .buf = data, }, }; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret < 0) return ret; if (ret != ARRAY_SIZE(msgs)) return -EIO; features->x_max = get_unaligned_le16(&data[3]); features->y_max = get_unaligned_le16(&data[5]); features->pressure_max = get_unaligned_le16(&data[11]); features->fw_version = get_unaligned_le16(&data[13]); dev_dbg(&client->dev, "x_max:%d, y_max:%d, pressure:%d, fw:%d\n", features->x_max, features->y_max, features->pressure_max, 
features->fw_version); return 0; } static irqreturn_t wacom_i2c_irq(int irq, void *dev_id) { struct wacom_i2c *wac_i2c = dev_id; struct input_dev *input = wac_i2c->input; u8 *data = wac_i2c->data; unsigned int x, y, pressure; unsigned char tsw, f1, f2, ers; int error; error = i2c_master_recv(wac_i2c->client, wac_i2c->data, sizeof(wac_i2c->data)); if (error < 0) goto out; tsw = data[3] & 0x01; ers = data[3] & 0x04; f1 = data[3] & 0x02; f2 = data[3] & 0x10; x = le16_to_cpup((__le16 *)&data[4]); y = le16_to_cpup((__le16 *)&data[6]); pressure = le16_to_cpup((__le16 *)&data[8]); if (!wac_i2c->prox) wac_i2c->tool = (data[3] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; wac_i2c->prox = data[3] & 0x20; input_report_key(input, BTN_TOUCH, tsw || ers); input_report_key(input, wac_i2c->tool, wac_i2c->prox); input_report_key(input, BTN_STYLUS, f1); input_report_key(input, BTN_STYLUS2, f2); input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); input_report_abs(input, ABS_PRESSURE, pressure); input_sync(input); out: return IRQ_HANDLED; } static int wacom_i2c_open(struct input_dev *dev) { struct wacom_i2c *wac_i2c = input_get_drvdata(dev); struct i2c_client *client = wac_i2c->client; enable_irq(client->irq); return 0; } static void wacom_i2c_close(struct input_dev *dev) { struct wacom_i2c *wac_i2c = input_get_drvdata(dev); struct i2c_client *client = wac_i2c->client; disable_irq(client->irq); } static int wacom_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct wacom_i2c *wac_i2c; struct input_dev *input; struct wacom_features features = { 0 }; int error; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(&client->dev, "i2c_check_functionality error\n"); return -EIO; } error = wacom_query_device(client, &features); if (error) return error; wac_i2c = kzalloc(sizeof(*wac_i2c), GFP_KERNEL); input = input_allocate_device(); if (!wac_i2c || !input) { error = -ENOMEM; goto err_free_mem; } wac_i2c->client = client; wac_i2c->input 
= input; input->name = "Wacom I2C Digitizer"; input->id.bustype = BUS_I2C; input->id.vendor = 0x56a; input->id.version = features.fw_version; input->dev.parent = &client->dev; input->open = wacom_i2c_open; input->close = wacom_i2c_close; input->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOOL_PEN, input->keybit); __set_bit(BTN_TOOL_RUBBER, input->keybit); __set_bit(BTN_STYLUS, input->keybit); __set_bit(BTN_STYLUS2, input->keybit); __set_bit(BTN_TOUCH, input->keybit); input_set_abs_params(input, ABS_X, 0, features.x_max, 0, 0); input_set_abs_params(input, ABS_Y, 0, features.y_max, 0, 0); input_set_abs_params(input, ABS_PRESSURE, 0, features.pressure_max, 0, 0); input_set_drvdata(input, wac_i2c); error = request_threaded_irq(client->irq, NULL, wacom_i2c_irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "wacom_i2c", wac_i2c); if (error) { dev_err(&client->dev, "Failed to enable IRQ, error: %d\n", error); goto err_free_mem; } /* Disable the IRQ, we'll enable it in wac_i2c_open() */ disable_irq(client->irq); error = input_register_device(wac_i2c->input); if (error) { dev_err(&client->dev, "Failed to register input device, error: %d\n", error); goto err_free_irq; } i2c_set_clientdata(client, wac_i2c); return 0; err_free_irq: free_irq(client->irq, wac_i2c); err_free_mem: input_free_device(input); kfree(wac_i2c); return error; } static int wacom_i2c_remove(struct i2c_client *client) { struct wacom_i2c *wac_i2c = i2c_get_clientdata(client); free_irq(client->irq, wac_i2c); input_unregister_device(wac_i2c->input); kfree(wac_i2c); return 0; } #ifdef CONFIG_PM_SLEEP static int wacom_i2c_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); disable_irq(client->irq); return 0; } static int wacom_i2c_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); enable_irq(client->irq); return 0; } #endif static SIMPLE_DEV_PM_OPS(wacom_i2c_pm, wacom_i2c_suspend, wacom_i2c_resume); static const struct i2c_device_id wacom_i2c_id[] = { { 
"WAC_I2C_EMR", 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, wacom_i2c_id); static struct i2c_driver wacom_i2c_driver = { .driver = { .name = "wacom_i2c", .owner = THIS_MODULE, .pm = &wacom_i2c_pm, }, .probe = wacom_i2c_probe, .remove = wacom_i2c_remove, .id_table = wacom_i2c_id, }; module_i2c_driver(wacom_i2c_driver); MODULE_AUTHOR("Tatsunosuke Tobita <tobita.tatsunosuke@wacom.co.jp>"); MODULE_DESCRIPTION("WACOM EMR I2C Driver"); MODULE_LICENSE("GPL");
gpl-2.0
Vachounet/acer-kernel-msm7x30
drivers/staging/otus/hal/hpusb.c
1511
47639
/* * Copyright (c) 2000-2005 ZyDAS Technology Corporation * Copyright (c) 2007-2008 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* */ /* Module Name : ud.c */ /* */ /* Abstract */ /* This module contains USB descriptor functions. */ /* */ /* NOTES */ /* None */ /* */ /************************************************************************/ #include "../80211core/cprecomp.h" #include "hpani.h" #include "hpusb.h" extern void zfwUsbCmd(zdev_t* dev, u8_t endpt, u32_t* cmd, u16_t cmdLen); extern void zfIdlRsp(zdev_t* dev, u32_t* rsp, u16_t rspLen); extern u16_t zfDelayWriteInternalReg(zdev_t* dev, u32_t addr, u32_t val); extern u16_t zfFlushDelayWrite(zdev_t* dev); #define USB_ENDPOINT_TX_INDEX 1 #define USB_ENDPOINT_RX_INDEX 2 #define USB_ENDPOINT_INT_INDEX 3 #define USB_ENDPOINT_CMD_INDEX 4 void zfIdlCmd(zdev_t* dev, u32_t* cmd, u16_t cmdLen) { #if ZM_SW_LOOP_BACK != 1 zfwUsbCmd(dev, USB_ENDPOINT_CMD_INDEX, cmd, cmdLen); #endif return; } /* zfAdjustCtrlSetting: fit OUTS format */ /* convert MIMO2 to OUTS */ void zfAdjustCtrlSetting(zdev_t* dev, u16_t* header, zbuf_t* buf) { /* MIMO2 => OUTS FB-50 */ /* length not change, only modify format */ u32_t oldMT; u32_t oldMCS; u32_t phyCtrl; u32_t oldPhyCtrl; u16_t tpc = 0; struct zsHpPriv* hpPriv; zmw_get_wlan_dev(dev); 
hpPriv=wd->hpPrivate; /* mm */ if (header == NULL) { oldPhyCtrl = zmw_buf_readh(dev, buf, 4) | ((u32_t)zmw_buf_readh(dev, buf, 6) << 16); } else { oldPhyCtrl = header[2] | ((u32_t)header[3] <<16); } phyCtrl = 0; /* MT : Bit[1~0] */ oldMT = oldPhyCtrl&0x3; phyCtrl |= oldMT; if ( oldMT == 0x3 ) /* DL-OFDM (Duplicate Legacy OFDM) */ phyCtrl |= 0x1; /* PT : Bit[2] HT PT: 0 Mixed mode 1 Green field */ phyCtrl |= (oldPhyCtrl&0x4); /* Bandwidth control : Bit[4~3] */ if ( oldPhyCtrl&0x800000 ) /* Bit23 : 40M */ { #if 0 if (oldMT == 0x3) /* DL-OFDM */ phyCtrl |= (0x3<<3); /* 40M duplicate */ else phyCtrl |= (0x2<<3); /* 40M shared */ #else if (oldMT == 0x2 && ((struct zsHpPriv*)wd->hpPrivate)->hwBw40) { phyCtrl |= (0x2<<3); /* 40M shared */ } #endif } else { oldPhyCtrl &= ~0x80000000; } /* MCS : Bit[24~18] */ oldMCS = (oldPhyCtrl&0x7f0000)>>16; /* Bit[22~16] */ phyCtrl |= (oldMCS<<18); /* Short GI : Bit[31]*/ phyCtrl |= (oldPhyCtrl&0x80000000); /* AM : Antenna mask */ //if ((oldMT == 2) && (oldMCS > 7)) if (hpPriv->halCapability & ZM_HP_CAP_11N_ONE_TX_STREAM) { phyCtrl |= (0x1<<15); } else { /* HT Tx 2 chain */ /* OFDM 6M/9M/12M/18M/24M Tx 2 chain */ /* OFDM 36M/48M/54M/ Tx 1 chain */ /* CCK Tx 2 chain */ if ((oldMT == 2) || (oldMT == 3)) { phyCtrl |= (0x5<<15); } else if (oldMT == 1) { if ((oldMCS == 0xb) || (oldMCS == 0xf) || (oldMCS == 0xa) || (oldMCS == 0xe) || (oldMCS == 0x9)) //6M/9M/12M/18M/24M { phyCtrl |= (0x5<<15); } else { phyCtrl |= (0x1<<15); } } else //(oldMT==0) { phyCtrl |= (0x5<<15); } } //else // phyCtrl |= (0x1<<15); /* TPC */ /* TODO : accelerating these code */ if (hpPriv->hwFrequency < 3000) { if (oldMT == 0) { /* CCK */ tpc = (hpPriv->tPow2xCck[oldMCS]&0x3f); } else if (oldMT == 1) { /* OFDM */ if (oldMCS == 0xc) { tpc = (hpPriv->tPow2x2g[3]&0x3f); } else if (oldMCS == 0x8) { tpc = (hpPriv->tPow2x2g[2]&0x3f); } else if (oldMCS == 0xd) { tpc = (hpPriv->tPow2x2g[1]&0x3f); } else if (oldMCS == 0x9) { tpc = 
((hpPriv->tPow2x2g[0]-hpPriv->tPow2x2g24HeavyClipOffset)&0x3f); } else { tpc = (hpPriv->tPow2x2g[0]&0x3f); } } else if (oldMT == 2) { if ( oldPhyCtrl&0x800000 ) /* Bit23 : 40M */ { /* HT 40 */ tpc = (hpPriv->tPow2x2gHt40[oldMCS&0x7]&0x3f); } else { /* HT 20 */ tpc = (hpPriv->tPow2x2gHt20[oldMCS&0x7]&0x3f); } } } else //5GHz { if (oldMT == 1) { /* OFDM */ if (oldMCS == 0xc) { tpc = (hpPriv->tPow2x5g[3]&0x3f); } else if (oldMCS == 0x8) { tpc = (hpPriv->tPow2x5g[2]&0x3f); } else if (oldMCS == 0xd) { tpc = (hpPriv->tPow2x5g[1]&0x3f); } else { tpc = (hpPriv->tPow2x5g[0]&0x3f); } } else if (oldMT == 2) { if ( oldPhyCtrl&0x800000 ) /* Bit23 : 40M */ { /* HT 40 */ tpc = (hpPriv->tPow2x5gHt40[oldMCS&0x7]&0x3f); } else { /* HT 20 */ tpc = (hpPriv->tPow2x5gHt20[oldMCS&0x7]&0x3f); } } } /* Tx power adjust for HT40 */ /* HT40 +1dBm */ if ((oldMT==2) && (oldPhyCtrl&0x800000) ) { tpc += 2; } tpc &= 0x3f; /* Evl force tx TPC */ if(wd->forceTxTPC) { tpc = (u16_t)(wd->forceTxTPC & 0x3f); } if (hpPriv->hwFrequency < 3000) { wd->maxTxPower2 &= 0x3f; tpc = (tpc > wd->maxTxPower2)? wd->maxTxPower2 : tpc; } else { wd->maxTxPower5 &= 0x3f; tpc = (tpc > wd->maxTxPower5)? 
wd->maxTxPower5 : tpc; } #define ZM_MIN_TPC 5 #define ZM_TPC_OFFSET 5 #define ZM_SIGNAL_THRESHOLD 56 if ((wd->sta.bScheduleScan == FALSE) && (wd->sta.bChannelScan == FALSE)) { if (( wd->wlanMode == ZM_MODE_INFRASTRUCTURE ) && (zfStaIsConnected(dev)) && (wd->SignalStrength > ZM_SIGNAL_THRESHOLD)) { if (tpc > ((ZM_MIN_TPC+ZM_TPC_OFFSET)*2)) { tpc -= (ZM_TPC_OFFSET*2); } else if (tpc > (ZM_MIN_TPC*2)) { tpc = (ZM_MIN_TPC*2); } } } #undef ZM_MIN_TPC #undef ZM_TPC_OFFSET #undef ZM_SIGNAL_THRESHOLD #ifndef ZM_OTUS_LINUX_PHASE_2 phyCtrl |= (tpc & 0x3f) << 9; #endif /* Set bits[8:6]BF-MCS for heavy clip */ if ((phyCtrl&0x3) == 2) { phyCtrl |= ((phyCtrl >> 12) & 0x1c0); } /* PHY control */ if (header == NULL) { zmw_buf_writeh(dev, buf, 4, (u16_t) (phyCtrl&0xffff)); zmw_buf_writeh(dev, buf, 6, (u16_t) (phyCtrl>>16)); } else { //PHY control L header[2] = (u16_t) (phyCtrl&0xffff); //PHY control H header[3] = (u16_t) (phyCtrl>>16); } zm_msg2_tx(ZM_LV_2, "old phy ctrl = ", oldPhyCtrl); zm_msg2_tx(ZM_LV_2, "new phy ctrl = ", phyCtrl); //DbgPrint("old phy ctrl =%08x \n", oldPhyCtrl); //DbgPrint("new phy ctrl =%08x \n", phyCtrl); } #define EXTRA_INFO_LEN 24 //RSSI(7) + EVM(12) + PHY(1) + MACStatus(4) u16_t zfHpSend(zdev_t* dev, u16_t* header, u16_t headerLen, u16_t* snap, u16_t snapLen, u16_t* tail, u16_t tailLen, zbuf_t* buf, u16_t offset, u16_t bufType, u8_t ac, u8_t keyIdx) { #if ZM_SW_LOOP_BACK == 1 zbuf_t *rxbuf; u8_t *puRxBuf; u8_t *pHdr; u8_t *psnap; u16_t plcplen = 12; u16_t i; u16_t swlpOffset; #endif /* #if ZM_SW_LOOP_BACK == 1 */ struct zsHpPriv* hpPriv; zmw_get_wlan_dev(dev); hpPriv=wd->hpPrivate; zm_msg1_tx(ZM_LV_1, "zfHpSend(), len = ", 12 + headerLen-8 + snapLen + zfwBufGetSize(dev, buf) + 4 + 8); /* Adjust ctrl setting : 6N14 yjsung */ zfAdjustCtrlSetting(dev, header, buf); #if ZM_SW_LOOP_BACK != 1 hpPriv->usbSendBytes += zfwBufGetSize(dev, buf); hpPriv->usbAcSendBytes[ac&0x3] += zfwBufGetSize(dev, buf); /* Submit USB Out Urb */ zfwUsbSend(dev, 
USB_ENDPOINT_TX_INDEX, (u8_t *)header, headerLen, (u8_t *)snap, snapLen, (u8_t *)tail, tailLen, buf, offset); #endif #if ZM_SW_LOOP_BACK == 1 rxbuf = zfwBufAllocate(dev, plcplen + headerLen-8 + snapLen + (zfwBufGetSize(dev, buf)-offset) + 4 + EXTRA_INFO_LEN); pHdr = (u8_t *) header+8; psnap = (u8_t *) snap; zmw_enter_critical_section(dev); /* software loop back */ /* Copy WLAN header and packet buffer */ swlpOffset = plcplen; for(i = 0; i < headerLen-8; i++) { zmw_rx_buf_writeb(dev, rxbuf, swlpOffset+i, pHdr[i]); } swlpOffset += headerLen-8; /* Copy SNAP header */ for(i = 0; i < snapLen; i++) { zmw_rx_buf_writeb(dev, rxbuf, swlpOffset+i, psnap[i]); } swlpOffset += snapLen; /* Copy body from tx buf to rxbuf */ for(i = 0; i < (zfwBufGetSize(dev, buf)-offset); i++) { u8_t value = zmw_rx_buf_readb(dev, buf, i+offset); zmw_rx_buf_writeb(dev, rxbuf, swlpOffset+i, value); } /* total length = PLCP + MacHeader + Payload + FCS + RXstatus */ /* 12 + headerLen-8 + snapLen + buf length + 4 + 8 */ zfwSetBufSetSize(dev, rxbuf, swlpOffset + (zfwBufGetSize(dev, buf)-offset) + 4 + EXTRA_INFO_LEN ); zmw_leave_critical_section(dev); zfwBufFree(dev, buf, 0); //zfwDumpBuf(dev, rxbuf); //------------------------------------------------- //zfCoreRecv(dev, rxbuf); #endif /* #if ZM_SW_LOOP_BACK */ return ZM_SUCCESS; } /* Report moniter Hal rx information about rssi, evm, bandwidth, SG etc */ void zfHpQueryMonHalRxInfo(zdev_t* dev, u8_t *monHalRxInfo) { zmw_get_wlan_dev(dev); zfMemoryCopy(monHalRxInfo, (u8_t*)&(((struct zsHpPriv*)wd->hpPrivate)->halRxInfo), sizeof(struct zsHalRxInfo)); } u8_t zfIsDataFrame(zdev_t* dev, zbuf_t* buf) { u8_t frameType; u8_t mpduInd; mpduInd = zmw_rx_buf_readb(dev, buf, zfwBufGetSize(dev, buf)-1); /* sinlge or First */ if ((mpduInd & 0x30) == 0x00 || (mpduInd & 0x30) == 0x20) { frameType = zmw_rx_buf_readb(dev, buf, 12); } else { frameType = zmw_rx_buf_readb(dev, buf, 0); } if((frameType & 0xf) == ZM_WLAN_DATA_FRAME) return 1; else return 0; } u32_t 
zfcConvertRateOFDM(zdev_t* dev, zbuf_t* buf) { // What's the default value?? u32_t MCS = 0; switch(zmw_rx_buf_readb(dev, buf, 0)& 0xf) { case 0xb: MCS = 0x4; break; case 0xf: MCS = 0x5; break; case 0xa: MCS = 0x6; break; case 0xe: MCS = 0x7; break; case 0x9: MCS = 0x8; break; case 0xd: MCS = 0x9; break; case 0x8: MCS = 0xa; break; case 0xc: MCS = 0xb; break; } return MCS; } u16_t zfHpGetPayloadLen(zdev_t* dev, zbuf_t* buf, u16_t len, u16_t plcpHdrLen, u32_t *rxMT, u32_t *rxMCS, u32_t *rxBW, u32_t *rxSG ) { u8_t modulation,mpduInd; u16_t low, high, msb; s16_t payloadLen = 0; zmw_get_wlan_dev(dev); mpduInd = zmw_rx_buf_readb(dev, buf, len-1); modulation = zmw_rx_buf_readb(dev, buf, (len-1)) & 0x3; *rxMT = modulation; //zm_debug_msg1(" modulation= ", modulation); switch (modulation) { case 0: /* CCK Mode */ low = zmw_rx_buf_readb(dev, buf, 2); high = zmw_rx_buf_readb(dev, buf, 3); payloadLen = (low | high << 8) - 4; if (wd->enableHALDbgInfo) { *rxMCS = zmw_rx_buf_readb(dev, buf, 0); *rxBW = 0; *rxSG = 0; } break; case 1: /* Legacy-OFDM mode */ low = zmw_rx_buf_readb(dev, buf, 0) >> 5; high = zmw_rx_buf_readb(dev, buf, 1); msb = zmw_rx_buf_readb(dev, buf, 2) & 0x1; payloadLen = (low | (high << 3) | (msb << 11)) - 4; if (wd->enableHALDbgInfo) { *rxMCS = zfcConvertRateOFDM(dev, buf); *rxBW = 0; *rxSG = 0; } break; case 2: /* HT OFDM mode */ //zm_debug_msg1("aggregation= ", (zmw_rx_buf_readb(dev, buf, 6) >> 3) &0x1 ); if ((mpduInd & 0x30) == 0x00 || (mpduInd & 0x30) == 0x10) //single or last mpdu payloadLen = len - 24 - 4 - plcpHdrLen; // - rxStatus - fcs else { payloadLen = len - 4 - 4 - plcpHdrLen; // - rxStatus - fcs //zm_debug_msg1("first or middle mpdu, plcpHdrLen= ", plcpHdrLen); } if (wd->enableHALDbgInfo) { *rxMCS = zmw_rx_buf_readb(dev, buf, 3) & 0x7f; *rxBW = (zmw_rx_buf_readb(dev, buf, 3) >> 7) & 0x1; *rxSG = (zmw_rx_buf_readb(dev, buf, 6) >> 7) & 0x1; } break; default: break; } /* return the payload length - FCS */ if (payloadLen < 0) payloadLen = 0; return 
payloadLen; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfiUsbRecv */ /* Callback function for USB IN Transfer. */ /* */ /* INPUTS */ /* dev: device pointer */ /* */ /* OUTPUTS */ /* None */ /* */ /* AUTHOR */ /* Yuan-Gu Wei ZyDAS Technology Corporation 2005.10 */ /* */ /************************************************************************/ #define ZM_INT_USE_EP2 1 #define ZM_INT_USE_EP2_HEADER_SIZE 12 #if ZM_INT_USE_EP2 == 1 void zfiUsbRegIn(zdev_t* dev, u32_t* rsp, u16_t rspLen); #endif #ifdef ZM_OTUS_RX_STREAM_MODE void zfiUsbRecvPerPkt(zdev_t *dev, zbuf_t *buf) #else void zfiUsbRecv(zdev_t *dev, zbuf_t *buf) #endif { #if ZM_FW_LOOP_BACK != 1 u8_t mpduInd; u16_t plcpHdrLen; u16_t crcPlusRxStatusLen; u16_t len, payloadLen=0; u16_t i; //CWYang(+) struct zsAdditionInfo addInfo; u32_t rxMT; u32_t rxMCS; u32_t rxBW; u32_t rxSG; struct zsHpPriv* hpPriv; zmw_get_wlan_dev(dev); hpPriv=wd->hpPrivate; //zm_msg0_rx(ZM_LV_0, "zfiUsbRecv()"); #if ZM_INT_USE_EP2 == 1 for (i=0; i<(ZM_INT_USE_EP2_HEADER_SIZE>>1); i++) { if (zmw_rx_buf_readh(dev, buf, i*2) != 0xffff) break; } if (i==(ZM_INT_USE_EP2_HEADER_SIZE>>1)) { u32_t rsp[ZM_USB_MAX_EPINT_BUFFER/4]; u16_t rspLen; u32_t rspi; u8_t* pdst = (u8_t*)rsp; /* Interrupt Rsp */ rspLen = (u16_t) zfwBufGetSize(dev, buf)-ZM_INT_USE_EP2_HEADER_SIZE; if (rspLen > 60) { zm_debug_msg1("Get error len by EP2 = \n", rspLen); /* free USB buf */ zfwBufFree(dev, buf, 0); return; } for (rspi=0; rspi<rspLen; rspi++) { *pdst = zmw_rx_buf_readb(dev, buf, rspi+ZM_INT_USE_EP2_HEADER_SIZE); pdst++; } //if (adapter->zfcbUsbRegIn) // adapter->zfcbUsbRegIn(adapter, rsp, rspLen); zfiUsbRegIn(dev, rsp, rspLen); /* free USB buf */ zfwBufFree(dev, buf, 0); return; } #endif /* end of #if ZM_INT_USE_EP2 == 1 */ ZM_PERFORMANCE_RX_MPDU(dev, buf); if (wd->swSniffer) { /* airopeek: Report everything up */ if (wd->zfcbRecv80211 != NULL) { wd->zfcbRecv80211(dev, buf, NULL); } } /* Read the last byte */ 
len = zfwBufGetSize(dev, buf); mpduInd = zmw_rx_buf_readb(dev, buf, len-1); /* First MPDU */ if((mpduInd & 0x30) == 0x20) { u16_t duration; if (zmw_rx_buf_readb(dev, buf, 36) == 0) //AC = BE { duration = zmw_rx_buf_readh(dev, buf, 14); if (duration > hpPriv->aggMaxDurationBE) { hpPriv->aggMaxDurationBE = duration; } else { if (hpPriv->aggMaxDurationBE > 10) { hpPriv->aggMaxDurationBE--; } } //DbgPrint("aggMaxDurationBE=%d", hpPriv->aggMaxDurationBE); } } #if 1 /* First MPDU or Single MPDU */ if(((mpduInd & 0x30) == 0x00) || ((mpduInd & 0x30) == 0x20)) //if ((mpduInd & 0x10) == 0x00) { plcpHdrLen = 12; // PLCP header length } else { if (zmw_rx_buf_readh(dev, buf, 4) == wd->macAddr[0] && zmw_rx_buf_readh(dev, buf, 6) == wd->macAddr[1] && zmw_rx_buf_readh(dev, buf, 8) == wd->macAddr[2]) { plcpHdrLen = 0; } else if (zmw_rx_buf_readh(dev, buf, 16) == wd->macAddr[0] && zmw_rx_buf_readh(dev, buf, 18) == wd->macAddr[1] && zmw_rx_buf_readh(dev, buf, 20) == wd->macAddr[2]){ plcpHdrLen = 12; } else { plcpHdrLen = 0; } } /* Last MPDU or Single MPDU */ if ((mpduInd & 0x30) == 0x00 || (mpduInd & 0x30) == 0x10) { crcPlusRxStatusLen = EXTRA_INFO_LEN + 4; // Extra bytes + FCS } else { crcPlusRxStatusLen = 4 + 4; // Extra 4 bytes + FCS } #else plcpHdrLen = 12; crcPlusRxStatusLen = EXTRA_INFO_LEN + 4; // Extra bytes + FCS #endif if (len < (plcpHdrLen+10+crcPlusRxStatusLen)) { zm_msg1_rx(ZM_LV_0, "Invalid Rx length=", len); //zfwDumpBuf(dev, buf); zfwBufFree(dev, buf, 0); return; } /* display RSSI combined */ /* * ¢z¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢{ * ¢x PLCP Header ¢x MPDU ¢x RSSI ¢x EVM ¢x PHY Err ¢x MAC Status ¢x * ¢u¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢t * ¢x 12 ¢x n ¢x 7 ¢x 12 ¢x 1 ¢x 4 ¢x * 
¢|¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢} * RSSI filed (From BB and MAC just pass them to host) * Byte1: RSSI for antenna 0. * Byte2: RSSI for antenna 1. * Byte3: RSSI for antenna 2. * Byte4: RSSI for antenna 0 extension. * Byte5: RSSI for antenna 1 extension. * Byte6: RSSI for antenna 2 extension. * Byte7: RSSI for antenna combined. */ //zm_debug_msg1(" recv RSSI = ", zmw_rx_buf_readb(dev, buf, (len-1)-17)); payloadLen = zfHpGetPayloadLen(dev, buf, len, plcpHdrLen, &rxMT, &rxMCS, &rxBW, &rxSG); /* Hal Rx info */ /* First MPDU or Single MPDU */ if(((mpduInd & 0x30) == 0x00) || ((mpduInd & 0x30) == 0x20)) { if (wd->enableHALDbgInfo && zfIsDataFrame(dev, buf)) { ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxDataMT = rxMT; ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxDataMCS = rxMCS; ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxDataBW = rxBW; ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxDataSG = rxSG; } } if ((plcpHdrLen + payloadLen) > len) { zm_msg1_rx(ZM_LV_0, "Invalid payload length=", payloadLen); zfwBufFree(dev, buf, 0); return; } //Store Rx Tail Infomation before Remove--CWYang(+) #if 0 for (i = 0; i < crcPlusRxStatusLen-4; i++) { addInfo.Tail.Byte[i] = zmw_rx_buf_readb(dev, buf, len - crcPlusRxStatusLen + 4 + i); } #else /* * Brief format of OUTS chip * ¢z¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢{ * ¢x PLCP Header ¢x MPDU ¢x RSSI ¢x EVM ¢x PHY Err ¢x MAC Status ¢x * ¢u¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢t * ¢x 12 ¢x n ¢x 7 ¢x 12 ¢x 1 ¢x 4 ¢x * ¢|¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢} * RSSI: * Byte 1 antenna 0 * Byte 2 antenna 1 * Byte 3 antenna 2 * Byte 4 antenna 0 extension * Byte 5 antenna 1 
extension * Byte 6 antenna 2 extension * Byte 7 antenna combined * EVM: * Byte 1 Stream 0 pilot 0 * Byte 2 Stream 0 pilot 1 * Byte 3 Stream 0 pilot 2 * Byte 4 Stream 0 pilot 3 * Byte 5 Stream 0 pilot 4 * Byte 6 Stream 0 pilot 5 * Byte 7 Stream 1 pilot 0 * Byte 8 Stream 1 pilot 1 * Byte 9 Stream 1 pilot 2 * Byte 10 Stream 1 pilot 3 * Byte 11 Stream 1 pilot 4 * Byte 12 Stream 1 pilot 5 */ /* Fill the Tail information */ /* Last MPDU or Single MPDU */ if ((mpduInd & 0x30) == 0x00 || (mpduInd & 0x30) == 0x10) { #define ZM_RX_RSSI_COMPENSATION 27 u8_t zm_rx_rssi_compensation = ZM_RX_RSSI_COMPENSATION; /* RSSI information */ addInfo.Tail.Data.SignalStrength1 = zmw_rx_buf_readb(dev, buf, (len-1) - 17) + ((hpPriv->rxStrongRSSI == 1)?zm_rx_rssi_compensation:0); #undef ZM_RX_RSSI_COMPENSATION /* EVM */ /* TODO: for RD/BB debug message */ /* save current rx hw infomration, report to DrvCore/Application */ if (wd->enableHALDbgInfo && zfIsDataFrame(dev, buf)) { u8_t trssi; for (i=0; i<7; i++) { trssi = zmw_rx_buf_readb(dev, buf, (len-1) - 23 + i); if (trssi&0x80) { trssi = ((~((u8_t)trssi) & 0x7f) + 1) & 0x7f; } ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRSSI[i] = trssi; } if (rxMT==2) { //if (rxBW) //{ for (i=0; i<12; i++) ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[i] = zmw_rx_buf_readb(dev, buf, (len-1) - 16 + i); //} //else //{ // for (i=0; i<4; i++) // ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[i] = // zmw_rx_buf_readb(dev, buf, (len-1) - 16 + i); //} } #if 0 /* print */ zm_dbg(("MT(%d) MCS(%d) BW(%d) SG(%d) RSSI:%d,%d,%d,%d,%d,%d,%d EVM:(%d,%d,%d,%d,%d,%d)(%d,%d,%d,%d,%d,%d)\n", rxMT, rxMCS, rxBW, rxSG, ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRSSI[0], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRSSI[1], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRSSI[2], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRSSI[3], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRSSI[4], ((struct 
zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRSSI[5], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRSSI[6], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[0], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[1], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[2], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[3], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[4], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[5], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[6], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[7], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[8], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[9], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[10], ((struct zsHpPriv*)wd->hpPrivate)->halRxInfo.currentRxEVM[11] )); #endif } /* if (wd->enableHALDbgInfo && zfIsDataFrame(dev, buf)) */ } else { /* Mid or First aggregate frame without phy rx information */ addInfo.Tail.Data.SignalStrength1 = 0; } addInfo.Tail.Data.SignalStrength2 = 0; addInfo.Tail.Data.SignalStrength3 = 0; addInfo.Tail.Data.SignalQuality = 0; addInfo.Tail.Data.SAIndex = zmw_rx_buf_readb(dev, buf, len - 4); addInfo.Tail.Data.DAIndex = zmw_rx_buf_readb(dev, buf, len - 3); addInfo.Tail.Data.ErrorIndication = zmw_rx_buf_readb(dev, buf, len - 2); addInfo.Tail.Data.RxMacStatus = zmw_rx_buf_readb(dev, buf, len - 1); #endif /* Remove CRC and Rx Status */ zfwBufSetSize(dev, buf, (len-crcPlusRxStatusLen)); //zfwBufSetSize(dev, buf, payloadLen + plcpHdrLen); /* payloadLen + PLCP 12 - FCS 4*/ //Store PLCP Header Infomation before Remove--CWYang(+) if (plcpHdrLen != 0) { for (i = 0; i < plcpHdrLen; i++) { addInfo.PlcpHeader[i] = zmw_rx_buf_readb(dev, buf, i); } } else { addInfo.PlcpHeader[0] = 0; } /* Remove PLCP header */ zfwBufRemoveHead(dev, buf, plcpHdrLen); /* handle 802.11 frame */ zfCoreRecv(dev, buf, &addInfo); #else /* Firmware loopback: Rx frame = Tx frame */ /* convert 
Rx frame to fit receive frame format */ zbuf_t *new_buf; u8_t ctrl_offset = 8; u8_t PLCP_Len = 12; u8_t data; u8_t i; /* Tx: | ctrl_setting | Mac hdr | data | */ /* 8 24 x */ /* Rx: | PLCP | Mac hdr | data | FCS | Rxstatus | */ /* 12 24 x 4 8 */ /* new allocate a rx format size buf */ new_buf = zfwBufAllocate(dev, zfwBufGetSize(dev, buf)-8+12+4+EXTRA_INFO_LEN); for (i=0; i<zfwBufGetSize(dev, buf)-ctrl_offset; i++) { data = zmw_rx_buf_readb(dev, buf, ctrl_offset+i); zmw_rx_buf_writeb(dev, new_buf, PLCP_Len+i, data); } zfwBufSetSize(dev, new_buf, zfwBufGetSize(dev, buf)-8+12+4+EXTRA_INFO_LEN); zfwBufFree(dev, buf, 0); /* receive the new_buf */ //zfCoreRecv(dev, new_buf); #endif } #ifdef ZM_OTUS_RX_STREAM_MODE void zfiUsbRecv(zdev_t *dev, zbuf_t *buf) { u16_t index = 0; u16_t chkIdx; u32_t status = 0; u16_t ii; zbuf_t *newBuf; zbuf_t *rxBufPool[8]; u16_t rxBufPoolIndex = 0; struct zsHpPriv *halPriv; u8_t *srcBufPtr; u32_t bufferLength; u16_t usbRxRemainLen; u16_t usbRxPktLen; zmw_get_wlan_dev(dev); halPriv = (struct zsHpPriv*)wd->hpPrivate; srcBufPtr = zmw_buf_get_buffer(dev, buf); bufferLength = zfwBufGetSize(dev, buf); /* Zero Length Transfer */ if (!bufferLength) { zfwBufFree(dev, buf, 0); return; } usbRxRemainLen = halPriv->usbRxRemainLen; usbRxPktLen = halPriv->usbRxTransferLen; /* Check whether there is any data in the last transfer */ if (usbRxRemainLen != 0 ) { zbuf_t *remainBufPtr = halPriv->remainBuf; u8_t* BufPtr = NULL; if ( remainBufPtr != NULL ) { BufPtr = zmw_buf_get_buffer(dev, remainBufPtr); } index = usbRxRemainLen; usbRxRemainLen -= halPriv->usbRxPadLen; /* Copy data */ if ( BufPtr != NULL ) { zfwMemoryCopy(&(BufPtr[usbRxPktLen]), srcBufPtr, usbRxRemainLen); } usbRxPktLen += usbRxRemainLen; halPriv->usbRxRemainLen = 0; if ( remainBufPtr != NULL ) { zfwBufSetSize(dev, remainBufPtr, usbRxPktLen); rxBufPool[rxBufPoolIndex++] = remainBufPtr; } halPriv->remainBuf = NULL; } //zm_debug_msg1("length: %d\n", 
(int)pUsbRxTransfer->pRxUrb->UrbBulkOrInterruptTransfer.TransferBufferLength); bufferLength = zfwBufGetSize(dev, buf); //printk("bufferLength %d\n", bufferLength); while(index < bufferLength) { u16_t pktLen; u16_t pktTag; //u8_t *ptr = (u8_t*)((struct zsBuffer*)pUsbRxTransfer->buf)->data; u8_t *ptr = srcBufPtr; /* Retrieve packet length and tag */ pktLen = ptr[index] + (ptr[index+1] << 8); pktTag = ptr[index+2] + (ptr[index+3] << 8); if (pktTag == ZM_USB_STREAM_MODE_TAG) { u16_t padLen; zm_assert(pktLen < ZM_WLAN_MAX_RX_SIZE); //printk("Get a packet, pktLen: 0x%04x\n", pktLen); #if 0 /* Dump data */ for (ii = index; ii < pkt_len+4;) { DbgPrint("0x%02x ", (zmw_rx_buf_readb(adapter, pUsbRxTransfer->buf, ii) & 0xff)); if ((++ii % 16) == 0) DbgPrint("\n"); } DbgPrint("\n"); #endif /* Calcuate the padding length, in the current design, the length should be padded to 4 byte boundray. */ padLen = ZM_USB_STREAM_MODE_TAG_LEN - (pktLen & 0x3); if(padLen == ZM_USB_STREAM_MODE_TAG_LEN) padLen = 0; chkIdx = index; index = index + ZM_USB_STREAM_MODE_TAG_LEN + pktLen + padLen; if (chkIdx > ZM_MAX_USB_IN_TRANSFER_SIZE) { zm_debug_msg1("chkIdx is too large, chkIdx: %d\n", chkIdx); zm_assert(0); status = 1; break; } if (index > ZM_MAX_USB_IN_TRANSFER_SIZE) { //struct zsBuffer* BufPtr; //struct zsBuffer* UsbBufPtr; u8_t *BufPtr; u8_t *UsbBufPtr; halPriv->usbRxRemainLen = index - ZM_MAX_USB_IN_TRANSFER_SIZE; // - padLen; halPriv->usbRxTransferLen = ZM_MAX_USB_IN_TRANSFER_SIZE - chkIdx - ZM_USB_STREAM_MODE_TAG_LEN; halPriv->usbRxPadLen = padLen; //check_index = index; if (halPriv->usbRxTransferLen > ZM_WLAN_MAX_RX_SIZE) { zm_debug_msg1("check_len is too large, chk_len: %d\n", halPriv->usbRxTransferLen); status = 1; break; } /* Allocate a skb buffer */ newBuf = zfwBufAllocate(dev, ZM_WLAN_MAX_RX_SIZE); if ( newBuf != NULL ) { BufPtr = zmw_buf_get_buffer(dev, newBuf); UsbBufPtr = srcBufPtr; /* Copy the buffer */ zfwMemoryCopy(BufPtr, &(UsbBufPtr[chkIdx+ZM_USB_STREAM_MODE_TAG_LEN]), 
halPriv->usbRxTransferLen); /* Record the buffer pointer */ halPriv->remainBuf = newBuf; } } else { u8_t* BufPtr; u8_t* UsbBufPtr; /* Allocate a skb buffer */ newBuf = zfwBufAllocate(dev, ZM_WLAN_MAX_RX_SIZE); if ( newBuf != NULL ) { BufPtr = zmw_buf_get_buffer(dev, newBuf); UsbBufPtr = srcBufPtr; /* Copy the buffer */ zfwMemoryCopy(BufPtr, &(UsbBufPtr[chkIdx+ZM_USB_STREAM_MODE_TAG_LEN]), pktLen); zfwBufSetSize(dev, newBuf, pktLen); rxBufPool[rxBufPoolIndex++] = newBuf; } } } else { u16_t i; DbgPrint("Can't find tag, pkt_len: 0x%04x, tag: 0x%04x\n", pktLen, pktTag); #if 0 for(i = 0; i < 32; i++) { DbgPrint("%02x ", buf->data[index-16+i]); if ((i & 0xf) == 0xf) DbgPrint("\n"); } #endif break; } } /* Free buffer */ //zfwBufFree(adapter, pUsbRxTransfer->buf, 0); zfwBufFree(dev, buf, 0); for(ii = 0; ii < rxBufPoolIndex; ii++) { zfiUsbRecvPerPkt(dev, rxBufPool[ii]); } } #endif /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfUsbInit */ /* Initialize USB resource. */ /* */ /* INPUTS */ /* dev : device pointer */ /* */ /* OUTPUTS */ /* None */ /* */ /* AUTHOR */ /* Stephen Chen ZyDAS Technology Corporation 2005.12 */ /* */ /************************************************************************/ void zfUsbInit(zdev_t* dev) { /* Initialize Rx & INT endpoint for receiving data & interrupt */ zfwUsbEnableRxEpt(dev, USB_ENDPOINT_RX_INDEX); zfwUsbEnableIntEpt(dev, USB_ENDPOINT_INT_INDEX); return; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfUsbFree */ /* Free PCI resource. 
*/ /* */ /* INPUTS */ /* dev : device pointer */ /* */ /* OUTPUTS */ /* None */ /* */ /* AUTHOR */ /* Stephen Chen ZyDAS Technology Corporation 2005.12 */ /* */ /************************************************************************/ void zfUsbFree(zdev_t* dev) { struct zsHpPriv *halPriv; zmw_get_wlan_dev(dev); halPriv = (struct zsHpPriv*)wd->hpPrivate; #ifdef ZM_OTUS_RX_STREAM_MODE if ( halPriv->remainBuf != NULL ) { zfwBufFree(dev, halPriv->remainBuf, 0); } #endif return; } void zfHpSendBeacon(zdev_t* dev, zbuf_t* buf, u16_t len) { u32_t hw, lw; u16_t i; zmw_get_wlan_dev(dev); /* Write to beacon buffer (ZM_BEACON_BUFFER_ADDRESS) */ for (i = 0; i<len; i+=4) { lw = zmw_tx_buf_readh(dev, buf, i); hw = zmw_tx_buf_readh(dev, buf, i+2); zfDelayWriteInternalReg(dev, ZM_BEACON_BUFFER_ADDRESS+i, (hw<<16)+lw); } /* Beacon PCLP header */ if (((struct zsHpPriv*)wd->hpPrivate)->hwFrequency < 3000) { zfDelayWriteInternalReg(dev, ZM_MAC_REG_BCN_PLCP, ((len+4)<<(3+16))+0x0400); } else { zfDelayWriteInternalReg(dev, ZM_MAC_REG_BCN_PLCP, ((len+4)<<(16))+0x001b); } /* Beacon length (include CRC32) */ zfDelayWriteInternalReg(dev, ZM_MAC_REG_BCN_LENGTH, len+4); /* Beacon Ready */ zfDelayWriteInternalReg(dev, ZM_MAC_REG_BCN_CTRL, 1); zfFlushDelayWrite(dev); /* Free beacon buf */ zfwBufFree(dev, buf, 0); return; } #define ZM_STATUS_TX_COMP 0x00 #define ZM_STATUS_RETRY_COMP 0x01 #define ZM_STATUS_TX_FAILED 0x02 void zfiUsbRegIn(zdev_t* dev, u32_t* rsp, u16_t rspLen) { //u8_t len, type, i; u8_t type; u8_t *u8rsp; u16_t status; u32_t bitmap; zmw_get_wlan_dev(dev); zm_msg0_mm(ZM_LV_3, "zfiUsbRegIn()"); u8rsp = (u8_t *)rsp; //len = *u8rsp; type = *(u8rsp+1); u8rsp = u8rsp+4; /* Interrupt event */ if ((type & 0xC0) == 0xC0) { if (type == 0xC0) { zfCoreEvent(dev, 0, u8rsp); } else if (type == 0xC1) { #if 0 { u16_t i; DbgPrint("rspLen=%d\n", rspLen); for (i=0; i<(rspLen/4); i++) { DbgPrint("rsp[%d]=0x%lx\n", i, rsp[i]); } } #endif status = (u16_t)(rsp[3] >> 16); ////6789 rsp[8] = rsp[8] >> 2 
| (rsp[9] & 0x1) << 6; switch (status) { case ZM_STATUS_RETRY_COMP : zfCoreEvent(dev, 1, u8rsp); break; case ZM_STATUS_TX_FAILED : zfCoreEvent(dev, 2, u8rsp); break; case ZM_STATUS_TX_COMP : zfCoreEvent(dev, 3, u8rsp); break; } } else if (type == 0xC2) { zfBeaconCfgInterrupt(dev, u8rsp); } else if (type == 0xC3) { zfEndOfAtimWindowInterrupt(dev); } else if (type == 0xC4) { #if 0 { u16_t i; DbgPrint("0xC2:rspLen=%d\n", rspLen); for (i=0; i<(rspLen/4); i++) { DbgPrint("0xC2:rsp[%d]=0x%lx\n", i, rsp[i]); } } #endif bitmap = (rsp[1] >> 16) + ((rsp[2] & 0xFFFF) << 16 ); //zfBawCore(dev, (u16_t)rsp[1] & 0xFFFF, bitmap, (u16_t)(rsp[2] >> 16) & 0xFF); } else if (type == 0xC5) { u16_t i; #if 0 for (i=0; i<(rspLen/4); i++) { DbgPrint("0xC5:rsp[%d]=0x%lx\n", i, rsp[i]); } #endif for (i=1; i<(rspLen/4); i++) { u8rsp = (u8_t *)(rsp+i); //DbgPrint("0xC5:rsp[%d]=0x%lx\n", i, ((u32_t*)u8rsp)[0]); zfCoreEvent(dev, 4, u8rsp); } } else if (type == 0xC6) { zm_debug_msg0("\n\n WatchDog interrupt!!! : 0xC6 \n\n"); if (wd->zfcbHwWatchDogNotify != NULL) { wd->zfcbHwWatchDogNotify(dev); } } else if (type == 0xC8) { //PZSW_ADAPTER adapter; // for SPI flash program chk Flag zfwDbgProgrameFlashChkDone(dev); } else if (type == 0xC9) { struct zsHpPriv* hpPriv=wd->hpPrivate; zm_debug_msg0("##### Tx retransmission 5 times event #####"); /* correct tx retransmission issue */ hpPriv->retransmissionEvent = 1; } } else { zfIdlRsp(dev, rsp, rspLen); } } #define ZM_PROGRAM_RAM_ADDR 0x200000 //0x1000 //0x700000 #define FIRMWARE_DOWNLOAD 0x30 #define FIRMWARE_DOWNLOAD_COMP 0x31 #define FIRMWARE_CONFIRM 0x32 u16_t zfFirmwareDownload(zdev_t* dev, u32_t* fw, u32_t len, u32_t offset) { u16_t ret = ZM_SUCCESS; u32_t uCodeOfst = offset; u8_t *image, *ptr; u32_t result; image = (u8_t*) fw; ptr = image; while (len > 0) { u32_t translen = (len > 4096) ? 
4096 : len; result = zfwUsbSubmitControl(dev, FIRMWARE_DOWNLOAD, (u16_t) (uCodeOfst >> 8), 0, image, translen); if (result != ZM_SUCCESS) { zm_msg0_init(ZM_LV_0, "FIRMWARE_DOWNLOAD failed"); ret = 1; goto exit; } len -= translen; image += translen; uCodeOfst += translen; // in Word (16 bit) result = 0; } /* If download firmware success, issue a command to firmware */ if (ret == 0) { result = zfwUsbSubmitControl(dev, FIRMWARE_DOWNLOAD_COMP, 0, 0, NULL, 0); if (result != ZM_SUCCESS) { zm_msg0_init(ZM_LV_0, "FIRMWARE_DOWNLOAD_COMP failed"); ret = 1; goto exit; } } #if 0 /* PCI code */ /* Wait for firmware ready */ result = zfwUsbSubmitControl(dev, FIRMWARE_CONFIRM, USB_DIR_IN | 0x40, 0, 0, &ret_value, sizeof(ret_value), HZ); if (result != 0) { zm_msg0_init(ZM_LV_0, "Can't receive firmware ready: ", result); ret = 1; } #endif exit: return ret; } u16_t zfFirmwareDownloadNotJump(zdev_t* dev, u32_t* fw, u32_t len, u32_t offset) { u16_t ret = ZM_SUCCESS; u32_t uCodeOfst = offset; u8_t *image, *ptr; u32_t result; image = (u8_t*) fw; ptr = image; while (len > 0) { u32_t translen = (len > 4096) ? 4096 : len; result = zfwUsbSubmitControl(dev, FIRMWARE_DOWNLOAD, (u16_t) (uCodeOfst >> 8), 0, image, translen); if (result != ZM_SUCCESS) { zm_msg0_init(ZM_LV_0, "FIRMWARE_DOWNLOAD failed"); ret = 1; goto exit; } len -= translen; image += translen; uCodeOfst += translen; // in Word (16 bit) result = 0; } exit: return ret; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfIdlGetFreeTxdCount */ /* Get free PCI PCI TxD count. 
*/ /* */ /* INPUTS */ /* dev : device pointer */ /* */ /* OUTPUTS */ /* None */ /* */ /* AUTHOR */ /* Stephen ZyDAS Technology Corporation 2006.6 */ /* */ /************************************************************************/ u32_t zfHpGetFreeTxdCount(zdev_t* dev) { return zfwUsbGetFreeTxQSize(dev); } u32_t zfHpGetMaxTxdCount(zdev_t* dev) { //return 8; return zfwUsbGetMaxTxQSize(dev); } void zfiUsbRegOutComplete(zdev_t* dev) { return; } extern void zfPushVtxq(zdev_t* dev); void zfiUsbOutComplete(zdev_t* dev, zbuf_t *buf, u8_t status, u8_t *hdr) { #ifndef ZM_ENABLE_AGGREGATION if (buf) { zfwBufFree(dev, buf, 0); } #else #ifdef ZM_BYPASS_AGGR_SCHEDULING //Simply free the buf since BA retransmission is done in the firmware if (buf) { zfwBufFree(dev, buf, 0); } zfPushVtxq(dev); #else zmw_get_wlan_dev(dev); #ifdef ZM_ENABLE_FW_BA_RETRANSMISSION //Simply free the buf since BA retransmission is done in the firmware if (buf) { zfwBufFree(dev, buf, 0); } #else u8_t agg; u16_t frameType; if(!hdr && buf) { zfwBufFree(dev, buf, 0); //zm_debug_msg0("buf Free due to hdr == NULL"); return; } if(hdr && buf) { frameType = hdr[8] & 0xf; agg = (u8_t)(hdr[2] >> 5 ) & 0x1; //zm_debug_msg1("AGG=", agg); if (!status) { if (agg) { //delete buf in ba fail queue?? //not ganna happen? } else { zfwBufFree(dev, buf, 0); } } else { if (agg) { //don't do anything //zfwBufFree(dev, buf, 0); } else { zfwBufFree(dev, buf, 0); } } } #endif if (wd->state != ZM_WLAN_STATE_ENABLED) { return; } if( (wd->wlanMode == ZM_MODE_AP) || (wd->wlanMode == ZM_MODE_INFRASTRUCTURE && wd->sta.EnableHT) || (wd->wlanMode == ZM_MODE_PSEUDO) ) { zfAggTxScheduler(dev, 0); } #endif #endif return; }
gpl-2.0
Warter21/linux-4.0_imx6
drivers/usb/usbip/vhci_sysfs.c
1511
6325
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 */

/*
 * Sysfs interface for the USB/IP virtual host controller (vhci_hcd):
 *  - "status" (RO): one text line per root-hub port describing its state.
 *  - "detach" (WO): tear down the virtual connection on a given port.
 *  - "attach" (WO): bind an established TCP socket + remote device id to
 *    a free port and start the rx/tx kthreads.
 */

#include <linux/kthread.h>
#include <linux/file.h>
#include <linux/net.h>

#include "usbip_common.h"
#include "vhci.h"

/* TODO: refine locking ?*/

/* Sysfs entry to show port status */
/*
 * Print one line per root-hub port into @out.  Holds the controller lock
 * for the whole walk and each per-port ud.lock while that port is printed.
 *
 * NOTE(review): "%16p" prints the raw kernel socket pointer to userspace;
 * presumably userland matches it against /proc/net/{tcp,tcp6} (see comment
 * below), but this leaks a kernel address — upstream later obfuscated it.
 */
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
			   char *out)
{
	char *s = out;
	int i = 0;

	BUG_ON(!the_controller || !out);

	spin_lock(&the_controller->lock);

	/*
	 * output example:
	 * prt sta spd dev socket           local_busid
	 * 000 004 000 000        c5a7bb80 1-2.3
	 * 001 004 000 000        d8cee980 2-3.4
	 *
	 * IP address can be retrieved from a socket pointer address by looking
	 * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
	 * port number and its peer IP address.
	 */
	out += sprintf(out,
		       "prt sta spd bus dev socket           local_busid\n");

	for (i = 0; i < VHCI_NPORTS; i++) {
		struct vhci_device *vdev = port_to_vdev(i);

		spin_lock(&vdev->ud.lock);
		out += sprintf(out, "%03u %03u ", i, vdev->ud.status);

		if (vdev->ud.status == VDEV_ST_USED) {
			/* in-use port: speed, devid, socket ptr, busid */
			out += sprintf(out, "%03u %08x ",
				       vdev->speed, vdev->devid);
			out += sprintf(out, "%16p ", vdev->ud.tcp_socket);
			out += sprintf(out, "%s", dev_name(&vdev->udev->dev));

		} else {
			/* placeholder keeps the columns aligned */
			out += sprintf(out, "000 000 000 0000000000000000 0-0");
		}

		out += sprintf(out, "\n");
		spin_unlock(&vdev->ud.lock);
	}

	spin_unlock(&the_controller->lock);

	return out - s;
}
static DEVICE_ATTR_RO(status);

/* Sysfs entry to shutdown a virtual connection */
/*
 * Queue a VDEV_EVENT_DOWN for port @rhport.  Returns -EINVAL if the port
 * is not connected (VDEV_ST_NULL); the actual teardown happens in the
 * event handler, not here.  Caller must have validated @rhport.
 */
static int vhci_port_disconnect(__u32 rhport)
{
	struct vhci_device *vdev;

	usbip_dbg_vhci_sysfs("enter\n");

	/* lock */
	spin_lock(&the_controller->lock);

	vdev = port_to_vdev(rhport);

	spin_lock(&vdev->ud.lock);
	if (vdev->ud.status == VDEV_ST_NULL) {
		pr_err("not connected %d\n", vdev->ud.status);

		/* unlock */
		spin_unlock(&vdev->ud.lock);
		spin_unlock(&the_controller->lock);

		return -EINVAL;
	}

	/* unlock */
	spin_unlock(&vdev->ud.lock);
	spin_unlock(&the_controller->lock);

	/* dropped both locks before queuing the event — teardown is async */
	usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);

	return 0;
}

/*
 * "detach" store handler.  Userland writes a decimal port number;
 * returns @count on success, -EINVAL on a bad port or a disconnect error.
 */
static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int err;
	__u32 rhport = 0;

	if (sscanf(buf, "%u", &rhport) != 1)
		return -EINVAL;

	/* check rhport */
	if (rhport >= VHCI_NPORTS) {
		dev_err(dev, "invalid port %u\n", rhport);
		return -EINVAL;
	}

	err = vhci_port_disconnect(rhport);
	if (err < 0)
		return -EINVAL;

	usbip_dbg_vhci_sysfs("Leave\n");

	return count;
}
static DEVICE_ATTR(detach, S_IWUSR, NULL, store_detach);

/* Sysfs entry to establish a virtual connection */
/*
 * Validate the port index and USB speed coming from userland.
 * Only LOW/FULL/HIGH/WIRELESS speeds are accepted here.
 */
static int valid_args(__u32 rhport, enum usb_device_speed speed)
{
	/* check rhport */
	if (rhport >= VHCI_NPORTS) {
		pr_err("port %u\n", rhport);
		return -EINVAL;
	}

	/* check speed */
	switch (speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_WIRELESS:
		break;
	default:
		pr_err("Failed attach request for unsupported USB speed: %s\n",
		       usb_speed_string(speed));
		return -EINVAL;
	}

	return 0;
}

/*
 * To start a new USB/IP attachment, a userland program needs to setup a TCP
 * connection and then write its socket descriptor with remote device
 * information into this sysfs file.
 *
 * A remote device is virtually attached to the root-hub port of @rhport with
 * @speed. @devid is embedded into a request to specify the remote device in a
 * server host.
 *
 * write() returns 0 on success, else negative errno.
 */
static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct vhci_device *vdev;
	struct socket *socket;
	int sockfd = 0;
	__u32 rhport = 0, devid = 0, speed = 0;
	int err;

	/*
	 * @rhport: port number of vhci_hcd
	 * @sockfd: socket descriptor of an established TCP connection
	 * @devid: unique device identifier in a remote host
	 * @speed: usb device speed in a remote host
	 */
	if (sscanf(buf, "%u %u %u %u", &rhport, &sockfd, &devid, &speed) != 4)
		return -EINVAL;

	usbip_dbg_vhci_sysfs("rhport(%u) sockfd(%u) devid(%u) speed(%u)\n",
			     rhport, sockfd, devid, speed);

	/* check received parameters */
	if (valid_args(rhport, speed) < 0)
		return -EINVAL;

	/* Extract socket from fd. */
	/* takes a reference; must be dropped on every failure path below */
	socket = sockfd_lookup(sockfd, &err);
	if (!socket)
		return -EINVAL;

	/* now need lock until setting vdev status as used */

	/* begin a lock */
	spin_lock(&the_controller->lock);
	vdev = port_to_vdev(rhport);
	spin_lock(&vdev->ud.lock);

	if (vdev->ud.status != VDEV_ST_NULL) {
		/* end of the lock */
		spin_unlock(&vdev->ud.lock);
		spin_unlock(&the_controller->lock);

		/* drop the socket reference taken above */
		sockfd_put(socket);

		dev_err(dev, "port %d already used\n", rhport);
		return -EINVAL;
	}

	dev_info(dev,
		 "rhport(%u) sockfd(%d) devid(%u) speed(%u) speed_str(%s)\n",
		 rhport, sockfd, devid, speed, usb_speed_string(speed));

	vdev->devid         = devid;
	vdev->speed         = speed;
	vdev->ud.tcp_socket = socket;
	vdev->ud.status     = VDEV_ST_NOTASSIGNED;

	spin_unlock(&vdev->ud.lock);
	spin_unlock(&the_controller->lock);
	/* end the lock */

	/*
	 * NOTE(review): kthread_get_run() results are not checked here; if
	 * thread creation fails the port is left half-attached — confirm
	 * against the error-handling path in the ud event machinery.
	 */
	vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
	vdev->ud.tcp_tx = kthread_get_run(vhci_tx_loop, &vdev->ud, "vhci_tx");

	rh_port_connect(rhport, speed);

	return count;
}
static DEVICE_ATTR(attach, S_IWUSR, NULL, store_attach);

/* attributes exposed under the platform device's sysfs directory */
static struct attribute *dev_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_detach.attr,
	&dev_attr_attach.attr,
	&dev_attr_usbip_debug.attr,	/* defined in usbip_common */
	NULL,
};

const struct attribute_group dev_attr_group = {
	.attrs = dev_attrs,
};
gpl-2.0
priyatransbit/linux
arch/arm/boot/compressed/decompress.c
1767
1571
#define _LINUX_STRING_H_ #include <linux/compiler.h> /* for inline */ #include <linux/types.h> /* for size_t */ #include <linux/stddef.h> /* for NULL */ #include <linux/linkage.h> #include <asm/string.h> extern unsigned long free_mem_ptr; extern unsigned long free_mem_end_ptr; extern void error(char *); #define STATIC static #define STATIC_RW_DATA /* non-static please */ /* Diagnostic functions */ #ifdef DEBUG # define Assert(cond,msg) {if(!(cond)) error(msg);} # define Trace(x) fprintf x # define Tracev(x) {if (verbose) fprintf x ;} # define Tracevv(x) {if (verbose>1) fprintf x ;} # define Tracec(c,x) {if (verbose && (c)) fprintf x ;} # define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} #else # define Assert(cond,msg) # define Trace(x) # define Tracev(x) # define Tracevv(x) # define Tracec(c,x) # define Tracecv(c,x) #endif /* Not needed, but used in some headers pulled in by decompressors */ extern char * strstr(const char * s1, const char *s2); #ifdef CONFIG_KERNEL_GZIP #include "../../../../lib/decompress_inflate.c" #endif #ifdef CONFIG_KERNEL_LZO #include "../../../../lib/decompress_unlzo.c" #endif #ifdef CONFIG_KERNEL_LZMA #include "../../../../lib/decompress_unlzma.c" #endif #ifdef CONFIG_KERNEL_XZ #define memmove memmove #define memcpy memcpy #include "../../../../lib/decompress_unxz.c" #endif #ifdef CONFIG_KERNEL_LZ4 #include "../../../../lib/decompress_unlz4.c" #endif int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) { return decompress(input, len, NULL, NULL, output, NULL, error); }
gpl-2.0
crpalmer/android_kernel_huawei_kiwi
arch/x86/kernel/apic/es7000_32.c
2279
17501
/* * Written by: Garry Forsgren, Unisys Corporation * Natalie Protasevich, Unisys Corporation * * This file contains the code to configure and interface * with Unisys ES7000 series hardware system manager. * * Copyright (c) 2003 Unisys Corporation. * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar * * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston MA 02111-1307, USA. * * Contact information: Unisys Corporation, Township Line & Union Meeting * Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or: * * http://www.unisys.com */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/notifier.h> #include <linux/spinlock.h> #include <linux/cpumask.h> #include <linux/threads.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/reboot.h> #include <linux/string.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/acpi.h> #include <linux/init.h> #include <linux/gfp.h> #include <linux/nmi.h> #include <linux/smp.h> #include <linux/io.h> #include <asm/apicdef.h> #include <linux/atomic.h> #include <asm/fixmap.h> #include <asm/mpspec.h> #include <asm/setup.h> #include <asm/apic.h> #include <asm/ipi.h> /* * ES7000 chipsets */ #define NON_UNISYS 0 #define ES7000_CLASSIC 1 #define ES7000_ZORRO 2 #define MIP_REG 1 #define MIP_PSAI_REG 4 #define MIP_BUSY 1 #define MIP_SPIN 0xf0000 #define MIP_VALID 0x0100000000000000ULL #define MIP_SW_APIC 0x1020b #define MIP_PORT(val) ((val >> 32) & 0xffff) #define MIP_RD_LO(val) (val 
& 0xffffffff) struct mip_reg { unsigned long long off_0x00; unsigned long long off_0x08; unsigned long long off_0x10; unsigned long long off_0x18; unsigned long long off_0x20; unsigned long long off_0x28; unsigned long long off_0x30; unsigned long long off_0x38; }; struct mip_reg_info { unsigned long long mip_info; unsigned long long delivery_info; unsigned long long host_reg; unsigned long long mip_reg; }; struct psai { unsigned long long entry_type; unsigned long long addr; unsigned long long bep_addr; }; #ifdef CONFIG_ACPI struct es7000_oem_table { struct acpi_table_header Header; u32 OEMTableAddr; u32 OEMTableSize; }; static unsigned long oem_addrX; static unsigned long oem_size; #endif /* * ES7000 Globals */ static volatile unsigned long *psai; static struct mip_reg *mip_reg; static struct mip_reg *host_reg; static int mip_port; static unsigned long mip_addr; static unsigned long host_addr; int es7000_plat; /* * GSI override for ES7000 platforms. */ static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) { unsigned long vect = 0, psaival = 0; if (psai == NULL) return -1; vect = ((unsigned long)__pa(eip)/0x1000) << 16; psaival = (0x1000000 | vect | cpu); while (*psai & 0x1000000) ; *psai = psaival; return 0; } static int es7000_apic_is_cluster(void) { /* MPENTIUMIII */ if (boot_cpu_data.x86 == 6 && (boot_cpu_data.x86_model >= 7 && boot_cpu_data.x86_model <= 11)) return 1; return 0; } static void setup_unisys(void) { /* * Determine the generation of the ES7000 currently running. 
* * es7000_plat = 1 if the machine is a 5xx ES7000 box * es7000_plat = 2 if the machine is a x86_64 ES7000 box * */ if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2)) es7000_plat = ES7000_ZORRO; else es7000_plat = ES7000_CLASSIC; } /* * Parse the OEM Table: */ static int parse_unisys_oem(char *oemptr) { int i; int success = 0; unsigned char type, size; unsigned long val; char *tp = NULL; struct psai *psaip = NULL; struct mip_reg_info *mi; struct mip_reg *host, *mip; tp = oemptr; tp += 8; for (i = 0; i <= 6; i++) { type = *tp++; size = *tp++; tp -= 2; switch (type) { case MIP_REG: mi = (struct mip_reg_info *)tp; val = MIP_RD_LO(mi->host_reg); host_addr = val; host = (struct mip_reg *)val; host_reg = __va(host); val = MIP_RD_LO(mi->mip_reg); mip_port = MIP_PORT(mi->mip_info); mip_addr = val; mip = (struct mip_reg *)val; mip_reg = __va(mip); pr_debug("host_reg = 0x%lx\n", (unsigned long)host_reg); pr_debug("mip_reg = 0x%lx\n", (unsigned long)mip_reg); success++; break; case MIP_PSAI_REG: psaip = (struct psai *)tp; if (tp != NULL) { if (psaip->addr) psai = __va(psaip->addr); else psai = NULL; success++; } break; default: break; } tp += size; } if (success < 2) es7000_plat = NON_UNISYS; else setup_unisys(); return es7000_plat; } #ifdef CONFIG_ACPI static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) { struct acpi_table_header *header = NULL; struct es7000_oem_table *table; acpi_size tbl_size; acpi_status ret; int i = 0; for (;;) { ret = acpi_get_table_with_size("OEM1", i++, &header, &tbl_size); if (!ACPI_SUCCESS(ret)) return -1; if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) break; early_acpi_os_unmap_memory(header, tbl_size); } table = (void *)header; oem_addrX = table->OEMTableAddr; oem_size = table->OEMTableSize; early_acpi_os_unmap_memory(header, tbl_size); *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, oem_size); return 0; } static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) { if (!oem_addr) return; 
__acpi_unmap_table((char *)oem_addr, oem_size); } static int es7000_check_dsdt(void) { struct acpi_table_header header; if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) && !strncmp(header.oem_id, "UNISYS", 6)) return 1; return 0; } static int es7000_acpi_ret; /* Hook from generic ACPI tables.c */ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { unsigned long oem_addr = 0; int check_dsdt; int ret = 0; /* check dsdt at first to avoid clear fix_map for oem_addr */ check_dsdt = es7000_check_dsdt(); if (!find_unisys_acpi_oem_table(&oem_addr)) { if (check_dsdt) { ret = parse_unisys_oem((char *)oem_addr); } else { setup_unisys(); ret = 1; } /* * we need to unmap it */ unmap_unisys_acpi_oem_table(oem_addr); } es7000_acpi_ret = ret; return ret && !es7000_apic_is_cluster(); } static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id) { int ret = es7000_acpi_ret; return ret && es7000_apic_is_cluster(); } #else /* !CONFIG_ACPI: */ static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id) { return 0; } #endif /* !CONFIG_ACPI */ static void es7000_spin(int n) { int i = 0; while (i++ < n) rep_nop(); } static int es7000_mip_write(struct mip_reg *mip_reg) { int status = 0; int spin; spin = MIP_SPIN; while ((host_reg->off_0x38 & MIP_VALID) != 0) { if (--spin <= 0) { WARN(1, "Timeout waiting for Host Valid Flag\n"); return -1; } es7000_spin(MIP_SPIN); } memcpy(host_reg, mip_reg, sizeof(struct mip_reg)); outb(1, mip_port); spin = MIP_SPIN; while ((mip_reg->off_0x38 & MIP_VALID) == 0) { if (--spin <= 0) { WARN(1, "Timeout waiting for MIP Valid Flag\n"); return -1; } es7000_spin(MIP_SPIN); } status = (mip_reg->off_0x00 & 0xffff0000000000ULL) >> 48; mip_reg->off_0x38 &= ~MIP_VALID; return status; } static void es7000_enable_apic_mode(void) { struct mip_reg es7000_mip_reg; int mip_status; if (!es7000_plat) 
return; pr_info("Enabling APIC mode.\n"); memset(&es7000_mip_reg, 0, sizeof(struct mip_reg)); es7000_mip_reg.off_0x00 = MIP_SW_APIC; es7000_mip_reg.off_0x38 = MIP_VALID; while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) WARN(1, "Command failed, status = %x\n", mip_status); } static void es7000_wait_for_init_deassert(atomic_t *deassert) { while (!atomic_read(deassert)) cpu_relax(); } static unsigned int es7000_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; } static void es7000_send_IPI_mask(const struct cpumask *mask, int vector) { default_send_IPI_mask_sequence_phys(mask, vector); } static void es7000_send_IPI_allbutself(int vector) { default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); } static void es7000_send_IPI_all(int vector) { es7000_send_IPI_mask(cpu_online_mask, vector); } static int es7000_apic_id_registered(void) { return 1; } static const struct cpumask *target_cpus_cluster(void) { return cpu_all_mask; } static const struct cpumask *es7000_target_cpus(void) { return cpumask_of(smp_processor_id()); } static unsigned long es7000_check_apicid_used(physid_mask_t *map, int apicid) { return 0; } static unsigned long es7000_check_apicid_present(int bit) { return physid_isset(bit, phys_cpu_present_map); } static int es7000_early_logical_apicid(int cpu) { /* on es7000, logical apicid is the same as physical */ return early_per_cpu(x86_bios_cpu_apicid, cpu); } static unsigned long calculate_ldr(int cpu) { unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu); return SET_APIC_LOGICAL_ID(id); } /* * Set up the logical destination ID. * * Intel recommends to set DFR, LdR and TPR before enabling * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... 
*/ static void es7000_init_apic_ldr_cluster(void) { unsigned long val; int cpu = smp_processor_id(); apic_write(APIC_DFR, APIC_DFR_CLUSTER); val = calculate_ldr(cpu); apic_write(APIC_LDR, val); } static void es7000_init_apic_ldr(void) { unsigned long val; int cpu = smp_processor_id(); apic_write(APIC_DFR, APIC_DFR_FLAT); val = calculate_ldr(cpu); apic_write(APIC_LDR, val); } static void es7000_setup_apic_routing(void) { int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); pr_info("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", (apic_version[apic] == 0x14) ? "Physical Cluster" : "Logical Cluster", nr_ioapics, cpumask_bits(es7000_target_cpus())[0]); } static int es7000_cpu_present_to_apicid(int mps_cpu) { if (!mps_cpu) return boot_cpu_physical_apicid; else if (mps_cpu < nr_cpu_ids) return per_cpu(x86_bios_cpu_apicid, mps_cpu); else return BAD_APICID; } static int cpu_id; static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap) { physid_set_mask_of_physid(cpu_id, retmap); ++cpu_id; } static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) { /* For clustered we don't have a good way to do this yet - hack */ physids_promote(0xFFL, retmap); } static int es7000_check_phys_apicid_present(int cpu_physical_apicid) { boot_cpu_physical_apicid = read_apic_id(); return 1; } static inline int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id) { unsigned int round = 0; unsigned int cpu, uninitialized_var(apicid); /* * The cpus in the mask must all be on the apic cluster. 
*/ for_each_cpu_and(cpu, cpumask, cpu_online_mask) { int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { WARN(1, "Not a valid mask!"); return -EINVAL; } apicid |= new_apicid; round++; } if (!round) return -EINVAL; *dest_id = apicid; return 0; } static int es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask, unsigned int *apicid) { cpumask_var_t cpumask; *apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) return 0; cpumask_and(cpumask, inmask, andmask); es7000_cpu_mask_to_apicid(cpumask, apicid); free_cpumask_var(cpumask); return 0; } static int es7000_phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } static int probe_es7000(void) { /* probed later in mptable/ACPI hooks */ return 0; } static int es7000_mps_ret; static int es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { int ret = 0; if (mpc->oemptr) { struct mpc_oemtable *oem_table = (struct mpc_oemtable *)mpc->oemptr; if (!strncmp(oem, "UNISYS", 6)) ret = parse_unisys_oem((char *)oem_table); } es7000_mps_ret = ret; return ret && !es7000_apic_is_cluster(); } static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem, char *productid) { int ret = es7000_mps_ret; return ret && es7000_apic_is_cluster(); } /* We've been warned by a false positive warning.Use __refdata to keep calm. 
*/ static struct apic __refdata apic_es7000_cluster = { .name = "es7000", .probe = probe_es7000, .acpi_madt_oem_check = es7000_acpi_madt_oem_check_cluster, .apic_id_valid = default_apic_id_valid, .apic_id_registered = es7000_apic_id_registered, .irq_delivery_mode = dest_LowestPrio, /* logical delivery broadcast to all procs: */ .irq_dest_mode = 1, .target_cpus = target_cpus_cluster, .disable_esr = 1, .dest_logical = 0, .check_apicid_used = es7000_check_apicid_used, .check_apicid_present = es7000_check_apicid_present, .vector_allocation_domain = flat_vector_allocation_domain, .init_apic_ldr = es7000_init_apic_ldr_cluster, .ioapic_phys_id_map = es7000_ioapic_phys_id_map, .setup_apic_routing = es7000_setup_apic_routing, .multi_timer_check = NULL, .cpu_present_to_apicid = es7000_cpu_present_to_apicid, .apicid_to_cpu_present = es7000_apicid_to_cpu_present, .setup_portio_remap = NULL, .check_phys_apicid_present = es7000_check_phys_apicid_present, .enable_apic_mode = es7000_enable_apic_mode, .phys_pkg_id = es7000_phys_pkg_id, .mps_oem_check = es7000_mps_oem_check_cluster, .get_apic_id = es7000_get_apic_id, .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, .send_IPI_mask = es7000_send_IPI_mask, .send_IPI_mask_allbutself = NULL, .send_IPI_allbutself = es7000_send_IPI_allbutself, .send_IPI_all = es7000_send_IPI_all, .send_IPI_self = default_send_IPI_self, .wakeup_secondary_cpu = wakeup_secondary_cpu_via_mip, .trampoline_phys_low = 0x467, .trampoline_phys_high = 0x469, .wait_for_init_deassert = NULL, /* Nothing to do for most platforms, since cleared by the INIT cycle: */ .smp_callin_clear_local_apic = NULL, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, .write = native_apic_mem_write, .eoi_write = native_apic_mem_write, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, 
.x86_32_early_logical_apicid = es7000_early_logical_apicid, }; static struct apic __refdata apic_es7000 = { .name = "es7000", .probe = probe_es7000, .acpi_madt_oem_check = es7000_acpi_madt_oem_check, .apic_id_valid = default_apic_id_valid, .apic_id_registered = es7000_apic_id_registered, .irq_delivery_mode = dest_Fixed, /* phys delivery to target CPUs: */ .irq_dest_mode = 0, .target_cpus = es7000_target_cpus, .disable_esr = 1, .dest_logical = 0, .check_apicid_used = es7000_check_apicid_used, .check_apicid_present = es7000_check_apicid_present, .vector_allocation_domain = flat_vector_allocation_domain, .init_apic_ldr = es7000_init_apic_ldr, .ioapic_phys_id_map = es7000_ioapic_phys_id_map, .setup_apic_routing = es7000_setup_apic_routing, .multi_timer_check = NULL, .cpu_present_to_apicid = es7000_cpu_present_to_apicid, .apicid_to_cpu_present = es7000_apicid_to_cpu_present, .setup_portio_remap = NULL, .check_phys_apicid_present = es7000_check_phys_apicid_present, .enable_apic_mode = es7000_enable_apic_mode, .phys_pkg_id = es7000_phys_pkg_id, .mps_oem_check = es7000_mps_oem_check, .get_apic_id = es7000_get_apic_id, .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, .send_IPI_mask = es7000_send_IPI_mask, .send_IPI_mask_allbutself = NULL, .send_IPI_allbutself = es7000_send_IPI_allbutself, .send_IPI_all = es7000_send_IPI_all, .send_IPI_self = default_send_IPI_self, .trampoline_phys_low = 0x467, .trampoline_phys_high = 0x469, .wait_for_init_deassert = es7000_wait_for_init_deassert, /* Nothing to do for most platforms, since cleared by the INIT cycle: */ .smp_callin_clear_local_apic = NULL, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, .write = native_apic_mem_write, .eoi_write = native_apic_mem_write, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, 
.x86_32_early_logical_apicid = es7000_early_logical_apicid, }; /* * Need to check for es7000 followed by es7000_cluster, so this order * in apic_drivers is important. */ apic_drivers(apic_es7000, apic_es7000_cluster);
gpl-2.0
Cl3Kener/TIBERIUS
arch/arm/mach-imx/mach-bug.c
2279
1776
/* * Copyright (C) 2000 Deep Blue Solutions Ltd * Copyright (C) 2002 Shane Nay (shane@minirl.com) * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright 2011 Denis 'GNUtoo' Carikli <GNUtoo@no-log.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <mach/iomux-mx3.h> #include <mach/hardware.h> #include <mach/common.h> #include <asm/mach/time.h> #include <asm/mach/arch.h> #include <asm/mach-types.h> #include "devices-imx31.h" static const struct imxuart_platform_data uart_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; static const unsigned int bug_pins[] __initconst = { MX31_PIN_PC_RST__CTS5, MX31_PIN_PC_VS2__RTS5, MX31_PIN_PC_BVD2__TXD5, MX31_PIN_PC_BVD1__RXD5, }; static void __init bug_board_init(void) { mxc_iomux_setup_multiple_pins(bug_pins, ARRAY_SIZE(bug_pins), "uart-4"); imx31_add_imx_uart4(&uart_pdata); } static void __init bug_timer_init(void) { mx31_clocks_init(26000000); } static struct sys_timer bug_timer = { .init = bug_timer_init, }; MACHINE_START(BUG, "BugLabs BUGBase") .map_io = mx31_map_io, .init_early = imx31_init_early, .init_irq = mx31_init_irq, .timer = &bug_timer, .init_machine = bug_board_init, MACHINE_END
gpl-2.0
Pesach85/PH85-KERNEL
drivers/media/video/bt8xx/bttv-cards.c
2535
150511
/* bttv-cards.c this file has configuration informations - card-specific stuff like the big tvcards array for the most part Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de) & Marcus Metzler (mocm@thp.uni-koeln.de) (c) 1999-2001 Gerd Knorr <kraxel@goldbach.in-berlin.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/delay.h> #include <linux/module.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/firmware.h> #include <net/checksum.h> #include <asm/unaligned.h> #include <asm/io.h> #include "bttvp.h" #include <media/v4l2-common.h> #include <media/tvaudio.h> #include "bttv-audio-hook.h" /* fwd decl */ static void boot_msp34xx(struct bttv *btv, int pin); static void hauppauge_eeprom(struct bttv *btv); static void avermedia_eeprom(struct bttv *btv); static void osprey_eeprom(struct bttv *btv, const u8 ee[256]); static void modtec_eeprom(struct bttv *btv); static void init_PXC200(struct bttv *btv); static void init_RTV24(struct bttv *btv); static void rv605_muxsel(struct bttv *btv, unsigned int input); static void eagle_muxsel(struct bttv *btv, unsigned int input); static void xguard_muxsel(struct bttv *btv, unsigned int input); static void ivc120_muxsel(struct bttv *btv, unsigned int input); static void gvc1100_muxsel(struct bttv *btv, unsigned int input); static void 
PXC200_muxsel(struct bttv *btv, unsigned int input); static void picolo_tetra_muxsel(struct bttv *btv, unsigned int input); static void picolo_tetra_init(struct bttv *btv); static void tibetCS16_muxsel(struct bttv *btv, unsigned int input); static void tibetCS16_init(struct bttv *btv); static void kodicom4400r_muxsel(struct bttv *btv, unsigned int input); static void kodicom4400r_init(struct bttv *btv); static void sigmaSLC_muxsel(struct bttv *btv, unsigned int input); static void sigmaSQ_muxsel(struct bttv *btv, unsigned int input); static void geovision_muxsel(struct bttv *btv, unsigned int input); static void phytec_muxsel(struct bttv *btv, unsigned int input); static void gv800s_muxsel(struct bttv *btv, unsigned int input); static void gv800s_init(struct bttv *btv); static int terratec_active_radio_upgrade(struct bttv *btv); static int tea5757_read(struct bttv *btv); static int tea5757_write(struct bttv *btv, int value); static void identify_by_eeprom(struct bttv *btv, unsigned char eeprom_data[256]); static int __devinit pvr_boot(struct bttv *btv); /* config variables */ static unsigned int triton1; static unsigned int vsfx; static unsigned int latency = UNSET; int no_overlay=-1; static unsigned int card[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = UNSET }; static unsigned int pll[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = UNSET }; static unsigned int tuner[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = UNSET }; static unsigned int svhs[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = UNSET }; static unsigned int remote[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = UNSET }; static unsigned int audiodev[BTTV_MAX]; static unsigned int saa6588[BTTV_MAX]; static struct bttv *master[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = NULL }; static unsigned int autoload = UNSET; static unsigned int gpiomask = UNSET; static unsigned int audioall = UNSET; static unsigned int audiomux[5] = { [ 0 ... 
4 ] = UNSET }; /* insmod options */ module_param(triton1, int, 0444); module_param(vsfx, int, 0444); module_param(no_overlay, int, 0444); module_param(latency, int, 0444); module_param(gpiomask, int, 0444); module_param(audioall, int, 0444); module_param(autoload, int, 0444); module_param_array(card, int, NULL, 0444); module_param_array(pll, int, NULL, 0444); module_param_array(tuner, int, NULL, 0444); module_param_array(svhs, int, NULL, 0444); module_param_array(remote, int, NULL, 0444); module_param_array(audiodev, int, NULL, 0444); module_param_array(audiomux, int, NULL, 0444); MODULE_PARM_DESC(triton1,"set ETBF pci config bit " "[enable bug compatibility for triton1 + others]"); MODULE_PARM_DESC(vsfx,"set VSFX pci config bit " "[yet another chipset flaw workaround]"); MODULE_PARM_DESC(latency,"pci latency timer"); MODULE_PARM_DESC(card,"specify TV/grabber card model, see CARDLIST file for a list"); MODULE_PARM_DESC(pll,"specify installed crystal (0=none, 28=28 MHz, 35=35 MHz)"); MODULE_PARM_DESC(tuner,"specify installed tuner type"); MODULE_PARM_DESC(autoload, "obsolete option, please do not use anymore"); MODULE_PARM_DESC(audiodev, "specify audio device:\n" "\t\t-1 = no audio\n" "\t\t 0 = autodetect (default)\n" "\t\t 1 = msp3400\n" "\t\t 2 = tda7432\n" "\t\t 3 = tvaudio"); MODULE_PARM_DESC(saa6588, "if 1, then load the saa6588 RDS module, default (0) is to use the card definition."); MODULE_PARM_DESC(no_overlay,"allow override overlay default (0 disables, 1 enables)" " [some VIA/SIS chipsets are known to have problem with overlay]"); /* ----------------------------------------------------------------------- */ /* list of card IDs for bt878+ cards */ static struct CARD { unsigned id; int cardnr; char *name; } cards[] __devinitdata = { { 0x13eb0070, BTTV_BOARD_HAUPPAUGE878, "Hauppauge WinTV" }, { 0x39000070, BTTV_BOARD_HAUPPAUGE878, "Hauppauge WinTV-D" }, { 0x45000070, BTTV_BOARD_HAUPPAUGEPVR, "Hauppauge WinTV/PVR" }, { 0xff000070, BTTV_BOARD_OSPREY1x0, 
"Osprey-100" }, { 0xff010070, BTTV_BOARD_OSPREY2x0_SVID,"Osprey-200" }, { 0xff020070, BTTV_BOARD_OSPREY500, "Osprey-500" }, { 0xff030070, BTTV_BOARD_OSPREY2000, "Osprey-2000" }, { 0xff040070, BTTV_BOARD_OSPREY540, "Osprey-540" }, { 0xff070070, BTTV_BOARD_OSPREY440, "Osprey-440" }, { 0x00011002, BTTV_BOARD_ATI_TVWONDER, "ATI TV Wonder" }, { 0x00031002, BTTV_BOARD_ATI_TVWONDERVE,"ATI TV Wonder/VE" }, { 0x6606107d, BTTV_BOARD_WINFAST2000, "Leadtek WinFast TV 2000" }, { 0x6607107d, BTTV_BOARD_WINFASTVC100, "Leadtek WinFast VC 100" }, { 0x6609107d, BTTV_BOARD_WINFAST2000, "Leadtek TV 2000 XP" }, { 0x263610b4, BTTV_BOARD_STB2, "STB TV PCI FM, Gateway P/N 6000704" }, { 0x264510b4, BTTV_BOARD_STB2, "STB TV PCI FM, Gateway P/N 6000704" }, { 0x402010fc, BTTV_BOARD_GVBCTV3PCI, "I-O Data Co. GV-BCTV3/PCI" }, { 0x405010fc, BTTV_BOARD_GVBCTV4PCI, "I-O Data Co. GV-BCTV4/PCI" }, { 0x407010fc, BTTV_BOARD_GVBCTV5PCI, "I-O Data Co. GV-BCTV5/PCI" }, { 0xd01810fc, BTTV_BOARD_GVBCTV5PCI, "I-O Data Co. GV-BCTV5/PCI" }, { 0x001211bd, BTTV_BOARD_PINNACLE, "Pinnacle PCTV" }, /* some cards ship with byteswapped IDs ... */ { 0x1200bd11, BTTV_BOARD_PINNACLE, "Pinnacle PCTV [bswap]" }, { 0xff00bd11, BTTV_BOARD_PINNACLE, "Pinnacle PCTV [bswap]" }, /* this seems to happen as well ... 
*/ { 0xff1211bd, BTTV_BOARD_PINNACLE, "Pinnacle PCTV" }, { 0x3000121a, BTTV_BOARD_VOODOOTV_200, "3Dfx VoodooTV 200" }, { 0x263710b4, BTTV_BOARD_VOODOOTV_FM, "3Dfx VoodooTV FM" }, { 0x3060121a, BTTV_BOARD_STB2, "3Dfx VoodooTV 100/ STB OEM" }, { 0x3000144f, BTTV_BOARD_MAGICTVIEW063, "(Askey Magic/others) TView99 CPH06x" }, { 0xa005144f, BTTV_BOARD_MAGICTVIEW063, "CPH06X TView99-Card" }, { 0x3002144f, BTTV_BOARD_MAGICTVIEW061, "(Askey Magic/others) TView99 CPH05x" }, { 0x3005144f, BTTV_BOARD_MAGICTVIEW061, "(Askey Magic/others) TView99 CPH061/06L (T1/LC)" }, { 0x5000144f, BTTV_BOARD_MAGICTVIEW061, "Askey CPH050" }, { 0x300014ff, BTTV_BOARD_MAGICTVIEW061, "TView 99 (CPH061)" }, { 0x300214ff, BTTV_BOARD_PHOEBE_TVMAS, "Phoebe TV Master (CPH060)" }, { 0x00011461, BTTV_BOARD_AVPHONE98, "AVerMedia TVPhone98" }, { 0x00021461, BTTV_BOARD_AVERMEDIA98, "AVermedia TVCapture 98" }, { 0x00031461, BTTV_BOARD_AVPHONE98, "AVerMedia TVPhone98" }, { 0x00041461, BTTV_BOARD_AVERMEDIA98, "AVerMedia TVCapture 98" }, { 0x03001461, BTTV_BOARD_AVERMEDIA98, "VDOMATE TV TUNER CARD" }, { 0x1117153b, BTTV_BOARD_TERRATVALUE, "Terratec TValue (Philips PAL B/G)" }, { 0x1118153b, BTTV_BOARD_TERRATVALUE, "Terratec TValue (Temic PAL B/G)" }, { 0x1119153b, BTTV_BOARD_TERRATVALUE, "Terratec TValue (Philips PAL I)" }, { 0x111a153b, BTTV_BOARD_TERRATVALUE, "Terratec TValue (Temic PAL I)" }, { 0x1123153b, BTTV_BOARD_TERRATVRADIO, "Terratec TV Radio+" }, { 0x1127153b, BTTV_BOARD_TERRATV, "Terratec TV+ (V1.05)" }, /* clashes with FlyVideo *{ 0x18521852, BTTV_BOARD_TERRATV, "Terratec TV+ (V1.10)" }, */ { 0x1134153b, BTTV_BOARD_TERRATVALUE, "Terratec TValue (LR102)" }, { 0x1135153b, BTTV_BOARD_TERRATVALUER, "Terratec TValue Radio" }, /* LR102 */ { 0x5018153b, BTTV_BOARD_TERRATVALUE, "Terratec TValue" }, /* ?? */ { 0xff3b153b, BTTV_BOARD_TERRATVALUER, "Terratec TValue Radio" }, /* ?? 
*/ { 0x400015b0, BTTV_BOARD_ZOLTRIX_GENIE, "Zoltrix Genie TV" }, { 0x400a15b0, BTTV_BOARD_ZOLTRIX_GENIE, "Zoltrix Genie TV" }, { 0x400d15b0, BTTV_BOARD_ZOLTRIX_GENIE, "Zoltrix Genie TV / Radio" }, { 0x401015b0, BTTV_BOARD_ZOLTRIX_GENIE, "Zoltrix Genie TV / Radio" }, { 0x401615b0, BTTV_BOARD_ZOLTRIX_GENIE, "Zoltrix Genie TV / Radio" }, { 0x1430aa00, BTTV_BOARD_PV143, "Provideo PV143A" }, { 0x1431aa00, BTTV_BOARD_PV143, "Provideo PV143B" }, { 0x1432aa00, BTTV_BOARD_PV143, "Provideo PV143C" }, { 0x1433aa00, BTTV_BOARD_PV143, "Provideo PV143D" }, { 0x1433aa03, BTTV_BOARD_PV143, "Security Eyes" }, { 0x1460aa00, BTTV_BOARD_PV150, "Provideo PV150A-1" }, { 0x1461aa01, BTTV_BOARD_PV150, "Provideo PV150A-2" }, { 0x1462aa02, BTTV_BOARD_PV150, "Provideo PV150A-3" }, { 0x1463aa03, BTTV_BOARD_PV150, "Provideo PV150A-4" }, { 0x1464aa04, BTTV_BOARD_PV150, "Provideo PV150B-1" }, { 0x1465aa05, BTTV_BOARD_PV150, "Provideo PV150B-2" }, { 0x1466aa06, BTTV_BOARD_PV150, "Provideo PV150B-3" }, { 0x1467aa07, BTTV_BOARD_PV150, "Provideo PV150B-4" }, { 0xa132ff00, BTTV_BOARD_IVC100, "IVC-100" }, { 0xa1550000, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550001, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550002, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550003, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550100, BTTV_BOARD_IVC200, "IVC-200G" }, { 0xa1550101, BTTV_BOARD_IVC200, "IVC-200G" }, { 0xa1550102, BTTV_BOARD_IVC200, "IVC-200G" }, { 0xa1550103, BTTV_BOARD_IVC200, "IVC-200G" }, { 0xa1550800, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550801, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550802, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550803, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa182ff00, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff01, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff02, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff03, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff04, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff05, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff06, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff07, 
BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff08, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff09, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff0a, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff0b, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff0c, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff0d, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff0e, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff0f, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xf0500000, BTTV_BOARD_IVCE8784, "IVCE-8784" }, { 0xf0500001, BTTV_BOARD_IVCE8784, "IVCE-8784" }, { 0xf0500002, BTTV_BOARD_IVCE8784, "IVCE-8784" }, { 0xf0500003, BTTV_BOARD_IVCE8784, "IVCE-8784" }, { 0x41424344, BTTV_BOARD_GRANDTEC, "GrandTec Multi Capture" }, { 0x01020304, BTTV_BOARD_XGUARD, "Grandtec Grand X-Guard" }, { 0x18501851, BTTV_BOARD_CHRONOS_VS2, "FlyVideo 98 (LR50)/ Chronos Video Shuttle II" }, { 0xa0501851, BTTV_BOARD_CHRONOS_VS2, "FlyVideo 98 (LR50)/ Chronos Video Shuttle II" }, { 0x18511851, BTTV_BOARD_FLYVIDEO98EZ, "FlyVideo 98EZ (LR51)/ CyberMail AV" }, { 0x18521852, BTTV_BOARD_TYPHOON_TVIEW, "FlyVideo 98FM (LR50)/ Typhoon TView TV/FM Tuner" }, { 0x41a0a051, BTTV_BOARD_FLYVIDEO_98FM, "Lifeview FlyVideo 98 LR50 Rev Q" }, { 0x18501f7f, BTTV_BOARD_FLYVIDEO_98, "Lifeview Flyvideo 98" }, { 0x010115cb, BTTV_BOARD_GMV1, "AG GMV1" }, { 0x010114c7, BTTV_BOARD_MODTEC_205, "Modular Technology MM201/MM202/MM205/MM210/MM215 PCTV" }, { 0x10b42636, BTTV_BOARD_HAUPPAUGE878, "STB ???" 
}, { 0x217d6606, BTTV_BOARD_WINFAST2000, "Leadtek WinFast TV 2000" }, { 0xfff6f6ff, BTTV_BOARD_WINFAST2000, "Leadtek WinFast TV 2000" }, { 0x03116000, BTTV_BOARD_SENSORAY311, "Sensoray 311" }, { 0x00790e11, BTTV_BOARD_WINDVR, "Canopus WinDVR PCI" }, { 0xa0fca1a0, BTTV_BOARD_ZOLTRIX, "Face to Face Tvmax" }, { 0x82b2aa6a, BTTV_BOARD_SIMUS_GVC1100, "SIMUS GVC1100" }, { 0x146caa0c, BTTV_BOARD_PV951, "ituner spectra8" }, { 0x200a1295, BTTV_BOARD_PXC200, "ImageNation PXC200A" }, { 0x40111554, BTTV_BOARD_PV_BT878P_9B, "Prolink Pixelview PV-BT" }, { 0x17de0a01, BTTV_BOARD_KWORLD, "Mecer TV/FM/Video Tuner" }, { 0x01051805, BTTV_BOARD_PICOLO_TETRA_CHIP, "Picolo Tetra Chip #1" }, { 0x01061805, BTTV_BOARD_PICOLO_TETRA_CHIP, "Picolo Tetra Chip #2" }, { 0x01071805, BTTV_BOARD_PICOLO_TETRA_CHIP, "Picolo Tetra Chip #3" }, { 0x01081805, BTTV_BOARD_PICOLO_TETRA_CHIP, "Picolo Tetra Chip #4" }, { 0x15409511, BTTV_BOARD_ACORP_Y878F, "Acorp Y878F" }, { 0x53534149, BTTV_BOARD_SSAI_SECURITY, "SSAI Security Video Interface" }, { 0x5353414a, BTTV_BOARD_SSAI_ULTRASOUND, "SSAI Ultrasound Video Interface" }, /* likely broken, vendor id doesn't match the other magic views ... * { 0xa0fca04f, BTTV_BOARD_MAGICTVIEW063, "Guillemot Maxi TV Video 3" }, */ /* Duplicate PCI ID, reconfigure for this board during the eeprom read. 
* { 0x13eb0070, BTTV_BOARD_HAUPPAUGE_IMPACTVCB, "Hauppauge ImpactVCB" }, */ { 0x109e036e, BTTV_BOARD_CONCEPTRONIC_CTVFMI2, "Conceptronic CTVFMi v2"}, /* DVB cards (using pci function .1 for mpeg data xfer) */ { 0x001c11bd, BTTV_BOARD_PINNACLESAT, "Pinnacle PCTV Sat" }, { 0x01010071, BTTV_BOARD_NEBULA_DIGITV, "Nebula Electronics DigiTV" }, { 0x20007063, BTTV_BOARD_PC_HDTV, "pcHDTV HD-2000 TV"}, { 0x002611bd, BTTV_BOARD_TWINHAN_DST, "Pinnacle PCTV SAT CI" }, { 0x00011822, BTTV_BOARD_TWINHAN_DST, "Twinhan VisionPlus DVB" }, { 0xfc00270f, BTTV_BOARD_TWINHAN_DST, "ChainTech digitop DST-1000 DVB-S" }, { 0x07711461, BTTV_BOARD_AVDVBT_771, "AVermedia AverTV DVB-T 771" }, { 0x07611461, BTTV_BOARD_AVDVBT_761, "AverMedia AverTV DVB-T 761" }, { 0xdb1018ac, BTTV_BOARD_DVICO_DVBT_LITE, "DViCO FusionHDTV DVB-T Lite" }, { 0xdb1118ac, BTTV_BOARD_DVICO_DVBT_LITE, "Ultraview DVB-T Lite" }, { 0xd50018ac, BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE, "DViCO FusionHDTV 5 Lite" }, { 0x00261822, BTTV_BOARD_TWINHAN_DST, "DNTV Live! 
Mini "}, { 0xd200dbc0, BTTV_BOARD_DVICO_FUSIONHDTV_2, "DViCO FusionHDTV 2" }, { 0x763c008a, BTTV_BOARD_GEOVISION_GV600, "GeoVision GV-600" }, { 0x18011000, BTTV_BOARD_ENLTV_FM_2, "Encore ENL TV-FM-2" }, { 0x763d800a, BTTV_BOARD_GEOVISION_GV800S, "GeoVision GV-800(S) (master)" }, { 0x763d800b, BTTV_BOARD_GEOVISION_GV800S_SL, "GeoVision GV-800(S) (slave)" }, { 0x763d800c, BTTV_BOARD_GEOVISION_GV800S_SL, "GeoVision GV-800(S) (slave)" }, { 0x763d800d, BTTV_BOARD_GEOVISION_GV800S_SL, "GeoVision GV-800(S) (slave)" }, { 0x15401830, BTTV_BOARD_PV183, "Provideo PV183-1" }, { 0x15401831, BTTV_BOARD_PV183, "Provideo PV183-2" }, { 0x15401832, BTTV_BOARD_PV183, "Provideo PV183-3" }, { 0x15401833, BTTV_BOARD_PV183, "Provideo PV183-4" }, { 0x15401834, BTTV_BOARD_PV183, "Provideo PV183-5" }, { 0x15401835, BTTV_BOARD_PV183, "Provideo PV183-6" }, { 0x15401836, BTTV_BOARD_PV183, "Provideo PV183-7" }, { 0x15401837, BTTV_BOARD_PV183, "Provideo PV183-8" }, { 0, -1, NULL } }; /* ----------------------------------------------------------------------- */ /* array with description for bt848 / bt878 tv/grabber cards */ struct tvcard bttv_tvcards[] = { /* ---- card 0x00 ---------------------------------- */ [BTTV_BOARD_UNKNOWN] = { .name = " *** UNKNOWN/GENERIC *** ", .video_inputs = 4, .svhs = 2, .muxsel = MUXSEL(2, 3, 1, 0), .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MIRO] = { .name = "MIRO PCTV", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 2, 0, 0, 0 }, .gpiomute = 10, .needs_tvaudio = 1, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_HAUPPAUGE] = { .name = "Hauppauge (bt848)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 3 }, .gpiomute = 4, .needs_tvaudio = 1, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_STB] = { .name = "STB, Gateway P/N 6000699 (bt848)", .video_inputs = 3, /* .audio_inputs= 1, 
*/ .svhs = 2, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 4, 0, 2, 3 }, .gpiomute = 1, .no_msp34xx = 1, .needs_tvaudio = 1, .tuner_type = TUNER_PHILIPS_NTSC, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, }, /* ---- card 0x04 ---------------------------------- */ [BTTV_BOARD_INTEL] = { .name = "Intel Create and Share PCI/ Smart Video Recorder III", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 2, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0 }, .needs_tvaudio = 0, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_DIAMOND] = { .name = "Diamond DTV2000", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 3, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 0, 1, 0, 1 }, .gpiomute = 3, .needs_tvaudio = 1, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_AVERMEDIA] = { .name = "AVerMedia TVPhone", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 3, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomask = 0x0f, .gpiomux = { 0x0c, 0x04, 0x08, 0x04 }, /* 0x04 for some cards ?? 
*/ .needs_tvaudio = 1, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= avermedia_tvphone_audio, .has_remote = 1, }, [BTTV_BOARD_MATRIX_VISION] = { .name = "MATRIX-Vision MV-Delta", .video_inputs = 5, /* .audio_inputs= 1, */ .svhs = 3, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 0, 0), .gpiomux = { 0 }, .needs_tvaudio = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x08 ---------------------------------- */ [BTTV_BOARD_FLYVIDEO] = { .name = "Lifeview FlyVideo II (Bt848) LR26 / MAXI TV Video PCI2 LR26", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xc00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0xc00, 0x800, 0x400 }, .gpiomute = 0xc00, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_TURBOTV] = { .name = "IMS/IXmicro TurboTV", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 3, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 1, 1, 2, 3 }, .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = TUNER_TEMIC_PAL, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_HAUPPAUGE878] = { .name = "Hauppauge (bt878)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x0f, /* old: 7 */ .muxsel = MUXSEL(2, 0, 1, 1), .gpiomux = { 0, 1, 2, 3 }, .gpiomute = 4, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MIROPRO] = { .name = "MIRO PCTV pro", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x3014f, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x20001,0x10001, 0, 0 }, .gpiomute = 10, .needs_tvaudio = 1, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x0c ---------------------------------- */ [BTTV_BOARD_ADSTECH_TV] = { .name = "ADS Technologies Channel Surfer TV (bt848)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 13, 14, 11, 7 }, .needs_tvaudio = 1, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, 
[BTTV_BOARD_AVERMEDIA98] = { .name = "AVerMedia TVCapture 98", .video_inputs = 3, /* .audio_inputs= 4, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 13, 14, 11, 7 }, .needs_tvaudio = 1, .msp34xx_alt = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= avermedia_tv_stereo_audio, .no_gpioirq = 1, }, [BTTV_BOARD_VHX] = { .name = "Aimslab Video Highway Xtreme (VHX)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 2, 1, 3 }, /* old: {0, 1, 2, 3, 4} */ .gpiomute = 4, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_ZOLTRIX] = { .name = "Zoltrix TV-Max", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0, 1, 0 }, .gpiomute = 10, .needs_tvaudio = 1, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x10 ---------------------------------- */ [BTTV_BOARD_PIXVIEWPLAYTV] = { .name = "Prolink Pixelview PlayTV (bt878)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x01fe00, .muxsel = MUXSEL(2, 3, 1, 1), /* 2003-10-20 by "Anton A. 
Arapov" <arapov@mail.ru> */ .gpiomux = { 0x001e00, 0, 0x018000, 0x014000 }, .gpiomute = 0x002000, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_WINVIEW_601] = { .name = "Leadtek WinView 601", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x8300f8, .muxsel = MUXSEL(2, 3, 1, 1, 0), .gpiomux = { 0x4fa007,0xcfa007,0xcfa007,0xcfa007 }, .gpiomute = 0xcfa007, .needs_tvaudio = 1, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .volume_gpio = winview_volume, .has_radio = 1, }, [BTTV_BOARD_AVEC_INTERCAP] = { .name = "AVEC Intercapture", .video_inputs = 3, /* .audio_inputs= 2, */ .svhs = 2, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 1, 0, 0, 0 }, .needs_tvaudio = 1, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_LIFE_FLYKIT] = { .name = "Lifeview FlyVideo II EZ /FlyKit LR38 Bt848 (capture only)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 0x8dff00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0 }, .no_msp34xx = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x14 ---------------------------------- */ [BTTV_BOARD_CEI_RAFFLES] = { .name = "CEI Raffles Card", .video_inputs = 3, /* .audio_inputs= 3, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1, 1), .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_CONFERENCETV] = { .name = "Lifeview FlyVideo 98/ Lucky Star Image World ConferenceTV LR50", .video_inputs = 4, /* .audio_inputs= 2, tuner, line in */ .svhs = 2, .gpiomask = 0x1800, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0x800, 0x1000, 0x1000 }, .gpiomute = 0x1800, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_PHOEBE_TVMAS] = { .name = "Askey CPH050/ Phoebe Tv Master + FM", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xc00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 0x800, 0x400 }, .gpiomute = 0xc00, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = 
UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MODTEC_205] = { .name = "Modular Technology MM201/MM202/MM205/MM210/MM215 PCTV, bt878", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .has_dig_in = 1, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 0), /* input 2 is digital */ /* .digital_mode= DIGITAL_MODE_CAMERA, */ .gpiomux = { 0, 0, 0, 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ALPS_TSBB5_PAL_I, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x18 ---------------------------------- */ [BTTV_BOARD_MAGICTVIEW061] = { .name = "Askey CPH05X/06X (bt878) [many vendors]", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xe00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = {0x400, 0x400, 0x400, 0x400 }, .gpiomute = 0xc00, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .has_remote = 1, }, [BTTV_BOARD_VOBIS_BOOSTAR] = { .name = "Terratec TerraTV+ Version 1.0 (Bt848)/ Terra TValue Version 1.0/ Vobis TV-Boostar", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x1f0fff, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x20000, 0x30000, 0x10000, 0 }, .gpiomute = 0x40000, .needs_tvaudio = 0, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= terratv_audio, }, [BTTV_BOARD_HAUPPAUG_WCAM] = { .name = "Hauppauge WinCam newer (bt878)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 3, .gpiomask = 7, .muxsel = MUXSEL(2, 0, 1, 1), .gpiomux = { 0, 1, 2, 3 }, .gpiomute = 4, .needs_tvaudio = 1, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MAXI] = { .name = "Lifeview FlyVideo 98/ MAXI TV Video PCI2 LR50", .video_inputs = 4, /* .audio_inputs= 2, */ .svhs = 2, .gpiomask = 0x1800, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0x800, 0x1000, 0x1000 }, .gpiomute = 0x1800, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_SECAM, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x1c ---------------------------------- */ [BTTV_BOARD_TERRATV] = { .name = "Terratec TerraTV+ Version 1.1 (bt878)", 
.video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x1f0fff, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x20000, 0x30000, 0x10000, 0x00000 }, .gpiomute = 0x40000, .needs_tvaudio = 0, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= terratv_audio, /* GPIO wiring: External 20 pin connector (for Active Radio Upgrade board) gpio00: i2c-sda gpio01: i2c-scl gpio02: om5610-data gpio03: om5610-clk gpio04: om5610-wre gpio05: om5610-stereo gpio06: rds6588-davn gpio07: Pin 7 n.c. gpio08: nIOW gpio09+10: nIOR, nSEL ?? (bt878) gpio09: nIOR (bt848) gpio10: nSEL (bt848) Sound Routing: gpio16: u2-A0 (1st 4052bt) gpio17: u2-A1 gpio18: u2-nEN gpio19: u4-A0 (2nd 4052) gpio20: u4-A1 u4-nEN - GND Btspy: 00000 : Cdrom (internal audio input) 10000 : ext. Video audio input 20000 : TV Mono a0000 : TV Mono/2 1a0000 : TV Stereo 30000 : Radio 40000 : Mute */ }, [BTTV_BOARD_PXC200] = { /* Jannik Fritsch <jannik@techfak.uni-bielefeld.de> */ .name = "Imagenation PXC200", .video_inputs = 5, /* .audio_inputs= 1, */ .svhs = 1, /* was: 4 */ .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 0, 0), .gpiomux = { 0 }, .needs_tvaudio = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .muxsel_hook = PXC200_muxsel, }, [BTTV_BOARD_FLYVIDEO_98] = { .name = "Lifeview FlyVideo 98 LR50", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x1800, /* 0x8dfe00 */ .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0x0800, 0x1000, 0x1000 }, .gpiomute = 0x1800, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_IPROTV] = { .name = "Formac iProTV, Formac ProTV I (bt848)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 3, .gpiomask = 1, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 1, 0, 0, 0 }, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x20 ---------------------------------- */ [BTTV_BOARD_INTEL_C_S_PCI] = { .name = "Intel Create and Share PCI/ Smart Video Recorder III", .video_inputs = 4, /* 
.audio_inputs= 0, */ .svhs = 2, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0 }, .needs_tvaudio = 0, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_TERRATVALUE] = { .name = "Terratec TerraTValue Version Bt878", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xffff00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x500, 0, 0x300, 0x900 }, .gpiomute = 0x900, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_WINFAST2000] = { .name = "Leadtek WinFast 2000/ WinFast 2000 XP", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, /* TV, CVid, SVid, CVid over SVid connector */ .muxsel = MUXSEL(2, 3, 1, 1, 0), /* Alexander Varakin <avarakin@hotmail.com> [stereo version] */ .gpiomask = 0xb33000, .gpiomux = { 0x122000,0x1000,0x0000,0x620000 }, .gpiomute = 0x800000, /* Audio Routing for "WinFast 2000 XP" (no tv stereo !) gpio23 -- hef4052:nEnable (0x800000) gpio12 -- hef4052:A1 gpio13 -- hef4052:A0 0x0000: external audio 0x1000: FM 0x2000: TV 0x3000: n.c. Note: There exists another variant "Winfast 2000" with tv stereo !? 
Note: eeprom only contains FF and pci subsystem id 107d:6606 */ .needs_tvaudio = 0, .pll = PLL_28, .has_radio = 1, .tuner_type = TUNER_PHILIPS_PAL, /* default for now, gpio reads BFFF06 for Pal bg+dk */ .tuner_addr = ADDR_UNSET, .audio_mode_gpio= winfast2000_audio, .has_remote = 1, }, [BTTV_BOARD_CHRONOS_VS2] = { .name = "Lifeview FlyVideo 98 LR50 / Chronos Video Shuttle II", .video_inputs = 4, /* .audio_inputs= 3, */ .svhs = 2, .gpiomask = 0x1800, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0x800, 0x1000, 0x1000 }, .gpiomute = 0x1800, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x24 ---------------------------------- */ [BTTV_BOARD_TYPHOON_TVIEW] = { .name = "Lifeview FlyVideo 98FM LR50 / Typhoon TView TV/FM Tuner", .video_inputs = 4, /* .audio_inputs= 3, */ .svhs = 2, .gpiomask = 0x1800, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0x800, 0x1000, 0x1000 }, .gpiomute = 0x1800, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .has_radio = 1, }, [BTTV_BOARD_PXELVWPLTVPRO] = { .name = "Prolink PixelView PlayTV pro", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xff, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x21, 0x20, 0x24, 0x2c }, .gpiomute = 0x29, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MAGICTVIEW063] = { .name = "Askey CPH06X TView99", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x551e00, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 0x551400, 0x551200, 0, 0 }, .gpiomute = 0x551c00, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, .has_remote = 1, }, [BTTV_BOARD_PINNACLE] = { .name = "Pinnacle PCTV Studio/Rave", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x03000F, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 2, 0xd0001, 0, 0 }, .gpiomute = 1, .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x28 
---------------------------------- */ [BTTV_BOARD_STB2] = { .name = "STB TV PCI FM, Gateway P/N 6000704 (bt878), 3Dfx VoodooTV 100", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 4, 0, 2, 3 }, .gpiomute = 1, .no_msp34xx = 1, .needs_tvaudio = 1, .tuner_type = TUNER_PHILIPS_NTSC, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, }, [BTTV_BOARD_AVPHONE98] = { .name = "AVerMedia TVPhone 98", .video_inputs = 3, /* .audio_inputs= 4, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 13, 4, 11, 7 }, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .has_radio = 1, .audio_mode_gpio= avermedia_tvphone_audio, }, [BTTV_BOARD_PV951] = { .name = "ProVideo PV951", /* pic16c54 */ .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0, 0, 0}, .needs_tvaudio = 1, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_ONAIR_TV] = { .name = "Little OnAir TV", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xe00b, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0xff9ff6, 0xff9ff6, 0xff1ff7, 0 }, .gpiomute = 0xff3ffc, .no_msp34xx = 1, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x2c ---------------------------------- */ [BTTV_BOARD_SIGMA_TVII_FM] = { .name = "Sigma TVII-FM", .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 3, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 1, 1, 0, 2 }, .gpiomute = 3, .no_msp34xx = 1, .pll = PLL_NONE, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MATRIX_VISION2] = { .name = "MATRIX-Vision MV-Delta 2", .video_inputs = 5, /* .audio_inputs= 1, */ .svhs = 3, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 0, 0), .gpiomux = { 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_ZOLTRIX_GENIE] = { .name = 
"Zoltrix Genie TV/FM", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xbcf03f, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0xbc803f, 0xbc903f, 0xbcb03f, 0 }, .gpiomute = 0xbcb03f, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_TEMIC_4039FR5_NTSC, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_TERRATVRADIO] = { .name = "Terratec TV/Radio+", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x70000, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x20000, 0x30000, 0x10000, 0 }, .gpiomute = 0x40000, .needs_tvaudio = 1, .no_msp34xx = 1, .pll = PLL_35, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, .has_radio = 1, }, /* ---- card 0x30 ---------------------------------- */ [BTTV_BOARD_DYNALINK] = { .name = "Askey CPH03x/ Dynalink Magic TView", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = {2,0,0,0 }, .gpiomute = 1, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_GVBCTV3PCI] = { .name = "IODATA GV-BCTV3/PCI", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x010f00, .muxsel = MUXSEL(2, 3, 0, 0), .gpiomux = {0x10000, 0, 0x10000, 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ALPS_TSHC6_NTSC, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= gvbctv3pci_audio, }, [BTTV_BOARD_PXELVWPLTVPAK] = { .name = "Prolink PV-BT878P+4E / PixelView PlayTV PAK / Lenco MXTV-9578 CP", .video_inputs = 5, /* .audio_inputs= 1, */ .svhs = 3, .has_dig_in = 1, .gpiomask = 0xAA0000, .muxsel = MUXSEL(2, 3, 1, 1, 0), /* in 4 is digital */ /* .digital_mode= DIGITAL_MODE_CAMERA, */ .gpiomux = { 0x20000, 0, 0x80000, 0x80000 }, .gpiomute = 0xa8000, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, .has_remote = 1, /* GPIO wiring: (different from Rev.4C !) 
GPIO17: U4.A0 (first hef4052bt) GPIO19: U4.A1 GPIO20: U5.A1 (second hef4052bt) GPIO21: U4.nEN GPIO22: BT832 Reset Line GPIO23: A5,A0, U5,nEN Note: At i2c=0x8a is a Bt832 chip, which changes to 0x88 after being reset via GPIO22 */ }, [BTTV_BOARD_EAGLE] = { .name = "Eagle Wireless Capricorn2 (bt878A)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 7, .muxsel = MUXSEL(2, 0, 1, 1), .gpiomux = { 0, 1, 2, 3 }, .gpiomute = 4, .pll = PLL_28, .tuner_type = UNSET /* TUNER_ALPS_TMDH2_NTSC */, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x34 ---------------------------------- */ [BTTV_BOARD_PINNACLEPRO] = { /* David Härdeman <david@2gen.com> */ .name = "Pinnacle PCTV Studio Pro", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 3, .gpiomask = 0x03000F, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 1, 0xd0001, 0, 0 }, .gpiomute = 10, /* sound path (5 sources): MUX1 (mask 0x03), Enable Pin 0x08 (0=enable, 1=disable) 0= ext. Audio IN 1= from MUX2 2= Mono TV sound from Tuner 3= not connected MUX2 (mask 0x30000): 0,2,3= from MSP34xx 1= FM stereo Radio from Tuner */ .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_TVIEW_RDS_FM] = { /* Claas Langbehn <claas@bigfoot.com>, Sven Grothklags <sven@upb.de> */ .name = "Typhoon TView RDS + FM Stereo / KNC1 TV Station RDS", .video_inputs = 4, /* .audio_inputs= 3, */ .svhs = 2, .gpiomask = 0x1c, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0, 0x10, 8 }, .gpiomute = 4, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .has_radio = 1, }, [BTTV_BOARD_LIFETEC_9415] = { /* Tim Röstermundt <rosterm@uni-muenster.de> in de.comp.os.unix.linux.hardware: options bttv card=0 pll=1 radio=1 gpiomask=0x18e0 gpiomux =0x44c71f,0x44d71f,0,0x44d71f,0x44dfff options tuner type=5 */ .name = "Lifeview FlyVideo 2000 /FlyVideo A2/ Lifetec LT 9415 TV [LR90]", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x18e0, .muxsel = MUXSEL(2, 3, 1, 
1), .gpiomux = { 0x0000,0x0800,0x1000,0x1000 }, .gpiomute = 0x18e0, /* For cards with tda9820/tda9821: 0x0000: Tuner normal stereo 0x0080: Tuner A2 SAP (second audio program = Zweikanalton) 0x0880: Tuner A2 stereo */ .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_BESTBUY_EASYTV] = { /* Miguel Angel Alvarez <maacruz@navegalia.com> old Easy TV BT848 version (model CPH031) */ .name = "Askey CPH031/ BESTBUY Easy TV", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xF, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 2, 0, 0, 0 }, .gpiomute = 10, .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = TUNER_TEMIC_PAL, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x38 ---------------------------------- */ [BTTV_BOARD_FLYVIDEO_98FM] = { /* Gordon Heydon <gjheydon@bigfoot.com ('98) */ .name = "Lifeview FlyVideo 98FM LR50", .video_inputs = 4, /* .audio_inputs= 3, */ .svhs = 2, .gpiomask = 0x1800, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0x800, 0x1000, 0x1000 }, .gpiomute = 0x1800, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, }, /* This is the ultimate cheapo capture card * just a BT848A on a small PCB! 
* Steve Hosgood <steve@equiinet.com> */ [BTTV_BOARD_GRANDTEC] = { .name = "GrandTec 'Grand Video Capture' (Bt848)", .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = 1, .gpiomask = 0, .muxsel = MUXSEL(3, 1), .gpiomux = { 0 }, .needs_tvaudio = 0, .no_msp34xx = 1, .pll = PLL_35, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_ASKEY_CPH060] = { /* Daniel Herrington <daniel.herrington@home.com> */ .name = "Askey CPH060/ Phoebe TV Master Only (No FM)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xe00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x400, 0x400, 0x400, 0x400 }, .gpiomute = 0x800, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = TUNER_TEMIC_4036FY5_NTSC, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_ASKEY_CPH03X] = { /* Matti Mottus <mottus@physic.ut.ee> */ .name = "Askey CPH03x TV Capturer", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x03000F, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 2, 0, 0, 0 }, .gpiomute = 1, .pll = PLL_28, .tuner_type = TUNER_TEMIC_PAL, .tuner_addr = ADDR_UNSET, .has_remote = 1, }, /* ---- card 0x3c ---------------------------------- */ [BTTV_BOARD_MM100PCTV] = { /* Philip Blundell <philb@gnu.org> */ .name = "Modular Technology MM100PCTV", .video_inputs = 2, /* .audio_inputs= 2, */ .svhs = NO_SVHS, .gpiomask = 11, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 2, 0, 0, 1 }, .gpiomute = 8, .pll = PLL_35, .tuner_type = TUNER_TEMIC_PAL, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_GMV1] = { /* Adrian Cox <adrian@humboldt.co.uk */ .name = "AG Electronics GMV1", .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = 1, .gpiomask = 0xF, .muxsel = MUXSEL(2, 2), .gpiomux = { }, .no_msp34xx = 1, .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_BESTBUY_EASYTV2] = { /* Miguel Angel Alvarez <maacruz@navegalia.com> new Easy TV BT878 version (model CPH061) special thanks to Informatica Mieres for providing the card */ .name = "Askey CPH061/ 
BESTBUY Easy TV (bt878)", .video_inputs = 3, /* .audio_inputs= 2, */ .svhs = 2, .gpiomask = 0xFF, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 1, 0, 4, 4 }, .gpiomute = 9, .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_ATI_TVWONDER] = { /* Lukas Gebauer <geby@volny.cz> */ .name = "ATI TV-Wonder", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xf03f, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 0xbffe, 0, 0xbfff, 0 }, .gpiomute = 0xbffe, .pll = PLL_28, .tuner_type = TUNER_TEMIC_4006FN5_MULTI_PAL, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x40 ---------------------------------- */ [BTTV_BOARD_ATI_TVWONDERVE] = { /* Lukas Gebauer <geby@volny.cz> */ .name = "ATI TV-Wonder VE", .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 1, .muxsel = MUXSEL(2, 3, 0, 1), .gpiomux = { 0, 0, 1, 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_TEMIC_4006FN5_MULTI_PAL, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_FLYVIDEO2000] = { /* DeeJay <deejay@westel900.net (2000S) */ .name = "Lifeview FlyVideo 2000S LR90", .video_inputs = 3, /* .audio_inputs= 3, */ .svhs = 2, .gpiomask = 0x18e0, .muxsel = MUXSEL(2, 3, 0, 1), /* Radio changed from 1e80 to 0x800 to make FlyVideo2000S in .hu happy (gm)*/ /* -dk-???: set mute=0x1800 for tda9874h daughterboard */ .gpiomux = { 0x0000,0x0800,0x1000,0x1000 }, .gpiomute = 0x1800, .audio_mode_gpio= fv2000s_audio, .no_msp34xx = 1, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_TERRATVALUER] = { .name = "Terratec TValueRadio", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xffff00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x500, 0x500, 0x300, 0x900 }, .gpiomute = 0x900, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .has_radio = 1, }, [BTTV_BOARD_GVBCTV4PCI] = { /* TANAKA Kei <peg00625@nifty.com> */ .name = "IODATA 
GV-BCTV4/PCI", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x010f00, .muxsel = MUXSEL(2, 3, 0, 0), .gpiomux = {0x10000, 0, 0x10000, 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_SHARP_2U5JF5540_NTSC, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= gvbctv3pci_audio, }, /* ---- card 0x44 ---------------------------------- */ [BTTV_BOARD_VOODOOTV_FM] = { .name = "3Dfx VoodooTV FM (Euro)", /* try "insmod msp3400 simple=0" if you have * sound problems with this card. */ .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 0x4f8a00, /* 0x100000: 1=MSP enabled (0=disable again) * 0x010000: Connected to "S0" on tda9880 (0=Pal/BG, 1=NTSC) */ .gpiomux = {0x947fff, 0x987fff,0x947fff,0x947fff }, .gpiomute = 0x947fff, /* tvtuner, radio, external,internal, mute, stereo * tuner, Composit, SVid, Composit-on-Svid-adapter */ .muxsel = MUXSEL(2, 3, 0, 1), .tuner_type = TUNER_MT2032, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, }, [BTTV_BOARD_VOODOOTV_200] = { .name = "VoodooTV 200 (USA)", /* try "insmod msp3400 simple=0" if you have * sound problems with this card. 
*/ .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 0x4f8a00, /* 0x100000: 1=MSP enabled (0=disable again) * 0x010000: Connected to "S0" on tda9880 (0=Pal/BG, 1=NTSC) */ .gpiomux = {0x947fff, 0x987fff,0x947fff,0x947fff }, .gpiomute = 0x947fff, /* tvtuner, radio, external,internal, mute, stereo * tuner, Composit, SVid, Composit-on-Svid-adapter */ .muxsel = MUXSEL(2, 3, 0, 1), .tuner_type = TUNER_MT2032, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, }, [BTTV_BOARD_AIMMS] = { /* Philip Blundell <pb@nexus.co.uk> */ .name = "Active Imaging AIMMS", .video_inputs = 1, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .muxsel = MUXSEL(2), .gpiomask = 0 }, [BTTV_BOARD_PV_BT878P_PLUS] = { /* Tomasz Pyra <hellfire@sedez.iq.pl> */ .name = "Prolink Pixelview PV-BT878P+ (Rev.4C,8E)", .video_inputs = 3, /* .audio_inputs= 4, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0, 11, 7 }, /* TV and Radio with same GPIO ! */ .gpiomute = 13, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = TUNER_LG_PAL_I_FM, .tuner_addr = ADDR_UNSET, .has_remote = 1, /* GPIO wiring: GPIO0: U4.A0 (hef4052bt) GPIO1: U4.A1 GPIO2: U4.A1 (second hef4052bt) GPIO3: U4.nEN, U5.A0, A5.nEN GPIO8-15: vrd866b ? 
*/ }, [BTTV_BOARD_FLYVIDEO98EZ] = { .name = "Lifeview FlyVideo 98EZ (capture only) LR51", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 2, /* AV1, AV2, SVHS, CVid adapter on SVHS */ .muxsel = MUXSEL(2, 3, 1, 1), .pll = PLL_28, .no_msp34xx = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x48 ---------------------------------- */ [BTTV_BOARD_PV_BT878P_9B] = { /* Dariusz Kowalewski <darekk@automex.pl> */ .name = "Prolink Pixelview PV-BT878P+9B (PlayTV Pro rev.9B FM+NICAM)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x3f, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x01, 0x00, 0x03, 0x03 }, .gpiomute = 0x09, .needs_tvaudio = 1, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= pvbt878p9b_audio, /* Note: not all cards have stereo */ .has_radio = 1, /* Note: not all cards have radio */ .has_remote = 1, /* GPIO wiring: GPIO0: A0 hef4052 GPIO1: A1 hef4052 GPIO3: nEN hef4052 GPIO8-15: vrd866b GPIO20,22,23: R30,R29,R28 */ }, [BTTV_BOARD_SENSORAY311] = { /* Clay Kunz <ckunz@mail.arc.nasa.gov> */ /* you must jumper JP5 for the card to work */ .name = "Sensoray 311", .video_inputs = 5, /* .audio_inputs= 0, */ .svhs = 4, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 0, 0), .gpiomux = { 0 }, .needs_tvaudio = 0, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_RV605] = { /* Miguel Freitas <miguel@cetuc.puc-rio.br> */ .name = "RemoteVision MX (RV605)", .video_inputs = 16, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0x00, .gpiomask2 = 0x07ff, .muxsel = MUXSEL(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), .no_msp34xx = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .muxsel_hook = rv605_muxsel, }, [BTTV_BOARD_POWERCLR_MTV878] = { .name = "Powercolor MTV878/ MTV878R/ MTV878F", .video_inputs = 3, /* .audio_inputs= 2, */ .svhs = 2, .gpiomask = 0x1C800F, /* Bit0-2: Audio select, 8-12:remote control 14:remote valid 15:remote reset */ .muxsel 
= MUXSEL(2, 1, 1), .gpiomux = { 0, 1, 2, 2 }, .gpiomute = 4, .needs_tvaudio = 0, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, }, /* ---- card 0x4c ---------------------------------- */ [BTTV_BOARD_WINDVR] = { /* Masaki Suzuki <masaki@btree.org> */ .name = "Canopus WinDVR PCI (COMPAQ Presario 3524JP, 5112JP)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x140007, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 3 }, .gpiomute = 4, .tuner_type = TUNER_PHILIPS_NTSC, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= windvr_audio, }, [BTTV_BOARD_GRANDTEC_MULTI] = { .name = "GrandTec Multi Capture Card (Bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 0 }, .needs_tvaudio = 0, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_KWORLD] = { .name = "Jetway TV/Capture JW-TV878-FBK, Kworld KW-TV878RF", .video_inputs = 4, /* .audio_inputs= 3, */ .svhs = 2, .gpiomask = 7, /* Tuner, SVid, SVHS, SVid to SVHS connector */ .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0, 4, 4 },/* Yes, this tuner uses the same audio output for TV and FM radio! * This card lacks external Audio In, so we mute it on Ext. & Int. * The PCB can take a sbx1637/sbx1673, wiring unknown. * This card lacks PCI subsystem ID, sigh. * gpiomux =1: lower volume, 2+3: mute * btwincap uses 0x80000/0x80003 */ .gpiomute = 4, .needs_tvaudio = 0, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, /* Samsung TCPA9095PC27A (BG+DK), philips compatible, w/FM, stereo and radio signal strength indicators work fine. */ .has_radio = 1, /* GPIO Info: GPIO0,1: HEF4052 A0,A1 GPIO2: HEF4052 nENABLE GPIO3-7: n.c. GPIO8-13: IRDC357 data0-5 (data6 n.c. ?) [chip not present on my card] GPIO14,15: ?? GPIO16-21: n.c. GPIO22,23: ?? ?? 
: mtu8b56ep microcontroller for IR (GPIO wiring unknown)*/ }, [BTTV_BOARD_DSP_TCVIDEO] = { /* Arthur Tetzlaff-Deas, DSP Design Ltd <software@dspdesign.com> */ .name = "DSP Design TCVIDEO", .video_inputs = 4, .svhs = NO_SVHS, .muxsel = MUXSEL(2, 3, 1, 0), .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x50 ---------------------------------- */ [BTTV_BOARD_HAUPPAUGEPVR] = { .name = "Hauppauge WinTV PVR", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .muxsel = MUXSEL(2, 0, 1, 1), .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .gpiomask = 7, .gpiomux = {7}, }, [BTTV_BOARD_GVBCTV5PCI] = { .name = "IODATA GV-BCTV5/PCI", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x0f0f80, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = {0x030000, 0x010000, 0, 0 }, .gpiomute = 0x020000, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_NTSC_M, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= gvbctv5pci_audio, .has_radio = 1, }, [BTTV_BOARD_OSPREY1x0] = { .name = "Osprey 100/150 (878)", /* 0x1(2|3)-45C6-C1 */ .video_inputs = 4, /* id-inputs-clock */ /* .audio_inputs= 0, */ .svhs = 3, .muxsel = MUXSEL(3, 2, 0, 1), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY1x0_848] = { .name = "Osprey 100/150 (848)", /* 0x04-54C0-C1 & older boards */ .video_inputs = 3, /* .audio_inputs= 0, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, /* ---- card 0x54 ---------------------------------- */ [BTTV_BOARD_OSPREY101_848] = { .name = "Osprey 101 (848)", /* 0x05-40C0-C1 */ .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = 1, .muxsel = MUXSEL(3, 1), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY1x1] = { .name = "Osprey 101/151", /* 0x1(4|5)-0004-C4 */ .video_inputs 
= 1, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .muxsel = MUXSEL(0), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY1x1_SVID] = { .name = "Osprey 101/151 w/ svid", /* 0x(16|17|20)-00C4-C1 */ .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = 1, .muxsel = MUXSEL(0, 1), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY2xx] = { .name = "Osprey 200/201/250/251", /* 0x1(8|9|E|F)-0004-C4 */ .video_inputs = 1, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .muxsel = MUXSEL(0), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, /* ---- card 0x58 ---------------------------------- */ [BTTV_BOARD_OSPREY2x0_SVID] = { .name = "Osprey 200/250", /* 0x1(A|B)-00C4-C1 */ .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = 1, .muxsel = MUXSEL(0, 1), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY2x0] = { .name = "Osprey 210/220/230", /* 0x1(A|B)-04C0-C1 */ .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = 1, .muxsel = MUXSEL(2, 3), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY500] = { .name = "Osprey 500", /* 500 */ .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = 1, .muxsel = MUXSEL(2, 3), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY540] = { .name = "Osprey 540", /* 540 */ .video_inputs = 4, /* .audio_inputs= 1, */ .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, /* ---- card 0x5C ---------------------------------- */ [BTTV_BOARD_OSPREY2000] = { .name = "Osprey 2000", /* 2000 */ .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = 1, .muxsel = MUXSEL(2, 3), .pll = PLL_28, .tuner_type = TUNER_ABSENT, 
.tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, /* must avoid, conflicts with the bt860 */ }, [BTTV_BOARD_IDS_EAGLE] = { /* M G Berberich <berberic@forwiss.uni-passau.de> */ .name = "IDS Eagle", .video_inputs = 4, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0, .muxsel = MUXSEL(2, 2, 2, 2), .muxsel_hook = eagle_muxsel, .no_msp34xx = 1, .pll = PLL_28, }, [BTTV_BOARD_PINNACLESAT] = { .name = "Pinnacle PCTV Sat", .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, .muxsel = MUXSEL(3, 1), .pll = PLL_28, .no_gpioirq = 1, .has_dvb = 1, }, [BTTV_BOARD_FORMAC_PROTV] = { .name = "Formac ProTV II (bt878)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 3, .gpiomask = 2, /* TV, Comp1, Composite over SVID con, SVID */ .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 2, 2, 0, 0 }, .pll = PLL_28, .has_radio = 1, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, /* sound routing: GPIO=0x00,0x01,0x03: mute (?) 0x02: both TV and radio (tuner: FM1216/I) The card has onboard audio connectors labeled "cdrom" and "board", not soldered here, though unknown wiring. Card lacks: external audio in, pci subsystem id. 
*/ }, /* ---- card 0x60 ---------------------------------- */ [BTTV_BOARD_MACHTV] = { .name = "MachTV", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 3}, .gpiomute = 4, .needs_tvaudio = 1, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .pll = PLL_28, }, [BTTV_BOARD_EURESYS_PICOLO] = { .name = "Euresys Picolo", .video_inputs = 3, /* .audio_inputs= 0, */ .svhs = 2, .gpiomask = 0, .no_msp34xx = 1, .no_tda7432 = 1, .muxsel = MUXSEL(2, 0, 1), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_PV150] = { /* Luc Van Hoeylandt <luc@e-magic.be> */ .name = "ProVideo PV150", /* 0x4f */ .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0, .muxsel = MUXSEL(2, 3), .gpiomux = { 0 }, .needs_tvaudio = 0, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_AD_TVK503] = { /* Hiroshi Takekawa <sian@big.or.jp> */ /* This card lacks subsystem ID */ .name = "AD-TVK503", /* 0x63 */ .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x001e8007, .muxsel = MUXSEL(2, 3, 1, 0), /* Tuner, Radio, external, internal, off, on */ .gpiomux = { 0x08, 0x0f, 0x0a, 0x08 }, .gpiomute = 0x0f, .needs_tvaudio = 0, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_NTSC, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= adtvk503_audio, }, /* ---- card 0x64 ---------------------------------- */ [BTTV_BOARD_HERCULES_SM_TV] = { .name = "Hercules Smart TV Stereo", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x00, .muxsel = MUXSEL(2, 3, 1, 1), .needs_tvaudio = 1, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, /* Notes: - card lacks subsystem ID - stereo variant w/ daughter board with tda9874a @0xb0 - Audio Routing: always from tda9874 independent of GPIO (?) 
external line in: unknown - Other chips: em78p156elp @ 0x96 (probably IR remote control) hef4053 (instead 4052) for unknown function */ }, [BTTV_BOARD_PACETV] = { .name = "Pace TV & Radio Card", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, /* Tuner, CVid, SVid, CVid over SVid connector */ .muxsel = MUXSEL(2, 3, 1, 1), .gpiomask = 0, .no_tda7432 = 1, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, .has_radio = 1, .pll = PLL_28, /* Bt878, Bt832, FI1246 tuner; no pci subsystem id only internal line out: (4pin header) RGGL Radio must be decoded by msp3410d (not routed through)*/ /* .digital_mode = DIGITAL_MODE_CAMERA, todo! */ }, [BTTV_BOARD_IVC200] = { /* Chris Willing <chris@vislab.usyd.edu.au> */ .name = "IVC-200", .video_inputs = 1, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0xdf, .muxsel = MUXSEL(2), .pll = PLL_28, }, [BTTV_BOARD_IVCE8784] = { .name = "IVCE-8784", .video_inputs = 1, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0xdf, .muxsel = MUXSEL(2), .pll = PLL_28, }, [BTTV_BOARD_XGUARD] = { .name = "Grand X-Guard / Trust 814PCI", .video_inputs = 16, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .gpiomask2 = 0xff, .muxsel = MUXSEL(2,2,2,2, 3,3,3,3, 1,1,1,1, 0,0,0,0), .muxsel_hook = xguard_muxsel, .no_msp34xx = 1, .no_tda7432 = 1, .pll = PLL_28, }, /* ---- card 0x68 ---------------------------------- */ [BTTV_BOARD_NEBULA_DIGITV] = { .name = "Nebula Electronics DigiTV", .video_inputs = 1, .svhs = NO_SVHS, .muxsel = MUXSEL(2, 3, 1, 0), .no_msp34xx = 1, .no_tda7432 = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .has_dvb = 1, .has_remote = 1, .gpiomask = 0x1b, .no_gpioirq = 1, }, [BTTV_BOARD_PV143] = { /* Jorge Boncompte - DTI2 <jorge@dti2.net> */ .name = "ProVideo PV143", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 
0, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 0 }, .needs_tvaudio = 0, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_VD009X1_VD011_MINIDIN] = { /* M.Klahr@phytec.de */ .name = "PHYTEC VD-009-X1 VD-011 MiniDIN (bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 3, .gpiomask = 0x00, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_VD009X1_VD011_COMBI] = { .name = "PHYTEC VD-009-X1 VD-011 Combi (bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 3, .gpiomask = 0x00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x6c ---------------------------------- */ [BTTV_BOARD_VD009_MINIDIN] = { .name = "PHYTEC VD-009 MiniDIN (bt878)", .video_inputs = 10, /* .audio_inputs= 0, */ .svhs = 9, .gpiomask = 0x00, .gpiomask2 = 0x03, /* used for external vodeo mux */ .muxsel = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 0), .muxsel_hook = phytec_muxsel, .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_VD009_COMBI] = { .name = "PHYTEC VD-009 Combi (bt878)", .video_inputs = 10, /* .audio_inputs= 0, */ .svhs = 9, .gpiomask = 0x00, .gpiomask2 = 0x03, /* used for external vodeo mux */ .muxsel = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 1), .muxsel_hook = phytec_muxsel, .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_IVC100] = { .name = "IVC-100", .video_inputs = 4, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0xdf, .muxsel = MUXSEL(2, 3, 1, 0), .pll = PLL_28, }, [BTTV_BOARD_IVC120] = { /* IVC-120G 
- Alan Garfield <alan@fromorbit.com> */ .name = "IVC-120G", .video_inputs = 16, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, /* card has no svhs */ .needs_tvaudio = 0, .no_msp34xx = 1, .no_tda7432 = 1, .gpiomask = 0x00, .muxsel = MUXSEL(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .muxsel_hook = ivc120_muxsel, .pll = PLL_28, }, /* ---- card 0x70 ---------------------------------- */ [BTTV_BOARD_PC_HDTV] = { .name = "pcHDTV HD-2000 TV", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1, 0), .tuner_type = TUNER_PHILIPS_FCV1236D, .tuner_addr = ADDR_UNSET, .has_dvb = 1, }, [BTTV_BOARD_TWINHAN_DST] = { .name = "Twinhan DST + clones", .no_msp34xx = 1, .no_tda7432 = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_video = 1, .has_dvb = 1, }, [BTTV_BOARD_WINFASTVC100] = { .name = "Winfast VC100", .video_inputs = 3, /* .audio_inputs= 0, */ .svhs = 1, /* Vid In, SVid In, Vid over SVid in connector */ .muxsel = MUXSEL(3, 1, 1, 3), .no_msp34xx = 1, .no_tda7432 = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .pll = PLL_28, }, [BTTV_BOARD_TEV560] = { .name = "Teppro TEV-560/InterVision IV-560", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 3, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 1, 1, 1, 1 }, .needs_tvaudio = 1, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .pll = PLL_35, }, /* ---- card 0x74 ---------------------------------- */ [BTTV_BOARD_SIMUS_GVC1100] = { .name = "SIMUS GVC1100", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .muxsel = MUXSEL(2, 2, 2, 2), .gpiomask = 0x3F, .muxsel_hook = gvc1100_muxsel, }, [BTTV_BOARD_NGSTV_PLUS] = { /* Carlos Silva r3pek@r3pek.homelinux.org || card 0x75 */ .name = "NGS NGSTV+", .video_inputs = 3, .svhs = 2, .gpiomask = 0x008007, .muxsel = MUXSEL(2, 3, 0, 0), .gpiomux = { 0, 0, 0, 0 }, .gpiomute = 0x000003, .pll = 
PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .has_remote = 1, }, [BTTV_BOARD_LMLBT4] = { /* http://linuxmedialabs.com */ .name = "LMLBT4", .video_inputs = 4, /* IN1,IN2,IN3,IN4 */ /* .audio_inputs= 0, */ .svhs = NO_SVHS, .muxsel = MUXSEL(2, 3, 1, 0), .no_msp34xx = 1, .no_tda7432 = 1, .needs_tvaudio = 0, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_TEKRAM_M205] = { /* Helmroos Harri <harri.helmroos@pp.inet.fi> */ .name = "Tekram M205 PRO", .video_inputs = 3, /* .audio_inputs= 1, */ .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .svhs = 2, .needs_tvaudio = 0, .gpiomask = 0x68, .muxsel = MUXSEL(2, 3, 1), .gpiomux = { 0x68, 0x68, 0x61, 0x61 }, .pll = PLL_28, }, /* ---- card 0x78 ---------------------------------- */ [BTTV_BOARD_CONTVFMI] = { /* Javier Cendan Ares <jcendan@lycos.es> */ /* bt878 TV + FM without subsystem ID */ .name = "Conceptronic CONTVFMi", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x008007, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 2 }, .gpiomute = 3, .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .has_remote = 1, .has_radio = 1, }, [BTTV_BOARD_PICOLO_TETRA_CHIP] = { /*Eric DEBIEF <debief@telemsa.com>*/ /*EURESYS Picolo Tetra : 4 Conexant Fusion 878A, no audio, video input set with analog multiplexers GPIO controlled*/ /* adds picolo_tetra_muxsel(), picolo_tetra_init(), the following declaration strucure, and #define BTTV_BOARD_PICOLO_TETRA_CHIP*/ /*0x79 in bttv.h*/ .name = "Euresys Picolo Tetra", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0, .gpiomask2 = 0x3C<<16,/*Set the GPIO[18]->GPIO[21] as output pin.==> drive the video inputs through analog multiplexers*/ .no_msp34xx = 1, .no_tda7432 = 1, /*878A input is always MUX0, see above.*/ .muxsel = MUXSEL(2, 2, 2, 2), .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .pll = PLL_28, .needs_tvaudio = 0, .muxsel_hook = 
picolo_tetra_muxsel,/*Required as it doesn't follow the classic input selection policy*/ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_SPIRIT_TV] = { /* Spirit TV Tuner from http://spiritmodems.com.au */ /* Stafford Goodsell <surge@goliath.homeunix.org> */ .name = "Spirit TV Tuner", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x0000000f, .muxsel = MUXSEL(2, 1, 1), .gpiomux = { 0x02, 0x00, 0x00, 0x00 }, .tuner_type = TUNER_TEMIC_PAL, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, }, [BTTV_BOARD_AVDVBT_771] = { /* Wolfram Joost <wojo@frokaschwei.de> */ .name = "AVerMedia AVerTV DVB-T 771", .video_inputs = 2, .svhs = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .muxsel = MUXSEL(3, 3), .no_msp34xx = 1, .no_tda7432 = 1, .pll = PLL_28, .has_dvb = 1, .no_gpioirq = 1, .has_remote = 1, }, /* ---- card 0x7c ---------------------------------- */ [BTTV_BOARD_AVDVBT_761] = { /* Matt Jesson <dvb@jesson.eclipse.co.uk> */ /* Based on the Nebula card data - added remote and new card number - BTTV_BOARD_AVDVBT_761, see also ir-kbd-gpio.c */ .name = "AverMedia AverTV DVB-T 761", .video_inputs = 2, .svhs = 1, .muxsel = MUXSEL(3, 1, 2, 0), /* Comp0, S-Video, ?, ? 
*/ .no_msp34xx = 1, .no_tda7432 = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .has_dvb = 1, .no_gpioirq = 1, .has_remote = 1, }, [BTTV_BOARD_MATRIX_VISIONSQ] = { /* andre.schwarz@matrix-vision.de */ .name = "MATRIX Vision Sigma-SQ", .video_inputs = 16, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0x0, .muxsel = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3), .muxsel_hook = sigmaSQ_muxsel, .gpiomux = { 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MATRIX_VISIONSLC] = { /* andre.schwarz@matrix-vision.de */ .name = "MATRIX Vision Sigma-SLC", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0x0, .muxsel = MUXSEL(2, 2, 2, 2), .muxsel_hook = sigmaSLC_muxsel, .gpiomux = { 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, /* BTTV_BOARD_APAC_VIEWCOMP */ [BTTV_BOARD_APAC_VIEWCOMP] = { /* Attila Kondoros <attila.kondoros@chello.hu> */ /* bt878 TV + FM 0x00000000 subsystem ID */ .name = "APAC Viewcomp 878(AMAX)", .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 0xFF, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 2, 0, 0, 0 }, .gpiomute = 10, .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .has_remote = 1, /* miniremote works, see ir-kbd-gpio.c */ .has_radio = 1, /* not every card has radio */ }, /* ---- card 0x80 ---------------------------------- */ [BTTV_BOARD_DVICO_DVBT_LITE] = { /* Chris Pascoe <c.pascoe@itee.uq.edu.au> */ .name = "DViCO FusionHDTV DVB-T Lite", .no_msp34xx = 1, .no_tda7432 = 1, .pll = PLL_28, .no_video = 1, .has_dvb = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_VGEAR_MYVCD] = { /* Steven <photon38@pchome.com.tw> */ .name = "V-Gear MyVCD", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x3f, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = {0x31, 0x31, 0x31, 0x31 }, .gpiomute = 0x31, 
.no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_NTSC_M, .tuner_addr = ADDR_UNSET, .has_radio = 0, }, [BTTV_BOARD_SUPER_TV] = { /* Rick C <cryptdragoon@gmail.com> */ .name = "Super TV Tuner", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1, 0), .tuner_type = TUNER_PHILIPS_NTSC, .tuner_addr = ADDR_UNSET, .gpiomask = 0x008007, .gpiomux = { 0, 0x000001,0,0 }, .needs_tvaudio = 1, .has_radio = 1, }, [BTTV_BOARD_TIBET_CS16] = { /* Chris Fanning <video4linux@haydon.net> */ .name = "Tibet Systems 'Progress DVR' CS16", .video_inputs = 16, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .muxsel = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), .pll = PLL_28, .no_msp34xx = 1, .no_tda7432 = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .muxsel_hook = tibetCS16_muxsel, }, [BTTV_BOARD_KODICOM_4400R] = { /* Bill Brack <wbrack@mmm.com.hk> */ /* * Note that, because of the card's wiring, the "master" * BT878A chip (i.e. the one which controls the analog switch * and must use this card type) is the 2nd one detected. The * other 3 chips should use card type 0x85, whose description * follows this one. There is a EEPROM on the card (which is * connected to the I2C of one of those other chips), but is * not currently handled. There is also a facility for a * "monitor", which is also not currently implemented. 
*/ .name = "Kodicom 4400R (master)", .video_inputs = 16, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, /* GPIO bits 0-9 used for analog switch: * 00 - 03: camera selector * 04 - 06: channel (controller) selector * 07: data (1->on, 0->off) * 08: strobe * 09: reset * bit 16 is input from sync separator for the channel */ .gpiomask = 0x0003ff, .no_gpioirq = 1, .muxsel = MUXSEL(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), .pll = PLL_28, .no_msp34xx = 1, .no_tda7432 = 1, .muxsel_hook = kodicom4400r_muxsel, }, [BTTV_BOARD_KODICOM_4400R_SL] = { /* Bill Brack <wbrack@mmm.com.hk> */ /* Note that, for reasons unknown, the "master" BT878A chip (i.e. the * one which controls the analog switch, and must use the card type) * is the 2nd one detected. The other 3 chips should use this card * type */ .name = "Kodicom 4400R (slave)", .video_inputs = 16, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0x010000, .no_gpioirq = 1, .muxsel = MUXSEL(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), .pll = PLL_28, .no_msp34xx = 1, .no_tda7432 = 1, .muxsel_hook = kodicom4400r_muxsel, }, /* ---- card 0x86---------------------------------- */ [BTTV_BOARD_ADLINK_RTV24] = { /* Michael Henson <mhenson@clarityvi.com> */ /* Adlink RTV24 with special unlock codes */ .name = "Adlink RTV24", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1, 0), .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .pll = PLL_28, }, /* ---- card 0x87---------------------------------- */ [BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = { /* Michael Krufky <mkrufky@m1k.net> */ .name = "DViCO FusionHDTV 5 Lite", .tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */ .tuner_addr = ADDR_UNSET, .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1), .gpiomask = 0x00e00007, .gpiomux = { 0x00400005, 0, 0x00000001, 0 }, .gpiomute = 0x00c00007, .no_msp34xx = 1, .no_tda7432 = 1, 
.has_dvb = 1, }, /* ---- card 0x88---------------------------------- */ [BTTV_BOARD_ACORP_Y878F] = { /* Mauro Carvalho Chehab <mchehab@infradead.org> */ .name = "Acorp Y878F", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x01fe00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x001e00, 0, 0x018000, 0x014000 }, .gpiomute = 0x002000, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = TUNER_YMEC_TVF66T5_B_DFF, .tuner_addr = 0xc1 >>1, .has_radio = 1, }, /* ---- card 0x89 ---------------------------------- */ [BTTV_BOARD_CONCEPTRONIC_CTVFMI2] = { .name = "Conceptronic CTVFMi v2", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x001c0007, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 2 }, .gpiomute = 3, .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = TUNER_TENA_9533_DI, .tuner_addr = ADDR_UNSET, .has_remote = 1, .has_radio = 1, }, /* ---- card 0x8a ---------------------------------- */ [BTTV_BOARD_PV_BT878P_2E] = { .name = "Prolink Pixelview PV-BT878P+ (Rev.2E)", .video_inputs = 5, /* .audio_inputs= 1, */ .svhs = 3, .has_dig_in = 1, .gpiomask = 0x01fe00, .muxsel = MUXSEL(2, 3, 1, 1, 0), /* in 4 is digital */ /* .digital_mode= DIGITAL_MODE_CAMERA, */ .gpiomux = { 0x00400, 0x10400, 0x04400, 0x80000 }, .gpiomute = 0x12400, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_LG_PAL_FM, .tuner_addr = ADDR_UNSET, .has_remote = 1, }, /* ---- card 0x8b ---------------------------------- */ [BTTV_BOARD_PV_M4900] = { /* Sérgio Fortier <sergiofortier@yahoo.com.br> */ .name = "Prolink PixelView PlayTV MPEG2 PV-M4900", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x3f, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x21, 0x20, 0x24, 0x2c }, .gpiomute = 0x29, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_YMEC_TVF_5533MF, .tuner_addr = ADDR_UNSET, .has_radio = 1, .has_remote = 1, }, /* ---- card 0x8c ---------------------------------- */ /* Has four Bt878 chips behind a PCI bridge, each chip has: one external BNC 
composite input (mux 2) three internal composite inputs (unknown muxes) an 18-bit stereo A/D (CS5331A), which has: one external stereo unblanced (RCA) audio connection one (or 3?) internal stereo balanced (XLR) audio connection input is selected via gpio to a 14052B mux (mask=0x300, unbal=0x000, bal=0x100, ??=0x200,0x300) gain is controlled via an X9221A chip on the I2C bus @0x28 sample rate is controlled via gpio to an MK1413S (mask=0x3, 32kHz=0x0, 44.1kHz=0x1, 48kHz=0x2, ??=0x3) There is neither a tuner nor an svideo input. */ [BTTV_BOARD_OSPREY440] = { .name = "Osprey 440", .video_inputs = 4, /* .audio_inputs= 2, */ .svhs = NO_SVHS, .muxsel = MUXSEL(2, 3, 0, 1), /* 3,0,1 are guesses */ .gpiomask = 0x303, .gpiomute = 0x000, /* int + 32kHz */ .gpiomux = { 0, 0, 0x000, 0x100}, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, /* ---- card 0x8d ---------------------------------- */ [BTTV_BOARD_ASOUND_SKYEYE] = { .name = "Asound Skyeye PCTV", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 2, 0, 0, 0 }, .gpiomute = 1, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_NTSC, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x8e ---------------------------------- */ [BTTV_BOARD_SABRENT_TVFM] = { .name = "Sabrent TV-FM (bttv version)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x108007, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 100000, 100002, 100002, 100000 }, .no_msp34xx = 1, .no_tda7432 = 1, .pll = PLL_28, .tuner_type = TUNER_TNF_5335MF, .tuner_addr = ADDR_UNSET, .has_radio = 1, }, /* ---- card 0x8f ---------------------------------- */ [BTTV_BOARD_HAUPPAUGE_IMPACTVCB] = { .name = "Hauppauge ImpactVCB (bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0x0f, /* old: 7 */ .muxsel = MUXSEL(0, 1, 3, 2), /* Composite 0-3 */ .no_msp34xx = 1, .no_tda7432 = 1, .tuner_type = TUNER_ABSENT, 
.tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MACHTV_MAGICTV] = { /* Julian Calaby <julian.calaby@gmail.com> * Slightly different from original MachTV definition (0x60) * FIXME: RegSpy says gpiomask should be "0x001c800f", but it * stuffs up remote chip. Bug is a pin on the jaecs is not set * properly (methinks) causing no keyup bits being set */ .name = "MagicTV", /* rebranded MachTV */ .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 3 }, .gpiomute = 4, .tuner_type = TUNER_TEMIC_4009FR5_PAL, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, .has_remote = 1, }, [BTTV_BOARD_SSAI_SECURITY] = { .name = "SSAI Security Video Interface", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .muxsel = MUXSEL(0, 1, 2, 3), .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_SSAI_ULTRASOUND] = { .name = "SSAI Ultrasound Video Interface", .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = 1, .muxsel = MUXSEL(2, 0, 1, 3), .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x94---------------------------------- */ [BTTV_BOARD_DVICO_FUSIONHDTV_2] = { .name = "DViCO FusionHDTV 2", .tuner_type = TUNER_PHILIPS_FCV1236D, .tuner_addr = ADDR_UNSET, .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1), .gpiomask = 0x00e00007, .gpiomux = { 0x00400005, 0, 0x00000001, 0 }, .gpiomute = 0x00c00007, .no_msp34xx = 1, .no_tda7432 = 1, }, /* ---- card 0x95---------------------------------- */ [BTTV_BOARD_TYPHOON_TVTUNERPCI] = { .name = "Typhoon TV-Tuner PCI (50684)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x3014f, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x20001,0x10001, 0, 0 }, .gpiomute = 10, .needs_tvaudio = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_GEOVISION_GV600] = { /* emhn@usb.ve */ .name = "Geovision GV-600", .video_inputs = 16, /* .audio_inputs= 0, */ .svhs = 
NO_SVHS, .gpiomask = 0x0, .muxsel = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), .muxsel_hook = geovision_muxsel, .gpiomux = { 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_KOZUMI_KTV_01C] = { /* Mauro Lacy <mauro@lacy.com.ar> * Based on MagicTV and Conceptronic CONTVFMi */ .name = "Kozumi KTV-01C", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x008007, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 2 }, /* CONTVFMi */ .gpiomute = 3, /* CONTVFMi */ .needs_tvaudio = 0, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* TCL MK3 */ .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, .has_remote = 1, }, [BTTV_BOARD_ENLTV_FM_2] = { /* Encore TV Tuner Pro ENL TV-FM-2 Mauro Carvalho Chehab <mchehab@infradead.org */ .name = "Encore ENL TV-FM-2", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, /* bit 6 -> IR disabled bit 18/17 = 00 -> mute 01 -> enable external audio input 10 -> internal audio input (mono?) 
11 -> internal audio input */ .gpiomask = 0x060040, .muxsel = MUXSEL(2, 3, 3), .gpiomux = { 0x60000, 0x60000, 0x20000, 0x20000 }, .gpiomute = 0, .tuner_type = TUNER_TCL_MF02GIP_5N, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, .has_remote = 1, }, [BTTV_BOARD_VD012] = { /* D.Heer@Phytec.de */ .name = "PHYTEC VD-012 (bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0x00, .muxsel = MUXSEL(0, 2, 3, 1), .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_VD012_X1] = { /* D.Heer@Phytec.de */ .name = "PHYTEC VD-012-X1 (bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 3, .gpiomask = 0x00, .muxsel = MUXSEL(2, 3, 1), .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_VD012_X2] = { /* D.Heer@Phytec.de */ .name = "PHYTEC VD-012-X2 (bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 3, .gpiomask = 0x00, .muxsel = MUXSEL(3, 2, 1), .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .needs_tvaudio = 0, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_GEOVISION_GV800S] = { /* Bruno Christo <bchristo@inf.ufsm.br> * * GeoVision GV-800(S) has 4 Conexant Fusion 878A: * 1 audio input per BT878A = 4 audio inputs * 4 video inputs per BT878A = 16 video inputs * This is the first BT878A chip of the GV-800(S). It's the * "master" chip and it controls the video inputs through an * analog multiplexer (a CD22M3494) via some GPIO pins. The * slaves should use card type 0x9e (following this one). * There is a EEPROM on the card which is currently not handled. * The audio input is not working yet. 
*/ .name = "Geovision GV-800(S) (master)", .video_inputs = 4, /* .audio_inputs= 1, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0xf107f, .no_gpioirq = 1, .muxsel = MUXSEL(2, 2, 2, 2), .pll = PLL_28, .no_msp34xx = 1, .no_tda7432 = 1, .muxsel_hook = gv800s_muxsel, }, [BTTV_BOARD_GEOVISION_GV800S_SL] = { /* Bruno Christo <bchristo@inf.ufsm.br> * * GeoVision GV-800(S) has 4 Conexant Fusion 878A: * 1 audio input per BT878A = 4 audio inputs * 4 video inputs per BT878A = 16 video inputs * The 3 other BT878A chips are "slave" chips of the GV-800(S) * and should use this card type. * The audio input is not working yet. */ .name = "Geovision GV-800(S) (slave)", .video_inputs = 4, /* .audio_inputs= 1, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0x00, .no_gpioirq = 1, .muxsel = MUXSEL(2, 2, 2, 2), .pll = PLL_28, .no_msp34xx = 1, .no_tda7432 = 1, .muxsel_hook = gv800s_muxsel, }, [BTTV_BOARD_PV183] = { .name = "ProVideo PV183", /* 0x9f */ .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0, .muxsel = MUXSEL(2, 3), .gpiomux = { 0 }, .needs_tvaudio = 0, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, }; static const unsigned int bttv_num_tvcards = ARRAY_SIZE(bttv_tvcards); /* ----------------------------------------------------------------------- */ static unsigned char eeprom_data[256]; /* * identify card */ void __devinit bttv_idcard(struct bttv *btv) { unsigned int gpiobits; int i,type; unsigned short tmp; /* read PCI subsystem ID */ pci_read_config_word(btv->c.pci, PCI_SUBSYSTEM_ID, &tmp); btv->cardid = tmp << 16; pci_read_config_word(btv->c.pci, PCI_SUBSYSTEM_VENDOR_ID, &tmp); btv->cardid |= tmp; if (0 != btv->cardid && 0xffffffff != btv->cardid) { /* look for the card */ for (type = -1, i = 0; cards[i].id != 0; i++) if (cards[i].id == btv->cardid) type = i; if (type != -1) { /* found it */ printk(KERN_INFO "bttv%d: detected: %s 
[card=%d], " "PCI subsystem ID is %04x:%04x\n", btv->c.nr,cards[type].name,cards[type].cardnr, btv->cardid & 0xffff, (btv->cardid >> 16) & 0xffff); btv->c.type = cards[type].cardnr; } else { /* 404 */ printk(KERN_INFO "bttv%d: subsystem: %04x:%04x (UNKNOWN)\n", btv->c.nr, btv->cardid & 0xffff, (btv->cardid >> 16) & 0xffff); printk(KERN_DEBUG "please mail id, board name and " "the correct card= insmod option to linux-media@vger.kernel.org\n"); } } /* let the user override the autodetected type */ if (card[btv->c.nr] < bttv_num_tvcards) btv->c.type=card[btv->c.nr]; /* print which card config we are using */ printk(KERN_INFO "bttv%d: using: %s [card=%d,%s]\n",btv->c.nr, bttv_tvcards[btv->c.type].name, btv->c.type, card[btv->c.nr] < bttv_num_tvcards ? "insmod option" : "autodetected"); /* overwrite gpio stuff ?? */ if (UNSET == audioall && UNSET == audiomux[0]) return; if (UNSET != audiomux[0]) { gpiobits = 0; for (i = 0; i < ARRAY_SIZE(bttv_tvcards->gpiomux); i++) { bttv_tvcards[btv->c.type].gpiomux[i] = audiomux[i]; gpiobits |= audiomux[i]; } } else { gpiobits = audioall; for (i = 0; i < ARRAY_SIZE(bttv_tvcards->gpiomux); i++) { bttv_tvcards[btv->c.type].gpiomux[i] = audioall; } } bttv_tvcards[btv->c.type].gpiomask = (UNSET != gpiomask) ? gpiomask : gpiobits; printk(KERN_INFO "bttv%d: gpio config override: mask=0x%x, mux=", btv->c.nr,bttv_tvcards[btv->c.type].gpiomask); for (i = 0; i < ARRAY_SIZE(bttv_tvcards->gpiomux); i++) { printk("%s0x%x", i ? 
"," : "", bttv_tvcards[btv->c.type].gpiomux[i]); } printk("\n"); } /* * (most) board specific initialisations goes here */ /* Some Modular Technology cards have an eeprom, but no subsystem ID */ static void identify_by_eeprom(struct bttv *btv, unsigned char eeprom_data[256]) { int type = -1; if (0 == strncmp(eeprom_data,"GET MM20xPCTV",13)) type = BTTV_BOARD_MODTEC_205; else if (0 == strncmp(eeprom_data+20,"Picolo",7)) type = BTTV_BOARD_EURESYS_PICOLO; else if (eeprom_data[0] == 0x84 && eeprom_data[2]== 0) type = BTTV_BOARD_HAUPPAUGE; /* old bt848 */ if (-1 != type) { btv->c.type = type; printk("bttv%d: detected by eeprom: %s [card=%d]\n", btv->c.nr, bttv_tvcards[btv->c.type].name, btv->c.type); } } static void flyvideo_gpio(struct bttv *btv) { int gpio, has_remote, has_radio, is_capture_only; int is_lr90, has_tda9820_tda9821; int tuner_type = UNSET, ttype; gpio_inout(0xffffff, 0); udelay(8); /* without this we would see the 0x1800 mask */ gpio = gpio_read(); /* FIXME: must restore OUR_EN ??? */ /* all cards provide GPIO info, some have an additional eeprom * LR50: GPIO coding can be found lower right CP1 .. CP9 * CP9=GPIO23 .. CP1=GPIO15; when OPEN, the corresponding GPIO reads 1. * GPIO14-12: n.c. * LR90: GP9=GPIO23 .. GP1=GPIO15 (right above the bt878) * lowest 3 bytes are remote control codes (no handshake needed) * xxxFFF: No remote control chip soldered * xxxF00(LR26/LR50), xxxFE0(LR90): Remote control chip (LVA001 or CF45) soldered * Note: Some bits are Audio_Mask ! */ ttype = (gpio & 0x0f0000) >> 16; switch (ttype) { case 0x0: tuner_type = 2; /* NTSC, e.g. 
TPI8NSR11P */ break; case 0x2: tuner_type = 39; /* LG NTSC (newer TAPC series) TAPC-H701P */ break; case 0x4: tuner_type = 5; /* Philips PAL TPI8PSB02P, TPI8PSB12P, TPI8PSB12D or FI1216, FM1216 */ break; case 0x6: tuner_type = 37; /* LG PAL (newer TAPC series) TAPC-G702P */ break; case 0xC: tuner_type = 3; /* Philips SECAM(+PAL) FQ1216ME or FI1216MF */ break; default: printk(KERN_INFO "bttv%d: FlyVideo_gpio: unknown tuner type.\n", btv->c.nr); break; } has_remote = gpio & 0x800000; has_radio = gpio & 0x400000; /* unknown 0x200000; * unknown2 0x100000; */ is_capture_only = !(gpio & 0x008000); /* GPIO15 */ has_tda9820_tda9821 = !(gpio & 0x004000); is_lr90 = !(gpio & 0x002000); /* else LR26/LR50 (LR38/LR51 f. capture only) */ /* * gpio & 0x001000 output bit for audio routing */ if (is_capture_only) tuner_type = TUNER_ABSENT; /* No tuner present */ printk(KERN_INFO "bttv%d: FlyVideo Radio=%s RemoteControl=%s Tuner=%d gpio=0x%06x\n", btv->c.nr, has_radio ? "yes" : "no ", has_remote ? "yes" : "no ", tuner_type, gpio); printk(KERN_INFO "bttv%d: FlyVideo LR90=%s tda9821/tda9820=%s capture_only=%s\n", btv->c.nr, is_lr90 ? "yes" : "no ", has_tda9820_tda9821 ? "yes" : "no ", is_capture_only ? 
"yes" : "no "); if (tuner_type != UNSET) /* only set if known tuner autodetected, else let insmod option through */ btv->tuner_type = tuner_type; btv->has_radio = has_radio; /* LR90 Audio Routing is done by 2 hef4052, so Audio_Mask has 4 bits: 0x001c80 * LR26/LR50 only has 1 hef4052, Audio_Mask 0x000c00 * Audio options: from tuner, from tda9821/tda9821(mono,stereo,sap), from tda9874, ext., mute */ if (has_tda9820_tda9821) btv->audio_mode_gpio = lt9415_audio; /* todo: if(has_tda9874) btv->audio_mode_gpio = fv2000s_audio; */ } static int miro_tunermap[] = { 0,6,2,3, 4,5,6,0, 3,0,4,5, 5,2,16,1, 14,2,17,1, 4,1,4,3, 1,2,16,1, 4,4,4,4 }; static int miro_fmtuner[] = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,1, 1,1,1,1, 1,1,1,0, 0,0,0,0, 0,1,0,0 }; static void miro_pinnacle_gpio(struct bttv *btv) { int id,msp,gpio; char *info; gpio_inout(0xffffff, 0); gpio = gpio_read(); id = ((gpio>>10) & 63) -1; msp = bttv_I2CRead(btv, I2C_ADDR_MSP3400, "MSP34xx"); if (id < 32) { btv->tuner_type = miro_tunermap[id]; if (0 == (gpio & 0x20)) { btv->has_radio = 1; if (!miro_fmtuner[id]) { btv->has_matchbox = 1; btv->mbox_we = (1<<6); btv->mbox_most = (1<<7); btv->mbox_clk = (1<<8); btv->mbox_data = (1<<9); btv->mbox_mask = (1<<6)|(1<<7)|(1<<8)|(1<<9); } } else { btv->has_radio = 0; } if (-1 != msp) { if (btv->c.type == BTTV_BOARD_MIRO) btv->c.type = BTTV_BOARD_MIROPRO; if (btv->c.type == BTTV_BOARD_PINNACLE) btv->c.type = BTTV_BOARD_PINNACLEPRO; } printk(KERN_INFO "bttv%d: miro: id=%d tuner=%d radio=%s stereo=%s\n", btv->c.nr, id+1, btv->tuner_type, !btv->has_radio ? "no" : (btv->has_matchbox ? "matchbox" : "fmtuner"), (-1 == msp) ? 
"no" : "yes"); } else { /* new cards with microtune tuner */ id = 63 - id; btv->has_radio = 0; switch (id) { case 1: info = "PAL / mono"; btv->tda9887_conf = TDA9887_INTERCARRIER; break; case 2: info = "PAL+SECAM / stereo"; btv->has_radio = 1; btv->tda9887_conf = TDA9887_QSS; break; case 3: info = "NTSC / stereo"; btv->has_radio = 1; btv->tda9887_conf = TDA9887_QSS; break; case 4: info = "PAL+SECAM / mono"; btv->tda9887_conf = TDA9887_QSS; break; case 5: info = "NTSC / mono"; btv->tda9887_conf = TDA9887_INTERCARRIER; break; case 6: info = "NTSC / stereo"; btv->tda9887_conf = TDA9887_INTERCARRIER; break; case 7: info = "PAL / stereo"; btv->tda9887_conf = TDA9887_INTERCARRIER; break; default: info = "oops: unknown card"; break; } if (-1 != msp) btv->c.type = BTTV_BOARD_PINNACLEPRO; printk(KERN_INFO "bttv%d: pinnacle/mt: id=%d info=\"%s\" radio=%s\n", btv->c.nr, id, info, btv->has_radio ? "yes" : "no"); btv->tuner_type = TUNER_MT2032; } } /* GPIO21 L: Buffer aktiv, H: Buffer inaktiv */ #define LM1882_SYNC_DRIVE 0x200000L static void init_ids_eagle(struct bttv *btv) { gpio_inout(0xffffff,0xFFFF37); gpio_write(0x200020); /* flash strobe inverter ?! */ gpio_write(0x200024); /* switch sync drive off */ gpio_bits(LM1882_SYNC_DRIVE,LM1882_SYNC_DRIVE); /* set BT848 muxel to 2 */ btaor((2)<<5, ~(2<<5), BT848_IFORM); } /* Muxsel helper for the IDS Eagle. 
* the eagles does not use the standard muxsel-bits but
 * has its own multiplexer */
static void eagle_muxsel(struct bttv *btv, unsigned int input)
{
	/* input select happens via two GPIO lines */
	gpio_bits(3, input & 3); /* composite */

	/* set chroma ADC to sleep */
	btor(BT848_ADC_C_SLEEP, BT848_ADC);
	/* set to composite video */
	btand(~BT848_CONTROL_COMP, BT848_E_CONTROL);
	btand(~BT848_CONTROL_COMP, BT848_O_CONTROL);

	/* switch sync drive off */
	gpio_bits(LM1882_SYNC_DRIVE,LM1882_SYNC_DRIVE);
}

static void gvc1100_muxsel(struct bttv *btv, unsigned int input)
{
	/* per-input GPIO patterns for the external multiplexer */
	static const int masks[] = {0x30, 0x01, 0x12, 0x23};
	gpio_write(masks[input%4]);
}

/*
 * LMLBT4x initialization - to allow access to GPIO bits for
 * sensors input and alarms output
 *
 * GPIObit    | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
 * assignment | TI | O3|INx| O2| O1|IN4|IN3|IN2|IN1|   |   |
 *
 * IN - sensor inputs, INx - sensor inputs and TI XORed together
 * O1,O2,O3 - alarm outputs (relays)
 *
 * OUT ENABLE 1 1 0 . 1 1 0 0 . 0 0 0 0 = 0x6C0
 */
static void init_lmlbt4x(struct bttv *btv)
{
	printk(KERN_DEBUG "LMLBT4x init\n");
	btwrite(0x000000, BT848_GPIO_REG_INP);
	/* only the output-enable bits (0x6C0, see table above) are driven */
	gpio_inout(0xffffff, 0x0006C0);
	gpio_write(0x000000);
}

static void sigmaSQ_muxsel(struct bttv *btv, unsigned int input)
{
	unsigned int inmux = input % 8;
	gpio_inout( 0xf, 0xf );
	gpio_bits( 0xf, inmux );
}

static void sigmaSLC_muxsel(struct bttv *btv, unsigned int input)
{
	/* mux code lives in GPIO bits 10:9 */
	unsigned int inmux = input % 4;
	gpio_inout( 3<<9, 3<<9 );
	gpio_bits( 3<<9, inmux<<9 );
}

static void geovision_muxsel(struct bttv *btv, unsigned int input)
{
	unsigned int inmux = input % 16;
	gpio_inout(0xf, 0xf);
	gpio_bits(0xf, inmux);
}

/* ----------------------------------------------------------------------- */

static void bttv_reset_audio(struct bttv *btv)
{
	/*
	 * BT878A has a audio-reset register.
	 * 1. This register is an audio reset function but it is in
	 *    function-0 (video capture) address space.
	 * 2. It is enough to do this once per power-up of the card.
	 * 3. There is a typo in the Conexant doc -- it is not at
	 *    0x5B, but at 0x058.
(B is an odd-number, obviously a typo!).
	 * --//Shrikumar 030609
	 */
	if (btv->id != 878)
		return;

	if (bttv_debug)
		printk("bttv%d: BT878A ARESET\n",btv->c.nr);
	/* pulse the reset bit (bit 7 at 0x058), then release it again */
	btwrite((1<<7), 0x058);
	udelay(10);
	btwrite( 0, 0x058);
}

/* initialization part one -- before registering i2c bus */
void __devinit bttv_init_card1(struct bttv *btv)
{
	switch (btv->c.type) {
	case BTTV_BOARD_HAUPPAUGE:
	case BTTV_BOARD_HAUPPAUGE878:
		/* release the msp34xx reset line (pin differs per board) */
		boot_msp34xx(btv,5);
		break;
	case BTTV_BOARD_VOODOOTV_200:
	case BTTV_BOARD_VOODOOTV_FM:
		boot_msp34xx(btv,20);
		break;
	case BTTV_BOARD_AVERMEDIA98:
		boot_msp34xx(btv,11);
		break;
	case BTTV_BOARD_HAUPPAUGEPVR:
		/* upload the altera firmware before anything else */
		pvr_boot(btv);
		break;
	case BTTV_BOARD_TWINHAN_DST:
	case BTTV_BOARD_AVDVBT_771:
	case BTTV_BOARD_PINNACLESAT:
		btv->use_i2c_hw = 1;
		break;
	case BTTV_BOARD_ADLINK_RTV24:
		init_RTV24( btv );
		break;
	}
	if (!bttv_tvcards[btv->c.type].has_dvb)
		bttv_reset_audio(btv);
}

/* initialization part two -- after registering i2c bus */
void __devinit bttv_init_card2(struct bttv *btv)
{
	btv->tuner_type = UNSET;

	/* last-resort identification via eeprom signatures */
	if (BTTV_BOARD_UNKNOWN == btv->c.type) {
		bttv_readee(btv,eeprom_data,0xa0);
		identify_by_eeprom(btv,eeprom_data);
	}

	/* board-specific probing / eeprom parsing */
	switch (btv->c.type) {
	case BTTV_BOARD_MIRO:
	case BTTV_BOARD_MIROPRO:
	case BTTV_BOARD_PINNACLE:
	case BTTV_BOARD_PINNACLEPRO:
		/* miro/pinnacle */
		miro_pinnacle_gpio(btv);
		break;
	case BTTV_BOARD_FLYVIDEO_98:
	case BTTV_BOARD_MAXI:
	case BTTV_BOARD_LIFE_FLYKIT:
	case BTTV_BOARD_FLYVIDEO:
	case BTTV_BOARD_TYPHOON_TVIEW:
	case BTTV_BOARD_CHRONOS_VS2:
	case BTTV_BOARD_FLYVIDEO_98FM:
	case BTTV_BOARD_FLYVIDEO2000:
	case BTTV_BOARD_FLYVIDEO98EZ:
	case BTTV_BOARD_CONFERENCETV:
	case BTTV_BOARD_LIFETEC_9415:
		flyvideo_gpio(btv);
		break;
	case BTTV_BOARD_HAUPPAUGE:
	case BTTV_BOARD_HAUPPAUGE878:
	case BTTV_BOARD_HAUPPAUGEPVR:
		/* pick up some config infos from the eeprom */
		bttv_readee(btv,eeprom_data,0xa0);
		hauppauge_eeprom(btv);
		break;
	case BTTV_BOARD_AVERMEDIA98:
	case BTTV_BOARD_AVPHONE98:
		bttv_readee(btv,eeprom_data,0xa0);
		avermedia_eeprom(btv);
		break;
	case BTTV_BOARD_PXC200:
init_PXC200(btv); break; case BTTV_BOARD_PICOLO_TETRA_CHIP: picolo_tetra_init(btv); break; case BTTV_BOARD_VHX: btv->has_radio = 1; btv->has_matchbox = 1; btv->mbox_we = 0x20; btv->mbox_most = 0; btv->mbox_clk = 0x08; btv->mbox_data = 0x10; btv->mbox_mask = 0x38; break; case BTTV_BOARD_VOBIS_BOOSTAR: case BTTV_BOARD_TERRATV: terratec_active_radio_upgrade(btv); break; case BTTV_BOARD_MAGICTVIEW061: if (btv->cardid == 0x3002144f) { btv->has_radio=1; printk("bttv%d: radio detected by subsystem id (CPH05x)\n",btv->c.nr); } break; case BTTV_BOARD_STB2: if (btv->cardid == 0x3060121a) { /* Fix up entry for 3DFX VoodooTV 100, which is an OEM STB card variant. */ btv->has_radio=0; btv->tuner_type=TUNER_TEMIC_NTSC; } break; case BTTV_BOARD_OSPREY1x0: case BTTV_BOARD_OSPREY1x0_848: case BTTV_BOARD_OSPREY101_848: case BTTV_BOARD_OSPREY1x1: case BTTV_BOARD_OSPREY1x1_SVID: case BTTV_BOARD_OSPREY2xx: case BTTV_BOARD_OSPREY2x0_SVID: case BTTV_BOARD_OSPREY2x0: case BTTV_BOARD_OSPREY440: case BTTV_BOARD_OSPREY500: case BTTV_BOARD_OSPREY540: case BTTV_BOARD_OSPREY2000: bttv_readee(btv,eeprom_data,0xa0); osprey_eeprom(btv, eeprom_data); break; case BTTV_BOARD_IDS_EAGLE: init_ids_eagle(btv); break; case BTTV_BOARD_MODTEC_205: bttv_readee(btv,eeprom_data,0xa0); modtec_eeprom(btv); break; case BTTV_BOARD_LMLBT4: init_lmlbt4x(btv); break; case BTTV_BOARD_TIBET_CS16: tibetCS16_init(btv); break; case BTTV_BOARD_KODICOM_4400R: kodicom4400r_init(btv); break; case BTTV_BOARD_GEOVISION_GV800S: gv800s_init(btv); break; } /* pll configuration */ if (!(btv->id==848 && btv->revision==0x11)) { /* defaults from card list */ if (PLL_28 == bttv_tvcards[btv->c.type].pll) { btv->pll.pll_ifreq=28636363; btv->pll.pll_crystal=BT848_IFORM_XT0; } if (PLL_35 == bttv_tvcards[btv->c.type].pll) { btv->pll.pll_ifreq=35468950; btv->pll.pll_crystal=BT848_IFORM_XT1; } /* insmod options can override */ switch (pll[btv->c.nr]) { case 0: /* none */ btv->pll.pll_crystal = 0; btv->pll.pll_ifreq = 0; btv->pll.pll_ofreq = 
0; break; case 1: /* 28 MHz */ case 28: btv->pll.pll_ifreq = 28636363; btv->pll.pll_ofreq = 0; btv->pll.pll_crystal = BT848_IFORM_XT0; break; case 2: /* 35 MHz */ case 35: btv->pll.pll_ifreq = 35468950; btv->pll.pll_ofreq = 0; btv->pll.pll_crystal = BT848_IFORM_XT1; break; } } btv->pll.pll_current = -1; /* tuner configuration (from card list / autodetect / insmod option) */ if (UNSET != bttv_tvcards[btv->c.type].tuner_type) if (UNSET == btv->tuner_type) btv->tuner_type = bttv_tvcards[btv->c.type].tuner_type; if (UNSET != tuner[btv->c.nr]) btv->tuner_type = tuner[btv->c.nr]; if (btv->tuner_type == TUNER_ABSENT) printk(KERN_INFO "bttv%d: tuner absent\n", btv->c.nr); else if(btv->tuner_type == UNSET) printk(KERN_WARNING "bttv%d: tuner type unset\n", btv->c.nr); else printk(KERN_INFO "bttv%d: tuner type=%d\n", btv->c.nr, btv->tuner_type); if (autoload != UNSET) { printk(KERN_WARNING "bttv%d: the autoload option is obsolete.\n", btv->c.nr); printk(KERN_WARNING "bttv%d: use option msp3400, tda7432 or tvaudio to\n", btv->c.nr); printk(KERN_WARNING "bttv%d: override which audio module should be used.\n", btv->c.nr); } if (UNSET == btv->tuner_type) btv->tuner_type = TUNER_ABSENT; btv->dig = bttv_tvcards[btv->c.type].has_dig_in ? bttv_tvcards[btv->c.type].video_inputs - 1 : UNSET; btv->svhs = bttv_tvcards[btv->c.type].svhs == NO_SVHS ? 
UNSET : bttv_tvcards[btv->c.type].svhs; if (svhs[btv->c.nr] != UNSET) btv->svhs = svhs[btv->c.nr]; if (remote[btv->c.nr] != UNSET) btv->has_remote = remote[btv->c.nr]; if (bttv_tvcards[btv->c.type].has_radio) btv->has_radio = 1; if (bttv_tvcards[btv->c.type].has_remote) btv->has_remote = 1; if (!bttv_tvcards[btv->c.type].no_gpioirq) btv->gpioirq = 1; if (bttv_tvcards[btv->c.type].volume_gpio) btv->volume_gpio = bttv_tvcards[btv->c.type].volume_gpio; if (bttv_tvcards[btv->c.type].audio_mode_gpio) btv->audio_mode_gpio = bttv_tvcards[btv->c.type].audio_mode_gpio; if (btv->tuner_type == TUNER_ABSENT) return; /* no tuner or related drivers to load */ if (btv->has_saa6588 || saa6588[btv->c.nr]) { /* Probe for RDS receiver chip */ static const unsigned short addrs[] = { 0x20 >> 1, 0x22 >> 1, I2C_CLIENT_END }; struct v4l2_subdev *sd; sd = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "saa6588", 0, addrs); btv->has_saa6588 = (sd != NULL); } /* try to detect audio/fader chips */ /* First check if the user specified the audio chip via a module option. 
*/ switch (audiodev[btv->c.nr]) { case -1: return; /* do not load any audio module */ case 0: /* autodetect */ break; case 1: { /* The user specified that we should probe for msp3400 */ static const unsigned short addrs[] = { I2C_ADDR_MSP3400 >> 1, I2C_ADDR_MSP3400_ALT >> 1, I2C_CLIENT_END }; btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "msp3400", 0, addrs); if (btv->sd_msp34xx) return; goto no_audio; } case 2: { /* The user specified that we should probe for tda7432 */ static const unsigned short addrs[] = { I2C_ADDR_TDA7432 >> 1, I2C_CLIENT_END }; if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "tda7432", 0, addrs)) return; goto no_audio; } case 3: { /* The user specified that we should probe for tvaudio */ btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs()); if (btv->sd_tvaudio) return; goto no_audio; } default: printk(KERN_WARNING "bttv%d: unknown audiodev value!\n", btv->c.nr); return; } /* There were no overrides, so now we try to discover this through the card definition */ /* probe for msp3400 first: this driver can detect whether or not it really is a msp3400, so it will return NULL when the device found is really something else (e.g. a tea6300). */ if (!bttv_tvcards[btv->c.type].no_msp34xx) { btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "msp3400", 0, I2C_ADDRS(I2C_ADDR_MSP3400 >> 1)); } else if (bttv_tvcards[btv->c.type].msp34xx_alt) { btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "msp3400", 0, I2C_ADDRS(I2C_ADDR_MSP3400_ALT >> 1)); } /* If we found a msp34xx, then we're done. */ if (btv->sd_msp34xx) return; /* it might also be a tda7432. 
*/ if (!bttv_tvcards[btv->c.type].no_tda7432) { static const unsigned short addrs[] = { I2C_ADDR_TDA7432 >> 1, I2C_CLIENT_END }; if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "tda7432", 0, addrs)) return; } /* Now see if we can find one of the tvaudio devices. */ btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs()); if (btv->sd_tvaudio) return; no_audio: printk(KERN_WARNING "bttv%d: audio absent, no audio device found!\n", btv->c.nr); } /* initialize the tuner */ void __devinit bttv_init_tuner(struct bttv *btv) { int addr = ADDR_UNSET; if (ADDR_UNSET != bttv_tvcards[btv->c.type].tuner_addr) addr = bttv_tvcards[btv->c.type].tuner_addr; if (btv->tuner_type != TUNER_ABSENT) { struct tuner_setup tun_setup; /* Load tuner module before issuing tuner config call! */ if (bttv_tvcards[btv->c.type].has_radio) v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO)); v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD)); tun_setup.mode_mask = T_ANALOG_TV; tun_setup.type = btv->tuner_type; tun_setup.addr = addr; if (bttv_tvcards[btv->c.type].has_radio) tun_setup.mode_mask |= T_RADIO; bttv_call_all(btv, tuner, s_type_addr, &tun_setup); } if (btv->tda9887_conf) { struct v4l2_priv_tun_config tda9887_cfg; tda9887_cfg.tuner = TUNER_TDA9887; tda9887_cfg.priv = &btv->tda9887_conf; bttv_call_all(btv, tuner, s_config, &tda9887_cfg); } } /* ----------------------------------------------------------------------- */ static void modtec_eeprom(struct bttv *btv) { if( strncmp(&(eeprom_data[0x1e]),"Temic 4066 FY5",14) ==0) { btv->tuner_type=TUNER_TEMIC_4066FY5_PAL_I; printk("bttv%d: Modtec: Tuner autodetected by eeprom: %s\n", btv->c.nr,&eeprom_data[0x1e]); } else if (strncmp(&(eeprom_data[0x1e]),"Alps TSBB5",10) ==0) { 
btv->tuner_type=TUNER_ALPS_TSBB5_PAL_I;
		printk("bttv%d: Modtec: Tuner autodetected by eeprom: %s\n",
		       btv->c.nr,&eeprom_data[0x1e]);
	} else if (strncmp(&(eeprom_data[0x1e]),"Philips FM1246",14) ==0) {
		btv->tuner_type=TUNER_PHILIPS_NTSC;
		printk("bttv%d: Modtec: Tuner autodetected by eeprom: %s\n",
		       btv->c.nr,&eeprom_data[0x1e]);
	} else {
		printk("bttv%d: Modtec: Unknown TunerString: %s\n",
		       btv->c.nr,&eeprom_data[0x1e]);
	}
}

/* Parse the Hauppauge eeprom and adopt its tuner type / radio flag. */
static void __devinit hauppauge_eeprom(struct bttv *btv)
{
	struct tveeprom tv;

	tveeprom_hauppauge_analog(&btv->i2c_client, &tv, eeprom_data);
	btv->tuner_type = tv.tuner_type;
	btv->has_radio = tv.has_radio;

	printk("bttv%d: Hauppauge eeprom indicates model#%d\n",
	       btv->c.nr, tv.model);

	/*
	 * Some of the 878 boards have duplicate PCI IDs. Switch the board
	 * type based on model #.
	 */
	if(tv.model == 64900) {
		printk("bttv%d: Switching board type from %s to %s\n",
		       btv->c.nr,
		       bttv_tvcards[btv->c.type].name,
		       bttv_tvcards[BTTV_BOARD_HAUPPAUGE_IMPACTVCB].name);
		btv->c.type = BTTV_BOARD_HAUPPAUGE_IMPACTVCB;
	}
}

/* Detect the Terratec Active Radio Upgrade daughterboard by writing a
 * known value to the tea5757 tuner and reading it back. */
static int terratec_active_radio_upgrade(struct bttv *btv)
{
	int freq;

	btv->has_radio    = 1;
	btv->has_matchbox = 1;
	/* GPIO lines wired to the tea5757 */
	btv->mbox_we      = 0x10;
	btv->mbox_most    = 0x20;
	btv->mbox_clk     = 0x08;
	btv->mbox_data    = 0x04;
	btv->mbox_mask    = 0x3c;

	btv->mbox_iow     = 1 << 8;
	btv->mbox_ior     = 1 << 9;
	btv->mbox_csel    = 1 << 10;

	/* 88 MHz in 62.5 kHz steps; 5*freq+0x358 == 0x1ed8 */
	freq=88000/62.5;
	tea5757_write(btv, 5 * freq + 0x358); /* write 0x1ed8 */
	if (0x1ed8 == tea5757_read(btv)) {
		printk("bttv%d: Terratec Active Radio Upgrade found.\n",
		       btv->c.nr);
		btv->has_radio    = 1;
		btv->has_saa6588  = 1;
		btv->has_matchbox = 1;
	} else {
		btv->has_radio    = 0;
		btv->has_matchbox = 0;
	}
	return 0;
}

/* ----------------------------------------------------------------------- */

/*
 * minimal bootstrap for the WinTV/PVR -- upload altera firmware.
 *
 * The hcwamc.rbf firmware file is on the Hauppauge driver CD. Have
 * a look at Pvr/pvr45xxx.EXE (self-extracting zip archive, can be
 * unpacked with unzip).
*/

#define PVR_GPIO_DELAY		10

#define BTTV_ALT_DATA		0x000001
#define BTTV_ALT_DCLK		0x100000
#define BTTV_ALT_NCONFIG	0x800000

/* Bit-bang the altera firmware image into the FPGA over GPIO: pulse
 * NCONFIG to start configuration, then clock each byte out LSB-first
 * on the DATA line with DCLK. */
static int __devinit pvr_altera_load(struct bttv *btv, const u8 *micro,
				     u32 microlen)
{
	u32 n;
	u8 bits;
	int i;

	gpio_inout(0xffffff,BTTV_ALT_DATA|BTTV_ALT_DCLK|BTTV_ALT_NCONFIG);
	gpio_write(0);
	udelay(PVR_GPIO_DELAY);

	gpio_write(BTTV_ALT_NCONFIG);
	udelay(PVR_GPIO_DELAY);

	for (n = 0; n < microlen; n++) {
		bits = micro[n];
		for (i = 0 ; i < 8 ; i++) {
			gpio_bits(BTTV_ALT_DCLK,0);
			if (bits & 0x01)
				gpio_bits(BTTV_ALT_DATA,BTTV_ALT_DATA);
			else
				gpio_bits(BTTV_ALT_DATA,0);
			/* data is latched on the rising DCLK edge */
			gpio_bits(BTTV_ALT_DCLK,BTTV_ALT_DCLK);
			bits >>= 1;
		}
	}
	gpio_bits(BTTV_ALT_DCLK,0);
	udelay(PVR_GPIO_DELAY);

	/* begin Altera init loop (Not necessary,but doesn't hurt) */
	for (i = 0 ; i < 30 ; i++) {
		gpio_bits(BTTV_ALT_DCLK,0);
		gpio_bits(BTTV_ALT_DCLK,BTTV_ALT_DCLK);
	}
	gpio_bits(BTTV_ALT_DCLK,0);
	return 0;
}

/* Fetch hcwamc.rbf via the firmware loader and upload it. */
static int __devinit pvr_boot(struct bttv *btv)
{
	const struct firmware *fw_entry;
	int rc;

	rc = request_firmware(&fw_entry, "hcwamc.rbf", &btv->c.pci->dev);
	if (rc != 0) {
		printk(KERN_WARNING "bttv%d: no altera firmware [via hotplug]\n",
		       btv->c.nr);
		return rc;
	}
	rc = pvr_altera_load(btv, fw_entry->data, fw_entry->size);
	printk(KERN_INFO "bttv%d: altera firmware upload %s\n",
	       btv->c.nr, (rc < 0) ? "failed" : "ok");
	release_firmware(fw_entry);
	return rc;
}

/* ----------------------------------------------------------------------- */
/* some osprey specific stuff */

static void __devinit osprey_eeprom(struct bttv *btv, const u8 ee[256])
{
	int i;
	u32 serial = 0;
	int cardid = -1;

	/* This code will never actually get called in this case.... */
	if (btv->c.type == BTTV_BOARD_UNKNOWN) {
		/* this might be an antique...
check for MMAC label in eeprom */ if (!strncmp(ee, "MMAC", 4)) { u8 checksum = 0; for (i = 0; i < 21; i++) checksum += ee[i]; if (checksum != ee[21]) return; cardid = BTTV_BOARD_OSPREY1x0_848; for (i = 12; i < 21; i++) serial *= 10, serial += ee[i] - '0'; } } else { unsigned short type; for (i = 4*16; i < 8*16; i += 16) { u16 checksum = ip_compute_csum(ee + i, 16); if ((checksum&0xff) + (checksum>>8) == 0xff) break; } if (i >= 8*16) return; ee += i; /* found a valid descriptor */ type = get_unaligned_be16((__be16 *)(ee+4)); switch(type) { /* 848 based */ case 0x0004: cardid = BTTV_BOARD_OSPREY1x0_848; break; case 0x0005: cardid = BTTV_BOARD_OSPREY101_848; break; /* 878 based */ case 0x0012: case 0x0013: cardid = BTTV_BOARD_OSPREY1x0; break; case 0x0014: case 0x0015: cardid = BTTV_BOARD_OSPREY1x1; break; case 0x0016: case 0x0017: case 0x0020: cardid = BTTV_BOARD_OSPREY1x1_SVID; break; case 0x0018: case 0x0019: case 0x001E: case 0x001F: cardid = BTTV_BOARD_OSPREY2xx; break; case 0x001A: case 0x001B: cardid = BTTV_BOARD_OSPREY2x0_SVID; break; case 0x0040: cardid = BTTV_BOARD_OSPREY500; break; case 0x0050: case 0x0056: cardid = BTTV_BOARD_OSPREY540; /* bttv_osprey_540_init(btv); */ break; case 0x0060: case 0x0070: case 0x00A0: cardid = BTTV_BOARD_OSPREY2x0; /* enable output on select control lines */ gpio_inout(0xffffff,0x000303); break; case 0x00D8: cardid = BTTV_BOARD_OSPREY440; break; default: /* unknown...leave generic, but get serial # */ printk(KERN_INFO "bttv%d: " "osprey eeprom: unknown card type 0x%04x\n", btv->c.nr, type); break; } serial = get_unaligned_be32((__be32 *)(ee+6)); } printk(KERN_INFO "bttv%d: osprey eeprom: card=%d '%s' serial=%u\n", btv->c.nr, cardid, cardid>0 ? 
bttv_tvcards[cardid].name : "Unknown", serial); if (cardid<0 || btv->c.type == cardid) return; /* card type isn't set correctly */ if (card[btv->c.nr] < bttv_num_tvcards) { printk(KERN_WARNING "bttv%d: osprey eeprom: " "Not overriding user specified card type\n", btv->c.nr); } else { printk(KERN_INFO "bttv%d: osprey eeprom: " "Changing card type from %d to %d\n", btv->c.nr, btv->c.type, cardid); btv->c.type = cardid; } } /* ----------------------------------------------------------------------- */ /* AVermedia specific stuff, from bktr_card.c */ static int tuner_0_table[] = { TUNER_PHILIPS_NTSC, TUNER_PHILIPS_PAL /* PAL-BG*/, TUNER_PHILIPS_PAL, TUNER_PHILIPS_PAL /* PAL-I*/, TUNER_PHILIPS_PAL, TUNER_PHILIPS_PAL, TUNER_PHILIPS_SECAM, TUNER_PHILIPS_SECAM, TUNER_PHILIPS_SECAM, TUNER_PHILIPS_PAL, TUNER_PHILIPS_FM1216ME_MK3 }; static int tuner_1_table[] = { TUNER_TEMIC_NTSC, TUNER_TEMIC_PAL, TUNER_TEMIC_PAL, TUNER_TEMIC_PAL, TUNER_TEMIC_PAL, TUNER_TEMIC_PAL, TUNER_TEMIC_4012FY5, TUNER_TEMIC_4012FY5, /* TUNER_TEMIC_SECAM */ TUNER_TEMIC_4012FY5, TUNER_TEMIC_PAL}; static void __devinit avermedia_eeprom(struct bttv *btv) { int tuner_make, tuner_tv_fm, tuner_format, tuner_type = 0; tuner_make = (eeprom_data[0x41] & 0x7); tuner_tv_fm = (eeprom_data[0x41] & 0x18) >> 3; tuner_format = (eeprom_data[0x42] & 0xf0) >> 4; btv->has_remote = (eeprom_data[0x42] & 0x01); if (tuner_make == 0 || tuner_make == 2) if (tuner_format <= 0x0a) tuner_type = tuner_0_table[tuner_format]; if (tuner_make == 1) if (tuner_format <= 9) tuner_type = tuner_1_table[tuner_format]; if (tuner_make == 4) if (tuner_format == 0x09) tuner_type = TUNER_LG_NTSC_NEW_TAPC; /* TAPC-G702P */ printk(KERN_INFO "bttv%d: Avermedia eeprom[0x%02x%02x]: tuner=", btv->c.nr, eeprom_data[0x41], eeprom_data[0x42]); if (tuner_type) { btv->tuner_type = tuner_type; printk(KERN_CONT "%d", tuner_type); } else printk(KERN_CONT "Unknown type"); printk(KERN_CONT " radio:%s remote control:%s\n", tuner_tv_fm ? 
"yes" : "no", btv->has_remote ? "yes" : "no"); } /* * For Voodoo TV/FM and Voodoo 200. These cards' tuners use a TDA9880 * analog demod, which is not I2C controlled like the newer and more common * TDA9887 series. Instead is has two tri-state input pins, S0 and S1, * that control the IF for the video and audio. Apparently, bttv GPIO * 0x10000 is connected to S0. S0 low selects a 38.9 MHz VIF for B/G/D/K/I * (i.e., PAL) while high selects 45.75 MHz for M/N (i.e., NTSC). */ u32 bttv_tda9880_setnorm(struct bttv *btv, u32 gpiobits) { if (btv->audio == TVAUDIO_INPUT_TUNER) { if (bttv_tvnorms[btv->tvnorm].v4l2_id & V4L2_STD_MN) gpiobits |= 0x10000; else gpiobits &= ~0x10000; } gpio_bits(bttv_tvcards[btv->c.type].gpiomask, gpiobits); return gpiobits; } /* * reset/enable the MSP on some Hauppauge cards * Thanks to Kyösti Mälkki (kmalkki@cc.hut.fi)! * * Hauppauge: pin 5 * Voodoo: pin 20 */ static void __devinit boot_msp34xx(struct bttv *btv, int pin) { int mask = (1 << pin); gpio_inout(mask,mask); gpio_bits(mask,0); mdelay(2); udelay(500); gpio_bits(mask,mask); if (bttv_gpio) bttv_gpio_tracking(btv,"msp34xx"); if (bttv_verbose) printk(KERN_INFO "bttv%d: Hauppauge/Voodoo msp34xx: reset line " "init [%d]\n", btv->c.nr, pin); } /* ----------------------------------------------------------------------- */ /* Imagenation L-Model PXC200 Framegrabber */ /* This is basically the same procedure as * used by Alessandro Rubini in his pxc200 * driver, but using BTTV functions */ static void __devinit init_PXC200(struct bttv *btv) { static int vals[] __devinitdata = { 0x08, 0x09, 0x0a, 0x0b, 0x0d, 0x0d, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x00 }; unsigned int i; int tmp; u32 val; /* Initialise GPIO-connevted stuff */ gpio_inout(0xffffff, (1<<13)); gpio_write(0); udelay(3); gpio_write(1<<13); /* GPIO inputs are pulled up, so no need to drive * reset pin any longer */ gpio_bits(0xffffff, 0); if (bttv_gpio) bttv_gpio_tracking(btv,"pxc200"); /* we could/should try and reset/control the AD 
pots? but right now we simply turned off the crushing. Without this the AGC drifts drifts remember the EN is reverse logic --> setting BT848_ADC_AGC_EN disable the AGC tboult@eecs.lehigh.edu */ btwrite(BT848_ADC_RESERVED|BT848_ADC_AGC_EN, BT848_ADC); /* Initialise MAX517 DAC */ printk(KERN_INFO "Setting DAC reference voltage level ...\n"); bttv_I2CWrite(btv,0x5E,0,0x80,1); /* Initialise 12C508 PIC */ /* The I2CWrite and I2CRead commmands are actually to the * same chips - but the R/W bit is included in the address * argument so the numbers are different */ printk(KERN_INFO "Initialising 12C508 PIC chip ...\n"); /* First of all, enable the clock line. This is used in the PXC200-F */ val = btread(BT848_GPIO_DMA_CTL); val |= BT848_GPIO_DMA_CTL_GPCLKMODE; btwrite(val, BT848_GPIO_DMA_CTL); /* Then, push to 0 the reset pin long enough to reset the * * device same as above for the reset line, but not the same * value sent to the GPIO-connected stuff * which one is the good one? */ gpio_inout(0xffffff,(1<<2)); gpio_write(0); udelay(10); gpio_write(1<<2); for (i = 0; i < ARRAY_SIZE(vals); i++) { tmp=bttv_I2CWrite(btv,0x1E,0,vals[i],1); if (tmp != -1) { printk(KERN_INFO "I2C Write(%2.2x) = %i\nI2C Read () = %2.2x\n\n", vals[i],tmp,bttv_I2CRead(btv,0x1F,NULL)); } } printk(KERN_INFO "PXC200 Initialised.\n"); } /* ----------------------------------------------------------------------- */ /* * The Adlink RTV-24 (aka Angelo) has some special initialisation to unlock * it. This apparently involves the following procedure for each 878 chip: * * 1) write 0x00C3FEFF to the GPIO_OUT_EN register * * 2) write to GPIO_DATA * - 0x0E * - sleep 1ms * - 0x10 + 0x0E * - sleep 10ms * - 0x0E * read from GPIO_DATA into buf (uint_32) * - if ( data>>18 & 0x01 != 0) || ( buf>>19 & 0x01 != 1 ) * error. ERROR_CPLD_Check_Failed stop. 
* * 3) write to GPIO_DATA * - write 0x4400 + 0x0E * - sleep 10ms * - write 0x4410 + 0x0E * - sleep 1ms * - write 0x0E * read from GPIO_DATA into buf (uint_32) * - if ( buf>>18 & 0x01 ) || ( buf>>19 & 0x01 != 0 ) * error. ERROR_CPLD_Check_Failed. */ /* ----------------------------------------------------------------------- */ static void init_RTV24 (struct bttv *btv) { uint32_t dataRead = 0; long watchdog_value = 0x0E; printk (KERN_INFO "bttv%d: Adlink RTV-24 initialisation in progress ...\n", btv->c.nr); btwrite (0x00c3feff, BT848_GPIO_OUT_EN); btwrite (0 + watchdog_value, BT848_GPIO_DATA); msleep (1); btwrite (0x10 + watchdog_value, BT848_GPIO_DATA); msleep (10); btwrite (0 + watchdog_value, BT848_GPIO_DATA); dataRead = btread (BT848_GPIO_DATA); if ((((dataRead >> 18) & 0x01) != 0) || (((dataRead >> 19) & 0x01) != 1)) { printk (KERN_INFO "bttv%d: Adlink RTV-24 initialisation(1) ERROR_CPLD_Check_Failed (read %d)\n", btv->c.nr, dataRead); } btwrite (0x4400 + watchdog_value, BT848_GPIO_DATA); msleep (10); btwrite (0x4410 + watchdog_value, BT848_GPIO_DATA); msleep (1); btwrite (watchdog_value, BT848_GPIO_DATA); msleep (1); dataRead = btread (BT848_GPIO_DATA); if ((((dataRead >> 18) & 0x01) != 0) || (((dataRead >> 19) & 0x01) != 0)) { printk (KERN_INFO "bttv%d: Adlink RTV-24 initialisation(2) ERROR_CPLD_Check_Failed (read %d)\n", btv->c.nr, dataRead); return; } printk (KERN_INFO "bttv%d: Adlink RTV-24 initialisation complete.\n", btv->c.nr); } /* ----------------------------------------------------------------------- */ /* Miro Pro radio stuff -- the tea5757 is connected to some GPIO ports */ /* * Copyright (c) 1999 Csaba Halasz <qgehali@uni-miskolc.hu> * This code is placed under the terms of the GNU General Public License * * Brutally hacked by Dan Sheridan <dan.sheridan@contact.org.uk> djs52 8/3/00 */ static void bus_low(struct bttv *btv, int bit) { if (btv->mbox_ior) { gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel, btv->mbox_ior | btv->mbox_iow | 
btv->mbox_csel); udelay(5); } gpio_bits(bit,0); udelay(5); if (btv->mbox_ior) { gpio_bits(btv->mbox_iow | btv->mbox_csel, 0); udelay(5); } } static void bus_high(struct bttv *btv, int bit) { if (btv->mbox_ior) { gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel, btv->mbox_ior | btv->mbox_iow | btv->mbox_csel); udelay(5); } gpio_bits(bit,bit); udelay(5); if (btv->mbox_ior) { gpio_bits(btv->mbox_iow | btv->mbox_csel, 0); udelay(5); } } static int bus_in(struct bttv *btv, int bit) { if (btv->mbox_ior) { gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel, btv->mbox_ior | btv->mbox_iow | btv->mbox_csel); udelay(5); gpio_bits(btv->mbox_iow | btv->mbox_csel, 0); udelay(5); } return gpio_read() & (bit); } /* TEA5757 register bits */ #define TEA_FREQ 0:14 #define TEA_BUFFER 15:15 #define TEA_SIGNAL_STRENGTH 16:17 #define TEA_PORT1 18:18 #define TEA_PORT0 19:19 #define TEA_BAND 20:21 #define TEA_BAND_FM 0 #define TEA_BAND_MW 1 #define TEA_BAND_LW 2 #define TEA_BAND_SW 3 #define TEA_MONO 22:22 #define TEA_ALLOW_STEREO 0 #define TEA_FORCE_MONO 1 #define TEA_SEARCH_DIRECTION 23:23 #define TEA_SEARCH_DOWN 0 #define TEA_SEARCH_UP 1 #define TEA_STATUS 24:24 #define TEA_STATUS_TUNED 0 #define TEA_STATUS_SEARCHING 1 /* Low-level stuff */ static int tea5757_read(struct bttv *btv) { unsigned long timeout; int value = 0; int i; /* better safe than sorry */ gpio_inout(btv->mbox_mask, btv->mbox_clk | btv->mbox_we); if (btv->mbox_ior) { gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel, btv->mbox_ior | btv->mbox_iow | btv->mbox_csel); udelay(5); } if (bttv_gpio) bttv_gpio_tracking(btv,"tea5757 read"); bus_low(btv,btv->mbox_we); bus_low(btv,btv->mbox_clk); udelay(10); timeout= jiffies + msecs_to_jiffies(1000); /* wait for DATA line to go low; error if it doesn't */ while (bus_in(btv,btv->mbox_data) && time_before(jiffies, timeout)) schedule(); if (bus_in(btv,btv->mbox_data)) { printk(KERN_WARNING "bttv%d: tea5757: read timeout\n",btv->c.nr); return -1; } 
dprintk("bttv%d: tea5757:",btv->c.nr); for (i = 0; i < 24; i++) { udelay(5); bus_high(btv,btv->mbox_clk); udelay(5); dprintk("%c",(bus_in(btv,btv->mbox_most) == 0)?'T':'-'); bus_low(btv,btv->mbox_clk); value <<= 1; value |= (bus_in(btv,btv->mbox_data) == 0)?0:1; /* MSB first */ dprintk("%c", (bus_in(btv,btv->mbox_most) == 0)?'S':'M'); } dprintk("\nbttv%d: tea5757: read 0x%X\n", btv->c.nr, value); return value; } static int tea5757_write(struct bttv *btv, int value) { int i; int reg = value; gpio_inout(btv->mbox_mask, btv->mbox_clk | btv->mbox_we | btv->mbox_data); if (btv->mbox_ior) { gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel, btv->mbox_ior | btv->mbox_iow | btv->mbox_csel); udelay(5); } if (bttv_gpio) bttv_gpio_tracking(btv,"tea5757 write"); dprintk("bttv%d: tea5757: write 0x%X\n", btv->c.nr, value); bus_low(btv,btv->mbox_clk); bus_high(btv,btv->mbox_we); for (i = 0; i < 25; i++) { if (reg & 0x1000000) bus_high(btv,btv->mbox_data); else bus_low(btv,btv->mbox_data); reg <<= 1; bus_high(btv,btv->mbox_clk); udelay(10); bus_low(btv,btv->mbox_clk); udelay(10); } bus_low(btv,btv->mbox_we); /* unmute !!! */ return 0; } void tea5757_set_freq(struct bttv *btv, unsigned short freq) { dprintk("tea5757_set_freq %d\n",freq); tea5757_write(btv, 5 * freq + 0x358); /* add 10.7MHz (see docs) */ } /* RemoteVision MX (rv605) muxsel helper [Miguel Freitas] * * This is needed because rv605 don't use a normal multiplex, but a crosspoint * switch instead (CD22M3494E). This IC can have multiple active connections * between Xn (input) and Yn (output) pins. We need to clear any existing * connection prior to establish a new one, pulsing the STROBE pin. * * The board hardwire Y0 (xpoint) to MUX1 and MUXOUT to Yin. 
 * GPIO pins are wired as:
 *  GPIO[0:3] - AX[0:3] (xpoint) - P1[0:3] (microcontroller)
 *  GPIO[4:6] - AY[0:2] (xpoint) - P1[4:6] (microcontroller)
 *  GPIO[7]   - DATA (xpoint)    - P1[7] (microcontroller)
 *  GPIO[8]   -                  - P3[5] (microcontroller)
 *  GPIO[9]   - RESET (xpoint)   - P3[6] (microcontroller)
 *  GPIO[10]  - STROBE (xpoint)  - P3[7] (microcontroller)
 *  GPINTR    -                  - P3[4] (microcontroller)
 *
 * The microcontroller is an 80C32 like. It should be possible to change
 * xpoint configuration either directly (as we are doing) or using the
 * microcontroller which is also wired to I2C interface. I have no further
 * info on the microcontroller features, one would need to disassemble the
 * firmware.
 * note: the vendor refused to give any information on this product, all
 *       that stuff was found using a multimeter! :)
 */
/*
 * Select one of the 16 inputs on the CD22M3494E crosspoint switch:
 * drop all existing connections by pulsing RESET, then latch the new
 * X (input) / Y (output) pair with STROBE.
 */
static void rv605_muxsel(struct bttv *btv, unsigned int input)
{
	/* crosspoint X-address for each logical input number */
	static const u8 muxgpio[] = {
		0x3, 0x1, 0x2, 0x4, 0xf, 0x7, 0xe, 0x0,
		0xd, 0xb, 0xc, 0x6, 0x9, 0x5, 0x8, 0xa
	};

	gpio_bits(0x07f, muxgpio[input]);

	/* reset all connections */
	gpio_bits(0x200,0x200);
	mdelay(1);
	gpio_bits(0x200,0x000);
	mdelay(1);

	/* create a new connection */
	gpio_bits(0x480,0x480);
	mdelay(1);
	gpio_bits(0x480,0x080);
	mdelay(1);
}

/* Tibet Systems 'Progress DVR' CS16 muxsel helper [Chris Fanning]
 *
 * The CS16 (available on eBay cheap) is a PCI board with four Fusion
 * 878A chips, a PCI bridge, an Atmel microcontroller, four sync separator
 * chips, ten eight input analog multiplexors, a not chip and a few
 * other components.
 *
 * 16 inputs on a secondary bracket are provided and can be selected
 * from each of the four capture chips.  Two of the eight input
 * multiplexors are used to select from any of the 16 input signals.
 *
 * Unsupported hardware capabilities:
 *  . A video output monitor on the secondary bracket can be selected from
 *    one of the 878A chips.
 *  . Another passthrough but I haven't spent any time investigating it.
 *  .
 Digital I/O (logic level connected to GPIO) is available from an
 * onboard header.
 *
 * The on chip input mux should always be set to 2.
 * GPIO[16:19] - Video input selection
 * GPIO[0:3]   - Video output monitor select (only available from one 878A)
 * GPIO[?:?]   - Digital I/O.
 *
 * There is an ATMEL microcontroller with an 8031 core on board.  I have not
 * determined what function (if any) it provides.  With the microcontroller
 * and sync separator chips a guess is that it might have to do with video
 * switching and maybe some digital I/O.
 */

/* Select one of the 16 bracket inputs; the selector sits on GPIO 16-19. */
static void tibetCS16_muxsel(struct bttv *btv, unsigned int input)
{
	/* video mux */
	gpio_bits(0x0f0000, input << 16);
}

/* One-time GPIO direction/level setup for the CS16. */
static void tibetCS16_init(struct bttv *btv)
{
	/* enable gpio bits, mask obtained via btSpy */
	gpio_inout(0xffffff, 0x0f7fff);
	gpio_write(0x0f7fff);
}

/*
 * The following routines for the Kodicom-4400r get a little mind-twisting.
 * There is a "master" controller and three "slave" controllers, together
 * an analog switch which connects any of 16 cameras to any of the BT87A's.
 * The analog switch is controlled by the "master", but the detection order
 * of the four BT878A chips is in an order which I just don't understand.
 * The "master" is actually the second controller to be detected.  The
 * logic on the board uses logical numbers for the 4 controllers, but
 * those numbers are different from the detection sequence.  When working
 * with the analog switch, we need to "map" from the detection sequence
 * over to the board's logical controller number.  This mapping sequence
 * is {3, 0, 2, 1}, i.e. the first controller to be detected is logical
 * unit 3, the second (which is the master) is logical unit 0, etc.
 * We need to maintain the status of the analog switch (which of the 16
 * cameras is connected to which of the 4 controllers).  Rather than
 * add to the bttv structure for this, we use the data reserved for
 * the mbox (unused for this card type).
 */
/*
 * First a routine to set the analog switch, which controls which camera
 * is routed to which controller.  The switch comprises an X-address
 * (gpio bits 0-3, representing the camera, ranging from 0-15), and a
 * Y-address (gpio bits 4-6, representing the controller, ranging from 0-3).
 * A data value (gpio bit 7) of '1' enables the switch, and '0' disables
 * the switch.  A STROBE bit (gpio bit 8) latches the data value into the
 * specified address.  The idea is to set the address and data, then bring
 * STROBE high, and finally bring STROBE back to low.
 */
static void kodicom4400r_write(struct bttv *btv,
			       unsigned char xaddr,
			       unsigned char yaddr,
			       unsigned char data)
{
	unsigned int udata;

	/* NOTE(review): the code drives only gpio bits 4-5 (yaddr & 3)
	 * although the comment above says bits 4-6; two bits suffice for
	 * four controllers. */
	udata = (data << 7) | ((yaddr&3) << 4) | (xaddr&0xf);
	gpio_bits(0x1ff, udata);		/* write ADDR and DAT */
	gpio_bits(0x1ff, udata | (1 << 8));	/* strobe high */
	gpio_bits(0x1ff, udata);		/* strobe low */
}

/*
 * Next the mux select.  Both the "master" and "slave" 'cards' (controllers)
 * use this routine.  The routine finds the "master" for the card, maps
 * the controller number from the detected position over to the logical
 * number, writes the appropriate data to the analog switch, and housekeeps
 * the local copy of the switch information.  The parameter 'input' is the
 * requested camera number (0 - 15).
*/ static void kodicom4400r_muxsel(struct bttv *btv, unsigned int input) { char *sw_status; int xaddr, yaddr; struct bttv *mctlr; static unsigned char map[4] = {3, 0, 2, 1}; mctlr = master[btv->c.nr]; if (mctlr == NULL) { /* ignore if master not yet detected */ return; } yaddr = (btv->c.nr - mctlr->c.nr + 1) & 3; /* the '&' is for safety */ yaddr = map[yaddr]; sw_status = (char *)(&mctlr->mbox_we); xaddr = input & 0xf; /* Check if the controller/camera pair has changed, else ignore */ if (sw_status[yaddr] != xaddr) { /* "open" the old switch, "close" the new one, save the new */ kodicom4400r_write(mctlr, sw_status[yaddr], yaddr, 0); sw_status[yaddr] = xaddr; kodicom4400r_write(mctlr, xaddr, yaddr, 1); } } /* * During initialisation, we need to reset the analog switch. We * also preset the switch to map the 4 connectors on the card to the * *user's* (see above description of kodicom4400r_muxsel) channels * 0 through 3 */ static void kodicom4400r_init(struct bttv *btv) { char *sw_status = (char *)(&btv->mbox_we); int ix; gpio_inout(0x0003ff, 0x0003ff); gpio_write(1 << 9); /* reset MUX */ gpio_write(0); /* Preset camera 0 to the 4 controllers */ for (ix = 0; ix < 4; ix++) { sw_status[ix] = ix; kodicom4400r_write(btv, ix, ix, 1); } /* * Since this is the "master", we need to set up the * other three controller chips' pointers to this structure * for later use in the muxsel routine. */ if ((btv->c.nr<1) || (btv->c.nr>BTTV_MAX-3)) return; master[btv->c.nr-1] = btv; master[btv->c.nr] = btv; master[btv->c.nr+1] = btv; master[btv->c.nr+2] = btv; } /* The Grandtec X-Guard framegrabber card uses two Dual 4-channel * video multiplexers to provide up to 16 video inputs. These * multiplexers are controlled by the lower 8 GPIO pins of the * bt878. The multiplexers probably Pericom PI5V331Q or similar. 
 * xxx0 is pin xxx of multiplexer U5,
 * yyy1 is pin yyy of multiplexer U2
 */
#define ENA0	0x01
#define ENB0	0x02
#define ENA1	0x04
#define ENB1	0x08
#define IN10	0x10
#define IN00	0x20
#define IN11	0x40
#define IN01	0x80

/* Route one of the 16 X-Guard inputs through the two dual 4-channel muxes
 * by writing the matching enable/select pattern to the low GPIO byte. */
static void xguard_muxsel(struct bttv *btv, unsigned int input)
{
	/* GPIO pattern for each of the 16 inputs, indexed by input number */
	static const int masks[] = {
		ENB0, ENB0|IN00, ENB0|IN10, ENB0|IN00|IN10,
		ENA0, ENA0|IN00, ENA0|IN10, ENA0|IN00|IN10,
		ENB1, ENB1|IN01, ENB1|IN11, ENB1|IN01|IN11,
		ENA1, ENA1|IN01, ENA1|IN11, ENA1|IN01|IN11,
	};
	gpio_write(masks[input%16]);
}

static void picolo_tetra_init(struct bttv *btv)
{
	/* This is the video input redirection functionality: not used here. */
	btwrite (0x08<<16,BT848_GPIO_DATA);/*GPIO[19] [==> 4053 B+C] set to 1 */
	btwrite (0x04<<16,BT848_GPIO_DATA);/*GPIO[18] [==> 4053 A] set to 1*/
}

static void picolo_tetra_muxsel (struct bttv* btv, unsigned int input)
{
	dprintk (KERN_DEBUG "bttv%d : picolo_tetra_muxsel => input = %d\n",btv->c.nr,input);
	/* Just set the right path in the analog multiplexers:
	 * channel 1 -> 4 ==> Analog Mux ==> MUX0 */
	/* GPIO[20] and GPIO[21] are used to choose the right input */
	btwrite (input<<20,BT848_GPIO_DATA);
}

/*
 * ivc120_muxsel [Added by Alan Garfield <alan@fromorbit.com>]
 *
 * The IVC120G security card has 4 i2c controlled TDA8540 matrix
 * switchers to provide 16 channels to MUX0.  The TDA8540's have
 * 4 independent outputs and as such the IVC120G also has the
 * optional "Monitor Out" bus.  This allows the card to be looking
 * at one input while the monitor is looking at another.
 *
 * Since I couldn't be bothered figuring out how to add an
 * independent muxsel for the monitor bus, I've just set it to
 * whatever the card is looking at.
* * OUT0 of the TDA8540's is connected to MUX0 (0x03) * OUT1 of the TDA8540's is connected to "Monitor Out" (0x0C) * * TDA8540_ALT3 IN0-3 = Channel 13 - 16 (0x03) * TDA8540_ALT4 IN0-3 = Channel 1 - 4 (0x03) * TDA8540_ALT5 IN0-3 = Channel 5 - 8 (0x03) * TDA8540_ALT6 IN0-3 = Channel 9 - 12 (0x03) * */ /* All 7 possible sub-ids for the TDA8540 Matrix Switcher */ #define I2C_TDA8540 0x90 #define I2C_TDA8540_ALT1 0x92 #define I2C_TDA8540_ALT2 0x94 #define I2C_TDA8540_ALT3 0x96 #define I2C_TDA8540_ALT4 0x98 #define I2C_TDA8540_ALT5 0x9a #define I2C_TDA8540_ALT6 0x9c static void ivc120_muxsel(struct bttv *btv, unsigned int input) { /* Simple maths */ int key = input % 4; int matrix = input / 4; dprintk("bttv%d: ivc120_muxsel: Input - %02d | TDA - %02d | In - %02d\n", btv->c.nr, input, matrix, key); /* Handles the input selection on the TDA8540's */ bttv_I2CWrite(btv, I2C_TDA8540_ALT3, 0x00, ((matrix == 3) ? (key | key << 2) : 0x00), 1); bttv_I2CWrite(btv, I2C_TDA8540_ALT4, 0x00, ((matrix == 0) ? (key | key << 2) : 0x00), 1); bttv_I2CWrite(btv, I2C_TDA8540_ALT5, 0x00, ((matrix == 1) ? (key | key << 2) : 0x00), 1); bttv_I2CWrite(btv, I2C_TDA8540_ALT6, 0x00, ((matrix == 2) ? (key | key << 2) : 0x00), 1); /* Handles the output enables on the TDA8540's */ bttv_I2CWrite(btv, I2C_TDA8540_ALT3, 0x02, ((matrix == 3) ? 0x03 : 0x00), 1); /* 13 - 16 */ bttv_I2CWrite(btv, I2C_TDA8540_ALT4, 0x02, ((matrix == 0) ? 0x03 : 0x00), 1); /* 1-4 */ bttv_I2CWrite(btv, I2C_TDA8540_ALT5, 0x02, ((matrix == 1) ? 0x03 : 0x00), 1); /* 5-8 */ bttv_I2CWrite(btv, I2C_TDA8540_ALT6, 0x02, ((matrix == 2) ? 
0x03 : 0x00), 1); /* 9-12 */ /* 878's MUX0 is already selected for input via muxsel values */ } /* PXC200 muxsel helper * luke@syseng.anu.edu.au * another transplant * from Alessandro Rubini (rubini@linux.it) * * There are 4 kinds of cards: * PXC200L which is bt848 * PXC200F which is bt848 with PIC controlling mux * PXC200AL which is bt878 * PXC200AF which is bt878 with PIC controlling mux */ #define PX_CFG_PXC200F 0x01 #define PX_FLAG_PXC200A 0x00001000 /* a pxc200A is bt-878 based */ #define PX_I2C_PIC 0x0f #define PX_PXC200A_CARDID 0x200a1295 #define PX_I2C_CMD_CFG 0x00 static void PXC200_muxsel(struct bttv *btv, unsigned int input) { int rc; long mux; int bitmask; unsigned char buf[2]; /* Read PIC config to determine if this is a PXC200F */ /* PX_I2C_CMD_CFG*/ buf[0]=0; buf[1]=0; rc=bttv_I2CWrite(btv,(PX_I2C_PIC<<1),buf[0],buf[1],1); if (rc) { printk(KERN_DEBUG "bttv%d: PXC200_muxsel: pic cfg write failed:%d\n", btv->c.nr,rc); /* not PXC ? do nothing */ return; } rc=bttv_I2CRead(btv,(PX_I2C_PIC<<1),NULL); if (!(rc & PX_CFG_PXC200F)) { printk(KERN_DEBUG "bttv%d: PXC200_muxsel: not PXC200F rc:%d \n", btv->c.nr,rc); return; } /* The multiplexer in the 200F is handled by the GPIO port */ /* get correct mapping between inputs */ /* mux = bttv_tvcards[btv->type].muxsel[input] & 3; */ /* ** not needed!? 
*/ mux = input; /* make sure output pins are enabled */ /* bitmask=0x30f; */ bitmask=0x302; /* check whether we have a PXC200A */ if (btv->cardid == PX_PXC200A_CARDID) { bitmask ^= 0x180; /* use 7 and 9, not 8 and 9 */ bitmask |= 7<<4; /* the DAC */ } btwrite(bitmask, BT848_GPIO_OUT_EN); bitmask = btread(BT848_GPIO_DATA); if (btv->cardid == PX_PXC200A_CARDID) bitmask = (bitmask & ~0x280) | ((mux & 2) << 8) | ((mux & 1) << 7); else /* older device */ bitmask = (bitmask & ~0x300) | ((mux & 3) << 8); btwrite(bitmask,BT848_GPIO_DATA); /* * Was "to be safe, set the bt848 to input 0" * Actually, since it's ok at load time, better not messing * with these bits (on PXC200AF you need to set mux 2 here) * * needed because bttv-driver sets mux before calling this function */ if (btv->cardid == PX_PXC200A_CARDID) btaor(2<<5, ~BT848_IFORM_MUXSEL, BT848_IFORM); else /* older device */ btand(~BT848_IFORM_MUXSEL,BT848_IFORM); printk(KERN_DEBUG "bttv%d: setting input channel to:%d\n", btv->c.nr,(int)mux); } static void phytec_muxsel(struct bttv *btv, unsigned int input) { unsigned int mux = input % 4; if (input == btv->svhs) mux = 0; gpio_bits(0x3, mux); } /* * GeoVision GV-800(S) functions * Bruno Christo <bchristo@inf.ufsm.br> */ /* This is a function to control the analog switch, which determines which * camera is routed to which controller. The switch comprises an X-address * (gpio bits 0-3, representing the camera, ranging from 0-15), and a * Y-address (gpio bits 4-6, representing the controller, ranging from 0-3). * A data value (gpio bit 18) of '1' enables the switch, and '0' disables * the switch. A STROBE bit (gpio bit 17) latches the data value into the * specified address. There is also a chip select (gpio bit 16). * The idea is to set the address and chip select together, bring * STROBE high, write the data, and finally bring STROBE back to low. 
*/ static void gv800s_write(struct bttv *btv, unsigned char xaddr, unsigned char yaddr, unsigned char data) { /* On the "master" 878A: * GPIO bits 0-9 are used for the analog switch: * 00 - 03: camera selector * 04 - 06: 878A (controller) selector * 16: cselect * 17: strobe * 18: data (1->on, 0->off) * 19: reset */ const u32 ADDRESS = ((xaddr&0xf) | (yaddr&3)<<4); const u32 CSELECT = 1<<16; const u32 STROBE = 1<<17; const u32 DATA = data<<18; gpio_bits(0x1007f, ADDRESS | CSELECT); /* write ADDRESS and CSELECT */ gpio_bits(0x20000, STROBE); /* STROBE high */ gpio_bits(0x40000, DATA); /* write DATA */ gpio_bits(0x20000, ~STROBE); /* STROBE low */ } /* * GeoVision GV-800(S) muxsel * * Each of the 4 cards (controllers) use this function. * The controller using this function selects the input through the GPIO pins * of the "master" card. A pointer to this card is stored in master[btv->c.nr]. * * The parameter 'input' is the requested camera number (0-4) on the controller. * The map array has the address of each input. Note that the addresses in the * array are in the sequence the original GeoVision driver uses, that is, set * every controller to input 0, then to input 1, 2, 3, repeat. This means that * the physical "camera 1" connector corresponds to controller 0 input 0, * "camera 2" corresponds to controller 1 input 0, and so on. * * After getting the input address, the function then writes the appropriate * data to the analog switch, and housekeeps the local copy of the switch * information. 
*/ static void gv800s_muxsel(struct bttv *btv, unsigned int input) { struct bttv *mctlr; char *sw_status; int xaddr, yaddr; static unsigned int map[4][4] = { { 0x0, 0x4, 0xa, 0x6 }, { 0x1, 0x5, 0xb, 0x7 }, { 0x2, 0x8, 0xc, 0xe }, { 0x3, 0x9, 0xd, 0xf } }; input = input%4; mctlr = master[btv->c.nr]; if (mctlr == NULL) { /* do nothing until the "master" is detected */ return; } yaddr = (btv->c.nr - mctlr->c.nr) & 3; sw_status = (char *)(&mctlr->mbox_we); xaddr = map[yaddr][input] & 0xf; /* Check if the controller/camera pair has changed, ignore otherwise */ if (sw_status[yaddr] != xaddr) { /* disable the old switch, enable the new one and save status */ gv800s_write(mctlr, sw_status[yaddr], yaddr, 0); sw_status[yaddr] = xaddr; gv800s_write(mctlr, xaddr, yaddr, 1); } } /* GeoVision GV-800(S) "master" chip init */ static void gv800s_init(struct bttv *btv) { char *sw_status = (char *)(&btv->mbox_we); int ix; gpio_inout(0xf107f, 0xf107f); gpio_write(1<<19); /* reset the analog MUX */ gpio_write(0); /* Preset camera 0 to the 4 controllers */ for (ix = 0; ix < 4; ix++) { sw_status[ix] = ix; gv800s_write(btv, ix, ix, 1); } /* Inputs on the "master" controller need this brightness fix */ bttv_I2CWrite(btv, 0x18, 0x5, 0x90, 1); if (btv->c.nr > BTTV_MAX-4) return; /* * Store the "master" controller pointer in the master * array for later use in the muxsel function. 
*/ master[btv->c.nr] = btv; master[btv->c.nr+1] = btv; master[btv->c.nr+2] = btv; master[btv->c.nr+3] = btv; } /* ----------------------------------------------------------------------- */ /* motherboard chipset specific stuff */ void __init bttv_check_chipset(void) { int pcipci_fail = 0; struct pci_dev *dev = NULL; if (pci_pci_problems & (PCIPCI_FAIL|PCIAGP_FAIL)) /* should check if target is AGP */ pcipci_fail = 1; if (pci_pci_problems & (PCIPCI_TRITON|PCIPCI_NATOMA|PCIPCI_VIAETBF)) triton1 = 1; if (pci_pci_problems & PCIPCI_VSFX) vsfx = 1; #ifdef PCIPCI_ALIMAGIK if (pci_pci_problems & PCIPCI_ALIMAGIK) latency = 0x0A; #endif /* print warnings about any quirks found */ if (triton1) printk(KERN_INFO "bttv: Host bridge needs ETBF enabled.\n"); if (vsfx) printk(KERN_INFO "bttv: Host bridge needs VSFX enabled.\n"); if (pcipci_fail) { printk(KERN_INFO "bttv: bttv and your chipset may not work " "together.\n"); if (!no_overlay) { printk(KERN_INFO "bttv: overlay will be disabled.\n"); no_overlay = 1; } else { printk(KERN_INFO "bttv: overlay forced. 
Use this " "option at your own risk.\n"); } } if (UNSET != latency) printk(KERN_INFO "bttv: pci latency fixup [%d]\n",latency); while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, dev))) { unsigned char b; pci_read_config_byte(dev, 0x53, &b); if (bttv_debug) printk(KERN_INFO "bttv: Host bridge: 82441FX Natoma, " "bufcon=0x%02x\n",b); } } int __devinit bttv_handle_chipset(struct bttv *btv) { unsigned char command; if (!triton1 && !vsfx && UNSET == latency) return 0; if (bttv_verbose) { if (triton1) printk(KERN_INFO "bttv%d: enabling ETBF (430FX/VP3 compatibilty)\n",btv->c.nr); if (vsfx && btv->id >= 878) printk(KERN_INFO "bttv%d: enabling VSFX\n",btv->c.nr); if (UNSET != latency) printk(KERN_INFO "bttv%d: setting pci timer to %d\n", btv->c.nr,latency); } if (btv->id < 878) { /* bt848 (mis)uses a bit in the irq mask for etbf */ if (triton1) btv->triton1 = BT848_INT_ETBF; } else { /* bt878 has a bit in the pci config space for it */ pci_read_config_byte(btv->c.pci, BT878_DEVCTRL, &command); if (triton1) command |= BT878_EN_TBFX; if (vsfx) command |= BT878_EN_VSFX; pci_write_config_byte(btv->c.pci, BT878_DEVCTRL, command); } if (UNSET != latency) pci_write_config_byte(btv->c.pci, PCI_LATENCY_TIMER, latency); return 0; } /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
TheWhisp/android_kernel_samsung_kyle
drivers/ide/ide-floppy.c
2791
14754
/* * IDE ATAPI floppy driver. * * Copyright (C) 1996-1999 Gadi Oxman <gadio@netvision.net.il> * Copyright (C) 2000-2002 Paul Bristow <paul@paulbristow.net> * Copyright (C) 2005 Bartlomiej Zolnierkiewicz * * This driver supports the following IDE floppy drives: * * LS-120/240 SuperDisk * Iomega Zip 100/250 * Iomega PC Card Clik!/PocketZip * * For a historical changelog see * Documentation/ide/ChangeLog.ide-floppy.1996-2002 */ #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/genhd.h> #include <linux/cdrom.h> #include <linux/ide.h> #include <linux/hdreg.h> #include <linux/bitops.h> #include <linux/mutex.h> #include <linux/scatterlist.h> #include <scsi/scsi_ioctl.h> #include <asm/byteorder.h> #include <linux/irq.h> #include <linux/uaccess.h> #include <linux/io.h> #include <asm/unaligned.h> #include "ide-floppy.h" /* * After each failed packet command we issue a request sense command and retry * the packet command IDEFLOPPY_MAX_PC_RETRIES times. */ #define IDEFLOPPY_MAX_PC_RETRIES 3 /* format capacities descriptor codes */ #define CAPACITY_INVALID 0x00 #define CAPACITY_UNFORMATTED 0x01 #define CAPACITY_CURRENT 0x02 #define CAPACITY_NO_CARTRIDGE 0x03 /* * The following delay solves a problem with ATAPI Zip 100 drive where BSY bit * was apparently being deasserted before the unit was ready to receive data. */ #define IDEFLOPPY_PC_DELAY (HZ/20) /* default delay for ZIP 100 (50ms) */ static int ide_floppy_callback(ide_drive_t *drive, int dsc) { struct ide_disk_obj *floppy = drive->driver_data; struct ide_atapi_pc *pc = drive->pc; struct request *rq = pc->rq; int uptodate = pc->error ? 
0 : 1; ide_debug_log(IDE_DBG_FUNC, "enter"); if (drive->failed_pc == pc) drive->failed_pc = NULL; if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 || rq->cmd_type == REQ_TYPE_BLOCK_PC) uptodate = 1; /* FIXME */ else if (pc->c[0] == GPCMD_REQUEST_SENSE) { u8 *buf = bio_data(rq->bio); if (!pc->error) { floppy->sense_key = buf[2] & 0x0F; floppy->asc = buf[12]; floppy->ascq = buf[13]; floppy->progress_indication = buf[15] & 0x80 ? (u16)get_unaligned((u16 *)&buf[16]) : 0x10000; if (drive->failed_pc) ide_debug_log(IDE_DBG_PC, "pc = %x", drive->failed_pc->c[0]); ide_debug_log(IDE_DBG_SENSE, "sense key = %x, asc = %x," "ascq = %x", floppy->sense_key, floppy->asc, floppy->ascq); } else printk(KERN_ERR PFX "Error in REQUEST SENSE itself - " "Aborting request!\n"); } if (rq->cmd_type == REQ_TYPE_SPECIAL) rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL; return uptodate; } static void ide_floppy_report_error(struct ide_disk_obj *floppy, struct ide_atapi_pc *pc) { /* suppress error messages resulting from Medium not present */ if (floppy->sense_key == 0x02 && floppy->asc == 0x3a && floppy->ascq == 0x00) return; printk(KERN_ERR PFX "%s: I/O error, pc = %2x, key = %2x, " "asc = %2x, ascq = %2x\n", floppy->drive->name, pc->c[0], floppy->sense_key, floppy->asc, floppy->ascq); } static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd, struct ide_atapi_pc *pc) { struct ide_disk_obj *floppy = drive->driver_data; if (drive->failed_pc == NULL && pc->c[0] != GPCMD_REQUEST_SENSE) drive->failed_pc = pc; /* Set the current packet command */ drive->pc = pc; if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) { unsigned int done = blk_rq_bytes(drive->hwif->rq); if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR)) ide_floppy_report_error(floppy, pc); /* Giving up */ pc->error = IDE_DRV_ERROR_GENERAL; drive->failed_pc = NULL; drive->pc_callback(drive, 0); ide_complete_rq(drive, -EIO, done); return ide_stopped; } ide_debug_log(IDE_DBG_FUNC, "retry #%d", pc->retries); 
pc->retries++;

	return ide_issue_pc(drive, cmd);
}

/* Build an ATAPI READ FORMAT CAPACITIES command; asks the drive for up to
 * 255 bytes of capacity-descriptor data. */
void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *pc)
{
	ide_init_pc(pc);
	pc->c[0] = GPCMD_READ_FORMAT_CAPACITIES;
	pc->c[7] = 255;
	pc->c[8] = 255;
	pc->req_xfer = 255;
}

/* A mode sense command is used to "sense" floppy parameters. */
void ide_floppy_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
{
	u16 length = 8; /* sizeof(Mode Parameter Header) = 8 Bytes */

	ide_init_pc(pc);
	pc->c[0] = GPCMD_MODE_SENSE_10;
	pc->c[1] = 0;
	pc->c[2] = page_code;

	switch (page_code) {
	case IDEFLOPPY_CAPABILITIES_PAGE:
		length += 12;
		break;
	case IDEFLOPPY_FLEXIBLE_DISK_PAGE:
		length += 32;
		break;
	default:
		printk(KERN_ERR PFX "unsupported page code in %s\n", __func__);
	}
	/* allocation length lives in bytes 7-8, big-endian */
	put_unaligned(cpu_to_be16(length), (u16 *) &pc->c[7]);
	pc->req_xfer = length;
}

/* Build a READ(10)/WRITE(10) packet command covering the request rq. */
static void idefloppy_create_rw_cmd(ide_drive_t *drive,
				    struct ide_atapi_pc *pc, struct request *rq,
				    unsigned long sector)
{
	struct ide_disk_obj *floppy = drive->driver_data;
	/* convert 512-byte sector units to device block units */
	int block = sector / floppy->bs_factor;
	int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
	int cmd = rq_data_dir(rq);

	ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);

	ide_init_pc(pc);
	pc->c[0] = cmd == READ ?
GPCMD_READ_10 : GPCMD_WRITE_10; put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]); put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]); memcpy(rq->cmd, pc->c, 12); pc->rq = rq; if (rq->cmd_flags & REQ_WRITE) pc->flags |= PC_FLAG_WRITING; pc->flags |= PC_FLAG_DMA_OK; } static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy, struct ide_atapi_pc *pc, struct request *rq) { ide_init_pc(pc); memcpy(pc->c, rq->cmd, sizeof(pc->c)); pc->rq = rq; if (blk_rq_bytes(rq)) { pc->flags |= PC_FLAG_DMA_OK; if (rq_data_dir(rq) == WRITE) pc->flags |= PC_FLAG_WRITING; } } static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, struct request *rq, sector_t block) { struct ide_disk_obj *floppy = drive->driver_data; struct ide_cmd cmd; struct ide_atapi_pc *pc; ide_debug_log(IDE_DBG_FUNC, "enter, cmd: 0x%x\n", rq->cmd[0]); if (drive->debug_mask & IDE_DBG_RQ) blk_dump_rq_flags(rq, (rq->rq_disk ? rq->rq_disk->disk_name : "dev?")); if (rq->errors >= ERROR_MAX) { if (drive->failed_pc) { ide_floppy_report_error(floppy, drive->failed_pc); drive->failed_pc = NULL; } else printk(KERN_ERR PFX "%s: I/O error\n", drive->name); if (rq->cmd_type == REQ_TYPE_SPECIAL) { rq->errors = 0; ide_complete_rq(drive, 0, blk_rq_bytes(rq)); return ide_stopped; } else goto out_end; } switch (rq->cmd_type) { case REQ_TYPE_FS: if (((long)blk_rq_pos(rq) % floppy->bs_factor) || (blk_rq_sectors(rq) % floppy->bs_factor)) { printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", drive->name); goto out_end; } pc = &floppy->queued_pc; idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); break; case REQ_TYPE_SPECIAL: case REQ_TYPE_SENSE: pc = (struct ide_atapi_pc *)rq->special; break; case REQ_TYPE_BLOCK_PC: pc = &floppy->queued_pc; idefloppy_blockpc_cmd(floppy, pc, rq); break; default: BUG(); } ide_prep_sense(drive, rq); memset(&cmd, 0, sizeof(cmd)); if (rq_data_dir(rq)) cmd.tf_flags |= IDE_TFLAG_WRITE; cmd.rq = rq; if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { 
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); ide_map_sg(drive, &cmd); } pc->rq = rq; return ide_floppy_issue_pc(drive, &cmd, pc); out_end: drive->failed_pc = NULL; if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) rq->errors = -EIO; ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); return ide_stopped; } /* * Look at the flexible disk page parameters. We ignore the CHS capacity * parameters and use the LBA parameters instead. */ static int ide_floppy_get_flexible_disk_page(ide_drive_t *drive, struct ide_atapi_pc *pc) { struct ide_disk_obj *floppy = drive->driver_data; struct gendisk *disk = floppy->disk; u8 *page, buf[40]; int capacity, lba_capacity; u16 transfer_rate, sector_size, cyls, rpm; u8 heads, sectors; ide_floppy_create_mode_sense_cmd(pc, IDEFLOPPY_FLEXIBLE_DISK_PAGE); if (ide_queue_pc_tail(drive, disk, pc, buf, pc->req_xfer)) { printk(KERN_ERR PFX "Can't get flexible disk page params\n"); return 1; } if (buf[3] & 0x80) drive->dev_flags |= IDE_DFLAG_WP; else drive->dev_flags &= ~IDE_DFLAG_WP; set_disk_ro(disk, !!(drive->dev_flags & IDE_DFLAG_WP)); page = &buf[8]; transfer_rate = be16_to_cpup((__be16 *)&buf[8 + 2]); sector_size = be16_to_cpup((__be16 *)&buf[8 + 6]); cyls = be16_to_cpup((__be16 *)&buf[8 + 8]); rpm = be16_to_cpup((__be16 *)&buf[8 + 28]); heads = buf[8 + 4]; sectors = buf[8 + 5]; capacity = cyls * heads * sectors * sector_size; if (memcmp(page, &floppy->flexible_disk_page, 32)) printk(KERN_INFO PFX "%s: %dkB, %d/%d/%d CHS, %d kBps, " "%d sector size, %d rpm\n", drive->name, capacity / 1024, cyls, heads, sectors, transfer_rate / 8, sector_size, rpm); memcpy(&floppy->flexible_disk_page, page, 32); drive->bios_cyl = cyls; drive->bios_head = heads; drive->bios_sect = sectors; lba_capacity = floppy->blocks * floppy->block_size; if (capacity < lba_capacity) { printk(KERN_NOTICE PFX "%s: The disk reports a capacity of %d " "bytes, but the drive only handles %d\n", drive->name, lba_capacity, capacity); floppy->blocks = floppy->block_size ? 
			capacity / floppy->block_size : 0;
		drive->capacity64 = floppy->blocks * floppy->bs_factor;
	}

	return 0;
}

/*
 * Determine if a media is present in the floppy drive, and if so, its LBA
 * capacity.
 */
static int ide_floppy_get_capacity(ide_drive_t *drive)
{
	struct ide_disk_obj *floppy = drive->driver_data;
	struct gendisk *disk = floppy->disk;
	struct ide_atapi_pc pc;
	u8 *cap_desc;
	u8 pc_buf[256], header_len, desc_cnt;
	int i, rc = 1, blocks, length;

	ide_debug_log(IDE_DBG_FUNC, "enter");

	/* reset geometry/capacity; they are rebuilt from the drive's reply */
	drive->bios_cyl = 0;
	drive->bios_head = drive->bios_sect = 0;
	floppy->blocks = 0;
	floppy->bs_factor = 1;
	drive->capacity64 = 0;

	ide_floppy_create_read_capacity_cmd(&pc);
	if (ide_queue_pc_tail(drive, disk, &pc, pc_buf, pc.req_xfer)) {
		printk(KERN_ERR PFX "Can't get floppy parameters\n");
		return 1;
	}
	header_len = pc_buf[3];
	cap_desc = &pc_buf[4];
	desc_cnt = header_len / 8; /* capacity descriptor of 8 bytes */

	for (i = 0; i < desc_cnt; i++) {
		unsigned int desc_start = 4 + i*8;

		/* descriptor: 32-bit big-endian block count, then a 16-bit
		 * big-endian block length at offset 6 */
		blocks = be32_to_cpup((__be32 *)&pc_buf[desc_start]);
		length = be16_to_cpup((__be16 *)&pc_buf[desc_start + 6]);

		ide_debug_log(IDE_DBG_PROBE, "Descriptor %d: %dkB, %d blocks, "
			      "%d sector size",
			      i, blocks * length / 1024, blocks, length);

		if (i)
			continue;
		/*
		 * the code below is valid only for the 1st descriptor, ie i=0
		 */

		switch (pc_buf[desc_start + 4] & 0x03) {
		/* Clik! drive returns this instead of CAPACITY_CURRENT */
		case CAPACITY_UNFORMATTED:
			if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
				/*
				 * If it is not a clik drive, break out
				 * (maintains previous driver behaviour)
				 */
				break;
			/* fall through for Clik! drives */
		case CAPACITY_CURRENT:
			/* Normal Zip/LS-120 disks */
			if (memcmp(cap_desc, &floppy->cap_desc, 8))
				printk(KERN_INFO PFX "%s: %dkB, %d blocks, %d "
				       "sector size\n",
				       drive->name, blocks * length / 1024,
				       blocks, length);
			memcpy(&floppy->cap_desc, cap_desc, 8);

			if (!length || length % 512) {
				printk(KERN_NOTICE PFX "%s: %d bytes block size"
				       " not supported\n", drive->name, length);
			} else {
				floppy->blocks = blocks;
				floppy->block_size = length;
				/* bs_factor: logical block size expressed in
				 * 512-byte sectors */
				floppy->bs_factor = length / 512;
				if (floppy->bs_factor != 1)
					printk(KERN_NOTICE PFX "%s: Warning: "
					       "non 512 bytes block size not "
					       "fully supported\n",
					       drive->name);
				drive->capacity64 =
					floppy->blocks * floppy->bs_factor;
				rc = 0;
			}
			break;
		case CAPACITY_NO_CARTRIDGE:
			/*
			 * This is a KERN_ERR so it appears on screen
			 * for the user to see
			 */
			printk(KERN_ERR PFX "%s: No disk in drive\n",
			       drive->name);
			break;
		case CAPACITY_INVALID:
			printk(KERN_ERR PFX "%s: Invalid capacity for disk "
			       "in drive\n", drive->name);
			break;
		}
		ide_debug_log(IDE_DBG_PROBE, "Descriptor 0 Code: %d",
			      pc_buf[desc_start + 4] & 0x03);
	}

	/* Clik! disk does not support get_flexible_disk_page */
	if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
		(void) ide_floppy_get_flexible_disk_page(drive, &pc);

	return rc;
}

/*
 * One-time probe-side setup: install the packet-command callback, apply
 * per-model workarounds, and read the initial capacity.
 */
static void ide_floppy_setup(ide_drive_t *drive)
{
	struct ide_disk_obj *floppy = drive->driver_data;
	u16 *id = drive->id;

	drive->pc_callback = ide_floppy_callback;

	/*
	 * We used to check revisions here. At this point however I'm giving up.
	 * Just assume they are all broken, its easier.
	 *
	 * The actual reason for the workarounds was likely a driver bug after
	 * all rather than a firmware bug, and the workaround below used to hide
	 * it. It should be fixed as of version 1.9, but to be on the safe side
	 * we'll leave the limitation below for the 2.2.x tree.
	 */
	if (!strncmp((char *)&id[ATA_ID_PROD], "IOMEGA ZIP 100 ATAPI", 20)) {
		drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
		/* This value will be visible in the /proc/ide/hdx/settings */
		drive->pc_delay = IDEFLOPPY_PC_DELAY;
		/* cap transfers at 64 sectors (32kB) for the ZIP 100 */
		blk_queue_max_hw_sectors(drive->queue, 64);
	}

	/*
	 * Guess what? The IOMEGA Clik! drive also needs the above fix. It makes
	 * nasty clicking noises without it, so please don't remove this.
	 */
	if (strncmp((char *)&id[ATA_ID_PROD], "IOMEGA Clik!", 11) == 0) {
		blk_queue_max_hw_sectors(drive->queue, 64);
		drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
		/* IOMEGA Clik! drives do not support lock/unlock commands */
		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
	}

	(void) ide_floppy_get_capacity(drive);

	ide_proc_register_driver(drive, floppy->driver);

	drive->dev_flags |= IDE_DFLAG_ATTACH;
}

/* .flush disk op: intentionally a no-op for the ATAPI floppy. */
static void ide_floppy_flush(ide_drive_t *drive)
{
}

/*
 * Media initialization hook: if the unit reports not-ready, issue a
 * start/stop (start = 1), then (re)read the capacity and publish it to
 * the gendisk.
 */
static int ide_floppy_init_media(ide_drive_t *drive, struct gendisk *disk)
{
	int ret = 0;

	if (ide_do_test_unit_ready(drive, disk))
		ide_do_start_stop(drive, disk, 1);

	ret = ide_floppy_get_capacity(drive);

	set_capacity(disk, ide_gd_capacity(drive));

	return ret;
}

/*
 * Disk-ops vtable wiring the generic IDE disk driver to the ATAPI floppy
 * implementations above.
 */
const struct ide_disk_ops ide_atapi_disk_ops = {
	.check		= ide_check_atapi_device,
	.get_capacity	= ide_floppy_get_capacity,
	.setup		= ide_floppy_setup,
	.flush		= ide_floppy_flush,
	.init_media	= ide_floppy_init_media,
	.set_doorlock	= ide_set_media_lock,
	.do_request	= ide_floppy_do_request,
	.ioctl		= ide_floppy_ioctl,
};
/* gpl-2.0 */