repo_name
string
path
string
copies
string
size
string
content
string
license
string
Shaaan/android_kernel_sony_C1905
sound/soc/codecs/rt5631.c
4894
56577
/* * rt5631.c -- RT5631 ALSA Soc Audio driver * * Copyright 2011 Realtek Microelectronics * * Author: flove <flove@realtek.com> * * Based on WM8753.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/initval.h> #include <sound/tlv.h> #include "rt5631.h" struct rt5631_priv { int codec_version; int master; int sysclk; int rx_rate; int bclk_rate; int dmic_used_flag; }; static const u16 rt5631_reg[RT5631_VENDOR_ID2 + 1] = { [RT5631_SPK_OUT_VOL] = 0x8888, [RT5631_HP_OUT_VOL] = 0x8080, [RT5631_MONO_AXO_1_2_VOL] = 0xa080, [RT5631_AUX_IN_VOL] = 0x0808, [RT5631_ADC_REC_MIXER] = 0xf0f0, [RT5631_VDAC_DIG_VOL] = 0x0010, [RT5631_OUTMIXER_L_CTRL] = 0xffc0, [RT5631_OUTMIXER_R_CTRL] = 0xffc0, [RT5631_AXO1MIXER_CTRL] = 0x88c0, [RT5631_AXO2MIXER_CTRL] = 0x88c0, [RT5631_DIG_MIC_CTRL] = 0x3000, [RT5631_MONO_INPUT_VOL] = 0x8808, [RT5631_SPK_MIXER_CTRL] = 0xf8f8, [RT5631_SPK_MONO_OUT_CTRL] = 0xfc00, [RT5631_SPK_MONO_HP_OUT_CTRL] = 0x4440, [RT5631_SDP_CTRL] = 0x8000, [RT5631_MONO_SDP_CTRL] = 0x8000, [RT5631_STEREO_AD_DA_CLK_CTRL] = 0x2010, [RT5631_GEN_PUR_CTRL_REG] = 0x0e00, [RT5631_INT_ST_IRQ_CTRL_2] = 0x071a, [RT5631_MISC_CTRL] = 0x2040, [RT5631_DEPOP_FUN_CTRL_2] = 0x8000, [RT5631_SOFT_VOL_CTRL] = 0x07e0, [RT5631_ALC_CTRL_1] = 0x0206, [RT5631_ALC_CTRL_3] = 0x2000, [RT5631_PSEUDO_SPATL_CTRL] = 0x0553, }; /** * rt5631_write_index - write index register of 2nd layer */ static void rt5631_write_index(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { snd_soc_write(codec, RT5631_INDEX_ADD, reg); snd_soc_write(codec, 
RT5631_INDEX_DATA, value); } /** * rt5631_read_index - read index register of 2nd layer */ static unsigned int rt5631_read_index(struct snd_soc_codec *codec, unsigned int reg) { unsigned int value; snd_soc_write(codec, RT5631_INDEX_ADD, reg); value = snd_soc_read(codec, RT5631_INDEX_DATA); return value; } static int rt5631_reset(struct snd_soc_codec *codec) { return snd_soc_write(codec, RT5631_RESET, 0); } static int rt5631_volatile_register(struct snd_soc_codec *codec, unsigned int reg) { switch (reg) { case RT5631_RESET: case RT5631_INT_ST_IRQ_CTRL_2: case RT5631_INDEX_ADD: case RT5631_INDEX_DATA: case RT5631_EQ_CTRL: return 1; default: return 0; } } static int rt5631_readable_register(struct snd_soc_codec *codec, unsigned int reg) { switch (reg) { case RT5631_RESET: case RT5631_SPK_OUT_VOL: case RT5631_HP_OUT_VOL: case RT5631_MONO_AXO_1_2_VOL: case RT5631_AUX_IN_VOL: case RT5631_STEREO_DAC_VOL_1: case RT5631_MIC_CTRL_1: case RT5631_STEREO_DAC_VOL_2: case RT5631_ADC_CTRL_1: case RT5631_ADC_REC_MIXER: case RT5631_ADC_CTRL_2: case RT5631_VDAC_DIG_VOL: case RT5631_OUTMIXER_L_CTRL: case RT5631_OUTMIXER_R_CTRL: case RT5631_AXO1MIXER_CTRL: case RT5631_AXO2MIXER_CTRL: case RT5631_MIC_CTRL_2: case RT5631_DIG_MIC_CTRL: case RT5631_MONO_INPUT_VOL: case RT5631_SPK_MIXER_CTRL: case RT5631_SPK_MONO_OUT_CTRL: case RT5631_SPK_MONO_HP_OUT_CTRL: case RT5631_SDP_CTRL: case RT5631_MONO_SDP_CTRL: case RT5631_STEREO_AD_DA_CLK_CTRL: case RT5631_PWR_MANAG_ADD1: case RT5631_PWR_MANAG_ADD2: case RT5631_PWR_MANAG_ADD3: case RT5631_PWR_MANAG_ADD4: case RT5631_GEN_PUR_CTRL_REG: case RT5631_GLOBAL_CLK_CTRL: case RT5631_PLL_CTRL: case RT5631_INT_ST_IRQ_CTRL_1: case RT5631_INT_ST_IRQ_CTRL_2: case RT5631_GPIO_CTRL: case RT5631_MISC_CTRL: case RT5631_DEPOP_FUN_CTRL_1: case RT5631_DEPOP_FUN_CTRL_2: case RT5631_JACK_DET_CTRL: case RT5631_SOFT_VOL_CTRL: case RT5631_ALC_CTRL_1: case RT5631_ALC_CTRL_2: case RT5631_ALC_CTRL_3: case RT5631_PSEUDO_SPATL_CTRL: case RT5631_INDEX_ADD: case 
RT5631_INDEX_DATA: case RT5631_EQ_CTRL: case RT5631_VENDOR_ID: case RT5631_VENDOR_ID1: case RT5631_VENDOR_ID2: return 1; default: return 0; } } static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0); static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -95625, 375, 0); static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); /* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */ static unsigned int mic_bst_tlv[] = { TLV_DB_RANGE_HEAD(7), 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), 1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0), 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0), 3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0), 6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0), 7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0), 8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0), }; static int rt5631_dmic_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(codec); ucontrol->value.integer.value[0] = rt5631->dmic_used_flag; return 0; } static int rt5631_dmic_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(codec); rt5631->dmic_used_flag = ucontrol->value.integer.value[0]; return 0; } /* MIC Input Type */ static const char *rt5631_input_mode[] = { "Single ended", "Differential"}; static const SOC_ENUM_SINGLE_DECL( rt5631_mic1_mode_enum, RT5631_MIC_CTRL_1, RT5631_MIC1_DIFF_INPUT_SHIFT, rt5631_input_mode); static const SOC_ENUM_SINGLE_DECL( rt5631_mic2_mode_enum, RT5631_MIC_CTRL_1, RT5631_MIC2_DIFF_INPUT_SHIFT, rt5631_input_mode); /* MONO Input Type */ static const SOC_ENUM_SINGLE_DECL( rt5631_monoin_mode_enum, RT5631_MONO_INPUT_VOL, RT5631_MONO_DIFF_INPUT_SHIFT, rt5631_input_mode); /* SPK Ratio Gain Control */ static const char *rt5631_spk_ratio[] = {"1.00x", "1.09x", "1.27x", "1.44x", "1.56x", "1.68x", "1.99x", "2.34x"}; static const SOC_ENUM_SINGLE_DECL( rt5631_spk_ratio_enum, 
RT5631_GEN_PUR_CTRL_REG, RT5631_SPK_AMP_RATIO_CTRL_SHIFT, rt5631_spk_ratio); static const struct snd_kcontrol_new rt5631_snd_controls[] = { /* MIC */ SOC_ENUM("MIC1 Mode Control", rt5631_mic1_mode_enum), SOC_SINGLE_TLV("MIC1 Boost", RT5631_MIC_CTRL_2, RT5631_MIC1_BOOST_SHIFT, 8, 0, mic_bst_tlv), SOC_ENUM("MIC2 Mode Control", rt5631_mic2_mode_enum), SOC_SINGLE_TLV("MIC2 Boost", RT5631_MIC_CTRL_2, RT5631_MIC2_BOOST_SHIFT, 8, 0, mic_bst_tlv), /* MONO IN */ SOC_ENUM("MONOIN Mode Control", rt5631_monoin_mode_enum), SOC_DOUBLE_TLV("MONOIN_RX Capture Volume", RT5631_MONO_INPUT_VOL, RT5631_L_VOL_SHIFT, RT5631_R_VOL_SHIFT, RT5631_VOL_MASK, 1, in_vol_tlv), /* AXI */ SOC_DOUBLE_TLV("AXI Capture Volume", RT5631_AUX_IN_VOL, RT5631_L_VOL_SHIFT, RT5631_R_VOL_SHIFT, RT5631_VOL_MASK, 1, in_vol_tlv), /* DAC */ SOC_DOUBLE_TLV("PCM Playback Volume", RT5631_STEREO_DAC_VOL_2, RT5631_L_VOL_SHIFT, RT5631_R_VOL_SHIFT, RT5631_DAC_VOL_MASK, 1, dac_vol_tlv), SOC_DOUBLE("PCM Playback Switch", RT5631_STEREO_DAC_VOL_1, RT5631_L_MUTE_SHIFT, RT5631_R_MUTE_SHIFT, 1, 1), /* AXO */ SOC_SINGLE("AXO1 Playback Switch", RT5631_MONO_AXO_1_2_VOL, RT5631_L_MUTE_SHIFT, 1, 1), SOC_SINGLE("AXO2 Playback Switch", RT5631_MONO_AXO_1_2_VOL, RT5631_R_VOL_SHIFT, 1, 1), /* OUTVOL */ SOC_DOUBLE("OUTVOL Channel Switch", RT5631_SPK_OUT_VOL, RT5631_L_EN_SHIFT, RT5631_R_EN_SHIFT, 1, 0), /* SPK */ SOC_DOUBLE("Speaker Playback Switch", RT5631_SPK_OUT_VOL, RT5631_L_MUTE_SHIFT, RT5631_R_MUTE_SHIFT, 1, 1), SOC_DOUBLE_TLV("Speaker Playback Volume", RT5631_SPK_OUT_VOL, RT5631_L_VOL_SHIFT, RT5631_R_VOL_SHIFT, 39, 1, out_vol_tlv), /* MONO OUT */ SOC_SINGLE("MONO Playback Switch", RT5631_MONO_AXO_1_2_VOL, RT5631_MUTE_MONO_SHIFT, 1, 1), /* HP */ SOC_DOUBLE("HP Playback Switch", RT5631_HP_OUT_VOL, RT5631_L_MUTE_SHIFT, RT5631_R_MUTE_SHIFT, 1, 1), SOC_DOUBLE_TLV("HP Playback Volume", RT5631_HP_OUT_VOL, RT5631_L_VOL_SHIFT, RT5631_R_VOL_SHIFT, RT5631_VOL_MASK, 1, out_vol_tlv), /* DMIC */ SOC_SINGLE_EXT("DMIC Switch", 0, 0, 1, 0, 
		rt5631_dmic_get, rt5631_dmic_put),
	SOC_DOUBLE("DMIC Capture Switch", RT5631_DIG_MIC_CTRL,
		RT5631_DMIC_L_CH_MUTE_SHIFT,
		RT5631_DMIC_R_CH_MUTE_SHIFT, 1, 1),

	/* SPK Ratio Gain Control */
	SOC_ENUM("SPK Ratio Control", rt5631_spk_ratio_enum),
};

/*
 * DAPM route "connected" callbacks.
 *
 * Each of these is attached to a snd_soc_dapm_route and decides, at power
 * graph walk time, whether the route is currently active.  They return
 * non-zero when the route should be treated as connected.
 */

/* Route is live only while SYSCLK1 is sourced from PLL1. */
static int check_sysclk1_source(struct snd_soc_dapm_widget *source,
		struct snd_soc_dapm_widget *sink)
{
	unsigned int reg;

	reg = snd_soc_read(source->codec, RT5631_GLOBAL_CLK_CTRL);
	return reg & RT5631_SYSCLK_SOUR_SEL_PLL;
}

/* Route is live only when the "DMIC Switch" control enabled the digital mic. */
static int check_dmic_used(struct snd_soc_dapm_widget *source,
	struct snd_soc_dapm_widget *sink)
{
	struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(source->codec);
	return rt5631->dmic_used_flag;
}

/*
 * The RT5631 mute bits are active-high ("1 = muted"), so each of the
 * following helpers inverts the register bit: the route is connected
 * when the corresponding mute bit is clear.
 */

static int check_dacl_to_outmixl(struct snd_soc_dapm_widget *source,
	struct snd_soc_dapm_widget *sink)
{
	unsigned int reg;

	reg = snd_soc_read(source->codec, RT5631_OUTMIXER_L_CTRL);
	return !(reg & RT5631_M_DAC_L_TO_OUTMIXER_L);
}

static int check_dacr_to_outmixr(struct snd_soc_dapm_widget *source,
	struct snd_soc_dapm_widget *sink)
{
	unsigned int reg;

	reg = snd_soc_read(source->codec, RT5631_OUTMIXER_R_CTRL);
	return !(reg & RT5631_M_DAC_R_TO_OUTMIXER_R);
}

static int check_dacl_to_spkmixl(struct snd_soc_dapm_widget *source,
	struct snd_soc_dapm_widget *sink)
{
	unsigned int reg;

	reg = snd_soc_read(source->codec, RT5631_SPK_MIXER_CTRL);
	return !(reg & RT5631_M_DAC_L_TO_SPKMIXER_L);
}

static int check_dacr_to_spkmixr(struct snd_soc_dapm_widget *source,
	struct snd_soc_dapm_widget *sink)
{
	unsigned int reg;

	reg = snd_soc_read(source->codec, RT5631_SPK_MIXER_CTRL);
	return !(reg & RT5631_M_DAC_R_TO_SPKMIXER_R);
}

/* Left ADC data path selected from MIC1 when its record-mixer mute is clear. */
static int check_adcl_select(struct snd_soc_dapm_widget *source,
	struct snd_soc_dapm_widget *sink)
{
	unsigned int reg;

	reg = snd_soc_read(source->codec, RT5631_ADC_REC_MIXER);
	return !(reg & RT5631_M_MIC1_TO_RECMIXER_L);
}

/* Right ADC data path selected from MIC2 when its record-mixer mute is clear. */
static int check_adcr_select(struct snd_soc_dapm_widget *source,
	struct snd_soc_dapm_widget *sink)
{
	unsigned int reg;

	reg = snd_soc_read(source->codec, RT5631_ADC_REC_MIXER);
	return !(reg &
RT5631_M_MIC2_TO_RECMIXER_R);
}

/**
 * onebit_depop_power_stage - auto depop in power stage.
 * @codec: codec instance to program
 * @enable: power on/off
 *
 * When power on/off headphone, the depop sequence is done by hardware.
 * Clearing RT5631_EN_ONE_BIT_DEPOP selects the hardware ("one-bit")
 * depop path, as opposed to the register-driven sequence used by
 * depop_seq_power_stage().
 */
static void onebit_depop_power_stage(struct snd_soc_codec *codec, int enable)
{
	unsigned int soft_vol, hp_zc;

	/* enable one-bit depop function */
	snd_soc_update_bits(codec, RT5631_DEPOP_FUN_CTRL_2,
				RT5631_EN_ONE_BIT_DEPOP, 0);

	/*
	 * Keep soft volume and zero crossing setting: both features are
	 * parked (soft volume written to 0, one IRQ-control bit masked via
	 * & 0xf7ff) for the duration of the sequence and restored below.
	 * NOTE(review): exact meaning of the masked bit is taken from the
	 * mask only — confirm against the RT5631 datasheet.
	 */
	soft_vol = snd_soc_read(codec, RT5631_SOFT_VOL_CTRL);
	snd_soc_write(codec, RT5631_SOFT_VOL_CTRL, 0);
	hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
	snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
	if (enable) {
		/* config one-bit depop parameter */
		rt5631_write_index(codec, RT5631_TEST_MODE_CTRL, 0x84c0);
		rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x309f);
		rt5631_write_index(codec, RT5631_CP_INTL_REG2, 0x6530);
		/* power on capless block */
		snd_soc_write(codec, RT5631_DEPOP_FUN_CTRL_2,
				RT5631_EN_CAP_FREE_DEPOP);
	} else {
		/* power off capless block, then let it settle */
		snd_soc_write(codec, RT5631_DEPOP_FUN_CTRL_2, 0);
		msleep(100);
	}

	/* recover soft volume and zero crossing setting */
	snd_soc_write(codec, RT5631_SOFT_VOL_CTRL, soft_vol);
	snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc);
}

/**
 * onebit_depop_mute_stage - auto depop in mute stage.
 * @codec: codec instance to program
 * @enable: mute/unmute
 *
 * When mute/unmute headphone, the depop sequence is done by hardware.
 */
static void onebit_depop_mute_stage(struct snd_soc_codec *codec, int enable)
{
	unsigned int soft_vol, hp_zc;

	/* enable one-bit depop function */
	snd_soc_update_bits(codec, RT5631_DEPOP_FUN_CTRL_2,
				RT5631_EN_ONE_BIT_DEPOP, 0);

	/*
	 * Keep soft volume and zero crossing setting; both are parked while
	 * the hardware runs the mute/unmute ramp and restored at the end.
	 */
	soft_vol = snd_soc_read(codec, RT5631_SOFT_VOL_CTRL);
	snd_soc_write(codec, RT5631_SOFT_VOL_CTRL, 0);
	hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
	snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
	if (enable) {
		/* give the hardware time before starting the ramp */
		schedule_timeout_uninterruptible(msecs_to_jiffies(10));
		/* config one-bit depop parameter */
		rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x307f);
		/* clear both HP mute bits, then wait out the unmute ramp */
		snd_soc_update_bits(codec, RT5631_HP_OUT_VOL,
				RT5631_L_MUTE | RT5631_R_MUTE, 0);
		msleep(300);
	} else {
		/* set both HP mute bits, then wait out the mute ramp */
		snd_soc_update_bits(codec, RT5631_HP_OUT_VOL,
			RT5631_L_MUTE | RT5631_R_MUTE,
			RT5631_L_MUTE | RT5631_R_MUTE);
		msleep(100);
	}

	/* recover soft volume and zero crossing setting */
	snd_soc_write(codec, RT5631_SOFT_VOL_CTRL, soft_vol);
	snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc);
}

/**
 * depop_seq_power_stage - step by step depop sequence in power stage.
 * @codec: codec instance to program
 * @enable: power on/off
 *
 * When power on/off headphone, the depop sequence is done in step by step.
*/ static void depop_seq_power_stage(struct snd_soc_codec *codec, int enable) { unsigned int soft_vol, hp_zc; /* depop control by register */ snd_soc_update_bits(codec, RT5631_DEPOP_FUN_CTRL_2, RT5631_EN_ONE_BIT_DEPOP, RT5631_EN_ONE_BIT_DEPOP); /* keep soft volume and zero crossing setting */ soft_vol = snd_soc_read(codec, RT5631_SOFT_VOL_CTRL); snd_soc_write(codec, RT5631_SOFT_VOL_CTRL, 0); hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2); snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff); if (enable) { /* config depop sequence parameter */ rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x303e); /* power on headphone and charge pump */ snd_soc_update_bits(codec, RT5631_PWR_MANAG_ADD3, RT5631_PWR_CHARGE_PUMP | RT5631_PWR_HP_L_AMP | RT5631_PWR_HP_R_AMP, RT5631_PWR_CHARGE_PUMP | RT5631_PWR_HP_L_AMP | RT5631_PWR_HP_R_AMP); /* power on soft generator and depop mode2 */ snd_soc_write(codec, RT5631_DEPOP_FUN_CTRL_1, RT5631_POW_ON_SOFT_GEN | RT5631_EN_DEPOP2_FOR_HP); msleep(100); /* stop depop mode */ snd_soc_update_bits(codec, RT5631_PWR_MANAG_ADD3, RT5631_PWR_HP_DEPOP_DIS, RT5631_PWR_HP_DEPOP_DIS); } else { /* config depop sequence parameter */ rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x303F); snd_soc_write(codec, RT5631_DEPOP_FUN_CTRL_1, RT5631_POW_ON_SOFT_GEN | RT5631_EN_MUTE_UNMUTE_DEPOP | RT5631_PD_HPAMP_L_ST_UP | RT5631_PD_HPAMP_R_ST_UP); msleep(75); snd_soc_write(codec, RT5631_DEPOP_FUN_CTRL_1, RT5631_POW_ON_SOFT_GEN | RT5631_PD_HPAMP_L_ST_UP | RT5631_PD_HPAMP_R_ST_UP); /* start depop mode */ snd_soc_update_bits(codec, RT5631_PWR_MANAG_ADD3, RT5631_PWR_HP_DEPOP_DIS, 0); /* config depop sequence parameter */ snd_soc_write(codec, RT5631_DEPOP_FUN_CTRL_1, RT5631_POW_ON_SOFT_GEN | RT5631_EN_DEPOP2_FOR_HP | RT5631_PD_HPAMP_L_ST_UP | RT5631_PD_HPAMP_R_ST_UP); msleep(80); snd_soc_write(codec, RT5631_DEPOP_FUN_CTRL_1, RT5631_POW_ON_SOFT_GEN); /* power down headphone and charge pump */ snd_soc_update_bits(codec, RT5631_PWR_MANAG_ADD3, 
RT5631_PWR_CHARGE_PUMP | RT5631_PWR_HP_L_AMP | RT5631_PWR_HP_R_AMP, 0); } /* recover soft volume and zero crossing setting */ snd_soc_write(codec, RT5631_SOFT_VOL_CTRL, soft_vol); snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc); } /** * depop_seq_mute_stage - step by step depop sequence in mute stage. * @enable: mute/unmute * * When mute/unmute headphone, the depop sequence is done in step by step. */ static void depop_seq_mute_stage(struct snd_soc_codec *codec, int enable) { unsigned int soft_vol, hp_zc; /* depop control by register */ snd_soc_update_bits(codec, RT5631_DEPOP_FUN_CTRL_2, RT5631_EN_ONE_BIT_DEPOP, RT5631_EN_ONE_BIT_DEPOP); /* keep soft volume and zero crossing setting */ soft_vol = snd_soc_read(codec, RT5631_SOFT_VOL_CTRL); snd_soc_write(codec, RT5631_SOFT_VOL_CTRL, 0); hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2); snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff); if (enable) { schedule_timeout_uninterruptible(msecs_to_jiffies(10)); /* config depop sequence parameter */ rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x302f); snd_soc_write(codec, RT5631_DEPOP_FUN_CTRL_1, RT5631_POW_ON_SOFT_GEN | RT5631_EN_MUTE_UNMUTE_DEPOP | RT5631_EN_HP_R_M_UN_MUTE_DEPOP | RT5631_EN_HP_L_M_UN_MUTE_DEPOP); snd_soc_update_bits(codec, RT5631_HP_OUT_VOL, RT5631_L_MUTE | RT5631_R_MUTE, 0); msleep(160); } else { /* config depop sequence parameter */ rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x302f); snd_soc_write(codec, RT5631_DEPOP_FUN_CTRL_1, RT5631_POW_ON_SOFT_GEN | RT5631_EN_MUTE_UNMUTE_DEPOP | RT5631_EN_HP_R_M_UN_MUTE_DEPOP | RT5631_EN_HP_L_M_UN_MUTE_DEPOP); snd_soc_update_bits(codec, RT5631_HP_OUT_VOL, RT5631_L_MUTE | RT5631_R_MUTE, RT5631_L_MUTE | RT5631_R_MUTE); msleep(150); } /* recover soft volume and zero crossing setting */ snd_soc_write(codec, RT5631_SOFT_VOL_CTRL, soft_vol); snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc); } static int hp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { 
struct snd_soc_codec *codec = w->codec; struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(codec); switch (event) { case SND_SOC_DAPM_PRE_PMD: if (rt5631->codec_version) { onebit_depop_mute_stage(codec, 0); onebit_depop_power_stage(codec, 0); } else { depop_seq_mute_stage(codec, 0); depop_seq_power_stage(codec, 0); } break; case SND_SOC_DAPM_POST_PMU: if (rt5631->codec_version) { onebit_depop_power_stage(codec, 1); onebit_depop_mute_stage(codec, 1); } else { depop_seq_power_stage(codec, 1); depop_seq_mute_stage(codec, 1); } break; default: break; } return 0; } static int set_dmic_params(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = w->codec; struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(codec); switch (rt5631->rx_rate) { case 44100: case 48000: snd_soc_update_bits(codec, RT5631_DIG_MIC_CTRL, RT5631_DMIC_CLK_CTRL_MASK, RT5631_DMIC_CLK_CTRL_TO_32FS); break; case 32000: case 22050: snd_soc_update_bits(codec, RT5631_DIG_MIC_CTRL, RT5631_DMIC_CLK_CTRL_MASK, RT5631_DMIC_CLK_CTRL_TO_64FS); break; case 16000: case 11025: case 8000: snd_soc_update_bits(codec, RT5631_DIG_MIC_CTRL, RT5631_DMIC_CLK_CTRL_MASK, RT5631_DMIC_CLK_CTRL_TO_128FS); break; default: return -EINVAL; } return 0; } static const struct snd_kcontrol_new rt5631_recmixl_mixer_controls[] = { SOC_DAPM_SINGLE("OUTMIXL Capture Switch", RT5631_ADC_REC_MIXER, RT5631_M_OUTMIXL_RECMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("MIC1_BST1 Capture Switch", RT5631_ADC_REC_MIXER, RT5631_M_MIC1_RECMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("AXILVOL Capture Switch", RT5631_ADC_REC_MIXER, RT5631_M_AXIL_RECMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("MONOIN_RX Capture Switch", RT5631_ADC_REC_MIXER, RT5631_M_MONO_IN_RECMIXL_BIT, 1, 1), }; static const struct snd_kcontrol_new rt5631_recmixr_mixer_controls[] = { SOC_DAPM_SINGLE("MONOIN_RX Capture Switch", RT5631_ADC_REC_MIXER, RT5631_M_MONO_IN_RECMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("AXIRVOL Capture Switch", RT5631_ADC_REC_MIXER, 
RT5631_M_AXIR_RECMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("MIC2_BST2 Capture Switch", RT5631_ADC_REC_MIXER, RT5631_M_MIC2_RECMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("OUTMIXR Capture Switch", RT5631_ADC_REC_MIXER, RT5631_M_OUTMIXR_RECMIXR_BIT, 1, 1), }; static const struct snd_kcontrol_new rt5631_spkmixl_mixer_controls[] = { SOC_DAPM_SINGLE("RECMIXL Playback Switch", RT5631_SPK_MIXER_CTRL, RT5631_M_RECMIXL_SPKMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("MIC1_P Playback Switch", RT5631_SPK_MIXER_CTRL, RT5631_M_MIC1P_SPKMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("DACL Playback Switch", RT5631_SPK_MIXER_CTRL, RT5631_M_DACL_SPKMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("OUTMIXL Playback Switch", RT5631_SPK_MIXER_CTRL, RT5631_M_OUTMIXL_SPKMIXL_BIT, 1, 1), }; static const struct snd_kcontrol_new rt5631_spkmixr_mixer_controls[] = { SOC_DAPM_SINGLE("OUTMIXR Playback Switch", RT5631_SPK_MIXER_CTRL, RT5631_M_OUTMIXR_SPKMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("DACR Playback Switch", RT5631_SPK_MIXER_CTRL, RT5631_M_DACR_SPKMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("MIC2_P Playback Switch", RT5631_SPK_MIXER_CTRL, RT5631_M_MIC2P_SPKMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("RECMIXR Playback Switch", RT5631_SPK_MIXER_CTRL, RT5631_M_RECMIXR_SPKMIXR_BIT, 1, 1), }; static const struct snd_kcontrol_new rt5631_outmixl_mixer_controls[] = { SOC_DAPM_SINGLE("RECMIXL Playback Switch", RT5631_OUTMIXER_L_CTRL, RT5631_M_RECMIXL_OUTMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("RECMIXR Playback Switch", RT5631_OUTMIXER_L_CTRL, RT5631_M_RECMIXR_OUTMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("DACL Playback Switch", RT5631_OUTMIXER_L_CTRL, RT5631_M_DACL_OUTMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("MIC1_BST1 Playback Switch", RT5631_OUTMIXER_L_CTRL, RT5631_M_MIC1_OUTMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("MIC2_BST2 Playback Switch", RT5631_OUTMIXER_L_CTRL, RT5631_M_MIC2_OUTMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("MONOIN_RXP Playback Switch", RT5631_OUTMIXER_L_CTRL, RT5631_M_MONO_INP_OUTMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("AXILVOL Playback Switch", RT5631_OUTMIXER_L_CTRL, RT5631_M_AXIL_OUTMIXL_BIT, 1, 1), 
SOC_DAPM_SINGLE("AXIRVOL Playback Switch", RT5631_OUTMIXER_L_CTRL, RT5631_M_AXIR_OUTMIXL_BIT, 1, 1), SOC_DAPM_SINGLE("VDAC Playback Switch", RT5631_OUTMIXER_L_CTRL, RT5631_M_VDAC_OUTMIXL_BIT, 1, 1), }; static const struct snd_kcontrol_new rt5631_outmixr_mixer_controls[] = { SOC_DAPM_SINGLE("VDAC Playback Switch", RT5631_OUTMIXER_R_CTRL, RT5631_M_VDAC_OUTMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("AXIRVOL Playback Switch", RT5631_OUTMIXER_R_CTRL, RT5631_M_AXIR_OUTMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("AXILVOL Playback Switch", RT5631_OUTMIXER_R_CTRL, RT5631_M_AXIL_OUTMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("MONOIN_RXN Playback Switch", RT5631_OUTMIXER_R_CTRL, RT5631_M_MONO_INN_OUTMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("MIC2_BST2 Playback Switch", RT5631_OUTMIXER_R_CTRL, RT5631_M_MIC2_OUTMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("MIC1_BST1 Playback Switch", RT5631_OUTMIXER_R_CTRL, RT5631_M_MIC1_OUTMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("DACR Playback Switch", RT5631_OUTMIXER_R_CTRL, RT5631_M_DACR_OUTMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("RECMIXR Playback Switch", RT5631_OUTMIXER_R_CTRL, RT5631_M_RECMIXR_OUTMIXR_BIT, 1, 1), SOC_DAPM_SINGLE("RECMIXL Playback Switch", RT5631_OUTMIXER_R_CTRL, RT5631_M_RECMIXL_OUTMIXR_BIT, 1, 1), }; static const struct snd_kcontrol_new rt5631_AXO1MIX_mixer_controls[] = { SOC_DAPM_SINGLE("MIC1_BST1 Playback Switch", RT5631_AXO1MIXER_CTRL, RT5631_M_MIC1_AXO1MIX_BIT , 1, 1), SOC_DAPM_SINGLE("MIC2_BST2 Playback Switch", RT5631_AXO1MIXER_CTRL, RT5631_M_MIC2_AXO1MIX_BIT, 1, 1), SOC_DAPM_SINGLE("OUTVOLL Playback Switch", RT5631_AXO1MIXER_CTRL, RT5631_M_OUTMIXL_AXO1MIX_BIT , 1 , 1), SOC_DAPM_SINGLE("OUTVOLR Playback Switch", RT5631_AXO1MIXER_CTRL, RT5631_M_OUTMIXR_AXO1MIX_BIT, 1, 1), }; static const struct snd_kcontrol_new rt5631_AXO2MIX_mixer_controls[] = { SOC_DAPM_SINGLE("MIC1_BST1 Playback Switch", RT5631_AXO2MIXER_CTRL, RT5631_M_MIC1_AXO2MIX_BIT, 1, 1), SOC_DAPM_SINGLE("MIC2_BST2 Playback Switch", RT5631_AXO2MIXER_CTRL, RT5631_M_MIC2_AXO2MIX_BIT, 1, 1), SOC_DAPM_SINGLE("OUTVOLL Playback 
Switch", RT5631_AXO2MIXER_CTRL, RT5631_M_OUTMIXL_AXO2MIX_BIT, 1, 1), SOC_DAPM_SINGLE("OUTVOLR Playback Switch", RT5631_AXO2MIXER_CTRL, RT5631_M_OUTMIXR_AXO2MIX_BIT, 1 , 1), }; static const struct snd_kcontrol_new rt5631_spolmix_mixer_controls[] = { SOC_DAPM_SINGLE("SPKVOLL Playback Switch", RT5631_SPK_MONO_OUT_CTRL, RT5631_M_SPKVOLL_SPOLMIX_BIT, 1, 1), SOC_DAPM_SINGLE("SPKVOLR Playback Switch", RT5631_SPK_MONO_OUT_CTRL, RT5631_M_SPKVOLR_SPOLMIX_BIT, 1, 1), }; static const struct snd_kcontrol_new rt5631_spormix_mixer_controls[] = { SOC_DAPM_SINGLE("SPKVOLL Playback Switch", RT5631_SPK_MONO_OUT_CTRL, RT5631_M_SPKVOLL_SPORMIX_BIT, 1, 1), SOC_DAPM_SINGLE("SPKVOLR Playback Switch", RT5631_SPK_MONO_OUT_CTRL, RT5631_M_SPKVOLR_SPORMIX_BIT, 1, 1), }; static const struct snd_kcontrol_new rt5631_monomix_mixer_controls[] = { SOC_DAPM_SINGLE("OUTVOLL Playback Switch", RT5631_SPK_MONO_OUT_CTRL, RT5631_M_OUTVOLL_MONOMIX_BIT, 1, 1), SOC_DAPM_SINGLE("OUTVOLR Playback Switch", RT5631_SPK_MONO_OUT_CTRL, RT5631_M_OUTVOLR_MONOMIX_BIT, 1, 1), }; /* Left SPK Volume Input */ static const char *rt5631_spkvoll_sel[] = {"Vmid", "SPKMIXL"}; static const SOC_ENUM_SINGLE_DECL( rt5631_spkvoll_enum, RT5631_SPK_OUT_VOL, RT5631_L_EN_SHIFT, rt5631_spkvoll_sel); static const struct snd_kcontrol_new rt5631_spkvoll_mux_control = SOC_DAPM_ENUM("Left SPKVOL SRC", rt5631_spkvoll_enum); /* Left HP Volume Input */ static const char *rt5631_hpvoll_sel[] = {"Vmid", "OUTMIXL"}; static const SOC_ENUM_SINGLE_DECL( rt5631_hpvoll_enum, RT5631_HP_OUT_VOL, RT5631_L_EN_SHIFT, rt5631_hpvoll_sel); static const struct snd_kcontrol_new rt5631_hpvoll_mux_control = SOC_DAPM_ENUM("Left HPVOL SRC", rt5631_hpvoll_enum); /* Left Out Volume Input */ static const char *rt5631_outvoll_sel[] = {"Vmid", "OUTMIXL"}; static const SOC_ENUM_SINGLE_DECL( rt5631_outvoll_enum, RT5631_MONO_AXO_1_2_VOL, RT5631_L_EN_SHIFT, rt5631_outvoll_sel); static const struct snd_kcontrol_new rt5631_outvoll_mux_control = SOC_DAPM_ENUM("Left OUTVOL SRC", 
rt5631_outvoll_enum); /* Right Out Volume Input */ static const char *rt5631_outvolr_sel[] = {"Vmid", "OUTMIXR"}; static const SOC_ENUM_SINGLE_DECL( rt5631_outvolr_enum, RT5631_MONO_AXO_1_2_VOL, RT5631_R_EN_SHIFT, rt5631_outvolr_sel); static const struct snd_kcontrol_new rt5631_outvolr_mux_control = SOC_DAPM_ENUM("Right OUTVOL SRC", rt5631_outvolr_enum); /* Right HP Volume Input */ static const char *rt5631_hpvolr_sel[] = {"Vmid", "OUTMIXR"}; static const SOC_ENUM_SINGLE_DECL( rt5631_hpvolr_enum, RT5631_HP_OUT_VOL, RT5631_R_EN_SHIFT, rt5631_hpvolr_sel); static const struct snd_kcontrol_new rt5631_hpvolr_mux_control = SOC_DAPM_ENUM("Right HPVOL SRC", rt5631_hpvolr_enum); /* Right SPK Volume Input */ static const char *rt5631_spkvolr_sel[] = {"Vmid", "SPKMIXR"}; static const SOC_ENUM_SINGLE_DECL( rt5631_spkvolr_enum, RT5631_SPK_OUT_VOL, RT5631_R_EN_SHIFT, rt5631_spkvolr_sel); static const struct snd_kcontrol_new rt5631_spkvolr_mux_control = SOC_DAPM_ENUM("Right SPKVOL SRC", rt5631_spkvolr_enum); /* SPO Left Channel Input */ static const char *rt5631_spol_src_sel[] = { "SPOLMIX", "MONOIN_RX", "VDAC", "DACL"}; static const SOC_ENUM_SINGLE_DECL( rt5631_spol_src_enum, RT5631_SPK_MONO_HP_OUT_CTRL, RT5631_SPK_L_MUX_SEL_SHIFT, rt5631_spol_src_sel); static const struct snd_kcontrol_new rt5631_spol_mux_control = SOC_DAPM_ENUM("SPOL SRC", rt5631_spol_src_enum); /* SPO Right Channel Input */ static const char *rt5631_spor_src_sel[] = { "SPORMIX", "MONOIN_RX", "VDAC", "DACR"}; static const SOC_ENUM_SINGLE_DECL( rt5631_spor_src_enum, RT5631_SPK_MONO_HP_OUT_CTRL, RT5631_SPK_R_MUX_SEL_SHIFT, rt5631_spor_src_sel); static const struct snd_kcontrol_new rt5631_spor_mux_control = SOC_DAPM_ENUM("SPOR SRC", rt5631_spor_src_enum); /* MONO Input */ static const char *rt5631_mono_src_sel[] = {"MONOMIX", "MONOIN_RX", "VDAC"}; static const SOC_ENUM_SINGLE_DECL( rt5631_mono_src_enum, RT5631_SPK_MONO_HP_OUT_CTRL, RT5631_MONO_MUX_SEL_SHIFT, rt5631_mono_src_sel); static const struct 
snd_kcontrol_new rt5631_mono_mux_control = SOC_DAPM_ENUM("MONO SRC", rt5631_mono_src_enum); /* Left HPO Input */ static const char *rt5631_hpl_src_sel[] = {"Left HPVOL", "Left DAC"}; static const SOC_ENUM_SINGLE_DECL( rt5631_hpl_src_enum, RT5631_SPK_MONO_HP_OUT_CTRL, RT5631_HP_L_MUX_SEL_SHIFT, rt5631_hpl_src_sel); static const struct snd_kcontrol_new rt5631_hpl_mux_control = SOC_DAPM_ENUM("HPL SRC", rt5631_hpl_src_enum); /* Right HPO Input */ static const char *rt5631_hpr_src_sel[] = {"Right HPVOL", "Right DAC"}; static const SOC_ENUM_SINGLE_DECL( rt5631_hpr_src_enum, RT5631_SPK_MONO_HP_OUT_CTRL, RT5631_HP_R_MUX_SEL_SHIFT, rt5631_hpr_src_sel); static const struct snd_kcontrol_new rt5631_hpr_mux_control = SOC_DAPM_ENUM("HPR SRC", rt5631_hpr_src_enum); static const struct snd_soc_dapm_widget rt5631_dapm_widgets[] = { /* Vmid */ SND_SOC_DAPM_VMID("Vmid"), /* PLL1 */ SND_SOC_DAPM_SUPPLY("PLL1", RT5631_PWR_MANAG_ADD2, RT5631_PWR_PLL1_BIT, 0, NULL, 0), /* Input Side */ /* Input Lines */ SND_SOC_DAPM_INPUT("MIC1"), SND_SOC_DAPM_INPUT("MIC2"), SND_SOC_DAPM_INPUT("AXIL"), SND_SOC_DAPM_INPUT("AXIR"), SND_SOC_DAPM_INPUT("MONOIN_RXN"), SND_SOC_DAPM_INPUT("MONOIN_RXP"), SND_SOC_DAPM_INPUT("DMIC"), /* MICBIAS */ SND_SOC_DAPM_MICBIAS("MIC Bias1", RT5631_PWR_MANAG_ADD2, RT5631_PWR_MICBIAS1_VOL_BIT, 0), SND_SOC_DAPM_MICBIAS("MIC Bias2", RT5631_PWR_MANAG_ADD2, RT5631_PWR_MICBIAS2_VOL_BIT, 0), /* Boost */ SND_SOC_DAPM_PGA("MIC1 Boost", RT5631_PWR_MANAG_ADD2, RT5631_PWR_MIC1_BOOT_GAIN_BIT, 0, NULL, 0), SND_SOC_DAPM_PGA("MIC2 Boost", RT5631_PWR_MANAG_ADD2, RT5631_PWR_MIC2_BOOT_GAIN_BIT, 0, NULL, 0), SND_SOC_DAPM_PGA("MONOIN_RXP Boost", RT5631_PWR_MANAG_ADD4, RT5631_PWR_MONO_IN_P_VOL_BIT, 0, NULL, 0), SND_SOC_DAPM_PGA("MONOIN_RXN Boost", RT5631_PWR_MANAG_ADD4, RT5631_PWR_MONO_IN_N_VOL_BIT, 0, NULL, 0), SND_SOC_DAPM_PGA("AXIL Boost", RT5631_PWR_MANAG_ADD4, RT5631_PWR_AXIL_IN_VOL_BIT, 0, NULL, 0), SND_SOC_DAPM_PGA("AXIR Boost", RT5631_PWR_MANAG_ADD4, RT5631_PWR_AXIR_IN_VOL_BIT, 0, NULL, 
0), /* MONO In */ SND_SOC_DAPM_MIXER("MONO_IN", SND_SOC_NOPM, 0, 0, NULL, 0), /* REC Mixer */ SND_SOC_DAPM_MIXER("RECMIXL Mixer", RT5631_PWR_MANAG_ADD2, RT5631_PWR_RECMIXER_L_BIT, 0, &rt5631_recmixl_mixer_controls[0], ARRAY_SIZE(rt5631_recmixl_mixer_controls)), SND_SOC_DAPM_MIXER("RECMIXR Mixer", RT5631_PWR_MANAG_ADD2, RT5631_PWR_RECMIXER_R_BIT, 0, &rt5631_recmixr_mixer_controls[0], ARRAY_SIZE(rt5631_recmixr_mixer_controls)), /* Because of record duplication for L/R channel, * L/R ADCs need power up at the same time */ SND_SOC_DAPM_MIXER("ADC Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), /* DMIC */ SND_SOC_DAPM_SUPPLY("DMIC Supply", RT5631_DIG_MIC_CTRL, RT5631_DMIC_ENA_SHIFT, 0, set_dmic_params, SND_SOC_DAPM_PRE_PMU), /* ADC Data Srouce */ SND_SOC_DAPM_SUPPLY("Left ADC Select", RT5631_INT_ST_IRQ_CTRL_2, RT5631_ADC_DATA_SEL_MIC1_SHIFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("Right ADC Select", RT5631_INT_ST_IRQ_CTRL_2, RT5631_ADC_DATA_SEL_MIC2_SHIFT, 0, NULL, 0), /* ADCs */ SND_SOC_DAPM_ADC("Left ADC", "HIFI Capture", RT5631_PWR_MANAG_ADD1, RT5631_PWR_ADC_L_CLK_BIT, 0), SND_SOC_DAPM_ADC("Right ADC", "HIFI Capture", RT5631_PWR_MANAG_ADD1, RT5631_PWR_ADC_R_CLK_BIT, 0), /* DAC and ADC supply power */ SND_SOC_DAPM_SUPPLY("I2S", RT5631_PWR_MANAG_ADD1, RT5631_PWR_MAIN_I2S_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("DAC REF", RT5631_PWR_MANAG_ADD1, RT5631_PWR_DAC_REF_BIT, 0, NULL, 0), /* Output Side */ /* DACs */ SND_SOC_DAPM_DAC("Left DAC", "HIFI Playback", RT5631_PWR_MANAG_ADD1, RT5631_PWR_DAC_L_CLK_BIT, 0), SND_SOC_DAPM_DAC("Right DAC", "HIFI Playback", RT5631_PWR_MANAG_ADD1, RT5631_PWR_DAC_R_CLK_BIT, 0), SND_SOC_DAPM_DAC("Voice DAC", "Voice DAC Mono Playback", SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_PGA("Voice DAC Boost", SND_SOC_NOPM, 0, 0, NULL, 0), /* DAC supply power */ SND_SOC_DAPM_SUPPLY("Left DAC To Mixer", RT5631_PWR_MANAG_ADD1, RT5631_PWR_DAC_L_TO_MIXER_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("Right DAC To Mixer", RT5631_PWR_MANAG_ADD1, RT5631_PWR_DAC_R_TO_MIXER_BIT, 0, NULL, 0), /* 
Left SPK Mixer */ SND_SOC_DAPM_MIXER("SPKMIXL Mixer", RT5631_PWR_MANAG_ADD2, RT5631_PWR_SPKMIXER_L_BIT, 0, &rt5631_spkmixl_mixer_controls[0], ARRAY_SIZE(rt5631_spkmixl_mixer_controls)), /* Left Out Mixer */ SND_SOC_DAPM_MIXER("OUTMIXL Mixer", RT5631_PWR_MANAG_ADD2, RT5631_PWR_OUTMIXER_L_BIT, 0, &rt5631_outmixl_mixer_controls[0], ARRAY_SIZE(rt5631_outmixl_mixer_controls)), /* Right Out Mixer */ SND_SOC_DAPM_MIXER("OUTMIXR Mixer", RT5631_PWR_MANAG_ADD2, RT5631_PWR_OUTMIXER_R_BIT, 0, &rt5631_outmixr_mixer_controls[0], ARRAY_SIZE(rt5631_outmixr_mixer_controls)), /* Right SPK Mixer */ SND_SOC_DAPM_MIXER("SPKMIXR Mixer", RT5631_PWR_MANAG_ADD2, RT5631_PWR_SPKMIXER_R_BIT, 0, &rt5631_spkmixr_mixer_controls[0], ARRAY_SIZE(rt5631_spkmixr_mixer_controls)), /* Volume Mux */ SND_SOC_DAPM_MUX("Left SPKVOL Mux", RT5631_PWR_MANAG_ADD4, RT5631_PWR_SPK_L_VOL_BIT, 0, &rt5631_spkvoll_mux_control), SND_SOC_DAPM_MUX("Left HPVOL Mux", RT5631_PWR_MANAG_ADD4, RT5631_PWR_HP_L_OUT_VOL_BIT, 0, &rt5631_hpvoll_mux_control), SND_SOC_DAPM_MUX("Left OUTVOL Mux", RT5631_PWR_MANAG_ADD4, RT5631_PWR_LOUT_VOL_BIT, 0, &rt5631_outvoll_mux_control), SND_SOC_DAPM_MUX("Right OUTVOL Mux", RT5631_PWR_MANAG_ADD4, RT5631_PWR_ROUT_VOL_BIT, 0, &rt5631_outvolr_mux_control), SND_SOC_DAPM_MUX("Right HPVOL Mux", RT5631_PWR_MANAG_ADD4, RT5631_PWR_HP_R_OUT_VOL_BIT, 0, &rt5631_hpvolr_mux_control), SND_SOC_DAPM_MUX("Right SPKVOL Mux", RT5631_PWR_MANAG_ADD4, RT5631_PWR_SPK_R_VOL_BIT, 0, &rt5631_spkvolr_mux_control), /* DAC To HP */ SND_SOC_DAPM_PGA_S("Left DAC_HP", 0, SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA_S("Right DAC_HP", 0, SND_SOC_NOPM, 0, 0, NULL, 0), /* HP Depop */ SND_SOC_DAPM_PGA_S("HP Depop", 1, SND_SOC_NOPM, 0, 0, hp_event, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), /* AXO1 Mixer */ SND_SOC_DAPM_MIXER("AXO1MIX Mixer", RT5631_PWR_MANAG_ADD3, RT5631_PWR_AXO1MIXER_BIT, 0, &rt5631_AXO1MIX_mixer_controls[0], ARRAY_SIZE(rt5631_AXO1MIX_mixer_controls)), /* SPOL Mixer */ SND_SOC_DAPM_MIXER("SPOLMIX Mixer", 
SND_SOC_NOPM, 0, 0, &rt5631_spolmix_mixer_controls[0], ARRAY_SIZE(rt5631_spolmix_mixer_controls)), /* MONO Mixer */ SND_SOC_DAPM_MIXER("MONOMIX Mixer", RT5631_PWR_MANAG_ADD3, RT5631_PWR_MONOMIXER_BIT, 0, &rt5631_monomix_mixer_controls[0], ARRAY_SIZE(rt5631_monomix_mixer_controls)), /* SPOR Mixer */ SND_SOC_DAPM_MIXER("SPORMIX Mixer", SND_SOC_NOPM, 0, 0, &rt5631_spormix_mixer_controls[0], ARRAY_SIZE(rt5631_spormix_mixer_controls)), /* AXO2 Mixer */ SND_SOC_DAPM_MIXER("AXO2MIX Mixer", RT5631_PWR_MANAG_ADD3, RT5631_PWR_AXO2MIXER_BIT, 0, &rt5631_AXO2MIX_mixer_controls[0], ARRAY_SIZE(rt5631_AXO2MIX_mixer_controls)), /* Mux */ SND_SOC_DAPM_MUX("SPOL Mux", SND_SOC_NOPM, 0, 0, &rt5631_spol_mux_control), SND_SOC_DAPM_MUX("SPOR Mux", SND_SOC_NOPM, 0, 0, &rt5631_spor_mux_control), SND_SOC_DAPM_MUX("MONO Mux", SND_SOC_NOPM, 0, 0, &rt5631_mono_mux_control), SND_SOC_DAPM_MUX("HPL Mux", SND_SOC_NOPM, 0, 0, &rt5631_hpl_mux_control), SND_SOC_DAPM_MUX("HPR Mux", SND_SOC_NOPM, 0, 0, &rt5631_hpr_mux_control), /* AMP supply */ SND_SOC_DAPM_SUPPLY("MONO Depop", RT5631_PWR_MANAG_ADD3, RT5631_PWR_MONO_DEPOP_DIS_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("Class D", RT5631_PWR_MANAG_ADD1, RT5631_PWR_CLASS_D_BIT, 0, NULL, 0), /* Output Lines */ SND_SOC_DAPM_OUTPUT("AUXO1"), SND_SOC_DAPM_OUTPUT("AUXO2"), SND_SOC_DAPM_OUTPUT("SPOL"), SND_SOC_DAPM_OUTPUT("SPOR"), SND_SOC_DAPM_OUTPUT("HPOL"), SND_SOC_DAPM_OUTPUT("HPOR"), SND_SOC_DAPM_OUTPUT("MONO"), }; static const struct snd_soc_dapm_route rt5631_dapm_routes[] = { {"MIC1 Boost", NULL, "MIC1"}, {"MIC2 Boost", NULL, "MIC2"}, {"MONOIN_RXP Boost", NULL, "MONOIN_RXP"}, {"MONOIN_RXN Boost", NULL, "MONOIN_RXN"}, {"AXIL Boost", NULL, "AXIL"}, {"AXIR Boost", NULL, "AXIR"}, {"MONO_IN", NULL, "MONOIN_RXP Boost"}, {"MONO_IN", NULL, "MONOIN_RXN Boost"}, {"RECMIXL Mixer", "OUTMIXL Capture Switch", "OUTMIXL Mixer"}, {"RECMIXL Mixer", "MIC1_BST1 Capture Switch", "MIC1 Boost"}, {"RECMIXL Mixer", "AXILVOL Capture Switch", "AXIL Boost"}, {"RECMIXL Mixer", "MONOIN_RX 
Capture Switch", "MONO_IN"}, {"RECMIXR Mixer", "OUTMIXR Capture Switch", "OUTMIXR Mixer"}, {"RECMIXR Mixer", "MIC2_BST2 Capture Switch", "MIC2 Boost"}, {"RECMIXR Mixer", "AXIRVOL Capture Switch", "AXIR Boost"}, {"RECMIXR Mixer", "MONOIN_RX Capture Switch", "MONO_IN"}, {"ADC Mixer", NULL, "RECMIXL Mixer"}, {"ADC Mixer", NULL, "RECMIXR Mixer"}, {"Left ADC", NULL, "ADC Mixer"}, {"Left ADC", NULL, "Left ADC Select", check_adcl_select}, {"Left ADC", NULL, "PLL1", check_sysclk1_source}, {"Left ADC", NULL, "I2S"}, {"Left ADC", NULL, "DAC REF"}, {"Right ADC", NULL, "ADC Mixer"}, {"Right ADC", NULL, "Right ADC Select", check_adcr_select}, {"Right ADC", NULL, "PLL1", check_sysclk1_source}, {"Right ADC", NULL, "I2S"}, {"Right ADC", NULL, "DAC REF"}, {"DMIC", NULL, "DMIC Supply", check_dmic_used}, {"Left ADC", NULL, "DMIC"}, {"Right ADC", NULL, "DMIC"}, {"Left DAC", NULL, "PLL1", check_sysclk1_source}, {"Left DAC", NULL, "I2S"}, {"Left DAC", NULL, "DAC REF"}, {"Right DAC", NULL, "PLL1", check_sysclk1_source}, {"Right DAC", NULL, "I2S"}, {"Right DAC", NULL, "DAC REF"}, {"Voice DAC Boost", NULL, "Voice DAC"}, {"SPKMIXL Mixer", NULL, "Left DAC To Mixer", check_dacl_to_spkmixl}, {"SPKMIXL Mixer", "RECMIXL Playback Switch", "RECMIXL Mixer"}, {"SPKMIXL Mixer", "MIC1_P Playback Switch", "MIC1"}, {"SPKMIXL Mixer", "DACL Playback Switch", "Left DAC"}, {"SPKMIXL Mixer", "OUTMIXL Playback Switch", "OUTMIXL Mixer"}, {"SPKMIXR Mixer", NULL, "Right DAC To Mixer", check_dacr_to_spkmixr}, {"SPKMIXR Mixer", "OUTMIXR Playback Switch", "OUTMIXR Mixer"}, {"SPKMIXR Mixer", "DACR Playback Switch", "Right DAC"}, {"SPKMIXR Mixer", "MIC2_P Playback Switch", "MIC2"}, {"SPKMIXR Mixer", "RECMIXR Playback Switch", "RECMIXR Mixer"}, {"OUTMIXL Mixer", NULL, "Left DAC To Mixer", check_dacl_to_outmixl}, {"OUTMIXL Mixer", "RECMIXL Playback Switch", "RECMIXL Mixer"}, {"OUTMIXL Mixer", "RECMIXR Playback Switch", "RECMIXR Mixer"}, {"OUTMIXL Mixer", "DACL Playback Switch", "Left DAC"}, {"OUTMIXL Mixer", "MIC1_BST1 
Playback Switch", "MIC1 Boost"}, {"OUTMIXL Mixer", "MIC2_BST2 Playback Switch", "MIC2 Boost"}, {"OUTMIXL Mixer", "MONOIN_RXP Playback Switch", "MONOIN_RXP Boost"}, {"OUTMIXL Mixer", "AXILVOL Playback Switch", "AXIL Boost"}, {"OUTMIXL Mixer", "AXIRVOL Playback Switch", "AXIR Boost"}, {"OUTMIXL Mixer", "VDAC Playback Switch", "Voice DAC Boost"}, {"OUTMIXR Mixer", NULL, "Right DAC To Mixer", check_dacr_to_outmixr}, {"OUTMIXR Mixer", "RECMIXL Playback Switch", "RECMIXL Mixer"}, {"OUTMIXR Mixer", "RECMIXR Playback Switch", "RECMIXR Mixer"}, {"OUTMIXR Mixer", "DACR Playback Switch", "Right DAC"}, {"OUTMIXR Mixer", "MIC1_BST1 Playback Switch", "MIC1 Boost"}, {"OUTMIXR Mixer", "MIC2_BST2 Playback Switch", "MIC2 Boost"}, {"OUTMIXR Mixer", "MONOIN_RXN Playback Switch", "MONOIN_RXN Boost"}, {"OUTMIXR Mixer", "AXILVOL Playback Switch", "AXIL Boost"}, {"OUTMIXR Mixer", "AXIRVOL Playback Switch", "AXIR Boost"}, {"OUTMIXR Mixer", "VDAC Playback Switch", "Voice DAC Boost"}, {"Left SPKVOL Mux", "SPKMIXL", "SPKMIXL Mixer"}, {"Left SPKVOL Mux", "Vmid", "Vmid"}, {"Left HPVOL Mux", "OUTMIXL", "OUTMIXL Mixer"}, {"Left HPVOL Mux", "Vmid", "Vmid"}, {"Left OUTVOL Mux", "OUTMIXL", "OUTMIXL Mixer"}, {"Left OUTVOL Mux", "Vmid", "Vmid"}, {"Right OUTVOL Mux", "OUTMIXR", "OUTMIXR Mixer"}, {"Right OUTVOL Mux", "Vmid", "Vmid"}, {"Right HPVOL Mux", "OUTMIXR", "OUTMIXR Mixer"}, {"Right HPVOL Mux", "Vmid", "Vmid"}, {"Right SPKVOL Mux", "SPKMIXR", "SPKMIXR Mixer"}, {"Right SPKVOL Mux", "Vmid", "Vmid"}, {"AXO1MIX Mixer", "MIC1_BST1 Playback Switch", "MIC1 Boost"}, {"AXO1MIX Mixer", "OUTVOLL Playback Switch", "Left OUTVOL Mux"}, {"AXO1MIX Mixer", "OUTVOLR Playback Switch", "Right OUTVOL Mux"}, {"AXO1MIX Mixer", "MIC2_BST2 Playback Switch", "MIC2 Boost"}, {"AXO2MIX Mixer", "MIC1_BST1 Playback Switch", "MIC1 Boost"}, {"AXO2MIX Mixer", "OUTVOLL Playback Switch", "Left OUTVOL Mux"}, {"AXO2MIX Mixer", "OUTVOLR Playback Switch", "Right OUTVOL Mux"}, {"AXO2MIX Mixer", "MIC2_BST2 Playback Switch", "MIC2 
Boost"}, {"SPOLMIX Mixer", "SPKVOLL Playback Switch", "Left SPKVOL Mux"}, {"SPOLMIX Mixer", "SPKVOLR Playback Switch", "Right SPKVOL Mux"}, {"SPORMIX Mixer", "SPKVOLL Playback Switch", "Left SPKVOL Mux"}, {"SPORMIX Mixer", "SPKVOLR Playback Switch", "Right SPKVOL Mux"}, {"MONOMIX Mixer", "OUTVOLL Playback Switch", "Left OUTVOL Mux"}, {"MONOMIX Mixer", "OUTVOLR Playback Switch", "Right OUTVOL Mux"}, {"SPOL Mux", "SPOLMIX", "SPOLMIX Mixer"}, {"SPOL Mux", "MONOIN_RX", "MONO_IN"}, {"SPOL Mux", "VDAC", "Voice DAC Boost"}, {"SPOL Mux", "DACL", "Left DAC"}, {"SPOR Mux", "SPORMIX", "SPORMIX Mixer"}, {"SPOR Mux", "MONOIN_RX", "MONO_IN"}, {"SPOR Mux", "VDAC", "Voice DAC Boost"}, {"SPOR Mux", "DACR", "Right DAC"}, {"MONO Mux", "MONOMIX", "MONOMIX Mixer"}, {"MONO Mux", "MONOIN_RX", "MONO_IN"}, {"MONO Mux", "VDAC", "Voice DAC Boost"}, {"Right DAC_HP", NULL, "Right DAC"}, {"Left DAC_HP", NULL, "Left DAC"}, {"HPL Mux", "Left HPVOL", "Left HPVOL Mux"}, {"HPL Mux", "Left DAC", "Left DAC_HP"}, {"HPR Mux", "Right HPVOL", "Right HPVOL Mux"}, {"HPR Mux", "Right DAC", "Right DAC_HP"}, {"HP Depop", NULL, "HPL Mux"}, {"HP Depop", NULL, "HPR Mux"}, {"AUXO1", NULL, "AXO1MIX Mixer"}, {"AUXO2", NULL, "AXO2MIX Mixer"}, {"SPOL", NULL, "Class D"}, {"SPOL", NULL, "SPOL Mux"}, {"SPOR", NULL, "Class D"}, {"SPOR", NULL, "SPOR Mux"}, {"HPOL", NULL, "HP Depop"}, {"HPOR", NULL, "HP Depop"}, {"MONO", NULL, "MONO Depop"}, {"MONO", NULL, "MONO Mux"}, }; struct coeff_clk_div { u32 mclk; u32 bclk; u32 rate; u16 reg_val; }; /* PLL divisors */ struct pll_div { u32 pll_in; u32 pll_out; u16 reg_val; }; static const struct pll_div codec_master_pll_div[] = { {2048000, 8192000, 0x0ea0}, {3686400, 8192000, 0x4e27}, {12000000, 8192000, 0x456b}, {13000000, 8192000, 0x495f}, {13100000, 8192000, 0x0320}, {2048000, 11289600, 0xf637}, {3686400, 11289600, 0x2f22}, {12000000, 11289600, 0x3e2f}, {13000000, 11289600, 0x4d5b}, {13100000, 11289600, 0x363b}, {2048000, 16384000, 0x1ea0}, {3686400, 16384000, 0x9e27}, {12000000, 
16384000, 0x452b}, {13000000, 16384000, 0x542f}, {13100000, 16384000, 0x03a0}, {2048000, 16934400, 0xe625}, {3686400, 16934400, 0x9126}, {12000000, 16934400, 0x4d2c}, {13000000, 16934400, 0x742f}, {13100000, 16934400, 0x3c27}, {2048000, 22579200, 0x2aa0}, {3686400, 22579200, 0x2f20}, {12000000, 22579200, 0x7e2f}, {13000000, 22579200, 0x742f}, {13100000, 22579200, 0x3c27}, {2048000, 24576000, 0x2ea0}, {3686400, 24576000, 0xee27}, {12000000, 24576000, 0x2915}, {13000000, 24576000, 0x772e}, {13100000, 24576000, 0x0d20}, {26000000, 24576000, 0x2027}, {26000000, 22579200, 0x392f}, {24576000, 22579200, 0x0921}, {24576000, 24576000, 0x02a0}, }; static const struct pll_div codec_slave_pll_div[] = { {256000, 2048000, 0x46f0}, {256000, 4096000, 0x3ea0}, {352800, 5644800, 0x3ea0}, {512000, 8192000, 0x3ea0}, {1024000, 8192000, 0x46f0}, {705600, 11289600, 0x3ea0}, {1024000, 16384000, 0x3ea0}, {1411200, 22579200, 0x3ea0}, {1536000, 24576000, 0x3ea0}, {2048000, 16384000, 0x1ea0}, {2822400, 22579200, 0x1ea0}, {2822400, 45158400, 0x5ec0}, {5644800, 45158400, 0x46f0}, {3072000, 24576000, 0x1ea0}, {3072000, 49152000, 0x5ec0}, {6144000, 49152000, 0x46f0}, {705600, 11289600, 0x3ea0}, {705600, 8467200, 0x3ab0}, {24576000, 24576000, 0x02a0}, {1411200, 11289600, 0x1690}, {2822400, 11289600, 0x0a90}, {1536000, 12288000, 0x1690}, {3072000, 12288000, 0x0a90}, }; static struct coeff_clk_div coeff_div[] = { /* sysclk is 256fs */ {2048000, 8000 * 32, 8000, 0x1000}, {2048000, 8000 * 64, 8000, 0x0000}, {2822400, 11025 * 32, 11025, 0x1000}, {2822400, 11025 * 64, 11025, 0x0000}, {4096000, 16000 * 32, 16000, 0x1000}, {4096000, 16000 * 64, 16000, 0x0000}, {5644800, 22050 * 32, 22050, 0x1000}, {5644800, 22050 * 64, 22050, 0x0000}, {8192000, 32000 * 32, 32000, 0x1000}, {8192000, 32000 * 64, 32000, 0x0000}, {11289600, 44100 * 32, 44100, 0x1000}, {11289600, 44100 * 64, 44100, 0x0000}, {12288000, 48000 * 32, 48000, 0x1000}, {12288000, 48000 * 64, 48000, 0x0000}, {22579200, 88200 * 32, 88200, 0x1000}, 
{22579200, 88200 * 64, 88200, 0x0000}, {24576000, 96000 * 32, 96000, 0x1000}, {24576000, 96000 * 64, 96000, 0x0000}, /* sysclk is 512fs */ {4096000, 8000 * 32, 8000, 0x3000}, {4096000, 8000 * 64, 8000, 0x2000}, {5644800, 11025 * 32, 11025, 0x3000}, {5644800, 11025 * 64, 11025, 0x2000}, {8192000, 16000 * 32, 16000, 0x3000}, {8192000, 16000 * 64, 16000, 0x2000}, {11289600, 22050 * 32, 22050, 0x3000}, {11289600, 22050 * 64, 22050, 0x2000}, {16384000, 32000 * 32, 32000, 0x3000}, {16384000, 32000 * 64, 32000, 0x2000}, {22579200, 44100 * 32, 44100, 0x3000}, {22579200, 44100 * 64, 44100, 0x2000}, {24576000, 48000 * 32, 48000, 0x3000}, {24576000, 48000 * 64, 48000, 0x2000}, {45158400, 88200 * 32, 88200, 0x3000}, {45158400, 88200 * 64, 88200, 0x2000}, {49152000, 96000 * 32, 96000, 0x3000}, {49152000, 96000 * 64, 96000, 0x2000}, /* sysclk is 24.576Mhz or 22.5792Mhz */ {24576000, 8000 * 32, 8000, 0x7080}, {24576000, 8000 * 64, 8000, 0x6080}, {24576000, 16000 * 32, 16000, 0x5080}, {24576000, 16000 * 64, 16000, 0x4080}, {24576000, 24000 * 32, 24000, 0x5000}, {24576000, 24000 * 64, 24000, 0x4000}, {24576000, 32000 * 32, 32000, 0x3080}, {24576000, 32000 * 64, 32000, 0x2080}, {22579200, 11025 * 32, 11025, 0x7000}, {22579200, 11025 * 64, 11025, 0x6000}, {22579200, 22050 * 32, 22050, 0x5000}, {22579200, 22050 * 64, 22050, 0x4000}, }; static int get_coeff(int mclk, int rate, int timesofbclk) { int i; for (i = 0; i < ARRAY_SIZE(coeff_div); i++) { if (coeff_div[i].mclk == mclk && coeff_div[i].rate == rate && (coeff_div[i].bclk / coeff_div[i].rate) == timesofbclk) return i; } return -EINVAL; } static int rt5631_hifi_pcm_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(codec); int timesofbclk = 32, coeff; unsigned int iface = 0; dev_dbg(codec->dev, "enter %s\n", __func__); 
/* NOTE(review): continuation of rt5631_hifi_pcm_params() -- computes the
 * BCLK rate, looks up the stereo AD/DA clock divider for the stream rate
 * and programs the serial-port data length for the sample format.
 */
	rt5631->bclk_rate = snd_soc_params_to_bclk(params);
	if (rt5631->bclk_rate < 0) {
		dev_err(codec->dev, "Fail to get BCLK rate\n");
		return rt5631->bclk_rate;
	}
	rt5631->rx_rate = params_rate(params);

	/* In master mode the BCLK/LRCK ratio comes from the stream itself;
	 * in slave mode a fixed 32x ratio (timesofbclk) is assumed.
	 */
	if (rt5631->master)
		coeff = get_coeff(rt5631->sysclk, rt5631->rx_rate,
			rt5631->bclk_rate / rt5631->rx_rate);
	else
		coeff = get_coeff(rt5631->sysclk, rt5631->rx_rate,
			timesofbclk);
	if (coeff < 0) {
		dev_err(codec->dev, "Fail to get coeff\n");
		return -EINVAL;
	}

	/* Select the I2S data length bits matching the sample format
	 * (16-bit is the register default, so no bits are set for it).
	 */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		break;
	case SNDRV_PCM_FORMAT_S20_3LE:
		iface |= RT5631_SDP_I2S_DL_20;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		iface |= RT5631_SDP_I2S_DL_24;
		break;
	case SNDRV_PCM_FORMAT_S8:
		iface |= RT5631_SDP_I2S_DL_8;
		break;
	default:
		return -EINVAL;
	}

	snd_soc_update_bits(codec, RT5631_SDP_CTRL,
		RT5631_SDP_I2S_DL_MASK, iface);
	snd_soc_write(codec, RT5631_STEREO_AD_DA_CLK_CTRL,
		coeff_div[coeff].reg_val);

	return 0;
}

/*
 * rt5631_hifi_codec_set_dai_fmt - configure the serial data port format
 *
 * Records the master/slave role in the driver private data and writes
 * the DAI format (I2S / left-justified / PCM-A / PCM-B) and the BCLK
 * polarity to the SDP control register.
 *
 * Returns 0 on success or -EINVAL for an unsupported format/clocking
 * combination.
 */
static int rt5631_hifi_codec_set_dai_fmt(struct snd_soc_dai *codec_dai,
						unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(codec);
	unsigned int iface = 0;

	dev_dbg(codec->dev, "enter %s\n", __func__);

	/* Clock master/slave selection */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		rt5631->master = 1;
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		iface |= RT5631_SDP_MODE_SEL_SLAVE;
		rt5631->master = 0;
		break;
	default:
		return -EINVAL;
	}

	/* Data format selection */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		iface |= RT5631_SDP_I2S_DF_LEFT;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		iface |= RT5631_SDP_I2S_DF_PCM_A;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		iface |= RT5631_SDP_I2S_DF_PCM_B;
		break;
	default:
		return -EINVAL;
	}

	/* BCLK polarity (only normal and inverted-BCLK are supported) */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_IB_NF:
		iface |= RT5631_SDP_I2S_BCLK_POL_CTRL;
		break;
	default:
		return -EINVAL;
	}

	snd_soc_write(codec, RT5631_SDP_CTRL, iface);
	return 0;
}

static int rt5631_hifi_codec_set_dai_sysclk(struct
snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(codec); dev_dbg(codec->dev, "enter %s, syclk=%d\n", __func__, freq); if ((freq >= (256 * 8000)) && (freq <= (512 * 96000))) { rt5631->sysclk = freq; return 0; } return -EINVAL; } static int rt5631_codec_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id, int source, unsigned int freq_in, unsigned int freq_out) { struct snd_soc_codec *codec = codec_dai->codec; struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(codec); int i, ret = -EINVAL; dev_dbg(codec->dev, "enter %s\n", __func__); if (!freq_in || !freq_out) { dev_dbg(codec->dev, "PLL disabled\n"); snd_soc_update_bits(codec, RT5631_GLOBAL_CLK_CTRL, RT5631_SYSCLK_SOUR_SEL_MASK, RT5631_SYSCLK_SOUR_SEL_MCLK); return 0; } if (rt5631->master) { for (i = 0; i < ARRAY_SIZE(codec_master_pll_div); i++) if (freq_in == codec_master_pll_div[i].pll_in && freq_out == codec_master_pll_div[i].pll_out) { dev_info(codec->dev, "change PLL in master mode\n"); snd_soc_write(codec, RT5631_PLL_CTRL, codec_master_pll_div[i].reg_val); schedule_timeout_uninterruptible( msecs_to_jiffies(20)); snd_soc_update_bits(codec, RT5631_GLOBAL_CLK_CTRL, RT5631_SYSCLK_SOUR_SEL_MASK | RT5631_PLLCLK_SOUR_SEL_MASK, RT5631_SYSCLK_SOUR_SEL_PLL | RT5631_PLLCLK_SOUR_SEL_MCLK); ret = 0; break; } } else { for (i = 0; i < ARRAY_SIZE(codec_slave_pll_div); i++) if (freq_in == codec_slave_pll_div[i].pll_in && freq_out == codec_slave_pll_div[i].pll_out) { dev_info(codec->dev, "change PLL in slave mode\n"); snd_soc_write(codec, RT5631_PLL_CTRL, codec_slave_pll_div[i].reg_val); schedule_timeout_uninterruptible( msecs_to_jiffies(20)); snd_soc_update_bits(codec, RT5631_GLOBAL_CLK_CTRL, RT5631_SYSCLK_SOUR_SEL_MASK | RT5631_PLLCLK_SOUR_SEL_MASK, RT5631_SYSCLK_SOUR_SEL_PLL | RT5631_PLLCLK_SOUR_SEL_BCLK); ret = 0; break; } } return ret; } static int rt5631_set_bias_level(struct snd_soc_codec *codec, 
enum snd_soc_bias_level level)
{
	/* NOTE(review): continuation of rt5631_set_bias_level() -- manages
	 * codec power: micbias supplies in active states, VREF/main-bias
	 * bring-up plus register-cache sync when leaving OFF, and a full
	 * power-down when entering OFF.
	 */
	switch (level) {
	case SND_SOC_BIAS_ON:
	case SND_SOC_BIAS_PREPARE:
		/* Raise both micbias supplies while the codec is active */
		snd_soc_update_bits(codec, RT5631_PWR_MANAG_ADD2,
			RT5631_PWR_MICBIAS1_VOL | RT5631_PWR_MICBIAS2_VOL,
			RT5631_PWR_MICBIAS1_VOL | RT5631_PWR_MICBIAS2_VOL);
		break;

	case SND_SOC_BIAS_STANDBY:
		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
			/* Power up the references first and let them
			 * settle, then enable fast VREF and replay the
			 * register cache to the now-powered part.
			 */
			snd_soc_update_bits(codec, RT5631_PWR_MANAG_ADD3,
				RT5631_PWR_VREF | RT5631_PWR_MAIN_BIAS,
				RT5631_PWR_VREF | RT5631_PWR_MAIN_BIAS);
			msleep(80);
			snd_soc_update_bits(codec, RT5631_PWR_MANAG_ADD3,
				RT5631_PWR_FAST_VREF_CTRL,
				RT5631_PWR_FAST_VREF_CTRL);
			codec->cache_only = false;
			snd_soc_cache_sync(codec);
		}
		break;

	case SND_SOC_BIAS_OFF:
		/* Clear every power-management register block */
		snd_soc_write(codec, RT5631_PWR_MANAG_ADD1, 0x0000);
		snd_soc_write(codec, RT5631_PWR_MANAG_ADD2, 0x0000);
		snd_soc_write(codec, RT5631_PWR_MANAG_ADD3, 0x0000);
		snd_soc_write(codec, RT5631_PWR_MANAG_ADD4, 0x0000);
		break;

	default:
		break;
	}
	codec->dapm.bias_level = level;

	return 0;
}

/*
 * rt5631_probe - codec-level initialization
 *
 * Sets up 8-bit-register/16-bit-value I2C cache I/O, detects the silicon
 * revision from an index register, resets the part, powers up the
 * references, and applies the initial register setup (headphone
 * zero-cross, ClassD auto-recovery policy per revision, and optional
 * DMIC pin muxing when the platform uses a digital mic).
 *
 * Returns 0 on success or a negative errno from cache-I/O setup.
 */
static int rt5631_probe(struct snd_soc_codec *codec)
{
	struct rt5631_priv *rt5631 = snd_soc_codec_get_drvdata(codec);
	unsigned int val;
	int ret;

	ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
		return ret;
	}

	/* Bit 1 of this index register distinguishes the codec revision */
	val = rt5631_read_index(codec, RT5631_ADDA_MIXER_INTL_REG3);
	if (val & 0x0002)
		rt5631->codec_version = 1;
	else
		rt5631->codec_version = 0;

	rt5631_reset(codec);

	/* Same reference bring-up sequence as the STANDBY transition */
	snd_soc_update_bits(codec, RT5631_PWR_MANAG_ADD3,
		RT5631_PWR_VREF | RT5631_PWR_MAIN_BIAS,
		RT5631_PWR_VREF | RT5631_PWR_MAIN_BIAS);
	msleep(80);
	snd_soc_update_bits(codec, RT5631_PWR_MANAG_ADD3,
		RT5631_PWR_FAST_VREF_CTRL, RT5631_PWR_FAST_VREF_CTRL);

	/* enable HP zero cross */
	snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, 0x0f18);

	/* power off ClassD auto Recovery */
	if (rt5631->codec_version)
		snd_soc_update_bits(codec, RT5631_INT_ST_IRQ_CTRL_2,
					0x2000, 0x2000);
	else
		snd_soc_update_bits(codec, RT5631_INT_ST_IRQ_CTRL_2,
					0x2000, 0);

	/* DMIC */
	if (rt5631->dmic_used_flag) {
snd_soc_update_bits(codec, RT5631_GPIO_CTRL, RT5631_GPIO_PIN_FUN_SEL_MASK | RT5631_GPIO_DMIC_FUN_SEL_MASK, RT5631_GPIO_PIN_FUN_SEL_GPIO_DIMC | RT5631_GPIO_DMIC_FUN_SEL_DIMC); snd_soc_update_bits(codec, RT5631_DIG_MIC_CTRL, RT5631_DMIC_L_CH_LATCH_MASK | RT5631_DMIC_R_CH_LATCH_MASK, RT5631_DMIC_L_CH_LATCH_FALLING | RT5631_DMIC_R_CH_LATCH_RISING); } codec->dapm.bias_level = SND_SOC_BIAS_STANDBY; return 0; } static int rt5631_remove(struct snd_soc_codec *codec) { rt5631_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } #ifdef CONFIG_PM static int rt5631_suspend(struct snd_soc_codec *codec) { rt5631_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int rt5631_resume(struct snd_soc_codec *codec) { rt5631_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } #else #define rt5631_suspend NULL #define rt5631_resume NULL #endif #define RT5631_STEREO_RATES SNDRV_PCM_RATE_8000_96000 #define RT5631_FORMAT (SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S20_3LE | \ SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S8) static const struct snd_soc_dai_ops rt5631_ops = { .hw_params = rt5631_hifi_pcm_params, .set_fmt = rt5631_hifi_codec_set_dai_fmt, .set_sysclk = rt5631_hifi_codec_set_dai_sysclk, .set_pll = rt5631_codec_set_dai_pll, }; static struct snd_soc_dai_driver rt5631_dai[] = { { .name = "rt5631-hifi", .id = 1, .playback = { .stream_name = "HIFI Playback", .channels_min = 1, .channels_max = 2, .rates = RT5631_STEREO_RATES, .formats = RT5631_FORMAT, }, .capture = { .stream_name = "HIFI Capture", .channels_min = 1, .channels_max = 2, .rates = RT5631_STEREO_RATES, .formats = RT5631_FORMAT, }, .ops = &rt5631_ops, }, }; static struct snd_soc_codec_driver soc_codec_dev_rt5631 = { .probe = rt5631_probe, .remove = rt5631_remove, .suspend = rt5631_suspend, .resume = rt5631_resume, .set_bias_level = rt5631_set_bias_level, .reg_cache_size = RT5631_VENDOR_ID2 + 1, .reg_word_size = sizeof(u16), .reg_cache_default = rt5631_reg, .volatile_register = rt5631_volatile_register, 
/* NOTE(review): tail of soc_codec_dev_rt5631 -- remaining register-cache
 * hooks plus the mixer-control and DAPM tables registered by the core.
 */
	.readable_register = rt5631_readable_register,
	.reg_cache_step = 1,
	.controls = rt5631_snd_controls,
	.num_controls = ARRAY_SIZE(rt5631_snd_controls),
	.dapm_widgets = rt5631_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(rt5631_dapm_widgets),
	.dapm_routes = rt5631_dapm_routes,
	.num_dapm_routes = ARRAY_SIZE(rt5631_dapm_routes),
};

/* I2C device IDs this driver binds to */
static const struct i2c_device_id rt5631_i2c_id[] = {
	{ "rt5631", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, rt5631_i2c_id);

/*
 * rt5631_i2c_probe - allocate per-device state and register the codec
 *
 * Private data is devm-allocated, so it is freed automatically when the
 * device goes away.  Returns 0 on success, -ENOMEM if the allocation
 * fails, or the error from snd_soc_register_codec().
 */
static int rt5631_i2c_probe(struct i2c_client *i2c,
		    const struct i2c_device_id *id)
{
	struct rt5631_priv *rt5631;
	int ret;

	rt5631 = devm_kzalloc(&i2c->dev, sizeof(struct rt5631_priv),
				GFP_KERNEL);
	if (NULL == rt5631)
		return -ENOMEM;

	i2c_set_clientdata(i2c, rt5631);

	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5631,
			rt5631_dai, ARRAY_SIZE(rt5631_dai));
	return ret;
}

/* Unregister the codec; devm releases the private data for us */
static __devexit int rt5631_i2c_remove(struct i2c_client *client)
{
	snd_soc_unregister_codec(&client->dev);
	return 0;
}

static struct i2c_driver rt5631_i2c_driver = {
	.driver = {
		.name = "rt5631",
		.owner = THIS_MODULE,
	},
	.probe = rt5631_i2c_probe,
	.remove = __devexit_p(rt5631_i2c_remove),
	.id_table = rt5631_i2c_id,
};

/* Module entry/exit: register/unregister the I2C driver */
static int __init rt5631_modinit(void)
{
	return i2c_add_driver(&rt5631_i2c_driver);
}
module_init(rt5631_modinit);

static void __exit rt5631_modexit(void)
{
	i2c_del_driver(&rt5631_i2c_driver);
}
module_exit(rt5631_modexit);

MODULE_DESCRIPTION("ASoC RT5631 driver");
MODULE_AUTHOR("flove <flove@realtek.com>");
MODULE_LICENSE("GPL");
gpl-2.0
jmztaylor/android_kernel_htc_k2_plc_cl
sound/arm/pxa2xx-ac97.c
4894
6492
/*
 * linux/sound/pxa2xx-ac97.c -- AC97 support for the Intel PXA2xx chip.
 *
 * Author:	Nicolas Pitre
 * Created:	Dec 02, 2004
 * Copyright:	MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
#include <sound/pxa2xx-lib.h>

#include <mach/regs-ac97.h>
#include <mach/audio.h>

#include "pxa2xx-pcm.h"

/* Reset the AC97 codec: prefer a cold reset, fall back to a warm reset */
static void pxa2xx_ac97_reset(struct snd_ac97 *ac97)
{
	if (!pxa2xx_ac97_try_cold_reset(ac97)) {
		pxa2xx_ac97_try_warm_reset(ac97);
	}

	pxa2xx_ac97_finish_reset(ac97);
}

static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
	.read	= pxa2xx_ac97_read,
	.write	= pxa2xx_ac97_write,
	.reset	= pxa2xx_ac97_reset,
};

/* DMA parameters for the playback (PCM out) direction */
static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_out = {
	.name		= "AC97 PCM out",
	.dev_addr	= __PREG(PCDR),
	.drcmr		= &DRCMR(12),
	.dcmd		= DCMD_INCSRCADDR | DCMD_FLOWTRG |
			  DCMD_BURST32 | DCMD_WIDTH4,
};

/* DMA parameters for the capture (PCM in) direction */
static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_in = {
	.name		= "AC97 PCM in",
	.dev_addr	= __PREG(PCDR),
	.drcmr		= &DRCMR(11),
	.dcmd		= DCMD_INCTRGADDR | DCMD_FLOWSRC |
			  DCMD_BURST32 | DCMD_WIDTH4,
};

/* Single global PCM/codec instance: the PXA2xx has only one AC97 port */
static struct snd_pcm *pxa2xx_ac97_pcm;
static struct snd_ac97 *pxa2xx_ac97_ac97;

/*
 * Constrain the runtime to stereo and to the rates the codec advertises
 * for this stream direction, then give the platform startup hook a
 * chance to veto or prepare the stream.
 */
static int pxa2xx_ac97_pcm_startup(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	pxa2xx_audio_ops_t *platform_ops;
	int r;

	runtime->hw.channels_min = 2;
	runtime->hw.channels_max = 2;

	/* Pick the codec rate mask matching the stream direction */
	r = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
		AC97_RATES_FRONT_DAC : AC97_RATES_ADC;
	runtime->hw.rates = pxa2xx_ac97_ac97->rates[r];
	snd_pcm_limit_hw_rates(runtime);

	platform_ops = substream->pcm->card->dev->platform_data;
	if (platform_ops && platform_ops->startup)
		return platform_ops->startup(substream, platform_ops->priv);
	else
		return 0;
}

/* Let the platform shutdown hook undo whatever its startup hook did */
static void pxa2xx_ac97_pcm_shutdown(struct snd_pcm_substream *substream)
{
	pxa2xx_audio_ops_t *platform_ops;

	platform_ops = substream->pcm->card->dev->platform_data;
	if (platform_ops && platform_ops->shutdown)
		platform_ops->shutdown(substream, platform_ops->priv);
}

/* Program the codec sample rate for the direction about to run */
static int pxa2xx_ac97_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int reg = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
		  AC97_PCM_FRONT_DAC_RATE : AC97_PCM_LR_ADC_RATE;
	return snd_ac97_set_rate(pxa2xx_ac97_ac97, reg, runtime->rate);
}

static struct pxa2xx_pcm_client pxa2xx_ac97_pcm_client = {
	.playback_params	= &pxa2xx_ac97_pcm_out,
	.capture_params		= &pxa2xx_ac97_pcm_in,
	.startup		= pxa2xx_ac97_pcm_startup,
	.shutdown		= pxa2xx_ac97_pcm_shutdown,
	.prepare		= pxa2xx_ac97_pcm_prepare,
};

#ifdef CONFIG_PM

/*
 * Suspend ordering: mark the card powered down, quiesce the PCM streams
 * and the codec, notify the platform, then power off the AC97 unit.
 */
static int pxa2xx_ac97_do_suspend(struct snd_card *card, pm_message_t state)
{
	pxa2xx_audio_ops_t *platform_ops = card->dev->platform_data;

	snd_power_change_state(card, SNDRV_CTL_POWER_D3cold);
	snd_pcm_suspend_all(pxa2xx_ac97_pcm);
	snd_ac97_suspend(pxa2xx_ac97_ac97);
	if (platform_ops && platform_ops->suspend)
		platform_ops->suspend(platform_ops->priv);

	return pxa2xx_ac97_hw_suspend();
}

/* Resume mirrors suspend: controller first, then platform, codec, card */
static int pxa2xx_ac97_do_resume(struct snd_card *card)
{
	pxa2xx_audio_ops_t *platform_ops = card->dev->platform_data;
	int rc;

	rc = pxa2xx_ac97_hw_resume();
	if (rc)
		return rc;

	if (platform_ops && platform_ops->resume)
		platform_ops->resume(platform_ops->priv);
	snd_ac97_resume(pxa2xx_ac97_ac97);
	snd_power_change_state(card, SNDRV_CTL_POWER_D0);

	return 0;
}

/* dev_pm_ops wrappers: only act when a card was actually registered */
static int pxa2xx_ac97_suspend(struct device *dev)
{
	struct snd_card *card = dev_get_drvdata(dev);
	int ret = 0;

	if (card)
		ret = pxa2xx_ac97_do_suspend(card, PMSG_SUSPEND);

	return ret;
}

static int pxa2xx_ac97_resume(struct device *dev)
{
	struct snd_card *card = dev_get_drvdata(dev);
	int ret = 0;

	if (card)
		ret = pxa2xx_ac97_do_resume(card);

	return ret;
}

static const struct dev_pm_ops pxa2xx_ac97_pm_ops = {
	.suspend	= pxa2xx_ac97_suspend,
	.resume		= pxa2xx_ac97_resume,
};
#endif

/*
 * Probe: create the card, the PCM device, the AC97 controller and the
 * codec mixer, then register the card.  Error paths unwind in reverse
 * order via the goto labels.
 */
static int __devinit pxa2xx_ac97_probe(struct platform_device *dev)
{
	struct snd_card *card;
	struct snd_ac97_bus *ac97_bus;
	struct snd_ac97_template ac97_template;
	int ret;
	pxa2xx_audio_ops_t *pdata = dev->dev.platform_data;

	if (dev->id >= 0) {
		dev_err(&dev->dev, "PXA2xx has only one AC97 port.\n");
		ret = -ENXIO;
		goto err_dev;
	}

	ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
			      THIS_MODULE, 0, &card);
	if (ret < 0)
		goto err;

	card->dev = &dev->dev;
	/* NOTE(review): strncpy may leave card->driver unterminated if the
	 * driver name fills the buffer -- verify against the name lengths.
	 */
	strncpy(card->driver, dev->dev.driver->name, sizeof(card->driver));

	ret = pxa2xx_pcm_new(card, &pxa2xx_ac97_pcm_client, &pxa2xx_ac97_pcm);
	if (ret)
		goto err;

	ret = pxa2xx_ac97_hw_probe(dev);
	if (ret)
		goto err;

	ret = snd_ac97_bus(card, 0, &pxa2xx_ac97_ops, NULL, &ac97_bus);
	if (ret)
		goto err_remove;
	memset(&ac97_template, 0, sizeof(ac97_template));
	ret = snd_ac97_mixer(ac97_bus, &ac97_template, &pxa2xx_ac97_ac97);
	if (ret)
		goto err_remove;

	snprintf(card->shortname, sizeof(card->shortname), "%s",
		 snd_ac97_get_short_name(pxa2xx_ac97_ac97));
	snprintf(card->longname, sizeof(card->longname), "%s (%s)",
		 dev->dev.driver->name, card->mixername);

	if (pdata && pdata->codec_pdata[0])
		snd_ac97_dev_add_pdata(ac97_bus->codec[0], pdata->codec_pdata[0]);
	snd_card_set_dev(card, &dev->dev);
	ret = snd_card_register(card);
	if (ret == 0) {
		platform_set_drvdata(dev, card);
		return 0;
	}

err_remove:
	pxa2xx_ac97_hw_remove(dev);
err:
	if (card)
		snd_card_free(card);
err_dev:
	return ret;
}

/* Remove: free the card (tears down PCM/codec) then the controller */
static int __devexit pxa2xx_ac97_remove(struct platform_device *dev)
{
	struct snd_card *card = platform_get_drvdata(dev);

	if (card) {
		snd_card_free(card);
		platform_set_drvdata(dev, NULL);
		pxa2xx_ac97_hw_remove(dev);
	}

	return 0;
}

static struct platform_driver pxa2xx_ac97_driver = {
	.probe		= pxa2xx_ac97_probe,
	.remove		= __devexit_p(pxa2xx_ac97_remove),
	.driver		= {
		.name	= "pxa2xx-ac97",
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &pxa2xx_ac97_pm_ops,
#endif
	},
};

module_platform_driver(pxa2xx_ac97_driver);

MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("AC97 driver for the Intel PXA2xx chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-ac97");
gpl-2.0
HeliumRom/android_kernel_nubia_nx507j
drivers/acpi/acpica/pswalk.c
5150
3540
/******************************************************************************
 *
 * Module Name: pswalk - Parser routines to walk parsed op tree(s)
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2012, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"

#define _COMPONENT          ACPI_PARSER
ACPI_MODULE_NAME("pswalk")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_delete_parse_tree
 *
 * PARAMETERS:  subtree_root        - Root of tree (or subtree) to delete
 *
 * RETURN:      None
 *
 * DESCRIPTION: Delete a portion of or an entire parse tree, iteratively,
 *              freeing each op only after all of its arguments have been
 *              freed (post-order).
 *
 ******************************************************************************/
void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root)
{
	union acpi_parse_object *node = subtree_root;
	union acpi_parse_object *sibling = NULL;
	union acpi_parse_object *ascended_from = NULL;

	ACPI_FUNCTION_TRACE_PTR(ps_delete_parse_tree, subtree_root);

	while (node) {

		/* Try to descend unless we just came back up from this op */

		if (node != ascended_from) {
			union acpi_parse_object *child =
			    acpi_ps_get_arg(node, 0);

			if (child) {
				node = child;
				continue;
			}
		}

		/* Op is complete; capture its links before freeing it */

		sibling = node->common.next;
		ascended_from = node->common.parent;

		acpi_ps_free_op(node);

		/* Freeing the original root terminates the walk */

		if (node == subtree_root) {
			return_VOID;
		}

		/* Move to the next sibling, else ascend to the parent */

		node = sibling ? sibling : ascended_from;
	}

	return_VOID;
}
gpl-2.0
sub77/kernel_samsung_matissewifi
drivers/acpi/acpica/pswalk.c
5150
3540
/******************************************************************************
 *
 * Module Name: pswalk - Parser routines to walk parsed op tree(s)
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2012, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"

#define _COMPONENT          ACPI_PARSER
ACPI_MODULE_NAME("pswalk")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_delete_parse_tree
 *
 * PARAMETERS:  subtree_root        - Root of tree (or subtree) to delete
 *
 * RETURN:      None
 *
 * DESCRIPTION: Delete a portion of or an entire parse tree, iteratively,
 *              freeing each op only after all of its arguments have been
 *              freed (post-order).
 *
 ******************************************************************************/
void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root)
{
	union acpi_parse_object *node = subtree_root;
	union acpi_parse_object *sibling = NULL;
	union acpi_parse_object *ascended_from = NULL;

	ACPI_FUNCTION_TRACE_PTR(ps_delete_parse_tree, subtree_root);

	while (node) {

		/* Try to descend unless we just came back up from this op */

		if (node != ascended_from) {
			union acpi_parse_object *child =
			    acpi_ps_get_arg(node, 0);

			if (child) {
				node = child;
				continue;
			}
		}

		/* Op is complete; capture its links before freeing it */

		sibling = node->common.next;
		ascended_from = node->common.parent;

		acpi_ps_free_op(node);

		/* Freeing the original root terminates the walk */

		if (node == subtree_root) {
			return_VOID;
		}

		/* Move to the next sibling, else ascend to the parent */

		node = sibling ? sibling : ascended_from;
	}

	return_VOID;
}
gpl-2.0
CyanogenU8833/android_kernel_huawei_msm8x25
drivers/media/video/upd64031a.c
5150
7643
/*
 * upd64031A - NEC Electronics Ghost Reduction for NTSC in Japan
 *
 * 2003 by T.Adachi <tadachi@tadachi-net.com>
 * 2003 by Takeru KOMORIYA <komoriya@paken.org>
 * 2006 by Hans Verkuil <hverkuil@xs4all.nl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/upd64031a.h>

/* --------------------- read registers functions define -------------------- */

/* bit masks */
#define GR_MODE_MASK              0xc0
#define DIRECT_3DYCS_CONNECT_MASK 0xc0
#define SYNC_CIRCUIT_MASK         0xa0

/* -------------------------------------------------------------------------- */

MODULE_DESCRIPTION("uPD64031A driver");
MODULE_AUTHOR("T. Adachi, Takeru KOMORIYA, Hans Verkuil");
MODULE_LICENSE("GPL");

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");

/* Register indices; R10-R17 exist on the chip but are unused here. */
enum {
	R00 = 0, R01, R02, R03, R04, R05, R06, R07,
	R08, R09, R0A, R0B, R0C, R0D, R0E, R0F,
	/* unused registers
	 R10, R11, R12, R13, R14, R15, R16, R17, */
	TOT_REGS
};

/* Per-device state: cached register shadow plus the decoded routing bits. */
struct upd64031a_state {
	struct v4l2_subdev sd;
	u8 regs[TOT_REGS];		/* shadow copy of chip registers */
	u8 gr_mode;			/* ghost-reduction mode bits (R00) */
	u8 direct_3dycs_connect;	/* 3DYCS routing bits (R08) */
	u8 ext_comp_sync;		/* external composite sync bit (R05) */
	u8 ext_vert_sync;		/* external vertical sync bit (R05) */
};

static inline struct upd64031a_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct upd64031a_state, sd);
}

/* Power-on defaults written to R00..R0F at probe time. */
static u8 upd64031a_init[] = {
	0x00, 0xb8, 0x48, 0xd2, 0xe6,
	0x03, 0x10, 0x0b, 0xaf, 0x7f,
	0x00, 0x00, 0x1d, 0x5e, 0x00,
	0xd0
};

/* ------------------------------------------------------------------------ */

/*
 * Read back one of the two status registers (SA00/SA01). Only reg 0 and 1
 * are readable; anything else yields 0xff.
 *
 * NOTE(review): the return value of i2c_master_recv() is ignored here; on a
 * bus error the stale stack contents of buf[] would be returned — consider
 * checking the result.
 */
static u8 upd64031a_read(struct v4l2_subdev *sd, u8 reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u8 buf[2];

	if (reg >= sizeof(buf))
		return 0xff;
	i2c_master_recv(client, buf, 2);
	return buf[reg];
}

/* ------------------------------------------------------------------------ */

/* Write one register (reg, value pair) to the chip; logs at debug level 1. */
static void upd64031a_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u8 buf[2];

	buf[0] = reg;
	buf[1] = val;
	v4l2_dbg(1, debug, sd, "write reg: %02X val: %02X\n", reg, val);
	if (i2c_master_send(client, buf, 2) != 2)
		v4l2_err(sd, "I/O error write 0x%02x/0x%02x\n", reg, val);
}

/* ------------------------------------------------------------------------ */

/* The input changed due to new input or channel changed */
/*
 * Pulse bit 4 of R00 (set then clear) to restart the ghost reduction
 * circuit. The freq argument is unused and may be NULL (s_routing calls
 * this with NULL).
 */
static int upd64031a_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
{
	struct upd64031a_state *state = to_state(sd);
	u8 reg = state->regs[R00];

	v4l2_dbg(1, debug, sd, "changed input or channel\n");
	upd64031a_write(sd, R00, reg | 0x10);
	upd64031a_write(sd, R00, reg & ~0x10);
	return 0;
}

/* ------------------------------------------------------------------------ */

/*
 * Decode the routing word into GR mode, 3DYCS connection and external sync
 * bits, program R00/R05/R08 accordingly, then restart the GR circuit.
 */
static int upd64031a_s_routing(struct v4l2_subdev *sd,
			       u32 input, u32 output, u32 config)
{
	struct upd64031a_state *state = to_state(sd);
	u8 r00, r05, r08;

	state->gr_mode = (input & 3) << 6;
	state->direct_3dycs_connect = (input & 0xc) << 4;
	state->ext_comp_sync = (input & UPD64031A_COMPOSITE_EXTERNAL) << 1;
	state->ext_vert_sync = (input & UPD64031A_VERTICAL_EXTERNAL) << 2;
	r00 = (state->regs[R00] & ~GR_MODE_MASK) | state->gr_mode;
	/*
	 * NOTE(review): r05 is derived from regs[R00], not regs[R05] —
	 * presumably regs[R05] was intended; verify against the datasheet
	 * and upstream history before changing.
	 */
	r05 = (state->regs[R00] & ~SYNC_CIRCUIT_MASK) |
		state->ext_comp_sync | state->ext_vert_sync;
	r08 = (state->regs[R08] & ~DIRECT_3DYCS_CONNECT_MASK) |
		state->direct_3dycs_connect;
	upd64031a_write(sd, R00, r00);
	upd64031a_write(sd, R05, r05);
	upd64031a_write(sd, R08, r08);
	return upd64031a_s_frequency(sd, NULL);
}

/* Report the chip identity for VIDIOC_DBG_G_CHIP_IDENT. */
static int upd64031a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_UPD64031A, 0);
}

/* Dump the two readable status registers to the kernel log. */
static int upd64031a_log_status(struct v4l2_subdev *sd)
{
	v4l2_info(sd, "Status: SA00=0x%02x SA01=0x%02x\n",
			upd64031a_read(sd, 0), upd64031a_read(sd, 1));
	return 0;
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Debug register read; root-only, reg 0/1 readable (see upd64031a_read). */
static int upd64031a_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (!v4l2_chip_match_i2c_client(client, &reg->match))
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	reg->val = upd64031a_read(sd, reg->reg & 0xff);
	reg->size = 1;
	return 0;
}

/* Debug register write; root-only, bypasses the shadow copy. */
static int upd64031a_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (!v4l2_chip_match_i2c_client(client, &reg->match))
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	upd64031a_write(sd, reg->reg & 0xff, reg->val & 0xff);
	return 0;
}
#endif

/* ----------------------------------------------------------------------- */

static const struct v4l2_subdev_core_ops upd64031a_core_ops = {
	.log_status = upd64031a_log_status,
	.g_chip_ident = upd64031a_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = upd64031a_g_register,
	.s_register = upd64031a_s_register,
#endif
};

static const struct v4l2_subdev_tuner_ops upd64031a_tuner_ops = {
	.s_frequency = upd64031a_s_frequency,
};

static const struct v4l2_subdev_video_ops upd64031a_video_ops = {
	.s_routing = upd64031a_s_routing,
};

static const struct v4l2_subdev_ops upd64031a_ops = {
	.core = &upd64031a_core_ops,
	.tuner = &upd64031a_tuner_ops,
	.video = &upd64031a_video_ops,
};

/* ------------------------------------------------------------------------ */

/* i2c implementation */

/*
 * Probe: allocate state, register the v4l2 subdev and program the default
 * register table into the chip.
 */
static int upd64031a_probe(struct i2c_client *client,
			   const struct i2c_device_id *id)
{
	struct upd64031a_state *state;
	struct v4l2_subdev *sd;
	int i;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -EIO;

	v4l_info(client, "chip found @ 0x%x (%s)\n",
			client->addr << 1, client->adapter->name);

	state = kzalloc(sizeof(struct upd64031a_state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	sd = &state->sd;
	v4l2_i2c_subdev_init(sd, client, &upd64031a_ops);
	/* Seed the shadow copy, then push it to the hardware. */
	memcpy(state->regs, upd64031a_init, sizeof(state->regs));
	state->gr_mode = UPD64031A_GR_ON << 6;
	state->direct_3dycs_connect = UPD64031A_3DYCS_COMPOSITE << 4;
	state->ext_comp_sync = state->ext_vert_sync = 0;
	for (i = 0; i < TOT_REGS; i++)
		upd64031a_write(sd, i, state->regs[i]);
	return 0;
}

/* Remove: unregister the subdev and free the per-device state. */
static int upd64031a_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);

	v4l2_device_unregister_subdev(sd);
	kfree(to_state(sd));
	return 0;
}

/* ----------------------------------------------------------------------- */

static const struct i2c_device_id upd64031a_id[] = {
	{ "upd64031a", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, upd64031a_id);

static struct i2c_driver upd64031a_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "upd64031a",
	},
	.probe		= upd64031a_probe,
	.remove		= upd64031a_remove,
	.id_table	= upd64031a_id,
};

module_i2c_driver(upd64031a_driver);
gpl-2.0
championswimmer/android_kernel_sony_huashan
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
5662
14965
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#include "svga_overlay.h"
#include "svga_escape.h"

#define VMW_MAX_NUM_STREAMS 1

/* One overlay stream: backing buffer, bookkeeping flags and the last args. */
struct vmw_stream {
	struct vmw_dma_buffer *buf;
	bool claimed;
	bool paused;
	struct drm_vmw_control_stream_arg saved;
};

/**
 * Overlay control
 */
struct vmw_overlay {
	/*
	 * Each stream is a single overlay. In Xv these are called ports.
	 */
	struct mutex mutex;
	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};

static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	return dev_priv ? dev_priv->overlay_priv : NULL;
}

/* SVGA escape command header followed by the escape body. */
struct vmw_escape_header {
	uint32_t cmd;
	SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
	struct vmw_escape_header escape;
	SVGAEscapeVideoFlush flush;
};

/* Fill an escape header with the VMware NSID and the given payload size. */
static inline void fill_escape(struct vmw_escape_header *header,
			       uint32_t size)
{
	header->cmd = SVGA_CMD_ESCAPE;
	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
	header->body.size = size;
}

/* Fill a video-flush escape for the given stream id. */
static inline void fill_flush(struct vmw_escape_video_flush *cmd,
			      uint32_t stream_id)
{
	fill_escape(&cmd->escape, sizeof(cmd->flush));
	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
	cmd->flush.streamId = stream_id;
}

/**
 * Send put command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	struct vmw_escape_video_flush *flush;
	size_t fifo_size;
	bool have_so = dev_priv->sou_priv ? true : false;
	int i, num_items;
	SVGAGuestPtr ptr;

	struct {
		struct vmw_escape_header escape;
		struct {
			uint32_t cmdType;
			uint32_t streamId;
		} header;
	} *cmds;
	struct {
		uint32_t registerId;
		uint32_t value;
	} *items;

	/* defines are a index needs + 1 */
	if (have_so)
		num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
	else
		num_items = SVGA_VIDEO_PITCH_3 + 1;

	fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;

	cmds = vmw_fifo_reserve(dev_priv, fifo_size);
	/* hardware has hung, can't do anything here */
	if (!cmds)
		return -ENOMEM;

	/* The reservation is laid out as: cmds, then items[], then flush. */
	items = (typeof(items))&cmds[1];
	flush = (struct vmw_escape_video_flush *)&items[num_items];

	/* the size is header + number of items */
	fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));

	cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->header.streamId = arg->stream_id;

	/* the IDs are neatly numbered */
	for (i = 0; i < num_items; i++)
		items[i].registerId = i;

	vmw_bo_get_guest_ptr(&buf->base, &ptr);
	ptr.offset += arg->offset;

	items[SVGA_VIDEO_ENABLED].value     = true;
	items[SVGA_VIDEO_FLAGS].value       = arg->flags;
	items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
	items[SVGA_VIDEO_FORMAT].value      = arg->format;
	items[SVGA_VIDEO_COLORKEY].value    = arg->color_key;
	items[SVGA_VIDEO_SIZE].value        = arg->size;
	items[SVGA_VIDEO_WIDTH].value       = arg->width;
	items[SVGA_VIDEO_HEIGHT].value      = arg->height;
	items[SVGA_VIDEO_SRC_X].value       = arg->src.x;
	items[SVGA_VIDEO_SRC_Y].value       = arg->src.y;
	items[SVGA_VIDEO_SRC_WIDTH].value   = arg->src.w;
	items[SVGA_VIDEO_SRC_HEIGHT].value  = arg->src.h;
	items[SVGA_VIDEO_DST_X].value       = arg->dst.x;
	items[SVGA_VIDEO_DST_Y].value       = arg->dst.y;
	items[SVGA_VIDEO_DST_WIDTH].value   = arg->dst.w;
	items[SVGA_VIDEO_DST_HEIGHT].value  = arg->dst.h;
	items[SVGA_VIDEO_PITCH_1].value     = arg->pitch[0];
	items[SVGA_VIDEO_PITCH_2].value     = arg->pitch[1];
	items[SVGA_VIDEO_PITCH_3].value     = arg->pitch[2];
	if (have_so) {
		/* With screen objects the buffer lives in a GMR. */
		items[SVGA_VIDEO_DATA_GMRID].value    = ptr.gmrId;
		items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
	}

	fill_flush(flush, arg->stream_id);

	vmw_fifo_commit(dev_priv, fifo_size);

	return 0;
}

/**
 * Send stop command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
				 uint32_t stream_id,
				 bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		SVGAEscapeVideoSetRegs body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	int ret;

	/* Retry the reservation until it succeeds or we are interrupted. */
	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = stream_id;
	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
	cmds->body.items[0].value = false;
	fill_flush(&cmds->flush, stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}

/**
 * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
 *
 * With the introduction of screen objects buffers could now be
 * used with GMRs instead of being locked to vram.
 */
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer *buf,
				   bool pin, bool inter)
{
	if (!pin)
		return vmw_dmabuf_unpin(dev_priv, buf, inter);

	/* Without screen objects the buffer must live in vram. */
	if (!dev_priv->sou_priv)
		return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);

	return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
}

/**
 * Stop or pause a stream.
 *
 * If the stream is paused the no evict flag is removed from the buffer
 * but left in vram. This allows for instance mode_set to evict it
 * should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id which stream to stop/pause.
 * @pause true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* no buffer attached the stream is completely stopped */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We just remove the NO_EVICT flag so no -ENOMEM */
		ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
					      interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	if (!pause) {
		/* Full stop: drop our reference to the backing buffer. */
		vmw_dmabuf_unreference(&stream->buf);
		stream->paused = false;
	} else {
		stream->paused = true;
	}

	return 0;
}

/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -ENOMEM if buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG("   %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		/* New buffer: stop the old stream before switching. */
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and not paused then just send
		 * the put command, no need to do anything else.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't start the old stream if we are interrupted.
	 * Might return -ENOMEM if it can't fit the buffer in vram.
	 */
	ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
		       != 0);
		return ret;
	}

	if (stream->buf != buf)
		stream->buf = vmw_dmabuf_reference(buf);
	stream->saved = *arg;
	/* stream is no longer stopped/paused */
	stream->paused = false;

	return 0;
}

/**
 * Stop all streams.
 *
 * Used by the fb code when starting.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_stop_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->buf)
			continue;

		ret = vmw_overlay_stop(dev_priv, i, false, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->paused)
			continue;

		/* Replay the last saved arguments; failure is non-fatal. */
		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
						&stream->saved, false);
		if (ret != 0)
			DRM_INFO("%s: *warning* failed to resume stream %i\n",
				 __func__, i);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Pauses all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].paused)
			DRM_INFO("%s: *warning* stream %i already paused\n",
				 __func__, i);
		ret = vmw_overlay_stop(dev_priv, i, true, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/*
 * DRM_VMW_CONTROL_STREAM ioctl: start, update or stop a user overlay
 * stream. Looks up the stream resource, then either stops the stream
 * (enabled == 0) or updates it with the user-supplied buffer and args.
 */
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!overlay)
		return -ENOSYS;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	vmw_dmabuf_unreference(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}

/* Total number of overlays the hardware (this driver) exposes. */
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
	if (!dev_priv->overlay_priv)
		return 0;

	return VMW_MAX_NUM_STREAMS;
}

/* Number of overlays not currently claimed by a client. */
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, k;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
		if (!overlay->stream[i].claimed)
			k++;

	mutex_unlock(&overlay->mutex);

	return k;
}

/*
 * Claim a free overlay stream for a client; returns its id in *out,
 * -ESRCH when all streams are taken.
 */
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i;

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {

		if (overlay->stream[i].claimed)
			continue;

		overlay->stream[i].claimed = true;
		*out = i;
		mutex_unlock(&overlay->mutex);
		return 0;
	}

	mutex_unlock(&overlay->mutex);
	return -ESRCH;
}

/* Release a previously claimed stream, stopping it first. */
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;

	mutex_unlock(&overlay->mutex);
	return 0;
}

/* Allocate and initialize the overlay state at driver load. */
int vmw_overlay_init(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay;
	int i;

	if (dev_priv->overlay_priv)
		return -EINVAL;

	/*
	 * NOTE(review): this condition only rejects hardware that lacks
	 * VIDEO while having ESCAPE; hardware with neither capability
	 * would pass it. Verify intent against the SVGA device spec.
	 */
	if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
	     (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
		DRM_INFO("hardware doesn't support overlays\n");
		return -ENOSYS;
	}

	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
	if (!overlay)
		return -ENOMEM;

	mutex_init(&overlay->mutex);
	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		overlay->stream[i].buf = NULL;
		overlay->stream[i].paused = false;
		overlay->stream[i].claimed = false;
	}

	dev_priv->overlay_priv = overlay;

	return 0;
}

/* Tear down the overlay state; warns if any stream still had a buffer. */
int vmw_overlay_close(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	bool forgotten_buffer = false;
	int i;

	if (!overlay)
		return -ENOSYS;

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].buf) {
			forgotten_buffer = true;
			vmw_overlay_stop(dev_priv, i, false, false);
		}
	}

	WARN_ON(forgotten_buffer);

	dev_priv->overlay_priv = NULL;
	kfree(overlay);

	return 0;
}
gpl-2.0
gamerman123x/kernel_oneplus_msm8974
drivers/input/mouse/logips2pp.c
7198
11714
/* * Logitech PS/2++ mouse driver * * Copyright (c) 1999-2003 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2003 Eric Wong <eric@yhbt.net> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/input.h> #include <linux/serio.h> #include <linux/libps2.h> #include "psmouse.h" #include "logips2pp.h" /* Logitech mouse types */ #define PS2PP_KIND_WHEEL 1 #define PS2PP_KIND_MX 2 #define PS2PP_KIND_TP3 3 #define PS2PP_KIND_TRACKMAN 4 /* Logitech mouse features */ #define PS2PP_WHEEL 0x01 #define PS2PP_HWHEEL 0x02 #define PS2PP_SIDE_BTN 0x04 #define PS2PP_EXTRA_BTN 0x08 #define PS2PP_TASK_BTN 0x10 #define PS2PP_NAV_BTN 0x20 struct ps2pp_info { u8 model; u8 kind; u16 features; }; /* * Process a PS2++ or PS2T++ packet. */ static psmouse_ret_t ps2pp_process_byte(struct psmouse *psmouse) { struct input_dev *dev = psmouse->dev; unsigned char *packet = psmouse->packet; if (psmouse->pktcnt < 3) return PSMOUSE_GOOD_DATA; /* * Full packet accumulated, process it */ if ((packet[0] & 0x48) == 0x48 && (packet[1] & 0x02) == 0x02) { /* Logitech extended packet */ switch ((packet[1] >> 4) | (packet[0] & 0x30)) { case 0x0d: /* Mouse extra info */ input_report_rel(dev, packet[2] & 0x80 ? REL_HWHEEL : REL_WHEEL, (int) (packet[2] & 8) - (int) (packet[2] & 7)); input_report_key(dev, BTN_SIDE, (packet[2] >> 4) & 1); input_report_key(dev, BTN_EXTRA, (packet[2] >> 5) & 1); break; case 0x0e: /* buttons 4, 5, 6, 7, 8, 9, 10 info */ input_report_key(dev, BTN_SIDE, (packet[2]) & 1); input_report_key(dev, BTN_EXTRA, (packet[2] >> 1) & 1); input_report_key(dev, BTN_BACK, (packet[2] >> 3) & 1); input_report_key(dev, BTN_FORWARD, (packet[2] >> 4) & 1); input_report_key(dev, BTN_TASK, (packet[2] >> 2) & 1); break; case 0x0f: /* TouchPad extra info */ input_report_rel(dev, packet[2] & 0x08 ? 
REL_HWHEEL : REL_WHEEL, (int) ((packet[2] >> 4) & 8) - (int) ((packet[2] >> 4) & 7)); packet[0] = packet[2] | 0x08; break; default: psmouse_dbg(psmouse, "Received PS2++ packet #%x, but don't know how to handle.\n", (packet[1] >> 4) | (packet[0] & 0x30)); break; } } else { /* Standard PS/2 motion data */ input_report_rel(dev, REL_X, packet[1] ? (int) packet[1] - (int) ((packet[0] << 4) & 0x100) : 0); input_report_rel(dev, REL_Y, packet[2] ? (int) ((packet[0] << 3) & 0x100) - (int) packet[2] : 0); } input_report_key(dev, BTN_LEFT, packet[0] & 1); input_report_key(dev, BTN_MIDDLE, (packet[0] >> 2) & 1); input_report_key(dev, BTN_RIGHT, (packet[0] >> 1) & 1); input_sync(dev); return PSMOUSE_FULL_PACKET; } /* * ps2pp_cmd() sends a PS2++ command, sliced into two bit * pieces through the SETRES command. This is needed to send extended * commands to mice on notebooks that try to understand the PS/2 protocol * Ugly. */ static int ps2pp_cmd(struct psmouse *psmouse, unsigned char *param, unsigned char command) { if (psmouse_sliced_command(psmouse, command)) return -1; if (ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_POLL | 0x0300)) return -1; return 0; } /* * SmartScroll / CruiseControl for some newer Logitech mice Defaults to * enabled if we do nothing to it. 
Of course I put this in because I want it * disabled :P * 1 - enabled (if previously disabled, also default) * 0 - disabled */ static void ps2pp_set_smartscroll(struct psmouse *psmouse, bool smartscroll) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[4]; ps2pp_cmd(psmouse, param, 0x32); param[0] = 0; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); param[0] = smartscroll; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); } static ssize_t ps2pp_attr_show_smartscroll(struct psmouse *psmouse, void *data, char *buf) { return sprintf(buf, "%d\n", psmouse->smartscroll); } static ssize_t ps2pp_attr_set_smartscroll(struct psmouse *psmouse, void *data, const char *buf, size_t count) { unsigned int value; int err; err = kstrtouint(buf, 10, &value); if (err) return err; if (value > 1) return -EINVAL; ps2pp_set_smartscroll(psmouse, value); psmouse->smartscroll = value; return count; } PSMOUSE_DEFINE_ATTR(smartscroll, S_IWUSR | S_IRUGO, NULL, ps2pp_attr_show_smartscroll, ps2pp_attr_set_smartscroll); /* * Support 800 dpi resolution _only_ if the user wants it (there are good * reasons to not use it even if the mouse supports it, and of course there are * also good reasons to use it, let the user decide). 
*/ static void ps2pp_set_resolution(struct psmouse *psmouse, unsigned int resolution) { if (resolution > 400) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param = 3; ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); ps2_command(ps2dev, &param, PSMOUSE_CMD_SETRES); psmouse->resolution = 800; } else psmouse_set_resolution(psmouse, resolution); } static void ps2pp_disconnect(struct psmouse *psmouse) { device_remove_file(&psmouse->ps2dev.serio->dev, &psmouse_attr_smartscroll.dattr); } static const struct ps2pp_info *get_model_info(unsigned char model) { static const struct ps2pp_info ps2pp_list[] = { { 1, 0, 0 }, /* Simple 2-button mouse */ { 12, 0, PS2PP_SIDE_BTN}, { 13, 0, 0 }, { 15, PS2PP_KIND_MX, /* MX1000 */ PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN | PS2PP_EXTRA_BTN | PS2PP_NAV_BTN | PS2PP_HWHEEL }, { 40, 0, PS2PP_SIDE_BTN }, { 41, 0, PS2PP_SIDE_BTN }, { 42, 0, PS2PP_SIDE_BTN }, { 43, 0, PS2PP_SIDE_BTN }, { 50, 0, 0 }, { 51, 0, 0 }, { 52, PS2PP_KIND_WHEEL, PS2PP_SIDE_BTN | PS2PP_WHEEL }, { 53, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 56, PS2PP_KIND_WHEEL, PS2PP_SIDE_BTN | PS2PP_WHEEL }, /* Cordless MouseMan Wheel */ { 61, PS2PP_KIND_MX, /* MX700 */ PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN | PS2PP_EXTRA_BTN | PS2PP_NAV_BTN }, { 66, PS2PP_KIND_MX, /* MX3100 reciver */ PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN | PS2PP_EXTRA_BTN | PS2PP_NAV_BTN | PS2PP_HWHEEL }, { 72, PS2PP_KIND_TRACKMAN, 0 }, /* T-CH11: TrackMan Marble */ { 73, PS2PP_KIND_TRACKMAN, PS2PP_SIDE_BTN }, /* TrackMan FX */ { 75, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 76, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 79, PS2PP_KIND_TRACKMAN, PS2PP_WHEEL }, /* TrackMan with wheel */ { 80, PS2PP_KIND_WHEEL, PS2PP_SIDE_BTN | PS2PP_WHEEL }, { 81, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 83, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 85, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 86, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 87, 
PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 88, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 96, 0, 0 }, { 97, PS2PP_KIND_TP3, PS2PP_WHEEL | PS2PP_HWHEEL }, { 99, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 100, PS2PP_KIND_MX, /* MX510 */ PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN | PS2PP_EXTRA_BTN | PS2PP_NAV_BTN }, { 111, PS2PP_KIND_MX, PS2PP_WHEEL | PS2PP_SIDE_BTN }, /* MX300 reports task button as side */ { 112, PS2PP_KIND_MX, /* MX500 */ PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN | PS2PP_EXTRA_BTN | PS2PP_NAV_BTN }, { 114, PS2PP_KIND_MX, /* MX310 */ PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN | PS2PP_EXTRA_BTN } }; int i; for (i = 0; i < ARRAY_SIZE(ps2pp_list); i++) if (model == ps2pp_list[i].model) return &ps2pp_list[i]; return NULL; } /* * Set up input device's properties based on the detected mouse model. */ static void ps2pp_set_model_properties(struct psmouse *psmouse, const struct ps2pp_info *model_info, bool using_ps2pp) { struct input_dev *input_dev = psmouse->dev; if (model_info->features & PS2PP_SIDE_BTN) __set_bit(BTN_SIDE, input_dev->keybit); if (model_info->features & PS2PP_EXTRA_BTN) __set_bit(BTN_EXTRA, input_dev->keybit); if (model_info->features & PS2PP_TASK_BTN) __set_bit(BTN_TASK, input_dev->keybit); if (model_info->features & PS2PP_NAV_BTN) { __set_bit(BTN_FORWARD, input_dev->keybit); __set_bit(BTN_BACK, input_dev->keybit); } if (model_info->features & PS2PP_WHEEL) __set_bit(REL_WHEEL, input_dev->relbit); if (model_info->features & PS2PP_HWHEEL) __set_bit(REL_HWHEEL, input_dev->relbit); switch (model_info->kind) { case PS2PP_KIND_WHEEL: psmouse->name = "Wheel Mouse"; break; case PS2PP_KIND_MX: psmouse->name = "MX Mouse"; break; case PS2PP_KIND_TP3: psmouse->name = "TouchPad 3"; break; case PS2PP_KIND_TRACKMAN: psmouse->name = "TrackMan"; break; default: /* * Set name to "Mouse" only when using PS2++, * otherwise let other protocols define suitable * name */ if (using_ps2pp) psmouse->name = "Mouse"; break; } } /* * Logitech magic init. 
 Detect whether the mouse is a Logitech one
 * and its exact model and try turning on extended protocol for ones
 * that support it.
 *
 * Returns 0 when the PS2++ protocol was successfully enabled and -1
 * otherwise (not a recognizable Logitech mouse, or the extended
 * protocol handshake failed).
 */
int ps2pp_init(struct psmouse *psmouse, bool set_properties)
{
    struct ps2dev *ps2dev = &psmouse->ps2dev;
    unsigned char param[4];
    unsigned char model, buttons;
    const struct ps2pp_info *model_info;
    bool use_ps2pp = false;
    int error;

    /*
     * Identification knock: SETRES 0 + 3 x SETSCALE11, then GETINFO.
     * A Logitech mouse encodes its model number in param[0] and the
     * button count in param[1] of the GETINFO reply.
     */
    param[0] = 0;
    ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES);
    ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
    ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
    ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
    param[1] = 0;
    ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO);

    /* Model number is stored nibble-swapped across bits 3..6. */
    model = ((param[0] >> 4) & 0x07) | ((param[0] << 3) & 0x78);
    buttons = param[1];

    /* Non-Logitech mice report 0 here - bail out early. */
    if (!model || !buttons)
        return -1;

    model_info = get_model_info(model);
    if (model_info) {
        /*
         * Do Logitech PS2++ / PS2T++ magic init.
         */
        if (model_info->kind == PS2PP_KIND_TP3) { /* Touch Pad 3 */

            /* Unprotect RAM */
            param[0] = 0x11; param[1] = 0x04;
            param[2] = 0x68;
            ps2_command(ps2dev, param, 0x30d1);
            /* Enable features */
            param[0] = 0x11; param[1] = 0x05;
            param[2] = 0x0b;
            ps2_command(ps2dev, param, 0x30d1);
            /* Enable PS2++ */
            param[0] = 0x11; param[1] = 0x09;
            param[2] = 0xc3;
            ps2_command(ps2dev, param, 0x30d1);

            /* Expected signature of a TP3 in PS2++ mode: 06 00 14. */
            param[0] = 0;
            if (!ps2_command(ps2dev, param, 0x13d1) &&
                param[0] == 0x06 && param[1] == 0x00 &&
                param[2] == 0x14) {
                use_ps2pp = true;
            }

        } else {

            param[0] = param[1] = param[2] = 0;
            ps2pp_cmd(psmouse, param, 0x39); /* Magic knock */
            ps2pp_cmd(psmouse, param, 0xDB);

            /* Validate the PS2++ signature returned by the knock. */
            if ((param[0] & 0x78) == 0x48 &&
                (param[1] & 0xf3) == 0xc2 &&
                (param[2] & 0x03) == ((param[1] >> 2) & 3)) {
                ps2pp_set_smartscroll(psmouse, false);
                use_ps2pp = true;
            }
        }

    } else {
        psmouse_warn(psmouse,
                     "Detected unknown Logitech mouse model %d\n", model);
    }

    if (set_properties) {
        psmouse->vendor = "Logitech";
        psmouse->model = model;

        if (use_ps2pp) {
            psmouse->protocol_handler = ps2pp_process_byte;
            psmouse->pktsize = 3;

            if (model_info->kind != PS2PP_KIND_TP3) {
                psmouse->set_resolution = ps2pp_set_resolution;
                psmouse->disconnect = ps2pp_disconnect;
                /* NOTE(review): on failure we return -1 with
                 * protocol_handler/set_resolution already assigned;
                 * callers appear to discard the psmouse on error -
                 * confirm before relying on partial state. */
                error = device_create_file(&psmouse->ps2dev.serio->dev,
                                           &psmouse_attr_smartscroll.dattr);
                if (error) {
                    psmouse_err(psmouse,
                                "failed to create smartscroll sysfs attribute, error: %d\n",
                                error);
                    return -1;
                }
            }
        }

        if (buttons >= 3)
            __set_bit(BTN_MIDDLE, psmouse->dev->keybit);

        /* Safe: model_info may be NULL here only if use_ps2pp is false. */
        if (model_info)
            ps2pp_set_model_properties(psmouse, model_info, use_ps2pp);
    }

    return use_ps2pp ? 0 : -1;
}
gpl-2.0
akashnk/android_kernel_samsung_cooper
sound/core/oss/io.c
14622
4433
/* * PCM I/O Plug-In Interface * Copyright (c) 1999 by Jaroslav Kysela <perex@perex.cz> * * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Library General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "pcm_plugin.h" #define pcm_write(plug,buf,count) snd_pcm_oss_write3(plug,buf,count,1) #define pcm_writev(plug,vec,count) snd_pcm_oss_writev3(plug,vec,count,1) #define pcm_read(plug,buf,count) snd_pcm_oss_read3(plug,buf,count,1) #define pcm_readv(plug,vec,count) snd_pcm_oss_readv3(plug,vec,count,1) /* * Basic io plugin */ static snd_pcm_sframes_t io_playback_transfer(struct snd_pcm_plugin *plugin, const struct snd_pcm_plugin_channel *src_channels, struct snd_pcm_plugin_channel *dst_channels, snd_pcm_uframes_t frames) { if (snd_BUG_ON(!plugin)) return -ENXIO; if (snd_BUG_ON(!src_channels)) return -ENXIO; if (plugin->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED) { return pcm_write(plugin->plug, src_channels->area.addr, frames); } else { int channel, channels = plugin->dst_format.channels; void **bufs = (void**)plugin->extra_data; if (snd_BUG_ON(!bufs)) return -ENXIO; for (channel = 0; channel < channels; channel++) { if (src_channels[channel].enabled) bufs[channel] = src_channels[channel].area.addr; else bufs[channel] = NULL; } return pcm_writev(plugin->plug, 
bufs, frames); } } static snd_pcm_sframes_t io_capture_transfer(struct snd_pcm_plugin *plugin, const struct snd_pcm_plugin_channel *src_channels, struct snd_pcm_plugin_channel *dst_channels, snd_pcm_uframes_t frames) { if (snd_BUG_ON(!plugin)) return -ENXIO; if (snd_BUG_ON(!dst_channels)) return -ENXIO; if (plugin->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED) { return pcm_read(plugin->plug, dst_channels->area.addr, frames); } else { int channel, channels = plugin->dst_format.channels; void **bufs = (void**)plugin->extra_data; if (snd_BUG_ON(!bufs)) return -ENXIO; for (channel = 0; channel < channels; channel++) { if (dst_channels[channel].enabled) bufs[channel] = dst_channels[channel].area.addr; else bufs[channel] = NULL; } return pcm_readv(plugin->plug, bufs, frames); } return 0; } static snd_pcm_sframes_t io_src_channels(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t frames, struct snd_pcm_plugin_channel **channels) { int err; unsigned int channel; struct snd_pcm_plugin_channel *v; err = snd_pcm_plugin_client_channels(plugin, frames, &v); if (err < 0) return err; *channels = v; if (plugin->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED) { for (channel = 0; channel < plugin->src_format.channels; ++channel, ++v) v->wanted = 1; } return frames; } int snd_pcm_plugin_build_io(struct snd_pcm_substream *plug, struct snd_pcm_hw_params *params, struct snd_pcm_plugin **r_plugin) { int err; struct snd_pcm_plugin_format format; struct snd_pcm_plugin *plugin; if (snd_BUG_ON(!r_plugin)) return -ENXIO; *r_plugin = NULL; if (snd_BUG_ON(!plug || !params)) return -ENXIO; format.format = params_format(params); format.rate = params_rate(params); format.channels = params_channels(params); err = snd_pcm_plugin_build(plug, "I/O io", &format, &format, sizeof(void *) * format.channels, &plugin); if (err < 0) return err; plugin->access = params_access(params); if (snd_pcm_plug_stream(plug) == SNDRV_PCM_STREAM_PLAYBACK) { plugin->transfer = io_playback_transfer; if (plugin->access == 
SNDRV_PCM_ACCESS_RW_INTERLEAVED) plugin->client_channels = io_src_channels; } else { plugin->transfer = io_capture_transfer; } *r_plugin = plugin; return 0; }
gpl-2.0
amondot/QGIS
src/plugins/compass/qgscompassplugin.cpp
31
8125
/***************************************************************************
    qgscompassplugin.cpp
    Functions:
    -------------------
    begin                : Jan 28, 2012
    copyright            : (C) 2012 by Marco Bernasocchi
    email                : marco@bernawebdesign.ch
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/

// includes
#include <qgisinterface.h>
#include <qgisgui.h>
#include <qgsapplication.h>
#include "qgscompassplugin.h"

#include <QMenu>
#include <QAction>
#include <QFile>
#include <QToolBar>
#include <QMessageBox>

#include "qgscompassplugingui.h"

// Static plugin metadata returned through the QGISEXTERN accessors below.
static const QString sName = QObject::tr( "Internal Compass" );
static const QString sDescription = QObject::tr( "Shows a QtSensors compass reading" );
static const QString sCategory = QObject::tr( "Plugins" );
static const QString sPluginVersion = QObject::tr( "Version 0.9" );
static const QgisPlugin::PLUGINTYPE sPluginType = QgisPlugin::UI;
// NOTE(review): ".svn" looks like a typo for ".svg" - verify against the
// plugin's resource file before changing.
static const QString sPluginIcon = ":/compass.svn";

/**
 * Constructor for the plugin. The plugin is passed a pointer to the main app
 * and an interface object that provides access to exposed functions in QGIS.
 * @param qgis Pointer to the QGIS main window
 * @param _qI Pointer to the QGIS interface object
 */
QgsCompassPlugin::QgsCompassPlugin( QgisInterface *themQGisIface )
    : QgisPlugin( sName, sDescription, sCategory, sPluginVersion, sPluginType )
    , mQGisIface( themQGisIface )
    , mActionRunCompass( 0 )
    , mActionAboutCompass( 0 )
    , mQgsCompassPluginGui( 0 )
    , mDock( 0 )
{
}

QgsCompassPlugin::~QgsCompassPlugin()
{
}

/* Following functions return name, description, version, and type for the plugin */
QString QgsCompassPlugin::name()
{
  return sName;
}

QString QgsCompassPlugin::version()
{
  return sPluginVersion;
}

QString QgsCompassPlugin::description()
{
  return sDescription;
}

QString QgsCompassPlugin::category()
{
  return sCategory;
}

int QgsCompassPlugin::type()
{
  return QgisPlugin::UI;
}

//method defined in interface
void QgsCompassPlugin::help()
{
  //implement me!
}

/*
 * Initialize the GUI interface for the plugin: create the two actions,
 * wire their signals, and register them with the plugin toolbar/menu.
 */
void QgsCompassPlugin::initGui()
{
  // Create the action for tool
  mActionRunCompass = new QAction( QIcon(), tr( "Show compass" ), this );
  mActionRunCompass->setObjectName( "mActionRunCompass" );
  connect( mActionRunCompass, SIGNAL( triggered() ), this, SLOT( run() ) );

  mActionAboutCompass = new QAction( QIcon(), tr( "&About" ), this );
  mActionAboutCompass->setObjectName( "mActionAboutCompass" );
  connect( mActionAboutCompass, SIGNAL( triggered() ), this, SLOT( about() ) );

  setCurrentTheme( "" );
  // this is called when the icon theme is changed
  connect( mQGisIface, SIGNAL( currentThemeChanged( QString ) ), this, SLOT( setCurrentTheme( QString ) ) );

  // Add the icon to the toolbar
  mQGisIface->pluginToolBar()->addAction( mActionRunCompass );
  //mQGisIface->pluginToolBar()->addAction( mActionAboutCompass );
  mQGisIface->addPluginToMenu( sName, mActionRunCompass );
  mQGisIface->addPluginToMenu( sName, mActionAboutCompass );
  // this is called when the icon theme is changed
}

// Slot called when the buffer menu item is activated
// Lazily creates the dock widget on first use, then (re)shows it.
void QgsCompassPlugin::run()
{
  if ( ! mDock )
  {
    mDock = new QDockWidget( "Internal Compass", mQGisIface->mainWindow() );
    mQgsCompassPluginGui = new QgsCompassPluginGui( mDock );
    mDock->setWidget( mQgsCompassPluginGui );
    mDock->setFeatures( QDockWidget::DockWidgetClosable |
                        QDockWidget::DockWidgetMovable |
                        QDockWidget::DockWidgetFloatable );
    mQGisIface->addDockWidget( Qt::LeftDockWidgetArea, mDock );
  }
  mDock->show();
  // NOTE(review): this connect runs on every invocation of run(); Qt's
  // default connection type allows duplicates, so handleVisibilityChanged
  // may fire multiple times per signal after repeated runs - consider
  // moving it inside the if(!mDock) block or using Qt::UniqueConnection.
  QObject::connect( mDock, SIGNAL( visibilityChanged( bool ) ),
                    mQgsCompassPluginGui, SLOT( handleVisibilityChanged( bool ) ) );
}

// Unload the plugin by cleaning up the GUI
void QgsCompassPlugin::unload()
{
  // remove the GUI
  mQGisIface->removeToolBarIcon( mActionRunCompass );
  mQGisIface->removePluginMenu( sName, mActionRunCompass );
  //mQGisIface->removeToolBarIcon( mActionAboutCompass );
  mQGisIface->removePluginMenu( sName, mActionAboutCompass );
  delete mActionRunCompass;
  mActionRunCompass = 0;
  delete mActionAboutCompass;
  mActionAboutCompass = 0;
  delete mDock;
  mDock = 0;
}

//! Set icons to the current theme
void QgsCompassPlugin::setCurrentTheme( QString )
{
  if ( mActionRunCompass && mActionAboutCompass )
  {
    mActionRunCompass->setIcon( getThemeIcon( "/mCompassRun.png" ) );
    mActionAboutCompass->setIcon( getThemeIcon( "/mActionAbout.png" ) );
  }
}

// Resolve an icon from the active theme, falling back to the default
// theme and finally to the compiled-in resources.
QIcon QgsCompassPlugin::getThemeIcon( const QString &theName )
{
  if ( QFile::exists( QgsApplication::activeThemePath() + "/plugins" + theName ) )
  {
    return QIcon( QgsApplication::activeThemePath() + "/plugins" + theName );
  }
  else if ( QFile::exists( QgsApplication::defaultThemePath() + "/plugins" + theName ) )
  {
    return QIcon( QgsApplication::defaultThemePath() + "/plugins" + theName );
  }
  else
  {
    return QIcon( ":/icons" + theName );
  }
}

// Show the About dialog.
void QgsCompassPlugin::about()
{
  QString title = QString( "About Internal Compass" );
  // sort by date of contribution
  QString text = QString( "<center><b>Internal Compass</b></center>"
                          "<center>%1</center>"
                          "<p>Shows reading of an internal compass using QtSensors<br/>"
                          "<b>Developer:</b>"
                          "<ol type=disc>"
                          "<li>Marco Bernasocchi"
                          "</ol>"
                          "<p><b>Homepage:</b><br>"
                          "<a href=\"http://opengis.ch\">http://opengis.ch</a></p>"
                          "<p><b>Compass calibration:</b><br/>"
                          "To calibrate the compass slowly rotate the device three times around each axis or "
                          "rotate it like a on a Mobius strip.<br/>"
                          "This <a href='http://www.youtube.com/watch?v=oNJJPeoG8lQ'>Video</a> demonstrates the process "
                          "(this can be done from within QGIS as well).</p>" ).arg( sPluginVersion );

  // create dynamicaly because on Mac this dialog is modeless
  QWidget *w = new QWidget;
  w->setAttribute( Qt::WA_DeleteOnClose );
  w->setWindowIcon( getThemeIcon( "/compass.png" ) );
  QMessageBox::about( w, title, text );
}

/**
 * Required extern functions needed for every plugin
 * These functions can be called prior to creating an instance
 * of the plugin class
 */
// Class factory to return a new instance of the plugin class
QGISEXTERN QgisPlugin * classFactory( QgisInterface * themQGisIfacePointer )
{
  return new QgsCompassPlugin( themQGisIfacePointer );
}

// Return the name of the plugin - note that we do not user class members as
// the class may not yet be instantiated when this method is called.
QGISEXTERN QString name()
{
  return sName;
}

// Return the description
QGISEXTERN QString description()
{
  return sDescription;
}

// Return the category
QGISEXTERN QString category()
{
  return sCategory;
}

// Return the type (either UI or MapLayer plugin)
QGISEXTERN int type()
{
  return sPluginType;
}

// Return the version number for the plugin
QGISEXTERN QString version()
{
  return sPluginVersion;
}

QGISEXTERN QString icon()
{
  return sPluginIcon;
}

// Delete ourself
QGISEXTERN void unload( QgisPlugin * thePluginPointer )
{
  delete thePluginPointer;
}
gpl-2.0
flar2/evita-bulletproof
drivers/video/msm/vidc/common/init/vidc_init.c
31
26639
/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/android_pmem.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <mach/clk.h>
#include <linux/pm_runtime.h>
#include <mach/msm_subsystem_map.h>
#include <media/msm/vcd_api.h>
#include <media/msm/vidc_init.h>
#include "vidc_init_internal.h"
#include "vcd_res_tracker_api.h"

/* Debug print gated by the vidc_msg_debug debugfs flag.
 * NOTE(review): bare if without do-while(0) - always brace call sites. */
#define DBG(x...) \
    if (vidc_msg_debug) { \
        printk(KERN_DEBUG "[VID] " x); \
    }

#define VIDC_NAME "msm_vidc_reg"

#define ERR(x...) printk(KERN_ERR "[VID] " x)

/* Singleton driver state; allocated in vidc_init(). */
static struct vidc_dev *vidc_device_p;
static dev_t vidc_dev_num;
static struct class *vidc_class;
static unsigned int vidc_mmu_subsystem[] = {MSM_SUBSYSTEM_VIDEO};

/* Char device exists only as a registration anchor; all ops are NULL. */
static const struct file_operations vidc_fops = {
    .owner = THIS_MODULE,
    .open = NULL,
    .release = NULL,
    .unlocked_ioctl = NULL,
};

struct workqueue_struct *vidc_wq;
struct workqueue_struct *vidc_timer_wq;
static irqreturn_t vidc_isr(int irq, void *dev);
static spinlock_t vidc_spin_lock;

/* Debug knobs exposed through debugfs (see vidc_init()). */
u32 vidc_msg_timing, vidc_msg_pmem, vidc_msg_register, vidc_msg_debug;

#ifdef VIDC_ENABLE_DBGFS
struct dentry *vidc_debugfs_root;

/* Lazily create and return the "vidc" debugfs directory. */
struct dentry *vidc_get_debugfs_root(void)
{
    if (vidc_debugfs_root == NULL)
        vidc_debugfs_root = debugfs_create_dir("vidc", NULL);
    return vidc_debugfs_root;
}

/* Create one u32 debugfs file under root; log (but don't fail) on error. */
void vidc_debugfs_file_create(struct dentry *root, const char *name,
        u32 *var)
{
    struct dentry *vidc_debugfs_file =
        debugfs_create_u32(name, S_IRUGO | S_IWUSR, root, var);
    if (!vidc_debugfs_file)
        ERR("%s(): Error creating/opening file %s\n", __func__, name);
}
#endif

/*
 * Timer callback (runs in softirq context): queue the expired timer on
 * the device list and punt the real work to vidc_timer_wq.
 */
static void vidc_timer_fn(unsigned long data)
{
    unsigned long flag;
    struct vidc_timer *hw_timer = NULL;
    ERR("%s() Timer expired\n", __func__);
    spin_lock_irqsave(&vidc_spin_lock, flag);
    hw_timer = (struct vidc_timer *)data;
    list_add_tail(&hw_timer->list, &vidc_device_p->vidc_timer_queue);
    spin_unlock_irqrestore(&vidc_spin_lock, flag);
    DBG("Queue the work for timer\n");
    queue_work(vidc_timer_wq, &vidc_device_p->vidc_timer_worker);
}

/*
 * Workqueue side of the timer path: drain the queue of expired timers,
 * invoking each timer's callback outside the spinlock.
 */
static void vidc_timer_handler(struct work_struct *work)
{
    unsigned long flag = 0;
    u32 islist_empty = 0;
    struct vidc_timer *hw_timer = NULL;

    ERR("%s() Timer expired\n", __func__);
    do {
        spin_lock_irqsave(&vidc_spin_lock, flag);
        islist_empty = list_empty(&vidc_device_p->vidc_timer_queue);
        if (!islist_empty) {
            hw_timer = list_first_entry(
                &vidc_device_p->vidc_timer_queue,
                struct vidc_timer, list);
            list_del(&hw_timer->list);
        }
        spin_unlock_irqrestore(&vidc_spin_lock, flag);
        if (!islist_empty && hw_timer && hw_timer->cb_func)
            hw_timer->cb_func(hw_timer->userdata);
    } while (!islist_empty);
}

/*
 * IRQ bottom half: read/clear the interrupt, dispatch responses and
 * re-enable the IRQ that vidc_isr() disabled.
 */
static void vidc_work_handler(struct work_struct *work)
{
    DBG("vidc_work_handler()");
    vcd_read_and_clear_interrupt();
    vcd_response_handler();
    enable_irq(vidc_device_p->irq);
    DBG("vidc_work_handler() done");
}

static DECLARE_WORK(vidc_work, vidc_work_handler);

/* Probe: capture IRQ number and register window, init the mutex and
 * the bottom-half workqueue, then enable runtime PM. */
static int __devinit vidc_720p_probe(struct platform_device *pdev)
{
    struct resource *resource;
    DBG("Enter %s()\n", __func__);

    if (pdev->id) {
        ERR("Invalid plaform device ID = %d\n", pdev->id);
        return -EINVAL;
    }
    vidc_device_p->irq = platform_get_irq(pdev, 0);
    if (unlikely(vidc_device_p->irq < 0)) {
        ERR("%s(): Invalid irq = %d\n", __func__,
            vidc_device_p->irq);
        return -ENXIO;
    }

    resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (unlikely(!resource)) {
        ERR("%s(): Invalid resource\n", __func__);
        return -ENXIO;
    }

    vidc_device_p->phys_base = resource->start;
    vidc_device_p->virt_base = ioremap(resource->start,
        resource->end - resource->start + 1);

    if (!vidc_device_p->virt_base) {
        ERR("%s() : ioremap failed\n", __func__);
        return -ENOMEM;
    }
    vidc_device_p->device = &pdev->dev;
    mutex_init(&vidc_device_p->lock);

    vidc_wq = create_singlethread_workqueue("vidc_worker_queue");
    if (!vidc_wq) {
        ERR("%s: create workque failed\n", __func__);
        return -ENOMEM;
    }
    pm_runtime_set_active(&pdev->dev);
    pm_runtime_enable(&pdev->dev);
    return 0;
}

static int __devexit vidc_720p_remove(struct platform_device *pdev)
{
    if (pdev->id) {
        ERR("Invalid plaform device ID = %d\n", pdev->id);
        return -EINVAL;
    }
    pm_runtime_disable(&pdev->dev);
    return 0;
}

/* Runtime PM hooks are logging stubs; no hardware state is touched here. */
static int vidc_runtime_suspend(struct device *dev)
{
    dev_dbg(dev, "pm_runtime: suspending...\n");
    return 0;
}

static int vidc_runtime_resume(struct device *dev)
{
    dev_dbg(dev, "pm_runtime: resuming...\n");
    return 0;
}

static const struct dev_pm_ops vidc_dev_pm_ops = {
    .runtime_suspend = vidc_runtime_suspend,
    .runtime_resume = vidc_runtime_resume,
};

static struct platform_driver msm_vidc_720p_platform_driver = {
    .probe = vidc_720p_probe,
    .remove = vidc_720p_remove,
    .driver = {
        .name = "msm_vidc",
        .pm = &vidc_dev_pm_ops,
    },
};

static void __exit vidc_exit(void)
{
    platform_driver_unregister(&msm_vidc_720p_platform_driver);
}

/* Top half: mask the IRQ and defer to vidc_work_handler(), which
 * re-enables it when the response has been consumed. */
static irqreturn_t vidc_isr(int irq, void *dev)
{
    DBG("\n vidc_isr() %d ", irq);
    disable_irq_nosync(irq);
    queue_work(vidc_wq, &vidc_work);
    return IRQ_HANDLED;
}

/*
 * Module init: allocate driver state, register chardev/class/platform
 * driver, hook the IRQ and create the timer workqueue.  Unwinds in
 * reverse order through the goto chain on any failure.
 */
static int __init vidc_init(void)
{
    int rc = 0;
    struct device *class_devp;
#ifdef VIDC_ENABLE_DBGFS
    struct dentry *root = NULL;
#endif

    vidc_device_p = kzalloc(sizeof(struct vidc_dev), GFP_KERNEL);
    if (!vidc_device_p) {
        ERR("%s Unable to allocate memory for vidc_dev\n",
            __func__);
        return -ENOMEM;
    }

    rc = alloc_chrdev_region(&vidc_dev_num, 0, 1, VIDC_NAME);
    if (rc < 0) {
        ERR("%s: alloc_chrdev_region Failed rc = %d\n",
            __func__, rc);
        goto error_vidc_alloc_chrdev_region;
    }

    vidc_class = class_create(THIS_MODULE, VIDC_NAME);
    if (IS_ERR(vidc_class)) {
        rc = PTR_ERR(vidc_class);
        ERR("%s: couldn't create vidc_class rc = %d\n",
            __func__, rc);
        goto error_vidc_class_create;
    }

    class_devp = device_create(vidc_class, NULL, vidc_dev_num, NULL,
                    VIDC_NAME);
    if (IS_ERR(class_devp)) {
        rc = PTR_ERR(class_devp);
        ERR("%s: class device_create failed %d\n",
            __func__, rc);
        goto error_vidc_class_device_create;
    }

    cdev_init(&vidc_device_p->cdev, &vidc_fops);
    vidc_device_p->cdev.owner = THIS_MODULE;
    rc = cdev_add(&(vidc_device_p->cdev), vidc_dev_num, 1);

    if (rc < 0) {
        ERR("%s: cdev_add failed %d\n", __func__, rc);
        goto error_vidc_cdev_add;
    }

    rc = platform_driver_register(&msm_vidc_720p_platform_driver);
    if (rc) {
        ERR("%s failed to load\n", __func__);
        /* NOTE(review): label name keeps the original "platfom" typo */
        goto error_vidc_platfom_register;
    }

    rc = request_irq(vidc_device_p->irq, vidc_isr, IRQF_TRIGGER_HIGH,
             "vidc", vidc_device_p->device);
    if (unlikely(rc)) {
        ERR("%s() :request_irq failed\n", __func__);
        goto error_vidc_request_irq;
    }
    res_trk_init(vidc_device_p->device, vidc_device_p->irq);
    vidc_timer_wq = create_singlethread_workqueue("vidc_timer_wq");
    if (!vidc_timer_wq) {
        ERR("%s: create workque failed\n", __func__);
        rc = -ENOMEM;
        goto error_vidc_create_workqueue;
    }
    DBG("Disabling IRQ in %s()\n", __func__);
    disable_irq_nosync(vidc_device_p->irq);
    INIT_WORK(&vidc_device_p->vidc_timer_worker,
              vidc_timer_handler);
    spin_lock_init(&vidc_spin_lock);
    INIT_LIST_HEAD(&vidc_device_p->vidc_timer_queue);

    vidc_device_p->ref_count = 0;
    vidc_device_p->firmware_refcount = 0;
    vidc_device_p->get_firmware = 0;
#ifdef VIDC_ENABLE_DBGFS
    root = vidc_get_debugfs_root();
    if (root) {
        vidc_debugfs_file_create(root, "vidc_msg_timing",
                (u32 *) &vidc_msg_timing);
        vidc_debugfs_file_create(root, "vidc_msg_pmem",
                (u32 *) &vidc_msg_pmem);
        vidc_debugfs_file_create(root, "vidc_msg_register",
                (u32 *) &vidc_msg_register);
        vidc_debugfs_file_create(root, "vidc_msg_debug",
                (u32 *) &vidc_msg_debug);
    }
#endif
    return 0;

error_vidc_create_workqueue:
    free_irq(vidc_device_p->irq, vidc_device_p->device);
error_vidc_request_irq:
    platform_driver_unregister(&msm_vidc_720p_platform_driver);
error_vidc_platfom_register:
    cdev_del(&(vidc_device_p->cdev));
error_vidc_cdev_add:
    device_destroy(vidc_class, vidc_dev_num);
error_vidc_class_device_create:
    class_destroy(vidc_class);
error_vidc_class_create:
    unregister_chrdev_region(vidc_dev_num, 1);
error_vidc_alloc_chrdev_region:
    kfree(vidc_device_p);
    return rc;
}

/* Return the ioremap()ed register base set up in probe. */
void __iomem *vidc_get_ioaddr(void)
{
    return (u8 *)vidc_device_p->virt_base;
}
EXPORT_SYMBOL(vidc_get_ioaddr);

/*
 * Download firmware on first use and refcount subsequent users.
 * Skipped entirely for secure sessions.  Returns boolean success.
 */
int vidc_load_firmware(void)
{
    u32 status = true;

    if (!res_trk_check_for_sec_session()) {
        mutex_lock(&vidc_device_p->lock);
        if (!vidc_device_p->get_firmware) {
            status = res_trk_download_firmware();
            if (!status)
                goto error;
            vidc_device_p->get_firmware = 1;
        }
        vidc_device_p->firmware_refcount++;
error:
        mutex_unlock(&vidc_device_p->lock);
    }
    return status;
}
EXPORT_SYMBOL(vidc_load_firmware);

/* Drop one firmware reference (clamped at zero); secure sessions no-op. */
void vidc_release_firmware(void)
{
    if (!res_trk_check_for_sec_session()) {
        mutex_lock(&vidc_device_p->lock);
        if (vidc_device_p->firmware_refcount > 0)
            vidc_device_p->firmware_refcount--;
        else
            vidc_device_p->firmware_refcount = 0;
        mutex_unlock(&vidc_device_p->lock);
    }
}
EXPORT_SYMBOL(vidc_release_firmware);

/*
 * Fetch the ION flag/handle for buffer table entry 'index' when both
 * pmem_fd and kernel vaddr match; otherwise *buff_handle is NULL.
 * Returns the ION flag (or 0).
 * NOTE(review): 'index' is trusted - callers must pass a valid slot.
 */
u32 vidc_get_fd_info(struct video_client_ctx *client_ctx,
        enum buffer_dir buffer, int pmem_fd,
        unsigned long kvaddr, int index,
        struct ion_handle **buff_handle)
{
    struct buf_addr_table *buf_addr_table;
    u32 rc = 0;
    if (!client_ctx)
        return false;
    if (buffer == BUFFER_TYPE_INPUT)
        buf_addr_table = client_ctx->input_buf_addr_table;
    else
        buf_addr_table = client_ctx->output_buf_addr_table;
    if (buf_addr_table[index].pmem_fd == pmem_fd) {
        if (buf_addr_table[index].kernel_vaddr == kvaddr) {
            rc = buf_addr_table[index].buff_ion_flag;
            *buff_handle = buf_addr_table[index].buff_ion_handle;
        } else
            *buff_handle = NULL;
    } else
        *buff_handle = NULL;
    return rc;
}
EXPORT_SYMBOL(vidc_get_fd_info);

/*
 * Tear down every mapping in one direction's buffer table (subsystem
 * map, ION kernel/iommu map, ION handle), then the H264 MV buffer.
 * NOTE(review): called without taking enrty_queue_lock - presumably
 * only on teardown paths with no concurrent users; confirm at callers.
 */
void vidc_cleanup_addr_table(struct video_client_ctx *client_ctx,
                enum buffer_dir buffer)
{
    u32 *num_of_buffers = NULL;
    u32 i = 0;
    struct buf_addr_table *buf_addr_table;
    if (buffer == BUFFER_TYPE_INPUT) {
        buf_addr_table = client_ctx->input_buf_addr_table;
        num_of_buffers = &client_ctx->num_of_input_buffers;
        DBG("%s(): buffer = INPUT\n", __func__);

    } else {
        buf_addr_table = client_ctx->output_buf_addr_table;
        num_of_buffers = &client_ctx->num_of_output_buffers;
        DBG("%s(): buffer = OUTPUT\n", __func__);
    }

    if (!*num_of_buffers)
        goto bail_out_cleanup;
    if (!client_ctx->user_ion_client)
        goto bail_out_cleanup;
    for (i = 0; i < *num_of_buffers; ++i) {
        if (buf_addr_table[i].client_data) {
            msm_subsystem_unmap_buffer(
            (struct msm_mapped_buffer *)
            buf_addr_table[i].client_data);
            buf_addr_table[i].client_data = NULL;
        }
        if (!IS_ERR_OR_NULL(buf_addr_table[i].buff_ion_handle)) {
            if (!IS_ERR_OR_NULL(client_ctx->user_ion_client)) {
                ion_unmap_kernel(client_ctx->user_ion_client,
                        buf_addr_table[i].
                        buff_ion_handle);
                if (!res_trk_check_for_sec_session()) {
                    ion_unmap_iommu(
                        client_ctx->user_ion_client,
                        buf_addr_table[i].
                        buff_ion_handle,
                        VIDEO_DOMAIN,
                        VIDEO_MAIN_POOL);
                }
                ion_free(client_ctx->user_ion_client,
                        buf_addr_table[i].
                        buff_ion_handle);
                buf_addr_table[i].buff_ion_handle = NULL;
            }
        }
    }
    if (client_ctx->vcd_h264_mv_buffer.client_data) {
        msm_subsystem_unmap_buffer((struct msm_mapped_buffer *)
        client_ctx->vcd_h264_mv_buffer.client_data);
        client_ctx->vcd_h264_mv_buffer.client_data = NULL;
    }
    if (!IS_ERR_OR_NULL(client_ctx->h264_mv_ion_handle)) {
        if (!IS_ERR_OR_NULL(client_ctx->user_ion_client)) {
            ion_unmap_kernel(client_ctx->user_ion_client,
                    client_ctx->h264_mv_ion_handle);
            if (!res_trk_check_for_sec_session() &&
                (res_trk_get_core_type() != (u32)VCD_CORE_720P)) {
                ion_unmap_iommu(client_ctx->user_ion_client,
                    client_ctx->h264_mv_ion_handle,
                    VIDEO_DOMAIN,
                    VIDEO_MAIN_POOL);
            }
            ion_free(client_ctx->user_ion_client,
                    client_ctx->h264_mv_ion_handle);
            client_ctx->h264_mv_ion_handle = NULL;
        }
    }
bail_out_cleanup:
    return;
}
EXPORT_SYMBOL(vidc_cleanup_addr_table);

/*
 * Look up a buffer table entry by user vaddr (search_with_user_vaddr)
 * or by kernel vaddr, filling in the counterpart address plus
 * phys/fd/file/index outputs.  Returns true when found.
 * (The mutex field name "enrty_queue_lock" is a historical typo kept
 * for ABI/source compatibility with the header.)
 */
u32 vidc_lookup_addr_table(struct video_client_ctx *client_ctx,
    enum buffer_dir buffer,
    u32 search_with_user_vaddr,
    unsigned long *user_vaddr,
    unsigned long *kernel_vaddr,
    unsigned long *phy_addr, int *pmem_fd,
    struct file **file, s32 *buffer_index)
{
    u32 num_of_buffers;
    u32 i;
    struct buf_addr_table *buf_addr_table;
    u32 found = false;

    if (!client_ctx)
        return false;
    mutex_lock(&client_ctx->enrty_queue_lock);
    if (buffer == BUFFER_TYPE_INPUT) {
        buf_addr_table = client_ctx->input_buf_addr_table;
        num_of_buffers = client_ctx->num_of_input_buffers;
        DBG("%s(): buffer = INPUT\n", __func__);

    } else {
        buf_addr_table = client_ctx->output_buf_addr_table;
        num_of_buffers = client_ctx->num_of_output_buffers;
        DBG("%s(): buffer = OUTPUT\n", __func__);
    }

    for (i = 0; i < num_of_buffers; ++i) {
        if (search_with_user_vaddr) {
            if (*user_vaddr == buf_addr_table[i].user_vaddr) {
                *kernel_vaddr = buf_addr_table[i].kernel_vaddr;
                found = true;
                DBG("%s() : client_ctx = %p."
                " user_virt_addr = 0x%08lx is found",
                __func__, client_ctx, *user_vaddr);
                break;
            }
        } else {
            if (*kernel_vaddr == buf_addr_table[i].kernel_vaddr) {
                *user_vaddr = buf_addr_table[i].user_vaddr;
                found = true;
                DBG("%s() : client_ctx = %p."
                " kernel_virt_addr = 0x%08lx is found",
                __func__, client_ctx, *kernel_vaddr);
                break;
            }
        }
    }
    if (found) {
        *phy_addr = buf_addr_table[i].dev_addr;
        *pmem_fd = buf_addr_table[i].pmem_fd;
        *file = buf_addr_table[i].file;
        *buffer_index = i;

        if (search_with_user_vaddr) {
            DBG("kernel_vaddr = 0x%08lx, phy_addr = 0x%08lx "
            " pmem_fd = %d, struct *file	= %p "
            "buffer_index = %d\n", *kernel_vaddr,
            *phy_addr, *pmem_fd, *file, *buffer_index);
        } else {
            DBG("user_vaddr = 0x%08lx, phy_addr = 0x%08lx "
            " pmem_fd = %d, struct *file	= %p "
            "buffer_index = %d\n", *user_vaddr, *phy_addr,
            *pmem_fd, *file, *buffer_index);
        }
        mutex_unlock(&client_ctx->enrty_queue_lock);
        return true;
    } else {
        if (search_with_user_vaddr) {
            DBG("%s() : client_ctx = %p user_virt_addr = 0x%08lx"
            " Not Found.\n", __func__, client_ctx,
            *user_vaddr);
        } else {
            DBG("%s() : client_ctx = %p kernel_virt_addr = 0x%08lx"
            " Not Found.\n", __func__, client_ctx,
            *kernel_vaddr);
        }
        mutex_unlock(&client_ctx->enrty_queue_lock);
        return false;
    }
}
EXPORT_SYMBOL(vidc_lookup_addr_table);

/*
 * Register a userspace buffer in the per-direction address table.
 * Maps it through pmem/msm_subsystem or ION (kernel map + either
 * ion_phys for secure/720p or iommu map otherwise), then records all
 * addresses in the table.  Output buffers get 'length' doubled before
 * mapping.  Returns true on success; error paths unwind ION mappings
 * via the goto chain.
 */
u32 vidc_insert_addr_table(struct video_client_ctx *client_ctx,
    enum buffer_dir buffer, unsigned long user_vaddr,
    unsigned long *kernel_vaddr, int pmem_fd,
    unsigned long buffer_addr_offset, unsigned int max_num_buffers,
    unsigned long length)
{
    unsigned long len, phys_addr;
    struct file *file = NULL;
    u32 *num_of_buffers = NULL;
    u32 i, flags;
    struct buf_addr_table *buf_addr_table;
    struct msm_mapped_buffer *mapped_buffer = NULL;
    struct ion_handle *buff_ion_handle = NULL;
    unsigned long ionflag = 0;
    unsigned long iova = 0;
    int ret = 0;
    unsigned long buffer_size  = 0;
    size_t ion_len;

    if (!client_ctx || !length)
        return false;
    mutex_lock(&client_ctx->enrty_queue_lock);
    if (buffer == BUFFER_TYPE_INPUT) {
        buf_addr_table = client_ctx->input_buf_addr_table;
        num_of_buffers = &client_ctx->num_of_input_buffers;
        DBG("%s(): buffer = INPUT #Buf = %d\n",
            __func__, *num_of_buffers);

    } else {
        buf_addr_table = client_ctx->output_buf_addr_table;
        num_of_buffers = &client_ctx->num_of_output_buffers;
        DBG("%s(): buffer = OUTPUT #Buf = %d\n",
            __func__, *num_of_buffers);
        length = length * 2; /* workaround for iommu video h/w bug */
    }

    if (*num_of_buffers == max_num_buffers) {
        ERR("%s(): Num of buffers reached max value : %d",
            __func__, max_num_buffers);
        goto bail_out_add;
    }

    /* Reject duplicate registrations of the same user vaddr. */
    i = 0;
    while (i < *num_of_buffers &&
        user_vaddr != buf_addr_table[i].user_vaddr)
        i++;
    if (i < *num_of_buffers) {
        DBG("%s() : client_ctx = %p."
            " user_virt_addr = 0x%08lx already set",
            __func__, client_ctx, user_vaddr);
        goto bail_out_add;
    } else {
        if (!vcd_get_ion_status()) {
            /* Legacy pmem path */
            if (get_pmem_file(pmem_fd, &phys_addr,
                    kernel_vaddr, &len, &file)) {
                ERR("%s(): get_pmem_file failed\n", __func__);
                goto bail_out_add;
            }
            put_pmem_file(file);
            flags = (buffer == BUFFER_TYPE_INPUT)
            ? MSM_SUBSYSTEM_MAP_IOVA :
            MSM_SUBSYSTEM_MAP_IOVA|MSM_SUBSYSTEM_ALIGN_IOVA_8K;
            mapped_buffer = msm_subsystem_map_buffer(phys_addr,
            length, flags, vidc_mmu_subsystem,
            sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
            if (IS_ERR(mapped_buffer)) {
                pr_err("buffer map failed");
                goto bail_out_add;
            }
            buf_addr_table[*num_of_buffers].client_data = (void *)
                mapped_buffer;
            buf_addr_table[*num_of_buffers].dev_addr =
                mapped_buffer->iova[0];
        } else {
            /* ION path */
            buff_ion_handle = ion_import_dma_buf(
                client_ctx->user_ion_client, pmem_fd);
            if (IS_ERR_OR_NULL(buff_ion_handle)) {
                ERR("%s(): get_ION_handle failed\n",
                __func__);
                goto bail_out_add;
            }
            if (ion_handle_get_flags(client_ctx->user_ion_client,
                        buff_ion_handle,
                        &ionflag)) {
                ERR("%s():ION flags fail\n",
                __func__);
                goto bail_out_add;
            }
            *kernel_vaddr = (unsigned long)
                ion_map_kernel(
                client_ctx->user_ion_client,
                buff_ion_handle, ionflag);
            if (IS_ERR_OR_NULL((void *)*kernel_vaddr)) {
                ERR("%s():ION virtual addr fail\n",
                 __func__);
                *kernel_vaddr = (unsigned long)NULL;
                show_mem(SHOW_MEM_FILTER_NODES);
                goto ion_free_error;
            }
            if (res_trk_check_for_sec_session() ||
               (res_trk_get_core_type() == (u32)VCD_CORE_720P)) {
                /* Physically contiguous address is usable directly. */
                if (ion_phys(client_ctx->user_ion_client,
                    buff_ion_handle,
                    &phys_addr, &ion_len)) {
                    ERR("%s():ION physical addr fail\n",
                    __func__);
                    goto ion_map_error;
                }
                len = (unsigned long) ion_len;
                buf_addr_table[*num_of_buffers].client_data = NULL;
                buf_addr_table[*num_of_buffers].dev_addr =
                    phys_addr;
            } else {
                /* Otherwise map through the video IOMMU. */
                ret = ion_map_iommu(client_ctx->user_ion_client,
                        buff_ion_handle,
                        VIDEO_DOMAIN,
                        VIDEO_MAIN_POOL,
                        SZ_8K,
                        length,
                        (unsigned long *) &iova,
                        (unsigned long *) &buffer_size,
                        UNCACHED,
                        ION_IOMMU_UNMAP_DELAYED);
                if (ret || !iova) {
                    ERR(
                    "%s():ION iommu map fail, ret = %d, iova = 0x%lx\n",
                        __func__, ret, iova);
                    goto ion_map_error;
                }
                phys_addr = iova;
                buf_addr_table[*num_of_buffers].client_data = NULL;
                buf_addr_table[*num_of_buffers].dev_addr = iova;
            }
        }
        phys_addr += buffer_addr_offset;
        (*kernel_vaddr) += buffer_addr_offset;
        buf_addr_table[*num_of_buffers].user_vaddr = user_vaddr;
        buf_addr_table[*num_of_buffers].kernel_vaddr = *kernel_vaddr;
        buf_addr_table[*num_of_buffers].pmem_fd = pmem_fd;
        buf_addr_table[*num_of_buffers].file = file;
        buf_addr_table[*num_of_buffers].phy_addr = phys_addr;
        buf_addr_table[*num_of_buffers].buff_ion_handle =
                        buff_ion_handle;
        buf_addr_table[*num_of_buffers].buff_ion_flag =
                        ionflag;
        *num_of_buffers = *num_of_buffers + 1;
        DBG("%s() : client_ctx = %p, user_virt_addr = 0x%08lx, "
            "kernel_vaddr = 0x%08lx phys_addr=%lu inserted!",
            __func__, client_ctx, user_vaddr,
            *kernel_vaddr, phys_addr);
    }
    mutex_unlock(&client_ctx->enrty_queue_lock);
    return true;
ion_map_error:
    if (*kernel_vaddr && buff_ion_handle)
        ion_unmap_kernel(client_ctx->user_ion_client,
            buff_ion_handle);
ion_free_error:
    if (!IS_ERR_OR_NULL(buff_ion_handle))
        ion_free(client_ctx->user_ion_client, buff_ion_handle);
bail_out_add:
    mutex_unlock(&client_ctx->enrty_queue_lock);
    return false;
}
EXPORT_SYMBOL(vidc_insert_addr_table);

/*
 * Insert a kernel-allocated buffer (already mapped; phys known) into
 * the address table without any pmem/ION bookkeeping (pmem_fd = -1,
 * no file/handle).  Returns true on success.
 */
u32 vidc_insert_addr_table_kernel(struct video_client_ctx *client_ctx,
    enum buffer_dir buffer, unsigned long user_vaddr,
    unsigned long kernel_vaddr, unsigned long phys_addr,
    unsigned int max_num_buffers,
    unsigned long length)
{
    u32 *num_of_buffers = NULL;
    u32 i;
    struct buf_addr_table *buf_addr_table;
    struct msm_mapped_buffer *mapped_buffer = NULL;

    if (!client_ctx || !length || !kernel_vaddr || !phys_addr)
        return false;
    mutex_lock(&client_ctx->enrty_queue_lock);
    if (buffer == BUFFER_TYPE_INPUT) {
        buf_addr_table = client_ctx->input_buf_addr_table;
        num_of_buffers = &client_ctx->num_of_input_buffers;
        DBG("%s(): buffer = INPUT #Buf = %d\n",
            __func__, *num_of_buffers);

    } else {
        buf_addr_table = client_ctx->output_buf_addr_table;
        num_of_buffers = &client_ctx->num_of_output_buffers;
        DBG("%s(): buffer = OUTPUT #Buf = %d\n",
            __func__, *num_of_buffers);
    }

    if (*num_of_buffers == max_num_buffers) {
        ERR("%s(): Num of buffers reached max value : %d",
            __func__, max_num_buffers);
        goto bail_out_add;
    }

    i = 0;
    while (i < *num_of_buffers &&
        user_vaddr != buf_addr_table[i].user_vaddr) {
        i++;
    }
    if (i < *num_of_buffers) {
        DBG("%s() : client_ctx = %p."
            " user_virt_addr = 0x%08lx already set",
            __func__, client_ctx, user_vaddr);
        goto bail_out_add;
    } else {
        mapped_buffer = NULL;
        buf_addr_table[*num_of_buffers].client_data = (void *)
            mapped_buffer;
        buf_addr_table[*num_of_buffers].dev_addr = phys_addr;
        buf_addr_table[*num_of_buffers].user_vaddr = user_vaddr;
        buf_addr_table[*num_of_buffers].kernel_vaddr = kernel_vaddr;
        buf_addr_table[*num_of_buffers].pmem_fd = -1;
        buf_addr_table[*num_of_buffers].file = NULL;
        buf_addr_table[*num_of_buffers].phy_addr = phys_addr;
        buf_addr_table[*num_of_buffers].buff_ion_handle = NULL;
        *num_of_buffers = *num_of_buffers + 1;
        DBG("%s() : client_ctx = %p, user_virt_addr = 0x%08lx, "
            "kernel_vaddr = 0x%08lx inserted!", __func__,
            client_ctx, user_vaddr, kernel_vaddr);
    }
    mutex_unlock(&client_ctx->enrty_queue_lock);
    return true;
bail_out_add:
    mutex_unlock(&client_ctx->enrty_queue_lock);
    return false;
}
EXPORT_SYMBOL(vidc_insert_addr_table_kernel);

/*
 * Remove the table entry matching user_vaddr, returning its kernel
 * vaddr through *kernel_vaddr.
 */
u32 vidc_delete_addr_table(struct video_client_ctx *client_ctx,
    enum buffer_dir buffer,
    unsigned long user_vaddr,
    unsigned long *kernel_vaddr)
{
    u32 *num_of_buffers = NULL;
    u32 i;
    struct buf_addr_table *buf_addr_table;
    if (!client_ctx)
        return false;
    mutex_lock(&client_ctx->enrty_queue_lock);
    if (buffer == BUFFER_TYPE_INPUT) {
        buf_addr_table = client_ctx->input_buf_addr_table;
        num_of_buffers = &client_ctx->num_of_input_buffers;
    } else {
        buf_addr_table = client_ctx->output_buf_addr_table;
        num_of_buffers = &client_ctx->num_of_output_buffers;
    }
    if (!*num_of_buffers)
        goto bail_out_del;
    i = 0;
    while (i < *num_of_buffers &&
        user_vaddr != buf_addr_table[i].user_vaddr)
        i++;
    if (i == *num_of_buffers) {
        pr_err("%s() : client_ctx = %p." /* function continues beyond this chunk */
" user_virt_addr = 0x%08lx NOT found", __func__, client_ctx, user_vaddr); goto bail_out_del; } if (buf_addr_table[i].client_data) { msm_subsystem_unmap_buffer( (struct msm_mapped_buffer *)buf_addr_table[i].client_data); buf_addr_table[i].client_data = NULL; } *kernel_vaddr = buf_addr_table[i].kernel_vaddr; if (buf_addr_table[i].buff_ion_handle) { ion_unmap_kernel(client_ctx->user_ion_client, buf_addr_table[i].buff_ion_handle); if (!res_trk_check_for_sec_session() && (res_trk_get_core_type() != (u32)VCD_CORE_720P)) { ion_unmap_iommu(client_ctx->user_ion_client, buf_addr_table[i].buff_ion_handle, VIDEO_DOMAIN, VIDEO_MAIN_POOL); } ion_free(client_ctx->user_ion_client, buf_addr_table[i].buff_ion_handle); buf_addr_table[i].buff_ion_handle = NULL; } if (i < (*num_of_buffers - 1)) { buf_addr_table[i].client_data = buf_addr_table[*num_of_buffers - 1].client_data; buf_addr_table[i].dev_addr = buf_addr_table[*num_of_buffers - 1].dev_addr; buf_addr_table[i].user_vaddr = buf_addr_table[*num_of_buffers - 1].user_vaddr; buf_addr_table[i].kernel_vaddr = buf_addr_table[*num_of_buffers - 1].kernel_vaddr; buf_addr_table[i].phy_addr = buf_addr_table[*num_of_buffers - 1].phy_addr; buf_addr_table[i].pmem_fd = buf_addr_table[*num_of_buffers - 1].pmem_fd; buf_addr_table[i].file = buf_addr_table[*num_of_buffers - 1].file; buf_addr_table[i].buff_ion_handle = buf_addr_table[*num_of_buffers - 1].buff_ion_handle; } *num_of_buffers = *num_of_buffers - 1; DBG("%s() : client_ctx = %p." 
" user_virt_addr = 0x%08lx is found and deleted", __func__, client_ctx, user_vaddr); mutex_unlock(&client_ctx->enrty_queue_lock); return true; bail_out_del: mutex_unlock(&client_ctx->enrty_queue_lock); return false; } EXPORT_SYMBOL(vidc_delete_addr_table); u32 vidc_timer_create(void (*timer_handler)(void *), void *user_data, void **timer_handle) { struct vidc_timer *hw_timer = NULL; if (!timer_handler || !timer_handle) { DBG("%s(): timer creation failed\n ", __func__); return false; } hw_timer = kzalloc(sizeof(struct vidc_timer), GFP_KERNEL); if (!hw_timer) { DBG("%s(): timer creation failed in allocation\n ", __func__); return false; } init_timer(&hw_timer->hw_timeout); hw_timer->hw_timeout.data = (unsigned long)hw_timer; hw_timer->hw_timeout.function = vidc_timer_fn; hw_timer->cb_func = timer_handler; hw_timer->userdata = user_data; *timer_handle = hw_timer; return true; } EXPORT_SYMBOL(vidc_timer_create); void vidc_timer_release(void *timer_handle) { kfree(timer_handle); } EXPORT_SYMBOL(vidc_timer_release); void vidc_timer_start(void *timer_handle, u32 time_out) { struct vidc_timer *hw_timer = (struct vidc_timer *)timer_handle; DBG("%s(): start timer\n ", __func__); if (hw_timer) { hw_timer->hw_timeout.expires = jiffies + 1*HZ; add_timer(&hw_timer->hw_timeout); } } EXPORT_SYMBOL(vidc_timer_start); void vidc_timer_stop(void *timer_handle) { struct vidc_timer *hw_timer = (struct vidc_timer *)timer_handle; DBG("%s(): stop timer\n ", __func__); if (hw_timer) del_timer(&hw_timer->hw_timeout); } EXPORT_SYMBOL(vidc_timer_stop); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Video decoder/encoder driver Init Module"); MODULE_VERSION("1.0"); module_init(vidc_init); module_exit(vidc_exit);
gpl-2.0
linux-wpan/linux-wpan
net/bluetooth/sco.c
287
24629
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth SCO sockets. 
*/ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/sco.h> static bool disable_esco; static const struct proto_ops sco_sock_ops; static struct bt_sock_list sco_sk_list = { .lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock) }; /* ---- SCO connections ---- */ struct sco_conn { struct hci_conn *hcon; spinlock_t lock; struct sock *sk; unsigned int mtu; }; #define sco_conn_lock(c) spin_lock(&c->lock); #define sco_conn_unlock(c) spin_unlock(&c->lock); static void sco_sock_close(struct sock *sk); static void sco_sock_kill(struct sock *sk); /* ----- SCO socket info ----- */ #define sco_pi(sk) ((struct sco_pinfo *) sk) struct sco_pinfo { struct bt_sock bt; bdaddr_t src; bdaddr_t dst; __u32 flags; __u16 setting; struct sco_conn *conn; }; /* ---- SCO timers ---- */ #define SCO_CONN_TIMEOUT (HZ * 40) #define SCO_DISCONN_TIMEOUT (HZ * 2) static void sco_sock_timeout(unsigned long arg) { struct sock *sk = (struct sock *) arg; BT_DBG("sock %p state %d", sk, sk->sk_state); bh_lock_sock(sk); sk->sk_err = ETIMEDOUT; sk->sk_state_change(sk); bh_unlock_sock(sk); sco_sock_kill(sk); sock_put(sk); } static void sco_sock_set_timer(struct sock *sk, long timeout) { BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout); } static void sco_sock_clear_timer(struct sock *sk) { BT_DBG("sock %p state %d", sk, sk->sk_state); sk_stop_timer(sk, &sk->sk_timer); } /* ---- SCO connections ---- */ static struct sco_conn *sco_conn_add(struct hci_conn *hcon) { struct hci_dev *hdev = hcon->hdev; struct sco_conn *conn = hcon->sco_data; if (conn) return conn; conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL); if (!conn) return NULL; spin_lock_init(&conn->lock); hcon->sco_data = conn; conn->hcon = hcon; if (hdev->sco_mtu > 0) conn->mtu = hdev->sco_mtu; else conn->mtu = 60; BT_DBG("hcon %p conn %p", hcon, conn); return 
conn; } /* Delete channel. * Must be called on the locked socket. */ static void sco_chan_del(struct sock *sk, int err) { struct sco_conn *conn; conn = sco_pi(sk)->conn; BT_DBG("sk %p, conn %p, err %d", sk, conn, err); if (conn) { sco_conn_lock(conn); conn->sk = NULL; sco_pi(sk)->conn = NULL; sco_conn_unlock(conn); if (conn->hcon) hci_conn_drop(conn->hcon); } sk->sk_state = BT_CLOSED; sk->sk_err = err; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_ZAPPED); } static int sco_conn_del(struct hci_conn *hcon, int err) { struct sco_conn *conn = hcon->sco_data; struct sock *sk; if (!conn) return 0; BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); /* Kill socket */ sco_conn_lock(conn); sk = conn->sk; sco_conn_unlock(conn); if (sk) { bh_lock_sock(sk); sco_sock_clear_timer(sk); sco_chan_del(sk, err); bh_unlock_sock(sk); sco_sock_kill(sk); } hcon->sco_data = NULL; kfree(conn); return 0; } static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) { BT_DBG("conn %p", conn); sco_pi(sk)->conn = conn; conn->sk = sk; if (parent) bt_accept_enqueue(parent, sk); } static int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) { int err = 0; sco_conn_lock(conn); if (conn->sk) err = -EBUSY; else __sco_chan_add(conn, sk, parent); sco_conn_unlock(conn); return err; } static int sco_connect(struct sock *sk) { struct sco_conn *conn; struct hci_conn *hcon; struct hci_dev *hdev; int err, type; BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src); if (!hdev) return -EHOSTUNREACH; hci_dev_lock(hdev); if (lmp_esco_capable(hdev) && !disable_esco) type = ESCO_LINK; else type = SCO_LINK; if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT && (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) { err = -EOPNOTSUPP; goto done; } hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst, sco_pi(sk)->setting); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto done; } conn = sco_conn_add(hcon); 
if (!conn) { hci_conn_drop(hcon); err = -ENOMEM; goto done; } /* Update source addr of the socket */ bacpy(&sco_pi(sk)->src, &hcon->src); err = sco_chan_add(conn, sk, NULL); if (err) goto done; if (hcon->state == BT_CONNECTED) { sco_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; } else { sk->sk_state = BT_CONNECT; sco_sock_set_timer(sk, sk->sk_sndtimeo); } done: hci_dev_unlock(hdev); hci_dev_put(hdev); return err; } static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) { struct sco_conn *conn = sco_pi(sk)->conn; struct sk_buff *skb; int err; /* Check outgoing MTU */ if (len > conn->mtu) return -EINVAL; BT_DBG("sk %p len %d", sk, len); skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) return err; if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { kfree_skb(skb); return -EFAULT; } hci_send_sco(conn->hcon, skb); return len; } static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) { struct sock *sk; sco_conn_lock(conn); sk = conn->sk; sco_conn_unlock(conn); if (!sk) goto drop; BT_DBG("sk %p len %d", sk, skb->len); if (sk->sk_state != BT_CONNECTED) goto drop; if (!sock_queue_rcv_skb(sk, skb)) return; drop: kfree_skb(skb); } /* -------- Socket interface ---------- */ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) { struct sock *sk; sk_for_each(sk, &sco_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; if (!bacmp(&sco_pi(sk)->src, ba)) return sk; } return NULL; } /* Find socket listening on source bdaddr. * Returns closest match. */ static struct sock *sco_get_sock_listen(bdaddr_t *src) { struct sock *sk = NULL, *sk1 = NULL; read_lock(&sco_sk_list.lock); sk_for_each(sk, &sco_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; /* Exact match. */ if (!bacmp(&sco_pi(sk)->src, src)) break; /* Closest match */ if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY)) sk1 = sk; } read_unlock(&sco_sk_list.lock); return sk ? 
sk : sk1; } static void sco_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } static void sco_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted channels */ while ((sk = bt_accept_dequeue(parent, NULL))) { sco_sock_close(sk); sco_sock_kill(sk); } parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. */ static void sco_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %d", sk, sk->sk_state); /* Kill poor orphan */ bt_sock_unlink(&sco_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static void __sco_sock_close(struct sock *sk) { BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); switch (sk->sk_state) { case BT_LISTEN: sco_sock_cleanup_listen(sk); break; case BT_CONNECTED: case BT_CONFIG: if (sco_pi(sk)->conn->hcon) { sk->sk_state = BT_DISCONN; sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); hci_conn_drop(sco_pi(sk)->conn->hcon); sco_pi(sk)->conn->hcon = NULL; } else sco_chan_del(sk, ECONNRESET); break; case BT_CONNECT2: case BT_CONNECT: case BT_DISCONN: sco_chan_del(sk, ECONNRESET); break; default: sock_set_flag(sk, SOCK_ZAPPED); break; } } /* Must be called on unlocked socket. 
*/ static void sco_sock_close(struct sock *sk) { sco_sock_clear_timer(sk); lock_sock(sk); __sco_sock_close(sk); release_sock(sk); sco_sock_kill(sk); } static void sco_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; bt_sk(sk)->flags = bt_sk(parent)->flags; security_sk_clone(parent, sk); } } static struct proto sco_proto = { .name = "SCO", .owner = THIS_MODULE, .obj_size = sizeof(struct sco_pinfo) }; static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) { struct sock *sk; sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto); if (!sk) return NULL; sock_init_data(sock, sk); INIT_LIST_HEAD(&bt_sk(sk)->accept_q); sk->sk_destruct = sco_sock_destruct; sk->sk_sndtimeo = SCO_CONN_TIMEOUT; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = proto; sk->sk_state = BT_OPEN; sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT; setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk); bt_sock_link(&sco_sk_list, sk); return sk; } static int sco_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_SEQPACKET) return -ESOCKTNOSUPPORT; sock->ops = &sco_sock_ops; sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC); if (!sk) return -ENOMEM; sco_sock_init(sk, NULL); return 0; } static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr); if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr); sk->sk_state = BT_BOUND; done: release_sock(sk); return err; } static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, 
int alen, int flags) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; int err; BT_DBG("sk %p", sk); if (alen < sizeof(struct sockaddr_sco) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) return -EBADFD; if (sk->sk_type != SOCK_SEQPACKET) return -EINVAL; lock_sock(sk); /* Set destination address and psm */ bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr); err = sco_connect(sk); if (err) goto done; err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: release_sock(sk); return err; } static int sco_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; bdaddr_t *src = &sco_pi(sk)->src; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } write_lock(&sco_sk_list.lock); if (__sco_get_sock_listen_by_addr(src)) { err = -EADDRINUSE; goto unlock; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; unlock: write_unlock(&sco_sk_list.lock); done: release_sock(sk); return err; } static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags) { DECLARE_WAITQUEUE(wait, current); struct sock *sk = sock->sk, *ch; long timeo; int err = 0; lock_sock(sk); timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (1) { set_current_state(TASK_INTERRUPTIBLE); if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } ch = bt_accept_dequeue(sk, newsock); if (ch) break; if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", ch); done: release_sock(sk); return err; } static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; BT_DBG("sock %p, sk %p", sock, sk); addr->sa_family = AF_BLUETOOTH; *len = sizeof(struct sockaddr_sco); if (peer) bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst); else bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src); return 0; } static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; int err; BT_DBG("sock %p, sk %p", sock, sk); err = sock_error(sk); if (err) return err; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; lock_sock(sk); if (sk->sk_state == BT_CONNECTED) err = sco_send_frame(sk, msg, len); else err = -ENOTCONN; release_sock(sk); return err; } static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting) { struct hci_dev *hdev = conn->hdev; BT_DBG("conn %p", conn); conn->state = BT_CONFIG; if (!lmp_esco_capable(hdev)) { struct hci_cp_accept_conn_req cp; bacpy(&cp.bdaddr, &conn->dst); cp.role = 0x00; /* Ignored */ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); } else { struct hci_cp_accept_sync_conn_req cp; bacpy(&cp.bdaddr, &conn->dst); cp.pkt_type = cpu_to_le16(conn->pkt_type); cp.tx_bandwidth = cpu_to_le32(0x00001f40); cp.rx_bandwidth = cpu_to_le32(0x00001f40); cp.content_format = cpu_to_le16(setting); switch (setting & 
SCO_AIRMODE_MASK) { case SCO_AIRMODE_TRANSP: if (conn->pkt_type & ESCO_2EV3) cp.max_latency = cpu_to_le16(0x0008); else cp.max_latency = cpu_to_le16(0x000D); cp.retrans_effort = 0x02; break; case SCO_AIRMODE_CVSD: cp.max_latency = cpu_to_le16(0xffff); cp.retrans_effort = 0xff; break; } hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), &cp); } } static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sco_pinfo *pi = sco_pi(sk); lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { sco_conn_defer_accept(pi->conn->hcon, pi->setting); sk->sk_state = BT_CONFIG; release_sock(sk); return 0; } release_sock(sk); return bt_sock_recvmsg(iocb, sock, msg, len, flags); } static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; int len, err = 0; struct bt_voice voice; u32 opt; BT_DBG("sk %p", sk); lock_sock(sk); switch (optname) { case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (get_user(opt, (u32 __user *) optval)) { err = -EFAULT; break; } if (opt) set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); else clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); break; case BT_VOICE: if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECT2) { err = -EINVAL; break; } voice.setting = sco_pi(sk)->setting; len = min_t(unsigned int, sizeof(voice), optlen); if (copy_from_user((char *) &voice, optval, len)) { err = -EFAULT; break; } /* Explicitly check for these values */ if (voice.setting != BT_VOICE_TRANSPARENT && voice.setting != BT_VOICE_CVSD_16BIT) { err = -EINVAL; break; } sco_pi(sk)->setting = voice.setting; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user 
*optval, int __user *optlen) { struct sock *sk = sock->sk; struct sco_options opts; struct sco_conninfo cinfo; int len, err = 0; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case SCO_OPTIONS: if (sk->sk_state != BT_CONNECTED && !(sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { err = -ENOTCONN; break; } opts.mtu = sco_pi(sk)->conn->mtu; BT_DBG("mtu %d", opts.mtu); len = min_t(unsigned int, len, sizeof(opts)); if (copy_to_user(optval, (char *)&opts, len)) err = -EFAULT; break; case SCO_CONNINFO: if (sk->sk_state != BT_CONNECTED && !(sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { err = -ENOTCONN; break; } memset(&cinfo, 0, sizeof(cinfo)); cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); len = min_t(unsigned int, len, sizeof(cinfo)); if (copy_to_user(optval, (char *)&cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int len, err = 0; struct bt_voice voice; BT_DBG("sk %p", sk); if (level == SOL_SCO) return sco_sock_getsockopt_old(sock, optname, optval, optlen); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), (u32 __user *) optval)) err = -EFAULT; break; case BT_VOICE: voice.setting = sco_pi(sk)->setting; len = min_t(unsigned int, len, sizeof(voice)); if (copy_to_user(optval, (char *)&voice, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int sco_sock_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; int err = 0; 
BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; lock_sock(sk); if (!sk->sk_shutdown) { sk->sk_shutdown = SHUTDOWN_MASK; sco_sock_clear_timer(sk); __sco_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } release_sock(sk); return err; } static int sco_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; sco_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) { lock_sock(sk); err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); release_sock(sk); } sock_orphan(sk); sco_sock_kill(sk); return err; } static void sco_conn_ready(struct sco_conn *conn) { struct sock *parent; struct sock *sk = conn->sk; BT_DBG("conn %p", conn); if (sk) { sco_sock_clear_timer(sk); bh_lock_sock(sk); sk->sk_state = BT_CONNECTED; sk->sk_state_change(sk); bh_unlock_sock(sk); } else { sco_conn_lock(conn); parent = sco_get_sock_listen(&conn->hcon->src); if (!parent) { sco_conn_unlock(conn); return; } bh_lock_sock(parent); sk = sco_sock_alloc(sock_net(parent), NULL, BTPROTO_SCO, GFP_ATOMIC); if (!sk) { bh_unlock_sock(parent); sco_conn_unlock(conn); return; } sco_sock_init(sk, parent); bacpy(&sco_pi(sk)->src, &conn->hcon->src); bacpy(&sco_pi(sk)->dst, &conn->hcon->dst); hci_conn_hold(conn->hcon); __sco_chan_add(conn, sk, parent); if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) sk->sk_state = BT_CONNECT2; else sk->sk_state = BT_CONNECTED; /* Wake up parent */ parent->sk_data_ready(parent); bh_unlock_sock(parent); sco_conn_unlock(conn); } } /* ----- SCO interface with lower layer (HCI) ----- */ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { struct sock *sk; int lm = 0; BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); /* Find listening sockets */ read_lock(&sco_sk_list.lock); sk_for_each(sk, &sco_sk_list.head) { 
if (sk->sk_state != BT_LISTEN) continue; if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) || !bacmp(&sco_pi(sk)->src, BDADDR_ANY)) { lm |= HCI_LM_ACCEPT; if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) *flags |= HCI_PROTO_DEFER; break; } } read_unlock(&sco_sk_list.lock); return lm; } void sco_connect_cfm(struct hci_conn *hcon, __u8 status) { BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); if (!status) { struct sco_conn *conn; conn = sco_conn_add(hcon); if (conn) sco_conn_ready(conn); } else sco_conn_del(hcon, bt_to_errno(status)); } void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) { BT_DBG("hcon %p reason %d", hcon, reason); sco_conn_del(hcon, bt_to_errno(reason)); } int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) { struct sco_conn *conn = hcon->sco_data; if (!conn) goto drop; BT_DBG("conn %p len %d", conn, skb->len); if (skb->len) { sco_recv_frame(conn, skb); return 0; } drop: kfree_skb(skb); return 0; } static int sco_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; read_lock(&sco_sk_list.lock); sk_for_each(sk, &sco_sk_list.head) { seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src, &sco_pi(sk)->dst, sk->sk_state); } read_unlock(&sco_sk_list.lock); return 0; } static int sco_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, sco_debugfs_show, inode->i_private); } static const struct file_operations sco_debugfs_fops = { .open = sco_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct dentry *sco_debugfs; static const struct proto_ops sco_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = sco_sock_release, .bind = sco_sock_bind, .connect = sco_sock_connect, .listen = sco_sock_listen, .accept = sco_sock_accept, .getname = sco_sock_getname, .sendmsg = sco_sock_sendmsg, .recvmsg = sco_sock_recvmsg, .poll = bt_sock_poll, .ioctl = bt_sock_ioctl, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, .shutdown = 
sco_sock_shutdown, .setsockopt = sco_sock_setsockopt, .getsockopt = sco_sock_getsockopt }; static const struct net_proto_family sco_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = sco_sock_create, }; int __init sco_init(void) { int err; err = proto_register(&sco_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops); if (err < 0) { BT_ERR("SCO socket registration failed"); goto error; } err = bt_procfs_init(&init_net, "sco", &sco_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create SCO proc file"); bt_sock_unregister(BTPROTO_SCO); goto error; } BT_INFO("SCO socket layer initialized"); if (IS_ERR_OR_NULL(bt_debugfs)) return 0; sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs, NULL, &sco_debugfs_fops); return 0; error: proto_unregister(&sco_proto); return err; } void __exit sco_exit(void) { bt_procfs_cleanup(&init_net, "sco"); debugfs_remove(sco_debugfs); bt_sock_unregister(BTPROTO_SCO); proto_unregister(&sco_proto); } module_param(disable_esco, bool, 0644); MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
gpl-2.0
uplusplus/ls300_smdkc110
drivers/media/dvb/bt8xx/dst_ca.c
543
21662
/* CA-driver for TwinHan DST Frontend/Card Copyright (C) 2004, 2005 Manu Abraham (manu@kromtek.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/smp_lock.h> #include <linux/string.h> #include <linux/dvb/ca.h> #include "dvbdev.h" #include "dvb_frontend.h" #include "dst_ca.h" #include "dst_common.h" #define DST_CA_ERROR 0 #define DST_CA_NOTICE 1 #define DST_CA_INFO 2 #define DST_CA_DEBUG 3 #define dprintk(x, y, z, format, arg...) do { \ if (z) { \ if ((x > DST_CA_ERROR) && (x > y)) \ printk(KERN_ERR "%s: " format "\n", __func__ , ##arg); \ else if ((x > DST_CA_NOTICE) && (x > y)) \ printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \ else if ((x > DST_CA_INFO) && (x > y)) \ printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \ else if ((x > DST_CA_DEBUG) && (x > y)) \ printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \ } else { \ if (x > y) \ printk(format, ## arg); \ } \ } while(0) static unsigned int verbose = 5; module_param(verbose, int, 0644); MODULE_PARM_DESC(verbose, "verbose startup messages, default is 1 (yes)"); /* Need some more work */ static int ca_set_slot_descr(void) { /* We could make this more graceful ? */ return -EOPNOTSUPP; } /* Need some more work */ static int ca_set_pid(void) { /* We could make this more graceful ? 
*/ return -EOPNOTSUPP; } static void put_command_and_length(u8 *data, int command, int length) { data[0] = (command >> 16) & 0xff; data[1] = (command >> 8) & 0xff; data[2] = command & 0xff; data[3] = length; } static void put_checksum(u8 *check_string, int length) { dprintk(verbose, DST_CA_DEBUG, 1, " Computing string checksum."); dprintk(verbose, DST_CA_DEBUG, 1, " -> string length : 0x%02x", length); check_string[length] = dst_check_sum (check_string, length); dprintk(verbose, DST_CA_DEBUG, 1, " -> checksum : 0x%02x", check_string[length]); } static int dst_ci_command(struct dst_state* state, u8 * data, u8 *ca_string, u8 len, int read) { u8 reply; mutex_lock(&state->dst_mutex); dst_comm_init(state); msleep(65); if (write_dst(state, data, len)) { dprintk(verbose, DST_CA_INFO, 1, " Write not successful, trying to recover"); dst_error_recovery(state); goto error; } if ((dst_pio_disable(state)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " DST PIO disable failed."); goto error; } if (read_dst(state, &reply, GET_ACK) < 0) { dprintk(verbose, DST_CA_INFO, 1, " Read not successful, trying to recover"); dst_error_recovery(state); goto error; } if (read) { if (! dst_wait_dst_ready(state, LONG_DELAY)) { dprintk(verbose, DST_CA_NOTICE, 1, " 8820 not ready"); goto error; } if (read_dst(state, ca_string, 128) < 0) { /* Try to make this dynamic */ dprintk(verbose, DST_CA_INFO, 1, " Read not successful, trying to recover"); dst_error_recovery(state); goto error; } } mutex_unlock(&state->dst_mutex); return 0; error: mutex_unlock(&state->dst_mutex); return -EIO; } static int dst_put_ci(struct dst_state *state, u8 *data, int len, u8 *ca_string, int read) { u8 dst_ca_comm_err = 0; while (dst_ca_comm_err < RETRIES) { dprintk(verbose, DST_CA_NOTICE, 1, " Put Command"); if (dst_ci_command(state, data, ca_string, len, read)) { // If error dst_error_recovery(state); dst_ca_comm_err++; // work required here. 
} else { break; } } if(dst_ca_comm_err == RETRIES) return -1; return 0; } static int ca_get_app_info(struct dst_state *state) { int length, str_length; static u8 command[8] = {0x07, 0x40, 0x01, 0x00, 0x01, 0x00, 0x00, 0xff}; put_checksum(&command[0], command[0]); if ((dst_put_ci(state, command, sizeof(command), state->messages, GET_REPLY)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !"); return -1; } dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !"); dprintk(verbose, DST_CA_INFO, 1, " ================================ CI Module Application Info ======================================"); dprintk(verbose, DST_CA_INFO, 1, " Application Type=[%d], Application Vendor=[%d], Vendor Code=[%d]\n%s: Application info=[%s]", state->messages[7], (state->messages[8] << 8) | state->messages[9], (state->messages[10] << 8) | state->messages[11], __func__, (char *)(&state->messages[12])); dprintk(verbose, DST_CA_INFO, 1, " =================================================================================================="); // Transform dst message to correct application_info message length = state->messages[5]; str_length = length - 6; if (str_length < 0) { str_length = 0; dprintk(verbose, DST_CA_ERROR, 1, "Invalid string length returned in ca_get_app_info(). 
Recovering."); } // First, the command and length fields put_command_and_length(&state->messages[0], CA_APP_INFO, length); // Copy application_type, application_manufacturer and manufacturer_code memcpy(&state->messages[4], &state->messages[7], 5); // Set string length and copy string state->messages[9] = str_length; memcpy(&state->messages[10], &state->messages[12], str_length); return 0; } static int ca_get_ca_info(struct dst_state *state) { int srcPtr, dstPtr, i, num_ids; static u8 slot_command[8] = {0x07, 0x40, 0x00, 0x00, 0x02, 0x00, 0x00, 0xff}; const int in_system_id_pos = 8, out_system_id_pos = 4, in_num_ids_pos = 7; put_checksum(&slot_command[0], slot_command[0]); if ((dst_put_ci(state, slot_command, sizeof (slot_command), state->messages, GET_REPLY)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !"); return -1; } dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !"); // Print raw data dprintk(verbose, DST_CA_INFO, 0, " DST data = ["); for (i = 0; i < state->messages[0] + 1; i++) { dprintk(verbose, DST_CA_INFO, 0, " 0x%02x", state->messages[i]); } dprintk(verbose, DST_CA_INFO, 0, "]\n"); // Set the command and length of the output num_ids = state->messages[in_num_ids_pos]; if (num_ids >= 100) { num_ids = 100; dprintk(verbose, DST_CA_ERROR, 1, "Invalid number of ids (>100). 
Recovering."); } put_command_and_length(&state->messages[0], CA_INFO, num_ids * 2); dprintk(verbose, DST_CA_INFO, 0, " CA_INFO = ["); srcPtr = in_system_id_pos; dstPtr = out_system_id_pos; for(i = 0; i < num_ids; i++) { dprintk(verbose, DST_CA_INFO, 0, " 0x%02x%02x", state->messages[srcPtr + 0], state->messages[srcPtr + 1]); // Append to output state->messages[dstPtr + 0] = state->messages[srcPtr + 0]; state->messages[dstPtr + 1] = state->messages[srcPtr + 1]; srcPtr += 2; dstPtr += 2; } dprintk(verbose, DST_CA_INFO, 0, "]\n"); return 0; } static int ca_get_slot_caps(struct dst_state *state, struct ca_caps *p_ca_caps, void __user *arg) { int i; u8 slot_cap[256]; static u8 slot_command[8] = {0x07, 0x40, 0x02, 0x00, 0x02, 0x00, 0x00, 0xff}; put_checksum(&slot_command[0], slot_command[0]); if ((dst_put_ci(state, slot_command, sizeof (slot_command), slot_cap, GET_REPLY)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !"); return -1; } dprintk(verbose, DST_CA_NOTICE, 1, " -->dst_put_ci SUCCESS !"); /* Will implement the rest soon */ dprintk(verbose, DST_CA_INFO, 1, " Slot cap = [%d]", slot_cap[7]); dprintk(verbose, DST_CA_INFO, 0, "===================================\n"); for (i = 0; i < slot_cap[0] + 1; i++) dprintk(verbose, DST_CA_INFO, 0, " %d", slot_cap[i]); dprintk(verbose, DST_CA_INFO, 0, "\n"); p_ca_caps->slot_num = 1; p_ca_caps->slot_type = 1; p_ca_caps->descr_num = slot_cap[7]; p_ca_caps->descr_type = 1; if (copy_to_user(arg, p_ca_caps, sizeof (struct ca_caps))) return -EFAULT; return 0; } /* Need some more work */ static int ca_get_slot_descr(struct dst_state *state, struct ca_msg *p_ca_message, void __user *arg) { return -EOPNOTSUPP; } static int ca_get_slot_info(struct dst_state *state, struct ca_slot_info *p_ca_slot_info, void __user *arg) { int i; static u8 slot_command[8] = {0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff}; u8 *slot_info = state->messages; put_checksum(&slot_command[0], 7); if ((dst_put_ci(state, slot_command, sizeof 
(slot_command), slot_info, GET_REPLY)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !"); return -1; } dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !"); /* Will implement the rest soon */ dprintk(verbose, DST_CA_INFO, 1, " Slot info = [%d]", slot_info[3]); dprintk(verbose, DST_CA_INFO, 0, "===================================\n"); for (i = 0; i < 8; i++) dprintk(verbose, DST_CA_INFO, 0, " %d", slot_info[i]); dprintk(verbose, DST_CA_INFO, 0, "\n"); if (slot_info[4] & 0x80) { p_ca_slot_info->flags = CA_CI_MODULE_PRESENT; p_ca_slot_info->num = 1; p_ca_slot_info->type = CA_CI; } else if (slot_info[4] & 0x40) { p_ca_slot_info->flags = CA_CI_MODULE_READY; p_ca_slot_info->num = 1; p_ca_slot_info->type = CA_CI; } else p_ca_slot_info->flags = 0; if (copy_to_user(arg, p_ca_slot_info, sizeof (struct ca_slot_info))) return -EFAULT; return 0; } static int ca_get_message(struct dst_state *state, struct ca_msg *p_ca_message, void __user *arg) { u8 i = 0; u32 command = 0; if (copy_from_user(p_ca_message, arg, sizeof (struct ca_msg))) return -EFAULT; if (p_ca_message->msg) { dprintk(verbose, DST_CA_NOTICE, 1, " Message = [%02x %02x %02x]", p_ca_message->msg[0], p_ca_message->msg[1], p_ca_message->msg[2]); for (i = 0; i < 3; i++) { command = command | p_ca_message->msg[i]; if (i < 2) command = command << 8; } dprintk(verbose, DST_CA_NOTICE, 1, " Command=[0x%x]", command); switch (command) { case CA_APP_INFO: memcpy(p_ca_message->msg, state->messages, 128); if (copy_to_user(arg, p_ca_message, sizeof (struct ca_msg)) ) return -EFAULT; break; case CA_INFO: memcpy(p_ca_message->msg, state->messages, 128); if (copy_to_user(arg, p_ca_message, sizeof (struct ca_msg)) ) return -EFAULT; break; } } return 0; } static int handle_dst_tag(struct dst_state *state, struct ca_msg *p_ca_message, struct ca_msg *hw_buffer, u32 length) { if (state->dst_hw_cap & DST_TYPE_HAS_SESSION) { hw_buffer->msg[2] = p_ca_message->msg[1]; /* MSB */ hw_buffer->msg[3] = 
p_ca_message->msg[2]; /* LSB */ } else { if (length > 247) { dprintk(verbose, DST_CA_ERROR, 1, " Message too long ! *** Bailing Out *** !"); return -1; } hw_buffer->msg[0] = (length & 0xff) + 7; hw_buffer->msg[1] = 0x40; hw_buffer->msg[2] = 0x03; hw_buffer->msg[3] = 0x00; hw_buffer->msg[4] = 0x03; hw_buffer->msg[5] = length & 0xff; hw_buffer->msg[6] = 0x00; /* * Need to compute length for EN50221 section 8.3.2, for the time being * assuming 8.3.2 is not applicable */ memcpy(&hw_buffer->msg[7], &p_ca_message->msg[4], length); } return 0; } static int write_to_8820(struct dst_state *state, struct ca_msg *hw_buffer, u8 length, u8 reply) { if ((dst_put_ci(state, hw_buffer->msg, length, hw_buffer->msg, reply)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " DST-CI Command failed."); dprintk(verbose, DST_CA_NOTICE, 1, " Resetting DST."); rdc_reset_state(state); return -1; } dprintk(verbose, DST_CA_NOTICE, 1, " DST-CI Command success."); return 0; } static u32 asn_1_decode(u8 *asn_1_array) { u8 length_field = 0, word_count = 0, count = 0; u32 length = 0; length_field = asn_1_array[0]; dprintk(verbose, DST_CA_DEBUG, 1, " Length field=[%02x]", length_field); if (length_field < 0x80) { length = length_field & 0x7f; dprintk(verbose, DST_CA_DEBUG, 1, " Length=[%02x]\n", length); } else { word_count = length_field & 0x7f; for (count = 0; count < word_count; count++) { length = length << 8; length += asn_1_array[count + 1]; dprintk(verbose, DST_CA_DEBUG, 1, " Length=[%04x]", length); } } return length; } static int debug_string(u8 *msg, u32 length, u32 offset) { u32 i; dprintk(verbose, DST_CA_DEBUG, 0, " String=[ "); for (i = offset; i < length; i++) dprintk(verbose, DST_CA_DEBUG, 0, "%02x ", msg[i]); dprintk(verbose, DST_CA_DEBUG, 0, "]\n"); return 0; } static int ca_set_pmt(struct dst_state *state, struct ca_msg *p_ca_message, struct ca_msg *hw_buffer, u8 reply, u8 query) { u32 length = 0; u8 tag_length = 8; length = asn_1_decode(&p_ca_message->msg[3]); dprintk(verbose, 
DST_CA_DEBUG, 1, " CA Message length=[%d]", length); debug_string(&p_ca_message->msg[4], length, 0); /* length is excluding tag & length */ memset(hw_buffer->msg, '\0', length); handle_dst_tag(state, p_ca_message, hw_buffer, length); put_checksum(hw_buffer->msg, hw_buffer->msg[0]); debug_string(hw_buffer->msg, (length + tag_length), 0); /* tags too */ write_to_8820(state, hw_buffer, (length + tag_length), reply); return 0; } /* Board supports CA PMT reply ? */ static int dst_check_ca_pmt(struct dst_state *state, struct ca_msg *p_ca_message, struct ca_msg *hw_buffer) { int ca_pmt_reply_test = 0; /* Do test board */ /* Not there yet but soon */ /* CA PMT Reply capable */ if (ca_pmt_reply_test) { if ((ca_set_pmt(state, p_ca_message, hw_buffer, 1, GET_REPLY)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " ca_set_pmt.. failed !"); return -1; } /* Process CA PMT Reply */ /* will implement soon */ dprintk(verbose, DST_CA_ERROR, 1, " Not there yet"); } /* CA PMT Reply not capable */ if (!ca_pmt_reply_test) { if ((ca_set_pmt(state, p_ca_message, hw_buffer, 0, NO_REPLY)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " ca_set_pmt.. failed !"); return -1; } dprintk(verbose, DST_CA_NOTICE, 1, " ca_set_pmt.. 
success !"); /* put a dummy message */ } return 0; } static int ca_send_message(struct dst_state *state, struct ca_msg *p_ca_message, void __user *arg) { int i = 0; unsigned int ca_message_header_len; u32 command = 0; struct ca_msg *hw_buffer; int result = 0; if ((hw_buffer = kmalloc(sizeof (struct ca_msg), GFP_KERNEL)) == NULL) { dprintk(verbose, DST_CA_ERROR, 1, " Memory allocation failure"); return -ENOMEM; } dprintk(verbose, DST_CA_DEBUG, 1, " "); if (copy_from_user(p_ca_message, arg, sizeof (struct ca_msg))) { result = -EFAULT; goto free_mem_and_exit; } if (p_ca_message->msg) { ca_message_header_len = p_ca_message->length; /* Restore it back when you are done */ /* EN50221 tag */ command = 0; for (i = 0; i < 3; i++) { command = command | p_ca_message->msg[i]; if (i < 2) command = command << 8; } dprintk(verbose, DST_CA_DEBUG, 1, " Command=[0x%x]\n", command); switch (command) { case CA_PMT: dprintk(verbose, DST_CA_DEBUG, 1, "Command = SEND_CA_PMT"); if ((ca_set_pmt(state, p_ca_message, hw_buffer, 0, 0)) < 0) { // code simplification started dprintk(verbose, DST_CA_ERROR, 1, " -->CA_PMT Failed !"); result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_PMT Success !"); break; case CA_PMT_REPLY: dprintk(verbose, DST_CA_INFO, 1, "Command = CA_PMT_REPLY"); /* Have to handle the 2 basic types of cards here */ if ((dst_check_ca_pmt(state, p_ca_message, hw_buffer)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_PMT_REPLY Failed !"); result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_PMT_REPLY Success !"); break; case CA_APP_INFO_ENQUIRY: // only for debugging dprintk(verbose, DST_CA_INFO, 1, " Getting Cam Application information"); if ((ca_get_app_info(state)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_APP_INFO_ENQUIRY Failed !"); result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_APP_INFO_ENQUIRY Success !"); break; case CA_INFO_ENQUIRY: dprintk(verbose, DST_CA_INFO, 1, " 
Getting CA Information"); if ((ca_get_ca_info(state)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_INFO_ENQUIRY Failed !"); result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_INFO_ENQUIRY Success !"); break; } } free_mem_and_exit: kfree (hw_buffer); return result; } static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioctl_arg) { struct dvb_device *dvbdev; struct dst_state *state; struct ca_slot_info *p_ca_slot_info; struct ca_caps *p_ca_caps; struct ca_msg *p_ca_message; void __user *arg = (void __user *)ioctl_arg; int result = 0; lock_kernel(); dvbdev = (struct dvb_device *)file->private_data; state = (struct dst_state *)dvbdev->priv; p_ca_message = kmalloc(sizeof (struct ca_msg), GFP_KERNEL); p_ca_slot_info = kmalloc(sizeof (struct ca_slot_info), GFP_KERNEL); p_ca_caps = kmalloc(sizeof (struct ca_caps), GFP_KERNEL); if (!p_ca_message || !p_ca_slot_info || !p_ca_caps) { dprintk(verbose, DST_CA_ERROR, 1, " Memory allocation failure"); result = -ENOMEM; goto free_mem_and_exit; } /* We have now only the standard ioctl's, the driver is upposed to handle internals. 
*/ switch (cmd) { case CA_SEND_MSG: dprintk(verbose, DST_CA_INFO, 1, " Sending message"); if ((ca_send_message(state, p_ca_message, arg)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SEND_MSG Failed !"); result = -1; goto free_mem_and_exit; } break; case CA_GET_MSG: dprintk(verbose, DST_CA_INFO, 1, " Getting message"); if ((ca_get_message(state, p_ca_message, arg)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_MSG Failed !"); result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_MSG Success !"); break; case CA_RESET: dprintk(verbose, DST_CA_ERROR, 1, " Resetting DST"); dst_error_bailout(state); msleep(4000); break; case CA_GET_SLOT_INFO: dprintk(verbose, DST_CA_INFO, 1, " Getting Slot info"); if ((ca_get_slot_info(state, p_ca_slot_info, arg)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_SLOT_INFO Failed !"); result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_SLOT_INFO Success !"); break; case CA_GET_CAP: dprintk(verbose, DST_CA_INFO, 1, " Getting Slot capabilities"); if ((ca_get_slot_caps(state, p_ca_caps, arg)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_CAP Failed !"); result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_CAP Success !"); break; case CA_GET_DESCR_INFO: dprintk(verbose, DST_CA_INFO, 1, " Getting descrambler description"); if ((ca_get_slot_descr(state, p_ca_message, arg)) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_DESCR_INFO Failed !"); result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_DESCR_INFO Success !"); break; case CA_SET_DESCR: dprintk(verbose, DST_CA_INFO, 1, " Setting descrambler"); if ((ca_set_slot_descr()) < 0) { dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SET_DESCR Failed !"); result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_SET_DESCR Success !"); break; case CA_SET_PID: dprintk(verbose, DST_CA_INFO, 1, " Setting PID"); if ((ca_set_pid()) < 0) { 
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SET_PID Failed !"); result = -1; goto free_mem_and_exit; } dprintk(verbose, DST_CA_INFO, 1, " -->CA_SET_PID Success !"); default: result = -EOPNOTSUPP; }; free_mem_and_exit: kfree (p_ca_message); kfree (p_ca_slot_info); kfree (p_ca_caps); unlock_kernel(); return result; } static int dst_ca_open(struct inode *inode, struct file *file) { dprintk(verbose, DST_CA_DEBUG, 1, " Device opened [%p] ", file); try_module_get(THIS_MODULE); return 0; } static int dst_ca_release(struct inode *inode, struct file *file) { dprintk(verbose, DST_CA_DEBUG, 1, " Device closed."); module_put(THIS_MODULE); return 0; } static ssize_t dst_ca_read(struct file *file, char __user *buffer, size_t length, loff_t *offset) { ssize_t bytes_read = 0; dprintk(verbose, DST_CA_DEBUG, 1, " Device read."); return bytes_read; } static ssize_t dst_ca_write(struct file *file, const char __user *buffer, size_t length, loff_t *offset) { dprintk(verbose, DST_CA_DEBUG, 1, " Device write."); return 0; } static const struct file_operations dst_ca_fops = { .owner = THIS_MODULE, .unlocked_ioctl = dst_ca_ioctl, .open = dst_ca_open, .release = dst_ca_release, .read = dst_ca_read, .write = dst_ca_write }; static struct dvb_device dvbdev_ca = { .priv = NULL, .users = 1, .readers = 1, .writers = 1, .fops = &dst_ca_fops }; struct dvb_device *dst_ca_attach(struct dst_state *dst, struct dvb_adapter *dvb_adapter) { struct dvb_device *dvbdev; dprintk(verbose, DST_CA_ERROR, 1, "registering DST-CA device"); if (dvb_register_device(dvb_adapter, &dvbdev, &dvbdev_ca, dst, DVB_DEVICE_CA) == 0) { dst->dst_ca = dvbdev; return dst->dst_ca; } return NULL; } EXPORT_SYMBOL(dst_ca_attach); MODULE_DESCRIPTION("DST DVB-S/T/C Combo CA driver"); MODULE_AUTHOR("Manu Abraham"); MODULE_LICENSE("GPL");
gpl-2.0
EnJens/kernel_tf201_stock
drivers/staging/octeon/ethernet-rx.c
543
15440
/********************************************************************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2010 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. 
* Contact Cavium Networks for more information **********************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/cache.h> #include <linux/cpumask.h> #include <linux/netdevice.h> #include <linux/init.h> #include <linux/etherdevice.h> #include <linux/ip.h> #include <linux/string.h> #include <linux/prefetch.h> #include <linux/ratelimit.h> #include <linux/smp.h> #include <net/dst.h> #ifdef CONFIG_XFRM #include <linux/xfrm.h> #include <net/xfrm.h> #endif /* CONFIG_XFRM */ #include <linux/atomic.h> #include <asm/octeon/octeon.h> #include "ethernet-defines.h" #include "ethernet-mem.h" #include "ethernet-rx.h" #include "octeon-ethernet.h" #include "ethernet-util.h" #include "cvmx-helper.h" #include "cvmx-wqe.h" #include "cvmx-fau.h" #include "cvmx-pow.h" #include "cvmx-pip.h" #include "cvmx-scratch.h" #include "cvmx-gmxx-defs.h" struct cvm_napi_wrapper { struct napi_struct napi; } ____cacheline_aligned_in_smp; static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp; struct cvm_oct_core_state { int baseline_cores; /* * The number of additional cores that could be processing * input packtes. */ atomic_t available_cores; cpumask_t cpu_state; } ____cacheline_aligned_in_smp; static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp; static void cvm_oct_enable_napi(void *_) { int cpu = smp_processor_id(); napi_schedule(&cvm_oct_napi[cpu].napi); } static void cvm_oct_enable_one_cpu(void) { int v; int cpu; /* Check to see if more CPUs are available for receive processing... */ v = atomic_sub_if_positive(1, &core_state.available_cores); if (v < 0) return; /* ... if a CPU is available, Turn on NAPI polling for that CPU. 
*/ for_each_online_cpu(cpu) { if (!cpu_test_and_set(cpu, core_state.cpu_state)) { v = smp_call_function_single(cpu, cvm_oct_enable_napi, NULL, 0); if (v) panic("Can't enable NAPI."); break; } } } static void cvm_oct_no_more_work(void) { int cpu = smp_processor_id(); /* * CPU zero is special. It always has the irq enabled when * waiting for incoming packets. */ if (cpu == 0) { enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group); return; } cpu_clear(cpu, core_state.cpu_state); atomic_add(1, &core_state.available_cores); } /** * cvm_oct_do_interrupt - interrupt handler. * * The interrupt occurs whenever the POW has packets in our group. * */ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id) { /* Disable the IRQ and start napi_poll. */ disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); cvm_oct_enable_napi(NULL); return IRQ_HANDLED; } /** * cvm_oct_check_rcv_error - process receive errors * @work: Work queue entry pointing to the packet. * * Returns Non-zero if the packet can be dropped, zero otherwise. */ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) { if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) { /* * Ignore length errors on min size packets. Some * equipment incorrectly pads packets to 64+4FCS * instead of 60+4FCS. Note these packets still get * counted as frame errors. */ } else if (USE_10MBPS_PREAMBLE_WORKAROUND && ((work->word2.snoip.err_code == 5) || (work->word2.snoip.err_code == 7))) { /* * We received a packet with either an alignment error * or a FCS error. This may be signalling that we are * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK} * off. If this is the case we need to parse the * packet to determine if we can remove a non spec * preamble and generate a correct packet. 
*/ int interface = cvmx_helper_get_interface_num(work->ipprt); int index = cvmx_helper_get_interface_index_num(work->ipprt); union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface)); if (gmxx_rxx_frm_ctl.s.pre_chk == 0) { uint8_t *ptr = cvmx_phys_to_ptr(work->packet_ptr.s.addr); int i = 0; while (i < work->len - 1) { if (*ptr != 0x55) break; ptr++; i++; } if (*ptr == 0xd5) { /* printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt); */ work->packet_ptr.s.addr += i + 1; work->len -= i + 5; } else if ((*ptr & 0xf) == 0xd) { /* printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt); */ work->packet_ptr.s.addr += i; work->len -= i + 4; for (i = 0; i < work->len; i++) { *ptr = ((*ptr & 0xf0) >> 4) | ((*(ptr + 1) & 0xf) << 4); ptr++; } } else { printk_ratelimited("Port %d unknown preamble, packet " "dropped\n", work->ipprt); /* cvmx_helper_dump_packet(work); */ cvm_oct_free_work(work); return 1; } } } else { printk_ratelimited("Port %d receive error code %d, packet dropped\n", work->ipprt, work->word2.snoip.err_code); cvm_oct_free_work(work); return 1; } return 0; } /** * cvm_oct_napi_poll - the NAPI poll function. * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller * @budget: Maximum number of packets to receive. * * Returns the number of packets processed. 
*/ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) { const int coreid = cvmx_get_core_num(); uint64_t old_group_mask; uint64_t old_scratch; int rx_count = 0; int did_work_request = 0; int packet_not_copied; /* Prefetch cvm_oct_device since we know we need it soon */ prefetch(cvm_oct_device); if (USE_ASYNC_IOBDMA) { /* Save scratch in case userspace is using it */ CVMX_SYNCIOBDMA; old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); } /* Only allow work for our group (and preserve priorities) */ old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid)); cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group); if (USE_ASYNC_IOBDMA) { cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); did_work_request = 1; } while (rx_count < budget) { struct sk_buff *skb = NULL; struct sk_buff **pskb = NULL; int skb_in_hw; cvmx_wqe_t *work; if (USE_ASYNC_IOBDMA && did_work_request) work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH); else work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT); prefetch(work); did_work_request = 0; if (work == NULL) { union cvmx_pow_wq_int wq_int; wq_int.u64 = 0; wq_int.s.iq_dis = 1 << pow_receive_group; wq_int.s.wq_int = 1 << pow_receive_group; cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64); break; } pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *)); prefetch(pskb); if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) { cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); did_work_request = 1; } if (rx_count == 0) { /* * First time through, see if there is enough * work waiting to merit waking another * CPU. 
*/ union cvmx_pow_wq_int_cntx counts; int backlog; int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores); counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group)); backlog = counts.s.iq_cnt + counts.s.ds_cnt; if (backlog > budget * cores_in_use && napi != NULL) cvm_oct_enable_one_cpu(); } skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; if (likely(skb_in_hw)) { skb = *pskb; prefetch(&skb->head); prefetch(&skb->len); } prefetch(cvm_oct_device[work->ipprt]); /* Immediately throw away all packets with receive errors */ if (unlikely(work->word2.snoip.rcv_error)) { if (cvm_oct_check_rcv_error(work)) continue; } /* * We can only use the zero copy path if skbuffs are * in the FPA pool and the packet fits in a single * buffer. */ if (likely(skb_in_hw)) { skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head); prefetch(skb->data); skb->len = work->len; skb_set_tail_pointer(skb, skb->len); packet_not_copied = 1; } else { /* * We have to copy the packet. First allocate * an skbuff for it. */ skb = dev_alloc_skb(work->len); if (!skb) { printk_ratelimited("Port %d failed to allocate " "skbuff, packet dropped\n", work->ipprt); cvm_oct_free_work(work); continue; } /* * Check if we've received a packet that was * entirely stored in the work entry. */ if (unlikely(work->word2.s.bufs == 0)) { uint8_t *ptr = work->packet_data; if (likely(!work->word2.s.not_IP)) { /* * The beginning of the packet * moves for IP packets. */ if (work->word2.s.is_v6) ptr += 2; else ptr += 6; } memcpy(skb_put(skb, work->len), ptr, work->len); /* No packet buffers to free */ } else { int segments = work->word2.s.bufs; union cvmx_buf_ptr segment_ptr = work->packet_ptr; int len = work->len; while (segments--) { union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8); /* * Octeon Errata PKI-100: The segment size is * wrong. 
Until it is fixed, calculate the * segment size based on the packet pool * buffer size. When it is fixed, the * following line should be replaced with this * one: int segment_size = * segment_ptr.s.size; */ int segment_size = CVMX_FPA_PACKET_POOL_SIZE - (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7)); /* * Don't copy more than what * is left in the packet. */ if (segment_size > len) segment_size = len; /* Copy the data into the packet */ memcpy(skb_put(skb, segment_size), cvmx_phys_to_ptr(segment_ptr.s.addr), segment_size); len -= segment_size; segment_ptr = next_ptr; } } packet_not_copied = 0; } if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) && cvm_oct_device[work->ipprt])) { struct net_device *dev = cvm_oct_device[work->ipprt]; struct octeon_ethernet *priv = netdev_priv(dev); /* * Only accept packets for devices that are * currently up. */ if (likely(dev->flags & IFF_UP)) { skb->protocol = eth_type_trans(skb, dev); skb->dev = dev; if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error || !work->word2.s.tcp_or_udp)) skb->ip_summed = CHECKSUM_NONE; else skb->ip_summed = CHECKSUM_UNNECESSARY; /* Increment RX stats for virtual ports */ if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) { #ifdef CONFIG_64BIT atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets); atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes); #else atomic_add(1, (atomic_t *)&priv->stats.rx_packets); atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes); #endif } netif_receive_skb(skb); rx_count++; } else { /* Drop any packet received for a device that isn't up */ /* printk_ratelimited("%s: Device not up, packet dropped\n", dev->name); */ #ifdef CONFIG_64BIT atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); #else atomic_add(1, (atomic_t *)&priv->stats.rx_dropped); #endif dev_kfree_skb_irq(skb); } } else { /* * Drop any packet received for a device that * doesn't exist. 
*/ printk_ratelimited("Port %d not controlled by Linux, packet dropped\n", work->ipprt); dev_kfree_skb_irq(skb); } /* * Check to see if the skbuff and work share the same * packet buffer. */ if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) { /* * This buffer needs to be replaced, increment * the number of buffers we need to free by * one. */ cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 1); cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); } else { cvm_oct_free_work(work); } } /* Restore the original POW group mask */ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask); if (USE_ASYNC_IOBDMA) { /* Restore the scratch area */ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); } cvm_oct_rx_refill_pool(0); if (rx_count < budget && napi != NULL) { /* No more work */ napi_complete(napi); cvm_oct_no_more_work(); } return rx_count; } #ifdef CONFIG_NET_POLL_CONTROLLER /** * cvm_oct_poll_controller - poll for receive packets * device. * * @dev: Device to poll. Unused */ void cvm_oct_poll_controller(struct net_device *dev) { cvm_oct_napi_poll(NULL, 16); } #endif void cvm_oct_rx_initialize(void) { int i; struct net_device *dev_for_napi = NULL; union cvmx_pow_wq_int_thrx int_thr; union cvmx_pow_wq_int_pc int_pc; for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) { if (cvm_oct_device[i]) { dev_for_napi = cvm_oct_device[i]; break; } } if (NULL == dev_for_napi) panic("No net_devices were allocated."); if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus()) atomic_set(&core_state.available_cores, max_rx_cpus); else atomic_set(&core_state.available_cores, num_online_cpus()); core_state.baseline_cores = atomic_read(&core_state.available_cores); core_state.cpu_state = CPU_MASK_NONE; for_each_possible_cpu(i) { netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi, cvm_oct_napi_poll, rx_napi_weight); napi_enable(&cvm_oct_napi[i].napi); } /* Register an IRQ hander for to receive POW interrupts */ i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, 
cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device); if (i) panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_WORKQ0 + pow_receive_group); disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); int_thr.u64 = 0; int_thr.s.tc_en = 1; int_thr.s.tc_thr = 1; /* Enable POW interrupt when our port has at least one packet */ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64); int_pc.u64 = 0; int_pc.s.pc_thr = 5; cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64); /* Scheduld NAPI now. This will indirectly enable interrupts. */ cvm_oct_enable_one_cpu(); } void cvm_oct_rx_shutdown(void) { int i; /* Shutdown all of the NAPIs */ for_each_possible_cpu(i) netif_napi_del(&cvm_oct_napi[i].napi); }
gpl-2.0
Vegaviet-Dev/Kernel_N4_N910SLK
net/ipv6/esp6.c
799
16209
/* * Copyright (C)2002 USAGI/WIDE Project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Authors * * Mitsuru KANDA @USAGI : IPv6 Support * Kazunori MIYAZAWA @USAGI : * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * * This file is derived from net/ipv4/esp.c */ #define pr_fmt(fmt) "IPv6: " fmt #include <crypto/aead.h> #include <crypto/authenc.h> #include <linux/err.h> #include <linux/module.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/esp.h> #include <linux/scatterlist.h> #include <linux/kernel.h> #include <linux/pfkeyv2.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <net/ip6_route.h> #include <net/icmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <linux/icmpv6.h> struct esp_skb_cb { struct xfrm_skb_cb xfrm; void *tmp; }; #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0])) static u32 esp6_get_mtu(struct xfrm_state *x, int mtu); /* * Allocate an AEAD request structure with extra space for SG and IV. * * For alignment considerations the upper 32 bits of the sequence number are * placed at the front, if present. Followed by the IV, the request and finally * the SG list. * * TODO: Use spare space in skb for this where possible. 
*/ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen) { unsigned int len; len = seqihlen; len += crypto_aead_ivsize(aead); if (len) { len += crypto_aead_alignmask(aead) & ~(crypto_tfm_ctx_alignment() - 1); len = ALIGN(len, crypto_tfm_ctx_alignment()); } len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead); len = ALIGN(len, __alignof__(struct scatterlist)); len += sizeof(struct scatterlist) * nfrags; return kmalloc(len, GFP_ATOMIC); } static inline __be32 *esp_tmp_seqhi(void *tmp) { return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32)); } static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen) { return crypto_aead_ivsize(aead) ? PTR_ALIGN((u8 *)tmp + seqhilen, crypto_aead_alignmask(aead) + 1) : tmp + seqhilen; } static inline struct aead_givcrypt_request *esp_tmp_givreq( struct crypto_aead *aead, u8 *iv) { struct aead_givcrypt_request *req; req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead), crypto_tfm_ctx_alignment()); aead_givcrypt_set_tfm(req, aead); return req; } static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv) { struct aead_request *req; req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead), crypto_tfm_ctx_alignment()); aead_request_set_tfm(req, aead); return req; } static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, struct aead_request *req) { return (void *)ALIGN((unsigned long)(req + 1) + crypto_aead_reqsize(aead), __alignof__(struct scatterlist)); } static inline struct scatterlist *esp_givreq_sg( struct crypto_aead *aead, struct aead_givcrypt_request *req) { return (void *)ALIGN((unsigned long)(req + 1) + crypto_aead_reqsize(aead), __alignof__(struct scatterlist)); } static void esp_output_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; kfree(ESP_SKB_CB(skb)->tmp); xfrm_output_resume(skb, err); } static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) { int err; struct ip_esp_hdr *esph; 
struct crypto_aead *aead; struct aead_givcrypt_request *req; struct scatterlist *sg; struct scatterlist *asg; struct sk_buff *trailer; void *tmp; int blksize; int clen; int alen; int plen; int tfclen; int nfrags; int assoclen; int sglists; int seqhilen; u8 *iv; u8 *tail; __be32 *seqhi; struct esp_data *esp = x->data; /* skb is pure payload to encrypt */ aead = esp->aead; alen = crypto_aead_authsize(aead); tfclen = 0; if (x->tfcpad) { struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); u32 padto; padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached)); if (skb->len < padto) tfclen = padto - skb->len; } blksize = ALIGN(crypto_aead_blocksize(aead), 4); clen = ALIGN(skb->len + 2 + tfclen, blksize); if (esp->padlen) clen = ALIGN(clen, esp->padlen); plen = clen - skb->len - tfclen; err = skb_cow_data(skb, tfclen + plen + alen, &trailer); if (err < 0) goto error; nfrags = err; assoclen = sizeof(*esph); sglists = 1; seqhilen = 0; if (x->props.flags & XFRM_STATE_ESN) { sglists += 2; seqhilen += sizeof(__be32); assoclen += seqhilen; } tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); if (!tmp) { err = -ENOMEM; goto error; } seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); req = esp_tmp_givreq(aead, iv); asg = esp_givreq_sg(aead, req); sg = asg + sglists; /* Fill padding... 
*/ tail = skb_tail_pointer(trailer); if (tfclen) { memset(tail, 0, tfclen); tail += tfclen; } do { int i; for (i = 0; i < plen - 2; i++) tail[i] = i + 1; } while (0); tail[plen - 2] = plen - 2; tail[plen - 1] = *skb_mac_header(skb); pskb_put(skb, trailer, clen - skb->len + alen); skb_push(skb, -skb_network_offset(skb)); esph = ip_esp_hdr(skb); *skb_mac_header(skb) = IPPROTO_ESP; esph->spi = x->id.spi; esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, esph->enc_data + crypto_aead_ivsize(aead) - skb->data, clen + alen); if ((x->props.flags & XFRM_STATE_ESN)) { sg_init_table(asg, 3); sg_set_buf(asg, &esph->spi, sizeof(__be32)); *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi); sg_set_buf(asg + 1, seqhi, seqhilen); sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32)); } else sg_init_one(asg, esph, sizeof(*esph)); aead_givcrypt_set_callback(req, 0, esp_output_done, skb); aead_givcrypt_set_crypt(req, sg, sg, clen, iv); aead_givcrypt_set_assoc(req, asg, assoclen); aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq.output.low); ESP_SKB_CB(skb)->tmp = tmp; err = crypto_aead_givencrypt(req); if (err == -EINPROGRESS) goto error; if (err == -EBUSY) err = NET_XMIT_DROP; kfree(tmp); error: return err; } static int esp_input_done2(struct sk_buff *skb, int err) { struct xfrm_state *x = xfrm_input_state(skb); struct esp_data *esp = x->data; struct crypto_aead *aead = esp->aead; int alen = crypto_aead_authsize(aead); int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); int elen = skb->len - hlen; int hdr_len = skb_network_header_len(skb); int padlen; u8 nexthdr[2]; kfree(ESP_SKB_CB(skb)->tmp); if (unlikely(err)) goto out; if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2)) BUG(); err = -EINVAL; padlen = nexthdr[0]; if (padlen + 2 + alen >= elen) { LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage " "padlen=%d, elen=%d\n", padlen + 2, elen - alen); goto out; } /* ... check padding bits here. 
Silly. :-) */ pskb_trim(skb, skb->len - alen - padlen - 2); __skb_pull(skb, hlen); if (x->props.mode == XFRM_MODE_TUNNEL) skb_reset_transport_header(skb); else skb_set_transport_header(skb, -hdr_len); err = nexthdr[1]; /* RFC4303: Drop dummy packets without any error */ if (err == IPPROTO_NONE) err = -EINVAL; out: return err; } static void esp_input_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; xfrm_input_resume(skb, esp_input_done2(skb, err)); } static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) { struct ip_esp_hdr *esph; struct esp_data *esp = x->data; struct crypto_aead *aead = esp->aead; struct aead_request *req; struct sk_buff *trailer; int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); int nfrags; int assoclen; int sglists; int seqhilen; int ret = 0; void *tmp; __be32 *seqhi; u8 *iv; struct scatterlist *sg; struct scatterlist *asg; if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) { ret = -EINVAL; goto out; } if (elen <= 0) { ret = -EINVAL; goto out; } if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) { ret = -EINVAL; goto out; } ret = -ENOMEM; assoclen = sizeof(*esph); sglists = 1; seqhilen = 0; if (x->props.flags & XFRM_STATE_ESN) { sglists += 2; seqhilen += sizeof(__be32); assoclen += seqhilen; } tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); if (!tmp) goto out; ESP_SKB_CB(skb)->tmp = tmp; seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); req = esp_tmp_req(aead, iv); asg = esp_req_sg(aead, req); sg = asg + sglists; skb->ip_summed = CHECKSUM_NONE; esph = (struct ip_esp_hdr *)skb->data; /* Get ivec. This can be wrong, check against another impls. 
*/ iv = esph->enc_data; sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); if ((x->props.flags & XFRM_STATE_ESN)) { sg_init_table(asg, 3); sg_set_buf(asg, &esph->spi, sizeof(__be32)); *seqhi = XFRM_SKB_CB(skb)->seq.input.hi; sg_set_buf(asg + 1, seqhi, seqhilen); sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32)); } else sg_init_one(asg, esph, sizeof(*esph)); aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen, iv); aead_request_set_assoc(req, asg, assoclen); ret = crypto_aead_decrypt(req); if (ret == -EINPROGRESS) goto out; ret = esp_input_done2(skb, ret); out: return ret; } static u32 esp6_get_mtu(struct xfrm_state *x, int mtu) { struct esp_data *esp = x->data; u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4); u32 align = max_t(u32, blksize, esp->padlen); unsigned int net_adj; if (x->props.mode != XFRM_MODE_TUNNEL) net_adj = sizeof(struct ipv6hdr); else net_adj = 0; return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - net_adj) & ~(align - 1)) + (net_adj - 2); } static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct net *net = dev_net(skb->dev); const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data; struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); struct xfrm_state *x; if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG && type != NDISC_REDIRECT) return; x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6); if (!x) return; if (type == NDISC_REDIRECT) ip6_redirect(skb, net, 0, 0); else ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID); xfrm_state_put(x); } static void esp6_destroy(struct xfrm_state *x) { struct esp_data *esp = x->data; if (!esp) return; crypto_free_aead(esp->aead); kfree(esp); } static int esp_init_aead(struct xfrm_state *x) { struct esp_data *esp = x->data; struct 
crypto_aead *aead; int err; aead = crypto_alloc_aead(x->aead->alg_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; esp->aead = aead; err = crypto_aead_setkey(aead, x->aead->alg_key, (x->aead->alg_key_len + 7) / 8); if (err) goto error; err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8); if (err) goto error; error: return err; } static int esp_init_authenc(struct xfrm_state *x) { struct esp_data *esp = x->data; struct crypto_aead *aead; struct crypto_authenc_key_param *param; struct rtattr *rta; char *key; char *p; char authenc_name[CRYPTO_MAX_ALG_NAME]; unsigned int keylen; int err; err = -EINVAL; if (x->ealg == NULL) goto error; err = -ENAMETOOLONG; if ((x->props.flags & XFRM_STATE_ESN)) { if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authencesn(%s,%s)", x->aalg ? x->aalg->alg_name : "digest_null", x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) goto error; } else { if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)", x->aalg ? x->aalg->alg_name : "digest_null", x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) goto error; } aead = crypto_alloc_aead(authenc_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; esp->aead = aead; keylen = (x->aalg ? 
(x->aalg->alg_key_len + 7) / 8 : 0) + (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param)); err = -ENOMEM; key = kmalloc(keylen, GFP_KERNEL); if (!key) goto error; p = key; rta = (void *)p; rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; rta->rta_len = RTA_LENGTH(sizeof(*param)); param = RTA_DATA(rta); p += RTA_SPACE(sizeof(*param)); if (x->aalg) { struct xfrm_algo_desc *aalg_desc; memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8); p += (x->aalg->alg_key_len + 7) / 8; aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); BUG_ON(!aalg_desc); err = -EINVAL; if (aalg_desc->uinfo.auth.icv_fullbits/8 != crypto_aead_authsize(aead)) { NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", x->aalg->alg_name, crypto_aead_authsize(aead), aalg_desc->uinfo.auth.icv_fullbits/8); goto free_key; } err = crypto_aead_setauthsize( aead, x->aalg->alg_trunc_len / 8); if (err) goto free_key; } param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8); memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8); err = crypto_aead_setkey(aead, key, keylen); free_key: kfree(key); error: return err; } static int esp6_init_state(struct xfrm_state *x) { struct esp_data *esp; struct crypto_aead *aead; u32 align; int err; if (x->encap) return -EINVAL; esp = kzalloc(sizeof(*esp), GFP_KERNEL); if (esp == NULL) return -ENOMEM; x->data = esp; if (x->aead) err = esp_init_aead(x); else err = esp_init_authenc(x); if (err) goto error; aead = esp->aead; esp->padlen = 0; x->props.header_len = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); switch (x->props.mode) { case XFRM_MODE_BEET: if (x->sel.family != AF_INET6) x->props.header_len += IPV4_BEET_PHMAXLEN + (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); break; case XFRM_MODE_TRANSPORT: break; case XFRM_MODE_TUNNEL: x->props.header_len += sizeof(struct ipv6hdr); break; default: goto error; } align = ALIGN(crypto_aead_blocksize(aead), 4); if (esp->padlen) align = max_t(u32, align, esp->padlen); x->props.trailer_len = align + 1 + 
crypto_aead_authsize(esp->aead); error: return err; } static const struct xfrm_type esp6_type = { .description = "ESP6", .owner = THIS_MODULE, .proto = IPPROTO_ESP, .flags = XFRM_TYPE_REPLAY_PROT, .init_state = esp6_init_state, .destructor = esp6_destroy, .get_mtu = esp6_get_mtu, .input = esp6_input, .output = esp6_output, .hdr_offset = xfrm6_find_1stfragopt, }; static const struct inet6_protocol esp6_protocol = { .handler = xfrm6_rcv, .err_handler = esp6_err, .flags = INET6_PROTO_NOPOLICY, }; static int __init esp6_init(void) { if (xfrm_register_type(&esp6_type, AF_INET6) < 0) { pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) { pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&esp6_type, AF_INET6); return -EAGAIN; } return 0; } static void __exit esp6_fini(void) { if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0) pr_info("%s: can't remove protocol\n", __func__); if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0) pr_info("%s: can't remove xfrm type\n", __func__); } module_init(esp6_init); module_exit(esp6_fini); MODULE_LICENSE("GPL"); MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
gpl-2.0
uclinux-cortexm/uclinux
fs/afs/server.c
799
8024
/* AFS server record management
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include "internal.h"

static unsigned afs_server_timeout = 10;	/* server timeout in seconds */

static void afs_reap_server(struct work_struct *);

/* tree of all the servers, indexed by IP address */
static struct rb_root afs_servers = RB_ROOT;
static DEFINE_RWLOCK(afs_servers_lock);

/* LRU list of all the servers not currently in use */
static LIST_HEAD(afs_server_graveyard);
static DEFINE_SPINLOCK(afs_server_graveyard_lock);
static DECLARE_DELAYED_WORK(afs_server_reaper, afs_reap_server);

/*
 * install a server record in the master tree
 *
 * Inserts @server into the global afs_servers rb-tree keyed on IP address.
 * Returns 0 on success or -EEXIST if a record with the same address is
 * already installed (the caller treats that as "server in two cells").
 */
static int afs_install_server(struct afs_server *server)
{
	struct afs_server *xserver;
	struct rb_node **pp, *p;
	int ret;

	_enter("%p", server);

	write_lock(&afs_servers_lock);

	ret = -EEXIST;
	pp = &afs_servers.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		_debug("- consider %p", p);
		xserver = rb_entry(p, struct afs_server, master_rb);
		if (server->addr.s_addr < xserver->addr.s_addr)
			pp = &(*pp)->rb_left;
		else if (server->addr.s_addr > xserver->addr.s_addr)
			pp = &(*pp)->rb_right;
		else
			goto error;	/* duplicate address */
	}

	rb_link_node(&server->master_rb, p, pp);
	rb_insert_color(&server->master_rb, &afs_servers);
	ret = 0;

error:
	write_unlock(&afs_servers_lock);
	return ret;
}

/*
 * allocate a new server record
 *
 * Returns a record with usage count 1, or NULL on allocation failure.
 * The record is not yet linked into the cell list or the master tree.
 */
static struct afs_server *afs_alloc_server(struct afs_cell *cell,
					   const struct in_addr *addr)
{
	struct afs_server *server;

	_enter("");

	server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
	if (server) {
		atomic_set(&server->usage, 1);
		server->cell = cell;

		INIT_LIST_HEAD(&server->link);
		INIT_LIST_HEAD(&server->grave);
		init_rwsem(&server->sem);
		spin_lock_init(&server->fs_lock);
		server->fs_vnodes = RB_ROOT;
		server->cb_promises = RB_ROOT;
		spin_lock_init(&server->cb_lock);
		init_waitqueue_head(&server->cb_break_waitq);
		INIT_DELAYED_WORK(&server->cb_break_work,
				  afs_dispatch_give_up_callbacks);

		memcpy(&server->addr, addr, sizeof(struct in_addr));
		server->addr.s_addr = addr->s_addr;
	}

	_leave(" = %p{%d}", server, atomic_read(&server->usage));
	return server;
}

/*
 * get an FS-server record for a cell
 *
 * Looks up @addr in @cell's server list, allocating and installing a new
 * record if none exists. Returns a referenced server, or ERR_PTR(-ENOMEM)
 * / ERR_PTR(-EEXIST) on failure. A found server is also pulled off the
 * graveyard list if it was awaiting reaping.
 */
struct afs_server *afs_lookup_server(struct afs_cell *cell,
				     const struct in_addr *addr)
{
	struct afs_server *server, *candidate;

	_enter("%p,%pI4", cell, &addr->s_addr);

	/* quick scan of the list to see if we already have the server */
	read_lock(&cell->servers_lock);

	list_for_each_entry(server, &cell->servers, link) {
		if (server->addr.s_addr == addr->s_addr)
			goto found_server_quickly;
	}
	read_unlock(&cell->servers_lock);

	/* allocate outside any lock, then re-check for a racing insert */
	candidate = afs_alloc_server(cell, addr);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	write_lock(&cell->servers_lock);

	/* check the cell's server list again */
	list_for_each_entry(server, &cell->servers, link) {
		if (server->addr.s_addr == addr->s_addr)
			goto found_server;
	}

	_debug("new");
	server = candidate;
	if (afs_install_server(server) < 0)
		goto server_in_two_cells;

	afs_get_cell(cell);
	list_add_tail(&server->link, &cell->servers);

	write_unlock(&cell->servers_lock);
	_leave(" = %p{%d}", server, atomic_read(&server->usage));
	return server;

	/* found a matching server quickly */
found_server_quickly:
	_debug("found quickly");
	afs_get_server(server);
	read_unlock(&cell->servers_lock);
no_longer_unused:
	/* rescue the record from the reaper if it was queued there */
	if (!list_empty(&server->grave)) {
		spin_lock(&afs_server_graveyard_lock);
		list_del_init(&server->grave);
		spin_unlock(&afs_server_graveyard_lock);
	}
	_leave(" = %p{%d}", server, atomic_read(&server->usage));
	return server;

	/* found a matching server on the second pass */
found_server:
	_debug("found");
	afs_get_server(server);
	write_unlock(&cell->servers_lock);
	kfree(candidate);	/* lost the race; discard our allocation */
	goto no_longer_unused;

	/* found a server that seems to be in two cells */
server_in_two_cells:
	write_unlock(&cell->servers_lock);
	kfree(candidate);
	printk(KERN_NOTICE "kAFS: Server %pI4 appears to be in two cells\n",
	       addr);
	_leave(" = -EEXIST");
	return ERR_PTR(-EEXIST);
}

/*
 * look up a server by its IP address
 *
 * Searches the global master tree only. Returns a referenced server or
 * NULL if no record exists for @_addr.
 */
struct afs_server *afs_find_server(const struct in_addr *_addr)
{
	struct afs_server *server = NULL;
	struct rb_node *p;
	struct in_addr addr = *_addr;

	_enter("%pI4", &addr.s_addr);

	read_lock(&afs_servers_lock);

	p = afs_servers.rb_node;
	while (p) {
		server = rb_entry(p, struct afs_server, master_rb);

		_debug("- consider %p", p);

		if (addr.s_addr < server->addr.s_addr) {
			p = p->rb_left;
		} else if (addr.s_addr > server->addr.s_addr) {
			p = p->rb_right;
		} else {
			afs_get_server(server);
			goto found;
		}
	}

	server = NULL;
found:
	read_unlock(&afs_servers_lock);
	ASSERTIFCMP(server, server->addr.s_addr, ==, addr.s_addr);
	_leave(" = %p", server);
	return server;
}

/*
 * destroy a server record
 * - removes from the cell list
 *
 * Drops one reference; when the count hits zero the record is moved onto
 * the graveyard (re-checking the count under the graveyard lock to close
 * the race with a concurrent afs_lookup_server() rescue) and the reaper
 * is scheduled to destroy it after afs_server_timeout seconds.
 */
void afs_put_server(struct afs_server *server)
{
	if (!server)
		return;

	_enter("%p{%d}", server, atomic_read(&server->usage));

	_debug("PUT SERVER %d", atomic_read(&server->usage));

	ASSERTCMP(atomic_read(&server->usage), >, 0);

	if (likely(!atomic_dec_and_test(&server->usage))) {
		_leave("");
		return;
	}

	afs_flush_callback_breaks(server);

	spin_lock(&afs_server_graveyard_lock);
	if (atomic_read(&server->usage) == 0) {
		list_move_tail(&server->grave, &afs_server_graveyard);
		server->time_of_death = get_seconds();
		schedule_delayed_work(&afs_server_reaper,
				      afs_server_timeout * HZ);
	}
	spin_unlock(&afs_server_graveyard_lock);
	_leave(" [dead]");
}

/*
 * destroy a dead server
 *
 * Final teardown for a record the reaper has removed from all indices:
 * asserts it holds no vnodes, promises or pending callback breaks, then
 * releases the cell reference and frees the memory.
 */
static void afs_destroy_server(struct afs_server *server)
{
	_enter("%p", server);

	ASSERTIF(server->cb_break_head != server->cb_break_tail,
		 delayed_work_pending(&server->cb_break_work));

	ASSERTCMP(server->fs_vnodes.rb_node, ==, NULL);
	ASSERTCMP(server->cb_promises.rb_node, ==, NULL);
	ASSERTCMP(server->cb_break_head, ==, server->cb_break_tail);
	ASSERTCMP(atomic_read(&server->cb_break_n), ==, 0);

	afs_put_cell(server->cell);
	kfree(server);
}

/*
 * reap dead server records
 *
 * Delayed-work handler: walks the graveyard (ordered most-dead first),
 * unlinks expired, still-unused records from the cell list and master
 * tree, and destroys them outside the locks. Re-arms itself for the
 * oldest not-yet-expired entry.
 */
static void afs_reap_server(struct work_struct *work)
{
	LIST_HEAD(corpses);
	struct afs_server *server;
	unsigned long delay, expiry;
	time_t now;

	now = get_seconds();
	spin_lock(&afs_server_graveyard_lock);

	while (!list_empty(&afs_server_graveyard)) {
		server = list_entry(afs_server_graveyard.next,
				    struct afs_server, grave);

		/* the queue is ordered most dead first */
		expiry = server->time_of_death + afs_server_timeout;
		if (expiry > now) {
			delay = (expiry - now) * HZ;
			/* re-arm the timer for the next expiry; the
			 * cancel+reschedule handles a shorter pending delay */
			if (!schedule_delayed_work(&afs_server_reaper,
						   delay)) {
				cancel_delayed_work(&afs_server_reaper);
				schedule_delayed_work(&afs_server_reaper,
						      delay);
			}
			break;
		}

		/* lock order: cell->servers_lock before afs_servers_lock */
		write_lock(&server->cell->servers_lock);
		write_lock(&afs_servers_lock);
		if (atomic_read(&server->usage) > 0) {
			/* revived in the meantime; just drop it off the
			 * graveyard */
			list_del_init(&server->grave);
		} else {
			list_move_tail(&server->grave, &corpses);
			list_del_init(&server->link);
			rb_erase(&server->master_rb, &afs_servers);
		}
		write_unlock(&afs_servers_lock);
		write_unlock(&server->cell->servers_lock);
	}

	spin_unlock(&afs_server_graveyard_lock);

	/* now reap the corpses we've extracted */
	while (!list_empty(&corpses)) {
		server = list_entry(corpses.next,
				    struct afs_server, grave);
		list_del(&server->grave);
		afs_destroy_server(server);
	}
}

/*
 * discard all the server records for rmmod
 *
 * Zeroing the timeout makes every graveyard entry immediately expired,
 * then the reaper is run once more to flush them all.
 */
void __exit afs_purge_servers(void)
{
	afs_server_timeout = 0;
	cancel_delayed_work(&afs_server_reaper);
	schedule_delayed_work(&afs_server_reaper, 0);
}
gpl-2.0
ML-Design/LG-P500-2.6.35-re-write
drivers/net/wimax/i2400m/usb-rx.c
1055
15076
/*
 * Intel Wireless WiMAX Connection 2400m
 * USB RX handling
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Use skb_clone(), break up processing in chunks
 *  - Split transport/device specific
 *  - Make buffer size dynamic to exert less memory pressure
 *
 *
 * This handles the RX path on USB.
 *
 * When a notification is received that says 'there is RX data ready',
 * we call i2400mu_rx_kick(); that wakes up the RX kthread, which
 * reads a buffer from USB and passes it to i2400m_rx() in the generic
 * handling code. The RX buffer has an specific format that is
 * described in rx.c.
 *
 * We use a kernel thread in a loop because:
 *
 *   - we want to be able to call the USB power management get/put
 *     functions (blocking) before each transaction.
 *
 *   - We might get a lot of notifications and we don't want to submit
 *     a zillion reads; by serializing, we are throttling.
 *
 *   - RX data processing can get heavy enough so that it is not
 *     appropiate for doing it in the USB callback; thus we run it in a
 *     process context.
 *
 * We provide a read buffer of an arbitrary size (short of a page); if
 * the callback reports -EOVERFLOW, it means it was too small, so we
 * just double the size and retry (being careful to append, as
 * sometimes the device provided some data). Every now and then we
 * check if the average packet size is smaller than the current packet
 * size and if so, we halve it. At the end, the size of the
 * preallocated buffer should be following the average received
 * transaction size, adapting dynamically to it.
 *
 * ROADMAP
 *
 * i2400mu_rx_kick()		   Called from notif.c when we get a
 *				   'data ready' notification
 * i2400mu_rxd()                   Kernel RX daemon
 *   i2400mu_rx()                  Receive USB data
 *   i2400m_rx()                   Send data to generic i2400m RX handling
 *
 * i2400mu_rx_setup()              called from i2400mu_bus_dev_start()
 *
 * i2400mu_rx_release()            called from i2400mu_bus_dev_stop()
 */
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "i2400m-usb.h"


#define D_SUBMODULE rx
#include "usb-debug-levels.h"

/*
 * Dynamic RX size
 *
 * We can't let the rx_size be a multiple of 512 bytes (the RX
 * endpoint's max packet size). On some USB host controllers (we
 * haven't been able to fully characterize which), if the device is
 * about to send (for example) X bytes and we only post a buffer to
 * receive n*512, it will fail to mark that as babble (so that
 * i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the
 * rest).
 *
 * So on growing or shrinking, if it is a multiple of the
 * maxpacketsize, we remove some (instead of incresing some, so in a
 * buddy allocator we try to waste less space).
 *
 * Note we also need a hook for this on i2400mu_rx() -- when we do the
 * first read, we are sure we won't hit this spot because
 * i240mm->rx_size has been set properly. However, if we have to
 * double because of -EOVERFLOW, when we launch the read to get the
 * rest of the data, we *have* to make sure that also is not a
 * multiple of the max_pkt_size.
 */

/* Return double the current expected RX size, nudged down by 8 bytes
 * if the doubled value would be a multiple of the 512-byte max packet
 * size (see the "Dynamic RX size" note above). Does not modify
 * i2400mu->rx_size itself; the caller decides whether to commit it. */
static
size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
{
	struct device *dev = &i2400mu->usb_iface->dev;
	size_t rx_size;
	const size_t max_pkt_size = 512;

	rx_size = 2 * i2400mu->rx_size;
	if (rx_size % max_pkt_size == 0) {
		rx_size -= 8;
		d_printf(1, dev,
			 "RX: expected size grew to %zu [adjusted -8] "
			 "from %zu\n",
			 rx_size, i2400mu->rx_size);
	} else
		d_printf(1, dev,
			 "RX: expected size grew to %zu from %zu\n",
			 rx_size, i2400mu->rx_size);
	return rx_size;
}

/* Halve the expected RX size when the running average over the last
 * 100 messages says the buffer is oversized (and auto-shrink is
 * enabled), applying the same not-a-multiple-of-512 adjustment.
 * Resets the averaging counters after shrinking. */
static
void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
{
	const size_t max_pkt_size = 512;
	struct device *dev = &i2400mu->usb_iface->dev;

	if (unlikely(i2400mu->rx_size_cnt >= 100
		     && i2400mu->rx_size_auto_shrink)) {
		size_t avg_rx_size =
			i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
		size_t new_rx_size = i2400mu->rx_size / 2;
		if (avg_rx_size < new_rx_size) {
			if (new_rx_size % max_pkt_size == 0) {
				new_rx_size -= 8;
				d_printf(1, dev,
					 "RX: expected size shrank to %zu "
					 "[adjusted -8] from %zu\n",
					 new_rx_size, i2400mu->rx_size);
			} else
				d_printf(1, dev,
					 "RX: expected size shrank to %zu "
					 "from %zu\n",
					 new_rx_size, i2400mu->rx_size);
			i2400mu->rx_size = new_rx_size;
			i2400mu->rx_size_cnt = 0;
			i2400mu->rx_size_acc = i2400mu->rx_size;
		}
	}
}

/*
 * Receive a message with payloads from the USB bus into an skb
 *
 * @i2400mu: USB device descriptor
 * @rx_skb: skb where to place the received message
 *
 * Deals with all the USB-specifics of receiving, dynamically
 * increasing the buffer size if so needed. Returns the payload in the
 * skb, ready to process. On a zero-length packet, we retry.
 *
 * On soft USB errors, we retry (until they become too frequent and
 * then are promoted to hard); on hard USB errors, we reset the
 * device. On other errors (skb realloacation, we just drop it and
 * hope for the next invocation to solve it).
 *
 * Returns: pointer to the skb if ok, ERR_PTR on error.
 *   NOTE: this function might realloc the skb (if it is too small),
 *   so always update with the one returned.
 *   ERR_PTR() is < 0 on error.
 *   Will return NULL if it cannot reallocate -- this can be
 *   considered a transient retryable error.
 */
static
struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
{
	int result = 0;
	struct device *dev = &i2400mu->usb_iface->dev;
	int usb_pipe, read_size, rx_size, do_autopm;
	struct usb_endpoint_descriptor *epd;
	const size_t max_pkt_size = 512;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	do_autopm = atomic_read(&i2400mu->do_autopm);
	result = do_autopm ?
		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
	if (result < 0) {
		dev_err(dev, "RX: can't get autopm: %d\n", result);
		do_autopm = 0;	/* carry on without PM; don't 'put' later */
	}
	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
	usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
	/* read into whatever tailroom the skb still has; keep the posted
	 * size off n*512 boundaries (see "Dynamic RX size" above) */
	rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
	if (unlikely(rx_size % max_pkt_size == 0)) {
		rx_size -= 8;
		d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
	}
	result = usb_bulk_msg(
		i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
		rx_size, &read_size, 200);
	usb_mark_last_busy(i2400mu->usb_dev);
	switch (result) {
	case 0:
		if (read_size == 0)
			goto retry;	/* ZLP, just resubmit */
		skb_put(rx_skb, read_size);
		break;
	case -EPIPE:
		/*
		 * Stall -- maybe the device is choking with our
		 * requests. Clear it and give it some time. If they
		 * happen to often, it might be another symptom, so we
		 * reset.
		 *
		 * No error handling for usb_clear_halt(0; if it
		 * works, the retry works; if it fails, this switch
		 * does the error handling for us.
		 */
		if (edc_inc(&i2400mu->urb_edc,
			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "BM-CMD: too many stalls in "
				"URB; resetting device\n");
			goto do_reset;
		}
		usb_clear_halt(i2400mu->usb_dev, usb_pipe);
		msleep(10);	/* give the device some time */
		goto retry;
	case -EINVAL:			/* while removing driver */
	case -ENODEV:			/* dev disconnect ... */
	case -ENOENT:			/* just ignore it */
	case -ESHUTDOWN:
	case -ECONNRESET:
		break;
	case -EOVERFLOW: {		/* too small, reallocate */
		struct sk_buff *new_skb;
		rx_size = i2400mu_rx_size_grow(i2400mu);
		if (rx_size <= (1 << 16))	/* cap it */
			i2400mu->rx_size = rx_size;
		else if (printk_ratelimit()) {
			dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
			result = -EINVAL;
			goto out;
		}
		/* the device may have delivered partial data before
		 * babbling, so keep it and append on the retry */
		skb_put(rx_skb, read_size);
		new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
					  GFP_KERNEL);
		if (new_skb == NULL) {
			if (printk_ratelimit())
				dev_err(dev, "RX: Can't reallocate skb to %d; "
					"RX dropped\n", rx_size);
			kfree_skb(rx_skb);
			rx_skb = NULL;
			goto out;	/* drop it...*/
		}
		kfree_skb(rx_skb);
		rx_skb = new_skb;
		i2400mu->rx_size_cnt = 0;
		i2400mu->rx_size_acc = i2400mu->rx_size;
		d_printf(1, dev, "RX: size changed to %d, received %d, "
			 "copied %d, capacity %ld\n",
			 rx_size, read_size, rx_skb->len,
			 (long) (skb_end_pointer(new_skb) - new_skb->head));
		goto retry;
	}
		/* In most cases, it happens due to the hardware scheduling a
		 * read when there was no data - unfortunately, we have no way
		 * to tell this timeout from a USB timeout. So we just ignore
		 * it. */
	case -ETIMEDOUT:
		dev_err(dev, "RX: timeout: %d\n", result);
		result = 0;
		break;
	default:			/* Any error */
		if (edc_inc(&i2400mu->urb_edc,
			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
			goto error_reset;
		dev_err(dev, "RX: error receiving URB: %d, retrying\n",
			result);
		goto retry;
	}
out:
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
	return rx_skb;

error_reset:
	dev_err(dev, "RX: maximum errors in URB exceeded; "
		"resetting device\n");
do_reset:
	usb_queue_reset_device(i2400mu->usb_iface);
	rx_skb = ERR_PTR(result);
	goto out;
}

/*
 * Kernel thread for USB reception of data
 *
 * This thread waits for a kick; once kicked, it will allocate an skb
 * and receive a single message to it from USB (using
 * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
 * code for processing.
 *
 * When done processing, it runs some dirty statistics to verify if
 * the last 100 messages received were smaller than half of the
 * current RX buffer size. In that case, the RX buffer size is
 * halved. This will helps lowering the pressure on the memory
 * allocator.
 *
 * Hard errors force the thread to exit.
 */
static
int i2400mu_rxd(void *_i2400mu)
{
	int result = 0;
	struct i2400mu *i2400mu = _i2400mu;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	size_t pending;
	int rx_size;
	struct sk_buff *rx_skb;
	unsigned long flags;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	/* publish ourselves so i2400mu_rx_release() can stop us */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	BUG_ON(i2400mu->rx_kthread != NULL);
	i2400mu->rx_kthread = current;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	while (1) {
		d_printf(2, dev, "RX: waiting for messages\n");
		pending = 0;
		wait_event_interruptible(
			i2400mu->rx_wq,
			(kthread_should_stop()	/* check this first! */
			 || (pending = atomic_read(
				     &i2400mu->rx_pending_count)))
			);
		if (kthread_should_stop())
			break;
		if (pending == 0)
			continue;
		rx_size = i2400mu->rx_size;
		d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
		rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
		if (rx_skb == NULL) {
			dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
				rx_size);
			msleep(50);	/* give it some time? */
			continue;
		}

		/* Receive the message with the payloads */
		rx_skb = i2400mu_rx(i2400mu, rx_skb);
		result = PTR_ERR(rx_skb);
		if (IS_ERR(rx_skb))
			goto out;	/* hard error: thread exits */
		atomic_dec(&i2400mu->rx_pending_count);
		if (rx_skb == NULL || rx_skb->len == 0) {
			/* some "ignorable" condition */
			kfree_skb(rx_skb);
			continue;
		}

		/* Deliver the message to the generic i2400m code */
		i2400mu->rx_size_cnt++;
		i2400mu->rx_size_acc += rx_skb->len;
		result = i2400m_rx(i2400m, rx_skb);
		if (result == -EIO
		    && edc_inc(&i2400mu->urb_edc,
			       EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			goto error_reset;
		}

		/* Maybe adjust RX buffer size */
		i2400mu_rx_size_maybe_shrink(i2400mu);
	}
	result = 0;
out:
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
	return result;

error_reset:
	dev_err(dev, "RX: maximum errors in received buffer exceeded; "
		"resetting device\n");
	usb_queue_reset_device(i2400mu->usb_iface);
	goto out;
}


/*
 * Start reading from the device
 *
 * @i2400m: device instance
 *
 * Notify the RX thread that there is data pending.
 */
void i2400mu_rx_kick(struct i2400mu *i2400mu)
{
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;

	/* NOTE(review): trace format says "i2400mu" but passes the i2400m
	 * sub-struct pointer — debug-only cosmetic inconsistency. */
	d_fnstart(3, dev, "(i2400mu %p)\n", i2400m);
	atomic_inc(&i2400mu->rx_pending_count);
	wake_up_all(&i2400mu->rx_wq);
	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}

/* Spawn the RX kthread (which registers itself in i2400mu->rx_kthread).
 * Returns 0 or the negative errno from kthread_run(). */
int i2400mu_rx_setup(struct i2400mu *i2400mu)
{
	int result = 0;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	struct task_struct *kthread;

	kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
			      wimax_dev->name);
	/* the kthread function sets i2400mu->rx_thread */
	if (IS_ERR(kthread)) {
		result = PTR_ERR(kthread);
		dev_err(dev, "RX: cannot start thread: %d\n", result);
	}
	return result;
}

/* Stop the RX kthread if it is still running; take the thread pointer
 * under rx_lock so we don't race its own exit path clearing it. */
void i2400mu_rx_release(struct i2400mu *i2400mu)
{
	unsigned long flags;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = i2400m_dev(i2400m);
	struct task_struct *kthread;

	spin_lock_irqsave(&i2400m->rx_lock, flags);
	kthread = i2400mu->rx_kthread;
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	if (kthread)
		kthread_stop(kthread);
	else
		d_printf(1, dev, "RX: kthread had already exited\n");
}
gpl-2.0
fenggangwu/sffs
drivers/mtd/chips/map_ram.c
1311
3727
/* * Common code to handle map devices which are simple RAM * (C) 2000 Red Hat. GPL'd. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <asm/io.h> #include <asm/byteorder.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> static int mapram_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *); static int mapram_erase (struct mtd_info *, struct erase_info *); static void mapram_nop (struct mtd_info *); static struct mtd_info *map_ram_probe(struct map_info *map); static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long, unsigned long, unsigned long); static struct mtd_chip_driver mapram_chipdrv = { .probe = map_ram_probe, .name = "map_ram", .module = THIS_MODULE }; static struct mtd_info *map_ram_probe(struct map_info *map) { struct mtd_info *mtd; /* Check the first byte is RAM */ #if 0 map_write8(map, 0x55, 0); if (map_read8(map, 0) != 0x55) return NULL; map_write8(map, 0xAA, 0); if (map_read8(map, 0) != 0xAA) return NULL; /* Check the last byte is RAM */ map_write8(map, 0x55, map->size-1); if (map_read8(map, map->size-1) != 0x55) return NULL; map_write8(map, 0xAA, map->size-1); if (map_read8(map, map->size-1) != 0xAA) return NULL; #endif /* OK. It seems to be RAM. 
*/ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); if (!mtd) return NULL; map->fldrv = &mapram_chipdrv; mtd->priv = map; mtd->name = map->name; mtd->type = MTD_RAM; mtd->size = map->size; mtd->_erase = mapram_erase; mtd->_get_unmapped_area = mapram_unmapped_area; mtd->_read = mapram_read; mtd->_write = mapram_write; mtd->_panic_write = mapram_write; mtd->_sync = mapram_nop; mtd->flags = MTD_CAP_RAM; mtd->writesize = 1; mtd->erasesize = PAGE_SIZE; while(mtd->size & (mtd->erasesize - 1)) mtd->erasesize >>= 1; __module_get(THIS_MODULE); return mtd; } /* * Allow NOMMU mmap() to directly map the device (if not NULL) * - return the address to which the offset maps * - return -ENOSYS to indicate refusal to do the mapping */ static unsigned long mapram_unmapped_area(struct mtd_info *mtd, unsigned long len, unsigned long offset, unsigned long flags) { struct map_info *map = mtd->priv; return (unsigned long) map->virt + offset; } static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct map_info *map = mtd->priv; map_copy_from(map, buf, from, len); *retlen = len; return 0; } static int mapram_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct map_info *map = mtd->priv; map_copy_to(map, to, buf, len); *retlen = len; return 0; } static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr) { /* Yeah, it's inefficient. Who cares? It's faster than a _real_ flash erase. 
*/ struct map_info *map = mtd->priv; map_word allff; unsigned long i; allff = map_word_ff(map); for (i=0; i<instr->len; i += map_bankwidth(map)) map_write(map, allff, instr->addr + i); instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); return 0; } static void mapram_nop(struct mtd_info *mtd) { /* Nothing to see here */ } static int __init map_ram_init(void) { register_mtd_chip_driver(&mapram_chipdrv); return 0; } static void __exit map_ram_exit(void) { unregister_mtd_chip_driver(&mapram_chipdrv); } module_init(map_ram_init); module_exit(map_ram_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("MTD chip driver for RAM chips");
gpl-2.0
mirror-androidarmv6/android_kernel_lge_msm7x27
arch/sparc/kernel/ds.c
1567
25435
/* ds.c: Domain Services driver for Logical Domains
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/reboot.h>
#include <linux/cpu.h>

#include <asm/ldc.h>
#include <asm/vio.h>
#include <asm/mdesc.h>
#include <asm/head.h>
#include <asm/irq.h>

#define DRV_MODULE_NAME		"ds"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"Jul 11, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM domain services driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Common header carried by every DS protocol message. */
struct ds_msg_tag {
	__u32			type;
#define DS_INIT_REQ		0x00
#define DS_INIT_ACK		0x01
#define DS_INIT_NACK		0x02
#define DS_REG_REQ		0x03
#define DS_REG_ACK		0x04
#define DS_REG_NACK		0x05
#define DS_UNREG_REQ		0x06
#define DS_UNREG_ACK		0x07
#define DS_UNREG_NACK		0x08
#define DS_DATA			0x09
#define DS_NACK			0x0a

	__u32			len;	/* payload length after the tag */
};

/* Result codes */
#define DS_OK			0x00
#define DS_REG_VER_NACK		0x01
#define DS_REG_DUP		0x02
#define DS_INV_HDL		0x03
#define DS_TYPE_UNKNOWN		0x04

struct ds_version {
	__u16			major;
	__u16			minor;
};

/* Handshake: version negotiation request/ack/nack. */
struct ds_ver_req {
	struct ds_msg_tag	tag;
	struct ds_version	ver;
};

struct ds_ver_ack {
	struct ds_msg_tag	tag;
	__u16			minor;
};

struct ds_ver_nack {
	struct ds_msg_tag	tag;
	__u16			major;
};

/* Service registration; svc_id is a NUL-terminated trailing string. */
struct ds_reg_req {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			major;
	__u16			minor;
	char			svc_id[0];
};

struct ds_reg_ack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			minor;
};

struct ds_reg_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			major;
};

struct ds_unreg_req {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_unreg_ack {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_unreg_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
};

/* Data message: payload for a registered service follows the handle. */
struct ds_data {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_data_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u64			result;
};

struct ds_info;

/* Per-service capability: handle, data callback, and registration state.
 * The upper 32 bits of @handle index back into ds_info->ds_states. */
struct ds_cap_state {
	__u64			handle;

	void			(*data)(struct ds_info *dp,
					struct ds_cap_state *cp,
					void *buf, int len);

	const char		*service_id;

	u8			state;
#define CAP_STATE_UNKNOWN	0x00
#define CAP_STATE_REG_SENT	0x01
#define CAP_STATE_REGISTERED	0x02
};

static void md_update_data(struct ds_info *dp,
			   struct ds_cap_state *cp,
			   void *buf, int len);
static void domain_shutdown_data(struct ds_info *dp,
				 struct ds_cap_state *cp,
				 void *buf, int len);
static void domain_panic_data(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      void *buf, int len);
#ifdef CONFIG_HOTPLUG_CPU
static void dr_cpu_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);
#endif
static void ds_pri_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);
static void ds_var_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);

/* Template copied into each ds_info; maps service names to handlers. */
static struct ds_cap_state ds_states_template[] = {
	{
		.service_id	= "md-update",
		.data		= md_update_data,
	},
	{
		.service_id	= "domain-shutdown",
		.data		= domain_shutdown_data,
	},
	{
		.service_id	= "domain-panic",
		.data		= domain_panic_data,
	},
#ifdef CONFIG_HOTPLUG_CPU
	{
		.service_id	= "dr-cpu",
		.data		= dr_cpu_data,
	},
#endif
	{
		.service_id	= "pri",
		.data		= ds_pri_data,
	},
	{
		.service_id	= "var-config",
		.data		= ds_var_data,
	},
	{
		.service_id	= "var-config-backup",
		.data		= ds_var_data,
	},
};

/* Serializes LDC sends and protects ds_info_list / handshake state. */
static DEFINE_SPINLOCK(ds_lock);

/* One instance per domain-services LDC channel. */
struct ds_info {
	struct ldc_channel	*lp;
	u8			hs_state;	/* handshake progress */
#define DS_HS_START		0x01
#define DS_HS_DONE		0x02

	u64			id;

	void			*rcv_buf;
	int			rcv_buf_len;

	struct ds_cap_state	*ds_states;
	int			num_ds_states;

	struct ds_info		*next;	/* singly-linked ds_info_list */
};

static struct ds_info *ds_info_list;

/* Look up a capability by handle; upper 32 bits are the array index. */
static struct ds_cap_state *find_cap(struct ds_info *dp, u64 handle)
{
	unsigned int index = handle >> 32;

	if (index >= dp->num_ds_states)
		return NULL;
	return
&dp->ds_states[index]; } static struct ds_cap_state *find_cap_by_string(struct ds_info *dp, const char *name) { int i; for (i = 0; i < dp->num_ds_states; i++) { if (strcmp(dp->ds_states[i].service_id, name)) continue; return &dp->ds_states[i]; } return NULL; } static int __ds_send(struct ldc_channel *lp, void *data, int len) { int err, limit = 1000; err = -EINVAL; while (limit-- > 0) { err = ldc_write(lp, data, len); if (!err || (err != -EAGAIN)) break; udelay(1); } return err; } static int ds_send(struct ldc_channel *lp, void *data, int len) { unsigned long flags; int err; spin_lock_irqsave(&ds_lock, flags); err = __ds_send(lp, data, len); spin_unlock_irqrestore(&ds_lock, flags); return err; } struct ds_md_update_req { __u64 req_num; }; struct ds_md_update_res { __u64 req_num; __u32 result; }; static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ldc_channel *lp = dp->lp; struct ds_data *dpkt = buf; struct ds_md_update_req *rp; struct { struct ds_data data; struct ds_md_update_res res; } pkt; rp = (struct ds_md_update_req *) (dpkt + 1); printk(KERN_INFO "ds-%llu: Machine description update.\n", dp->id); mdesc_update(); memset(&pkt, 0, sizeof(pkt)); pkt.data.tag.type = DS_DATA; pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); pkt.data.handle = cp->handle; pkt.res.req_num = rp->req_num; pkt.res.result = DS_OK; ds_send(lp, &pkt, sizeof(pkt)); } struct ds_shutdown_req { __u64 req_num; __u32 ms_delay; }; struct ds_shutdown_res { __u64 req_num; __u32 result; char reason[1]; }; static void domain_shutdown_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ldc_channel *lp = dp->lp; struct ds_data *dpkt = buf; struct ds_shutdown_req *rp; struct { struct ds_data data; struct ds_shutdown_res res; } pkt; rp = (struct ds_shutdown_req *) (dpkt + 1); printk(KERN_ALERT "ds-%llu: Shutdown request from " "LDOM manager received.\n", dp->id); memset(&pkt, 0, sizeof(pkt)); pkt.data.tag.type = 
DS_DATA; pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); pkt.data.handle = cp->handle; pkt.res.req_num = rp->req_num; pkt.res.result = DS_OK; pkt.res.reason[0] = 0; ds_send(lp, &pkt, sizeof(pkt)); orderly_poweroff(true); } struct ds_panic_req { __u64 req_num; }; struct ds_panic_res { __u64 req_num; __u32 result; char reason[1]; }; static void domain_panic_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ldc_channel *lp = dp->lp; struct ds_data *dpkt = buf; struct ds_panic_req *rp; struct { struct ds_data data; struct ds_panic_res res; } pkt; rp = (struct ds_panic_req *) (dpkt + 1); printk(KERN_ALERT "ds-%llu: Panic request from " "LDOM manager received.\n", dp->id); memset(&pkt, 0, sizeof(pkt)); pkt.data.tag.type = DS_DATA; pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); pkt.data.handle = cp->handle; pkt.res.req_num = rp->req_num; pkt.res.result = DS_OK; pkt.res.reason[0] = 0; ds_send(lp, &pkt, sizeof(pkt)); panic("PANIC requested by LDOM manager."); } #ifdef CONFIG_HOTPLUG_CPU struct dr_cpu_tag { __u64 req_num; __u32 type; #define DR_CPU_CONFIGURE 0x43 #define DR_CPU_UNCONFIGURE 0x55 #define DR_CPU_FORCE_UNCONFIGURE 0x46 #define DR_CPU_STATUS 0x53 /* Responses */ #define DR_CPU_OK 0x6f #define DR_CPU_ERROR 0x65 __u32 num_records; }; struct dr_cpu_resp_entry { __u32 cpu; __u32 result; #define DR_CPU_RES_OK 0x00 #define DR_CPU_RES_FAILURE 0x01 #define DR_CPU_RES_BLOCKED 0x02 #define DR_CPU_RES_CPU_NOT_RESPONDING 0x03 #define DR_CPU_RES_NOT_IN_MD 0x04 __u32 stat; #define DR_CPU_STAT_NOT_PRESENT 0x00 #define DR_CPU_STAT_UNCONFIGURED 0x01 #define DR_CPU_STAT_CONFIGURED 0x02 __u32 str_off; }; static void __dr_cpu_send_error(struct ds_info *dp, struct ds_cap_state *cp, struct ds_data *data) { struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); struct { struct ds_data data; struct dr_cpu_tag tag; } pkt; int msg_len; memset(&pkt, 0, sizeof(pkt)); pkt.data.tag.type = DS_DATA; pkt.data.handle = cp->handle; 
pkt.tag.req_num = tag->req_num; pkt.tag.type = DR_CPU_ERROR; pkt.tag.num_records = 0; msg_len = (sizeof(struct ds_data) + sizeof(struct dr_cpu_tag)); pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag); __ds_send(dp->lp, &pkt, msg_len); } static void dr_cpu_send_error(struct ds_info *dp, struct ds_cap_state *cp, struct ds_data *data) { unsigned long flags; spin_lock_irqsave(&ds_lock, flags); __dr_cpu_send_error(dp, cp, data); spin_unlock_irqrestore(&ds_lock, flags); } #define CPU_SENTINEL 0xffffffff static void purge_dups(u32 *list, u32 num_ents) { unsigned int i; for (i = 0; i < num_ents; i++) { u32 cpu = list[i]; unsigned int j; if (cpu == CPU_SENTINEL) continue; for (j = i + 1; j < num_ents; j++) { if (list[j] == cpu) list[j] = CPU_SENTINEL; } } } static int dr_cpu_size_response(int ncpus) { return (sizeof(struct ds_data) + sizeof(struct dr_cpu_tag) + (sizeof(struct dr_cpu_resp_entry) * ncpus)); } static void dr_cpu_init_response(struct ds_data *resp, u64 req_num, u64 handle, int resp_len, int ncpus, cpumask_t *mask, u32 default_stat) { struct dr_cpu_resp_entry *ent; struct dr_cpu_tag *tag; int i, cpu; tag = (struct dr_cpu_tag *) (resp + 1); ent = (struct dr_cpu_resp_entry *) (tag + 1); resp->tag.type = DS_DATA; resp->tag.len = resp_len - sizeof(struct ds_msg_tag); resp->handle = handle; tag->req_num = req_num; tag->type = DR_CPU_OK; tag->num_records = ncpus; i = 0; for_each_cpu_mask(cpu, *mask) { ent[i].cpu = cpu; ent[i].result = DR_CPU_RES_OK; ent[i].stat = default_stat; i++; } BUG_ON(i != ncpus); } static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus, u32 res, u32 stat) { struct dr_cpu_resp_entry *ent; struct dr_cpu_tag *tag; int i; tag = (struct dr_cpu_tag *) (resp + 1); ent = (struct dr_cpu_resp_entry *) (tag + 1); for (i = 0; i < ncpus; i++) { if (ent[i].cpu != cpu) continue; ent[i].result = res; ent[i].stat = stat; break; } } static int __cpuinit dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp, u64 req_num, cpumask_t *mask) { 
struct ds_data *resp; int resp_len, ncpus, cpu; unsigned long flags; ncpus = cpus_weight(*mask); resp_len = dr_cpu_size_response(ncpus); resp = kzalloc(resp_len, GFP_KERNEL); if (!resp) return -ENOMEM; dr_cpu_init_response(resp, req_num, cp->handle, resp_len, ncpus, mask, DR_CPU_STAT_CONFIGURED); mdesc_populate_present_mask(mask); mdesc_fill_in_cpu_data(mask); for_each_cpu_mask(cpu, *mask) { int err; printk(KERN_INFO "ds-%llu: Starting cpu %d...\n", dp->id, cpu); err = cpu_up(cpu); if (err) { __u32 res = DR_CPU_RES_FAILURE; __u32 stat = DR_CPU_STAT_UNCONFIGURED; if (!cpu_present(cpu)) { /* CPU not present in MD */ res = DR_CPU_RES_NOT_IN_MD; stat = DR_CPU_STAT_NOT_PRESENT; } else if (err == -ENODEV) { /* CPU did not call in successfully */ res = DR_CPU_RES_CPU_NOT_RESPONDING; } printk(KERN_INFO "ds-%llu: CPU startup failed err=%d\n", dp->id, err); dr_cpu_mark(resp, cpu, ncpus, res, stat); } } spin_lock_irqsave(&ds_lock, flags); __ds_send(dp->lp, resp, resp_len); spin_unlock_irqrestore(&ds_lock, flags); kfree(resp); /* Redistribute IRQs, taking into account the new cpus. 
*/ fixup_irqs(); return 0; } static int dr_cpu_unconfigure(struct ds_info *dp, struct ds_cap_state *cp, u64 req_num, cpumask_t *mask) { struct ds_data *resp; int resp_len, ncpus, cpu; unsigned long flags; ncpus = cpus_weight(*mask); resp_len = dr_cpu_size_response(ncpus); resp = kzalloc(resp_len, GFP_KERNEL); if (!resp) return -ENOMEM; dr_cpu_init_response(resp, req_num, cp->handle, resp_len, ncpus, mask, DR_CPU_STAT_UNCONFIGURED); for_each_cpu_mask(cpu, *mask) { int err; printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n", dp->id, cpu); err = cpu_down(cpu); if (err) dr_cpu_mark(resp, cpu, ncpus, DR_CPU_RES_FAILURE, DR_CPU_STAT_CONFIGURED); } spin_lock_irqsave(&ds_lock, flags); __ds_send(dp->lp, resp, resp_len); spin_unlock_irqrestore(&ds_lock, flags); kfree(resp); return 0; } static void __cpuinit dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ds_data *data = buf; struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); u32 *cpu_list = (u32 *) (tag + 1); u64 req_num = tag->req_num; cpumask_t mask; unsigned int i; int err; switch (tag->type) { case DR_CPU_CONFIGURE: case DR_CPU_UNCONFIGURE: case DR_CPU_FORCE_UNCONFIGURE: break; default: dr_cpu_send_error(dp, cp, data); return; } purge_dups(cpu_list, tag->num_records); cpus_clear(mask); for (i = 0; i < tag->num_records; i++) { if (cpu_list[i] == CPU_SENTINEL) continue; if (cpu_list[i] < nr_cpu_ids) cpu_set(cpu_list[i], mask); } if (tag->type == DR_CPU_CONFIGURE) err = dr_cpu_configure(dp, cp, req_num, &mask); else err = dr_cpu_unconfigure(dp, cp, req_num, &mask); if (err) dr_cpu_send_error(dp, cp, data); } #endif /* CONFIG_HOTPLUG_CPU */ struct ds_pri_msg { __u64 req_num; __u64 type; #define DS_PRI_REQUEST 0x00 #define DS_PRI_DATA 0x01 #define DS_PRI_UPDATE 0x02 }; static void ds_pri_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ds_data *dpkt = buf; struct ds_pri_msg *rp; rp = (struct ds_pri_msg *) (dpkt + 1); printk(KERN_INFO "ds-%llu: PRI 
REQ [%llx:%llx], len=%d\n", dp->id, rp->req_num, rp->type, len); } struct ds_var_hdr { __u32 type; #define DS_VAR_SET_REQ 0x00 #define DS_VAR_DELETE_REQ 0x01 #define DS_VAR_SET_RESP 0x02 #define DS_VAR_DELETE_RESP 0x03 }; struct ds_var_set_msg { struct ds_var_hdr hdr; char name_and_value[0]; }; struct ds_var_delete_msg { struct ds_var_hdr hdr; char name[0]; }; struct ds_var_resp { struct ds_var_hdr hdr; __u32 result; #define DS_VAR_SUCCESS 0x00 #define DS_VAR_NO_SPACE 0x01 #define DS_VAR_INVALID_VAR 0x02 #define DS_VAR_INVALID_VAL 0x03 #define DS_VAR_NOT_PRESENT 0x04 }; static DEFINE_MUTEX(ds_var_mutex); static int ds_var_doorbell; static int ds_var_response; static void ds_var_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ds_data *dpkt = buf; struct ds_var_resp *rp; rp = (struct ds_var_resp *) (dpkt + 1); if (rp->hdr.type != DS_VAR_SET_RESP && rp->hdr.type != DS_VAR_DELETE_RESP) return; ds_var_response = rp->result; wmb(); ds_var_doorbell = 1; } void ldom_set_var(const char *var, const char *value) { struct ds_cap_state *cp; struct ds_info *dp; unsigned long flags; spin_lock_irqsave(&ds_lock, flags); cp = NULL; for (dp = ds_info_list; dp; dp = dp->next) { struct ds_cap_state *tmp; tmp = find_cap_by_string(dp, "var-config"); if (tmp && tmp->state == CAP_STATE_REGISTERED) { cp = tmp; break; } } if (!cp) { for (dp = ds_info_list; dp; dp = dp->next) { struct ds_cap_state *tmp; tmp = find_cap_by_string(dp, "var-config-backup"); if (tmp && tmp->state == CAP_STATE_REGISTERED) { cp = tmp; break; } } } spin_unlock_irqrestore(&ds_lock, flags); if (cp) { union { struct { struct ds_data data; struct ds_var_set_msg msg; } header; char all[512]; } pkt; char *base, *p; int msg_len, loops; memset(&pkt, 0, sizeof(pkt)); pkt.header.data.tag.type = DS_DATA; pkt.header.data.handle = cp->handle; pkt.header.msg.hdr.type = DS_VAR_SET_REQ; base = p = &pkt.header.msg.name_and_value[0]; strcpy(p, var); p += strlen(var) + 1; strcpy(p, value); p += 
strlen(value) + 1; msg_len = (sizeof(struct ds_data) + sizeof(struct ds_var_set_msg) + (p - base)); msg_len = (msg_len + 3) & ~3; pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag); mutex_lock(&ds_var_mutex); spin_lock_irqsave(&ds_lock, flags); ds_var_doorbell = 0; ds_var_response = -1; __ds_send(dp->lp, &pkt, msg_len); spin_unlock_irqrestore(&ds_lock, flags); loops = 1000; while (ds_var_doorbell == 0) { if (loops-- < 0) break; barrier(); udelay(100); } mutex_unlock(&ds_var_mutex); if (ds_var_doorbell == 0 || ds_var_response != DS_VAR_SUCCESS) printk(KERN_ERR "ds-%llu: var-config [%s:%s] " "failed, response(%d).\n", dp->id, var, value, ds_var_response); } else { printk(KERN_ERR PFX "var-config not registered so " "could not set (%s) variable to (%s).\n", var, value); } } void ldom_reboot(const char *boot_command) { /* Don't bother with any of this if the boot_command * is empty. */ if (boot_command && strlen(boot_command)) { char full_boot_str[256]; strcpy(full_boot_str, "boot "); strcpy(full_boot_str + strlen("boot "), boot_command); ldom_set_var("reboot-command", full_boot_str); } sun4v_mach_sir(); } void ldom_power_off(void) { sun4v_mach_exit(0); } static void ds_conn_reset(struct ds_info *dp) { printk(KERN_ERR "ds-%llu: ds_conn_reset() from %p\n", dp->id, __builtin_return_address(0)); } static int register_services(struct ds_info *dp) { struct ldc_channel *lp = dp->lp; int i; for (i = 0; i < dp->num_ds_states; i++) { struct { struct ds_reg_req req; u8 id_buf[256]; } pbuf; struct ds_cap_state *cp = &dp->ds_states[i]; int err, msg_len; u64 new_count; if (cp->state == CAP_STATE_REGISTERED) continue; new_count = sched_clock() & 0xffffffff; cp->handle = ((u64) i << 32) | new_count; msg_len = (sizeof(struct ds_reg_req) + strlen(cp->service_id)); memset(&pbuf, 0, sizeof(pbuf)); pbuf.req.tag.type = DS_REG_REQ; pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag)); pbuf.req.handle = cp->handle; pbuf.req.major = 1; pbuf.req.minor = 0; 
strcpy(pbuf.req.svc_id, cp->service_id); err = __ds_send(lp, &pbuf, msg_len); if (err > 0) cp->state = CAP_STATE_REG_SENT; } return 0; } static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt) { if (dp->hs_state == DS_HS_START) { if (pkt->type != DS_INIT_ACK) goto conn_reset; dp->hs_state = DS_HS_DONE; return register_services(dp); } if (dp->hs_state != DS_HS_DONE) goto conn_reset; if (pkt->type == DS_REG_ACK) { struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt; struct ds_cap_state *cp = find_cap(dp, ap->handle); if (!cp) { printk(KERN_ERR "ds-%llu: REG ACK for unknown " "handle %llx\n", dp->id, ap->handle); return 0; } printk(KERN_INFO "ds-%llu: Registered %s service.\n", dp->id, cp->service_id); cp->state = CAP_STATE_REGISTERED; } else if (pkt->type == DS_REG_NACK) { struct ds_reg_nack *np = (struct ds_reg_nack *) pkt; struct ds_cap_state *cp = find_cap(dp, np->handle); if (!cp) { printk(KERN_ERR "ds-%llu: REG NACK for " "unknown handle %llx\n", dp->id, np->handle); return 0; } cp->state = CAP_STATE_UNKNOWN; } return 0; conn_reset: ds_conn_reset(dp); return -ECONNRESET; } static void __send_ds_nack(struct ds_info *dp, u64 handle) { struct ds_data_nack nack = { .tag = { .type = DS_NACK, .len = (sizeof(struct ds_data_nack) - sizeof(struct ds_msg_tag)), }, .handle = handle, .result = DS_INV_HDL, }; __ds_send(dp->lp, &nack, sizeof(nack)); } static LIST_HEAD(ds_work_list); static DECLARE_WAIT_QUEUE_HEAD(ds_wait); struct ds_queue_entry { struct list_head list; struct ds_info *dp; int req_len; int __pad; u64 req[0]; }; static void process_ds_work(void) { struct ds_queue_entry *qp, *tmp; unsigned long flags; LIST_HEAD(todo); spin_lock_irqsave(&ds_lock, flags); list_splice_init(&ds_work_list, &todo); spin_unlock_irqrestore(&ds_lock, flags); list_for_each_entry_safe(qp, tmp, &todo, list) { struct ds_data *dpkt = (struct ds_data *) qp->req; struct ds_info *dp = qp->dp; struct ds_cap_state *cp = find_cap(dp, dpkt->handle); int req_len = qp->req_len; if (!cp) { 
printk(KERN_ERR "ds-%llu: Data for unknown " "handle %llu\n", dp->id, dpkt->handle); spin_lock_irqsave(&ds_lock, flags); __send_ds_nack(dp, dpkt->handle); spin_unlock_irqrestore(&ds_lock, flags); } else { cp->data(dp, cp, dpkt, req_len); } list_del(&qp->list); kfree(qp); } } static int ds_thread(void *__unused) { DEFINE_WAIT(wait); while (1) { prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE); if (list_empty(&ds_work_list)) schedule(); finish_wait(&ds_wait, &wait); if (kthread_should_stop()) break; process_ds_work(); } return 0; } static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len) { struct ds_data *dpkt = (struct ds_data *) pkt; struct ds_queue_entry *qp; qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC); if (!qp) { __send_ds_nack(dp, dpkt->handle); } else { qp->dp = dp; memcpy(&qp->req, pkt, len); list_add_tail(&qp->list, &ds_work_list); wake_up(&ds_wait); } return 0; } static void ds_up(struct ds_info *dp) { struct ldc_channel *lp = dp->lp; struct ds_ver_req req; int err; req.tag.type = DS_INIT_REQ; req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag); req.ver.major = 1; req.ver.minor = 0; err = __ds_send(lp, &req, sizeof(req)); if (err > 0) dp->hs_state = DS_HS_START; } static void ds_reset(struct ds_info *dp) { int i; dp->hs_state = 0; for (i = 0; i < dp->num_ds_states; i++) { struct ds_cap_state *cp = &dp->ds_states[i]; cp->state = CAP_STATE_UNKNOWN; } } static void ds_event(void *arg, int event) { struct ds_info *dp = arg; struct ldc_channel *lp = dp->lp; unsigned long flags; int err; spin_lock_irqsave(&ds_lock, flags); if (event == LDC_EVENT_UP) { ds_up(dp); spin_unlock_irqrestore(&ds_lock, flags); return; } if (event == LDC_EVENT_RESET) { ds_reset(dp); spin_unlock_irqrestore(&ds_lock, flags); return; } if (event != LDC_EVENT_DATA_READY) { printk(KERN_WARNING "ds-%llu: Unexpected LDC event %d\n", dp->id, event); spin_unlock_irqrestore(&ds_lock, flags); return; } err = 0; while (1) { struct ds_msg_tag *tag; err = 
ldc_read(lp, dp->rcv_buf, sizeof(*tag)); if (unlikely(err < 0)) { if (err == -ECONNRESET) ds_conn_reset(dp); break; } if (err == 0) break; tag = dp->rcv_buf; err = ldc_read(lp, tag + 1, tag->len); if (unlikely(err < 0)) { if (err == -ECONNRESET) ds_conn_reset(dp); break; } if (err < tag->len) break; if (tag->type < DS_DATA) err = ds_handshake(dp, dp->rcv_buf); else err = ds_data(dp, dp->rcv_buf, sizeof(*tag) + err); if (err == -ECONNRESET) break; } spin_unlock_irqrestore(&ds_lock, flags); } static int __devinit ds_probe(struct vio_dev *vdev, const struct vio_device_id *id) { static int ds_version_printed; struct ldc_channel_config ds_cfg = { .event = ds_event, .mtu = 4096, .mode = LDC_MODE_STREAM, }; struct mdesc_handle *hp; struct ldc_channel *lp; struct ds_info *dp; const u64 *val; int err, i; if (ds_version_printed++ == 0) printk(KERN_INFO "%s", version); dp = kzalloc(sizeof(*dp), GFP_KERNEL); err = -ENOMEM; if (!dp) goto out_err; hp = mdesc_grab(); val = mdesc_get_property(hp, vdev->mp, "id", NULL); if (val) dp->id = *val; mdesc_release(hp); dp->rcv_buf = kzalloc(4096, GFP_KERNEL); if (!dp->rcv_buf) goto out_free_dp; dp->rcv_buf_len = 4096; dp->ds_states = kzalloc(sizeof(ds_states_template), GFP_KERNEL); if (!dp->ds_states) goto out_free_rcv_buf; memcpy(dp->ds_states, ds_states_template, sizeof(ds_states_template)); dp->num_ds_states = ARRAY_SIZE(ds_states_template); for (i = 0; i < dp->num_ds_states; i++) dp->ds_states[i].handle = ((u64)i << 32); ds_cfg.tx_irq = vdev->tx_irq; ds_cfg.rx_irq = vdev->rx_irq; lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp); if (IS_ERR(lp)) { err = PTR_ERR(lp); goto out_free_ds_states; } dp->lp = lp; err = ldc_bind(lp, "DS"); if (err) goto out_free_ldc; spin_lock_irq(&ds_lock); dp->next = ds_info_list; ds_info_list = dp; spin_unlock_irq(&ds_lock); return err; out_free_ldc: ldc_free(dp->lp); out_free_ds_states: kfree(dp->ds_states); out_free_rcv_buf: kfree(dp->rcv_buf); out_free_dp: kfree(dp); out_err: return err; } static int 
ds_remove(struct vio_dev *vdev) { return 0; } static struct vio_device_id __initdata ds_match[] = { { .type = "domain-services-port", }, {}, }; static struct vio_driver ds_driver = { .id_table = ds_match, .probe = ds_probe, .remove = ds_remove, .driver = { .name = "ds", .owner = THIS_MODULE, } }; static int __init ds_init(void) { kthread_run(ds_thread, NULL, "kldomd"); return vio_register_driver(&ds_driver); } subsys_initcall(ds_init);
gpl-2.0
joelalmeidaptg/VirginityKernel
drivers/input/keyboard/pmic8xxx-keypad.c
1823
19863
/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/mfd/pm8xxx/core.h> #include <linux/mfd/pm8xxx/gpio.h> #include <linux/input/pmic8xxx-keypad.h> #define PM8XXX_MAX_ROWS 18 #define PM8XXX_MAX_COLS 8 #define PM8XXX_ROW_SHIFT 3 #define PM8XXX_MATRIX_MAX_SIZE (PM8XXX_MAX_ROWS * PM8XXX_MAX_COLS) #define PM8XXX_MIN_ROWS 5 #define PM8XXX_MIN_COLS 5 #define MAX_SCAN_DELAY 128 #define MIN_SCAN_DELAY 1 /* in nanoseconds */ #define MAX_ROW_HOLD_DELAY 122000 #define MIN_ROW_HOLD_DELAY 30500 #define MAX_DEBOUNCE_TIME 20 #define MIN_DEBOUNCE_TIME 5 #define KEYP_CTRL 0x148 #define KEYP_CTRL_EVNTS BIT(0) #define KEYP_CTRL_EVNTS_MASK 0x3 #define KEYP_CTRL_SCAN_COLS_SHIFT 5 #define KEYP_CTRL_SCAN_COLS_MIN 5 #define KEYP_CTRL_SCAN_COLS_BITS 0x3 #define KEYP_CTRL_SCAN_ROWS_SHIFT 2 #define KEYP_CTRL_SCAN_ROWS_MIN 5 #define KEYP_CTRL_SCAN_ROWS_BITS 0x7 #define KEYP_CTRL_KEYP_EN BIT(7) #define KEYP_SCAN 0x149 #define KEYP_SCAN_READ_STATE BIT(0) #define KEYP_SCAN_DBOUNCE_SHIFT 1 #define KEYP_SCAN_PAUSE_SHIFT 3 #define KEYP_SCAN_ROW_HOLD_SHIFT 6 #define KEYP_TEST 0x14A #define KEYP_TEST_CLEAR_RECENT_SCAN BIT(6) #define KEYP_TEST_CLEAR_OLD_SCAN BIT(5) #define KEYP_TEST_READ_RESET BIT(4) #define KEYP_TEST_DTEST_EN BIT(3) #define KEYP_TEST_ABORT_READ BIT(0) #define 
KEYP_TEST_DBG_SELECT_SHIFT 1 /* bits of these registers represent * '0' for key press * '1' for key release */ #define KEYP_RECENT_DATA 0x14B #define KEYP_OLD_DATA 0x14C #define KEYP_CLOCK_FREQ 32768 /** * struct pmic8xxx_kp - internal keypad data structure * @pdata - keypad platform data pointer * @input - input device pointer for keypad * @key_sense_irq - key press/release irq number * @key_stuck_irq - key stuck notification irq number * @keycodes - array to hold the key codes * @dev - parent device pointer * @keystate - present key press/release state * @stuckstate - present state when key stuck irq * @ctrl_reg - control register value */ struct pmic8xxx_kp { const struct pm8xxx_keypad_platform_data *pdata; struct input_dev *input; int key_sense_irq; int key_stuck_irq; unsigned short keycodes[PM8XXX_MATRIX_MAX_SIZE]; struct device *dev; u16 keystate[PM8XXX_MAX_ROWS]; u16 stuckstate[PM8XXX_MAX_ROWS]; u8 ctrl_reg; }; static int pmic8xxx_kp_write_u8(struct pmic8xxx_kp *kp, u8 data, u16 reg) { int rc; rc = pm8xxx_writeb(kp->dev->parent, reg, data); return rc; } static int pmic8xxx_kp_read(struct pmic8xxx_kp *kp, u8 *data, u16 reg, unsigned num_bytes) { int rc; rc = pm8xxx_read_buf(kp->dev->parent, reg, data, num_bytes); return rc; } static int pmic8xxx_kp_read_u8(struct pmic8xxx_kp *kp, u8 *data, u16 reg) { int rc; rc = pmic8xxx_kp_read(kp, data, reg, 1); return rc; } static u8 pmic8xxx_col_state(struct pmic8xxx_kp *kp, u8 col) { /* all keys pressed on that particular row? */ if (col == 0x00) return 1 << kp->pdata->num_cols; else return col & ((1 << kp->pdata->num_cols) - 1); } /* * Synchronous read protocol for RevB0 onwards: * * 1. Write '1' to ReadState bit in KEYP_SCAN register * 2. Wait 2*32KHz clocks, so that HW can successfully enter read mode * synchronously * 3. Read rows in old array first if events are more than one * 4. Read rows in recent array * 5. Wait 4*32KHz clocks * 6. 
Write '0' to ReadState bit of KEYP_SCAN register so that hw can
 *    synchronously exit read mode.
 */
static int pmic8xxx_chk_sync_read(struct pmic8xxx_kp *kp)
{
	int rc;
	u8 scan_val;

	rc = pmic8xxx_kp_read_u8(kp, &scan_val, KEYP_SCAN);
	if (rc < 0) {
		dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc);
		return rc;
	}

	/* Step 1 of the protocol above: enter synchronous read mode */
	scan_val |= 0x1;
	rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
	if (rc < 0) {
		dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);
		return rc;
	}

	/* 2 * 32KHz clocks */
	udelay((2 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);

	return rc;
}

/*
 * Read @read_rows raw row bytes from @data_reg and convert each into a
 * column-state bitmask in @state (one u16 per configured row).
 */
static int pmic8xxx_kp_read_data(struct pmic8xxx_kp *kp, u16 *state,
					u16 data_reg, int read_rows)
{
	int rc, row;
	u8 new_data[PM8XXX_MAX_ROWS];

	rc = pmic8xxx_kp_read(kp, new_data, data_reg, read_rows);
	if (rc)
		return rc;

	for (row = 0; row < kp->pdata->num_rows; row++) {
		dev_dbg(kp->dev, "new_data[%d] = %d\n", row,
					new_data[row]);
		state[row] = pmic8xxx_col_state(kp, new_data[row]);
	}

	return rc;
}

/*
 * Perform one full synchronous matrix read: enter read mode, optionally
 * read the OLD data array (when @old_state is non-NULL), read the RECENT
 * data array into @new_state, then exit read mode.  At least
 * PM8XXX_MIN_ROWS rows are always read from the hardware.
 */
static int pmic8xxx_kp_read_matrix(struct pmic8xxx_kp *kp, u16 *new_state,
					 u16 *old_state)
{
	int rc, read_rows;
	u8 scan_val;

	if (kp->pdata->num_rows < PM8XXX_MIN_ROWS)
		read_rows = PM8XXX_MIN_ROWS;
	else
		read_rows = kp->pdata->num_rows;

	pmic8xxx_chk_sync_read(kp);

	if (old_state) {
		rc = pmic8xxx_kp_read_data(kp, old_state, KEYP_OLD_DATA,
						read_rows);
		if (rc < 0) {
			dev_err(kp->dev,
				"Error reading KEYP_OLD_DATA, rc=%d\n", rc);
			return rc;
		}
	}

	rc = pmic8xxx_kp_read_data(kp, new_state, KEYP_RECENT_DATA,
					 read_rows);
	if (rc < 0) {
		dev_err(kp->dev,
			"Error reading KEYP_RECENT_DATA, rc=%d\n", rc);
		return rc;
	}

	/* 4 * 32KHz clocks */
	udelay((4 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);

	/* Step 6 of the protocol: clear ReadState to exit read mode */
	rc = pmic8xxx_kp_read_u8(kp, &scan_val, KEYP_SCAN);
	if (rc < 0) {
		dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc);
		return rc;
	}

	scan_val &= 0xFE;
	rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
	if (rc < 0)
		dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);

	return rc;
}

/*
 * Diff @new_state against @old_state and report every changed key to the
 * input layer.  A cleared bit means "pressed" (see the register comment
 * above KEYP_RECENT_DATA).
 */
static void __pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, u16 *new_state,
					 u16 *old_state)
{
	int row, col, code;

	for (row = 0; row < kp->pdata->num_rows; row++) {
		int bits_changed = new_state[row] ^ old_state[row];

		if (!bits_changed)
			continue;

		for (col = 0; col < kp->pdata->num_cols; col++) {
			if (!(bits_changed & (1 << col)))
				continue;

			dev_dbg(kp->dev, "key [%d:%d] %s\n", row, col,
					!(new_state[row] & (1 << col)) ?
					"pressed" : "released");

			code = MATRIX_SCAN_CODE(row, col, PM8XXX_ROW_SHIFT);

			input_event(kp->input, EV_MSC, MSC_SCAN, code);
			input_report_key(kp->input,
					kp->keycodes[code],
					!(new_state[row] & (1 << col)));

			input_sync(kp->input);
		}
	}
}

/*
 * Ghost detection: if two rows each have more than one key down and they
 * share a pressed column, a fourth "ghost" key press cannot be
 * distinguished, so the whole scan is rejected.
 */
static bool pmic8xxx_detect_ghost_keys(struct pmic8xxx_kp *kp, u16 *new_state)
{
	int row, found_first = -1;
	u16 check, row_state;

	check = 0;
	for (row = 0; row < kp->pdata->num_rows; row++) {
		row_state = (~new_state[row]) &
				 ((1 << kp->pdata->num_cols) - 1);

		if (hweight16(row_state) > 1) {
			if (found_first == -1)
				found_first = row;
			if (check & row_state) {
				dev_dbg(kp->dev,
					"detected ghost key on row[%d]"
					" and row[%d]\n", found_first, row);
				return true;
			}
		}
		check |= row_state;
	}
	return false;
}

/*
 * Process the pending scan event(s).  @events is the 2-bit gray-coded
 * event counter from KEYP_CTRL: 0x1 = one event, 0x3 = two events,
 * 0x2 = more than two (some events lost).
 */
static int pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, unsigned int events)
{
	u16 new_state[PM8XXX_MAX_ROWS];
	u16 old_state[PM8XXX_MAX_ROWS];
	int rc;

	switch (events) {
	case 0x1:
		rc = pmic8xxx_kp_read_matrix(kp, new_state, NULL);
		if (rc < 0)
			return rc;

		/* detecting ghost key is not an error */
		if (pmic8xxx_detect_ghost_keys(kp, new_state))
			return 0;
		__pmic8xxx_kp_scan_matrix(kp, new_state, kp->keystate);
		memcpy(kp->keystate, new_state, sizeof(new_state));
	break;
	case 0x3: /* two events - eventcounter is gray-coded */
		rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state);
		if (rc < 0)
			return rc;

		__pmic8xxx_kp_scan_matrix(kp, old_state, kp->keystate);
		__pmic8xxx_kp_scan_matrix(kp, new_state, old_state);
		memcpy(kp->keystate, new_state, sizeof(new_state));
	break;
	case 0x2:
		dev_dbg(kp->dev, "Some key events were lost\n");
		rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state);
		if (rc < 0)
			return rc;
		__pmic8xxx_kp_scan_matrix(kp, old_state, kp->keystate);
		__pmic8xxx_kp_scan_matrix(kp, new_state, old_state);
		memcpy(kp->keystate, new_state, sizeof(new_state));
	break;
	default:
		rc = -EINVAL;
	}
	return rc;
}

/*
 * NOTE: We are reading recent and old data registers blindly
 * whenever key-stuck interrupt happens, because events counter doesn't
 * get updated when this interrupt happens due to key stuck doesn't get
 * considered as key state change.
 *
 * We are not using old data register contents after they are being read
 * because it might report the key which was pressed before the key being stuck
 * as stuck key because it's pressed status is stored in the old data
 * register.
 */
static irqreturn_t pmic8xxx_kp_stuck_irq(int irq, void *data)
{
	u16 new_state[PM8XXX_MAX_ROWS];
	u16 old_state[PM8XXX_MAX_ROWS];
	int rc;
	struct pmic8xxx_kp *kp = data;

	rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state);
	if (rc < 0) {
		dev_err(kp->dev, "failed to read keypad matrix\n");
		return IRQ_HANDLED;
	}

	__pmic8xxx_kp_scan_matrix(kp, new_state, kp->stuckstate);

	return IRQ_HANDLED;
}

/* Key sense interrupt: read the event counter and scan accordingly. */
static irqreturn_t pmic8xxx_kp_irq(int irq, void *data)
{
	struct pmic8xxx_kp *kp = data;
	u8 ctrl_val, events;
	int rc;

	rc = pmic8xxx_kp_read(kp, &ctrl_val, KEYP_CTRL, 1);
	if (rc < 0) {
		dev_err(kp->dev, "failed to read keyp_ctrl register\n");
		return IRQ_HANDLED;
	}

	events = ctrl_val & KEYP_CTRL_EVNTS_MASK;

	rc = pmic8xxx_kp_scan_matrix(kp, events);
	if (rc < 0)
		dev_err(kp->dev, "failed to scan matrix\n");

	return IRQ_HANDLED;
}

/*
 * Program the controller's matrix dimensions (KEYP_CTRL) and timing
 * parameters (KEYP_SCAN): debounce, scan pause and row hold time.
 * Platform data has already been range-checked in probe.
 */
static int __devinit pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
{
	int bits, rc, cycles;
	u8 scan_val = 0, ctrl_val = 0;
	static const u8 row_bits[] = {
		0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7,
	};

	/* Find column bits */
	if (kp->pdata->num_cols < KEYP_CTRL_SCAN_COLS_MIN)
		bits = 0;
	else
		bits = kp->pdata->num_cols - KEYP_CTRL_SCAN_COLS_MIN;
	ctrl_val = (bits & KEYP_CTRL_SCAN_COLS_BITS) <<
		KEYP_CTRL_SCAN_COLS_SHIFT;

	/* Find row bits */
	if (kp->pdata->num_rows < KEYP_CTRL_SCAN_ROWS_MIN)
		bits = 0;
	else
		bits = row_bits[kp->pdata->num_rows - KEYP_CTRL_SCAN_ROWS_MIN];

	ctrl_val |= (bits << KEYP_CTRL_SCAN_ROWS_SHIFT);

	rc = pmic8xxx_kp_write_u8(kp, ctrl_val, KEYP_CTRL);
	if (rc < 0) {
		dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc);
		return rc;
	}

	/* debounce_ms is a multiple of 5 ms (validated in probe) */
	bits = (kp->pdata->debounce_ms / 5) - 1;

	scan_val |= (bits << KEYP_SCAN_DBOUNCE_SHIFT);

	/* scan_delay_ms is a power of two; store its log2 */
	bits = fls(kp->pdata->scan_delay_ms) - 1;
	scan_val |= (bits << KEYP_SCAN_PAUSE_SHIFT);

	/* Row hold time is a multiple of 32KHz cycles. */
	cycles = (kp->pdata->row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC;

	scan_val |= (cycles << KEYP_SCAN_ROW_HOLD_SHIFT);

	rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
	if (rc)
		dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);

	return rc;

}

/* Apply @gpio_config to @num_gpios consecutive PMIC GPIOs from @gpio_start. */
static int __devinit pmic8xxx_kp_config_gpio(int gpio_start, int num_gpios,
			struct pmic8xxx_kp *kp, struct pm_gpio *gpio_config)
{
	int	rc, i;

	if (gpio_start < 0 || num_gpios < 0)
		return -EINVAL;

	for (i = 0; i < num_gpios; i++) {
		rc = pm8xxx_gpio_config(gpio_start + i, gpio_config);
		if (rc) {
			dev_err(kp->dev, "%s: FAIL pm8xxx_gpio_config():"
					"for PM GPIO [%d] rc=%d.\n",
					__func__, gpio_start + i, rc);
			return rc;
		}
	 }

	return 0;
}

/* Set the keypad-enable bit in the cached control register and write it. */
static int pmic8xxx_kp_enable(struct pmic8xxx_kp *kp)
{
	int rc;

	kp->ctrl_reg |= KEYP_CTRL_KEYP_EN;

	rc = pmic8xxx_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL);
	if (rc < 0)
		dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc);

	return rc;
}

/* Clear the keypad-enable bit and write the control register back. */
static int pmic8xxx_kp_disable(struct pmic8xxx_kp *kp)
{
	int rc;

	kp->ctrl_reg &= ~KEYP_CTRL_KEYP_EN;

	rc = pmic8xxx_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL);
	/* NOTE(review): the branch below is redundant (both paths return rc) */
	if (rc < 0)
		return rc;

	return rc;
}

/* input_dev open hook: turn the keypad controller on. */
static int pmic8xxx_kp_open(struct input_dev *dev)
{
	struct pmic8xxx_kp *kp = input_get_drvdata(dev);

	return pmic8xxx_kp_enable(kp);
}

/* input_dev close hook: turn the keypad controller off. */
static void pmic8xxx_kp_close(struct input_dev *dev)
{
	struct pmic8xxx_kp *kp = input_get_drvdata(dev);

	pmic8xxx_kp_disable(kp);
}

/*
 * keypad controller should be initialized in the following
sequence * only, otherwise it might get into FSM stuck state. * * - Initialize keypad control parameters, like no. of rows, columns, * timing values etc., * - configure rows and column gpios pull up/down. * - set irq edge type. * - enable the keypad controller. */ static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev) { const struct pm8xxx_keypad_platform_data *pdata = dev_get_platdata(&pdev->dev); const struct matrix_keymap_data *keymap_data; struct pmic8xxx_kp *kp; int rc; u8 ctrl_val; struct pm_gpio kypd_drv = { .direction = PM_GPIO_DIR_OUT, .output_buffer = PM_GPIO_OUT_BUF_OPEN_DRAIN, .output_value = 0, .pull = PM_GPIO_PULL_NO, .vin_sel = PM_GPIO_VIN_S3, .out_strength = PM_GPIO_STRENGTH_LOW, .function = PM_GPIO_FUNC_1, .inv_int_pol = 1, }; struct pm_gpio kypd_sns = { .direction = PM_GPIO_DIR_IN, .pull = PM_GPIO_PULL_UP_31P5, .vin_sel = PM_GPIO_VIN_S3, .out_strength = PM_GPIO_STRENGTH_NO, .function = PM_GPIO_FUNC_NORMAL, .inv_int_pol = 1, }; if (!pdata || !pdata->num_cols || !pdata->num_rows || pdata->num_cols > PM8XXX_MAX_COLS || pdata->num_rows > PM8XXX_MAX_ROWS || pdata->num_cols < PM8XXX_MIN_COLS) { dev_err(&pdev->dev, "invalid platform data\n"); return -EINVAL; } if (!pdata->scan_delay_ms || pdata->scan_delay_ms > MAX_SCAN_DELAY || pdata->scan_delay_ms < MIN_SCAN_DELAY || !is_power_of_2(pdata->scan_delay_ms)) { dev_err(&pdev->dev, "invalid keypad scan time supplied\n"); return -EINVAL; } if (!pdata->row_hold_ns || pdata->row_hold_ns > MAX_ROW_HOLD_DELAY || pdata->row_hold_ns < MIN_ROW_HOLD_DELAY || ((pdata->row_hold_ns % MIN_ROW_HOLD_DELAY) != 0)) { dev_err(&pdev->dev, "invalid keypad row hold time supplied\n"); return -EINVAL; } if (!pdata->debounce_ms || ((pdata->debounce_ms % 5) != 0) || pdata->debounce_ms > MAX_DEBOUNCE_TIME || pdata->debounce_ms < MIN_DEBOUNCE_TIME) { dev_err(&pdev->dev, "invalid debounce time supplied\n"); return -EINVAL; } keymap_data = pdata->keymap_data; if (!keymap_data) { dev_err(&pdev->dev, "no keymap data 
supplied\n"); return -EINVAL; } kp = kzalloc(sizeof(*kp), GFP_KERNEL); if (!kp) return -ENOMEM; platform_set_drvdata(pdev, kp); kp->pdata = pdata; kp->dev = &pdev->dev; kp->input = input_allocate_device(); if (!kp->input) { dev_err(&pdev->dev, "unable to allocate input device\n"); rc = -ENOMEM; goto err_alloc_device; } kp->key_sense_irq = platform_get_irq(pdev, 0); if (kp->key_sense_irq < 0) { dev_err(&pdev->dev, "unable to get keypad sense irq\n"); rc = -ENXIO; goto err_get_irq; } kp->key_stuck_irq = platform_get_irq(pdev, 1); if (kp->key_stuck_irq < 0) { dev_err(&pdev->dev, "unable to get keypad stuck irq\n"); rc = -ENXIO; goto err_get_irq; } kp->input->name = pdata->input_name ? : "PMIC8XXX keypad"; kp->input->phys = pdata->input_phys_device ? : "pmic8xxx_keypad/input0"; kp->input->dev.parent = &pdev->dev; kp->input->id.bustype = BUS_I2C; kp->input->id.version = 0x0001; kp->input->id.product = 0x0001; kp->input->id.vendor = 0x0001; kp->input->evbit[0] = BIT_MASK(EV_KEY); if (pdata->rep) __set_bit(EV_REP, kp->input->evbit); kp->input->keycode = kp->keycodes; kp->input->keycodemax = PM8XXX_MATRIX_MAX_SIZE; kp->input->keycodesize = sizeof(kp->keycodes); kp->input->open = pmic8xxx_kp_open; kp->input->close = pmic8xxx_kp_close; matrix_keypad_build_keymap(keymap_data, PM8XXX_ROW_SHIFT, kp->input->keycode, kp->input->keybit); input_set_capability(kp->input, EV_MSC, MSC_SCAN); input_set_drvdata(kp->input, kp); /* initialize keypad state */ memset(kp->keystate, 0xff, sizeof(kp->keystate)); memset(kp->stuckstate, 0xff, sizeof(kp->stuckstate)); rc = pmic8xxx_kpd_init(kp); if (rc < 0) { dev_err(&pdev->dev, "unable to initialize keypad controller\n"); goto err_get_irq; } rc = pmic8xxx_kp_config_gpio(pdata->cols_gpio_start, pdata->num_cols, kp, &kypd_sns); if (rc < 0) { dev_err(&pdev->dev, "unable to configure keypad sense lines\n"); goto err_gpio_config; } rc = pmic8xxx_kp_config_gpio(pdata->rows_gpio_start, pdata->num_rows, kp, &kypd_drv); if (rc < 0) { dev_err(&pdev->dev, 
"unable to configure keypad drive lines\n"); goto err_gpio_config; } rc = request_any_context_irq(kp->key_sense_irq, pmic8xxx_kp_irq, IRQF_TRIGGER_RISING, "pmic-keypad", kp); if (rc < 0) { dev_err(&pdev->dev, "failed to request keypad sense irq\n"); goto err_get_irq; } rc = request_any_context_irq(kp->key_stuck_irq, pmic8xxx_kp_stuck_irq, IRQF_TRIGGER_RISING, "pmic-keypad-stuck", kp); if (rc < 0) { dev_err(&pdev->dev, "failed to request keypad stuck irq\n"); goto err_req_stuck_irq; } rc = pmic8xxx_kp_read_u8(kp, &ctrl_val, KEYP_CTRL); if (rc < 0) { dev_err(&pdev->dev, "failed to read KEYP_CTRL register\n"); goto err_pmic_reg_read; } kp->ctrl_reg = ctrl_val; rc = input_register_device(kp->input); if (rc < 0) { dev_err(&pdev->dev, "unable to register keypad input device\n"); goto err_pmic_reg_read; } device_init_wakeup(&pdev->dev, pdata->wakeup); return 0; err_pmic_reg_read: free_irq(kp->key_stuck_irq, NULL); err_req_stuck_irq: free_irq(kp->key_sense_irq, NULL); err_gpio_config: err_get_irq: input_free_device(kp->input); err_alloc_device: platform_set_drvdata(pdev, NULL); kfree(kp); return rc; } static int __devexit pmic8xxx_kp_remove(struct platform_device *pdev) { struct pmic8xxx_kp *kp = platform_get_drvdata(pdev); device_init_wakeup(&pdev->dev, 0); free_irq(kp->key_stuck_irq, NULL); free_irq(kp->key_sense_irq, NULL); input_unregister_device(kp->input); kfree(kp); platform_set_drvdata(pdev, NULL); return 0; } #ifdef CONFIG_PM_SLEEP static int pmic8xxx_kp_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pmic8xxx_kp *kp = platform_get_drvdata(pdev); struct input_dev *input_dev = kp->input; if (device_may_wakeup(dev)) { enable_irq_wake(kp->key_sense_irq); } else { mutex_lock(&input_dev->mutex); if (input_dev->users) pmic8xxx_kp_disable(kp); mutex_unlock(&input_dev->mutex); } return 0; } static int pmic8xxx_kp_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pmic8xxx_kp *kp = 
platform_get_drvdata(pdev); struct input_dev *input_dev = kp->input; if (device_may_wakeup(dev)) { disable_irq_wake(kp->key_sense_irq); } else { mutex_lock(&input_dev->mutex); if (input_dev->users) pmic8xxx_kp_enable(kp); mutex_unlock(&input_dev->mutex); } return 0; } #endif static SIMPLE_DEV_PM_OPS(pm8xxx_kp_pm_ops, pmic8xxx_kp_suspend, pmic8xxx_kp_resume); static struct platform_driver pmic8xxx_kp_driver = { .probe = pmic8xxx_kp_probe, .remove = __devexit_p(pmic8xxx_kp_remove), .driver = { .name = PM8XXX_KEYPAD_DEV_NAME, .owner = THIS_MODULE, .pm = &pm8xxx_kp_pm_ops, }, }; static int __init pmic8xxx_kp_init(void) { return platform_driver_register(&pmic8xxx_kp_driver); } module_init(pmic8xxx_kp_init); static void __exit pmic8xxx_kp_exit(void) { platform_driver_unregister(&pmic8xxx_kp_driver); } module_exit(pmic8xxx_kp_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PMIC8XXX keypad driver"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:pmic8xxx_keypad"); MODULE_AUTHOR("Trilok Soni <tsoni@codeaurora.org>");
gpl-2.0
aryabinin/linux
drivers/media/usb/stk1160/stk1160-ac97.c
1823
3970
/* * STK1160 driver * * Copyright (C) 2012 Ezequiel Garcia * <elezegarcia--a.t--gmail.com> * * Based on Easycap driver by R.M. Thomas * Copyright (C) 2010 R.M. Thomas * <rmthomas--a.t--sciolus.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/ac97_codec.h> #include "stk1160.h" #include "stk1160-reg.h" static struct snd_ac97 *stk1160_ac97; static void stk1160_write_ac97(struct snd_ac97 *ac97, u16 reg, u16 value) { struct stk1160 *dev = ac97->private_data; /* Set codec register address */ stk1160_write_reg(dev, STK1160_AC97_ADDR, reg); /* Set codec command */ stk1160_write_reg(dev, STK1160_AC97_CMD, value & 0xff); stk1160_write_reg(dev, STK1160_AC97_CMD + 1, (value & 0xff00) >> 8); /* * Set command write bit to initiate write operation. * The bit will be cleared when transfer is done. */ stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x8c); } static u16 stk1160_read_ac97(struct snd_ac97 *ac97, u16 reg) { struct stk1160 *dev = ac97->private_data; u8 vall = 0; u8 valh = 0; /* Set codec register address */ stk1160_write_reg(dev, STK1160_AC97_ADDR, reg); /* * Set command read bit to initiate read operation. * The bit will be cleared when transfer is done. 
*/ stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x8b); /* Retrieve register value */ stk1160_read_reg(dev, STK1160_AC97_CMD, &vall); stk1160_read_reg(dev, STK1160_AC97_CMD + 1, &valh); return (valh << 8) | vall; } static void stk1160_reset_ac97(struct snd_ac97 *ac97) { struct stk1160 *dev = ac97->private_data; /* Two-step reset AC97 interface and hardware codec */ stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x94); stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x88); /* Set 16-bit audio data and choose L&R channel*/ stk1160_write_reg(dev, STK1160_AC97CTL_1 + 2, 0x01); } static struct snd_ac97_bus_ops stk1160_ac97_ops = { .read = stk1160_read_ac97, .write = stk1160_write_ac97, .reset = stk1160_reset_ac97, }; int stk1160_ac97_register(struct stk1160 *dev) { struct snd_card *card = NULL; struct snd_ac97_bus *ac97_bus; struct snd_ac97_template ac97_template; int rc; /* * Just want a card to access ac96 controls, * the actual capture interface will be handled by snd-usb-audio */ rc = snd_card_new(dev->dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, THIS_MODULE, 0, &card); if (rc < 0) return rc; /* TODO: I'm not sure where should I get these names :-( */ snprintf(card->shortname, sizeof(card->shortname), "stk1160-mixer"); snprintf(card->longname, sizeof(card->longname), "stk1160 ac97 codec mixer control"); strlcpy(card->driver, dev->dev->driver->name, sizeof(card->driver)); rc = snd_ac97_bus(card, 0, &stk1160_ac97_ops, NULL, &ac97_bus); if (rc) goto err; /* We must set private_data before calling snd_ac97_mixer */ memset(&ac97_template, 0, sizeof(ac97_template)); ac97_template.private_data = dev; ac97_template.scaps = AC97_SCAP_SKIP_MODEM; rc = snd_ac97_mixer(ac97_bus, &ac97_template, &stk1160_ac97); if (rc) goto err; dev->snd_card = card; rc = snd_card_register(card); if (rc) goto err; return 0; err: dev->snd_card = NULL; snd_card_free(card); return rc; } int stk1160_ac97_unregister(struct stk1160 *dev) { struct snd_card *card = dev->snd_card; /* * We need to check usb_device, * 
because ac97 release attempts to communicate with codec */ if (card && dev->udev) snd_card_free(card); return 0; }
gpl-2.0
ztemt/V5s_N918St_KitKat_kernel
drivers/acpi/acpica/rscalc.c
2079
20545
/*******************************************************************************
 *
 * Module Name: rscalc - Calculate stream and list lengths
 *
 ******************************************************************************/

/*
 * Copyright (C) 2000 - 2013, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_RESOURCES
ACPI_MODULE_NAME("rscalc")

/* Local prototypes */
static u8 acpi_rs_count_set_bits(u16 bit_field);

static acpi_rs_length
acpi_rs_struct_option_length(struct acpi_resource_source *resource_source);

/*
 * NOTE(review): the definition below names this parameter
 * minimum_aml_resource_length; the prototype says minimum_total_length.
 * Harmless, but worth unifying upstream.
 */
static u32
acpi_rs_stream_option_length(u32 resource_length, u32 minimum_total_length);

/*******************************************************************************
 *
 * FUNCTION:    acpi_rs_count_set_bits
 *
 * PARAMETERS:  bit_field       - Field in which to count bits
 *
 * RETURN:      Number of bits set within the field
 *
 * DESCRIPTION: Count the number of bits set in a resource field. Used for
 *              (Short descriptor) interrupt and DMA lists.
 *
 ******************************************************************************/

static u8 acpi_rs_count_set_bits(u16 bit_field)
{
	u8 bits_set;

	ACPI_FUNCTION_ENTRY();

	for (bits_set = 0; bit_field; bits_set++) {

		/* Zero the least significant bit that is set */

		bit_field &= (u16) (bit_field - 1);
	}

	return (bits_set);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_rs_struct_option_length
 *
 * PARAMETERS:  resource_source     - Pointer to optional descriptor field
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Common code to handle optional resource_source_index and
 *              resource_source fields in some Large descriptors. Used during
 *              list-to-stream conversion
 *
 ******************************************************************************/

static acpi_rs_length
acpi_rs_struct_option_length(struct acpi_resource_source *resource_source)
{
	ACPI_FUNCTION_ENTRY();

	/*
	 * If the resource_source string is valid, return the size of the string
	 * (string_length includes the NULL terminator) plus the size of the
	 * resource_source_index (1).
	 */
	if (resource_source->string_ptr) {
		return ((acpi_rs_length) (resource_source->string_length + 1));
	}

	return (0);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_rs_stream_option_length
 *
 * PARAMETERS:  resource_length     - Length from the resource header
 *              minimum_total_length - Minimum length of this resource, before
 *                                    any optional fields. Includes header size
 *
 * RETURN:      Length of optional string (0 if no string present)
 *
 * DESCRIPTION: Common code to handle optional resource_source_index and
 *              resource_source fields in some Large descriptors. Used during
 *              stream-to-list conversion
 *
 ******************************************************************************/

static u32
acpi_rs_stream_option_length(u32 resource_length,
			     u32 minimum_aml_resource_length)
{
	u32 string_length = 0;

	ACPI_FUNCTION_ENTRY();

	/*
	 * The resource_source_index and resource_source are optional elements of some
	 * Large-type resource descriptors.
	 */

	/*
	 * If the length of the actual resource descriptor is greater than the ACPI
	 * spec-defined minimum length, it means that a resource_source_index exists
	 * and is followed by a (required) null terminated string. The string length
	 * (including the null terminator) is the resource length minus the minimum
	 * length, minus one byte for the resource_source_index itself.
	 */
	if (resource_length > minimum_aml_resource_length) {

		/* Compute the length of the optional string */

		string_length =
		    resource_length - minimum_aml_resource_length - 1;
	}

	/*
	 * Round the length up to a multiple of the native word in order to
	 * guarantee that the entire resource descriptor is native word aligned
	 */
	return ((u32) ACPI_ROUND_UP_TO_NATIVE_WORD(string_length));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_rs_get_aml_length
 *
 * PARAMETERS:  resource            - Pointer to the resource linked list
 *              size_needed         - Where the required size is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Takes a linked list of internal resource descriptors and
 *              calculates the size buffer needed to hold the corresponding
 *              external resource byte stream.
 *
 ******************************************************************************/

acpi_status
acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
{
	acpi_size aml_size_needed = 0;
	acpi_rs_length total_size;

	ACPI_FUNCTION_TRACE(rs_get_aml_length);

	/* Traverse entire list of internal resource descriptors */

	while (resource) {

		/* Validate the descriptor type */

		if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
			return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
		}

		/* Sanity check the length. It must not be zero, or we loop forever */

		if (!resource->length) {
			return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
		}

		/* Get the base size of the (external stream) resource descriptor */

		total_size = acpi_gbl_aml_resource_sizes[resource->type];

		/*
		 * Augment the base size for descriptors with optional and/or
		 * variable-length fields
		 */
		switch (resource->type) {
		case ACPI_RESOURCE_TYPE_IRQ:

			/* Length can be 3 or 2 */

			if (resource->data.irq.descriptor_length == 2) {
				total_size--;
			}
			break;

		case ACPI_RESOURCE_TYPE_START_DEPENDENT:

			/* Length can be 1 or 0 */
			/*
			 * NOTE(review): reads data.irq.descriptor_length for a
			 * StartDependent descriptor - presumably the two union
			 * members share this field's layout; confirm vs ACPICA.
			 */
			if (resource->data.irq.descriptor_length == 0) {
				total_size--;
			}
			break;

		case ACPI_RESOURCE_TYPE_VENDOR:
			/*
			 * Vendor Defined Resource:
			 * For a Vendor Specific resource, if the Length is between 1 and 7
			 * it will be created as a Small Resource data type, otherwise it
			 * is a Large Resource data type.
			 */
			if (resource->data.vendor.byte_length > 7) {

				/* Base size of a Large resource descriptor */

				total_size =
				    sizeof(struct aml_resource_large_header);
			}

			/* Add the size of the vendor-specific data */

			total_size = (acpi_rs_length)
			    (total_size + resource->data.vendor.byte_length);
			break;

		case ACPI_RESOURCE_TYPE_END_TAG:
			/*
			 * End Tag:
			 * We are done -- return the accumulated total size.
			 */
			*size_needed = aml_size_needed + total_size;

			/* Normal exit */

			return_ACPI_STATUS(AE_OK);

		case ACPI_RESOURCE_TYPE_ADDRESS16:
			/*
			 * 16-Bit Address Resource:
			 * Add the size of the optional resource_source info
			 */
			total_size = (acpi_rs_length)
			    (total_size +
			     acpi_rs_struct_option_length(&resource->data.
							  address16.
							  resource_source));
			break;

		case ACPI_RESOURCE_TYPE_ADDRESS32:
			/*
			 * 32-Bit Address Resource:
			 * Add the size of the optional resource_source info
			 */
			total_size = (acpi_rs_length)
			    (total_size +
			     acpi_rs_struct_option_length(&resource->data.
							  address32.
							  resource_source));
			break;

		case ACPI_RESOURCE_TYPE_ADDRESS64:
			/*
			 * 64-Bit Address Resource:
			 * Add the size of the optional resource_source info
			 */
			total_size = (acpi_rs_length)
			    (total_size +
			     acpi_rs_struct_option_length(&resource->data.
							  address64.
							  resource_source));
			break;

		case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
			/*
			 * Extended IRQ Resource:
			 * Add the size of each additional optional interrupt beyond the
			 * required 1 (4 bytes for each u32 interrupt number)
			 */
			total_size = (acpi_rs_length)
			    (total_size +
			     ((resource->data.extended_irq.interrupt_count -
			       1) * 4) +
			     /* Add the size of the optional resource_source info */
			     acpi_rs_struct_option_length(&resource->data.
							  extended_irq.
							  resource_source));
			break;

		case ACPI_RESOURCE_TYPE_GPIO:

			total_size =
			    (acpi_rs_length) (total_size +
					      (resource->data.gpio.
					       pin_table_length * 2) +
					      resource->data.gpio.
					      resource_source.string_length +
					      resource->data.gpio.
					      vendor_length);

			break;

		case ACPI_RESOURCE_TYPE_SERIAL_BUS:

			total_size =
			    acpi_gbl_aml_resource_serial_bus_sizes[resource->
								   data.
								   common_serial_bus.
								   type];

			total_size = (acpi_rs_length) (total_size +
						       resource->data.
						       i2c_serial_bus.
						       resource_source.
						       string_length +
						       resource->data.
						       i2c_serial_bus.
						       vendor_length);

			break;

		default:
			break;
		}

		/* Update the total */

		aml_size_needed += total_size;

		/* Point to the next object */

		resource =
		    ACPI_ADD_PTR(struct acpi_resource, resource,
				 resource->length);
	}

	/* Did not find an end_tag resource descriptor */

	return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_rs_get_list_length
 *
 * PARAMETERS:  aml_buffer          - Pointer to the resource byte stream
 *              aml_buffer_length   - Size of aml_buffer
 *              size_needed         - Where the size needed is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Takes an external resource byte stream and calculates the size
 *              buffer needed to hold the corresponding internal resource
 *              descriptor linked list.
 * ****************************************************************************/

acpi_status
acpi_rs_get_list_length(u8 * aml_buffer,
			u32 aml_buffer_length, acpi_size * size_needed)
{
	acpi_status status;
	u8 *end_aml;
	u8 *buffer;
	u32 buffer_size;
	u16 temp16;
	u16 resource_length;
	u32 extra_struct_bytes;
	u8 resource_index;
	u8 minimum_aml_resource_length;
	union aml_resource *aml_resource;

	ACPI_FUNCTION_TRACE(rs_get_list_length);

	/* Minimum size is one end_tag */

	*size_needed = ACPI_RS_SIZE_MIN;
	end_aml = aml_buffer + aml_buffer_length;

	/* Walk the list of AML resource descriptors */

	while (aml_buffer < end_aml) {

		/* Validate the Resource Type and Resource Length */

		status =
		    acpi_ut_validate_resource(NULL, aml_buffer,
					      &resource_index);
		if (ACPI_FAILURE(status)) {
			/*
			 * Exit on failure. Cannot continue because the descriptor length
			 * may be bogus also.
			 */
			return_ACPI_STATUS(status);
		}

		aml_resource = (void *)aml_buffer;

		/* Get the resource length and base (minimum) AML size */

		resource_length = acpi_ut_get_resource_length(aml_buffer);
		minimum_aml_resource_length =
		    acpi_gbl_resource_aml_sizes[resource_index];

		/*
		 * Augment the size for descriptors with optional
		 * and/or variable length fields
		 */
		extra_struct_bytes = 0;
		buffer =
		    aml_buffer + acpi_ut_get_resource_header_length(aml_buffer);

		switch (acpi_ut_get_resource_type(aml_buffer)) {
		case ACPI_RESOURCE_NAME_IRQ:
			/*
			 * IRQ Resource:
			 * Get the number of bits set in the 16-bit IRQ mask
			 */
			ACPI_MOVE_16_TO_16(&temp16, buffer);
			extra_struct_bytes = acpi_rs_count_set_bits(temp16);
			break;

		case ACPI_RESOURCE_NAME_DMA:
			/*
			 * DMA Resource:
			 * Get the number of bits set in the 8-bit DMA mask
			 */
			extra_struct_bytes = acpi_rs_count_set_bits(*buffer);
			break;

		case ACPI_RESOURCE_NAME_VENDOR_SMALL:
		case ACPI_RESOURCE_NAME_VENDOR_LARGE:
			/*
			 * Vendor Resource:
			 * Get the number of vendor data bytes
			 */
			extra_struct_bytes = resource_length;

			/*
			 * There is already one byte included in the minimum
			 * descriptor size. If there are extra struct bytes,
			 * subtract one from the count.
			 */
			if (extra_struct_bytes) {
				extra_struct_bytes--;
			}
			break;

		case ACPI_RESOURCE_NAME_END_TAG:
			/*
			 * End Tag: This is the normal exit
			 */
			return_ACPI_STATUS(AE_OK);

		case ACPI_RESOURCE_NAME_ADDRESS32:
		case ACPI_RESOURCE_NAME_ADDRESS16:
		case ACPI_RESOURCE_NAME_ADDRESS64:
			/*
			 * Address Resource:
			 * Add the size of the optional resource_source
			 */
			extra_struct_bytes =
			    acpi_rs_stream_option_length(resource_length,
							 minimum_aml_resource_length);
			break;

		case ACPI_RESOURCE_NAME_EXTENDED_IRQ:
			/*
			 * Extended IRQ Resource:
			 * Using the interrupt_table_length, add 4 bytes for each additional
			 * interrupt. Note: at least one interrupt is required and is
			 * included in the minimum descriptor size (reason for the -1)
			 */
			extra_struct_bytes = (buffer[1] - 1) * sizeof(u32);

			/* Add the size of the optional resource_source */

			extra_struct_bytes +=
			    acpi_rs_stream_option_length(resource_length -
							 extra_struct_bytes,
							 minimum_aml_resource_length);
			break;

		case ACPI_RESOURCE_NAME_GPIO:

			/* Vendor data is optional */

			if (aml_resource->gpio.vendor_length) {
				extra_struct_bytes +=
				    aml_resource->gpio.vendor_offset -
				    aml_resource->gpio.pin_table_offset +
				    aml_resource->gpio.vendor_length;
			} else {
				extra_struct_bytes +=
				    aml_resource->large_header.resource_length +
				    sizeof(struct aml_resource_large_header) -
				    aml_resource->gpio.pin_table_offset;
			}
			break;

		case ACPI_RESOURCE_NAME_SERIAL_BUS:

			/*
			 * NOTE(review): common_serial_bus.type indexes the
			 * per-type size tables below; it is assumed to have
			 * been range-checked by acpi_ut_validate_resource()
			 * above -- confirm against the ACPICA validation path.
			 */
			minimum_aml_resource_length =
			    acpi_gbl_resource_aml_serial_bus_sizes
			    [aml_resource->common_serial_bus.type];
			extra_struct_bytes +=
			    aml_resource->common_serial_bus.resource_length -
			    minimum_aml_resource_length;
			break;

		default:
			break;
		}

		/*
		 * Update the required buffer size for the internal descriptor structs
		 *
		 * Important: Round the size up for the appropriate alignment. This
		 * is a requirement on IA64.
		 */
		if (acpi_ut_get_resource_type(aml_buffer) ==
		    ACPI_RESOURCE_NAME_SERIAL_BUS) {
			buffer_size =
			    acpi_gbl_resource_struct_serial_bus_sizes
			    [aml_resource->common_serial_bus.type] +
			    extra_struct_bytes;
		} else {
			buffer_size =
			    acpi_gbl_resource_struct_sizes[resource_index] +
			    extra_struct_bytes;
		}
		buffer_size = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);

		*size_needed += buffer_size;

		ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
				  "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
				  acpi_ut_get_resource_type(aml_buffer),
				  acpi_ut_get_descriptor_length(aml_buffer),
				  buffer_size));

		/*
		 * Point to the next resource within the AML stream using the length
		 * contained in the resource descriptor header
		 */
		aml_buffer += acpi_ut_get_descriptor_length(aml_buffer);
	}

	/* Did not find an end_tag resource descriptor */

	return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_rs_get_pci_routing_table_length
 *
 * PARAMETERS:  package_object          - Pointer to the package object
 *              buffer_size_needed      - u32 pointer of the size buffer
 *                                        needed to properly return the
 *                                        parsed data
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Given a package representing a PCI routing table, this
 *              calculates the size of the corresponding linked list of
 *              descriptions.
 *
 ******************************************************************************/

acpi_status
acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
				     acpi_size * buffer_size_needed)
{
	u32 number_of_elements;
	acpi_size temp_size_needed = 0;
	union acpi_operand_object **top_object_list;
	u32 index;
	union acpi_operand_object *package_element;
	union acpi_operand_object **sub_object_list;
	u8 name_found;
	u32 table_index;

	ACPI_FUNCTION_TRACE(rs_get_pci_routing_table_length);

	number_of_elements = package_object->package.count;

	/*
	 * Calculate the size of the return buffer.
	 * The base size is the number of elements * the sizes of the
	 * structures. Additional space for the strings is added below.
	 * The minus one is to subtract the size of the u8 Source[1]
	 * member because it is added below.
	 *
	 * But each PRT_ENTRY structure has a pointer to a string and
	 * the size of that string must be found.
	 */
	top_object_list = package_object->package.elements;

	for (index = 0; index < number_of_elements; index++) {

		/* Dereference the sub-package */

		package_element = *top_object_list;

		/* We must have a valid Package object */

		if (!package_element ||
		    (package_element->common.type != ACPI_TYPE_PACKAGE)) {
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}

		/*
		 * The sub_object_list will now point to an array of the
		 * four IRQ elements: Address, Pin, Source and source_index
		 */
		sub_object_list = package_element->package.elements;

		/* Scan the irq_table_elements for the Source Name String */

		name_found = FALSE;

		for (table_index = 0; table_index < 4 && !name_found;
		     table_index++) {
			if (*sub_object_list &&	/* Null object allowed */
			    ((ACPI_TYPE_STRING ==
			      (*sub_object_list)->common.type) ||
			     ((ACPI_TYPE_LOCAL_REFERENCE ==
			       (*sub_object_list)->common.type) &&
			      ((*sub_object_list)->reference.class ==
			       ACPI_REFCLASS_NAME)))) {
				name_found = TRUE;
			} else {
				/* Look at the next element */

				sub_object_list++;
			}
		}

		temp_size_needed += (sizeof(struct acpi_pci_routing_table) - 4);

		/* Was a String type found? */

		if (name_found) {
			if ((*sub_object_list)->common.type == ACPI_TYPE_STRING) {
				/*
				 * The length String.Length field does not include the
				 * terminating NULL, add 1
				 */
				temp_size_needed += ((acpi_size)
						     (*sub_object_list)->string.
						     length + 1);
			} else {
				temp_size_needed +=
				    acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
			}
		} else {
			/*
			 * If no name was found, then this is a NULL, which is
			 * translated as a u32 zero.
			 */
			temp_size_needed += sizeof(u32);
		}

		/* Round up the size since each element must be aligned */

		temp_size_needed = ACPI_ROUND_UP_TO_64BIT(temp_size_needed);

		/* Point to the next union acpi_operand_object */

		top_object_list++;
	}

	/*
	 * Add an extra element to the end of the list, essentially a
	 * NULL terminator
	 */
	*buffer_size_needed =
	    temp_size_needed + sizeof(struct acpi_pci_routing_table);
	return_ACPI_STATUS(AE_OK);
}
gpl-2.0
Tof37/Kernel-ES209RA-3.0.8
fs/attr.c
2335
6938
/* * linux/fs/attr.c * * Copyright (C) 1991, 1992 Linus Torvalds * changes by Thomas Schoebel-Theuer */ #include <linux/module.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/capability.h> #include <linux/fsnotify.h> #include <linux/fcntl.h> #include <linux/security.h> /** * inode_change_ok - check if attribute changes to an inode are allowed * @inode: inode to check * @attr: attributes to change * * Check if we are allowed to change the attributes contained in @attr * in the given inode. This includes the normal unix access permission * checks, as well as checks for rlimits and others. * * Should be called as the first thing in ->setattr implementations, * possibly after taking additional locks. */ int inode_change_ok(const struct inode *inode, struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; /* * First check size constraints. These can't be overriden using * ATTR_FORCE. */ if (ia_valid & ATTR_SIZE) { int error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; } /* If force is set do it anyway. */ if (ia_valid & ATTR_FORCE) return 0; /* Make sure a caller can chown. */ if ((ia_valid & ATTR_UID) && (current_fsuid() != inode->i_uid || attr->ia_uid != inode->i_uid) && !capable(CAP_CHOWN)) return -EPERM; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && (current_fsuid() != inode->i_uid || (!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid)) && !capable(CAP_CHOWN)) return -EPERM; /* Make sure a caller can chmod. */ if (ia_valid & ATTR_MODE) { if (!inode_owner_or_capable(inode)) return -EPERM; /* Also check the setgid bit! */ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : inode->i_gid) && !capable(CAP_FSETID)) attr->ia_mode &= ~S_ISGID; } /* Check for setting the inode time. 
*/ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) { if (!inode_owner_or_capable(inode)) return -EPERM; } return 0; } EXPORT_SYMBOL(inode_change_ok); /** * inode_newsize_ok - may this inode be truncated to a given size * @inode: the inode to be truncated * @offset: the new size to assign to the inode * @Returns: 0 on success, -ve errno on failure * * inode_newsize_ok must be called with i_mutex held. * * inode_newsize_ok will check filesystem limits and ulimits to check that the * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ * when necessary. Caller must not proceed with inode size change if failure is * returned. @inode must be a file (not directory), with appropriate * permissions to allow truncate (inode_newsize_ok does NOT check these * conditions). */ int inode_newsize_ok(const struct inode *inode, loff_t offset) { if (inode->i_size < offset) { unsigned long limit; limit = rlimit(RLIMIT_FSIZE); if (limit != RLIM_INFINITY && offset > limit) goto out_sig; if (offset > inode->i_sb->s_maxbytes) goto out_big; } else { /* * truncation of in-use swapfiles is disallowed - it would * cause subsequent swapout to scribble on the now-freed * blocks. */ if (IS_SWAPFILE(inode)) return -ETXTBSY; } return 0; out_sig: send_sig(SIGXFSZ, current, 0); out_big: return -EFBIG; } EXPORT_SYMBOL(inode_newsize_ok); /** * setattr_copy - copy simple metadata updates into the generic inode * @inode: the inode to be updated * @attr: the new attributes * * setattr_copy must be called with i_mutex held. * * setattr_copy updates the inode's metadata with that specified * in attr. Noticeably missing is inode size update, which is more complex * as it requires pagecache updates. * * The inode is not marked as dirty after this operation. The rationale is * that for "simple" filesystems, the struct inode is the inode storage. * The caller is free to mark the inode dirty afterwards if needed. 
*/ void setattr_copy(struct inode *inode, const struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; if (ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; if (ia_valid & ATTR_ATIME) inode->i_atime = timespec_trunc(attr->ia_atime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MTIME) inode->i_mtime = timespec_trunc(attr->ia_mtime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_CTIME) inode->i_ctime = timespec_trunc(attr->ia_ctime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; inode->i_mode = mode; } } EXPORT_SYMBOL(setattr_copy); int notify_change(struct dentry * dentry, struct iattr * attr) { struct inode *inode = dentry->d_inode; mode_t mode = inode->i_mode; int error; struct timespec now; unsigned int ia_valid = attr->ia_valid; if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) { if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; } if ((ia_valid & ATTR_MODE)) { mode_t amode = attr->ia_mode; /* Flag setting protected by i_mutex */ if (is_sxid(amode)) inode->i_flags &= ~S_NOSEC; } now = current_fs_time(inode->i_sb); attr->ia_ctime = now; if (!(ia_valid & ATTR_ATIME_SET)) attr->ia_atime = now; if (!(ia_valid & ATTR_MTIME_SET)) attr->ia_mtime = now; if (ia_valid & ATTR_KILL_PRIV) { attr->ia_valid &= ~ATTR_KILL_PRIV; ia_valid &= ~ATTR_KILL_PRIV; error = security_inode_need_killpriv(dentry); if (error > 0) error = security_inode_killpriv(dentry); if (error) return error; } /* * We now pass ATTR_KILL_S*ID to the lower level setattr function so * that the function has the ability to reinterpret a mode change * that's due to these bits. This adds an implicit restriction that * no function will ever call notify_change with both ATTR_MODE and * ATTR_KILL_S*ID set. 
*/ if ((ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) && (ia_valid & ATTR_MODE)) BUG(); if (ia_valid & ATTR_KILL_SUID) { if (mode & S_ISUID) { ia_valid = attr->ia_valid |= ATTR_MODE; attr->ia_mode = (inode->i_mode & ~S_ISUID); } } if (ia_valid & ATTR_KILL_SGID) { if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) { if (!(ia_valid & ATTR_MODE)) { ia_valid = attr->ia_valid |= ATTR_MODE; attr->ia_mode = inode->i_mode; } attr->ia_mode &= ~S_ISGID; } } if (!(attr->ia_valid & ~(ATTR_KILL_SUID | ATTR_KILL_SGID))) return 0; error = security_inode_setattr(dentry, attr); if (error) return error; if (ia_valid & ATTR_SIZE) down_write(&dentry->d_inode->i_alloc_sem); if (inode->i_op->setattr) error = inode->i_op->setattr(dentry, attr); else error = simple_setattr(dentry, attr); if (ia_valid & ATTR_SIZE) up_write(&dentry->d_inode->i_alloc_sem); if (!error) fsnotify_change(dentry, ia_valid); return error; } EXPORT_SYMBOL(notify_change);
gpl-2.0
djvoleur/V_925P_BOF7
drivers/irqchip/irq-mxs.c
2591
3390
/* * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/stmp_device.h> #include <asm/exception.h> #include "irqchip.h" #define HW_ICOLL_VECTOR 0x0000 #define HW_ICOLL_LEVELACK 0x0010 #define HW_ICOLL_CTRL 0x0020 #define HW_ICOLL_STAT_OFFSET 0x0070 #define HW_ICOLL_INTERRUPTn_SET(n) (0x0124 + (n) * 0x10) #define HW_ICOLL_INTERRUPTn_CLR(n) (0x0128 + (n) * 0x10) #define BM_ICOLL_INTERRUPTn_ENABLE 0x00000004 #define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 0x1 #define ICOLL_NUM_IRQS 128 static void __iomem *icoll_base; static struct irq_domain *icoll_domain; static void icoll_ack_irq(struct irq_data *d) { /* * The Interrupt Collector is able to prioritize irqs. * Currently only level 0 is used. So acking can use * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally. 
*/ __raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0, icoll_base + HW_ICOLL_LEVELACK); } static void icoll_mask_irq(struct irq_data *d) { __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE, icoll_base + HW_ICOLL_INTERRUPTn_CLR(d->hwirq)); } static void icoll_unmask_irq(struct irq_data *d) { __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE, icoll_base + HW_ICOLL_INTERRUPTn_SET(d->hwirq)); } static struct irq_chip mxs_icoll_chip = { .irq_ack = icoll_ack_irq, .irq_mask = icoll_mask_irq, .irq_unmask = icoll_unmask_irq, }; asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) { u32 irqnr; irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET); __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR); irqnr = irq_find_mapping(icoll_domain, irqnr); handle_IRQ(irqnr, regs); } static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq); set_irq_flags(virq, IRQF_VALID); return 0; } static struct irq_domain_ops icoll_irq_domain_ops = { .map = icoll_irq_domain_map, .xlate = irq_domain_xlate_onecell, }; static void __init icoll_of_init(struct device_node *np, struct device_node *interrupt_parent) { icoll_base = of_iomap(np, 0); WARN_ON(!icoll_base); /* * Interrupt Collector reset, which initializes the priority * for each irq to level 0. */ stmp_reset_block(icoll_base + HW_ICOLL_CTRL); icoll_domain = irq_domain_add_linear(np, ICOLL_NUM_IRQS, &icoll_irq_domain_ops, NULL); WARN_ON(!icoll_domain); } IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init);
gpl-2.0
vikrant82/t320_kernel
drivers/scsi/libsas/sas_expander.c
2847
55649
/*
 * Serial Attached SCSI (SAS) Expander discovery and configuration
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "sas_internal.h"

#include <scsi/sas_ata.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "../scsi_sas_internal.h"

static int sas_discover_expander(struct domain_device *dev);
static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr);
static int sas_configure_phy(struct domain_device *dev, int phy_id,
			     u8 *sas_addr, int include);
static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);

/* ---------- SMP task management ---------- */

/*
 * Timer callback for an in-flight SMP task: if the task has not completed,
 * flag it as aborted, then wake up the waiter in smp_execute_task().
 */
static void smp_task_timedout(unsigned long _task)
{
	struct sas_task *task = (void *) _task;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->completion);
}

/*
 * LLDD completion callback: complete only if we managed to cancel the
 * pending timer; otherwise the timeout handler owns the completion.
 */
static void smp_task_done(struct sas_task *task)
{
	if (!del_timer(&task->timer))
		return;
	complete(&task->completion);
}

/*
Give it some long enough timeout. In seconds. */ #define SMP_TIMEOUT 10 static int smp_execute_task(struct domain_device *dev, void *req, int req_size, void *resp, int resp_size) { int res, retry; struct sas_task *task = NULL; struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); mutex_lock(&dev->ex_dev.cmd_mutex); for (retry = 0; retry < 3; retry++) { if (test_bit(SAS_DEV_GONE, &dev->state)) { res = -ECOMM; break; } task = sas_alloc_task(GFP_KERNEL); if (!task) { res = -ENOMEM; break; } task->dev = dev; task->task_proto = dev->tproto; sg_init_one(&task->smp_task.smp_req, req, req_size); sg_init_one(&task->smp_task.smp_resp, resp, resp_size); task->task_done = smp_task_done; task->timer.data = (unsigned long) task; task->timer.function = smp_task_timedout; task->timer.expires = jiffies + SMP_TIMEOUT*HZ; add_timer(&task->timer); res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL); if (res) { del_timer(&task->timer); SAS_DPRINTK("executing SMP task failed:%d\n", res); break; } wait_for_completion(&task->completion); res = -ECOMM; if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { SAS_DPRINTK("smp task timed out or aborted\n"); i->dft->lldd_abort_task(task); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { SAS_DPRINTK("SMP task aborted and not done\n"); break; } } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAM_STAT_GOOD) { res = 0; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_UNDERRUN) { /* no error, but return the number of bytes of * underrun */ res = task->task_status.residual; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_OVERRUN) { res = -EMSGSIZE; break; } if (task->task_status.resp == SAS_TASK_UNDELIVERED && task->task_status.stat == SAS_DEVICE_UNKNOWN) break; else { SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " "status 0x%x\n", __func__, SAS_ADDR(dev->sas_addr), 
task->task_status.resp, task->task_status.stat); sas_free_task(task); task = NULL; } } mutex_unlock(&dev->ex_dev.cmd_mutex); BUG_ON(retry == 3 && task != NULL); sas_free_task(task); return res; } /* ---------- Allocations ---------- */ static inline void *alloc_smp_req(int size) { u8 *p = kzalloc(size, GFP_KERNEL); if (p) p[0] = SMP_REQUEST; return p; } static inline void *alloc_smp_resp(int size) { return kzalloc(size, GFP_KERNEL); } static char sas_route_char(struct domain_device *dev, struct ex_phy *phy) { switch (phy->routing_attr) { case TABLE_ROUTING: if (dev->ex_dev.t2t_supp) return 'U'; else return 'T'; case DIRECT_ROUTING: return 'D'; case SUBTRACTIVE_ROUTING: return 'S'; default: return '?'; } } static enum sas_dev_type to_dev_type(struct discover_resp *dr) { /* This is detecting a failure to transmit initial dev to host * FIS as described in section J.5 of sas-2 r16 */ if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev && dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) return SATA_PENDING; else return dr->attached_dev_type; } static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) { enum sas_dev_type dev_type; enum sas_linkrate linkrate; u8 sas_addr[SAS_ADDR_SIZE]; struct smp_resp *resp = rsp; struct discover_resp *dr = &resp->disc; struct sas_ha_struct *ha = dev->port->ha; struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; struct sas_rphy *rphy = dev->rphy; bool new_phy = !phy->phy; char *type; if (new_phy) { if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))) return; phy->phy = sas_phy_alloc(&rphy->dev, phy_id); /* FIXME: error_handling */ BUG_ON(!phy->phy); } switch (resp->result) { case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; break; default: phy->phy_state = PHY_NOT_PRESENT; break; case SMP_RESP_FUNC_ACC: phy->phy_state = PHY_EMPTY; /* do not know yet */ break; } /* check if anything important changed to squelch debug */ dev_type = phy->attached_dev_type; linkrate = 
phy->linkrate; memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); phy->attached_dev_type = to_dev_type(dr); if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) goto out; phy->phy_id = phy_id; phy->linkrate = dr->linkrate; phy->attached_sata_host = dr->attached_sata_host; phy->attached_sata_dev = dr->attached_sata_dev; phy->attached_sata_ps = dr->attached_sata_ps; phy->attached_iproto = dr->iproto << 1; phy->attached_tproto = dr->tproto << 1; /* help some expanders that fail to zero sas_address in the 'no * device' case */ if (phy->attached_dev_type == NO_DEVICE || phy->linkrate < SAS_LINK_RATE_1_5_GBPS) memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); else memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); phy->attached_phy_id = dr->attached_phy_id; phy->phy_change_count = dr->change_count; phy->routing_attr = dr->routing_attr; phy->virtual = dr->virtual; phy->last_da_index = -1; phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr); phy->phy->identify.device_type = dr->attached_dev_type; phy->phy->identify.initiator_port_protocols = phy->attached_iproto; phy->phy->identify.target_port_protocols = phy->attached_tproto; if (!phy->attached_tproto && dr->attached_sata_dev) phy->phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; phy->phy->identify.phy_identifier = phy_id; phy->phy->minimum_linkrate_hw = dr->hmin_linkrate; phy->phy->maximum_linkrate_hw = dr->hmax_linkrate; phy->phy->minimum_linkrate = dr->pmin_linkrate; phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; if (new_phy) if (sas_phy_add(phy->phy)) { sas_phy_free(phy->phy); return; } out: switch (phy->attached_dev_type) { case SATA_PENDING: type = "stp pending"; break; case NO_DEVICE: type = "no device"; break; case SAS_END_DEV: if (phy->attached_iproto) { if (phy->attached_tproto) type = "host+target"; else type = "host"; } else { if (dr->attached_sata_dev) type = "stp"; else type = "ssp"; } break; case EDGE_DEV: case 
FANOUT_DEV: type = "smp"; break; default: type = "unknown"; } /* this routine is polled by libata error recovery so filter * unimportant messages */ if (new_phy || phy->attached_dev_type != dev_type || phy->linkrate != linkrate || SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr)) /* pass */; else return; /* if the attached device type changed and ata_eh is active, * make sure we run revalidation when eh completes (see: * sas_enable_revalidation) */ if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending); SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "", SAS_ADDR(dev->sas_addr), phy->phy_id, sas_route_char(dev, phy), phy->linkrate, SAS_ADDR(phy->attached_sas_addr), type); } /* check if we have an existing attached ata device on this expander phy */ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id) { struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy_id]; struct domain_device *dev; struct sas_rphy *rphy; if (!ex_phy->port) return NULL; rphy = ex_phy->port->rphy; if (!rphy) return NULL; dev = sas_find_dev_by_rphy(rphy); if (dev && dev_is_sata(dev)) return dev; return NULL; } #define DISCOVER_REQ_SIZE 16 #define DISCOVER_RESP_SIZE 56 static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req, u8 *disc_resp, int single) { struct discover_resp *dr; int res; disc_req[9] = single; res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, disc_resp, DISCOVER_RESP_SIZE); if (res) return res; dr = &((struct smp_resp *)disc_resp)->disc; if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) { sas_printk("Found loopback topology, just ignore it!\n"); return 0; } sas_set_ex_phy(dev, single, disc_resp); return 0; } int sas_ex_phy_discover(struct domain_device *dev, int single) { struct expander_device *ex = &dev->ex_dev; int res = 0; u8 *disc_req; u8 *disc_resp; disc_req = 
alloc_smp_req(DISCOVER_REQ_SIZE); if (!disc_req) return -ENOMEM; disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); if (!disc_resp) { kfree(disc_req); return -ENOMEM; } disc_req[1] = SMP_DISCOVER; if (0 <= single && single < ex->num_phys) { res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, single); } else { int i; for (i = 0; i < ex->num_phys; i++) { res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, i); if (res) goto out_err; } } out_err: kfree(disc_resp); kfree(disc_req); return res; } static int sas_expander_discover(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; int res = -ENOMEM; ex->ex_phy = kzalloc(sizeof(*ex->ex_phy)*ex->num_phys, GFP_KERNEL); if (!ex->ex_phy) return -ENOMEM; res = sas_ex_phy_discover(dev, -1); if (res) goto out_err; return 0; out_err: kfree(ex->ex_phy); ex->ex_phy = NULL; return res; } #define MAX_EXPANDER_PHYS 128 static void ex_assign_report_general(struct domain_device *dev, struct smp_resp *resp) { struct report_general_resp *rg = &resp->rg; dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count); dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes); dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS); dev->ex_dev.t2t_supp = rg->t2t_supp; dev->ex_dev.conf_route_table = rg->conf_route_table; dev->ex_dev.configuring = rg->configuring; memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8); } #define RG_REQ_SIZE 8 #define RG_RESP_SIZE 32 static int sas_ex_general(struct domain_device *dev) { u8 *rg_req; struct smp_resp *rg_resp; int res; int i; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; for (i = 0; i < 5; i++) { res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) { SAS_DPRINTK("RG to ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (rg_resp->result != 
SMP_RESP_FUNC_ACC) { SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n", SAS_ADDR(dev->sas_addr), rg_resp->result); res = rg_resp->result; goto out; } ex_assign_report_general(dev, rg_resp); if (dev->ex_dev.configuring) { SAS_DPRINTK("RG: ex %llx self-configuring...\n", SAS_ADDR(dev->sas_addr)); schedule_timeout_interruptible(5*HZ); } else break; } out: kfree(rg_req); kfree(rg_resp); return res; } static void ex_assign_manuf_info(struct domain_device *dev, void *_mi_resp) { u8 *mi_resp = _mi_resp; struct sas_rphy *rphy = dev->rphy; struct sas_expander_device *edev = rphy_to_expander_device(rphy); memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN); memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN); memcpy(edev->product_rev, mi_resp + 36, SAS_EXPANDER_PRODUCT_REV_LEN); if (mi_resp[8] & 1) { memcpy(edev->component_vendor_id, mi_resp + 40, SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); edev->component_id = mi_resp[48] << 8 | mi_resp[49]; edev->component_revision_id = mi_resp[50]; } } #define MI_REQ_SIZE 8 #define MI_RESP_SIZE 64 static int sas_ex_manuf_info(struct domain_device *dev) { u8 *mi_req; u8 *mi_resp; int res; mi_req = alloc_smp_req(MI_REQ_SIZE); if (!mi_req) return -ENOMEM; mi_resp = alloc_smp_resp(MI_RESP_SIZE); if (!mi_resp) { kfree(mi_req); return -ENOMEM; } mi_req[1] = SMP_REPORT_MANUF_INFO; res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE); if (res) { SAS_DPRINTK("MI: ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n", SAS_ADDR(dev->sas_addr), mi_resp[2]); goto out; } ex_assign_manuf_info(dev, mi_resp); out: kfree(mi_req); kfree(mi_resp); return res; } #define PC_REQ_SIZE 44 #define PC_RESP_SIZE 8 int sas_smp_phy_control(struct domain_device *dev, int phy_id, enum phy_func phy_func, struct sas_phy_linkrates *rates) { u8 *pc_req; u8 *pc_resp; int res; pc_req = alloc_smp_req(PC_REQ_SIZE); if 
(!pc_req) return -ENOMEM; pc_resp = alloc_smp_resp(PC_RESP_SIZE); if (!pc_resp) { kfree(pc_req); return -ENOMEM; } pc_req[1] = SMP_PHY_CONTROL; pc_req[9] = phy_id; pc_req[10]= phy_func; if (rates) { pc_req[32] = rates->minimum_linkrate << 4; pc_req[33] = rates->maximum_linkrate << 4; } res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE); kfree(pc_resp); kfree(pc_req); return res; } static void sas_ex_disable_phy(struct domain_device *dev, int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE, NULL); phy->linkrate = SAS_PHY_DISABLED; } static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr) { struct expander_device *ex = &dev->ex_dev; int i; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr)) sas_ex_disable_phy(dev, i); } } static int sas_dev_present_in_domain(struct asd_sas_port *port, u8 *sas_addr) { struct domain_device *dev; if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr)) return 1; list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr)) return 1; } return 0; } #define RPEL_REQ_SIZE 16 #define RPEL_RESP_SIZE 32 int sas_smp_get_phy_events(struct sas_phy *phy) { int res; u8 *req; u8 *resp; struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); struct domain_device *dev = sas_find_dev_by_rphy(rphy); req = alloc_smp_req(RPEL_REQ_SIZE); if (!req) return -ENOMEM; resp = alloc_smp_resp(RPEL_RESP_SIZE); if (!resp) { kfree(req); return -ENOMEM; } req[1] = SMP_REPORT_PHY_ERR_LOG; req[9] = phy->number; res = smp_execute_task(dev, req, RPEL_REQ_SIZE, resp, RPEL_RESP_SIZE); if (!res) goto out; phy->invalid_dword_count = scsi_to_u32(&resp[12]); phy->running_disparity_error_count = scsi_to_u32(&resp[16]); 
phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]); phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); out: kfree(resp); return res; } #ifdef CONFIG_SCSI_SAS_ATA #define RPS_REQ_SIZE 16 #define RPS_RESP_SIZE 60 int sas_get_report_phy_sata(struct domain_device *dev, int phy_id, struct smp_resp *rps_resp) { int res; u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE); u8 *resp = (u8 *)rps_resp; if (!rps_req) return -ENOMEM; rps_req[1] = SMP_REPORT_PHY_SATA; rps_req[9] = phy_id; res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE, rps_resp, RPS_RESP_SIZE); /* 0x34 is the FIS type for the D2H fis. There's a potential * standards cockup here. sas-2 explicitly specifies the FIS * should be encoded so that FIS type is in resp[24]. * However, some expanders endian reverse this. Undo the * reversal here */ if (!res && resp[27] == 0x34 && resp[24] != 0x34) { int i; for (i = 0; i < 5; i++) { int j = 24 + (i*4); u8 a, b; a = resp[j + 0]; b = resp[j + 1]; resp[j + 0] = resp[j + 3]; resp[j + 1] = resp[j + 2]; resp[j + 2] = b; resp[j + 3] = a; } } kfree(rps_req); return res; } #endif static void sas_ex_get_linkrate(struct domain_device *parent, struct domain_device *child, struct ex_phy *parent_phy) { struct expander_device *parent_ex = &parent->ex_dev; struct sas_port *port; int i; child->pathways = 0; port = parent_phy->port; for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *phy = &parent_ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr)) { child->min_linkrate = min(parent->min_linkrate, phy->linkrate); child->max_linkrate = max(parent->max_linkrate, phy->linkrate); child->pathways++; sas_port_add_phy(port, phy->phy); } } child->linkrate = min(parent_phy->linkrate, child->max_linkrate); child->pathways = min(child->pathways, parent->pathways); } static struct domain_device *sas_ex_discover_end_dev( struct domain_device *parent, int phy_id) { struct 
expander_device *parent_ex = &parent->ex_dev; struct ex_phy *phy = &parent_ex->ex_phy[phy_id]; struct domain_device *child = NULL; struct sas_rphy *rphy; int res; if (phy->attached_sata_host || phy->attached_sata_ps) return NULL; child = sas_alloc_device(); if (!child) return NULL; kref_get(&parent->kref); child->parent = parent; child->port = parent->port; child->iproto = phy->attached_iproto; memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); sas_hash_addr(child->hashed_sas_addr, child->sas_addr); if (!phy->port) { phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); if (unlikely(!phy->port)) goto out_err; if (unlikely(sas_port_add(phy->port) != 0)) { sas_port_free(phy->port); goto out_err; } } sas_ex_get_linkrate(parent, child, phy); sas_device_set_phy(child, phy->port); #ifdef CONFIG_SCSI_SAS_ATA if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) { res = sas_get_ata_info(child, phy); if (res) goto out_free; sas_init_dev(child); res = sas_ata_init(child); if (res) goto out_free; rphy = sas_end_device_alloc(phy->port); if (!rphy) goto out_free; child->rphy = rphy; get_device(&rphy->dev); list_add_tail(&child->disco_list_node, &parent->port->disco_list); res = sas_discover_sata(child); if (res) { SAS_DPRINTK("sas_discover_sata() for device %16llx at " "%016llx:0x%x returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; } } else #endif if (phy->attached_tproto & SAS_PROTOCOL_SSP) { child->dev_type = SAS_END_DEV; rphy = sas_end_device_alloc(phy->port); /* FIXME: error handling */ if (unlikely(!rphy)) goto out_free; child->tproto = phy->attached_tproto; sas_init_dev(child); child->rphy = rphy; get_device(&rphy->dev); sas_fill_in_rphy(child, rphy); list_add_tail(&child->disco_list_node, &parent->port->disco_list); res = sas_discover_end_dev(child); if (res) { SAS_DPRINTK("sas_discover_end_dev() for device %16llx " "at %016llx:0x%x returned 0x%x\n", SAS_ADDR(child->sas_addr), 
SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; } } else { SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", phy->attached_tproto, SAS_ADDR(parent->sas_addr), phy_id); goto out_free; } list_add_tail(&child->siblings, &parent_ex->children); return child; out_list_del: sas_rphy_free(child->rphy); list_del(&child->disco_list_node); spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); out_free: sas_port_delete(phy->port); out_err: phy->port = NULL; sas_put_device(child); return NULL; } /* See if this phy is part of a wide port */ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id) { struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; int i; for (i = 0; i < parent->ex_dev.num_phys; i++) { struct ex_phy *ephy = &parent->ex_dev.ex_phy[i]; if (ephy == phy) continue; if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, SAS_ADDR_SIZE) && ephy->port) { sas_port_add_phy(ephy->port, phy->phy); phy->port = ephy->port; phy->phy_state = PHY_DEVICE_DISCOVERED; return 0; } } return -ENODEV; } static struct domain_device *sas_ex_discover_expander( struct domain_device *parent, int phy_id) { struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy); struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; struct domain_device *child = NULL; struct sas_rphy *rphy; struct sas_expander_device *edev; struct asd_sas_port *port; int res; if (phy->routing_attr == DIRECT_ROUTING) { SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not " "allowed\n", SAS_ADDR(parent->sas_addr), phy_id, SAS_ADDR(phy->attached_sas_addr), phy->attached_phy_id); return NULL; } child = sas_alloc_device(); if (!child) return NULL; phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); /* FIXME: better error handling */ BUG_ON(sas_port_add(phy->port) != 0); switch (phy->attached_dev_type) { case EDGE_DEV: rphy = sas_expander_alloc(phy->port, SAS_EDGE_EXPANDER_DEVICE); 
break; case FANOUT_DEV: rphy = sas_expander_alloc(phy->port, SAS_FANOUT_EXPANDER_DEVICE); break; default: rphy = NULL; /* shut gcc up */ BUG(); } port = parent->port; child->rphy = rphy; get_device(&rphy->dev); edev = rphy_to_expander_device(rphy); child->dev_type = phy->attached_dev_type; kref_get(&parent->kref); child->parent = parent; child->port = port; child->iproto = phy->attached_iproto; child->tproto = phy->attached_tproto; memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); sas_hash_addr(child->hashed_sas_addr, child->sas_addr); sas_ex_get_linkrate(parent, child, phy); edev->level = parent_ex->level + 1; parent->port->disc.max_level = max(parent->port->disc.max_level, edev->level); sas_init_dev(child); sas_fill_in_rphy(child, rphy); sas_rphy_add(rphy); spin_lock_irq(&parent->port->dev_list_lock); list_add_tail(&child->dev_list_node, &parent->port->dev_list); spin_unlock_irq(&parent->port->dev_list_lock); res = sas_discover_expander(child); if (res) { sas_rphy_delete(rphy); spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); sas_put_device(child); return NULL; } list_add_tail(&child->siblings, &parent->ex_dev.children); return child; } static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; struct domain_device *child = NULL; int res = 0; /* Phy state */ if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) { if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL)) res = sas_ex_phy_discover(dev, phy_id); if (res) return res; } /* Parent and domain coherency */ if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == SAS_ADDR(dev->port->sas_addr))) { sas_add_parent_port(dev, phy_id); return 0; } if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == SAS_ADDR(dev->parent->sas_addr))) { sas_add_parent_port(dev, phy_id); if (ex_phy->routing_attr == TABLE_ROUTING) 
sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1); return 0; } if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) sas_ex_disable_port(dev, ex_phy->attached_sas_addr); if (ex_phy->attached_dev_type == NO_DEVICE) { if (ex_phy->routing_attr == DIRECT_ROUTING) { memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_configure_routing(dev, ex_phy->attached_sas_addr); } return 0; } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) return 0; if (ex_phy->attached_dev_type != SAS_END_DEV && ex_phy->attached_dev_type != FANOUT_DEV && ex_phy->attached_dev_type != EDGE_DEV && ex_phy->attached_dev_type != SATA_PENDING) { SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " "phy 0x%x\n", ex_phy->attached_dev_type, SAS_ADDR(dev->sas_addr), phy_id); return 0; } res = sas_configure_routing(dev, ex_phy->attached_sas_addr); if (res) { SAS_DPRINTK("configure routing for dev %016llx " "reported 0x%x. Forgotten\n", SAS_ADDR(ex_phy->attached_sas_addr), res); sas_disable_routing(dev, ex_phy->attached_sas_addr); return res; } res = sas_ex_join_wide_port(dev, phy_id); if (!res) { SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", phy_id, SAS_ADDR(ex_phy->attached_sas_addr)); return res; } switch (ex_phy->attached_dev_type) { case SAS_END_DEV: case SATA_PENDING: child = sas_ex_discover_end_dev(dev, phy_id); break; case FANOUT_DEV: if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { SAS_DPRINTK("second fanout expander %016llx phy 0x%x " "attached to ex %016llx phy 0x%x\n", SAS_ADDR(ex_phy->attached_sas_addr), ex_phy->attached_phy_id, SAS_ADDR(dev->sas_addr), phy_id); sas_ex_disable_phy(dev, phy_id); break; } else memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); /* fallthrough */ case EDGE_DEV: child = sas_ex_discover_expander(dev, phy_id); break; default: break; } if (child) { int i; for (i = 0; i < ex->num_phys; i++) { if (ex->ex_phy[i].phy_state == PHY_VACANT || ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) continue; 
/* * Due to races, the phy might not get added to the * wide port, so we add the phy to the wide port here. */ if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == SAS_ADDR(child->sas_addr)) { ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; res = sas_ex_join_wide_port(dev, i); if (!res) SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr)); } } } return res; } static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) { struct expander_device *ex = &dev->ex_dev; int i; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if ((phy->attached_dev_type == EDGE_DEV || phy->attached_dev_type == FANOUT_DEV) && phy->routing_attr == SUBTRACTIVE_ROUTING) { memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); return 1; } } return 0; } static int sas_check_level_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child; u8 sub_addr[8] = {0, }; list_for_each_entry(child, &ex->children, siblings) { if (child->dev_type != EDGE_DEV && child->dev_type != FANOUT_DEV) continue; if (sub_addr[0] == 0) { sas_find_sub_addr(child, sub_addr); continue; } else { u8 s2[8]; if (sas_find_sub_addr(child, s2) && (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) { SAS_DPRINTK("ex %016llx->%016llx-?->%016llx " "diverges from subtractive " "boundary %016llx\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(child->sas_addr), SAS_ADDR(s2), SAS_ADDR(sub_addr)); sas_ex_disable_port(child, s2); } } } return 0; } /** * sas_ex_discover_devices -- discover devices attached to this expander * dev: pointer to the expander domain device * single: if you want to do a single phy, else set to -1; * * Configure this expander for use with its devices and register the * devices of this expander. 
*/ static int sas_ex_discover_devices(struct domain_device *dev, int single) { struct expander_device *ex = &dev->ex_dev; int i = 0, end = ex->num_phys; int res = 0; if (0 <= single && single < end) { i = single; end = i+1; } for ( ; i < end; i++) { struct ex_phy *ex_phy = &ex->ex_phy[i]; if (ex_phy->phy_state == PHY_VACANT || ex_phy->phy_state == PHY_NOT_PRESENT || ex_phy->phy_state == PHY_DEVICE_DISCOVERED) continue; switch (ex_phy->linkrate) { case SAS_PHY_DISABLED: case SAS_PHY_RESET_PROBLEM: case SAS_SATA_PORT_SELECTOR: continue; default: res = sas_ex_discover_dev(dev, i); if (res) break; continue; } } if (!res) sas_check_level_subtractive_boundary(dev); return res; } static int sas_check_ex_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; int i; u8 *sub_sas_addr = NULL; if (dev->dev_type != EDGE_DEV) return 0; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if ((phy->attached_dev_type == FANOUT_DEV || phy->attached_dev_type == EDGE_DEV) && phy->routing_attr == SUBTRACTIVE_ROUTING) { if (!sub_sas_addr) sub_sas_addr = &phy->attached_sas_addr[0]; else if (SAS_ADDR(sub_sas_addr) != SAS_ADDR(phy->attached_sas_addr)) { SAS_DPRINTK("ex %016llx phy 0x%x " "diverges(%016llx) on subtractive " "boundary(%016llx). 
Disabled\n", SAS_ADDR(dev->sas_addr), i, SAS_ADDR(phy->attached_sas_addr), SAS_ADDR(sub_sas_addr)); sas_ex_disable_phy(dev, i); } } } return 0; } static void sas_print_parent_topology_bug(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { static const char *ex_type[] = { [EDGE_DEV] = "edge", [FANOUT_DEV] = "fanout", }; struct domain_device *parent = child->parent; sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx " "phy 0x%x has %c:%c routing link!\n", ex_type[parent->dev_type], SAS_ADDR(parent->sas_addr), parent_phy->phy_id, ex_type[child->dev_type], SAS_ADDR(child->sas_addr), child_phy->phy_id, sas_route_char(parent, parent_phy), sas_route_char(child, child_phy)); } static int sas_check_eeds(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { int res = 0; struct domain_device *parent = child->parent; if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) { res = -ENODEV; SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx " "phy S:0x%x, while there is a fanout ex %016llx\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, SAS_ADDR(child->sas_addr), child_phy->phy_id, SAS_ADDR(parent->port->disc.fanout_sas_addr)); } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) { memcpy(parent->port->disc.eeds_a, parent->sas_addr, SAS_ADDR_SIZE); memcpy(parent->port->disc.eeds_b, child->sas_addr, SAS_ADDR_SIZE); } else if (((SAS_ADDR(parent->port->disc.eeds_a) == SAS_ADDR(parent->sas_addr)) || (SAS_ADDR(parent->port->disc.eeds_a) == SAS_ADDR(child->sas_addr))) && ((SAS_ADDR(parent->port->disc.eeds_b) == SAS_ADDR(parent->sas_addr)) || (SAS_ADDR(parent->port->disc.eeds_b) == SAS_ADDR(child->sas_addr)))) ; else { res = -ENODEV; SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx " "phy 0x%x link forms a third EEDS!\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, SAS_ADDR(child->sas_addr), child_phy->phy_id); } return res; } /* Here we spill over 80 columns. It is intentional. 
*/ static int sas_check_parent_topology(struct domain_device *child) { struct expander_device *child_ex = &child->ex_dev; struct expander_device *parent_ex; int i; int res = 0; if (!child->parent) return 0; if (child->parent->dev_type != EDGE_DEV && child->parent->dev_type != FANOUT_DEV) return 0; parent_ex = &child->parent->ex_dev; for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *parent_phy = &parent_ex->ex_phy[i]; struct ex_phy *child_phy; if (parent_phy->phy_state == PHY_VACANT || parent_phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr)) continue; child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; switch (child->parent->dev_type) { case EDGE_DEV: if (child->dev_type == FANOUT_DEV) { if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) { if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) { res = sas_check_eeds(child, parent_phy, child_phy); } else if (child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } else if (parent_phy->routing_attr == TABLE_ROUTING) { if (child_phy->routing_attr == SUBTRACTIVE_ROUTING || (child_phy->routing_attr == TABLE_ROUTING && child_ex->t2t_supp && parent_ex->t2t_supp)) { /* All good */; } else { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } break; case FANOUT_DEV: if (parent_phy->routing_attr != TABLE_ROUTING || child_phy->routing_attr != SUBTRACTIVE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } break; default: break; } } return res; } #define RRI_REQ_SIZE 16 #define RRI_RESP_SIZE 44 static int sas_configure_present(struct domain_device *dev, int phy_id, u8 *sas_addr, int *index, int *present) { int i, res = 0; struct 
expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; u8 *rri_req; u8 *rri_resp; *present = 0; *index = 0; rri_req = alloc_smp_req(RRI_REQ_SIZE); if (!rri_req) return -ENOMEM; rri_resp = alloc_smp_resp(RRI_RESP_SIZE); if (!rri_resp) { kfree(rri_req); return -ENOMEM; } rri_req[1] = SMP_REPORT_ROUTE_INFO; rri_req[9] = phy_id; for (i = 0; i < ex->max_route_indexes ; i++) { *(__be16 *)(rri_req+6) = cpu_to_be16(i); res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp, RRI_RESP_SIZE); if (res) goto out; res = rri_resp[2]; if (res == SMP_RESP_NO_INDEX) { SAS_DPRINTK("overflow of indexes: dev %016llx " "phy 0x%x index 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, i); goto out; } else if (res != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x " "result 0x%x\n", __func__, SAS_ADDR(dev->sas_addr), phy_id, i, res); goto out; } if (SAS_ADDR(sas_addr) != 0) { if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) { *index = i; if ((rri_resp[12] & 0x80) == 0x80) *present = 0; else *present = 1; goto out; } else if (SAS_ADDR(rri_resp+16) == 0) { *index = i; *present = 0; goto out; } } else if (SAS_ADDR(rri_resp+16) == 0 && phy->last_da_index < i) { phy->last_da_index = i; *index = i; *present = 0; goto out; } } res = -1; out: kfree(rri_req); kfree(rri_resp); return res; } #define CRI_REQ_SIZE 44 #define CRI_RESP_SIZE 8 static int sas_configure_set(struct domain_device *dev, int phy_id, u8 *sas_addr, int index, int include) { int res; u8 *cri_req; u8 *cri_resp; cri_req = alloc_smp_req(CRI_REQ_SIZE); if (!cri_req) return -ENOMEM; cri_resp = alloc_smp_resp(CRI_RESP_SIZE); if (!cri_resp) { kfree(cri_req); return -ENOMEM; } cri_req[1] = SMP_CONF_ROUTE_INFO; *(__be16 *)(cri_req+6) = cpu_to_be16(index); cri_req[9] = phy_id; if (SAS_ADDR(sas_addr) == 0 || !include) cri_req[12] |= 0x80; memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE); res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp, CRI_RESP_SIZE); if (res) goto out; res = cri_resp[2]; if 
(res == SMP_RESP_NO_INDEX) { SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x " "index 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, index); } out: kfree(cri_req); kfree(cri_resp); return res; } static int sas_configure_phy(struct domain_device *dev, int phy_id, u8 *sas_addr, int include) { int index; int present; int res; res = sas_configure_present(dev, phy_id, sas_addr, &index, &present); if (res) return res; if (include ^ present) return sas_configure_set(dev, phy_id, sas_addr, index,include); return res; } /** * sas_configure_parent -- configure routing table of parent * parent: parent expander * child: child expander * sas_addr: SAS port identifier of device directly attached to child */ static int sas_configure_parent(struct domain_device *parent, struct domain_device *child, u8 *sas_addr, int include) { struct expander_device *ex_parent = &parent->ex_dev; int res = 0; int i; if (parent->parent) { res = sas_configure_parent(parent->parent, parent, sas_addr, include); if (res) return res; } if (ex_parent->conf_route_table == 0) { SAS_DPRINTK("ex %016llx has self-configuring routing table\n", SAS_ADDR(parent->sas_addr)); return 0; } for (i = 0; i < ex_parent->num_phys; i++) { struct ex_phy *phy = &ex_parent->ex_phy[i]; if ((phy->routing_attr == TABLE_ROUTING) && (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr))) { res = sas_configure_phy(parent, i, sas_addr, include); if (res) return res; } } return res; } /** * sas_configure_routing -- configure routing * dev: expander device * sas_addr: port identifier of device directly attached to the expander device */ static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr) { if (dev->parent) return sas_configure_parent(dev->parent, dev, sas_addr, 1); return 0; } static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr) { if (dev->parent) return sas_configure_parent(dev->parent, dev, sas_addr, 0); return 0; } /** * sas_discover_expander -- expander discovery * @ex: pointer 
to expander domain device * * See comment in sas_discover_sata(). */ static int sas_discover_expander(struct domain_device *dev) { int res; res = sas_notify_lldd_dev_found(dev); if (res) return res; res = sas_ex_general(dev); if (res) goto out_err; res = sas_ex_manuf_info(dev); if (res) goto out_err; res = sas_expander_discover(dev); if (res) { SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n", SAS_ADDR(dev->sas_addr), res); goto out_err; } sas_check_ex_subtractive_boundary(dev); res = sas_check_parent_topology(dev); if (res) goto out_err; return 0; out_err: sas_notify_lldd_dev_gone(dev); return res; } static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) { int res = 0; struct domain_device *dev; list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); if (level == ex->level) res = sas_ex_discover_devices(dev, -1); else if (level > 0) res = sas_ex_discover_devices(port->port_dev, -1); } } return res; } static int sas_ex_bfs_disc(struct asd_sas_port *port) { int res; int level; do { level = port->disc.max_level; res = sas_ex_level_discovery(port, level); mb(); } while (level < port->disc.max_level); return res; } int sas_discover_root_expander(struct domain_device *dev) { int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); res = sas_rphy_add(dev->rphy); if (res) goto out_err; ex->level = dev->port->disc.max_level; /* 0 */ res = sas_discover_expander(dev); if (res) goto out_err2; sas_ex_bfs_disc(dev->port); return res; out_err2: sas_rphy_remove(dev->rphy); out_err: return res; } /* ---------- Domain revalidation ---------- */ static int sas_get_phy_discover(struct domain_device *dev, int phy_id, struct smp_resp *disc_resp) { int res; u8 *disc_req; disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); if (!disc_req) return -ENOMEM; disc_req[1] = SMP_DISCOVER; disc_req[9] = phy_id; res = 
smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, disc_resp, DISCOVER_RESP_SIZE); if (res) goto out; else if (disc_resp->result != SMP_RESP_FUNC_ACC) { res = disc_resp->result; goto out; } out: kfree(disc_req); return res; } static int sas_get_phy_change_count(struct domain_device *dev, int phy_id, int *pcc) { int res; struct smp_resp *disc_resp; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) return -ENOMEM; res = sas_get_phy_discover(dev, phy_id, disc_resp); if (!res) *pcc = disc_resp->disc.change_count; kfree(disc_resp); return res; } static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, u8 *sas_addr, enum sas_dev_type *type) { int res; struct smp_resp *disc_resp; struct discover_resp *dr; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) return -ENOMEM; dr = &disc_resp->disc; res = sas_get_phy_discover(dev, phy_id, disc_resp); if (res == 0) { memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8); *type = to_dev_type(dr); if (*type == 0) memset(sas_addr, 0, 8); } kfree(disc_resp); return res; } static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, int from_phy, bool update) { struct expander_device *ex = &dev->ex_dev; int res = 0; int i; for (i = from_phy; i < ex->num_phys; i++) { int phy_change_count = 0; res = sas_get_phy_change_count(dev, i, &phy_change_count); switch (res) { case SMP_RESP_PHY_VACANT: case SMP_RESP_NO_PHY: continue; case SMP_RESP_FUNC_ACC: break; default: return res; } if (phy_change_count != ex->ex_phy[i].phy_change_count) { if (update) ex->ex_phy[i].phy_change_count = phy_change_count; *phy_id = i; return 0; } } return 0; } static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) { int res; u8 *rg_req; struct smp_resp *rg_resp; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; res = smp_execute_task(dev, rg_req, 
RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) goto out; if (rg_resp->result != SMP_RESP_FUNC_ACC) { res = rg_resp->result; goto out; } *ecc = be16_to_cpu(rg_resp->rg.change_count); out: kfree(rg_resp); kfree(rg_req); return res; } /** * sas_find_bcast_dev - find the device issue BROADCAST(CHANGE). * @dev:domain device to be detect. * @src_dev: the device which originated BROADCAST(CHANGE). * * Add self-configuration expander suport. Suppose two expander cascading, * when the first level expander is self-configuring, hotplug the disks in * second level expander, BROADCAST(CHANGE) will not only be originated * in the second level expander, but also be originated in the first level * expander (see SAS protocol SAS 2r-14, 7.11 for detail), it is to say, * expander changed count in two level expanders will all increment at least * once, but the phy which chang count has changed is the source device which * we concerned. */ static int sas_find_bcast_dev(struct domain_device *dev, struct domain_device **src_dev) { struct expander_device *ex = &dev->ex_dev; int ex_change_count = -1; int phy_id = -1; int res; struct domain_device *ch; res = sas_get_ex_change_count(dev, &ex_change_count); if (res) goto out; if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) { /* Just detect if this expander phys phy change count changed, * in order to determine if this expander originate BROADCAST, * and do not update phy change count field in our structure. 
*/ res = sas_find_bcast_phy(dev, &phy_id, 0, false); if (phy_id != -1) { *src_dev = dev; ex->ex_change_count = ex_change_count; SAS_DPRINTK("Expander phy change count has changed\n"); return res; } else SAS_DPRINTK("Expander phys DID NOT change\n"); } list_for_each_entry(ch, &ex->children, siblings) { if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { res = sas_find_bcast_dev(ch, src_dev); if (*src_dev) return res; } } out: return res; } static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child, *n; list_for_each_entry_safe(child, n, &ex->children, siblings) { set_bit(SAS_DEV_GONE, &child->state); if (child->dev_type == EDGE_DEV || child->dev_type == FANOUT_DEV) sas_unregister_ex_tree(port, child); else sas_unregister_dev(port, child); } sas_unregister_dev(port, dev); } static void sas_unregister_devs_sas_addr(struct domain_device *parent, int phy_id, bool last) { struct expander_device *ex_dev = &parent->ex_dev; struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; struct domain_device *child, *n, *found = NULL; if (last) { list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { set_bit(SAS_DEV_GONE, &child->state); if (child->dev_type == EDGE_DEV || child->dev_type == FANOUT_DEV) sas_unregister_ex_tree(parent->port, child); else sas_unregister_dev(parent->port, child); found = child; break; } } sas_disable_routing(parent, phy->attached_sas_addr); } memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); if (phy->port) { sas_port_delete_phy(phy->port, phy->phy); sas_device_set_phy(found, phy->port); if (phy->port->num_phys == 0) sas_port_delete(phy->port); phy->port = NULL; } } static int sas_discover_bfs_by_root_level(struct domain_device *root, const int level) { struct expander_device *ex_root = &root->ex_dev; struct domain_device *child; int res = 0; list_for_each_entry(child, 
&ex_root->children, siblings) { if (child->dev_type == EDGE_DEV || child->dev_type == FANOUT_DEV) { struct sas_expander_device *ex = rphy_to_expander_device(child->rphy); if (level > ex->level) res = sas_discover_bfs_by_root_level(child, level); else if (level == ex->level) res = sas_ex_discover_devices(child, -1); } } return res; } static int sas_discover_bfs_by_root(struct domain_device *dev) { int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); int level = ex->level+1; res = sas_ex_discover_devices(dev, -1); if (res) goto out; do { res = sas_discover_bfs_by_root_level(dev, level); mb(); level += 1; } while (level <= dev->port->disc.max_level); out: return res; } static int sas_discover_new(struct domain_device *dev, int phy_id) { struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; struct domain_device *child; bool found = false; int res, i; SAS_DPRINTK("ex %016llx phy%d new device attached\n", SAS_ADDR(dev->sas_addr), phy_id); res = sas_ex_phy_discover(dev, phy_id); if (res) goto out; /* to support the wide port inserted */ for (i = 0; i < dev->ex_dev.num_phys; i++) { struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i]; if (i == phy_id) continue; if (SAS_ADDR(ex_phy_temp->attached_sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { found = true; break; } } if (found) { sas_ex_join_wide_port(dev, phy_id); return 0; } res = sas_ex_discover_devices(dev, phy_id); if (!res) goto out; list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { if (child->dev_type == EDGE_DEV || child->dev_type == FANOUT_DEV) res = sas_discover_bfs_by_root(child); break; } } out: return res; } static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old) { if (old == new) return true; /* treat device directed resets as flutter, if we went * SAS_END_DEV to SATA_PENDING the link needs recovery */ if ((old == SATA_PENDING && new == SAS_END_DEV) || (old == SAS_END_DEV && new == 
SATA_PENDING)) return true; return false; } static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; enum sas_dev_type type = NO_DEVICE; u8 sas_addr[8]; int res; res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type); switch (res) { case SMP_RESP_NO_PHY: phy->phy_state = PHY_NOT_PRESENT; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; case SMP_RESP_FUNC_ACC: break; } if (SAS_ADDR(sas_addr) == 0) { phy->phy_state = PHY_EMPTY; sas_unregister_devs_sas_addr(dev, phy_id, last); return res; } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) && dev_type_flutter(type, phy->attached_dev_type)) { struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id); char *action = ""; sas_ex_phy_discover(dev, phy_id); if (ata_dev && phy->attached_dev_type == SATA_PENDING) action = ", needs recovery"; SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", SAS_ADDR(dev->sas_addr), phy_id, action); return res; } /* delete the old link */ if (SAS_ADDR(phy->attached_sas_addr) && SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) { SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n", SAS_ADDR(dev->sas_addr), phy_id, SAS_ADDR(phy->attached_sas_addr)); sas_unregister_devs_sas_addr(dev, phy_id, last); } return sas_discover_new(dev, phy_id); } /** * sas_rediscover - revalidate the domain. * @dev:domain device to be detect. * @phy_id: the phy id will be detected. * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. 
* Discover process only interrogates devices in order to discover the * domain.For plugging out, we un-register the device only when it is * the last phy in the port, for other phys in this port, we just delete it * from the port.For inserting, we do discovery when it is the * first phy,for other phys in this port, we add it to the port to * forming the wide-port. */ static int sas_rediscover(struct domain_device *dev, const int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; int res = 0; int i; bool last = true; /* is this the last phy of the port */ SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", SAS_ADDR(dev->sas_addr), phy_id); if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) { for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (i == phy_id) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(changed_phy->attached_sas_addr)) { SAS_DPRINTK("phy%d part of wide port with " "phy%d\n", phy_id, i); last = false; break; } } res = sas_rediscover_dev(dev, phy_id, last); } else res = sas_discover_new(dev, phy_id); return res; } /** * sas_revalidate_domain -- revalidate the domain * @port: port to the domain of interest * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. * Discover process only interrogates devices in order to discover the * domain. 
*/ int sas_ex_revalidate_domain(struct domain_device *port_dev) { int res; struct domain_device *dev = NULL; res = sas_find_bcast_dev(port_dev, &dev); if (res) goto out; if (dev) { struct expander_device *ex = &dev->ex_dev; int i = 0, phy_id; do { phy_id = -1; res = sas_find_bcast_phy(dev, &phy_id, i, true); if (phy_id == -1) break; res = sas_rediscover(dev, phy_id); i = phy_id + 1; } while (i < ex->num_phys); } out: return res; } int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct request *req) { struct domain_device *dev; int ret, type; struct request *rsp = req->next_rq; if (!rsp) { printk("%s: space for a smp response is missing\n", __func__); return -EINVAL; } /* no rphy means no smp target support (ie aic94xx host) */ if (!rphy) return sas_smp_host_handler(shost, req, rsp); type = rphy->identify.device_type; if (type != SAS_EDGE_EXPANDER_DEVICE && type != SAS_FANOUT_EXPANDER_DEVICE) { printk("%s: can we send a smp request to a device?\n", __func__); return -EINVAL; } dev = sas_find_dev_by_rphy(rphy); if (!dev) { printk("%s: fail to find a domain_device?\n", __func__); return -EINVAL; } /* do we need to support multiple segments? */ if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { printk("%s: multiple segments req %u %u, rsp %u %u\n", __func__, req->bio->bi_vcnt, blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp)); return -EINVAL; } ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req), bio_data(rsp->bio), blk_rq_bytes(rsp)); if (ret > 0) { /* positive number is the untransferred residual */ rsp->resid_len = ret; req->resid_len = 0; ret = 0; } else if (ret == 0) { rsp->resid_len = 0; req->resid_len = 0; } return ret; }
gpl-2.0
SM-G920P/G920PVPU3BOI1
arch/mips/sni/time.c
3615
4838
#include <linux/types.h> #include <linux/i8253.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/smp.h> #include <linux/time.h> #include <linux/clockchips.h> #include <asm/sni.h> #include <asm/time.h> #include <asm-generic/rtc.h> #define SNI_CLOCK_TICK_RATE 3686400 #define SNI_COUNTER2_DIV 64 #define SNI_COUNTER0_DIV ((SNI_CLOCK_TICK_RATE / SNI_COUNTER2_DIV) / HZ) static void a20r_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: *(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0x34; wmb(); *(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV; wmb(); *(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV >> 8; wmb(); *(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0xb4; wmb(); *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV; wmb(); *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV >> 8; wmb(); break; case CLOCK_EVT_MODE_ONESHOT: case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: break; case CLOCK_EVT_MODE_RESUME: break; } } static struct clock_event_device a20r_clockevent_device = { .name = "a20r-timer", .features = CLOCK_EVT_FEAT_PERIODIC, /* .mult, .shift, .max_delta_ns and .min_delta_ns left uninitialized */ .rating = 300, .irq = SNI_A20R_IRQ_TIMER, .set_mode = a20r_set_mode, }; static irqreturn_t a20r_interrupt(int irq, void *dev_id) { struct clock_event_device *cd = dev_id; *(volatile u8 *)A20R_PT_TIM0_ACK = 0; wmb(); cd->event_handler(cd); return IRQ_HANDLED; } static struct irqaction a20r_irqaction = { .handler = a20r_interrupt, .flags = IRQF_PERCPU | IRQF_TIMER, .name = "a20r-timer", }; /* * a20r platform uses 2 counters to divide the input frequency. * Counter 2 output is connected to Counter 0 & 1 input. 
*/ static void __init sni_a20r_timer_setup(void) { struct clock_event_device *cd = &a20r_clockevent_device; struct irqaction *action = &a20r_irqaction; unsigned int cpu = smp_processor_id(); cd->cpumask = cpumask_of(cpu); clockevents_register_device(cd); action->dev_id = cd; setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction); } #define SNI_8254_TICK_RATE 1193182UL #define SNI_8254_TCSAMP_COUNTER ((SNI_8254_TICK_RATE / HZ) + 255) static __init unsigned long dosample(void) { u32 ct0, ct1; volatile u8 msb; /* Start the counter. */ outb_p(0x34, 0x43); outb_p(SNI_8254_TCSAMP_COUNTER & 0xff, 0x40); outb(SNI_8254_TCSAMP_COUNTER >> 8, 0x40); /* Get initial counter invariant */ ct0 = read_c0_count(); /* Latch and spin until top byte of counter0 is zero */ do { outb(0x00, 0x43); (void) inb(0x40); msb = inb(0x40); ct1 = read_c0_count(); } while (msb); /* Stop the counter. */ outb(0x38, 0x43); /* * Return the difference, this is how far the r4k counter increments * for every 1/HZ seconds. We round off the nearest 1 MHz of master * clock (= 1000000 / HZ / 2). */ /*return (ct1 - ct0 + (500000/HZ/2)) / (500000/HZ) * (500000/HZ);*/ return (ct1 - ct0) / (500000/HZ) * (500000/HZ); } /* * Here we need to calibrate the cycle counter to at least be close. */ void __init plat_time_init(void) { unsigned long r4k_ticks[3]; unsigned long r4k_tick; /* * Figure out the r4k offset, the algorithm is very simple and works in * _all_ cases as long as the 8254 counter register itself works ok (as * an interrupt driving timer it does not because of bug, this is why * we are using the onchip r4k counter/compare register to serve this * purpose, but for r4k_offset calculation it will work ok for us). * There are other very complicated ways of performing this calculation * but this one works just fine so I am not going to futz around. ;-) */ printk(KERN_INFO "Calibrating system timer... "); dosample(); /* Prime cache. */ dosample(); /* Prime cache. */ /* Zero is NOT an option. 
*/ do { r4k_ticks[0] = dosample(); } while (!r4k_ticks[0]); do { r4k_ticks[1] = dosample(); } while (!r4k_ticks[1]); if (r4k_ticks[0] != r4k_ticks[1]) { printk("warning: timer counts differ, retrying... "); r4k_ticks[2] = dosample(); if (r4k_ticks[2] == r4k_ticks[0] || r4k_ticks[2] == r4k_ticks[1]) r4k_tick = r4k_ticks[2]; else { printk("disagreement, using average... "); r4k_tick = (r4k_ticks[0] + r4k_ticks[1] + r4k_ticks[2]) / 3; } } else r4k_tick = r4k_ticks[0]; printk("%d [%d.%04d MHz CPU]\n", (int) r4k_tick, (int) (r4k_tick / (500000 / HZ)), (int) (r4k_tick % (500000 / HZ))); mips_hpt_frequency = r4k_tick * HZ; switch (sni_brd_type) { case SNI_BRD_10: case SNI_BRD_10NEW: case SNI_BRD_TOWER_OASIC: case SNI_BRD_MINITOWER: sni_a20r_timer_setup(); break; } setup_pit_timer(); } void read_persistent_clock(struct timespec *ts) { ts->tv_sec = -1; ts->tv_nsec = 0; }
gpl-2.0
BlackBox-Kernel/blackbox_sprout_lp
arch/mips/jazz/irq.c
3615
4294
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2001, 2003, 07 Ralf Baechle
 */
#include <linux/clockchips.h>
#include <linux/i8253.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/irq.h>

#include <asm/irq_cpu.h>
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/jazz.h>
#include <asm/pgtable.h>
#include <asm/tlbmisc.h>

/* Serializes read-modify-write access to the R4030 IRQ-enable register. */
static DEFINE_RAW_SPINLOCK(r4030_lock);

/*
 * Unmask an R4030 local interrupt by setting its bit in the enable
 * register, under the r4030 lock to avoid losing concurrent updates.
 */
static void enable_r4030_irq(struct irq_data *d)
{
        unsigned int mask = 1 << (d->irq - JAZZ_IRQ_START);
        unsigned long flags;

        raw_spin_lock_irqsave(&r4030_lock, flags);
        mask |= r4030_read_reg16(JAZZ_IO_IRQ_ENABLE);
        r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, mask);
        raw_spin_unlock_irqrestore(&r4030_lock, flags);
}

/* Mask an R4030 local interrupt by clearing its enable bit. */
void disable_r4030_irq(struct irq_data *d)
{
        unsigned int mask = ~(1 << (d->irq - JAZZ_IRQ_START));
        unsigned long flags;

        raw_spin_lock_irqsave(&r4030_lock, flags);
        mask &= r4030_read_reg16(JAZZ_IO_IRQ_ENABLE);
        r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, mask);
        raw_spin_unlock_irqrestore(&r4030_lock, flags);
}

static struct irq_chip r4030_irq_type = {
        .name = "R4030",
        .irq_mask = disable_r4030_irq,
        .irq_unmask = enable_r4030_irq,
};

/*
 * Register the R4030 irq_chip for the local IRQ range, then mask all
 * local interrupts and drain any pending/error state in the chip.
 */
void __init init_r4030_ints(void)
{
        int i;

        for (i = JAZZ_IRQ_START; i <= JAZZ_IRQ_END; i++)
                irq_set_chip_and_handler(i, &r4030_irq_type, handle_level_irq);

        r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0);
        r4030_read_reg16(JAZZ_IO_IRQ_SOURCE);           /* clear pending IRQs */
        r4030_read_reg32(JAZZ_R4030_INVAL_ADDR);        /* clear error bits */
}

/*
 * On systems with i8259-style interrupt controllers we assume for
 * driver compatibility reasons interrupts 0 - 15 to be the i8259
 * interrupts even if the hardware uses a different interrupt numbering.
 */
void __init arch_init_irq(void)
{
        /*
         * this is a hack to get back the still needed wired mapping
         * killed by init_mm()
         */

        /* Map 0xe0000000 -> 0x0:800005C0, 0xe0010000 -> 0x1:30000580 */
        add_wired_entry(0x02000017, 0x03c00017, 0xe0000000, PM_64K);
        /* Map 0xe2000000 -> 0x0:900005C0, 0xe3010000 -> 0x0:910005C0 */
        add_wired_entry(0x02400017, 0x02440017, 0xe2000000, PM_16M);
        /* Map 0xe4000000 -> 0x0:600005C0, 0xe4100000 -> 400005C0 */
        add_wired_entry(0x01800017, 0x01000017, 0xe4000000, PM_4M);

        init_i8259_irqs();                      /* Integrated i8259  */
        mips_cpu_irq_init();
        init_r4030_ints();

        change_c0_status(ST0_IM, IE_IRQ2 | IE_IRQ1);
}

/*
 * Top-level interrupt dispatch: priority order is timer (IRQ4),
 * EISA/i8259 (IRQ2), then R4030 local interrupts (IRQ1).
 */
asmlinkage void plat_irq_dispatch(void)
{
        unsigned int pending = read_c0_cause() & read_c0_status();
        unsigned int irq;

        if (pending & IE_IRQ4) {
                /* Reading the timer register acknowledges the interrupt. */
                r4030_read_reg32(JAZZ_TIMER_REGISTER);
                do_IRQ(JAZZ_TIMER_IRQ);
        } else if (pending & IE_IRQ2) {
                irq = *(volatile u8 *)JAZZ_EISA_IRQ_ACK;
                do_IRQ(irq);
        } else if (pending & IE_IRQ1) {
                irq = *(volatile u8 *)JAZZ_IO_IRQ_SOURCE >> 2;
                if (likely(irq > 0))
                        do_IRQ(irq + JAZZ_IRQ_START - 1);
                else
                        panic("Unimplemented loc_no_irq handler");
        }
}

/* The R4030 timer runs at a fixed rate; there is no mode to program. */
static void r4030_set_mode(enum clock_event_mode mode,
                           struct clock_event_device *evt)
{
        /* Nothing to do ...  */
}

struct clock_event_device r4030_clockevent = {
        .name           = "r4030",
        .features       = CLOCK_EVT_FEAT_PERIODIC,
        .rating         = 300,
        .irq            = JAZZ_TIMER_IRQ,
        .set_mode       = r4030_set_mode,
};

/* Forward each timer tick to the registered clockevent handler. */
static irqreturn_t r4030_timer_interrupt(int irq, void *dev_id)
{
        struct clock_event_device *cd = dev_id;

        cd->event_handler(cd);
        return IRQ_HANDLED;
}

static struct irqaction r4030_timer_irqaction = {
        .handler        = r4030_timer_interrupt,
        .flags          = IRQF_TIMER,
        .name           = "R4030 timer",
};

/*
 * Register the R4030 clockevent and its interrupt, program the timer
 * divider for a 100Hz tick, and set up the legacy PIT.
 */
void __init plat_time_init(void)
{
        struct clock_event_device *cd = &r4030_clockevent;
        struct irqaction *action = &r4030_timer_irqaction;
        unsigned int cpu = smp_processor_id();

        BUG_ON(HZ != 100);

        cd->cpumask             = cpumask_of(cpu);
        clockevents_register_device(cd);
        action->dev_id = cd;
        setup_irq(JAZZ_TIMER_IRQ, action);

        /*
         * Set clock to 100Hz.
         *
         * The R4030 timer receives an input clock of 1kHz which is divieded by
         * a programmable 4-bit divider.  This makes it fairly inflexible.
         */
        r4030_write_reg32(JAZZ_TIMER_INTERVAL, 9);
        setup_pit_timer();
}
gpl-2.0
klaudyuxxx/2.6.35.y-P500
arch/ia64/hp/common/hwsw_iommu.c
4383
1845
/*
 * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
 *   Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * This is a pseudo I/O MMU which dispatches to the hardware I/O MMU
 * whenever possible.  We assume that the hardware I/O MMU requires
 * full 32-bit addressability, as is the case, e.g., for HP zx1-based
 * systems (there, the I/O MMU window is mapped at 3-4GB).  If a
 * device doesn't provide full 32-bit addressability, we fall back on
 * the sw I/O TLB.  This is good enough to let us support broken
 * hardware such as soundcards which have a DMA engine that can
 * address only 28 bits.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <asm/machvec.h>

extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;

/* swiotlb declarations & definitions: */
extern int swiotlb_late_init_with_default_size (size_t size);

/*
 * Decide, from the device structure alone, whether this device must go
 * through the software I/O TLB.  Anything else would be unreliable or
 * would be too intrusive.  A device with no dma_mask never uses swiotlb;
 * otherwise swiotlb is used exactly when the SBA hardware I/O MMU
 * cannot support the device's DMA mask.
 */
static inline int use_swiotlb(struct device *dev)
{
	if (!dev)
		return 0;
	if (!dev->dma_mask)
		return 0;
	return !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
}

/*
 * Return the DMA ops to use for @dev: the software I/O TLB ops when the
 * hardware I/O MMU cannot address the device, the SBA ops otherwise.
 */
struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
{
	return use_swiotlb(dev) ? &swiotlb_dma_ops : &sba_dma_ops;
}
EXPORT_SYMBOL(hwsw_dma_get_ops);

/*
 * Initialize the software I/O TLB fallback.  On failure, a generic
 * kernel reverts to the hpzx1 machine vector (hardware-only DMA); a
 * non-generic kernel cannot continue and panics.
 */
void __init
hwsw_init (void)
{
	/* default to a smallish 2MB sw I/O TLB */
	if (swiotlb_late_init_with_default_size (2 * (1<<20)) == 0)
		return;

#ifdef CONFIG_IA64_GENERIC
	/* Better to have normal DMA than panic */
	printk(KERN_WARNING "%s: Failed to initialize software I/O TLB,"
	       " reverting to hpzx1 platform vector\n", __func__);
	machvec_init("hpzx1");
#else
	panic("Unable to initialize software I/O TLB services");
#endif
}
gpl-2.0
iAMr00t/android_kernel_lge_ls840
arch/ia64/hp/common/hwsw_iommu.c
4383
1845
/*
 * Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
 *   Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * This is a pseudo I/O MMU which dispatches to the hardware I/O MMU
 * whenever possible.  We assume that the hardware I/O MMU requires
 * full 32-bit addressability, as is the case, e.g., for HP zx1-based
 * systems (there, the I/O MMU window is mapped at 3-4GB).  If a
 * device doesn't provide full 32-bit addressability, we fall back on
 * the sw I/O TLB.  This is good enough to let us support broken
 * hardware such as soundcards which have a DMA engine that can
 * address only 28 bits.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <asm/machvec.h>

extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;

/* swiotlb declarations & definitions: */
extern int swiotlb_late_init_with_default_size (size_t size);

/*
 * Note: we need to make the determination of whether or not to use
 * the sw I/O TLB based purely on the device structure.  Anything else
 * would be unreliable or would be too intrusive.
 *
 * Returns non-zero when the device has a DMA mask that the SBA
 * hardware I/O MMU cannot support, i.e. when the software I/O TLB
 * must be used instead.
 */
static inline int use_swiotlb(struct device *dev)
{
	return dev && dev->dma_mask &&
		!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
}

/* Select the DMA ops for @dev: swiotlb fallback or the SBA hardware IOMMU. */
struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
{
	if (use_swiotlb(dev))
		return &swiotlb_dma_ops;
	return &sba_dma_ops;
}
EXPORT_SYMBOL(hwsw_dma_get_ops);

/*
 * Initialize the software I/O TLB fallback.  If that fails, a generic
 * kernel falls back to the hpzx1 machine vector (hardware DMA only);
 * otherwise the kernel cannot continue and panics.
 */
void __init
hwsw_init (void)
{
	/* default to a smallish 2MB sw I/O TLB */
	if (swiotlb_late_init_with_default_size (2 * (1<<20)) != 0) {
#ifdef CONFIG_IA64_GENERIC
		/* Better to have normal DMA than panic */
		printk(KERN_WARNING "%s: Failed to initialize software I/O TLB,"
		       " reverting to hpzx1 platform vector\n", __func__);
		machvec_init("hpzx1");
#else
		panic("Unable to initialize software I/O TLB services");
#endif
	}
}
gpl-2.0
someone755/android_kernel_sony_msm8974_stock
lib/mpi/mpicoder.c
4895
6763
/* mpicoder.c  -  Coder for the external representation of MPIs
 * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * GnuPG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * GnuPG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */

#include "mpi-internal.h"

/* Upper bound on the size of an externally supplied MPI, as a sanity check. */
#define MAX_EXTERN_MPI_BITS 16384

/*
 * Parse an MPI from its external representation: a 2-byte big-endian
 * bit count followed by ceil(nbits/8) big-endian value bytes.
 *
 * @xbuffer:   source bytes
 * @ret_nread: in: number of bytes available; out: number of bytes consumed
 *
 * Returns a freshly allocated MPI, or NULL on a short/oversized buffer
 * or allocation failure.  Caller owns (and must free) the result.
 */
MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
{
	const uint8_t *buffer = xbuffer;
	int i, j;
	unsigned nbits, nbytes, nlimbs, nread = 0;
	mpi_limb_t a;
	MPI val = NULL;

	if (*ret_nread < 2)
		goto leave;
	/* First two bytes: big-endian bit count. */
	nbits = buffer[0] << 8 | buffer[1];

	if (nbits > MAX_EXTERN_MPI_BITS) {
		pr_info("MPI: mpi too large (%u bits)\n", nbits);
		goto leave;
	}
	buffer += 2;
	nread = 2;

	nbytes = (nbits + 7) / 8;
	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
	val = mpi_alloc(nlimbs);
	if (!val)
		return NULL;
	/* Number of pad bytes needed so the first (most significant) limb
	 * is filled from a partial leading byte group. */
	i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
	i %= BYTES_PER_MPI_LIMB;
	val->nbits = nbits;
	j = val->nlimbs = nlimbs;
	val->sign = 0;
	/* Fill limbs most-significant first, packing bytes big-endian. */
	for (; j > 0; j--) {
		a = 0;
		for (; i < BYTES_PER_MPI_LIMB; i++) {
			if (++nread > *ret_nread) {
				printk
				    ("MPI: mpi larger than buffer nread=%d ret_nread=%d\n",
				     nread, *ret_nread);
				goto leave;
			}
			a <<= 8;
			a |= *buffer++;
		}
		i = 0;
		val->d[j - 1] = a;
	}

leave:
	*ret_nread = nread;
	return val;
}
EXPORT_SYMBOL_GPL(mpi_read_from_buffer);

/****************
 * Make an mpi from a character string.
 *
 * Only "0x"-prefixed hexadecimal input (optionally preceded by '-') is
 * accepted.  Returns 0 on success, -EINVAL for an unsupported base,
 * -ENOMEM on allocation failure.
 * NOTE(review): invalid hex digits make it return 1 (after clearing
 * @val), which is inconsistent with the negative-errno convention used
 * elsewhere in this file — callers must treat any non-zero as failure.
 * The local `hexmode` flag is set but never read afterwards.
 */
int mpi_fromstr(MPI val, const char *str)
{
	int hexmode = 0, sign = 0, prepend_zero = 0, i, j, c, c1, c2;
	unsigned nbits, nbytes, nlimbs;
	mpi_limb_t a;

	if (*str == '-') {
		sign = 1;
		str++;
	}
	if (*str == '0' && str[1] == 'x')
		hexmode = 1;
	else
		return -EINVAL;	/* other bases are not yet supported */
	str += 2;

	nbits = strlen(str) * 4;
	if (nbits % 8)
		prepend_zero = 1;	/* odd digit count: pad with a leading '0' */
	nbytes = (nbits + 7) / 8;
	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
	if (val->alloced < nlimbs)
		if (!mpi_resize(val, nlimbs))
			return -ENOMEM;
	i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
	i %= BYTES_PER_MPI_LIMB;
	j = val->nlimbs = nlimbs;
	val->sign = sign;
	/* Consume two hex digits per byte, most significant limb first. */
	for (; j > 0; j--) {
		a = 0;
		for (; i < BYTES_PER_MPI_LIMB; i++) {
			if (prepend_zero) {
				c1 = '0';
				prepend_zero = 0;
			} else
				c1 = *str++;
			assert(c1);
			c2 = *str++;
			assert(c2);
			if (c1 >= '0' && c1 <= '9')
				c = c1 - '0';
			else if (c1 >= 'a' && c1 <= 'f')
				c = c1 - 'a' + 10;
			else if (c1 >= 'A' && c1 <= 'F')
				c = c1 - 'A' + 10;
			else {
				mpi_clear(val);
				return 1;
			}
			c <<= 4;
			if (c2 >= '0' && c2 <= '9')
				c |= c2 - '0';
			else if (c2 >= 'a' && c2 <= 'f')
				c |= c2 - 'a' + 10;
			else if (c2 >= 'A' && c2 <= 'F')
				c |= c2 - 'A' + 10;
			else {
				mpi_clear(val);
				return 1;
			}
			a <<= 8;
			a |= c;
		}
		i = 0;
		val->d[j - 1] = a;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mpi_fromstr);

/****************
 * Return an allocated buffer with the MPI (msb first).
 * NBYTES receives the length of this buffer.  Caller must free the
 * return string (This function does return a 0 byte buffer with NBYTES
 * set to zero if the value of A is zero.  If sign is not NULL, it will
 * be set to the sign of the A.
 */
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
{
	uint8_t *p, *buffer;
	mpi_limb_t alimb;
	int i;
	unsigned int n;

	if (sign)
		*sign = a->sign;
	*nbytes = n = a->nlimbs * BYTES_PER_MPI_LIMB;
	if (!n)
		n++;		/* avoid zero length allocation */
	p = buffer = kmalloc(n, GFP_KERNEL);
	if (!p)
		return NULL;

	/* Serialize limbs most-significant first, big-endian within a limb. */
	for (i = a->nlimbs - 1; i >= 0; i--) {
		alimb = a->d[i];
#if BYTES_PER_MPI_LIMB == 4
		*p++ = alimb >> 24;
		*p++ = alimb >> 16;
		*p++ = alimb >> 8;
		*p++ = alimb;
#elif BYTES_PER_MPI_LIMB == 8
		*p++ = alimb >> 56;
		*p++ = alimb >> 48;
		*p++ = alimb >> 40;
		*p++ = alimb >> 32;
		*p++ = alimb >> 24;
		*p++ = alimb >> 16;
		*p++ = alimb >> 8;
		*p++ = alimb;
#else
#error please implement for this limb size.
#endif
	}

	/* this is sub-optimal but we need to do the shift operation
	 * because the caller has to free the returned buffer */
	for (p = buffer; !*p && *nbytes; p++, --*nbytes)
		;
	if (p != buffer)
		memmove(buffer, p, *nbytes);

	return buffer;
}
EXPORT_SYMBOL_GPL(mpi_get_buffer);

/****************
 * Use BUFFER to update MPI.
 *
 * Rebuilds @a from @nbytes big-endian bytes at @xbuffer with the given
 * @sign.  Returns 0 on success, -ENOMEM if the MPI cannot be resized.
 * Limbs are filled least-significant first by walking the buffer from
 * its last byte backwards; a final partial limb (when nbytes is not a
 * multiple of the limb size) is assembled byte-by-byte with bounds
 * checks against the buffer start.
 */
int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign)
{
	const uint8_t *buffer = xbuffer, *p;
	mpi_limb_t alimb;
	int nlimbs;
	int i;

	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
	if (RESIZE_IF_NEEDED(a, nlimbs) < 0)
		return -ENOMEM;
	a->sign = sign;

	/* Whole limbs: consume BYTES_PER_MPI_LIMB bytes from the tail. */
	for (i = 0, p = buffer + nbytes - 1; p >= buffer + BYTES_PER_MPI_LIMB;) {
#if BYTES_PER_MPI_LIMB == 4
		alimb = (mpi_limb_t) *p--;
		alimb |= (mpi_limb_t) *p-- << 8;
		alimb |= (mpi_limb_t) *p-- << 16;
		alimb |= (mpi_limb_t) *p-- << 24;
#elif BYTES_PER_MPI_LIMB == 8
		alimb = (mpi_limb_t) *p--;
		alimb |= (mpi_limb_t) *p-- << 8;
		alimb |= (mpi_limb_t) *p-- << 16;
		alimb |= (mpi_limb_t) *p-- << 24;
		alimb |= (mpi_limb_t) *p-- << 32;
		alimb |= (mpi_limb_t) *p-- << 40;
		alimb |= (mpi_limb_t) *p-- << 48;
		alimb |= (mpi_limb_t) *p-- << 56;
#else
#error please implement for this limb size.
#endif
		a->d[i++] = alimb;
	}
	/* Leftover partial limb, if any bytes remain before the buffer start. */
	if (p >= buffer) {
#if BYTES_PER_MPI_LIMB == 4
		alimb = *p--;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 8;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 16;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 24;
#elif BYTES_PER_MPI_LIMB == 8
		alimb = (mpi_limb_t) *p--;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 8;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 16;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 24;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 32;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 40;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 48;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 56;
#else
#error please implement for this limb size.
#endif
		a->d[i++] = alimb;
	}
	a->nlimbs = i;

	/* Internal consistency check: we must have filled exactly nlimbs. */
	if (i != nlimbs) {
		pr_emerg("MPI: mpi_set_buffer: Assertion failed (%d != %d)", i,
		       nlimbs);
		BUG();
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mpi_set_buffer);
gpl-2.0
Shelnutt2/android_kernel_lge_gee_3.4
drivers/staging/comedi/drivers/das16m1.c
4895
21368
/* comedi/drivers/das16m1.c CIO-DAS16/M1 driver Author: Frank Mori Hess, based on code from the das16 driver. Copyright (C) 2001 Frank Mori Hess <fmhess@users.sourceforge.net> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* Driver: das16m1 Description: CIO-DAS16/M1 Author: Frank Mori Hess <fmhess@users.sourceforge.net> Devices: [Measurement Computing] CIO-DAS16/M1 (cio-das16/m1) Status: works This driver supports a single board - the CIO-DAS16/M1. As far as I know, there are no other boards that have the same register layout. Even the CIO-DAS16/M1/16 is significantly different. I was _barely_ able to reach the full 1 MHz capability of this board, using a hard real-time interrupt (set the TRIG_RT flag in your struct comedi_cmd and use rtlinux or RTAI). The board can't do dma, so the bottleneck is pulling the data across the ISA bus. I timed the interrupt handler, and it took my computer ~470 microseconds to pull 512 samples from the board. So at 1 Mhz sampling rate, expect your CPU to be spending almost all of its time in the interrupt handler. This board has some unusual restrictions for its channel/gain list. 
If the list has 2 or more channels in it, then two conditions must be satisfied: (1) - even/odd channels must appear at even/odd indices in the list (2) - the list must have an even number of entries. Options: [0] - base io address [1] - irq (optional, but you probably want it) irq can be omitted, although the cmd interface will not work without it. */ #include <linux/ioport.h> #include <linux/interrupt.h> #include "../comedidev.h" #include "8255.h" #include "8253.h" #include "comedi_fc.h" #define DAS16M1_SIZE 16 #define DAS16M1_SIZE2 8 #define DAS16M1_XTAL 100 /* 10 MHz master clock */ #define FIFO_SIZE 1024 /* 1024 sample fifo */ /* CIO-DAS16_M1.pdf "cio-das16/m1" 0 a/d bits 0-3, mux start 12 bit 1 a/d bits 4-11 unused 2 status control 3 di 4 bit do 4 bit 4 unused clear interrupt 5 interrupt, pacer 6 channel/gain queue address 7 channel/gain queue data 89ab 8254 cdef 8254 400 8255 404-407 8254 */ #define DAS16M1_AI 0 /* 16-bit wide register */ #define AI_CHAN(x) ((x) & 0xf) #define DAS16M1_CS 2 #define EXT_TRIG_BIT 0x1 #define OVRUN 0x20 #define IRQDATA 0x80 #define DAS16M1_DIO 3 #define DAS16M1_CLEAR_INTR 4 #define DAS16M1_INTR_CONTROL 5 #define EXT_PACER 0x2 #define INT_PACER 0x3 #define PACER_MASK 0x3 #define INTE 0x80 #define DAS16M1_QUEUE_ADDR 6 #define DAS16M1_QUEUE_DATA 7 #define Q_CHAN(x) ((x) & 0x7) #define Q_RANGE(x) (((x) & 0xf) << 4) #define UNIPOLAR 0x40 #define DAS16M1_8254_FIRST 0x8 #define DAS16M1_8254_FIRST_CNTRL 0xb #define TOTAL_CLEAR 0x30 #define DAS16M1_8254_SECOND 0xc #define DAS16M1_82C55 0x400 #define DAS16M1_8254_THIRD 0x404 static const struct comedi_lrange range_das16m1 = { 9, { BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), BIP_RANGE(0.625), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25), BIP_RANGE(10), } }; static int das16m1_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das16m1_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, 
struct comedi_insn *insn, unsigned int *data); static int das16m1_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das16m1_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int das16m1_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s); static int das16m1_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s); static irqreturn_t das16m1_interrupt(int irq, void *d); static void das16m1_handler(struct comedi_device *dev, unsigned int status); static unsigned int das16m1_set_pacer(struct comedi_device *dev, unsigned int ns, int round_flag); static int das16m1_irq_bits(unsigned int irq); struct das16m1_board { const char *name; unsigned int ai_speed; }; static const struct das16m1_board das16m1_boards[] = { { .name = "cio-das16/m1", /* CIO-DAS16_M1.pdf */ .ai_speed = 1000, /* 1MHz max speed */ }, }; static int das16m1_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int das16m1_detach(struct comedi_device *dev); static struct comedi_driver driver_das16m1 = { .driver_name = "das16m1", .module = THIS_MODULE, .attach = das16m1_attach, .detach = das16m1_detach, .board_name = &das16m1_boards[0].name, .num_names = ARRAY_SIZE(das16m1_boards), .offset = sizeof(das16m1_boards[0]), }; struct das16m1_private_struct { unsigned int control_state; volatile unsigned int adc_count; /* number of samples completed */ /* initial value in lower half of hardware conversion counter, * needed to keep track of whether new count has been loaded into * counter yet (loaded by first sample conversion) */ u16 initial_hw_count; short ai_buffer[FIFO_SIZE]; unsigned int do_bits; /* saves status of digital output bits */ unsigned int divisor1; /* divides master clock to obtain conversion speed */ unsigned int divisor2; /* divides master clock to obtain conversion 
speed */
};

/* Convenience accessors for the comedi private/board data hung off "dev". */
#define devpriv ((struct das16m1_private_struct *)(dev->private))
#define thisboard ((const struct das16m1_board *)(dev->board_ptr))

static int __init driver_das16m1_init_module(void)
{
	return comedi_driver_register(&driver_das16m1);
}

static void __exit driver_das16m1_cleanup_module(void)
{
	comedi_driver_unregister(&driver_das16m1);
}

module_init(driver_das16m1_init_module);
module_exit(driver_das16m1_cleanup_module);

/*
 * Convert a raw 16-bit sample register word into a 12-bit sample value:
 * bits 4-15 carry the conversion result (bits 0-3 carry the channel
 * number, see AI_CHAN()).
 */
static inline short munge_sample(short data)
{
	return (data >> 4) & 0xfff;
}

/*
 * Comedi cmdtest handler: validate "cmd" with the standard five-step
 * procedure, fixing up any out-of-range arguments in place.  Returns the
 * step number (1-5) at which a problem was found, or 0 if the command is
 * acceptable as-is.
 */
static int das16m1_cmd_test(struct comedi_device *dev,
			    struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
	unsigned int err = 0, tmp, i;

	/* step 1: make sure triggers are valid (trivially) */
	tmp = cmd->start_src;
	cmd->start_src &= TRIG_NOW | TRIG_EXT;
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;

	tmp = cmd->scan_begin_src;
	cmd->scan_begin_src &= TRIG_FOLLOW;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;

	tmp = cmd->convert_src;
	cmd->convert_src &= TRIG_TIMER | TRIG_EXT;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;

	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;

	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;

	if (err)
		return 1;

	/*
	 * step 2: make sure trigger sources are unique and mutually
	 * compatible
	 */
	if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
		err++;
	if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT)
		err++;
	if (cmd->convert_src != TRIG_TIMER && cmd->convert_src != TRIG_EXT)
		err++;

	if (err)
		return 2;

	/* step 3: make sure arguments are trivially compatible */
	if (cmd->start_arg != 0) {
		cmd->start_arg = 0;
		err++;
	}
	if (cmd->scan_begin_src == TRIG_FOLLOW) {
		/* internal trigger */
		if (cmd->scan_begin_arg != 0) {
			cmd->scan_begin_arg = 0;
			err++;
		}
	}
	if (cmd->convert_src == TRIG_TIMER) {
		/* board's max sample rate bounds the convert period */
		if (cmd->convert_arg < thisboard->ai_speed) {
			cmd->convert_arg = thisboard->ai_speed;
			err++;
		}
	}
	if (cmd->scan_end_arg != cmd->chanlist_len) {
		cmd->scan_end_arg = cmd->chanlist_len;
		err++;
	}
	if (cmd->stop_src == TRIG_COUNT) {
		/* any count is allowed */
	} else {
		/* TRIG_NONE */
		if (cmd->stop_arg != 0) {
			cmd->stop_arg = 0;
			err++;
		}
	}

	if (err)
		return 3;

	/* step 4: fix up arguments */
	if (cmd->convert_src == TRIG_TIMER) {
		tmp = cmd->convert_arg;
		/* calculate counter values that give desired timing */
		i8253_cascade_ns_to_timer_2div(DAS16M1_XTAL,
					       &(devpriv->divisor1),
					       &(devpriv->divisor2),
					       &(cmd->convert_arg),
					       cmd->flags & TRIG_ROUND_MASK);
		if (tmp != cmd->convert_arg)
			err++;
	}

	if (err)
		return 4;

	/*
	 * check chanlist against board's peculiarities: with more than one
	 * entry, channel parity must match queue-slot parity and the list
	 * must have an even length (see driver comment block at the top).
	 */
	if (cmd->chanlist && cmd->chanlist_len > 1) {
		for (i = 0; i < cmd->chanlist_len; i++) {
			/* even/odd channels must go into even/odd queue addresses */
			if ((i % 2) != (CR_CHAN(cmd->chanlist[i]) % 2)) {
				comedi_error(dev, "bad chanlist:\n"
					     " even/odd channels must go have even/odd chanlist indices");
				err++;
			}
		}
		if ((cmd->chanlist_len % 2) != 0) {
			comedi_error(dev,
				     "chanlist must be of even length or length 1");
			err++;
		}
	}

	if (err)
		return 5;

	return 0;
}

/*
 * Comedi command handler: program the channel/gain queue and the pacer,
 * then enable interrupts so das16m1_interrupt()/das16m1_handler() can
 * drain the fifo.  Requires an irq to have been configured at attach.
 */
static int das16m1_cmd_exec(struct comedi_device *dev,
			    struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	unsigned int byte, i;

	if (dev->irq == 0) {
		comedi_error(dev, "irq required to execute comedi_cmd");
		return -1;
	}

	/* disable interrupts and internal pacer */
	devpriv->control_state &= ~INTE & ~PACER_MASK;
	outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);

	/* set software count */
	devpriv->adc_count = 0;

	/*
	 * Initialize lower half of hardware counter, used to determine how
	 * many samples are in fifo.  Value doesn't actually load into
	 * counter until counter's next clock (the next a/d conversion).
	 */
	i8254_load(dev->iobase + DAS16M1_8254_FIRST, 0, 1, 0, 2);
	/*
	 * remember current reading of counter so we know when counter has
	 * actually been loaded
	 */
	devpriv->initial_hw_count =
	    i8254_read(dev->iobase + DAS16M1_8254_FIRST, 0, 1);

	/* setup channel/gain queue */
	for (i = 0; i < cmd->chanlist_len; i++) {
		outb(i, dev->iobase + DAS16M1_QUEUE_ADDR);
		byte = Q_CHAN(CR_CHAN(cmd->chanlist[i])) |
		       Q_RANGE(CR_RANGE(cmd->chanlist[i]));
		outb(byte, dev->iobase + DAS16M1_QUEUE_DATA);
	}

	/* set counter mode and counts */
	cmd->convert_arg =
	    das16m1_set_pacer(dev, cmd->convert_arg,
			      cmd->flags & TRIG_ROUND_MASK);

	/* set control & status register */
	byte = 0;
	/*
	 * if we are using external start trigger (also board dislikes having
	 * both start and conversion triggers external simultaneously)
	 */
	if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT)
		byte |= EXT_TRIG_BIT;
	outb(byte, dev->iobase + DAS16M1_CS);

	/* clear interrupt bit */
	outb(0, dev->iobase + DAS16M1_CLEAR_INTR);

	/* enable interrupts and internal pacer */
	devpriv->control_state &= ~PACER_MASK;
	if (cmd->convert_src == TRIG_TIMER)
		devpriv->control_state |= INT_PACER;
	else
		devpriv->control_state |= EXT_PACER;
	devpriv->control_state |= INTE;
	outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);

	return 0;
}

/* Cancel handler: mask the interrupt and stop the internal pacer. */
static int das16m1_cancel(struct comedi_device *dev,
			  struct comedi_subdevice *s)
{
	devpriv->control_state &= ~INTE & ~PACER_MASK;
	outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
	return 0;
}

/*
 * Single-shot analog input read: program one queue entry, then trigger and
 * poll for each of insn->n conversions.  Returns the number of samples
 * read, or -ETIME if a conversion never completed.
 */
static int das16m1_ai_rinsn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	int i, n;
	int byte;
	const int timeout = 1000;	/* busy-wait iterations per sample */

	/* disable interrupts and internal pacer */
	devpriv->control_state &= ~INTE & ~PACER_MASK;
	outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);

	/* setup channel/gain queue */
	outb(0, dev->iobase + DAS16M1_QUEUE_ADDR);
	byte = Q_CHAN(CR_CHAN(insn->chanspec)) |
	       Q_RANGE(CR_RANGE(insn->chanspec));
	outb(byte, dev->iobase + DAS16M1_QUEUE_DATA);

	for (n = 0; n < insn->n; n++) {
		/* clear IRQDATA bit */
		outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
		/* trigger conversion */
		outb(0, dev->iobase);

		/* wait for IRQDATA to signal conversion complete */
		for (i = 0; i < timeout; i++) {
			if (inb(dev->iobase + DAS16M1_CS) & IRQDATA)
				break;
		}
		if (i == timeout) {
			comedi_error(dev, "timeout");
			return -ETIME;
		}
		data[n] = munge_sample(inw(dev->iobase));
	}

	return n;
}

/* Digital input: read the 4 DI lines from the low nibble of DIO port. */
static int das16m1_di_rbits(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	unsigned int bits;

	bits = inb(dev->iobase + DAS16M1_DIO) & 0xf;
	data[1] = bits;
	data[0] = 0;

	return 2;
}

/*
 * Digital output: update the 4 DO lines.  data[0] is the mask of bits to
 * change, data[1] the new values; the current state is cached in
 * devpriv->do_bits because the port is write-only.
 */
static int das16m1_do_wbits(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	unsigned int wbits;

	/* only set bits that have been masked */
	data[0] &= 0xf;
	wbits = devpriv->do_bits;
	/* zero bits that have been masked */
	wbits &= ~data[0];
	/* set masked bits */
	wbits |= data[0] & data[1];
	devpriv->do_bits = wbits;
	data[1] = wbits;

	outb(devpriv->do_bits, dev->iobase + DAS16M1_DIO);

	return 2;
}

/* Poll handler: drain whatever is in the fifo under the dev spinlock. */
static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s)
{
	unsigned long flags;
	unsigned int status;

	/* prevent race with interrupt handler */
	spin_lock_irqsave(&dev->spinlock, flags);
	status = inb(dev->iobase + DAS16M1_CS);
	das16m1_handler(dev, status);
	spin_unlock_irqrestore(&dev->spinlock, flags);

	return s->async->buf_write_count - s->async->buf_read_count;
}

/* Interrupt handler: validate the source, then hand off to the common
 * fifo-draining code in das16m1_handler(). */
static irqreturn_t das16m1_interrupt(int irq, void *d)
{
	int status;
	struct comedi_device *dev = d;

	if (dev->attached == 0) {
		comedi_error(dev, "premature interrupt");
		return IRQ_HANDLED;
	}
	/* prevent race with comedi_poll() */
	spin_lock(&dev->spinlock);

	status = inb(dev->iobase + DAS16M1_CS);

	if ((status & (IRQDATA | OVRUN)) == 0) {
		comedi_error(dev, "spurious interrupt");
		spin_unlock(&dev->spinlock);
		return IRQ_NONE;
	}

	das16m1_handler(dev, status);

	/* clear interrupt */
	outb(0, dev->iobase + DAS16M1_CLEAR_INTR);

	spin_unlock(&dev->spinlock);
	return IRQ_HANDLED;
}

/* Munge every raw fifo word in "array" into a 12-bit sample in place. */
static void munge_sample_array(short *array, unsigned int num_elements)
{
	unsigned int i;

	for (i = 0; i < num_elements; i++)
		array[i] = munge_sample(array[i]);
}

/*
 * Common fifo-draining code, called from both the interrupt handler and
 * das16m1_poll() with dev->spinlock held.  Works out how many samples the
 * fifo holds from the 8254 conversion counter, copies them into the comedi
 * buffer, and signals end-of-acquisition or overrun through async->events.
 */
static void das16m1_handler(struct comedi_device *dev, unsigned int status)
{
	struct comedi_subdevice *s;
	struct comedi_async *async;
	struct comedi_cmd *cmd;
	u16 num_samples;
	u16 hw_counter;

	s = dev->read_subdev;
	async = s->async;
	async->events = 0;
	cmd = &async->cmd;

	/* figure out how many samples are in fifo */
	hw_counter = i8254_read(dev->iobase + DAS16M1_8254_FIRST, 0, 1);
	/*
	 * make sure hardware counter reading is not bogus due to initial
	 * value not having been loaded yet
	 */
	if (devpriv->adc_count == 0 &&
	    hw_counter == devpriv->initial_hw_count) {
		num_samples = 0;
	} else {
		/*
		 * The calculation of num_samples looks odd, but it uses the
		 * following facts.  The 16 bit hardware counter is
		 * initialized with a value of zero (which really means
		 * 0x1000).  The counter decrements by one on each conversion
		 * (when the counter decrements from zero it goes to 0xffff).
		 * num_samples is a 16 bit variable, so it will roll over in
		 * a similar fashion to the hardware counter.  Work it out,
		 * and this is what you get.
		 */
		num_samples = -hw_counter - devpriv->adc_count;
	}
	/* check if we only need some of the points */
	if (cmd->stop_src == TRIG_COUNT) {
		if (num_samples > cmd->stop_arg * cmd->chanlist_len)
			num_samples = cmd->stop_arg * cmd->chanlist_len;
	}
	/* make sure we dont try to get too many points if fifo has overrun */
	if (num_samples > FIFO_SIZE)
		num_samples = FIFO_SIZE;
	insw(dev->iobase, devpriv->ai_buffer, num_samples);
	munge_sample_array(devpriv->ai_buffer, num_samples);
	cfc_write_array_to_buffer(s, devpriv->ai_buffer,
				  num_samples * sizeof(short));
	devpriv->adc_count += num_samples;

	if (cmd->stop_src == TRIG_COUNT) {
		if (devpriv->adc_count >= cmd->stop_arg * cmd->chanlist_len) {
			/* end of acquisition */
			das16m1_cancel(dev, s);
			async->events |= COMEDI_CB_EOA;
		}
	}

	/*
	 * this probably won't catch overruns since the card doesn't generate
	 * overrun interrupts, but we might as well try
	 */
	if (status & OVRUN) {
		das16m1_cancel(dev, s);
		async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
		comedi_error(dev, "fifo overflow");
	}

	comedi_event(dev, s);
}

/* This function takes a time in nanoseconds and sets the
 * 2 pacer clocks to the closest frequency possible.  It also
 * returns the actual sampling period.
*/ static unsigned int das16m1_set_pacer(struct comedi_device *dev, unsigned int ns, int rounding_flags) { i8253_cascade_ns_to_timer_2div(DAS16M1_XTAL, &(devpriv->divisor1), &(devpriv->divisor2), &ns, rounding_flags & TRIG_ROUND_MASK); /* Write the values of ctr1 and ctr2 into counters 1 and 2 */ i8254_load(dev->iobase + DAS16M1_8254_SECOND, 0, 1, devpriv->divisor1, 2); i8254_load(dev->iobase + DAS16M1_8254_SECOND, 0, 2, devpriv->divisor2, 2); return ns; } static int das16m1_irq_bits(unsigned int irq) { int ret; switch (irq) { case 10: ret = 0x0; break; case 11: ret = 0x1; break; case 12: ret = 0x2; break; case 15: ret = 0x3; break; case 2: ret = 0x4; break; case 3: ret = 0x5; break; case 5: ret = 0x6; break; case 7: ret = 0x7; break; default: return -1; break; } return ret << 4; } /* * Options list: * 0 I/O base * 1 IRQ */ static int das16m1_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret; unsigned int irq; unsigned long iobase; iobase = it->options[0]; ret = alloc_private(dev, sizeof(struct das16m1_private_struct)); if (ret < 0) return ret; dev->board_name = thisboard->name; if (!request_region(iobase, DAS16M1_SIZE, driver_das16m1.driver_name)) { comedi_error(dev, "I/O port conflict\n"); return -EIO; } if (!request_region(iobase + DAS16M1_82C55, DAS16M1_SIZE2, driver_das16m1.driver_name)) { release_region(iobase, DAS16M1_SIZE); comedi_error(dev, "I/O port conflict\n"); return -EIO; } dev->iobase = iobase; /* now for the irq */ irq = it->options[1]; /* make sure it is valid */ if (das16m1_irq_bits(irq) >= 0) { ret = request_irq(irq, das16m1_interrupt, 0, driver_das16m1.driver_name, dev); if (ret < 0) return ret; dev->irq = irq; printk ("irq %u\n", irq); } else if (irq == 0) { printk (", no irq\n"); } else { comedi_error(dev, "invalid irq\n" " valid irqs are 2, 3, 5, 7, 10, 11, 12, or 15\n"); return -EINVAL; } ret = alloc_subdevices(dev, 4); if (ret < 0) return ret; s = dev->subdevices + 0; dev->read_subdev = s; 
/* ai */ s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_CMD_READ; s->n_chan = 8; s->subdev_flags = SDF_DIFF; s->len_chanlist = 256; s->maxdata = (1 << 12) - 1; s->range_table = &range_das16m1; s->insn_read = das16m1_ai_rinsn; s->do_cmdtest = das16m1_cmd_test; s->do_cmd = das16m1_cmd_exec; s->cancel = das16m1_cancel; s->poll = das16m1_poll; s = dev->subdevices + 1; /* di */ s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 4; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = das16m1_di_rbits; s = dev->subdevices + 2; /* do */ s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = 4; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = das16m1_do_wbits; s = dev->subdevices + 3; /* 8255 */ subdev_8255_init(dev, s, NULL, dev->iobase + DAS16M1_82C55); /* disable upper half of hardware conversion counter so it doesn't mess with us */ outb(TOTAL_CLEAR, dev->iobase + DAS16M1_8254_FIRST_CNTRL); /* initialize digital output lines */ outb(devpriv->do_bits, dev->iobase + DAS16M1_DIO); /* set the interrupt level */ if (dev->irq) devpriv->control_state = das16m1_irq_bits(dev->irq); else devpriv->control_state = 0; outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); return 0; } static int das16m1_detach(struct comedi_device *dev) { /* das16m1_reset(dev); */ if (dev->subdevices) subdev_8255_cleanup(dev, dev->subdevices + 3); if (dev->irq) free_irq(dev->irq, dev); if (dev->iobase) { release_region(dev->iobase, DAS16M1_SIZE); release_region(dev->iobase + DAS16M1_82C55, DAS16M1_SIZE2); } return 0; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
slz/delidded-kernel-note3
drivers/staging/comedi/drivers/ke_counter.c
4895
8308
/*
 * comedi/drivers/ke_counter.c
 * Comedi driver for Kolter-Electronic PCI Counter 1 Card
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * Driver: ke_counter
 * Description: Driver for Kolter Electronic Counter Card
 * Devices: [Kolter Electronic] PCI Counter Card (ke_counter)
 * Author: Michael Hillmann
 * Updated: Mon, 14 Apr 2008 15:42:42 +0100
 * Status: tested
 *
 * Configuration Options:
 *   [0] - PCI bus of device (optional)
 *   [1] - PCI slot of device (optional)
 *   If bus/slot is not specified, the first supported PCI device found
 *   will be used.
 *
 * This driver is a simple driver to read the counter values from Kolter
 * Electronic PCI Counter Card.
 */

#include "../comedidev.h"

#include "comedi_pci.h"

#define CNT_DRIVER_NAME "ke_counter"
#define PCI_VENDOR_ID_KOLTER 0x1001
#define CNT_CARD_DEVICE_ID 0x0014

/*-- function prototypes ----------------------------------------------------*/

static int cnt_attach(struct comedi_device *dev, struct comedi_devconfig *it);
static int cnt_detach(struct comedi_device *dev);

static DEFINE_PCI_DEVICE_TABLE(cnt_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID) },
	{0}
};

MODULE_DEVICE_TABLE(pci, cnt_pci_table);

/*-- board specification structure ------------------------------------------*/

struct cnt_board_struct {
	const char *name;	/* board name for comedi */
	int device_id;		/* PCI device id */
	int cnt_channel_nbr;	/* number of counter channels */
	int cnt_bits;		/* counter width in bits */
};

static const struct cnt_board_struct cnt_boards[] = {
	{
	 .name = CNT_DRIVER_NAME,
	 .device_id = CNT_CARD_DEVICE_ID,
	 .cnt_channel_nbr = 3,
	 .cnt_bits = 24}
};

#define cnt_board_nbr (sizeof(cnt_boards)/sizeof(struct cnt_board_struct))

/*-- device private structure -----------------------------------------------*/

struct cnt_device_private {
	struct pci_dev *pcidev;	/* PCI device claimed at attach time */
};

#define devpriv ((struct cnt_device_private *)dev->private)

static struct comedi_driver cnt_driver = {
	.driver_name = CNT_DRIVER_NAME,
	.module = THIS_MODULE,
	.attach = cnt_attach,
	.detach = cnt_detach,
};

static int __devinit cnt_driver_pci_probe(struct pci_dev *dev,
					  const struct pci_device_id *ent)
{
	return comedi_pci_auto_config(dev, cnt_driver.driver_name);
}

static void __devexit cnt_driver_pci_remove(struct pci_dev *dev)
{
	comedi_pci_auto_unconfig(dev);
}

static struct pci_driver cnt_driver_pci_driver = {
	.id_table = cnt_pci_table,
	.probe = &cnt_driver_pci_probe,
	.remove = __devexit_p(&cnt_driver_pci_remove)
};

static int __init cnt_driver_init_module(void)
{
	int retval;

	retval = comedi_driver_register(&cnt_driver);
	if (retval < 0)
		return retval;

	cnt_driver_pci_driver.name = (char *)cnt_driver.driver_name;
	return pci_register_driver(&cnt_driver_pci_driver);
}

static void __exit cnt_driver_cleanup_module(void)
{
	pci_unregister_driver(&cnt_driver_pci_driver);
	comedi_driver_unregister(&cnt_driver);
}

module_init(cnt_driver_init_module);
module_exit(cnt_driver_cleanup_module);

/*-- counter write ----------------------------------------------------------*/

/* This should be used only for resetting the counters; maybe it is better
   to make a special command 'reset'.  The 32-bit value is written one byte
   at a time to the per-channel register block (stride 0x20). */
static int cnt_winsn(struct comedi_device *dev,
		     struct comedi_subdevice *s, struct comedi_insn *insn,
		     unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);

	outb((unsigned char)((data[0] >> 24) & 0xff),
	     dev->iobase + chan * 0x20 + 0x10);
	outb((unsigned char)((data[0] >> 16) & 0xff),
	     dev->iobase + chan * 0x20 + 0x0c);
	outb((unsigned char)((data[0] >> 8) & 0xff),
	     dev->iobase + chan * 0x20 + 0x08);
	outb((unsigned char)((data[0] >> 0) & 0xff),
	     dev->iobase + chan * 0x20 + 0x04);

	/* return the number of samples written */
	return 1;
}

/*-- counter read -----------------------------------------------------------*/

static int cnt_rinsn(struct comedi_device *dev,
		     struct comedi_subdevice *s, struct comedi_insn *insn,
		     unsigned int *data)
{
	unsigned char a0, a1, a2, a3, a4;
	int chan = CR_CHAN(insn->chanspec);
	int result;

	/* NOTE(review): a0 is read but not used in the result; presumably
	 * reading offset 0 latches the counter for the following byte reads -
	 * confirm against the card documentation. */
	a0 = inb(dev->iobase + chan * 0x20);
	a1 = inb(dev->iobase + chan * 0x20 + 0x04);
	a2 = inb(dev->iobase + chan * 0x20 + 0x08);
	a3 = inb(dev->iobase + chan * 0x20 + 0x0c);
	a4 = inb(dev->iobase + chan * 0x20 + 0x10);

	/* assemble the 24-bit count from the three data bytes */
	result = (a1 + (a2 * 256) + (a3 * 65536));
	/* a4 appears to be a sign/borrow byte; when set, the raw count is
	 * offset by s->maxdata to produce a negative value.  NOTE(review):
	 * two's-complement conversion would normally subtract maxdata + 1 -
	 * verify the intended encoding against the hardware manual. */
	if (a4 > 0)
		result = result - s->maxdata;

	*data = (unsigned int)result;

	/* return the number of samples read */
	return 1;
}

/*-- attach -----------------------------------------------------------------*/

static int cnt_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	struct comedi_subdevice *subdevice;
	struct pci_dev *pci_device = NULL;
	struct cnt_board_struct *board;
	unsigned long io_base;
	int error, i;

	/* allocate device private structure */
	error = alloc_private(dev, sizeof(struct cnt_device_private));
	if (error < 0)
		return error;

	/* Probe the device to determine what device in the series it is. */
	for_each_pci_dev(pci_device) {
		if (pci_device->vendor == PCI_VENDOR_ID_KOLTER) {
			for (i = 0; i < cnt_board_nbr; i++) {
				if (cnt_boards[i].device_id ==
				    pci_device->device) {
					/* was a particular bus/slot requested? */
					if ((it->options[0] != 0)
					    || (it->options[1] != 0)) {
						/* are we on the wrong bus/slot? */
						if (pci_device->bus->number !=
						    it->options[0]
						    ||
						    PCI_SLOT(pci_device->devfn)
						    != it->options[1]) {
							continue;
						}
					}

					dev->board_ptr = cnt_boards + i;
					board =
					    (struct cnt_board_struct *)
					    dev->board_ptr;
					goto found;
				}
			}
		}
	}
	printk(KERN_WARNING
	       "comedi%d: no supported board found! (req. bus/slot: %d/%d)\n",
	       dev->minor, it->options[0], it->options[1]);
	return -EIO;

found:
	printk(KERN_INFO
	       "comedi%d: found %s at PCI bus %d, slot %d\n",
	       dev->minor, board->name, pci_device->bus->number,
	       PCI_SLOT(pci_device->devfn));
	devpriv->pcidev = pci_device;
	dev->board_name = board->name;

	/* enable PCI device and request regions */
	error = comedi_pci_enable(pci_device, CNT_DRIVER_NAME);
	if (error < 0) {
		printk(KERN_WARNING "comedi%d: "
		       "failed to enable PCI device and request regions!\n",
		       dev->minor);
		return error;
	}

	/* read register base address [PCI_BASE_ADDRESS #0] */
	io_base = pci_resource_start(pci_device, 0);
	dev->iobase = io_base;

	/* allocate the subdevice structures */
	error = alloc_subdevices(dev, 1);
	if (error < 0)
		return error;

	subdevice = dev->subdevices + 0;
	dev->read_subdev = subdevice;

	subdevice->type = COMEDI_SUBD_COUNTER;
	subdevice->subdev_flags = SDF_READABLE /* | SDF_COMMON */ ;
	subdevice->n_chan = board->cnt_channel_nbr;
	subdevice->maxdata = (1 << board->cnt_bits) - 1;
	subdevice->insn_read = cnt_rinsn;
	subdevice->insn_write = cnt_winsn;

	/* select 20MHz clock */
	outb(3, dev->iobase + 248);

	/* reset all counters */
	outb(0, dev->iobase);
	outb(0, dev->iobase + 0x20);
	outb(0, dev->iobase + 0x40);

	printk(KERN_INFO "comedi%d: " CNT_DRIVER_NAME " attached.\n",
	       dev->minor);
	return 0;
}

/*-- detach -----------------------------------------------------------------*/

static int cnt_detach(struct comedi_device *dev)
{
	if (devpriv && devpriv->pcidev) {
		if (dev->iobase)
			comedi_pci_disable(devpriv->pcidev);
		pci_dev_put(devpriv->pcidev);
	}
	printk(KERN_INFO "comedi%d: " CNT_DRIVER_NAME " remove\n",
	       dev->minor);
	return 0;
}

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
MattCrystal/HTC-One---4.4-Linaro
drivers/video/backlight/jornada720_lcd.c
4895
3234
/*
 *
 * LCD driver for HP Jornada 700 series (710/720/728)
 * Copyright (C) 2006-2009 Kristoffer Ericson <kristoffer.ericson@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 or any later version as published by the Free Software Foundation.
 *
 */

#include <linux/device.h>
#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>

#include <mach/jornada720.h>
#include <mach/hardware.h>

#include <video/s1d13xxxfb.h>

#define LCD_MAX_CONTRAST	0xff
#define LCD_DEF_CONTRAST	0x80

/* Report the LCD power state by sampling the LDD2 GPIO line. */
static int jornada_lcd_get_power(struct lcd_device *dev)
{
	/* LDD2 in PPC = LCD POWER */
	if (PPSR & PPC_LDD2)
		return FB_BLANK_UNBLANK;	/* PW ON */
	else
		return FB_BLANK_POWERDOWN;	/* PW OFF */
}

/*
 * Query the MCU for the current contrast over SSP.  Returns the contrast
 * byte, 0 when the panel is powered down, or -ETIMEDOUT if the MCU does
 * not acknowledge the GETCONTRAST command.
 */
static int jornada_lcd_get_contrast(struct lcd_device *dev)
{
	int ret;

	if (jornada_lcd_get_power(dev) != FB_BLANK_UNBLANK)
		return 0;

	jornada_ssp_start();

	if (jornada_ssp_byte(GETCONTRAST) != TXDUMMY) {
		printk(KERN_ERR "lcd: get contrast failed\n");
		jornada_ssp_end();
		return -ETIMEDOUT;
	} else {
		ret = jornada_ssp_byte(TXDUMMY);
		jornada_ssp_end();
		return ret;
	}
}

/*
 * Push a new contrast value to the MCU over SSP.  Returns 0 on success or
 * -ETIMEDOUT if the MCU does not acknowledge the value byte.
 *
 * FIX: the original stored the return of the SETCONTRAST command into a
 * local that was never read (set-but-unused variable); the dead store is
 * dropped while the command write itself is kept.
 */
static int jornada_lcd_set_contrast(struct lcd_device *dev, int value)
{
	jornada_ssp_start();

	/* start by sending our set contrast cmd to mcu */
	/* NOTE(review): only the ack for the value byte below is checked,
	 * not the ack for the command byte itself - confirm against the
	 * MCU protocol whether that ack matters. */
	jornada_ssp_byte(SETCONTRAST);

	/* push the new value */
	if (jornada_ssp_byte(value) != TXDUMMY) {
		printk(KERN_ERR "lcd : set contrast failed\n");
		jornada_ssp_end();
		return -ETIMEDOUT;
	}

	/* if we get here we can assume everything went well */
	jornada_ssp_end();

	return 0;
}

/* Drive the LDD2 GPIO to power the panel up or down. */
static int jornada_lcd_set_power(struct lcd_device *dev, int power)
{
	if (power != FB_BLANK_UNBLANK) {
		PPSR &= ~PPC_LDD2;
		PPDR |= PPC_LDD2;
	} else
		PPSR |= PPC_LDD2;

	return 0;
}

static struct lcd_ops jornada_lcd_props = {
	.get_contrast = jornada_lcd_get_contrast,
	.set_contrast = jornada_lcd_set_contrast,
	.get_power = jornada_lcd_get_power,
	.set_power = jornada_lcd_set_power,
};

static int jornada_lcd_probe(struct platform_device *pdev)
{
	struct lcd_device *lcd_device;
	int ret;

	lcd_device = lcd_device_register(S1D_DEVICENAME, &pdev->dev, NULL,
					 &jornada_lcd_props);

	if (IS_ERR(lcd_device)) {
		ret = PTR_ERR(lcd_device);
		printk(KERN_ERR "lcd : failed to register device\n");
		return ret;
	}

	platform_set_drvdata(pdev, lcd_device);

	/* lets set our default values */
	jornada_lcd_set_contrast(lcd_device, LCD_DEF_CONTRAST);
	jornada_lcd_set_power(lcd_device, FB_BLANK_UNBLANK);
	/* give it some time to startup */
	msleep(100);

	return 0;
}

static int jornada_lcd_remove(struct platform_device *pdev)
{
	struct lcd_device *lcd_device = platform_get_drvdata(pdev);

	lcd_device_unregister(lcd_device);

	return 0;
}

static struct platform_driver jornada_lcd_driver = {
	.probe = jornada_lcd_probe,
	.remove = jornada_lcd_remove,
	.driver = {
		   .name = "jornada_lcd",
		   },
};

module_platform_driver(jornada_lcd_driver);

MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 LCD driver");
MODULE_LICENSE("GPL");
gpl-2.0
aditisstillalive/android_kernel_lge_hammerhead
drivers/scsi/libfc/fc_rport.c
5151
55268
/* * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ /* * RPORT GENERAL INFO * * This file contains all processing regarding fc_rports. It contains the * rport state machine and does all rport interaction with the transport class. * There should be no other places in libfc that interact directly with the * transport class in regards to adding and deleting rports. * * fc_rport's represent N_Port's within the fabric. */ /* * RPORT LOCKING * * The rport should never hold the rport mutex and then attempt to acquire * either the lport or disc mutexes. The rport's mutex is considered lesser * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for * more comments on the hierarchy. * * The locking strategy is similar to the lport's strategy. The lock protects * the rport's states and is held and released by the entry points to the rport * block. All _enter_* functions correspond to rport states and expect the rport * mutex to be locked before calling them. This means that rports only handle * one request or response at a time, since they're not critical for the I/O * path this potential over-use of the mutex is acceptable. 
*/ #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/rcupdate.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/export.h> #include <asm/unaligned.h> #include <scsi/libfc.h> #include <scsi/fc_encode.h> #include "fc_libfc.h" static struct workqueue_struct *rport_event_queue; static void fc_rport_enter_flogi(struct fc_rport_priv *); static void fc_rport_enter_plogi(struct fc_rport_priv *); static void fc_rport_enter_prli(struct fc_rport_priv *); static void fc_rport_enter_rtv(struct fc_rport_priv *); static void fc_rport_enter_ready(struct fc_rport_priv *); static void fc_rport_enter_logo(struct fc_rport_priv *); static void fc_rport_enter_adisc(struct fc_rport_priv *); static void fc_rport_recv_plogi_req(struct fc_lport *, struct fc_frame *); static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *); static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *); static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *); static void fc_rport_timeout(struct work_struct *); static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *); static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *); static void fc_rport_work(struct work_struct *); static const char *fc_rport_state_names[] = { [RPORT_ST_INIT] = "Init", [RPORT_ST_FLOGI] = "FLOGI", [RPORT_ST_PLOGI_WAIT] = "PLOGI_WAIT", [RPORT_ST_PLOGI] = "PLOGI", [RPORT_ST_PRLI] = "PRLI", [RPORT_ST_RTV] = "RTV", [RPORT_ST_READY] = "Ready", [RPORT_ST_ADISC] = "ADISC", [RPORT_ST_DELETE] = "Delete", }; /** * fc_rport_lookup() - Lookup a remote port by port_id * @lport: The local port to lookup the remote port on * @port_id: The remote port ID to look up * * The caller must hold either disc_mutex or rcu_read_lock(). 
 */
static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	/* RCU traversal: safe under disc_mutex or rcu_read_lock() per above */
	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
		if (rdata->ids.port_id == port_id)
			return rdata;
	return NULL;
}

/**
 * fc_rport_create() - Create a new remote port
 * @lport: The local port this remote port will be associated with
 * @port_id: The port ID for the new remote port
 *
 * The remote port will start in the INIT state.
 *
 * Locking note: must be called with the disc_mutex held.
 */
static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	/* Reuse an existing rport for this ID if one is already known. */
	rdata = lport->tt.rport_lookup(lport, port_id);
	if (rdata)
		return rdata;

	/* Trailing rport_priv_size bytes are LLD private data. */
	rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
	if (!rdata)
		return NULL;

	/* Names are unknown until PLOGI completes; -1 marks "invalid". */
	rdata->ids.node_name = -1;
	rdata->ids.port_name = -1;
	rdata->ids.port_id = port_id;
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	kref_init(&rdata->kref);
	mutex_init(&rdata->rp_mutex);
	rdata->local_port = lport;
	rdata->rp_state = RPORT_ST_INIT;
	rdata->event = RPORT_EV_NONE;
	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
	rdata->e_d_tov = lport->e_d_tov;
	rdata->r_a_tov = lport->r_a_tov;
	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
	INIT_WORK(&rdata->event_work, fc_rport_work);
	/* The directory server rport is not put on the discovery list. */
	if (port_id != FC_FID_DIR_SERV) {
		rdata->lld_event_callback = lport->tt.rport_event_callback;
		list_add_rcu(&rdata->peers, &lport->disc.rports);
	}
	return rdata;
}

/**
 * fc_rport_destroy() - Free a remote port after last reference is released
 * @kref: The remote port's kref
 */
static void fc_rport_destroy(struct kref *kref)
{
	struct fc_rport_priv *rdata;

	rdata = container_of(kref, struct fc_rport_priv, kref);
	/* Deferred free so concurrent RCU readers of the list stay safe. */
	kfree_rcu(rdata, rcu);
}

/**
 * fc_rport_state() - Return a string identifying the remote port's state
 * @rdata: The remote port
 */
static const char *fc_rport_state(struct fc_rport_priv *rdata)
{
	const char *cp;

	cp = fc_rport_state_names[rdata->rp_state];
	if (!cp)
		cp = "Unknown";
	return cp;
}

/**
 * fc_set_rport_loss_tmo() - Set the remote port loss timeout
 * @rport: The remote port that gets a new timeout value
 * @timeout: The new timeout value (in seconds)
 *
 * A zero timeout is clamped to 1 second rather than disabling the timer.
 */
void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}
EXPORT_SYMBOL(fc_set_rport_loss_tmo);

/**
 * fc_plogi_get_maxframe() - Get the maximum payload from the common service
 *			     parameters in a FLOGI frame
 * @flp:    The FLOGI or PLOGI payload
 * @maxval: The maximum frame size upper limit; this may be less than what
 *	    is in the service parameters
 */
static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
					  unsigned int maxval)
{
	unsigned int mfs;

	/*
	 * Get max payload from the common service parameters and the
	 * class 3 receive data field size.
	 */
	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
		maxval = mfs;
	/* fl_cssp[3 - 1] is the class-3 class service parameters entry */
	mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
		maxval = mfs;
	return maxval;
}

/**
 * fc_rport_state_enter() - Change the state of a remote port
 * @rdata: The remote port whose state should change
 * @new:   The new state
 *
 * Locking Note: Called with the rport lock held
 */
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
				 enum fc_rport_state new)
{
	/* Retry counter is per-state; reset it on any state transition. */
	if (rdata->rp_state != new)
		rdata->retries = 0;
	rdata->rp_state = new;
}

/**
 * fc_rport_work() - Handler for remote port events in the rport_event_queue
 * @work: Handle to the remote port being dequeued
 */
static void fc_rport_work(struct work_struct *work)
{
	u32 port_id;
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, event_work);
	struct fc_rport_libfc_priv *rpriv;
	enum fc_rport_event event;
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport_operations *rport_ops;
	struct fc_rport_identifiers ids;
	struct fc_rport *rport;
	struct fc4_prov *prov;
	u8 type;

	mutex_lock(&rdata->rp_mutex);
	event = rdata->event;
	rport_ops = rdata->ops;
	rport = rdata->rport;

	FC_RPORT_DBG(rdata, "work event %u\n", event);

	switch (event) {
	case RPORT_EV_READY:
		ids = rdata->ids;
		rdata->event = RPORT_EV_NONE;
		rdata->major_retries = 0;
		/* Hold a reference across the unlocked callback section. */
		kref_get(&rdata->kref);
		mutex_unlock(&rdata->rp_mutex);

		if (!rport)
			rport = fc_remote_port_add(lport->host, 0, &ids);
		if (!rport) {
			FC_RPORT_DBG(rdata, "Failed to add the rport\n");
			lport->tt.rport_logoff(rdata);
			kref_put(&rdata->kref, lport->tt.rport_destroy);
			return;
		}
		mutex_lock(&rdata->rp_mutex);
		if (rdata->rport)
			FC_RPORT_DBG(rdata, "rport already allocated\n");
		rdata->rport = rport;
		rport->maxframe_size = rdata->maxframe_size;
		rport->supported_classes = rdata->supported_classes;

		/* Mirror negotiated parameters into the transport's dd_data. */
		rpriv = rport->dd_data;
		rpriv->local_port = lport;
		rpriv->rp_state = rdata->rp_state;
		rpriv->flags = rdata->flags;
		rpriv->e_d_tov = rdata->e_d_tov;
		rpriv->r_a_tov = rdata->r_a_tov;
		mutex_unlock(&rdata->rp_mutex);

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		if (rdata->lld_event_callback) {
			FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
			rdata->lld_event_callback(lport, rdata, event);
		}
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	case RPORT_EV_FAILED:
	case RPORT_EV_LOGO:
	case RPORT_EV_STOP:
		/* Give each registered FC-4 provider a chance to clean up. */
		if (rdata->prli_count) {
			mutex_lock(&fc_prov_mutex);
			for (type = 1; type < FC_FC4_PROV_SIZE; type++) {
				prov = fc_passive_prov[type];
				if (prov && prov->prlo)
					prov->prlo(rdata);
			}
			mutex_unlock(&fc_prov_mutex);
		}
		port_id = rdata->ids.port_id;
		mutex_unlock(&rdata->rp_mutex);

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		if (rdata->lld_event_callback) {
			FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
			rdata->lld_event_callback(lport, rdata, event);
		}
		cancel_delayed_work_sync(&rdata->retry_work);

		/*
		 * Reset any outstanding exchanges before freeing rport.
		 */
		lport->tt.exch_mgr_reset(lport, 0, port_id);
		lport->tt.exch_mgr_reset(lport, port_id, 0);

		if (rport) {
			rpriv = rport->dd_data;
			rpriv->rp_state = RPORT_ST_DELETE;
			mutex_lock(&rdata->rp_mutex);
			rdata->rport = NULL;
			mutex_unlock(&rdata->rp_mutex);
			fc_remote_port_delete(rport);
		}

		/* Lock order: disc_mutex before rp_mutex. */
		mutex_lock(&lport->disc.disc_mutex);
		mutex_lock(&rdata->rp_mutex);
		if (rdata->rp_state == RPORT_ST_DELETE) {
			if (port_id == FC_FID_DIR_SERV) {
				rdata->event = RPORT_EV_NONE;
				mutex_unlock(&rdata->rp_mutex);
				kref_put(&rdata->kref,
					 lport->tt.rport_destroy);
			} else if ((rdata->flags & FC_RP_STARTED) &&
				   rdata->major_retries <
				   lport->max_rport_retry_count) {
				/* Still wanted: retry the login from FLOGI. */
				rdata->major_retries++;
				rdata->event = RPORT_EV_NONE;
				FC_RPORT_DBG(rdata, "work restart\n");
				fc_rport_enter_flogi(rdata);
				mutex_unlock(&rdata->rp_mutex);
			} else {
				FC_RPORT_DBG(rdata, "work delete\n");
				list_del_rcu(&rdata->peers);
				mutex_unlock(&rdata->rp_mutex);
				kref_put(&rdata->kref,
					 lport->tt.rport_destroy);
			}
		} else {
			/*
			 * Re-open for events.  Reissue READY event if ready.
			 */
			rdata->event = RPORT_EV_NONE;
			if (rdata->rp_state == RPORT_ST_READY)
				fc_rport_enter_ready(rdata);
			mutex_unlock(&rdata->rp_mutex);
		}
		mutex_unlock(&lport->disc.disc_mutex);
		break;

	default:
		mutex_unlock(&rdata->rp_mutex);
		break;
	}
}

/**
 * fc_rport_login() - Start the remote port login state machine
 * @rdata: The remote port to be logged in to
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 *
 * This indicates the intent to be logged into the remote port.
 * If it appears we are already logged in, ADISC is used to verify
 * the setup.
 */
static int fc_rport_login(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	rdata->flags |= FC_RP_STARTED;
	switch (rdata->rp_state) {
	case RPORT_ST_READY:
		/* Already logged in: verify the session with ADISC. */
		FC_RPORT_DBG(rdata, "ADISC port\n");
		fc_rport_enter_adisc(rdata);
		break;
	case RPORT_ST_DELETE:
		/* FC_RP_STARTED makes fc_rport_work() restart this port. */
		FC_RPORT_DBG(rdata, "Restart deleted port\n");
		break;
	default:
		FC_RPORT_DBG(rdata, "Login to port\n");
		fc_rport_enter_flogi(rdata);
		break;
	}
	mutex_unlock(&rdata->rp_mutex);

	return 0;
}

/**
 * fc_rport_enter_delete() - Schedule a remote port to be deleted
 * @rdata: The remote port to be deleted
 * @event: The event to report as the reason for deletion
 *
 * Locking Note: Called with the rport lock held.
 *
 * Allow state change into DELETE only once.
 *
 * Call queue_work only if there's no event already pending.
 * Set the new event so that the old pending event will not occur.
 * Since we have the mutex, even if fc_rport_work() is already started,
 * it'll see the new event.
 */
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
				  enum fc_rport_event event)
{
	if (rdata->rp_state == RPORT_ST_DELETE)
		return;

	FC_RPORT_DBG(rdata, "Delete port\n");

	fc_rport_state_enter(rdata, RPORT_ST_DELETE);

	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = event;
}

/**
 * fc_rport_logoff() - Logoff and remove a remote port
 * @rdata: The remote port to be logged off of
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static int fc_rport_logoff(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Remove port\n");

	rdata->flags &= ~FC_RP_STARTED;
	if (rdata->rp_state == RPORT_ST_DELETE) {
		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
		goto out;
	}
	fc_rport_enter_logo(rdata);

	/*
	 * Change the state to Delete so that we discard
	 * the response.
	 */
	fc_rport_enter_delete(rdata, RPORT_EV_STOP);
out:
	mutex_unlock(&rdata->rp_mutex);
	return 0;
}

/**
 * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
 * @rdata: The remote port that is ready
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
	fc_rport_state_enter(rdata, RPORT_ST_READY);

	FC_RPORT_DBG(rdata, "Port is Ready\n");

	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = RPORT_EV_READY;
}

/**
 * fc_rport_timeout() - Handler for the retry_work timer
 * @work: Handle to the remote port that has timed out
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static void fc_rport_timeout(struct work_struct *work)
{
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, retry_work.work);

	mutex_lock(&rdata->rp_mutex);

	/* Re-enter the current state to resend its request. */
	switch (rdata->rp_state) {
	case RPORT_ST_FLOGI:
		fc_rport_enter_flogi(rdata);
		break;
	case RPORT_ST_PLOGI:
		fc_rport_enter_plogi(rdata);
		break;
	case RPORT_ST_PRLI:
		fc_rport_enter_prli(rdata);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_rtv(rdata);
		break;
	case RPORT_ST_ADISC:
		fc_rport_enter_adisc(rdata);
		break;
	case RPORT_ST_PLOGI_WAIT:
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
	case RPORT_ST_DELETE:
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
}

/**
 * fc_rport_error() - Error handler, called once retries have been exhausted
 * @rdata: The remote port the error is happened on
 * @fp:    The error code encapsulated in a frame pointer
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
{
	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
		     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
		     fc_rport_state(rdata), rdata->retries);

	switch (rdata->rp_state) {
	case RPORT_ST_FLOGI:
	case RPORT_ST_PLOGI:
		/* Initial login failed: give up on this port. */
		rdata->flags &= ~FC_RP_STARTED;
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
		break;
	case RPORT_ST_RTV:
		/* RTV is optional; proceed to READY on failure. */
		fc_rport_enter_ready(rdata);
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_ADISC:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_PLOGI_WAIT:
	case RPORT_ST_DELETE:
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
		break;
	}
}

/**
 * fc_rport_error_retry() - Handler for remote port state retries
 * @rdata: The remote port whose state is to be retried
 * @fp:    The error code encapsulated in a frame pointer
 *
 * If the error was an exchange timeout retry immediately,
 * otherwise wait for E_D_TOV.
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error_retry(struct fc_rport_priv *rdata,
				 struct fc_frame *fp)
{
	unsigned long delay = FC_DEF_E_D_TOV;

	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
	if (PTR_ERR(fp) == -FC_EX_CLOSED)
		goto out;

	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
			     PTR_ERR(fp), fc_rport_state(rdata));
		rdata->retries++;
		/* no additional delay on exchange timeouts */
		if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
			delay = 0;
		schedule_delayed_work(&rdata->retry_work, delay);
		return;
	}

out:
	fc_rport_error(rdata, fp);
}

/**
 * fc_rport_login_complete() - Handle parameters and completion of p-mp login.
 * @rdata: The remote port which we logged into or which logged into us.
 * @fp:    The FLOGI or PLOGI request or response frame
 *
 * Returns non-zero error if a problem is detected with the frame.
 * Does not free the frame.
 *
 * This is only used in point-to-multipoint mode for FIP currently.
 */
static int fc_rport_login_complete(struct fc_rport_priv *rdata,
				   struct fc_frame *fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *flogi;
	unsigned int e_d_tov;
	u16 csp_flags;

	flogi = fc_frame_payload_get(fp, sizeof(*flogi));
	if (!flogi)
		return -EINVAL;

	csp_flags = ntohs(flogi->fl_csp.sp_features);

	if (fc_frame_payload_op(fp) == ELS_FLOGI) {
		/* A peer N_Port must not claim to be a fabric F_Port. */
		if (csp_flags & FC_SP_FT_FPORT) {
			FC_RPORT_DBG(rdata, "Fabric bit set in FLOGI\n");
			return -EINVAL;
		}
	} else {
		/*
		 * E_D_TOV is not valid on an incoming FLOGI request.
		 */
		e_d_tov = ntohl(flogi->fl_csp.sp_e_d_tov);
		/* FC_SP_FT_EDTR set means the value is in nanoseconds. */
		if (csp_flags & FC_SP_FT_EDTR)
			e_d_tov /= 1000000;
		if (e_d_tov > rdata->e_d_tov)
			rdata->e_d_tov = e_d_tov;
	}
	rdata->maxframe_size = fc_plogi_get_maxframe(flogi, lport->mfs);
	return 0;
}

/**
 * fc_rport_flogi_resp() - Handle response to FLOGI request for p-mp mode
 * @sp:     The sequence that the FLOGI was on
 * @fp:     The FLOGI response frame
 * @rp_arg: The remote port that received the FLOGI response
 */
static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rp_arg)
{
	struct fc_rport_priv *rdata = rp_arg;
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *flogi;
	unsigned int r_a_tov;

	FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));

	/* Exchange closed: just drop the reference taken at send time. */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		goto put;

	mutex_lock(&rdata->rp_mutex);

	if (rdata->rp_state != RPORT_ST_FLOGI) {
		FC_RPORT_DBG(rdata, "Received a FLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	if (fc_frame_payload_op(fp) != ELS_LS_ACC)
		goto bad;
	if (fc_rport_login_complete(rdata, fp))
		goto bad;

	flogi = fc_frame_payload_get(fp, sizeof(*flogi));
	if (!flogi)
		goto bad;
	r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov);
	if (r_a_tov > rdata->r_a_tov)
		rdata->r_a_tov = r_a_tov;

	/* The higher-WWPN side initiates PLOGI; the other side waits. */
	if (rdata->ids.port_name < lport->wwpn)
		fc_rport_enter_plogi(rdata);
	else
		fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
put:
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
	return;
bad:
	FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
	fc_rport_error_retry(rdata, fp);
	goto out;
}

/**
 * fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp
 * @rdata: The remote port to send a FLOGI to
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	/* FLOGI is only used in point-to-multipoint; otherwise go to PLOGI. */
	if (!lport->point_to_multipoint)
		return fc_rport_enter_plogi(rdata);

	FC_RPORT_DBG(rdata, "Entered FLOGI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_FLOGI);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp)
		return fc_rport_error_retry(rdata, fp);

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI,
				  fc_rport_flogi_resp, rdata,
				  2 * lport->r_a_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		/* Reference held for the response handler; put in _resp. */
		kref_get(&rdata->kref);
}

/**
 * fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode
 * @lport: The local port that received the PLOGI request
 * @rx_fp: The PLOGI request frame
 */
static void fc_rport_recv_flogi_req(struct fc_lport *lport,
				    struct fc_frame *rx_fp)
{
	struct fc_disc *disc;
	struct fc_els_flogi *flp;
	struct fc_rport_priv *rdata;
	struct fc_frame *fp = rx_fp;
	struct fc_seq_els_data rjt_data;
	u32 sid;

	sid = fc_frame_sid(fp);

	FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n");

	disc = &lport->disc;
	mutex_lock(&disc->disc_mutex);

	if (!lport->point_to_multipoint) {
		rjt_data.reason = ELS_RJT_UNSUP;
		rjt_data.explan = ELS_EXPL_NONE;
		goto reject;
	}

	flp = fc_frame_payload_get(fp, sizeof(*flp));
	if (!flp) {
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		goto reject;
	}

	rdata = lport->tt.rport_lookup(lport, sid);
	if (!rdata) {
		rjt_data.reason = ELS_RJT_FIP;
		rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
		goto reject;
	}
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received FLOGI in %s state\n",
		     fc_rport_state(rdata));

	switch (rdata->rp_state) {
	case RPORT_ST_INIT:
		/*
		 * If received the FLOGI request on RPORT which is INIT state
		 * (means not transition to FLOGI either fc_rport timeout
		 * function didn't trigger or this end hasn't received
		 * beacon yet from other end. In that case only, allow RPORT
		 * state machine to continue, otherwise fall through which
		 * causes the code to send reject response.
		 * NOTE: Not checking for FIP->state such as VNMP_UP or
		 * VNMP_CLAIM because if FIP state is not one of those,
		 * RPORT wouldn't have been created and 'rport_lookup' would
		 * have failed anyway in that case.
		 */
		if (lport->point_to_multipoint)
			break;
		/* fall through */
	case RPORT_ST_DELETE:
		mutex_unlock(&rdata->rp_mutex);
		rjt_data.reason = ELS_RJT_FIP;
		rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
		goto reject;
	case RPORT_ST_FLOGI:
	case RPORT_ST_PLOGI_WAIT:
	case RPORT_ST_PLOGI:
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		/*
		 * Set the remote port to be deleted and to then restart.
		 * This queues work to be sure exchanges are reset.
		 */
		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
		mutex_unlock(&rdata->rp_mutex);
		rjt_data.reason = ELS_RJT_BUSY;
		rjt_data.explan = ELS_EXPL_NONE;
		goto reject;
	}
	if (fc_rport_login_complete(rdata, fp)) {
		mutex_unlock(&rdata->rp_mutex);
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_NONE;
		goto reject;
	}

	/* Accept: send our own FLOGI parameters back as an LS_ACC. */
	fp = fc_frame_alloc(lport, sizeof(*flp));
	if (!fp)
		goto out;

	fc_flogi_fill(lport, fp);
	flp = fc_frame_payload_get(fp, sizeof(*flp));
	flp->fl_cmd = ELS_LS_ACC;

	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);

	/* The higher-WWPN side initiates PLOGI; the other side waits. */
	if (rdata->ids.port_name < lport->wwpn)
		fc_rport_enter_plogi(rdata);
	else
		fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
out:
	mutex_unlock(&rdata->rp_mutex);
	mutex_unlock(&disc->disc_mutex);
	fc_frame_free(rx_fp);
	return;
reject:
	mutex_unlock(&disc->disc_mutex);
	lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(rx_fp);
}

/**
 * fc_rport_plogi_resp() - Handler for ELS PLOGI responses
 * @sp:        The sequence the PLOGI is on
 * @fp:        The PLOGI response frame
 * @rdata_arg: The remote port that sent the PLOGI response
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *plp = NULL;
	u16 csp_seq;
	u16 cssp_seq;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_PLOGI) {
		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC &&
	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
		/* Learn the peer's names from its PLOGI accept. */
		rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
		rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);

		/* save plogi response sp_features for further reference */
		rdata->sp_features = ntohs(plp->fl_csp.sp_features);

		if (lport->point_to_multipoint)
			fc_rport_login_complete(rdata, fp);
		/* Concurrent sequences: minimum of total and class-3 limits. */
		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
		if (cssp_seq < csp_seq)
			csp_seq = cssp_seq;
		rdata->max_seq = csp_seq;
		rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
		fc_rport_enter_prli(rdata);
	} else
		fc_rport_error_retry(rdata, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}

/**
 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request
 * @rdata: The remote port to send a PLOGI to
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PLOGI);

	/* Reset to the minimum until new parameters are negotiated. */
	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp) {
		FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__);
		fc_rport_error_retry(rdata, fp);
		return;
	}
	rdata->e_d_tov = lport->e_d_tov;

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
				  fc_rport_plogi_resp, rdata,
				  2 * lport->r_a_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		/* Reference held for the response handler; put in _resp. */
		kref_get(&rdata->kref);
}

/**
 * fc_rport_prli_resp() - Process Login (PRLI) response handler
 * @sp:        The sequence the PRLI response was on
 * @fp:        The PRLI response frame
 * @rdata_arg: The remote port that sent the PRLI response
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	/* PRLI payload: header plus one service parameter page. */
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_els_spp temp_spp;
	struct fc4_prov *prov;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	u32 fcp_parm = 0;
	u8 op;
	u8 resp_code = 0;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_PRLI) {
		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	/* reinitialize remote port roles */
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		if (!pp)
			goto out;

		resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
		FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
			     pp->spp.spp_flags);
		rdata->spp_type = pp->spp.spp_type;
		if (resp_code != FC_SPP_RESP_ACK) {
			/* CONF (cannot execute) is fatal; others retry. */
			if (resp_code == FC_SPP_RESP_CONF)
				fc_rport_error(rdata, fp);
			else
				fc_rport_error_retry(rdata, fp);
			goto out;
		}
		if (pp->prli.prli_spp_len < sizeof(pp->spp))
			goto out;

		fcp_parm = ntohl(pp->spp.spp_params);
		if (fcp_parm & FCP_SPPF_RETRY)
			rdata->flags |= FC_RP_FLAGS_RETRY;
		if (fcp_parm & FCP_SPPF_CONF_COMPL)
			rdata->flags |= FC_RP_FLAGS_CONF_REQ;

		/* Let the passive FCP provider examine the response page. */
		prov = fc_passive_prov[FC_TYPE_FCP];
		if (prov) {
			memset(&temp_spp, 0, sizeof(temp_spp));
			prov->prli(rdata, pp->prli.prli_spp_len,
				   &pp->spp, &temp_spp);
		}

		rdata->supported_classes = FC_COS_CLASS3;
		if (fcp_parm & FCP_SPPF_INIT_FCN)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (fcp_parm & FCP_SPPF_TARG_FCN)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		rdata->ids.roles = roles;
		fc_rport_enter_rtv(rdata);

	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
		fc_rport_error_retry(rdata, fp);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}

/**
 * fc_rport_enter_prli() - Send Process Login (PRLI) request
 * @rdata: The remote port to send the PRLI request to
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_frame *fp;
	struct fc4_prov *prov;

	/*
	 * If the rport is one of the well known addresses
	 * we skip PRLI and RTV and go straight to READY.
	 */
	if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
		fc_rport_enter_ready(rdata);
		return;
	}

	FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PRLI);

	fp = fc_frame_alloc(lport, sizeof(*pp));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	fc_prli_fill(lport, fp);

	/* Let the passive FCP provider fill in its service parameter page. */
	prov = fc_passive_prov[FC_TYPE_FCP];
	if (prov) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp);
	}

	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id,
		       fc_host_port_id(lport->host), FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
				     NULL, rdata, 2 * lport->r_a_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		/* Reference held for the response handler; put in _resp. */
		kref_get(&rdata->kref);
}

/**
 * fc_rport_rtv_resp() - Handler for Request Timeout Value (RTV) responses
 * @sp:        The sequence the RTV was on
 * @fp:        The RTV response frame
 * @rdata_arg: The remote port that sent the RTV response
 *
 * Many targets don't seem to support this.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_RTV) {
		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		struct fc_els_rtv_acc *rtv;
		u32 toq;
		u32 tov;

		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
		if (rtv) {
			toq = ntohl(rtv->rtv_toq);
			tov = ntohl(rtv->rtv_r_a_tov);
			/* Clamp zero timeouts to a minimum of 1. */
			if (tov == 0)
				tov = 1;
			rdata->r_a_tov = tov;
			tov = ntohl(rtv->rtv_e_d_tov);
			/* EDRES qualifier set means E_D_TOV is in ns. */
			if (toq & FC_ELS_RTV_EDRES)
				tov /= 1000000;
			if (tov == 0)
				tov = 1;
			rdata->e_d_tov = tov;
		}
	}

	/* RTV is best-effort: proceed to READY regardless of the result. */
	fc_rport_enter_ready(rdata);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}

/**
 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request
 * @rdata: The remote port to send the RTV request to
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
	struct fc_frame *fp;
	struct fc_lport *lport = rdata->local_port;

	FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_RTV);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
				  fc_rport_rtv_resp, rdata,
				  2 * lport->r_a_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		/* Reference held for the response handler; put in _resp. */
		kref_get(&rdata->kref);
}

/**
 * fc_rport_logo_resp() - Handler for logout (LOGO) responses
 * @sp:        The sequence the LOGO was on
 * @fp:        The LOGO response frame
 * @lport_arg: The local port
 *
 * The response is ignored except for logging; no rport reference is
 * held here (the callback argument is the lport, not the rport).
 */
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *lport_arg)
{
	struct fc_lport *lport = lport_arg;

	FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did,
			"Received a LOGO %s\n", fc_els_resp_type(fp));
	/* Error pointers encode exchange errors; there is no frame to free. */
	if (IS_ERR(fp))
		return;
	fc_frame_free(fp);
}

/**
 * fc_rport_enter_logo() - Send a logout (LOGO) request
 * @rdata: The remote port to send the LOGO request to
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n",
		     fc_rport_state(rdata));

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
	if (!fp)
		return;
	/* Fire and forget: the response handler only logs the result. */
	(void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
				   fc_rport_logo_resp, lport, 0);
}

/**
 * fc_rport_adisc_resp() - Handler for Address Discovery (ADISC) responses
 * @sp:        The sequence the ADISC response was on
 * @fp:        The ADISC response frame
 * @rdata_arg: The remote port that sent the ADISC response
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_els_adisc *adisc;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a ADISC response\n");

	if (rdata->rp_state != RPORT_ST_ADISC) {
		FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
			     fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	/*
	 * If address verification failed.  Consider us logged out of the rport.
	 * Since the rport is still in discovery, we want to be
	 * logged in, so go to PLOGI state.  Otherwise, go back to READY.
	 */
	op = fc_frame_payload_op(fp);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	if (op != ELS_LS_ACC || !adisc ||
	    ntoh24(adisc->adisc_port_id) != rdata->ids.port_id ||
	    get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
	    get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
		FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
		fc_rport_enter_flogi(rdata);
	} else {
		FC_RPORT_DBG(rdata, "ADISC OK\n");
		fc_rport_enter_ready(rdata);
	}
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}

/**
 * fc_rport_enter_adisc() - Send Address Discover (ADISC) request
 * @rdata: The remote port to send the ADISC request to
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_ADISC);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
				  fc_rport_adisc_resp, rdata,
				  2 * lport->r_a_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		/* Reference held for the response handler; put in _resp. */
		kref_get(&rdata->kref);
}

/**
 * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests
 * @rdata: The remote port that sent the ADISC request
 * @in_fp: The ADISC request frame
 *
 * Locking Note: Called with the lport and rport locks held.
 */
static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
				    struct fc_frame *in_fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;
	struct fc_els_adisc *adisc;
	struct fc_seq_els_data rjt_data;

	FC_RPORT_DBG(rdata, "Received ADISC request\n");

	adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
	if (!adisc) {
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
		goto drop;
	}

	/* Accept: reply with our own addresses in an LS_ACC. */
	fp = fc_frame_alloc(lport, sizeof(*adisc));
	if (!fp)
		goto drop;
	fc_adisc_fill(lport, fp);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	adisc->adisc_cmd = ELS_LS_ACC;
	fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
drop:
	fc_frame_free(in_fp);
}

/**
 * fc_rport_recv_rls_req() - Handle received Read Link Status request
 * @rdata: The remote port that sent the RLS request
 * @rx_fp: The PRLI request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
				  struct fc_frame *rx_fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;
	struct fc_els_rls *rls;
	struct fc_els_rls_resp *rsp;
	struct fc_els_lesb *lesb;
	struct fc_seq_els_data rjt_data;
	struct fc_host_statistics *hst;

	FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n",
		     fc_rport_state(rdata));

	rls = fc_frame_payload_get(rx_fp, sizeof(*rls));
	if (!rls) {
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		goto out_rjt;
	}

	fp = fc_frame_alloc(lport, sizeof(*rsp));
	if (!fp) {
		rjt_data.reason = ELS_RJT_UNAB;
		rjt_data.explan = ELS_EXPL_INSUF_RES;
		goto out_rjt;
	}

	rsp = fc_frame_payload_get(fp, sizeof(*rsp));
	memset(rsp, 0, sizeof(*rsp));
	rsp->rls_cmd = ELS_LS_ACC;
	lesb = &rsp->rls_lesb;
	if (lport->tt.get_lesb) {
		/* get LESB from LLD if it supports it */
		lport->tt.get_lesb(lport, lesb);
	} else {
		/* Otherwise build the LESB from the host statistics. */
		fc_get_host_stats(lport->host);
		hst = &lport->host_stats;
		lesb->lesb_link_fail = htonl(hst->link_failure_count);
		lesb->lesb_sync_loss = htonl(hst->loss_of_sync_count);
		lesb->lesb_sig_loss = htonl(hst->loss_of_signal_count);
		lesb->lesb_prim_err = htonl(hst->prim_seq_protocol_err_count);
		lesb->lesb_inv_word = htonl(hst->invalid_tx_word_count);
		lesb->lesb_inv_crc = htonl(hst->invalid_crc_count);
	}

	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
	goto out;

out_rjt:
	lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
out:
	fc_frame_free(rx_fp);
}

/**
 * fc_rport_recv_els_req() - Handler for validated ELS requests
 * @lport: The local port that received the ELS request
 * @fp:    The ELS request frame
 *
 * Handle incoming ELS requests that require port login.
 * The ELS opcode has already been validated by the caller.
 *
 * Locking Note: Called with the lport lock held.
 */
static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_rport_priv *rdata;
	struct fc_seq_els_data els_data;

	/*
	 * Look up the sender under disc_mutex, then hand-over-hand to the
	 * rport lock so the rdata can't be freed while we dispatch.
	 */
	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp));
	if (!rdata) {
		mutex_unlock(&lport->disc.disc_mutex);
		goto reject;
	}
	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&lport->disc.disc_mutex);

	/* Only logged-in (or nearly so) states may receive these ELS ops */
	switch (rdata->rp_state) {
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		break;
	default:
		mutex_unlock(&rdata->rp_mutex);
		goto reject;
	}

	switch (fc_frame_payload_op(fp)) {
	case ELS_PRLI:
		fc_rport_recv_prli_req(rdata, fp);
		break;
	case ELS_PRLO:
		fc_rport_recv_prlo_req(rdata, fp);
		break;
	case ELS_ADISC:
		fc_rport_recv_adisc_req(rdata, fp);
		break;
	case ELS_RRQ:
		lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL);
		fc_frame_free(fp);
		break;
	case ELS_REC:
		lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL);
		fc_frame_free(fp);
		break;
	case ELS_RLS:
		fc_rport_recv_rls_req(rdata, fp);
		break;
	default:
		fc_frame_free(fp);	/* can't happen */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
	return;

reject:
	els_data.reason = ELS_RJT_UNAB;
	els_data.explan = ELS_EXPL_PLOGI_REQD;
	lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
	fc_frame_free(fp);
}

/**
 * fc_rport_recv_req() - Handler for requests
 * @lport: The local port that received the request
 * @fp:	   The request frame
 *
 * Top-level ELS dispatcher: routes login-free ops directly and forwards
 * login-requiring ops to fc_rport_recv_els_req().
 *
 * Locking Note: Called with the lport lock held.
 */
static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_seq_els_data els_data;

	/*
	 * Handle FLOGI, PLOGI and LOGO requests separately, since they
	 * don't require prior login.
	 * Check for unsupported opcodes first and reject them.
	 * For some ops, it would be incorrect to reject with "PLOGI required".
	 */
	switch (fc_frame_payload_op(fp)) {
	case ELS_FLOGI:
		fc_rport_recv_flogi_req(lport, fp);
		break;
	case ELS_PLOGI:
		fc_rport_recv_plogi_req(lport, fp);
		break;
	case ELS_LOGO:
		fc_rport_recv_logo_req(lport, fp);
		break;
	case ELS_PRLI:
	case ELS_PRLO:
	case ELS_ADISC:
	case ELS_RRQ:
	case ELS_REC:
	case ELS_RLS:
		fc_rport_recv_els_req(lport, fp);
		break;
	default:
		/* unknown opcode: reject as unsupported */
		els_data.reason = ELS_RJT_UNSUP;
		els_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
		fc_frame_free(fp);
		break;
	}
}

/**
 * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
 * @lport: The local port that received the PLOGI request
 * @rx_fp: The PLOGI request frame
 *
 * Locking Note: The rport lock is held before calling this function.
 */
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
				    struct fc_frame *rx_fp)
{
	struct fc_disc *disc;
	struct fc_rport_priv *rdata;
	struct fc_frame *fp = rx_fp;
	struct fc_els_flogi *pl;
	struct fc_seq_els_data rjt_data;
	u32 sid;

	sid = fc_frame_sid(fp);

	FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");

	pl = fc_frame_payload_get(fp, sizeof(*pl));
	if (!pl) {
		FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		goto reject;
	}

	/* create (or find) an rdata for the sender under disc_mutex */
	disc = &lport->disc;
	mutex_lock(&disc->disc_mutex);
	rdata = lport->tt.rport_create(lport, sid);
	if (!rdata) {
		mutex_unlock(&disc->disc_mutex);
		rjt_data.reason = ELS_RJT_UNAB;
		rjt_data.explan = ELS_EXPL_INSUF_RES;
		goto reject;
	}

	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&disc->disc_mutex);

	rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
	rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);

	/*
	 * If the rport was just created, possibly due to the incoming PLOGI,
	 * set the state appropriately and accept the PLOGI.
	 *
	 * If we had also sent a PLOGI, and if the received PLOGI is from a
	 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
	 * "command already in progress".
	 *
	 * XXX TBD: If the session was ready before, the PLOGI should result in
	 * all outstanding exchanges being reset.
	 */
	switch (rdata->rp_state) {
	case RPORT_ST_INIT:
		FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
		break;
	case RPORT_ST_PLOGI_WAIT:
		FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI_WAIT state\n");
		break;
	case RPORT_ST_PLOGI:
		FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
		/* WWPN tie-break: lower WWPN's PLOGI loses */
		if (rdata->ids.port_name < lport->wwpn) {
			mutex_unlock(&rdata->rp_mutex);
			rjt_data.reason = ELS_RJT_INPROG;
			rjt_data.explan = ELS_EXPL_NONE;
			goto reject;
		}
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
			     "- ignored for now\n", rdata->rp_state);
		/* XXX TBD - should reset */
		break;
	case RPORT_ST_FLOGI:
	case RPORT_ST_DELETE:
		FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
			     fc_rport_state(rdata));
		mutex_unlock(&rdata->rp_mutex);
		rjt_data.reason = ELS_RJT_BUSY;
		rjt_data.explan = ELS_EXPL_NONE;
		goto reject;
	}

	/*
	 * Get session payload size from incoming PLOGI.
	 */
	rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);

	/*
	 * Send LS_ACC.  If this fails, the originator should retry.
	 */
	fp = fc_frame_alloc(lport, sizeof(*pl));
	if (!fp)
		goto out;

	fc_plogi_fill(lport, fp, ELS_LS_ACC);
	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
	fc_rport_enter_prli(rdata);
out:
	mutex_unlock(&rdata->rp_mutex);
	fc_frame_free(rx_fp);
	return;

reject:
	lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}

/**
 * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests
 * @rdata: The remote port that sent the PRLI request
 * @rx_fp: The PRLI request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
				   struct fc_frame *rx_fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_els_spp *rspp;	/* request service param page */
	struct fc_els_spp *spp;	/* response spp */
	unsigned int len;
	unsigned int plen;
	enum fc_els_spp_resp resp;
	enum fc_els_spp_resp passive;
	struct fc_seq_els_data rjt_data;
	struct fc4_prov *prov;

	FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
		     fc_rport_state(rdata));

	/*
	 * Validate the overall PRLI length and the per-page SPP length
	 * before trusting either; any inconsistency is a length reject.
	 */
	len = fr_len(rx_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
	if (!pp)
		goto reject_len;
	plen = ntohs(pp->prli.prli_len);
	if ((plen % 4) != 0 || plen > len || plen < 16)
		goto reject_len;
	if (plen < len)
		len = plen;
	plen = pp->prli.prli_spp_len;
	if ((plen % 4) != 0 || plen < sizeof(*spp) ||
	    plen > len || len < sizeof(*pp) || plen < 12)
		goto reject_len;
	rspp = &pp->spp;

	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		rjt_data.reason = ELS_RJT_UNAB;
		rjt_data.explan = ELS_EXPL_INSUF_RES;
		goto reject;
	}
	pp = fc_frame_payload_get(fp, len);
	WARN_ON(!pp);
	memset(pp, 0, len);
	pp->prli.prli_cmd = ELS_LS_ACC;
	pp->prli.prli_spp_len = plen;
	pp->prli.prli_len = htons(len);
	len -= sizeof(struct fc_els_prli);

	/*
	 * Go through all the service parameter pages and build
	 * response.  If plen indicates longer SPP than standard,
	 * use that.  The entire response has been pre-cleared above.
	 */
	spp = &pp->spp;
	mutex_lock(&fc_prov_mutex);
	while (len >= plen) {
		rdata->spp_type = rspp->spp_type;
		spp->spp_type = rspp->spp_type;
		spp->spp_type_ext = rspp->spp_type_ext;
		resp = 0;

		if (rspp->spp_type < FC_FC4_PROV_SIZE) {
			/* active provider first; passive may override with ACK */
			prov = fc_active_prov[rspp->spp_type];
			if (prov)
				resp = prov->prli(rdata, plen, rspp, spp);
			prov = fc_passive_prov[rspp->spp_type];
			if (prov) {
				passive = prov->prli(rdata, plen, rspp, spp);
				if (!resp || passive == FC_SPP_RESP_ACK)
					resp = passive;
			}
		}
		if (!resp) {
			/* no provider handled this FC-4 type */
			if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
				resp |= FC_SPP_RESP_CONF;
			else
				resp |= FC_SPP_RESP_INVL;
		}
		spp->spp_flags |= resp;
		len -= plen;
		rspp = (struct fc_els_spp *)((char *)rspp + plen);
		spp = (struct fc_els_spp *)((char *)spp + plen);
	}
	mutex_unlock(&fc_prov_mutex);

	/*
	 * Send LS_ACC.	 If this fails, the originator should retry.
	 */
	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);

	switch (rdata->rp_state) {
	case RPORT_ST_PRLI:
		fc_rport_enter_ready(rdata);
		break;
	default:
		break;
	}
	goto drop;

reject_len:
	rjt_data.reason = ELS_RJT_PROT;
	rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
	lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
	fc_frame_free(rx_fp);
}

/**
 * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests
 * @rdata: The remote port that sent the PRLO request
 * @rx_fp: The PRLO request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
				   struct fc_frame *rx_fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;
	struct {
		struct fc_els_prlo prlo;
		struct fc_els_spp spp;
	} *pp;
	struct fc_els_spp *rspp;	/* request service param page */
	struct fc_els_spp *spp;	/* response spp */
	unsigned int len;
	unsigned int plen;
	struct fc_seq_els_data rjt_data;

	FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
		     fc_rport_state(rdata));

	len = fr_len(rx_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
	if (!pp)
		goto reject_len;
	plen = ntohs(pp->prlo.prlo_len);
	if (plen != 20)		/* PRLO with a single SPP is exactly 20 bytes */
		goto reject_len;
	if (plen < len)
		len = plen;

	rspp = &pp->spp;

	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		rjt_data.reason = ELS_RJT_UNAB;
		rjt_data.explan = ELS_EXPL_INSUF_RES;
		goto reject;
	}

	pp = fc_frame_payload_get(fp, len);
	WARN_ON(!pp);
	memset(pp, 0, len);
	pp->prlo.prlo_cmd = ELS_LS_ACC;
	pp->prlo.prlo_obs = 0x10;
	pp->prlo.prlo_len = htons(len);
	spp = &pp->spp;
	spp->spp_type = rspp->spp_type;
	spp->spp_type_ext = rspp->spp_type_ext;
	spp->spp_flags = FC_SPP_RESP_ACK;

	/* tear the session down before acknowledging the logout */
	fc_rport_enter_delete(rdata, RPORT_EV_LOGO);

	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
	goto drop;

reject_len:
	rjt_data.reason = ELS_RJT_PROT;
	rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
	lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
	fc_frame_free(rx_fp);
}

/**
 * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests
 * @lport: The local port that received the LOGO request
 * @fp:	   The LOGO request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_rport_priv *rdata;
	u32 sid;

	/* LOGO is always accepted, whether or not the sender is logged in */
	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);

	sid = fc_frame_sid(fp);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, sid);
	if (rdata) {
		mutex_lock(&rdata->rp_mutex);
		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
			     fc_rport_state(rdata));

		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
		mutex_unlock(&rdata->rp_mutex);
	} else
		FC_RPORT_ID_DBG(lport, sid,
				"Received LOGO from non-logged-in port\n");
	mutex_unlock(&lport->disc.disc_mutex);
	fc_frame_free(fp);
}

/**
 * fc_rport_flush_queue() - Flush the rport_event_queue
 */
static void fc_rport_flush_queue(void)
{
	flush_workqueue(rport_event_queue);
}

/**
 * fc_rport_init() - Initialize the remote port layer for a local port
 * @lport: The local port to initialize the remote port layer for
 *
 * Fills in any transport-template entry points the LLD did not override.
 */
int fc_rport_init(struct fc_lport *lport)
{
	if (!lport->tt.rport_lookup)
		lport->tt.rport_lookup = fc_rport_lookup;

	if (!lport->tt.rport_create)
		lport->tt.rport_create = fc_rport_create;

	if (!lport->tt.rport_login)
		lport->tt.rport_login = fc_rport_login;

	if (!lport->tt.rport_logoff)
		lport->tt.rport_logoff = fc_rport_logoff;

	if (!lport->tt.rport_recv_req)
		lport->tt.rport_recv_req = fc_rport_recv_req;

	if (!lport->tt.rport_flush_queue)
		lport->tt.rport_flush_queue = fc_rport_flush_queue;

	if (!lport->tt.rport_destroy)
		lport->tt.rport_destroy = fc_rport_destroy;

	return 0;
}
EXPORT_SYMBOL(fc_rport_init);

/**
 * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
 * @rdata:   remote port private
 * @spp_len: service parameter page length
 * @rspp:    received service parameter page
 * @spp:     response service parameter page
 *
 * Returns the value for the response code to be placed in spp_flags;
 * Returns 0 if not an initiator.
 */
static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
			     const struct fc_els_spp *rspp,
			     struct fc_els_spp *spp)
{
	struct fc_lport *lport = rdata->local_port;
	u32 fcp_parm;

	/* decode the peer's FCP roles from its service parameters */
	fcp_parm = ntohl(rspp->spp_params);
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
	if (fcp_parm & FCP_SPPF_INIT_FCN)
		rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
	if (fcp_parm & FCP_SPPF_TARG_FCN)
		rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (fcp_parm & FCP_SPPF_RETRY)
		rdata->flags |= FC_RP_FLAGS_RETRY;
	rdata->supported_classes = FC_COS_CLASS3;
	if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR))
		return 0;

	spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;

	/*
	 * OR in our service parameters with other providers (target), if any.
	 */
	fcp_parm = ntohl(spp->spp_params);
	spp->spp_params = htonl(fcp_parm | lport->service_params);
	return FC_SPP_RESP_ACK;
}

/*
 * FC-4 provider ops for FCP initiator.
 */
struct fc4_prov fc_rport_fcp_init = {
	.prli = fc_rport_fcp_prli,
};

/**
 * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0
 * @rdata:   remote port private
 * @spp_len: service parameter page length
 * @rspp:    received service parameter page
 * @spp:     response service parameter page
 */
static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len,
			    const struct fc_els_spp *rspp,
			    struct fc_els_spp *spp)
{
	/* type 0 cannot establish an image pair */
	if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR)
		return FC_SPP_RESP_INVL;
	return FC_SPP_RESP_ACK;
}

/*
 * FC-4 provider ops for type 0 service parameters.
 *
 * This handles the special case of type 0 which is always successful
 * but doesn't do anything otherwise.
*/ struct fc4_prov fc_rport_t0_prov = { .prli = fc_rport_t0_prli, }; /** * fc_setup_rport() - Initialize the rport_event_queue */ int fc_setup_rport(void) { rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); if (!rport_event_queue) return -ENOMEM; return 0; } /** * fc_destroy_rport() - Destroy the rport_event_queue */ void fc_destroy_rport(void) { destroy_workqueue(rport_event_queue); } /** * fc_rport_terminate_io() - Stop all outstanding I/O on a remote port * @rport: The remote port whose I/O should be terminated */ void fc_rport_terminate_io(struct fc_rport *rport) { struct fc_rport_libfc_priv *rpriv = rport->dd_data; struct fc_lport *lport = rpriv->local_port; lport->tt.exch_mgr_reset(lport, 0, rport->port_id); lport->tt.exch_mgr_reset(lport, rport->port_id, 0); } EXPORT_SYMBOL(fc_rport_terminate_io);
gpl-2.0
thermatk/N8000
arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
8735
3569
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2008 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /* * This module provides system/board/application information obtained * by the bootloader. */ #include <linux/module.h> #include <asm/octeon/cvmx.h> #include <asm/octeon/cvmx-spinlock.h> #include <asm/octeon/cvmx-sysinfo.h> /** * This structure defines the private state maintained by sysinfo module. * */ static struct { struct cvmx_sysinfo sysinfo; /* system information */ cvmx_spinlock_t lock; /* mutex spinlock */ } state = { .lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER }; /* * Global variables that define the min/max of the memory region set * up for 32 bit userspace access. */ uint64_t linux_mem32_min; uint64_t linux_mem32_max; uint64_t linux_mem32_wired; uint64_t linux_mem32_offset; /** * This function returns the application information as obtained * by the bootloader. 
 * This provides the core mask of the cores
 * running the same application image, as well as the physical
 * memory regions available to the core.
 *
 * Returns Pointer to the boot information structure
 *
 */
struct cvmx_sysinfo *cvmx_sysinfo_get(void)
{
	/* single process-wide instance; lives in the static 'state' above */
	return &(state.sysinfo);
}
EXPORT_SYMBOL(cvmx_sysinfo_get);

/**
 * This function is used in non-simple executive environments (such as
 * Linux kernel, u-boot, etc.) to configure the minimal fields that
 * are required to use simple executive files directly.
 *
 * Locking (if required) must be handled outside of this
 * function
 *
 * @phy_mem_desc_ptr: Pointer to global physical memory descriptor
 *                    (bootmem descriptor)
 * @board_type:       Octeon board type enumeration
 * @board_rev_major:  Board major revision
 * @board_rev_minor:  Board minor revision
 * @cpu_clock_hz:     CPU clock freqency in hertz
 *
 * Returns 0: Failure
 *         1: success
 */
int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
				    uint16_t board_type,
				    uint8_t board_rev_major,
				    uint8_t board_rev_minor,
				    uint32_t cpu_clock_hz)
{

	/* The sysinfo structure was already initialized */
	if (state.sysinfo.board_type)
		return 0;

	memset(&(state.sysinfo), 0x0, sizeof(state.sysinfo));
	state.sysinfo.phy_mem_desc_ptr = phy_mem_desc_ptr;
	state.sysinfo.board_type = board_type;
	state.sysinfo.board_rev_major = board_rev_major;
	state.sysinfo.board_rev_minor = board_rev_minor;
	state.sysinfo.cpu_clock_hz = cpu_clock_hz;

	return 1;
}
gpl-2.0
hagar006/android_kernel_sony_apq8064
drivers/scsi/libsas/sas_host_smp.c
9503
9710
/*
 * Serial Attached SCSI (SAS) Expander discovery and configuration
 *
 * Copyright (C) 2007 James E.J. Bottomley
 *		<James.Bottomley@HansenPartnership.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 only.
 */

#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/export.h>

#include "sas_internal.h"

#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "../scsi_sas_internal.h"

/*
 * Fill an SMP DISCOVER response for one host phy into resp_data.
 * Byte offsets follow the SMP DISCOVER response format.
 */
static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
				  u8 phy_id)
{
	struct sas_phy *phy;
	struct sas_rphy *rphy;

	if (phy_id >= sas_ha->num_phys) {
		resp_data[2] = SMP_RESP_NO_PHY;
		return;
	}
	resp_data[2] = SMP_RESP_FUNC_ACC;

	phy = sas_ha->sas_phy[phy_id]->phy;
	resp_data[9] = phy_id;
	resp_data[13] = phy->negotiated_linkrate;
	memcpy(resp_data + 16, sas_ha->sas_addr, SAS_ADDR_SIZE);
	memcpy(resp_data + 24, sas_ha->sas_phy[phy_id]->attached_sas_addr,
	       SAS_ADDR_SIZE);
	resp_data[40] = (phy->minimum_linkrate << 4) |
		phy->minimum_linkrate_hw;
	resp_data[41] = (phy->maximum_linkrate << 4) |
		phy->maximum_linkrate_hw;

	/* no attached device: leave identify fields zeroed */
	if (!sas_ha->sas_phy[phy_id]->port ||
	    !sas_ha->sas_phy[phy_id]->port->port_dev)
		return;

	rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy;
	resp_data[12] = rphy->identify.device_type << 4;
	resp_data[14] = rphy->identify.initiator_port_protocols;
	resp_data[15] = rphy->identify.target_port_protocols;
}

/**
 * to_sas_gpio_gp_bit - given the gpio frame data find the byte/bit position of 'od'
 * @od: od bit to find
 * @data: incoming bitstream (from frame)
 * @index: requested data register index (from frame)
 * @count: total number of registers in the bitstream (from frame)
 * @bit: bit position of 'od' in the returned byte
 *
 * returns NULL if 'od' is not in 'data'
 *
 * From SFF-8485 v0.7:
 * "In GPIO_TX[1], bit 0 of byte 3 contains the first bit (i.e., OD0.0)
 *  and bit 7 of byte 0 contains the 32nd bit (i.e., OD10.1).
 *
 *  In GPIO_TX[2], bit 0 of byte 3 contains the 33rd bit (i.e., OD10.2)
 *  and bit 7 of byte 0 contains the 64th bit (i.e., OD21.0)."
 *
 * The general-purpose (raw-bitstream) RX registers have the same layout
 * although 'od' is renamed 'id' for 'input data'.
 *
 * SFF-8489 defines the behavior of the LEDs in response to the 'od' values.
 */
static u8 *to_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count, u8 *bit)
{
	unsigned int reg;
	u8 byte;

	/* gp registers start at index 1 */
	if (index == 0)
		return NULL;

	index--; /* make index 0-based */
	if (od < index * 32)
		return NULL;

	od -= index * 32;
	reg = od >> 5;

	if (reg >= count)
		return NULL;

	od &= (1 << 5) - 1;
	byte = 3 - (od >> 3);	/* bytes within a register are big-endian ordered */
	*bit = od & ((1 << 3) - 1);

	return &data[reg * 4 + byte];
}

/* Test one general-purpose GPIO bit; returns 0/1, or -1 if out of range. */
int try_test_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count)
{
	u8 *byte;
	u8 bit;

	byte = to_sas_gpio_gp_bit(od, data, index, count, &bit);
	if (!byte)
		return -1;

	return (*byte >> bit) & 1;
}
EXPORT_SYMBOL(try_test_sas_gpio_gp_bit);

/*
 * Forward an SMP WRITE GPIO REGISTER to the LLD, if it implements it.
 * Returns the number of registers written (0 on failure/unsupported);
 * the SMP result code is stored in resp_data[2].
 */
static int sas_host_smp_write_gpio(struct sas_ha_struct *sas_ha, u8 *resp_data,
				   u8 reg_type, u8 reg_index, u8 reg_count,
				   u8 *req_data)
{
	struct sas_internal *i = to_sas_internal(sas_ha->core.shost->transportt);
	int written;

	if (i->dft->lldd_write_gpio == NULL) {
		resp_data[2] = SMP_RESP_FUNC_UNK;
		return 0;
	}

	written = i->dft->lldd_write_gpio(sas_ha, reg_type, reg_index,
					  reg_count, req_data);

	if (written < 0) {
		resp_data[2] = SMP_RESP_FUNC_FAILED;
		written = 0;
	} else
		resp_data[2] = SMP_RESP_FUNC_ACC;

	return written;
}

/*
 * Fill an SMP REPORT PHY SATA response for one host phy, including the
 * initial D2H register FIS (byte-swapped per the SMP little-endian rule).
 */
static void sas_report_phy_sata(struct sas_ha_struct *sas_ha, u8 *resp_data,
				u8 phy_id)
{
	struct sas_rphy *rphy;
	struct dev_to_host_fis *fis;
	int i;

	if (phy_id >= sas_ha->num_phys) {
		resp_data[2] = SMP_RESP_NO_PHY;
		return;
	}

	resp_data[2] = SMP_RESP_PHY_NO_SATA;

	if (!sas_ha->sas_phy[phy_id]->port)
		return;

	rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy;
	fis = (struct dev_to_host_fis *)
		sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd;
	if (rphy->identify.target_port_protocols != SAS_PROTOCOL_SATA)
		return;

	resp_data[2] = SMP_RESP_FUNC_ACC;
	resp_data[9] = phy_id;
	memcpy(resp_data + 16, sas_ha->sas_phy[phy_id]->attached_sas_addr,
	       SAS_ADDR_SIZE);

	/* check to see if we have a valid d2h fis */
	if (fis->fis_type != 0x34)
		return;

	/* the d2h fis is required by the standard to be in LE format */
	for (i = 0; i < 20; i += 4) {
		u8 *dst = resp_data + 24 + i, *src =
			&sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd[i];
		dst[0] = src[3];
		dst[1] = src[2];
		dst[2] = src[1];
		dst[3] = src[0];
	}
}

/*
 * Handle an SMP PHY CONTROL request: validate the phy and operation,
 * route link resets through libata EH when possible, else call the LLD.
 */
static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
			    u8 phy_op, enum sas_linkrate min,
			    enum sas_linkrate max, u8 *resp_data)
{
	struct sas_internal *i =
		to_sas_internal(sas_ha->core.shost->transportt);
	struct sas_phy_linkrates rates;
	struct asd_sas_phy *asd_phy;

	if (phy_id >= sas_ha->num_phys) {
		resp_data[2] = SMP_RESP_NO_PHY;
		return;
	}

	asd_phy = sas_ha->sas_phy[phy_id];
	switch (phy_op) {
	case PHY_FUNC_NOP:
	case PHY_FUNC_LINK_RESET:
	case PHY_FUNC_HARD_RESET:
	case PHY_FUNC_DISABLE:
	case PHY_FUNC_CLEAR_ERROR_LOG:
	case PHY_FUNC_CLEAR_AFFIL:
	case PHY_FUNC_TX_SATA_PS_SIGNAL:
		break;

	default:
		resp_data[2] = SMP_RESP_PHY_UNK_OP;
		return;
	}

	rates.minimum_linkrate = min;
	rates.maximum_linkrate = max;

	/* filter reset requests through libata eh */
	if (phy_op == PHY_FUNC_LINK_RESET && sas_try_ata_reset(asd_phy) == 0) {
		resp_data[2] = SMP_RESP_FUNC_ACC;
		return;
	}

	if (i->dft->lldd_control_phy(asd_phy, phy_op, &rates))
		resp_data[2] = SMP_RESP_FUNC_FAILED;
	else
		resp_data[2] = SMP_RESP_FUNC_ACC;
}

/*
 * Top-level handler for SMP requests addressed to the host itself
 * (the "virtual expander").  The request/response frames are copied
 * in and out of the bsg bios via kmap_atomic.
 */
int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
			 struct request *rsp)
{
	u8 *req_data = NULL, *resp_data = NULL, *buf;
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
	int error = -EINVAL;

	/* eight is the minimum size for request and response frames */
	if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
		goto out;

	if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
	    bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
		shost_printk(KERN_ERR, shost,
			"SMP request/response frame crosses page boundary");
		goto out;
	}

	req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);

	/* make sure frame can always be built ... we copy
	 * back only the requested length */
	resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);

	if (!req_data || !resp_data) {
		error = -ENOMEM;
		goto out;
	}

	local_irq_disable();
	buf = kmap_atomic(bio_page(req->bio));
	/*
	 * NOTE(review): the copy starts at the page base while the
	 * kunmap below subtracts bio_offset() — these look asymmetric.
	 * Presumably bsg SMP buffers are page-aligned (offset 0); verify
	 * against the bsg allocation path before relying on this.
	 */
	memcpy(req_data, buf, blk_rq_bytes(req));
	kunmap_atomic(buf - bio_offset(req->bio));
	local_irq_enable();

	if (req_data[0] != SMP_REQUEST)
		goto out;

	/* always succeeds ... even if we can't process the request
	 * the result is in the response frame */
	error = 0;

	/* set up default don't know response */
	resp_data[0] = SMP_RESPONSE;
	resp_data[1] = req_data[1];
	resp_data[2] = SMP_RESP_FUNC_UNK;

	switch (req_data[1]) {
	case SMP_REPORT_GENERAL:
		req->resid_len -= 8;
		rsp->resid_len -= 32;
		resp_data[2] = SMP_RESP_FUNC_ACC;
		resp_data[9] = sas_ha->num_phys;
		break;

	case SMP_REPORT_MANUF_INFO:
		req->resid_len -= 8;
		rsp->resid_len -= 64;
		resp_data[2] = SMP_RESP_FUNC_ACC;
		memcpy(resp_data + 12, shost->hostt->name,
		       SAS_EXPANDER_VENDOR_ID_LEN);
		memcpy(resp_data + 20, "libsas virt phy",
		       SAS_EXPANDER_PRODUCT_ID_LEN);
		break;

	case SMP_READ_GPIO_REG:
		/* FIXME: need GPIO support in the transport class */
		break;

	case SMP_DISCOVER:
		req->resid_len -= 16;
		if ((int)req->resid_len < 0) {
			req->resid_len = 0;
			error = -EINVAL;
			goto out;
		}
		rsp->resid_len -= 56;
		sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
		break;

	case SMP_REPORT_PHY_ERR_LOG:
		/* FIXME: could implement this with additional
		 * libsas callbacks providing the HW supports it */
		break;

	case SMP_REPORT_PHY_SATA:
		req->resid_len -= 16;
		if ((int)req->resid_len < 0) {
			req->resid_len = 0;
			error = -EINVAL;
			goto out;
		}
		rsp->resid_len -= 60;
		sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
		break;

	case SMP_REPORT_ROUTE_INFO:
		/* Can't implement; hosts have no routes */
		break;

	case SMP_WRITE_GPIO_REG: {
		/* SFF-8485 v0.7 */
		const int base_frame_size = 11;
		int to_write = req_data[4];

		if (blk_rq_bytes(req) < base_frame_size + to_write * 4 ||
		    req->resid_len < base_frame_size + to_write * 4) {
			resp_data[2] = SMP_RESP_INV_FRM_LEN;
			break;
		}

		to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2],
						   req_data[3], to_write, &req_data[8]);
		req->resid_len -= base_frame_size + to_write * 4;
		rsp->resid_len -= 8;
		break;
	}

	case SMP_CONF_ROUTE_INFO:
		/* Can't implement; hosts have no routes */
		break;

	case SMP_PHY_CONTROL:
		req->resid_len -= 44;
		if ((int)req->resid_len < 0) {
			req->resid_len = 0;
			error = -EINVAL;
			goto out;
		}
		rsp->resid_len -= 8;
		sas_phy_control(sas_ha, req_data[9], req_data[10],
				req_data[32] >> 4, req_data[33] >> 4,
				resp_data);
		break;

	case SMP_PHY_TEST_FUNCTION:
		/* FIXME: should this be implemented? */
		break;

	default:
		/* probably a 2.0 function */
		break;
	}

	local_irq_disable();
	buf = kmap_atomic(bio_page(rsp->bio));
	memcpy(buf, resp_data, blk_rq_bytes(rsp));
	flush_kernel_dcache_page(bio_page(rsp->bio));
	kunmap_atomic(buf - bio_offset(rsp->bio));
	local_irq_enable();

 out:
	kfree(req_data);
	kfree(resp_data);
	return error;
}
gpl-2.0
thiagomacieira/linux
arch/arm/mm/copypage-fa.c
9759
2317
/*
 * linux/arch/arm/lib/copypage-fa.S
 *
 * Copyright (C) 2005 Faraday Corp.
 * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
 *
 * Based on copypage-v4wb.S:
 * Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * Faraday optimised copy_user_page
 *
 * NOTE: __naked — the asm relies on kto/kfrom arriving in r0/r1 per the
 * AAPCS; only %0 (the loop count) is passed as an explicit operand.
 * Each 32-byte chunk is cleaned+invalidated from the D-cache after the
 * store, and the write buffer is drained at the end.
 */
static void __naked fa_copy_user_page(void *kto, const void *kfrom)
{
	asm("\
	stmfd sp!, {r4, lr} @ 2\n\
	mov r2, %0 @ 1\n\
1: ldmia r1!, {r3, r4, ip, lr} @ 4\n\
	stmia r0, {r3, r4, ip, lr} @ 4\n\
	mcr p15, 0, r0, c7, c14, 1 @ 1 clean and invalidate D line\n\
	add r0, r0, #16 @ 1\n\
	ldmia r1!, {r3, r4, ip, lr} @ 4\n\
	stmia r0, {r3, r4, ip, lr} @ 4\n\
	mcr p15, 0, r0, c7, c14, 1 @ 1 clean and invalidate D line\n\
	add r0, r0, #16 @ 1\n\
	subs r2, r2, #1 @ 1\n\
	bne 1b @ 1\n\
	mcr p15, 0, r2, c7, c10, 4 @ 1 drain WB\n\
	ldmfd sp!, {r4, pc} @ 3"
	:
	: "I" (PAGE_SIZE / 32));
}

/* Map both pages and delegate to the optimised asm copy loop. */
void fa_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	fa_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);	/* unmap in reverse order of mapping */
	kunmap_atomic(kto);
}

/*
 * Faraday optimised clear_user_page
 *
 * Same story as above.
 */
void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	/* zero 32 bytes per iteration, cleaning each D-cache line behind us */
	asm volatile("\
	mov r1, %2 @ 1\n\
	mov r2, #0 @ 1\n\
	mov r3, #0 @ 1\n\
	mov ip, #0 @ 1\n\
	mov lr, #0 @ 1\n\
1: stmia %0, {r2, r3, ip, lr} @ 4\n\
	mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\
	add %0, %0, #16 @ 1\n\
	stmia %0, {r2, r3, ip, lr} @ 4\n\
	mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\
	add %0, %0, #16 @ 1\n\
	subs r1, r1, #1 @ 1\n\
	bne 1b @ 1\n\
	mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns fa_user_fns __initdata = {
	.cpu_clear_user_highpage = fa_clear_user_highpage,
	.cpu_copy_user_highpage = fa_copy_user_highpage,
};
gpl-2.0
kaber/nf-next-ipv6-nat
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
32
68839
/******************************************************************* * This file is part of the Emulex RoCE Device Driver for * * RoCE (RDMA over Converged Ethernet) adapters. * * Copyright (C) 2008-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * * * Contact Information: * linux-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 *******************************************************************/ #include <linux/dma-mapping.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> #include <rdma/iw_cm.h> #include <rdma/ib_umem.h> #include <rdma/ib_addr.h> #include "ocrdma.h" #include "ocrdma_hw.h" #include "ocrdma_verbs.h" #include "ocrdma_abi.h" int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) { if (index > 1) return -EINVAL; *pkey = 0xffff; return 0; } int ocrdma_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *sgid) { struct ocrdma_dev *dev; dev = get_ocrdma_dev(ibdev); memset(sgid, 0, sizeof(*sgid)); if (index >= OCRDMA_MAX_SGID) return -EINVAL; memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); return 0; } int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) { struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); memset(attr, 0, sizeof 
*attr); memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); attr->max_mr_size = ~0ull; attr->page_size_cap = 0xffff000; attr->vendor_id = dev->nic_info.pdev->vendor; attr->vendor_part_id = dev->nic_info.pdev->device; attr->hw_ver = 0; attr->max_qp = dev->attr.max_qp; attr->max_ah = dev->attr.max_qp; attr->max_qp_wr = dev->attr.max_wqe; attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD | IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_SHUTDOWN_PORT | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_LOCAL_DMA_LKEY; attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge); attr->max_sge_rd = 0; attr->max_cq = dev->attr.max_cq; attr->max_cqe = dev->attr.max_cqe; attr->max_mr = dev->attr.max_mr; attr->max_mw = 0; attr->max_pd = dev->attr.max_pd; attr->atomic_cap = 0; attr->max_fmr = 0; attr->max_map_per_fmr = 0; attr->max_qp_rd_atom = min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; attr->max_srq = (dev->attr.max_qp - 1); attr->max_srq_sge = dev->attr.max_srq_sge; attr->max_srq_wr = dev->attr.max_rqe; attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; attr->max_fast_reg_page_list_len = 0; attr->max_pkeys = 1; return 0; } int ocrdma_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { enum ib_port_state port_state; struct ocrdma_dev *dev; struct net_device *netdev; dev = get_ocrdma_dev(ibdev); if (port > 1) { ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port); return -EINVAL; } netdev = dev->nic_info.netdev; if (netif_running(netdev) && netif_oper_up(netdev)) { port_state = IB_PORT_ACTIVE; props->phys_state = 5; } else { port_state = IB_PORT_DOWN; props->phys_state = 3; } props->max_mtu = IB_MTU_4096; props->active_mtu = iboe_get_mtu(netdev->mtu); props->lid = 0; props->lmc = 0; props->sm_lid = 0; props->sm_sl = 0; props->state = port_state; props->port_cap_flags = IB_PORT_CM_SUP | 
IB_PORT_REINIT_SUP |
			IB_PORT_DEVICE_MGMT_SUP |
			IB_PORT_VENDOR_CLASS_SUP;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	props->active_width = IB_WIDTH_1X;
	props->active_speed = 4;
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

/* No port attribute is modifiable; only the port number is validated. */
int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
			   dev->id, port);
		return -EINVAL;
	}
	return 0;
}

/* Remember a phys-addr/len pair that userspace is allowed to mmap(). */
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

/* Drop a previously registered mmap key (exact addr+len match only). */
static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

/* Check whether an addr+len pair was registered for this ucontext. */
static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}

/* Allocate a per-process ucontext plus the DMA'able AH table that is
 * shared with userspace; the layout is copied back through udata.
 */
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *ctx;
	struct ocrdma_alloc_ucontext_resp resp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048,
PAGE_SIZE); if (!udata) return ERR_PTR(-EFAULT); ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return ERR_PTR(-ENOMEM); ctx->dev = dev; INIT_LIST_HEAD(&ctx->mm_head); mutex_init(&ctx->mm_list_lock); ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len, &ctx->ah_tbl.pa, GFP_KERNEL); if (!ctx->ah_tbl.va) { kfree(ctx); return ERR_PTR(-ENOMEM); } memset(ctx->ah_tbl.va, 0, map_len); ctx->ah_tbl.len = map_len; resp.ah_tbl_len = ctx->ah_tbl.len; resp.ah_tbl_page = ctx->ah_tbl.pa; status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len); if (status) goto map_err; resp.dev_id = dev->id; resp.max_inline_data = dev->attr.max_inline_data; resp.wqe_size = dev->attr.wqe_size; resp.rqe_size = dev->attr.rqe_size; resp.dpp_wqe_size = dev->attr.wqe_size; resp.rsvd = 0; memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver)); status = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (status) goto cpy_err; return &ctx->ibucontext; cpy_err: ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len); map_err: dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va, ctx->ah_tbl.pa); kfree(ctx); return ERR_PTR(status); } int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx) { struct ocrdma_mm *mm, *tmp; struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx); struct pci_dev *pdev = uctx->dev->nic_info.pdev; ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len); dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va, uctx->ah_tbl.pa); list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { list_del(&mm->entry); kfree(mm); } kfree(uctx); return 0; } int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context); struct ocrdma_dev *dev = ucontext->dev; unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT; u64 unmapped_db = (u64) dev->nic_info.unmapped_db; unsigned long len = (vma->vm_end - vma->vm_start); int status = 0; bool found; if (vma->vm_start & (PAGE_SIZE - 1)) return 
-EINVAL; found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len); if (!found) return -EINVAL; if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db + dev->nic_info.db_total_size)) && (len <= dev->nic_info.db_page_size)) { /* doorbell mapping */ status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len, vma->vm_page_prot); } else if (dev->nic_info.dpp_unmapped_len && (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) && (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr + dev->nic_info.dpp_unmapped_len)) && (len <= dev->nic_info.dpp_unmapped_len)) { /* dpp area mapping */ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len, vma->vm_page_prot); } else { /* queue memory mapping */ status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len, vma->vm_page_prot); } return status; } static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd, struct ib_ucontext *ib_ctx, struct ib_udata *udata) { int status; u64 db_page_addr; u64 dpp_page_addr = 0; u32 db_page_size; struct ocrdma_alloc_pd_uresp rsp; struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); rsp.id = pd->id; rsp.dpp_enabled = pd->dpp_enabled; db_page_addr = pd->dev->nic_info.unmapped_db + (pd->id * pd->dev->nic_info.db_page_size); db_page_size = pd->dev->nic_info.db_page_size; status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size); if (status) return status; if (pd->dpp_enabled) { dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr + (pd->id * OCRDMA_DPP_PAGE_SIZE); status = ocrdma_add_mmap(uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE); if (status) goto dpp_map_err; rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr); rsp.dpp_page_addr_lo = dpp_page_addr; } status = ib_copy_to_udata(udata, &rsp, sizeof(rsp)); if (status) goto ucopy_err; pd->uctx = uctx; return 0; ucopy_err: if (pd->dpp_enabled) ocrdma_del_mmap(pd->uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE); dpp_map_err: ocrdma_del_mmap(pd->uctx, 
db_page_addr, db_page_size);
	return status;
}

/* Allocate a protection domain; user PDs on GEN2 hardware get DPP
 * doorbells enabled, and the layout is copied back to userspace.
 */
struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	int status;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);
	pd->dev = dev;
	if (udata && context) {
		pd->dpp_enabled = (dev->nic_info.dev_family ==
				OCRDMA_GEN2_FAMILY) ? true : false;
		pd->num_dpp_qp =
			pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
	}
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		kfree(pd);
		return ERR_PTR(status);
	}
	atomic_set(&pd->use_cnt, 0);
	if (udata && context) {
		status = ocrdma_copy_pd_uresp(pd, context, udata);
		if (status)
			goto err;
	}
	return &pd->ibpd;

err:
	/* dealloc_pd also kfrees pd */
	ocrdma_dealloc_pd(&pd->ibpd);
	return ERR_PTR(status);
}

/* Free a PD; fails with -EFAULT while QPs/MRs still reference it. */
int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = pd->dev;
	int status;
	u64 usr_db;

	if (atomic_read(&pd->use_cnt)) {
		ocrdma_err("%s(%d) pd=0x%x is in use.\n", __func__,
			   dev->id, pd->id);
		status = -EFAULT;
		goto dealloc_err;
	}
	status = ocrdma_mbx_dealloc_pd(dev, pd);
	if (pd->uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			(pd->id * OCRDMA_DPP_PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db,
					OCRDMA_DPP_PAGE_SIZE);
		usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
		ocrdma_del_mmap(pd->uctx, usr_db,
				dev->nic_info.db_page_size);
	}
	kfree(pd);
dealloc_err:
	return status;
}

/* Allocate an lkey-only MR (no PBL pages attached yet) against a PD. */
static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
					   int acc, u32 num_pbls,
					   u32 addr_check)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = pd->dev;

	/* remote write without local write is not a valid combination */
	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		ocrdma_err("%s(%d) leaving err, invalid access rights\n",
			   __func__, dev->id);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
	mr->hwmr.dev = dev;
	mr->hwmr.fr_mr = 0;
mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pd->id, addr_check);
	if (status) {
		kfree(mr);
		return ERR_PTR(-ENOMEM);
	}
	mr->pd = pd;
	atomic_inc(&pd->use_cnt);
	mr->ibmr.lkey = mr->hwmr.lkey;
	/* lkey doubles as rkey when remote access was requested */
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return mr;
}

/* Get a DMA MR covering the whole address space (address check off). */
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	struct ocrdma_mr *mr;

	mr = ocrdma_alloc_lkey(ibpd, acc, 0, OCRDMA_ADDR_CHECK_DISABLE);
	if (IS_ERR(mr))
		return ERR_CAST(mr);

	return &mr->ibmr;
}

/* Free every PBL page of an MR plus the table tracking them. */
static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}

/* Pick the smallest PBL page size that keeps the PBL count within the
 * device limit, and record the pbl/pbe geometry in the MR.
 */
static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		/* try page sizes doubling from the minimum upward */
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

/* Allocate one zeroed DMA-coherent page per PBL in the MR's table. */
static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev,
				struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
mr->num_pbls, GFP_KERNEL);

	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			/* frees the pages allocated so far, too */
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		memset(va, 0, dma_len);
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}

/* Walk the umem chunks and fill the PBL pages with the PBEs (page
 * addresses) of the user buffer, split into low/high 32-bit halves.
 */
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct ib_umem_chunk *chunk;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	shift = ilog2(umem->page_size);

	list_for_each_entry(chunk, &umem->chunk_list, list) {
		/* get all the dma regions from the chunk. */
		for (i = 0; i < chunk->nmap; i++) {
			pages = sg_dma_len(&chunk->page_list[i]) >> shift;
			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
				/* store the page address in pbe */
				pbe->pa_lo =
				    cpu_to_le32(sg_dma_address
						(&chunk->page_list[i]) +
						(umem->page_size * pg_cnt));
				pbe->pa_hi =
				    cpu_to_le32(upper_32_bits
						((sg_dma_address
						  (&chunk->page_list[i]) +
						  umem->page_size * pg_cnt)));
				pbe_cnt += 1;
				total_num_pbes += 1;
				pbe++;

				/* if done building pbes, issue the mbx cmd. */
				if (total_num_pbes == num_pbes)
					return;

				/* if the given pbl is full storing the pbes,
				 * move to next pbl.
				 */
				if (pbe_cnt ==
					(mr->hwmr.pbl_size / sizeof(u64))) {
					pbl_tbl++;
					pbe = (struct ocrdma_pbe *)pbl_tbl->va;
					pbe_cnt = 0;
				}
			}
		}
	}
}

/* Register a user-memory MR: pin the pages, size and allocate the
 * PBLs, fill in the PBEs and register the MR with the device.
 */
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc,
				 struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);
	dev = pd->dev;

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->hwmr.dev = dev;
	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(mr, num_pbes);
	if (status)
		goto umem_err;
	/* NOTE(review): the umem_err path only kfrees mr; when
	 * ib_umem_get() already succeeded the pinned umem does not
	 * appear to be released here - verify against ib_umem_release
	 * requirements.
	 */

	mr->hwmr.pbe_size = mr->umem->page_size;
	mr->hwmr.fbo = mr->umem->offset;
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr, num_pbes);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->pd = pd;
	atomic_inc(&pd->use_cnt);
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	kfree(mr);
	return ERR_PTR(status);
}

/* Deregister an MR: drop the HW lkey, free PBLs, release the umem. */
int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = mr->hwmr.dev;
	int status;

	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	if (mr->hwmr.fr_mr == 0)
		ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	atomic_dec(&mr->pd->use_cnt);
	/* it could be user registered memory.
*/ if (mr->umem) ib_umem_release(mr->umem); kfree(mr); return status; } static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata, struct ib_ucontext *ib_ctx) { int status; struct ocrdma_ucontext *uctx; struct ocrdma_create_cq_uresp uresp; uresp.cq_id = cq->id; uresp.page_size = cq->len; uresp.num_pages = 1; uresp.max_hw_cqe = cq->max_hw_cqe; uresp.page_addr[0] = cq->pa; uresp.db_page_addr = cq->dev->nic_info.unmapped_db; uresp.db_page_size = cq->dev->nic_info.db_page_size; uresp.phase_change = cq->phase_change ? 1 : 0; status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (status) { ocrdma_err("%s(%d) copy error cqid=0x%x.\n", __func__, cq->dev->id, cq->id); goto err; } uctx = get_ocrdma_ucontext(ib_ctx); status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size); if (status) goto err; status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size); if (status) { ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size); goto err; } cq->ucontext = uctx; err: return status; } struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector, struct ib_ucontext *ib_ctx, struct ib_udata *udata) { struct ocrdma_cq *cq; struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); int status; struct ocrdma_create_cq_ureq ureq; if (udata) { if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) return ERR_PTR(-EFAULT); } else ureq.dpp_cq = 0; cq = kzalloc(sizeof(*cq), GFP_KERNEL); if (!cq) return ERR_PTR(-ENOMEM); spin_lock_init(&cq->cq_lock); spin_lock_init(&cq->comp_handler_lock); atomic_set(&cq->use_cnt, 0); INIT_LIST_HEAD(&cq->sq_head); INIT_LIST_HEAD(&cq->rq_head); cq->dev = dev; status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq); if (status) { kfree(cq); return ERR_PTR(status); } if (ib_ctx) { status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx); if (status) goto ctx_err; } cq->phase = OCRDMA_CQE_VALID; cq->arm_needed = true; dev->cq_tbl[cq->id] = cq; return &cq->ibcq; ctx_err: ocrdma_mbx_destroy_cq(dev, cq); 
kfree(cq);
	return ERR_PTR(status);
}

/* Resizing is not really supported; only accept counts that already
 * fit in the HW CQ.
 */
int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}

/* Destroy a CQ; fails while QPs still reference it (use_cnt != 0). */
int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
	int status;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = cq->dev;

	if (atomic_read(&cq->use_cnt))
		return -EINVAL;

	status = ocrdma_mbx_destroy_cq(dev, cq);

	if (cq->ucontext) {
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq->len);
		ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
				dev->nic_info.db_page_size);
	}
	dev->cq_tbl[cq->id] = NULL;

	kfree(cq);
	return status;
}

/* Publish qp in the device's qpn -> qp lookup table. */
static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}

/* Validate ib_qp_init_attr against device limits and GSI QP rules. */
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type != IB_QPT_GSI &&
	    attrs->qp_type != IB_QPT_RC &&
	    attrs->qp_type != IB_QPT_UD) {
		ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n",
			   __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
		ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n",
			   __func__, dev->id, attrs->cap.max_send_wr);
		ocrdma_err("%s(%d) supported send_wr=0x%x\n",
			   __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	/* RQ depth is only relevant when the QP has no SRQ */
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n",
			   __func__, dev->id, attrs->cap.max_recv_wr);
		ocrdma_err("%s(%d) supported recv_wr=0x%x\n",
			   __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		ocrdma_err("%s(%d) unsupported inline data size=0x%x"
			   " requested\n", __func__, dev->id,
			   attrs->cap.max_inline_data);
		ocrdma_err("%s(%d) supported inline data size=0x%x\n",
			   __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n",
			   __func__, dev->id, attrs->cap.max_send_sge);
		ocrdma_err("%s(%d) supported send_sge=0x%x\n",
			   __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n",
			   __func__, dev->id, attrs->cap.max_recv_sge);
		ocrdma_err("%s(%d) supported recv_sge=0x%x\n",
			   __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		ocrdma_err
		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		     __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		ocrdma_err("%s(%d) GSI special QPs already created.\n",
			   __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
				   __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}

/* Copy QP queue/doorbell layout back to userspace and register the
 * queue pages for mmap().
 */
static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status = 0;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_dev *dev = qp->dev;
	struct ocrdma_pd *pd = qp->pd;

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = qp->sq.len;
	uresp.sq_page_addr[0] = qp->sq.pa;
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	/* RQ details only exist when the QP owns its own RQ (no SRQ) */
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = qp->rq.len;
		uresp.rq_page_addr[0] = qp->rq.pa;
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
		uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ?
			OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET;
		uresp.db_shift = (qp->id < 128) ? 24 : 16;
	} else {
		uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}
	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		ocrdma_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}

/* Compute kernel-space SQ/RQ doorbell addresses for this QP. */
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			((qp->id < 128) ?
OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET);
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

/* Allocate kernel wr_id bookkeeping tables for a kernel QP.
 * NOTE(review): if the second kzalloc fails, the first table is not
 * freed here; the caller's error path (ocrdma_create_qp) kfrees both
 * tables, so this is not a leak in the current call graph.
 */
static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

/* Seed software QP state from the creation attributes. */
static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
}

/* Take a reference on every object the new QP points at. */
static void ocrdma_set_qp_use_cnt(struct ocrdma_qp *qp,
				  struct ocrdma_pd *pd)
{
	atomic_inc(&pd->use_cnt);
	atomic_inc(&qp->sq_cq->use_cnt);
	atomic_inc(&qp->rq_cq->use_cnt);
	if (qp->srq)
		atomic_inc(&qp->srq->use_cnt);
	qp->ibqp.qp_num = qp->id;
}

/* Remember the GSI QP's CQs so consumer QPs can be kept off them. */
static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

/* Create a QP: validate attributes, issue the mailbox command, set up
 * doorbells and the qpn map, and (for user QPs) copy the layout back.
 */
struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = pd->dev;
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	qp->dev = dev;
	ocrdma_set_qp_init_params(qp, pd, attrs);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
					ureq.dpp_cq_id,
					&dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user space QP's wr_id table are managed in library */
	if (udata == NULL) {
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
				  OCRDMA_QP_FAST_REG);
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	ocrdma_set_qp_use_cnt(qp, pd);
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	/* kfree(NULL) is a no-op, so unset tables are fine here */
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}

/* Core of modify_qp: run the SW state machine, then tell the HW. */
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_machine(qp, attr->qp_state, &old_qps);
	/* if new and previous states are same hw doesn't need to
	 * know about it.
	 */
	if (status < 0)
		return status;
	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
	return status;
}

/* ib_modify_qp entry point: validate the state transition under the
 * device lock, then delegate to _ocrdma_modify_qp().
 */
int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	/* syncronize with multiple context trying to change, retrive qps */
	mutex_lock(&dev->dev_lock);
	/* syncronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
		ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for "
			   "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
			   __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
			   old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

/* Map an integer MTU to the ib_mtu enum (defaults to 1024). */
static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

/* Translate OCRDMA inbound capability flags to IB access flags. */
static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}

/* Query QP attributes from the device and translate them into the
 * generic ib_qp_attr / ib_qp_init_attr representation.
 */
int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = qp->dev;

	memset(&params, 0, sizeof(params));
mutex_lock(&dev->dev_lock); status = ocrdma_mbx_query_qp(dev, qp, &params); mutex_unlock(&dev->dev_lock); if (status) goto mbx_err; qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT); qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT); qp_attr->path_mtu = ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx & OCRDMA_QP_PARAMS_PATH_MTU_MASK) >> OCRDMA_QP_PARAMS_PATH_MTU_SHIFT; qp_attr->path_mig_state = IB_MIG_MIGRATED; qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK; qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK; qp_attr->dest_qp_num = params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK; qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags); qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1; qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1; qp_attr->cap.max_send_sge = qp->sq.max_sges; qp_attr->cap.max_recv_sge = qp->rq.max_sges; qp_attr->cap.max_inline_data = dev->attr.max_inline_data; qp_init_attr->cap = qp_attr->cap; memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0], sizeof(params.dgid)); qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK; qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx; qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_HOP_LMT_MASK) >> OCRDMA_QP_PARAMS_HOP_LMT_SHIFT; qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK) >> OCRDMA_QP_PARAMS_TCLASS_SHIFT; qp_attr->ah_attr.ah_flags = IB_AH_GRH; qp_attr->ah_attr.port_num = 1; qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_SL_MASK) >> OCRDMA_QP_PARAMS_SL_SHIFT; qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >> OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT; qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >> OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT; qp_attr->retry_cnt = (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >> OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT; 
qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	qp_attr->ah_attr.src_path_bits = 0;
	qp_attr->ah_attr.static_rate = 0;
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
		    OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
mbx_err:
	return status;
}

/* Flip the allocation bit for SRQ index 'idx' (one bit per RQE). */
static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
{
	int i = idx / 32;
	unsigned int mask = (1 << (idx % 32));

	if (srq->idx_bit_fields[i] & mask)
		srq->idx_bit_fields[i] &= ~mask;
	else
		srq->idx_bit_fields[i] |= mask;
}

/* Number of free slots in a circular HW queue. */
static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
	int free_cnt;

	if (q->head >= q->tail)
		free_cnt = (q->max_cnt - q->head) + q->tail;
	else
		free_cnt = q->tail - q->head;
	return free_cnt;
}

/* SQ is empty when head meets tail and free slots remain (not full). */
static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
	return (qp->sq.tail == qp->sq.head &&
		ocrdma_hwq_free_cnt(&qp->sq) ? 1 : 0);
}

static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
	return (qp->rq.tail == qp->rq.head) ?
1 : 0;
}

/* Address of the current head entry of a HW queue. */
static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
	return q->va + (q->head * q->entry_size);
}

static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
				      u32 idx)
{
	return q->va + (idx * q->entry_size);
}

/* head/tail advance with wrap via the max_wqe_idx mask */
static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
	q->head = (q->head + 1) & q->max_wqe_idx;
}

static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
	q->tail = (q->tail + 1) & q->max_wqe_idx;
}

/* discard the cqe for a given QP */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
	unsigned long cq_flags;
	unsigned long flags;
	int discard_cnt = 0;
	u32 cur_getp, stop_getp;
	struct ocrdma_cqe *cqe;
	u32 qpn = 0;

	spin_lock_irqsave(&cq->cq_lock, cq_flags);

	/* traverse through the CQEs in the hw CQ,
	 * find the matching CQE for a given qp,
	 * mark the matching one discarded by clearing qpn.
	 * ring the doorbell in the poll_cq() as
	 * we don't complete out of order cqe.
	 */
	cur_getp = cq->getp;
	/* find upto when do we reap the cq. */
	stop_getp = cur_getp;
	do {
		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
			break;

		cqe = cq->va + cur_getp;
		/* if (a) done reaping whole hw cq, or
		 * (b) qp_xq becomes empty.
		 * then exit
		 */
		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
		/* if previously discarded cqe found, skip that too. */
		/* check for matching qp */
		if (qpn == 0 || qpn != qp->id)
			goto skip_cqe;
		/* mark cqe discarded so that it is not picked up later
		 * in the poll_cq().
		 */
		discard_cnt += 1;
		cqe->cmn.qpn = 0;
		if (is_cqe_for_sq(cqe))
			ocrdma_hwq_inc_tail(&qp->sq);
		else {
			if (qp->srq) {
				spin_lock_irqsave(&qp->srq->q_lock, flags);
				ocrdma_hwq_inc_tail(&qp->srq->rq);
				ocrdma_srq_toggle_bit(qp->srq, cur_getp);
				spin_unlock_irqrestore(&qp->srq->q_lock,
						       flags);
			} else
				ocrdma_hwq_inc_tail(&qp->rq);
		}
skip_cqe:
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
	} while (cur_getp != stop_getp);
	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}

/* Remove qp from its CQs' flush lists, synchronized with CQ polling. */
static void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
	int found = false;
	unsigned long flags;
	struct ocrdma_dev *dev = qp->dev;
	/* sync with any active CQ poll */

	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (found)
		list_del(&qp->sq_entry);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (found)
			list_del(&qp->rq_entry);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

/* Destroy a QP: move it to ERROR, tear down HW state, discard its
 * pending CQEs (kernel QPs), drop mmap keys (user QPs) and release
 * the references taken at create time.
 */
int ocrdma_destroy_qp(struct ib_qp *ibqp)
{
	int status;
	struct ocrdma_pd *pd;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	struct ib_qp_attr attrs;
	int attr_mask = IB_QP_STATE;
	unsigned long flags;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	attrs.qp_state = IB_QPS_ERR;
	pd = qp->pd;

	/* change the QP state to ERROR */
	_ocrdma_modify_qp(ibqp, &attrs, attr_mask);

	/* ensure that CQEs for newly created QP (whose id may be same with
	 * one which just getting destroyed are same), dont get
	 * discarded until the old CQEs are discarded.
	 */
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_destroy_qp(dev, qp);

	/*
	 * acquire CQ lock while destroy is in progress, in order to
	 * protect against processing in-flight CQEs for this QP.
	 */
	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
		spin_lock(&qp->rq_cq->cq_lock);

	ocrdma_del_qpn_map(dev, qp);

	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
		spin_unlock(&qp->rq_cq->cq_lock);
	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);

	if (!pd->uctx) {
		/* kernel QP: purge any leftover CQEs carrying this QP id */
		ocrdma_discard_cqes(qp, qp->sq_cq);
		ocrdma_discard_cqes(qp, qp->rq_cq);
	}
	mutex_unlock(&dev->dev_lock);

	if (pd->uctx) {
		/* user QP: drop the mmap entries for the queue memory */
		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len);
		if (!qp->srq)
			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len);
	}

	ocrdma_del_flush_qp(qp);

	atomic_dec(&qp->pd->use_cnt);
	atomic_dec(&qp->sq_cq->use_cnt);
	atomic_dec(&qp->rq_cq->use_cnt);
	if (qp->srq)
		atomic_dec(&qp->srq->use_cnt);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	return status;
}

/* Copy SRQ creation results (doorbell/page layout) to user space and
 * register the RQ memory for later mmap by the library.
 */
static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
{
	int status;
	struct ocrdma_create_srq_uresp uresp;

	uresp.rq_dbid = srq->rq.dbid;
	uresp.num_rq_pages = 1;
	uresp.rq_page_addr[0] = srq->rq.pa;
	uresp.rq_page_size = srq->rq.len;
	uresp.db_page_addr = srq->dev->nic_info.unmapped_db +
		(srq->pd->id * srq->dev->nic_info.db_page_size);
	uresp.db_page_size = srq->dev->nic_info.db_page_size;
	uresp.num_rqe_allocated = srq->rq.max_cnt;
	/* GEN2 chips use a different doorbell offset/shift layout */
	if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
		uresp.db_shift = 24;
	} else {
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}

	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status)
		return status;
	status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
				 uresp.rq_page_size);
	if (status)
		return status;
	return status;
}

/* Create-SRQ verb. For kernel consumers (udata == NULL) an id-table and
 * free-slot bitmap are allocated to track out-of-order SRQ completions.
 */
struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = pd->dev;
	struct ocrdma_srq *srq;

	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
		return ERR_PTR(-EINVAL);
	if (init_attr->attr.max_wr > dev->attr.max_rqe)
		return ERR_PTR(-EINVAL);

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(status);

	spin_lock_init(&srq->q_lock);
	srq->dev = dev;
	srq->pd = pd;
	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
	status = ocrdma_mbx_create_srq(srq, init_attr, pd);
	if (status)
		goto err;

	if (udata == NULL) {
		/* kernel SRQ: shadow wr_id table, one entry per RQE */
		srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
					     GFP_KERNEL);
		if (srq->rqe_wr_id_tbl == NULL)
			goto arm_err;

		/* bitmap of free tag slots, 32 slots per word, all set free */
		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
		    (srq->rq.max_cnt % 32 ? 1 : 0);
		srq->idx_bit_fields =
		    kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
		if (srq->idx_bit_fields == NULL)
			goto arm_err;
		memset(srq->idx_bit_fields, 0xff,
		       srq->bit_fields_len * sizeof(u32));
	}

	if (init_attr->attr.srq_limit) {
		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
		if (status)
			goto arm_err;
	}

	atomic_set(&srq->use_cnt, 0);
	if (udata) {
		status = ocrdma_copy_srq_uresp(srq, udata);
		if (status)
			goto arm_err;
	}

	atomic_inc(&pd->use_cnt);
	return &srq->ibsrq;

arm_err:
	ocrdma_mbx_destroy_srq(dev, srq);
err:
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq->idx_bit_fields);
	kfree(srq);
	return ERR_PTR(status);
}

/* Modify-SRQ verb: only the limit may change; resizing is unsupported. */
int ocrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
		      enum ib_srq_attr_mask srq_attr_mask,
		      struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		status = -EINVAL;
	else
		status = ocrdma_mbx_modify_srq(srq, srq_attr);
	return status;
}

/* Query-SRQ verb: fetch current attributes from firmware. */
int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	status = ocrdma_mbx_query_srq(srq, srq_attr);
	return status;
}

/* Destroy-SRQ verb: refuse while QPs still reference the SRQ (-EAGAIN). */
int ocrdma_destroy_srq(struct ib_srq *ibsrq)
{
	int status;
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev;

	srq = get_ocrdma_srq(ibsrq);
	dev = srq->dev;
	if (atomic_read(&srq->use_cnt)) {
		ocrdma_err("%s(%d) err, srq=0x%x in use\n",
			   __func__, dev->id, srq->id);
		return -EAGAIN;
	}

	status =
	    ocrdma_mbx_destroy_srq(dev, srq);

	if (srq->pd->uctx)
		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);

	atomic_dec(&srq->pd->use_cnt);
	kfree(srq->idx_bit_fields);
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq);
	return status;
}

/* unprivileged verbs and their support functions. */

/* Fill the extended UD header (dest QPN, qkey, AH id) that sits between the
 * WQE header and the SGE list of a UD send WQE. GSI QPs use the QP's own
 * qkey rather than the one supplied in the work request.
 */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
				struct ocrdma_hdr_wqe *hdr,
				struct ib_send_wr *wr)
{
	struct ocrdma_ewqe_ud_hdr *ud_hdr =
		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
	struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);

	ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
	if (qp->qp_type == IB_QPT_GSI)
		ud_hdr->qkey = qp->qkey;
	else
		ud_hdr->qkey = wr->wr.ud.remote_qkey;
	ud_hdr->rsvd_ahid = ah->id;
}

/* Translate an ib_sge list into HW SGEs, accumulating total_len in the WQE
 * header. An empty list still emits one zeroed SGE as a placeholder.
 */
static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
			      struct ocrdma_sge *sge, int num_sge,
			      struct ib_sge *sg_list)
{
	int i;

	for (i = 0; i < num_sge; i++) {
		sge[i].lrkey = sg_list[i].lkey;
		sge[i].addr_lo = sg_list[i].addr;
		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
		sge[i].len = sg_list[i].length;
		hdr->total_len += sg_list[i].length;
	}
	if (num_sge == 0)
		memset(sge, 0, sizeof(*sge));
}

/* Emit either inline payload or an SGE list for a send-class WQE and fold
 * the final WQE size (in strides) into hdr->cw.
 * Returns -EINVAL when inline data exceeds the QP's inline limit.
 */
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
				    struct ocrdma_hdr_wqe *hdr,
				    struct ocrdma_sge *sge,
				    struct ib_send_wr *wr, u32 wqe_size)
{
	if (wr->send_flags & IB_SEND_INLINE) {
		if (wr->sg_list[0].length > qp->max_inline_data) {
			ocrdma_err("%s() supported_len=0x%x,"
				" unspported len req=0x%x\n", __func__,
				qp->max_inline_data, wr->sg_list[0].length);
			return -EINVAL;
		}
		/* inline: copy payload directly into the WQE */
		memcpy(sge,
		       (void *)(unsigned long)wr->sg_list[0].addr,
		       wr->sg_list[0].length);
		hdr->total_len = wr->sg_list[0].length;
		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
	} else {
		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
		if (wr->num_sge)
			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
		else
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	}
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) <<
		    OCRDMA_WQE_SIZE_SHIFT);
	return 0;
}

/* Build a SEND WQE; UD/GSI QPs carry an extra UD header before the SGEs. */
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			     struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *sge;
	u32 wqe_size = sizeof(*hdr);

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		ocrdma_build_ud_hdr(qp, hdr, wr);
		sge = (struct ocrdma_sge *)(hdr + 2);
		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
	} else
		sge = (struct ocrdma_sge *)(hdr + 1);

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	return status;
}

/* Build an RDMA WRITE WQE: local SGEs/inline data plus the remote
 * address/rkey in the first extended SGE slot.
 */
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	if (status)
		return status;
	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
	ext_rw->lrkey = wr->wr.rdma.rkey;
	ext_rw->len = hdr->total_len;
	return 0;
}

/* Build an RDMA READ WQE (no inline form; plain SGE list only). */
static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      struct ib_send_wr *wr)
{
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
	    sizeof(struct ocrdma_hdr_wqe);

	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);

	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
	ext_rw->lrkey = wr->wr.rdma.rkey;
	ext_rw->len = hdr->total_len;
}

/* Ring the SQ doorbell: "1 WQE posted" (bit 16) tagged with this SQ's dbid. */
static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->sq.dbid | (1 << 16);

	iowrite32(val, qp->sq_db);
}

/* Post-send verb: translate a chain of ib_send_wr into HW WQEs. */
int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	int status = 0;
	struct ocrdma_qp *qp =
get_ocrdma_qp(ibqp); struct ocrdma_hdr_wqe *hdr; unsigned long flags; spin_lock_irqsave(&qp->q_lock, flags); if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) { spin_unlock_irqrestore(&qp->q_lock, flags); return -EINVAL; } while (wr) { if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || wr->num_sge > qp->sq.max_sges) { status = -ENOMEM; break; } hdr = ocrdma_hwq_head(&qp->sq); hdr->cw = 0; if (wr->send_flags & IB_SEND_SIGNALED) hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT); if (wr->send_flags & IB_SEND_FENCE) hdr->cw |= (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT); if (wr->send_flags & IB_SEND_SOLICITED) hdr->cw |= (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT); hdr->total_len = 0; switch (wr->opcode) { case IB_WR_SEND_WITH_IMM: hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); hdr->immdt = ntohl(wr->ex.imm_data); case IB_WR_SEND: hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); ocrdma_build_send(qp, hdr, wr); break; case IB_WR_SEND_WITH_INV: hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT); hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); hdr->lkey = wr->ex.invalidate_rkey; status = ocrdma_build_send(qp, hdr, wr); break; case IB_WR_RDMA_WRITE_WITH_IMM: hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); hdr->immdt = ntohl(wr->ex.imm_data); case IB_WR_RDMA_WRITE: hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); status = ocrdma_build_write(qp, hdr, wr); break; case IB_WR_RDMA_READ_WITH_INV: hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT); case IB_WR_RDMA_READ: ocrdma_build_read(qp, hdr, wr); break; case IB_WR_LOCAL_INV: hdr->cw |= (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT); hdr->cw |= (sizeof(struct ocrdma_hdr_wqe) / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT; hdr->lkey = wr->ex.invalidate_rkey; break; default: status = -EINVAL; break; } if (status) { *bad_wr = wr; break; } if (wr->send_flags & IB_SEND_SIGNALED) qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1; else qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0; 
qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id; ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) & OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE); /* make sure wqe is written before adapter can access it */ wmb(); /* inform hw to start processing it */ ocrdma_ring_sq_db(qp); /* update pointer, counter for next wr */ ocrdma_hwq_inc_head(&qp->sq); wr = wr->next; } spin_unlock_irqrestore(&qp->q_lock, flags); return status; } static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) { u32 val = qp->rq.dbid | (1 << OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp)); iowrite32(val, qp->rq_db); } static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr, u16 tag) { u32 wqe_size = 0; struct ocrdma_sge *sge; if (wr->num_sge) wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe); else wqe_size = sizeof(*sge) + sizeof(*rqe); rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT); rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); rqe->total_len = 0; rqe->rsvd_tag = tag; sge = (struct ocrdma_sge *)(rqe + 1); ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list); ocrdma_cpu_to_le32(rqe, wqe_size); } int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { int status = 0; unsigned long flags; struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); struct ocrdma_hdr_wqe *rqe; spin_lock_irqsave(&qp->q_lock, flags); if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) { spin_unlock_irqrestore(&qp->q_lock, flags); *bad_wr = wr; return -EINVAL; } while (wr) { if (ocrdma_hwq_free_cnt(&qp->rq) == 0 || wr->num_sge > qp->rq.max_sges) { *bad_wr = wr; status = -ENOMEM; break; } rqe = ocrdma_hwq_head(&qp->rq); ocrdma_build_rqe(rqe, wr, 0); qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id; /* make sure rqe is written before adapter can access it */ wmb(); /* inform hw to start processing it */ ocrdma_ring_rq_db(qp); /* update pointer, counter for next wr */ 
		ocrdma_hwq_inc_head(&qp->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}

/* cqe for srq's rqe can potentially arrive out of order.
 * index gives the entry in the shadow table where to store
 * the wr_id. tag/index is returned in cqe to reference back
 * for a given rqe.
 */
static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
{
	int row = 0;
	int indx = 0;

	for (row = 0; row < srq->bit_fields_len; row++) {
		if (srq->idx_bit_fields[row]) {
			/* ffs() finds the lowest set (free) bit, 1-based */
			indx = ffs(srq->idx_bit_fields[row]);
			indx = (row * 32) + (indx - 1);
			if (indx >= srq->rq.max_cnt)
				BUG();
			/* claim the slot */
			ocrdma_srq_toggle_bit(srq, indx);
			break;
		}
	}
	/* caller guarantees a free slot exists; running out is a driver bug */
	if (row == srq->bit_fields_len)
		BUG();
	return indx;
}

/* Ring the SRQ doorbell: one RQE posted (bit 16) on this SRQ's dbid. */
static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
{
	u32 val = srq->rq.dbid | (1 << 16);

	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}

/* Post-SRQ-recv verb: each RQE is tagged with a shadow-table index so the
 * wr_id can be recovered from out-of-order completions.
 */
int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_srq *srq;
	struct ocrdma_hdr_wqe *rqe;
	u16 tag;

	srq = get_ocrdma_srq(ibsrq);

	spin_lock_irqsave(&srq->q_lock, flags);
	while (wr) {
		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
		    wr->num_sge > srq->rq.max_sges) {
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		tag = ocrdma_srq_get_idx(srq);
		rqe = ocrdma_hwq_head(&srq->rq);
		ocrdma_build_rqe(rqe, wr, tag);

		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
		/* make sure rqe is written before adapter can perform DMA */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_srq_db(srq);
		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&srq->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->q_lock, flags);
	return status;
}

/* Map an adapter CQE error code to the generic ib_wc_status code. */
static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
	enum ib_wc_status ibwc_status = IB_WC_GENERAL_ERR;
	switch (status) {
	case OCRDMA_CQE_GENERAL_ERR:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	case OCRDMA_CQE_LOC_LEN_ERR:
		ibwc_status = IB_WC_LOC_LEN_ERR;
		break;
	case OCRDMA_CQE_LOC_QP_OP_ERR:
		ibwc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_EEC_OP_ERR:
		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_PROT_ERR:
		ibwc_status = IB_WC_LOC_PROT_ERR;
		break;
	case OCRDMA_CQE_WR_FLUSH_ERR:
		ibwc_status = IB_WC_WR_FLUSH_ERR;
		break;
	case OCRDMA_CQE_MW_BIND_ERR:
		ibwc_status = IB_WC_MW_BIND_ERR;
		break;
	case OCRDMA_CQE_BAD_RESP_ERR:
		ibwc_status = IB_WC_BAD_RESP_ERR;
		break;
	case OCRDMA_CQE_LOC_ACCESS_ERR:
		ibwc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_INV_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ACCESS_ERR:
		ibwc_status = IB_WC_REM_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_OP_ERR:
		ibwc_status = IB_WC_REM_OP_ERR;
		break;
	case OCRDMA_CQE_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ABORT_ERR:
		ibwc_status = IB_WC_REM_ABORT_ERR;
		break;
	case OCRDMA_CQE_INV_EECN_ERR:
		ibwc_status = IB_WC_INV_EECN_ERR;
		break;
	case OCRDMA_CQE_INV_EEC_STATE_ERR:
		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
		break;
	case OCRDMA_CQE_FATAL_ERR:
		ibwc_status = IB_WC_FATAL_ERR;
		break;
	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
		break;
	default:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	};
	return ibwc_status;
}

/* Fill an ib_wc for the send WQE at wqe_idx: recover wr_id from the shadow
 * table and the opcode from the (byte-swapped) WQE header.
 */
static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
			     u32 wqe_idx)
{
	struct ocrdma_hdr_wqe *hdr;
	struct ocrdma_sge *rw;
	int opcode;

	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;

	/* Undo the hdr->cw swap */
	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
	switch (opcode) {
	case OCRDMA_WRITE:
		ibwc->opcode = IB_WC_RDMA_WRITE;
		break;
	case OCRDMA_READ:
		rw = (struct ocrdma_sge *)(hdr + 1);
		ibwc->opcode = IB_WC_RDMA_READ;
		ibwc->byte_len = rw->len;
		break;
	case OCRDMA_SEND:
		ibwc->opcode = IB_WC_SEND;
		break;
	case
	     OCRDMA_LKEY_INV:
		ibwc->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		ibwc->status = IB_WC_GENERAL_ERR;
		ocrdma_err("%s() invalid opcode received = 0x%x\n",
			   __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
		break;
	};
}

/* Overwrite a CQE's status field with WR_FLUSH_ERR in place, so re-reporting
 * the same CQE yields flush semantics; UD receive CQEs keep the status in a
 * different bitfield than all other CQEs.
 */
static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
					  struct ocrdma_cqe *cqe)
{
	if (is_cqe_for_sq(cqe)) {
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) &
				~OCRDMA_CQE_STATUS_MASK);
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) |
				(OCRDMA_CQE_WR_FLUSH_ERR <<
					OCRDMA_CQE_STATUS_SHIFT));
	} else {
		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_UD_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_UD_STATUS_SHIFT));
		} else {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_STATUS_SHIFT));
		}
	}
}

/* Common error-completion handling: report the translated status, push the
 * QP to ERROR, and return true ("expand") when more WQEs/RQEs still need
 * flush completions generated from this same CQE.
 */
static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	bool expand = false;

	ibwc->byte_len = 0;
	ibwc->qp = &qp->ibqp;
	ibwc->status = ocrdma_to_ibwc_err(status);

	ocrdma_flush_qp(qp);
	ocrdma_qp_state_machine(qp, IB_QPS_ERR, NULL);

	/* if wqe/rqe pending for which cqe needs to be returned,
	 * trigger inflating it.
	 */
	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
		expand = true;
		ocrdma_set_cqe_status_flushed(qp, cqe);
	}
	return expand;
}

/* Error completion for the RQE at the RQ tail. */
static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
	ocrdma_hwq_inc_tail(&qp->rq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

/* Error completion for the WQE at the SQ tail. */
static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
	ocrdma_hwq_inc_tail(&qp->sq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

/* Handle an error CQE targeting the SQ; may instead emit an RQ flush
 * completion, or stop polling, depending on queue occupancy and whether
 * SQ and RQ share the same CQ.
 */
static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
				 bool *polled, bool *stop)
{
	bool expand;
	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	/* when hw sq is empty, but rq is not empty, so we continue
	 * to keep the cqe in order to get the cq event again.
	 */
	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
		/* when cq for rq and sq is same, it is safe to return
		 * flush cqe for RQEs.
		 */
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
		} else {
			/* stop processing further cqe as this cqe is used for
			 * triggering cq event on buddy cq of RQ.
			 * When QP is destroyed, this cqe will be removed
			 * from the cq's hardware q.
			 */
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else {
		*polled = true;
		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
	}
	return expand;
}

/* Handle a successful send CQE. Unsignaled and coalesced WQEs make the CQE
 * "expand": the same hardware CQE covers several software completions.
 */
static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe,
				     struct ib_wc *ibwc, bool *polled)
{
	bool expand = false;
	int tail = qp->sq.tail;
	u32 wqe_idx;

	if (!qp->wqe_wr_id_tbl[tail].signaled) {
		expand = true;	/* CQE cannot be consumed yet */
		*polled = false;	/* WC cannot be consumed yet */
	} else {
		ibwc->status = IB_WC_SUCCESS;
		ibwc->wc_flags = 0;
		ibwc->qp = &qp->ibqp;
		ocrdma_update_wc(qp, ibwc, tail);
		*polled = true;
		wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
		if (tail != wqe_idx)
			expand = true;	/* Coalesced CQE can't be consumed yet */
	}
	ocrdma_hwq_inc_tail(&qp->sq);
	return expand;
}

/* Dispatch a send-side CQE to the success or error path by its status. */
static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status == OCRDMA_CQE_SUCCESS)
		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
	else
		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
	return expand;
}

/* Extract UD-specific receive completion fields (src QP, pkey, GRH flag,
 * transfer length) and return the UD status code.
 */
static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
{
	int status;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
						OCRDMA_CQE_SRCQP_MASK;
	ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
						OCRDMA_CQE_PKEY_MASK;
	ibwc->wc_flags = IB_WC_GRH;
	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
					OCRDMA_CQE_UD_XFER_LEN_SHIFT);
	return status;
}

/* SRQ receive completion: recover wr_id via the tag echoed in the CQE and
 * release the tag slot back to the free bitmap.
 */
static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
				       struct ocrdma_cqe *cqe,
				       struct ocrdma_qp *qp)
{
	unsigned long flags;
	struct ocrdma_srq *srq;
	u32 wqe_idx;

	srq = get_ocrdma_srq(qp->ibqp.srq);
	wqe_idx = le32_to_cpu(cqe->rq.buftag_qpn) >> OCRDMA_CQE_BUFTAG_SHIFT;
	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
	spin_lock_irqsave(&srq->q_lock, flags);
	ocrdma_srq_toggle_bit(srq, wqe_idx);
	spin_unlock_irqrestore(&srq->q_lock, flags);
	ocrdma_hwq_inc_tail(&srq->rq);
}

/* Handle an error CQE targeting the RQ; mirrors ocrdma_poll_err_scqe for
 * the receive direction.
 */
static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
				 struct ib_wc *ibwc, bool *polled, bool *stop,
				 int status)
{
	bool expand;

	/* when hw_rq is empty, but wq is not empty, so continue
	 * to keep the cqe to get the cq event again.
	 */
	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
		} else {
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else {
		*polled = true;
		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
	}
	return expand;
}

/* Fill an ib_wc for a successful receive completion, including immediate
 * data / invalidate-rkey flags, for both SRQ and ordinary RQ cases.
 */
static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->qp = &qp->ibqp;
	ibwc->status = IB_WC_SUCCESS;

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		ocrdma_update_ud_rcqe(ibwc, cqe);
	else
		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

	if (is_cqe_imm(cqe)) {
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_wr_imm(cqe)) {
		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_invalidated(cqe)) {
		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}
	if (qp->ibqp.srq)
		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
	else {
		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
		ocrdma_hwq_inc_tail(&qp->rq);
	}
}

/* Dispatch a receive-side CQE to the success or error path; UD CQEs keep
 * their status in a different bitfield.
 */
static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand = false;

	ibwc->wc_flags = 0;
	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_UD_STATUS_MASK) >>
				  OCRDMA_CQE_UD_STATUS_SHIFT;
	else
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status == OCRDMA_CQE_SUCCESS) {
		*polled = true;
		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
	} else {
		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
					      status);
	}
	return expand;
}

/* Track CQE validity: either flip the phase bit on wrap-around
 * (phase_change chips) or explicitly clear the consumed CQE's valid bit.
 */
static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
				   u16 cur_getp)
{
	if (cq->phase_change) {
		if (cur_getp == 0)
			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
	} else
		/* clear valid bit */
		cqe->flags_status_srcqpn = 0;
}

/* Reap up to num_entries completions from the hardware CQ into ibwc[].
 * A CQE may "expand" into several work completions; returns the number of
 * work completions produced.
 */
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = cq->dev;
	struct ocrdma_cqe *cqe;
	u16 cur_getp;
	bool polled = false;
	bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether valid cqe or not */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqe */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];
		BUG_ON(qp == NULL);

		if (is_cqe_for_sq(cqe)) {
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		} else {
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		}
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		if (polled) {
			num_entries -= 1;
			i += 1;
			ibwc = ibwc + 1;
			polled = false;
		}
	}
stop_cqe:
	cq->getp = cur_getp;
	if (polled_hw_cqes || expand || stop) {
		/* credit the consumed CQEs back to hardware */
		ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
				  polled_hw_cqes);
	}
	return i;
}

/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else
			return err_cqes;
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}

/* Poll-CQ verb: first drain hardware CQEs, then synthesize FLUSH
 * completions for QPs parked on this CQ's flush list.
 */
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int cqes_to_poll = num_entries;
	struct ocrdma_cq *cq = NULL;
	unsigned long flags;
	struct ocrdma_dev *dev;
	int num_os_cqe = 0, err_cqes = 0;
	struct ocrdma_qp *qp;

	cq = get_ocrdma_cq(ibcq);
	dev = cq->dev;

	/* poll cqes from adapter CQ */
	spin_lock_irqsave(&cq->cq_lock, flags);
	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	cqes_to_poll -= num_os_cqe;

	if (cqes_to_poll) {
		wc = wc + num_os_cqe;
		/* adapter returns single error cqe when qp moves to
		 * error state. So insert error cqes with wc_status as
		 * FLUSHED for pending WQEs and RQEs of QP's SQ and RQ
		 * respectively which uses this CQ.
		 */
		spin_lock_irqsave(&dev->flush_q_lock, flags);
		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
			if (cqes_to_poll == 0)
				break;
			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
			cqes_to_poll -= err_cqes;
			num_os_cqe += err_cqes;
			wc = wc + err_cqes;
		}
		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	}
	return num_os_cqe;
}

/* Req-notify verb: arm the CQ for the requested event types, ringing the
 * doorbell only when no unconsumed CQE is pending (avoids spurious IRQs).
 */
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
	struct ocrdma_cq *cq;
	unsigned long flags;
	struct ocrdma_dev *dev;
	u16 cq_id;
	u16 cur_getp;
	struct ocrdma_cqe *cqe;

	cq = get_ocrdma_cq(ibcq);
	cq_id = cq->id;
	dev = cq->dev;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
		cq->armed = true;
	if (cq_flags & IB_CQ_SOLICITED)
		cq->solicited = true;

	cur_getp = cq->getp;
	cqe = cq->va + cur_getp;

	/* check whether any valid cqe exist or not, if not then safe to
	 * arm. If cqe is not yet consumed, then let it get consumed and then
	 * we arm it to avoid false interrupts.
	 */
	if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
		cq->arm_needed = false;
		ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return 0;
}
gpl-2.0
MicroTrustRepos/microkernel
src/l4/pkg/uclibc/lib/contrib/uclibc/test/locale-mbwc/dat_wcrtomb.c
32
3152
/* Locale-keyed wcrtomb() test vectors: each entry supplies the wide char
 * input (plus a flag selecting a NULL output buffer) and the expected
 * errno / return value / multibyte bytes. Byte-string literals below are
 * exact expected encodings and must not be altered or re-escaped.
 */
/* * TEST SUITE FOR MB/WC FUNCTIONS IN C LIBRARY * * FILE: dat_wcrtomb.c * * WCRTOMB: intwcrtomb (char *s, wchar_t wc, mbstate_t *ps); * */ TST_WCRTOMB tst_wcrtomb_loc [] = { { { Twcrtomb, TST_LOC_de }, { /* #01 : normal case */ { /*input.*/ { 1, 0x00FC, 0,0 }, /*expect*/ { 0, 1,1, "ü" }, }, /* #02 : normal case */ { /*input.*/ { 1, 0x00D6, 0,0 }, /*expect*/ { 0, 1,1, "Ö" }, }, /* #03 : error case */ { /*input.*/ { 1, 0xFFA1, 0,0 }, /*expect*/ { EILSEQ,1,-1, "" }, }, /* #04 : */ { /*input.*/ { 0, 0x0041, 0,0 }, /*expect*/ { 0, 1,1, "" }, }, /* #05 : */ { /*input.*/ { 0, 0x0092, 0,0 }, /*expect*/ { 0, 1,1, "" }, }, { .is_last = 1 } } }, { { Twcrtomb, TST_LOC_enUS }, { /* #01 : normal case */ { /*input.*/ { 1, 0x0041, 0,0 }, /*expect*/ { 0, 1,1, "A" }, }, /* #02 : normal case */ { /*input.*/ { 1, 0x0042, 0,0 }, /*expect*/ { 0, 1,1, "B" }, }, /* #03 : error case */ /* <WAIVER> x 2 */ { /*input.*/ { 1, 0x0092, 0,0 }, /* assume ascii */ /*expect*/ { EILSEQ,1,-1, "" }, }, /* #04 : */ { /*input.*/ { 0, 0x0041, 0,0 }, /*expect*/ { 0, 1,1, "" }, }, /* #05 : */ { /*input.*/ { 0, 0x0092, 0,0 }, /*expect*/ { 0, 1,1, "" }, }, { .is_last = 1 } } }, #if 0 { { Twcrtomb, TST_LOC_eucJP }, { /* #01 : normal case */ { /*input.*/ { 1, 0x3042, 0,0 }, /*expect*/ { 0, 1,2, "\244\242" }, }, /* #02 : normal case */ { /*input.*/ { 1, 0x3044, 0,0 }, /*expect*/ { 0, 1,2, "\244\244" }, }, /* #03 : normal case */ { /*input.*/ { 1, 0x008E, 0,0 }, /*expect*/ { EILSEQ, 1,-1, "" }, }, /* #04 : */ { /*input.*/ { 0, 0x3042, 0,0 }, /*expect*/ { 0, 0,0, "" }, }, /* #05 : */ { /*input.*/ { 0, 0x008E, 0,0 }, /*expect*/ { 0, 0,0, "" }, }, { .is_last = 1 } } }, #else { { Twcrtomb, TST_LOC_ja_UTF8 }, { /* #01 : normal case */ { /*input.*/ { 1, 0x3042, 0,0 }, /*expect*/ { 0, 1,3, "\343\201\202" }, }, /* #02 : normal case */ { /*input.*/ { 1, 0x3044, 0,0 }, /*expect*/ { 0, 1,3, "\343\201\204" }, }, /* #03 : normal case */ { /*input.*/ { 1, 0x008E, 0,0 }, /*expect*/ { EILSEQ, 1,-1, "" }, }, /* #04 : */ { /*input.*/ { 0, 0x3042, 0,0 }, /*expect*/ { 0, 0,0, "" }, }, /* #05 : */ { /*input.*/ { 0, 0x008E, 0,0 }, /*expect*/ { 0, 0,0, "" }, }, { .is_last = 1 } } }, #endif { { Twcrtomb, TST_LOC_end } } };
gpl-2.0
schleichdi2/openpli-e2
lib/gui/ewindow.cpp
32
2501
#include <lib/gui/ewindow.h> #include <lib/gui/ewidgetdesktop.h> #include <lib/gui/ewindowstyle.h> #include <lib/gui/ewindowstyleskinned.h> #include <lib/gdi/epng.h> eWindow::eWindow(eWidgetDesktop *desktop, int z): eWidget(0) { m_flags = 0; m_desktop = desktop; /* ask style manager for current style */ ePtr<eWindowStyleManager> mgr; eWindowStyleManager::getInstance(mgr); ePtr<eWindowStyle> style; if (mgr) mgr->getStyle(desktop->getStyleID(), style); /* when there is either no style manager or no style, revert to simple style. */ if (!style) style = new eWindowStyleSimple(); setStyle(style); setZPosition(z); /* must be done before addRootWidget */ /* we are the parent for the child window. */ /* as we are in the constructor, this is thread safe. */ m_child = this; m_child = new eWidget(this); desktop->addRootWidget(this); } eWindow::~eWindow() { m_desktop->removeRootWidget(this); m_child->destruct(); } void eWindow::setTitle(const std::string &string) { if (m_title == string) return; m_title = string; event(evtTitleChanged); } std::string eWindow::getTitle() const { return m_title; } void eWindow::setBackgroundColor(const gRGB &col) { /* set background color for child, too */ eWidget::setBackgroundColor(col); m_child->setBackgroundColor(col); } void eWindow::setFlag(int flags) { m_flags |= flags; } void eWindow::clearFlag(int flags) { m_flags &= ~flags; } int eWindow::event(int event, void *data, void *data2) { switch (event) { case evtWillChangeSize: { eSize &new_size = *static_cast<eSize*>(data); eSize &offset = *static_cast<eSize*>(data2); if (!(m_flags & wfNoBorder)) { ePtr<eWindowStyle> style; if (!getStyle(style)) { // eDebug("[eWindow] evtWillChangeSize to %d %d", new_size.width(), new_size.height()); style->handleNewSize(this, new_size, offset); } } else m_child->resize(new_size); break; } case evtPaint: { if (!(m_flags & wfNoBorder)) { ePtr<eWindowStyle> style; if (!getStyle(style)) { gPainter &painter = *static_cast<gPainter*>(data2); 
style->paintWindowDecoration(this, painter, m_title); } } return 0; } case evtTitleChanged: /* m_visible_region contains, in contrast to m_visible_with_childs, only the decoration. though repainting the whole decoration is bad, repainting the whole window is even worse. */ invalidate(m_visible_region); break; default: break; } return eWidget::event(event, data, data2); }
gpl-2.0
salqueng/synergy
src/gui/src/KeySequenceWidget.cpp
32
2938
/* * synergy -- mouse and keyboard sharing utility * Copyright (C) 2012 Synergy Si Ltd. * Copyright (C) 2008 Volker Lanz (vl@fidra.de) * * This package is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * found in the file LICENSE that should have accompanied this file. * * This package is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "KeySequenceWidget.h" #include <iostream> #include <QMouseEvent> KeySequenceWidget::KeySequenceWidget(QWidget* parent, const KeySequence& seq) : QPushButton(parent), m_KeySequence(seq), m_BackupSequence(seq), m_Status(Stopped), m_MousePrefix("mousebutton("), m_MousePostfix(")"), m_KeyPrefix("keystroke("), m_KeyPostfix(")") { setFocusPolicy(Qt::NoFocus); updateOutput(); } void KeySequenceWidget::setKeySequence(const KeySequence& seq) { keySequence() = seq; backupSequence() = seq; setStatus(Stopped); updateOutput(); } void KeySequenceWidget::mousePressEvent(QMouseEvent* event) { event->accept(); if (status() == Stopped) { startRecording(); return; } if (m_KeySequence.appendMouseButton(event->button())) stopRecording(); updateOutput(); } void KeySequenceWidget::startRecording() { keySequence() = KeySequence(); setDown(true); setFocus(); grabKeyboard(); setStatus(Recording); } void KeySequenceWidget::stopRecording() { if (!keySequence().valid()) { keySequence() = backupSequence(); updateOutput(); } setDown(false); focusNextChild(); releaseKeyboard(); setStatus(Stopped); emit keySequenceChanged(); } bool KeySequenceWidget::event(QEvent* event) { if (status() == Recording) { switch(event->type()) { case QEvent::KeyPress: 
keyPressEvent(static_cast<QKeyEvent*>(event)); return true; case QEvent::MouseButtonRelease: event->accept(); return true; case QEvent::ShortcutOverride: event->accept(); return true; case QEvent::FocusOut: stopRecording(); if (!valid()) { keySequence() = backupSequence(); updateOutput(); } break; default: break; } } return QPushButton::event(event); } void KeySequenceWidget::keyPressEvent(QKeyEvent* event) { event->accept(); if (status() == Stopped) return; if (m_KeySequence.appendKey(event->key(), event->modifiers())) stopRecording(); updateOutput(); } void KeySequenceWidget::updateOutput() { QString s; if (m_KeySequence.isMouseButton()) s = mousePrefix() + m_KeySequence.toString() + mousePostfix(); else s = keyPrefix() + m_KeySequence.toString() + keyPostfix(); setText(s); }
gpl-2.0
visi0nary/mt6735-kernel-3.10.61
sound/core/compress_offload.c
32
27128
/* * compress_core.c - compress offload core * * Copyright (C) 2011 Intel Corporation * Authors: Vinod Koul <vinod.koul@linux.intel.com> * Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * */ #define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__ #define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt) #include <linux/file.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/math64.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/uio.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/compat.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/compress_params.h> #include <sound/compress_offload.h> #include <sound/compress_driver.h> /* TODO: * - add substream support for multiple devices in case of * SND_DYNAMIC_MINORS is not used * - Multiple node representation * driver should be able to register multiple nodes */ static DEFINE_MUTEX(device_mutex); struct snd_compr_file { unsigned long caps; struct snd_compr_stream stream; }; /* * a note on stream states used: * we use follwing states in the compressed 
core * SNDRV_PCM_STATE_OPEN: When stream has been opened. * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by * calling SNDRV_COMPRESS_SET_PARAMS. running streams will come to this * state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain. * SNDRV_PCM_STATE_RUNNING: When stream has been started and is * decoding/encoding and rendering/capturing data. * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done * by calling SNDRV_COMPRESS_DRAIN. * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling * SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling * SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively. */ static int snd_compr_open(struct inode *inode, struct file *f) { struct snd_compr *compr; struct snd_compr_file *data; struct snd_compr_runtime *runtime; enum snd_compr_direction dirn; int maj = imajor(inode); int ret; if ((f->f_flags & O_ACCMODE) == O_WRONLY) dirn = SND_COMPRESS_PLAYBACK; else if ((f->f_flags & O_ACCMODE) == O_RDONLY) dirn = SND_COMPRESS_CAPTURE; else return -EINVAL; if (maj == snd_major) compr = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_COMPRESS); else return -EBADFD; if (compr == NULL) { pr_err("no device data!!!\n"); return -ENODEV; } if (dirn != compr->direction) { pr_err("this device doesn't support this direction\n"); snd_card_unref(compr->card); return -EINVAL; } data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { snd_card_unref(compr->card); return -ENOMEM; } data->stream.ops = compr->ops; data->stream.direction = dirn; data->stream.private_data = compr->private_data; data->stream.device = compr; runtime = kzalloc(sizeof(*runtime), GFP_KERNEL); if (!runtime) { kfree(data); snd_card_unref(compr->card); return -ENOMEM; } runtime->state = SNDRV_PCM_STATE_OPEN; init_waitqueue_head(&runtime->sleep); data->stream.runtime = runtime; f->private_data = (void *)data; mutex_lock(&compr->lock); ret = compr->ops->open(&data->stream); 
mutex_unlock(&compr->lock); if (ret) { kfree(runtime); kfree(data); } snd_card_unref(compr->card); return ret; } static int snd_compr_free(struct inode *inode, struct file *f) { struct snd_compr_file *data = f->private_data; data->stream.ops->free(&data->stream); kfree(data->stream.runtime->buffer); kfree(data->stream.runtime); kfree(data); return 0; } static int snd_compr_update_tstamp(struct snd_compr_stream *stream, struct snd_compr_tstamp *tstamp) { if (!stream->ops->pointer) return -ENOTSUPP; stream->ops->pointer(stream, tstamp); pr_debug("dsp consumed till %d total %d bytes\n", tstamp->byte_offset, tstamp->copied_total); if (stream->direction == SND_COMPRESS_PLAYBACK) stream->runtime->total_bytes_transferred = tstamp->copied_total; else stream->runtime->total_bytes_available = tstamp->copied_total; return 0; } static size_t snd_compr_calc_avail(struct snd_compr_stream *stream, struct snd_compr_avail *avail) { memset(avail, 0, sizeof(*avail)); snd_compr_update_tstamp(stream, &avail->tstamp); /* Still need to return avail even if tstamp can't be filled in */ if (stream->runtime->total_bytes_available == 0 && stream->runtime->state == SNDRV_PCM_STATE_SETUP && stream->direction == SND_COMPRESS_PLAYBACK) { pr_debug("detected init and someone forgot to do a write\n"); return stream->runtime->buffer_size; } pr_debug("app wrote %lld, DSP consumed %lld\n", stream->runtime->total_bytes_available, stream->runtime->total_bytes_transferred); if (stream->runtime->total_bytes_available == stream->runtime->total_bytes_transferred) { if (stream->direction == SND_COMPRESS_PLAYBACK) { pr_debug("both pointers are same, returning full avail\n"); return stream->runtime->buffer_size; } else { pr_debug("both pointers are same, returning no avail\n"); return 0; } } avail->avail = stream->runtime->total_bytes_available - stream->runtime->total_bytes_transferred; if (stream->direction == SND_COMPRESS_PLAYBACK) avail->avail = stream->runtime->buffer_size - avail->avail; pr_debug("ret 
avail as %lld\n", avail->avail); return avail->avail; } static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream) { struct snd_compr_avail avail; return snd_compr_calc_avail(stream, &avail); } static int snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg) { struct snd_compr_avail ioctl_avail; size_t avail; avail = snd_compr_calc_avail(stream, &ioctl_avail); ioctl_avail.avail = avail; if (copy_to_user((__u64 __user *)arg, &ioctl_avail, sizeof(ioctl_avail))) return -EFAULT; return 0; } static int snd_compr_write_data(struct snd_compr_stream *stream, const char __user *buf, size_t count) { void *dstn; size_t copy; struct snd_compr_runtime *runtime = stream->runtime; /* 64-bit Modulus */ u64 app_pointer = div64_u64(runtime->total_bytes_available, runtime->buffer_size); app_pointer = runtime->total_bytes_available - (app_pointer * runtime->buffer_size); dstn = runtime->buffer + app_pointer; pr_debug("copying %ld at %lld\n", (unsigned long)count, app_pointer); if (count < runtime->buffer_size - app_pointer) { if (copy_from_user(dstn, buf, count)) return -EFAULT; } else { copy = runtime->buffer_size - app_pointer; if (copy_from_user(dstn, buf, copy)) return -EFAULT; if (copy_from_user(runtime->buffer, buf + copy, count - copy)) return -EFAULT; } /* if DSP cares, let it know data has been written */ if (stream->ops->ack) stream->ops->ack(stream, count); return count; } static ssize_t snd_compr_write(struct file *f, const char __user *buf, size_t count, loff_t *offset) { struct snd_compr_file *data = f->private_data; struct snd_compr_stream *stream; size_t avail; int retval; if (snd_BUG_ON(!data)) return -EFAULT; stream = &data->stream; mutex_lock(&stream->device->lock); /* write is allowed when stream is running or has been steup */ if (stream->runtime->state != SNDRV_PCM_STATE_SETUP && stream->runtime->state != SNDRV_PCM_STATE_RUNNING) { mutex_unlock(&stream->device->lock); return -EBADFD; } avail = snd_compr_get_avail(stream); 
pr_debug("avail returned %ld\n", (unsigned long)avail); /* calculate how much we can write to buffer */ if (avail > count) avail = count; if (stream->ops->copy) { char __user* cbuf = (char __user*)buf; retval = stream->ops->copy(stream, cbuf, avail); } else { retval = snd_compr_write_data(stream, buf, avail); } if (retval > 0) stream->runtime->total_bytes_available += retval; /* while initiating the stream, write should be called before START * call, so in setup move state */ if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) { stream->runtime->state = SNDRV_PCM_STATE_PREPARED; pr_debug("stream prepared, Houston we are good to go\n"); } mutex_unlock(&stream->device->lock); return retval; } static ssize_t snd_compr_read(struct file *f, char __user *buf, size_t count, loff_t *offset) { struct snd_compr_file *data = f->private_data; struct snd_compr_stream *stream; size_t avail; int retval; if (snd_BUG_ON(!data)) return -EFAULT; stream = &data->stream; mutex_lock(&stream->device->lock); /* read is allowed when stream is running, paused, draining and setup * (yes setup is state which we transition to after stop, so if user * wants to read data after stop we allow that) */ switch (stream->runtime->state) { case SNDRV_PCM_STATE_OPEN: case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_XRUN: case SNDRV_PCM_STATE_SUSPENDED: case SNDRV_PCM_STATE_DISCONNECTED: retval = -EBADFD; goto out; } avail = snd_compr_get_avail(stream); pr_debug("avail returned %ld\n", (unsigned long)avail); /* calculate how much we can read from buffer */ if (avail > count) avail = count; if (stream->ops->copy) { retval = stream->ops->copy(stream, buf, avail); } else { retval = -ENXIO; goto out; } if (retval > 0) stream->runtime->total_bytes_transferred += retval; out: mutex_unlock(&stream->device->lock); return retval; } static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma) { return -ENXIO; } static inline int snd_compr_get_poll(struct snd_compr_stream *stream) { if 
(stream->direction == SND_COMPRESS_PLAYBACK) return POLLOUT | POLLWRNORM; else return POLLIN | POLLRDNORM; } static unsigned int snd_compr_poll(struct file *f, poll_table *wait) { struct snd_compr_file *data = f->private_data; struct snd_compr_stream *stream; size_t avail; int retval = 0; if (snd_BUG_ON(!data)) return -EFAULT; stream = &data->stream; if (snd_BUG_ON(!stream)) return -EFAULT; mutex_lock(&stream->device->lock); if (stream->runtime->state == SNDRV_PCM_STATE_PAUSED || stream->runtime->state == SNDRV_PCM_STATE_OPEN) { retval = -EBADFD; goto out; } poll_wait(f, &stream->runtime->sleep, wait); avail = snd_compr_get_avail(stream); pr_debug("avail is %ld\n", (unsigned long)avail); /* check if we have at least one fragment to fill */ switch (stream->runtime->state) { case SNDRV_PCM_STATE_DRAINING: /* stream has been woken up after drain is complete * draining done so set stream state to stopped */ retval = snd_compr_get_poll(stream); stream->runtime->state = SNDRV_PCM_STATE_SETUP; break; case SNDRV_PCM_STATE_RUNNING: case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_PAUSED: if (avail >= stream->runtime->fragment_size) retval = snd_compr_get_poll(stream); break; default: if (stream->direction == SND_COMPRESS_PLAYBACK) retval = POLLOUT | POLLWRNORM | POLLERR; else retval = POLLIN | POLLRDNORM | POLLERR; break; } out: mutex_unlock(&stream->device->lock); return retval; } static int snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg) { int retval; struct snd_compr_caps caps; if (!stream->ops->get_caps) return -ENXIO; memset(&caps, 0, sizeof(caps)); retval = stream->ops->get_caps(stream, &caps); if (retval) goto out; if (copy_to_user((void __user *)arg, &caps, sizeof(caps))) retval = -EFAULT; out: return retval; } static int snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg) { int retval; struct snd_compr_codec_caps *caps; if (!stream->ops->get_codec_caps) return -ENXIO; caps = kzalloc(sizeof(*caps), GFP_KERNEL); if 
(!caps) return -ENOMEM; retval = stream->ops->get_codec_caps(stream, caps); if (retval) goto out; if (copy_to_user((void __user *)arg, caps, sizeof(*caps))) retval = -EFAULT; out: kfree(caps); return retval; } /* revisit this with snd_pcm_preallocate_xxx */ static int snd_compr_allocate_buffer(struct snd_compr_stream *stream, struct snd_compr_params *params) { unsigned int buffer_size; void *buffer; buffer_size = params->buffer.fragment_size * params->buffer.fragments; if (stream->ops->copy) { buffer = NULL; /* if copy is defined the driver will be required to copy * the data from core */ } else { buffer = kmalloc(buffer_size, GFP_KERNEL); if (!buffer) return -ENOMEM; } stream->runtime->fragment_size = params->buffer.fragment_size; stream->runtime->fragments = params->buffer.fragments; stream->runtime->buffer = buffer; stream->runtime->buffer_size = buffer_size; return 0; } static int snd_compress_check_input(struct snd_compr_params *params) { /* first let's check the buffer parameter's */ if (params->buffer.fragment_size == 0 || params->buffer.fragments > SIZE_MAX / params->buffer.fragment_size) return -EINVAL; /* now codec parameters */ if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX) return -EINVAL; if (params->codec.ch_in == 0 || params->codec.ch_out == 0) return -EINVAL; return 0; } static int snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg) { struct snd_compr_params *params; int retval; if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) { /* * we should allow parameter change only when stream has been * opened not in other cases */ params = kmalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; if (copy_from_user(params, (void __user *)arg, sizeof(*params))) { retval = -EFAULT; goto out; } retval = snd_compress_check_input(params); if (retval) goto out; retval = snd_compr_allocate_buffer(stream, params); if (retval) { retval = -ENOMEM; goto out; } retval = stream->ops->set_params(stream, params); if 
(retval) goto out; stream->metadata_set = false; stream->next_track = false; if (stream->direction == SND_COMPRESS_PLAYBACK) stream->runtime->state = SNDRV_PCM_STATE_SETUP; else stream->runtime->state = SNDRV_PCM_STATE_PREPARED; } else { return -EPERM; } out: kfree(params); return retval; } static int snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg) { struct snd_codec *params; int retval; if (!stream->ops->get_params) return -EBADFD; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; retval = stream->ops->get_params(stream, params); if (retval) goto out; if (copy_to_user((char __user *)arg, params, sizeof(*params))) retval = -EFAULT; out: kfree(params); return retval; } static int snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg) { struct snd_compr_metadata metadata; int retval; if (!stream->ops->get_metadata) return -ENXIO; if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata))) return -EFAULT; retval = stream->ops->get_metadata(stream, &metadata); if (retval != 0) return retval; if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata))) return -EFAULT; return 0; } static int snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg) { struct snd_compr_metadata metadata; int retval; if (!stream->ops->set_metadata) return -ENXIO; /* * we should allow parameter change only when stream has been * opened not in other cases */ if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata))) return -EFAULT; retval = stream->ops->set_metadata(stream, &metadata); stream->metadata_set = true; return retval; } static inline int snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg) { struct snd_compr_tstamp tstamp = {0}; int ret; ret = snd_compr_update_tstamp(stream, &tstamp); if (ret == 0) ret = copy_to_user((struct snd_compr_tstamp __user *)arg, &tstamp, sizeof(tstamp)) ? 
-EFAULT : 0; return ret; } static int snd_compr_pause(struct snd_compr_stream *stream) { int retval; if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) return -EPERM; retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); if (!retval) stream->runtime->state = SNDRV_PCM_STATE_PAUSED; return retval; } static int snd_compr_resume(struct snd_compr_stream *stream) { int retval; if (stream->runtime->state != SNDRV_PCM_STATE_PAUSED) return -EPERM; retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE); if (!retval) stream->runtime->state = SNDRV_PCM_STATE_RUNNING; return retval; } static int snd_compr_start(struct snd_compr_stream *stream) { int retval; if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED) return -EPERM; retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START); if (!retval) stream->runtime->state = SNDRV_PCM_STATE_RUNNING; return retval; } static int snd_compr_stop(struct snd_compr_stream *stream) { int retval; if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED || stream->runtime->state == SNDRV_PCM_STATE_SETUP) return -EPERM; retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP); if (!retval) { snd_compr_drain_notify(stream); stream->runtime->total_bytes_available = 0; stream->runtime->total_bytes_transferred = 0; } return retval; } static int snd_compress_wait_for_drain(struct snd_compr_stream *stream) { int ret; /* * We are called with lock held. So drop the lock while we wait for * drain complete notfication from the driver * * It is expected that driver will notify the drain completion and then * stream will be moved to SETUP state, even if draining resulted in an * error. We can trigger next track after this. */ stream->runtime->state = SNDRV_PCM_STATE_DRAINING; mutex_unlock(&stream->device->lock); /* we wait for drain to complete here, drain can return when * interruption occurred, wait returned error or success. 
* For the first two cases we don't do anything different here and * return after waking up */ ret = wait_event_interruptible(stream->runtime->sleep, (stream->runtime->state != SNDRV_PCM_STATE_DRAINING)); if (ret == -ERESTARTSYS) pr_debug("wait aborted by a signal"); else if (ret) pr_debug("wait for drain failed with %d\n", ret); wake_up(&stream->runtime->sleep); mutex_lock(&stream->device->lock); return ret; } static int snd_compr_drain(struct snd_compr_stream *stream) { int retval; if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED || stream->runtime->state == SNDRV_PCM_STATE_SETUP) return -EPERM; retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN); if (retval) { pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval); wake_up(&stream->runtime->sleep); return retval; } return snd_compress_wait_for_drain(stream); } static int snd_compr_next_track(struct snd_compr_stream *stream) { int retval; /* only a running stream can transition to next track */ if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) return -EPERM; /* you can signal next track isf this is intended to be a gapless stream * and current track metadata is set */ if (stream->metadata_set == false) return -EPERM; retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK); if (retval != 0) return retval; stream->metadata_set = false; stream->next_track = true; return 0; } static int snd_compr_partial_drain(struct snd_compr_stream *stream) { int retval; if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED || stream->runtime->state == SNDRV_PCM_STATE_SETUP) return -EPERM; /* stream can be drained only when next track has been signalled */ if (stream->next_track == false) return -EPERM; retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN); if (retval) { pr_debug("Partial drain returned failure\n"); wake_up(&stream->runtime->sleep); return retval; } stream->next_track = false; return snd_compress_wait_for_drain(stream); } static long snd_compr_ioctl(struct file 
*f, unsigned int cmd, unsigned long arg) { struct snd_compr_file *data = f->private_data; struct snd_compr_stream *stream; int retval = -ENOTTY; if (snd_BUG_ON(!data)) return -EFAULT; stream = &data->stream; if (snd_BUG_ON(!stream)) return -EFAULT; mutex_lock(&stream->device->lock); switch (_IOC_NR(cmd)) { case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION): retval = put_user(SNDRV_COMPRESS_VERSION, (int __user *)arg) ? -EFAULT : 0; break; case _IOC_NR(SNDRV_COMPRESS_GET_CAPS): retval = snd_compr_get_caps(stream, arg); break; case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS): retval = snd_compr_get_codec_caps(stream, arg); break; case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS): retval = snd_compr_set_params(stream, arg); break; case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS): retval = snd_compr_get_params(stream, arg); break; case _IOC_NR(SNDRV_COMPRESS_SET_METADATA): retval = snd_compr_set_metadata(stream, arg); break; case _IOC_NR(SNDRV_COMPRESS_GET_METADATA): retval = snd_compr_get_metadata(stream, arg); break; case _IOC_NR(SNDRV_COMPRESS_TSTAMP): retval = snd_compr_tstamp(stream, arg); break; case _IOC_NR(SNDRV_COMPRESS_AVAIL): retval = snd_compr_ioctl_avail(stream, arg); break; case _IOC_NR(SNDRV_COMPRESS_PAUSE): retval = snd_compr_pause(stream); break; case _IOC_NR(SNDRV_COMPRESS_RESUME): retval = snd_compr_resume(stream); break; case _IOC_NR(SNDRV_COMPRESS_START): retval = snd_compr_start(stream); break; case _IOC_NR(SNDRV_COMPRESS_STOP): retval = snd_compr_stop(stream); break; case _IOC_NR(SNDRV_COMPRESS_DRAIN): retval = snd_compr_drain(stream); break; case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN): retval = snd_compr_partial_drain(stream); break; case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK): retval = snd_compr_next_track(stream); break; } mutex_unlock(&stream->device->lock); return retval; } /* * ioctl32 compat */ #ifdef CONFIG_COMPAT static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) { switch (_IOC_NR(cmd)) { case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION): 
case _IOC_NR(SNDRV_COMPRESS_GET_CAPS): case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS): case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS): case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS): case _IOC_NR(SNDRV_COMPRESS_SET_METADATA): case _IOC_NR(SNDRV_COMPRESS_GET_METADATA): case _IOC_NR(SNDRV_COMPRESS_TSTAMP): case _IOC_NR(SNDRV_COMPRESS_AVAIL): case _IOC_NR(SNDRV_COMPRESS_PAUSE): case _IOC_NR(SNDRV_COMPRESS_RESUME): case _IOC_NR(SNDRV_COMPRESS_START): case _IOC_NR(SNDRV_COMPRESS_STOP): case _IOC_NR(SNDRV_COMPRESS_DRAIN): case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN): case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK): return file->f_op->unlocked_ioctl(file, cmd,arg); break; } } #else #define snd_compr_ioctl_compat NULL #endif static const struct file_operations snd_compr_file_ops = { .owner = THIS_MODULE, .open = snd_compr_open, .release = snd_compr_free, .write = snd_compr_write, .read = snd_compr_read, .unlocked_ioctl = snd_compr_ioctl, .compat_ioctl = snd_compr_ioctl_compat, .mmap = snd_compr_mmap, .poll = snd_compr_poll, }; static int snd_compress_dev_register(struct snd_device *device) { int ret = -EINVAL; char str[16]; struct snd_compr *compr; if (snd_BUG_ON(!device || !device->device_data)) return -EBADFD; compr = device->device_data; sprintf(str, "comprC%iD%i", compr->card->number, compr->device); pr_debug("reg %s for device %s, direction %d\n", str, compr->name, compr->direction); /* register compressed device */ ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card, compr->device, &snd_compr_file_ops, compr, str); if (ret < 0) { pr_err("snd_register_device failed\n %d", ret); return ret; } return ret; } static int snd_compress_dev_disconnect(struct snd_device *device) { struct snd_compr *compr; compr = device->device_data; snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card, compr->device); return 0; } /* * snd_compress_new: create new compress device * @card: sound card pointer * @device: device number * @dirn: device direction, should be of type enum 
snd_compr_direction * @compr: compress device pointer */ int snd_compress_new(struct snd_card *card, int device, int dirn, struct snd_compr *compr) { static struct snd_device_ops ops = { .dev_free = NULL, .dev_register = snd_compress_dev_register, .dev_disconnect = snd_compress_dev_disconnect, }; compr->card = card; compr->device = device; compr->direction = dirn; return snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops); } EXPORT_SYMBOL_GPL(snd_compress_new); static int snd_compress_add_device(struct snd_compr *device) { int ret; if (!device->card) return -EINVAL; /* register the card */ ret = snd_card_register(device->card); if (ret) goto out; return 0; out: pr_err("failed with %d\n", ret); return ret; } static int snd_compress_remove_device(struct snd_compr *device) { return snd_card_free(device->card); } /** * snd_compress_register - register compressed device * * @device: compressed device to register */ int snd_compress_register(struct snd_compr *device) { int retval; if (device->name == NULL || device->dev == NULL || device->ops == NULL) return -EINVAL; pr_debug("Registering compressed device %s\n", device->name); if (snd_BUG_ON(!device->ops->open)) return -EINVAL; if (snd_BUG_ON(!device->ops->free)) return -EINVAL; if (snd_BUG_ON(!device->ops->set_params)) return -EINVAL; if (snd_BUG_ON(!device->ops->trigger)) return -EINVAL; mutex_init(&device->lock); /* register a compressed card */ mutex_lock(&device_mutex); retval = snd_compress_add_device(device); mutex_unlock(&device_mutex); return retval; } EXPORT_SYMBOL_GPL(snd_compress_register); int snd_compress_deregister(struct snd_compr *device) { pr_debug("Removing compressed device %s\n", device->name); mutex_lock(&device_mutex); snd_compress_remove_device(device); mutex_unlock(&device_mutex); return 0; } EXPORT_SYMBOL_GPL(snd_compress_deregister); static int __init snd_compress_init(void) { return 0; } static void __exit snd_compress_exit(void) { } module_init(snd_compress_init); 
module_exit(snd_compress_exit); MODULE_DESCRIPTION("ALSA Compressed offload framework"); MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
prasidh09/cse506
drivers/hwmon/max1668.c
544
14589
/* Copyright (c) 2011 David George <david.george@ska.ac.za> based on adm1021.c some credit to Christoph Scheurer, but largely a rewrite This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> /* Addresses to scan */ static unsigned short max1668_addr_list[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; /* max1668 registers */ #define MAX1668_REG_TEMP(nr) (nr) #define MAX1668_REG_STAT1 0x05 #define MAX1668_REG_STAT2 0x06 #define MAX1668_REG_MAN_ID 0xfe #define MAX1668_REG_DEV_ID 0xff /* limits */ /* write high limits */ #define MAX1668_REG_LIMH_WR(nr) (0x13 + 2 * (nr)) /* write low limits */ #define MAX1668_REG_LIML_WR(nr) (0x14 + 2 * (nr)) /* read high limits */ #define MAX1668_REG_LIMH_RD(nr) (0x08 + 2 * (nr)) /* read low limits */ #define MAX1668_REG_LIML_RD(nr) (0x09 + 2 * (nr)) /* manufacturer and device ID Constants */ #define MAN_ID_MAXIM 0x4d #define DEV_ID_MAX1668 0x3 #define DEV_ID_MAX1805 0x5 #define DEV_ID_MAX1989 0xb /* read only mode module parameter */ static int read_only; module_param(read_only, bool, 0); MODULE_PARM_DESC(read_only, "Don't set any values, read only mode"); enum chips { max1668, max1805, 
max1989 }; struct max1668_data { struct device *hwmon_dev; enum chips type; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ /* 1x local and 4x remote */ s8 temp_max[5]; s8 temp_min[5]; s8 temp[5]; u16 alarms; }; static struct max1668_data *max1668_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct max1668_data *data = i2c_get_clientdata(client); struct max1668_data *ret = data; s32 val; int i; mutex_lock(&data->update_lock); if (data->valid && !time_after(jiffies, data->last_updated + HZ + HZ / 2)) goto abort; for (i = 0; i < 5; i++) { val = i2c_smbus_read_byte_data(client, MAX1668_REG_TEMP(i)); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->temp[i] = (s8) val; val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIMH_RD(i)); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->temp_max[i] = (s8) val; val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIML_RD(i)); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->temp_min[i] = (s8) val; } val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT1); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->alarms = val << 8; val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT2); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->alarms |= val; data->last_updated = jiffies; data->valid = 1; abort: mutex_unlock(&data->update_lock); return ret; } static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct max1668_data *data = max1668_update_device(dev); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%d\n", data->temp[index] * 1000); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct max1668_data *data = max1668_update_device(dev); if 
(IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%d\n", data->temp_max[index] * 1000); } static ssize_t show_temp_min(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct max1668_data *data = max1668_update_device(dev); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%d\n", data->temp_min[index] * 1000); } static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int index = to_sensor_dev_attr(attr)->index; struct max1668_data *data = max1668_update_device(dev); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1); } static ssize_t show_fault(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct max1668_data *data = max1668_update_device(dev); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%u\n", (data->alarms & (1 << 12)) && data->temp[index] == 127); } static ssize_t set_temp_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int index = to_sensor_dev_attr(devattr)->index; struct i2c_client *client = to_i2c_client(dev); struct max1668_data *data = i2c_get_clientdata(client); long temp; int ret; ret = kstrtol(buf, 10, &temp); if (ret < 0) return ret; mutex_lock(&data->update_lock); data->temp_max[index] = SENSORS_LIMIT(temp/1000, -128, 127); if (i2c_smbus_write_byte_data(client, MAX1668_REG_LIMH_WR(index), data->temp_max[index])) count = -EIO; mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_min(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int index = to_sensor_dev_attr(devattr)->index; struct i2c_client *client = to_i2c_client(dev); struct max1668_data *data = i2c_get_clientdata(client); long temp; int ret; ret = kstrtol(buf, 10, &temp); if (ret < 0) return ret; mutex_lock(&data->update_lock); 
data->temp_min[index] = SENSORS_LIMIT(temp/1000, -128, 127); if (i2c_smbus_write_byte_data(client, MAX1668_REG_LIML_WR(index), data->temp_max[index])) count = -EIO; mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, set_temp_max, 0); static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min, set_temp_min, 0); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1); static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max, set_temp_max, 1); static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp_min, set_temp_min, 1); static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2); static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max, set_temp_max, 2); static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp_min, set_temp_min, 2); static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3); static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max, set_temp_max, 3); static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO, show_temp_min, set_temp_min, 3); static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4); static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max, set_temp_max, 4); static SENSOR_DEVICE_ATTR(temp5_min, S_IRUGO, show_temp_min, set_temp_min, 4); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 14); static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 13); static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 7); static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, 
show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_fault, NULL, 1); static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_fault, NULL, 2); static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_fault, NULL, 3); static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_fault, NULL, 4); /* Attributes common to MAX1668, MAX1989 and MAX1805 */ static struct attribute *max1668_attribute_common[] = { &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, &sensor_dev_attr_temp2_fault.dev_attr.attr, &sensor_dev_attr_temp3_fault.dev_attr.attr, NULL }; /* Attributes not present on MAX1805 */ static struct attribute *max1668_attribute_unique[] = { &sensor_dev_attr_temp4_max.dev_attr.attr, &sensor_dev_attr_temp4_min.dev_attr.attr, &sensor_dev_attr_temp4_input.dev_attr.attr, &sensor_dev_attr_temp5_max.dev_attr.attr, &sensor_dev_attr_temp5_min.dev_attr.attr, &sensor_dev_attr_temp5_input.dev_attr.attr, &sensor_dev_attr_temp4_max_alarm.dev_attr.attr, &sensor_dev_attr_temp4_min_alarm.dev_attr.attr, &sensor_dev_attr_temp5_max_alarm.dev_attr.attr, &sensor_dev_attr_temp5_min_alarm.dev_attr.attr, &sensor_dev_attr_temp4_fault.dev_attr.attr, &sensor_dev_attr_temp5_fault.dev_attr.attr, NULL }; static mode_t max1668_attribute_mode(struct kobject *kobj, struct attribute *attr, int index) { int ret 
= S_IRUGO; if (read_only) return ret; if (attr == &sensor_dev_attr_temp1_max.dev_attr.attr || attr == &sensor_dev_attr_temp2_max.dev_attr.attr || attr == &sensor_dev_attr_temp3_max.dev_attr.attr || attr == &sensor_dev_attr_temp4_max.dev_attr.attr || attr == &sensor_dev_attr_temp5_max.dev_attr.attr || attr == &sensor_dev_attr_temp1_min.dev_attr.attr || attr == &sensor_dev_attr_temp2_min.dev_attr.attr || attr == &sensor_dev_attr_temp3_min.dev_attr.attr || attr == &sensor_dev_attr_temp4_min.dev_attr.attr || attr == &sensor_dev_attr_temp5_min.dev_attr.attr) ret |= S_IWUSR; return ret; } static const struct attribute_group max1668_group_common = { .attrs = max1668_attribute_common, .is_visible = max1668_attribute_mode }; static const struct attribute_group max1668_group_unique = { .attrs = max1668_attribute_unique, .is_visible = max1668_attribute_mode }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int max1668_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; const char *type_name; int man_id, dev_id; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; /* Check for unsupported part */ man_id = i2c_smbus_read_byte_data(client, MAX1668_REG_MAN_ID); if (man_id != MAN_ID_MAXIM) return -ENODEV; dev_id = i2c_smbus_read_byte_data(client, MAX1668_REG_DEV_ID); if (dev_id < 0) return -ENODEV; type_name = NULL; if (dev_id == DEV_ID_MAX1668) type_name = "max1668"; else if (dev_id == DEV_ID_MAX1805) type_name = "max1805"; else if (dev_id == DEV_ID_MAX1989) type_name = "max1989"; if (!type_name) return -ENODEV; strlcpy(info->type, type_name, I2C_NAME_SIZE); return 0; } static int max1668_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = client->adapter; struct max1668_data *data; int err; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; data = kzalloc(sizeof(struct max1668_data), GFP_KERNEL); 
if (!data) return -ENOMEM; i2c_set_clientdata(client, data); data->type = id->driver_data; mutex_init(&data->update_lock); /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &max1668_group_common); if (err) goto error_free; if (data->type == max1668 || data->type == max1989) { err = sysfs_create_group(&client->dev.kobj, &max1668_group_unique); if (err) goto error_sysrem0; } data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto error_sysrem1; } return 0; error_sysrem1: if (data->type == max1668 || data->type == max1989) sysfs_remove_group(&client->dev.kobj, &max1668_group_unique); error_sysrem0: sysfs_remove_group(&client->dev.kobj, &max1668_group_common); error_free: kfree(data); return err; } static int max1668_remove(struct i2c_client *client) { struct max1668_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); if (data->type == max1668 || data->type == max1989) sysfs_remove_group(&client->dev.kobj, &max1668_group_unique); sysfs_remove_group(&client->dev.kobj, &max1668_group_common); kfree(data); return 0; } static const struct i2c_device_id max1668_id[] = { { "max1668", max1668 }, { "max1805", max1805 }, { "max1989", max1989 }, { } }; MODULE_DEVICE_TABLE(i2c, max1668_id); /* This is the driver that will be inserted */ static struct i2c_driver max1668_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "max1668", }, .probe = max1668_probe, .remove = max1668_remove, .id_table = max1668_id, .detect = max1668_detect, .address_list = max1668_addr_list, }; static int __init sensors_max1668_init(void) { return i2c_add_driver(&max1668_driver); } static void __exit sensors_max1668_exit(void) { i2c_del_driver(&max1668_driver); } MODULE_AUTHOR("David George <david.george@ska.ac.za>"); MODULE_DESCRIPTION("MAX1668 remote temperature sensor driver"); MODULE_LICENSE("GPL"); module_init(sensors_max1668_init) module_exit(sensors_max1668_exit)
gpl-2.0
talnoah/Kangaroo_Kernel
net/rose/af_rose.c
544
38406
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net) * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi) */ #include <linux/capability.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/stat.h> #include <net/net_namespace.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <net/rose.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/tcp_states.h> #include <net/ip.h> #include <net/arp.h> static int rose_ndevs = 10; int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0; int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1; int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2; int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3; int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE; int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB; int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING; int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT; int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC; int sysctl_rose_window_size = 
ROSE_DEFAULT_WINDOW_SIZE; static HLIST_HEAD(rose_list); static DEFINE_SPINLOCK(rose_list_lock); static const struct proto_ops rose_proto_ops; ax25_address rose_callsign; /* * ROSE network devices are virtual network devices encapsulating ROSE * frames into AX.25 which will be sent through an AX.25 device, so form a * special "super class" of normal net devices; split their locks off into a * separate class since they always nest. */ static struct lock_class_key rose_netdev_xmit_lock_key; static struct lock_class_key rose_netdev_addr_lock_key; static void rose_set_lockdep_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key); } static void rose_set_lockdep_key(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL); } /* * Convert a ROSE address into text. */ char *rose2asc(char *buf, const rose_address *addr) { if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 && addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 && addr->rose_addr[4] == 0x00) { strcpy(buf, "*"); } else { sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF, addr->rose_addr[1] & 0xFF, addr->rose_addr[2] & 0xFF, addr->rose_addr[3] & 0xFF, addr->rose_addr[4] & 0xFF); } return buf; } /* * Compare two ROSE addresses, 0 == equal. */ int rosecmp(rose_address *addr1, rose_address *addr2) { int i; for (i = 0; i < 5; i++) if (addr1->rose_addr[i] != addr2->rose_addr[i]) return 1; return 0; } /* * Compare two ROSE addresses for only mask digits, 0 == equal. 
*/ int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask) { unsigned int i, j; if (mask > 10) return 1; for (i = 0; i < mask; i++) { j = i / 2; if ((i % 2) != 0) { if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F)) return 1; } else { if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0)) return 1; } } return 0; } /* * Socket removal during an interrupt is now safe. */ static void rose_remove_socket(struct sock *sk) { spin_lock_bh(&rose_list_lock); sk_del_node_init(sk); spin_unlock_bh(&rose_list_lock); } /* * Kill all bound sockets on a broken link layer connection to a * particular neighbour. */ void rose_kill_by_neigh(struct rose_neigh *neigh) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->neighbour == neigh) { rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); rose->neighbour->use--; rose->neighbour = NULL; } } spin_unlock_bh(&rose_list_lock); } /* * Kill all bound sockets on a dropped device. */ static void rose_kill_by_device(struct net_device *dev) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->device == dev) { rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); rose->neighbour->use--; rose->device = NULL; } } spin_unlock_bh(&rose_list_lock); } /* * Handle device status changes. */ static int rose_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = (struct net_device *)ptr; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (event != NETDEV_DOWN) return NOTIFY_DONE; switch (dev->type) { case ARPHRD_ROSE: rose_kill_by_device(dev); break; case ARPHRD_AX25: rose_link_device_down(dev); rose_rt_device_down(dev); break; } return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. 
*/ static void rose_insert_socket(struct sock *sk) { spin_lock_bh(&rose_list_lock); sk_add_node(sk, &rose_list); spin_unlock_bh(&rose_list_lock); } /* * Find a socket that wants to accept the Call Request we just * received. */ static struct sock *rose_find_listener(rose_address *addr, ax25_address *call) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (!rosecmp(&rose->source_addr, addr) && !ax25cmp(&rose->source_call, call) && !rose->source_ndigis && s->sk_state == TCP_LISTEN) goto found; } sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (!rosecmp(&rose->source_addr, addr) && !ax25cmp(&rose->source_call, &null_ax25_address) && s->sk_state == TCP_LISTEN) goto found; } s = NULL; found: spin_unlock_bh(&rose_list_lock); return s; } /* * Find a connected ROSE socket given my LCI and device. */ struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->lci == lci && rose->neighbour == neigh) goto found; } s = NULL; found: spin_unlock_bh(&rose_list_lock); return s; } /* * Find a unique LCI for a given device. */ unsigned int rose_new_lci(struct rose_neigh *neigh) { int lci; if (neigh->dce_mode) { for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++) if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL) return lci; } else { for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--) if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL) return lci; } return 0; } /* * Deferred destroy. */ void rose_destroy_socket(struct sock *); /* * Handler for deferred kills. */ static void rose_destroy_timer(unsigned long data) { rose_destroy_socket((struct sock *)data); } /* * This is called from user mode and the timers. 
Thus it protects itself * against interrupt users but doesn't worry about being called during * work. Once it is removed from the queue no interrupt or bottom half * will touch it and we are (fairly 8-) ) safe. */ void rose_destroy_socket(struct sock *sk) { struct sk_buff *skb; rose_remove_socket(sk); rose_stop_heartbeat(sk); rose_stop_idletimer(sk); rose_stop_timer(sk); rose_clear_queues(sk); /* Flush the queues */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (skb->sk != sk) { /* A pending connection */ /* Queue the unaccepted socket for death */ sock_set_flag(skb->sk, SOCK_DEAD); rose_start_heartbeat(skb->sk); rose_sk(skb->sk)->state = ROSE_STATE_0; } kfree_skb(skb); } if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ setup_timer(&sk->sk_timer, rose_destroy_timer, (unsigned long)sk); sk->sk_timer.expires = jiffies + 10 * HZ; add_timer(&sk->sk_timer); } else sock_put(sk); } /* * Handling for system calls applied via the various interfaces to a * ROSE socket object. */ static int rose_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int opt; if (level != SOL_ROSE) return -ENOPROTOOPT; if (optlen < sizeof(int)) return -EINVAL; if (get_user(opt, (int __user *)optval)) return -EFAULT; switch (optname) { case ROSE_DEFER: rose->defer = opt ? 1 : 0; return 0; case ROSE_T1: if (opt < 1) return -EINVAL; rose->t1 = opt * HZ; return 0; case ROSE_T2: if (opt < 1) return -EINVAL; rose->t2 = opt * HZ; return 0; case ROSE_T3: if (opt < 1) return -EINVAL; rose->t3 = opt * HZ; return 0; case ROSE_HOLDBACK: if (opt < 1) return -EINVAL; rose->hb = opt * HZ; return 0; case ROSE_IDLE: if (opt < 0) return -EINVAL; rose->idle = opt * 60 * HZ; return 0; case ROSE_QBITINCL: rose->qbitincl = opt ? 
1 : 0; return 0; default: return -ENOPROTOOPT; } } static int rose_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int val = 0; int len; if (level != SOL_ROSE) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case ROSE_DEFER: val = rose->defer; break; case ROSE_T1: val = rose->t1 / HZ; break; case ROSE_T2: val = rose->t2 / HZ; break; case ROSE_T3: val = rose->t3 / HZ; break; case ROSE_HOLDBACK: val = rose->hb / HZ; break; case ROSE_IDLE: val = rose->idle / (60 * HZ); break; case ROSE_QBITINCL: val = rose->qbitincl; break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, len, sizeof(int)); if (put_user(len, optlen)) return -EFAULT; return copy_to_user(optval, &val, len) ? -EFAULT : 0; } static int rose_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; if (sk->sk_state != TCP_LISTEN) { struct rose_sock *rose = rose_sk(sk); rose->dest_ndigis = 0; memset(&rose->dest_addr, 0, ROSE_ADDR_LEN); memset(&rose->dest_call, 0, AX25_ADDR_LEN); memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; return 0; } return -EOPNOTSUPP; } static struct proto rose_proto = { .name = "ROSE", .owner = THIS_MODULE, .obj_size = sizeof(struct rose_sock), }; static int rose_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct rose_sock *rose; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; if (sock->type != SOCK_SEQPACKET || protocol != 0) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto); if (sk == NULL) return -ENOMEM; rose = rose_sk(sk); sock_init_data(sock, sk); skb_queue_head_init(&rose->ack_queue); #ifdef M_BIT skb_queue_head_init(&rose->frag_queue); rose->fraglen = 0; #endif sock->ops = &rose_proto_ops; sk->sk_protocol = protocol; 
init_timer(&rose->timer); init_timer(&rose->idletimer); rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout); rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout); rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout); rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout); rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout); rose->state = ROSE_STATE_0; return 0; } static struct sock *rose_make_new(struct sock *osk) { struct sock *sk; struct rose_sock *rose, *orose; if (osk->sk_type != SOCK_SEQPACKET) return NULL; sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto); if (sk == NULL) return NULL; rose = rose_sk(sk); sock_init_data(NULL, sk); skb_queue_head_init(&rose->ack_queue); #ifdef M_BIT skb_queue_head_init(&rose->frag_queue); rose->fraglen = 0; #endif sk->sk_type = osk->sk_type; sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sock_copy_flags(sk, osk); init_timer(&rose->timer); init_timer(&rose->idletimer); orose = rose_sk(osk); rose->t1 = orose->t1; rose->t2 = orose->t2; rose->t3 = orose->t3; rose->hb = orose->hb; rose->idle = orose->idle; rose->defer = orose->defer; rose->device = orose->device; rose->qbitincl = orose->qbitincl; return sk; } static int rose_release(struct socket *sock) { struct sock *sk = sock->sk; struct rose_sock *rose; if (sk == NULL) return 0; sock_hold(sk); sock_orphan(sk); lock_sock(sk); rose = rose_sk(sk); switch (rose->state) { case ROSE_STATE_0: release_sock(sk); rose_disconnect(sk, 0, -1, -1); lock_sock(sk); rose_destroy_socket(sk); break; case ROSE_STATE_2: rose->neighbour->use--; release_sock(sk); rose_disconnect(sk, 0, -1, -1); lock_sock(sk); rose_destroy_socket(sk); break; case ROSE_STATE_1: case ROSE_STATE_3: case ROSE_STATE_4: case ROSE_STATE_5: rose_clear_queues(sk); rose_stop_idletimer(sk); rose_write_internal(sk, ROSE_CLEAR_REQUEST); 
rose_start_t3timer(sk); rose->state = ROSE_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock_set_flag(sk, SOCK_DESTROY); break; default: break; } sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr; struct net_device *dev; ax25_address *source; ax25_uid_assoc *user; int n; if (!sock_flag(sk, SOCK_ZAPPED)) return -EINVAL; if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; if (addr->srose_family != AF_ROSE) return -EINVAL; if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) return -EADDRNOTAVAIL; source = &addr->srose_call; user = ax25_findbyuid(current_euid()); if (user) { rose->source_call = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; rose->source_call = *source; } rose->source_addr = addr->srose_addr; rose->device = dev; rose->source_ndigis = addr->srose_ndigis; if (addr_len == sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr; for (n = 0 ; n < addr->srose_ndigis ; n++) rose->source_digis[n] = full_addr->srose_digis[n]; } else { if (rose->source_ndigis == 1) { rose->source_digis[0] = addr->srose_digi; } } rose_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); return 0; } static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr; unsigned char cause, diagnostic; struct 
net_device *dev; ax25_uid_assoc *user; int n, err = 0; if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; if (addr->srose_family != AF_ROSE) return -EINVAL; if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS) return -EINVAL; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { /* Connect completed during a ERESTARTSYS event */ sock->state = SS_CONNECTED; goto out_release; } if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out_release; } if (sk->sk_state == TCP_ESTABLISHED) { /* No reconnect on a seqpacket socket */ err = -EISCONN; goto out_release; } sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0); if (!rose->neighbour) { err = -ENETUNREACH; goto out_release; } rose->lci = rose_new_lci(rose->neighbour); if (!rose->lci) { err = -ENETUNREACH; goto out_release; } if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */ sock_reset_flag(sk, SOCK_ZAPPED); if ((dev = rose_dev_first()) == NULL) { err = -ENETUNREACH; goto out_release; } user = ax25_findbyuid(current_euid()); if (!user) { err = -EINVAL; goto out_release; } memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN); rose->source_call = user->call; rose->device = dev; ax25_uid_put(user); rose_insert_socket(sk); /* Finish the bind */ } rose->dest_addr = addr->srose_addr; rose->dest_call = addr->srose_call; rose->rand = ((long)rose & 0xFFFF) + rose->lci; rose->dest_ndigis = addr->srose_ndigis; if (addr_len == sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_addr = (struct 
full_sockaddr_rose *)uaddr; for (n = 0 ; n < addr->srose_ndigis ; n++) rose->dest_digis[n] = full_addr->srose_digis[n]; } else { if (rose->dest_ndigis == 1) { rose->dest_digis[0] = addr->srose_digi; } } /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; rose->state = ROSE_STATE_1; rose->neighbour->use++; rose_write_internal(sk, ROSE_CALL_REQUEST); rose_start_heartbeat(sk); rose_start_t1timer(sk); /* Now the loop */ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) { err = -EINPROGRESS; goto out_release; } /* * A Connect Ack with Choke or timeout or failed routing will go to * closed. */ if (sk->sk_state == TCP_SYN_SENT) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk->sk_state != TCP_SYN_SENT) break; if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; } if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); /* Always set at this point */ goto out_release; } sock->state = SS_CONNECTED; out_release: release_sock(sk); return err; } static int rose_accept(struct socket *sock, struct socket *newsock, int flags) { struct sk_buff *skb; struct sock *newsk; DEFINE_WAIT(wait); struct sock *sk; int err = 0; if ((sk = sock->sk) == NULL) return -EINVAL; lock_sock(sk); if (sk->sk_type != SOCK_SEQPACKET) { err = -EOPNOTSUPP; goto out_release; } if (sk->sk_state != TCP_LISTEN) { err = -EINVAL; goto out_release; } /* * The write queue this time is holding sockets ready to use * hooked into the SABM we saved */ for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); skb = skb_dequeue(&sk->sk_receive_queue); if (skb) break; if (flags & O_NONBLOCK) { err = -EWOULDBLOCK; break; } if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = 
-ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ skb->sk = NULL; kfree_skb(skb); sk->sk_ack_backlog--; out_release: release_sock(sk); return err; } static int rose_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr; struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int n; memset(srose, 0, sizeof(*srose)); if (peer != 0) { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; for (n = 0; n < rose->dest_ndigis; n++) srose->srose_digis[n] = rose->dest_digis[n]; } else { srose->srose_family = AF_ROSE; srose->srose_addr = rose->source_addr; srose->srose_call = rose->source_call; srose->srose_ndigis = rose->source_ndigis; for (n = 0; n < rose->source_ndigis; n++) srose->srose_digis[n] = rose->source_digis[n]; } *uaddr_len = sizeof(struct full_sockaddr_rose); return 0; } int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci) { struct sock *sk; struct sock *make; struct rose_sock *make_rose; struct rose_facilities_struct facilities; int n; skb->sk = NULL; /* Initially we don't know who it's for */ /* * skb->data points to the rose frame start */ memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF, skb->len - ROSE_CALL_REQ_FACILITIES_OFF, &facilities)) { rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76); return 0; } sk = rose_find_listener(&facilities.source_addr, &facilities.source_call); /* * We can't accept the Call Request. 
*/ if (sk == NULL || sk_acceptq_is_full(sk) || (make = rose_make_new(sk)) == NULL) { rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120); return 0; } skb->sk = make; make->sk_state = TCP_ESTABLISHED; make_rose = rose_sk(make); make_rose->lci = lci; make_rose->dest_addr = facilities.dest_addr; make_rose->dest_call = facilities.dest_call; make_rose->dest_ndigis = facilities.dest_ndigis; for (n = 0 ; n < facilities.dest_ndigis ; n++) make_rose->dest_digis[n] = facilities.dest_digis[n]; make_rose->source_addr = facilities.source_addr; make_rose->source_call = facilities.source_call; make_rose->source_ndigis = facilities.source_ndigis; for (n = 0 ; n < facilities.source_ndigis ; n++) make_rose->source_digis[n]= facilities.source_digis[n]; make_rose->neighbour = neigh; make_rose->device = dev; make_rose->facilities = facilities; make_rose->neighbour->use++; if (rose_sk(sk)->defer) { make_rose->state = ROSE_STATE_5; } else { rose_write_internal(make, ROSE_CALL_ACCEPTED); make_rose->state = ROSE_STATE_3; rose_start_idletimer(make); } make_rose->condition = 0x00; make_rose->vs = 0; make_rose->va = 0; make_rose->vr = 0; make_rose->vl = 0; sk->sk_ack_backlog++; rose_insert_socket(make); skb_queue_head(&sk->sk_receive_queue, skb); rose_start_heartbeat(make); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); return 1; } static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name; int err; struct full_sockaddr_rose srose; struct sk_buff *skb; unsigned char *asmptr; int n, size, qbit = 0; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) return -EINVAL; if (sock_flag(sk, SOCK_ZAPPED)) return -EADDRNOTAVAIL; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); return -EPIPE; } if (rose->neighbour == NULL || rose->device == NULL) return -ENETUNREACH; 
if (usrose != NULL) { if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose)) return -EINVAL; memset(&srose, 0, sizeof(struct full_sockaddr_rose)); memcpy(&srose, usrose, msg->msg_namelen); if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 || ax25cmp(&rose->dest_call, &srose.srose_call) != 0) return -EISCONN; if (srose.srose_ndigis != rose->dest_ndigis) return -EISCONN; if (srose.srose_ndigis == rose->dest_ndigis) { for (n = 0 ; n < srose.srose_ndigis ; n++) if (ax25cmp(&rose->dest_digis[n], &srose.srose_digis[n])) return -EISCONN; } if (srose.srose_family != AF_ROSE) return -EINVAL; } else { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; srose.srose_family = AF_ROSE; srose.srose_addr = rose->dest_addr; srose.srose_call = rose->dest_call; srose.srose_ndigis = rose->dest_ndigis; for (n = 0 ; n < rose->dest_ndigis ; n++) srose.srose_digis[n] = rose->dest_digis[n]; } /* Build a packet */ /* Sanity check the packet size */ if (len > 65535) return -EMSGSIZE; size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) return err; skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN); /* * Put the data on the end */ skb_reset_transport_header(skb); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { kfree_skb(skb); return err; } /* * If the Q BIT Include socket option is in force, the first * byte of the user data is the logical value of the Q Bit. 
*/ if (rose->qbitincl) { qbit = skb->data[0]; skb_pull(skb, 1); } /* * Push down the ROSE header */ asmptr = skb_push(skb, ROSE_MIN_LEN); /* Build a ROSE Network header */ asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI; asmptr[1] = (rose->lci >> 0) & 0xFF; asmptr[2] = ROSE_DATA; if (qbit) asmptr[0] |= ROSE_Q_BIT; if (sk->sk_state != TCP_ESTABLISHED) { kfree_skb(skb); return -ENOTCONN; } #ifdef M_BIT #define ROSE_PACLEN (256-ROSE_MIN_LEN) if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) { unsigned char header[ROSE_MIN_LEN]; struct sk_buff *skbn; int frontlen; int lg; /* Save a copy of the Header */ skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN); skb_pull(skb, ROSE_MIN_LEN); frontlen = skb_headroom(skb); while (skb->len > 0) { if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) { kfree_skb(skb); return err; } skbn->sk = sk; skbn->free = 1; skbn->arp = 1; skb_reserve(skbn, frontlen); lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN; /* Copy the user data */ skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg); skb_pull(skb, lg); /* Duplicate the Header */ skb_push(skbn, ROSE_MIN_LEN); skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN); if (skb->len > 0) skbn->data[2] |= M_BIT; skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */ } skb->free = 1; kfree_skb(skb); } else { skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */ } #else skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */ #endif rose_kick(sk); return len; } static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); size_t copied; unsigned char *asmptr; struct sk_buff *skb; int n, er, qbit; /* * This works for seqpacket too. The receiver has ordered the queue for * us! 
We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) return er; qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT; skb_pull(skb, ROSE_MIN_LEN); if (rose->qbitincl) { asmptr = skb_push(skb, 1); *asmptr = qbit; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (msg->msg_name) { struct sockaddr_rose *srose; struct full_sockaddr_rose *full_srose = msg->msg_name; memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose)); srose = msg->msg_name; srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; for (n = 0 ; n < rose->dest_ndigis ; n++) full_srose->srose_digis[n] = rose->dest_digis[n]; msg->msg_namelen = sizeof(struct full_sockaddr_rose); } skb_free_datagram(sk, skb); return copied; } static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); void __user *argp = (void __user *)arg; switch (cmd) { case TIOCOUTQ: { long amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; return put_user(amount, (unsigned int __user *) argp); } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; return put_user(amount, (unsigned int __user *) argp); } case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *) argp); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec __user *) argp); case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case 
SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: return -EINVAL; case SIOCADDRT: case SIOCDELRT: case SIOCRSCLRRT: if (!capable(CAP_NET_ADMIN)) return -EPERM; return rose_rt_ioctl(cmd, argp); case SIOCRSGCAUSE: { struct rose_cause_struct rose_cause; rose_cause.cause = rose->cause; rose_cause.diagnostic = rose->diagnostic; return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0; } case SIOCRSSCAUSE: { struct rose_cause_struct rose_cause; if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct))) return -EFAULT; rose->cause = rose_cause.cause; rose->diagnostic = rose_cause.diagnostic; return 0; } case SIOCRSSL2CALL: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) ax25_listen_release(&rose_callsign, NULL); if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address))) return -EFAULT; if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) return ax25_listen_register(&rose_callsign, NULL); return 0; case SIOCRSGL2CALL: return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? 
-EFAULT : 0; case SIOCRSACCEPT: if (rose->state == ROSE_STATE_5) { rose_write_internal(sk, ROSE_CALL_ACCEPTED); rose_start_idletimer(sk); rose->condition = 0x00; rose->vs = 0; rose->va = 0; rose->vr = 0; rose->vl = 0; rose->state = ROSE_STATE_3; } return 0; default: return -ENOIOCTLCMD; } return 0; } #ifdef CONFIG_PROC_FS static void *rose_info_start(struct seq_file *seq, loff_t *pos) __acquires(rose_list_lock) { spin_lock_bh(&rose_list_lock); return seq_hlist_start_head(&rose_list, *pos); } static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_hlist_next(v, &rose_list, pos); } static void rose_info_stop(struct seq_file *seq, void *v) __releases(rose_list_lock) { spin_unlock_bh(&rose_list_lock); } static int rose_info_show(struct seq_file *seq, void *v) { char buf[11], rsbuf[11]; if (v == SEQ_START_TOKEN) seq_puts(seq, "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n"); else { struct sock *s = sk_entry(v); struct rose_sock *rose = rose_sk(s); const char *devname, *callsign; const struct net_device *dev = rose->device; if (!dev) devname = "???"; else devname = dev->name; seq_printf(seq, "%-10s %-9s ", rose2asc(rsbuf, &rose->dest_addr), ax2asc(buf, &rose->dest_call)); if (ax25cmp(&rose->source_call, &null_ax25_address) == 0) callsign = "??????-?"; else callsign = ax2asc(buf, &rose->source_call); seq_printf(seq, "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n", rose2asc(rsbuf, &rose->source_addr), callsign, devname, rose->lci & 0x0FFF, (rose->neighbour) ? rose->neighbour->number : 0, rose->state, rose->vs, rose->vr, rose->va, ax25_display_timer(&rose->timer) / HZ, rose->t1 / HZ, rose->t2 / HZ, rose->t3 / HZ, rose->hb / HZ, ax25_display_timer(&rose->idletimer) / (60 * HZ), rose->idle / (60 * HZ), sk_wmem_alloc_get(s), sk_rmem_alloc_get(s), s->sk_socket ? 
SOCK_INODE(s->sk_socket)->i_ino : 0L); } return 0; } static const struct seq_operations rose_info_seqops = { .start = rose_info_start, .next = rose_info_next, .stop = rose_info_stop, .show = rose_info_show, }; static int rose_info_open(struct inode *inode, struct file *file) { return seq_open(file, &rose_info_seqops); } static const struct file_operations rose_info_fops = { .owner = THIS_MODULE, .open = rose_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* CONFIG_PROC_FS */ static const struct net_proto_family rose_family_ops = { .family = PF_ROSE, .create = rose_create, .owner = THIS_MODULE, }; static const struct proto_ops rose_proto_ops = { .family = PF_ROSE, .owner = THIS_MODULE, .release = rose_release, .bind = rose_bind, .connect = rose_connect, .socketpair = sock_no_socketpair, .accept = rose_accept, .getname = rose_getname, .poll = datagram_poll, .ioctl = rose_ioctl, .listen = rose_listen, .shutdown = sock_no_shutdown, .setsockopt = rose_setsockopt, .getsockopt = rose_getsockopt, .sendmsg = rose_sendmsg, .recvmsg = rose_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct notifier_block rose_dev_notifier = { .notifier_call = rose_device_event, }; static struct net_device **dev_rose; static struct ax25_protocol rose_pid = { .pid = AX25_P_ROSE, .func = rose_route_frame }; static struct ax25_linkfail rose_linkfail_notifier = { .func = rose_link_failed }; static int __init rose_proto_init(void) { int i; int rc; if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) { printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n"); rc = -EINVAL; goto out; } rc = proto_register(&rose_proto, 0); if (rc != 0) goto out; rose_callsign = null_ax25_address; dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_rose == NULL) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); rc = -ENOMEM; goto out_proto_unregister; } for (i = 0; i < 
rose_ndevs; i++) { struct net_device *dev; char name[IFNAMSIZ]; sprintf(name, "rose%d", i); dev = alloc_netdev(0, name, rose_setup); if (!dev) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); rc = -ENOMEM; goto fail; } rc = register_netdev(dev); if (rc) { printk(KERN_ERR "ROSE: netdevice registration failed\n"); free_netdev(dev); goto fail; } rose_set_lockdep_key(dev); dev_rose[i] = dev; } sock_register(&rose_family_ops); register_netdevice_notifier(&rose_dev_notifier); ax25_register_pid(&rose_pid); ax25_linkfail_register(&rose_linkfail_notifier); #ifdef CONFIG_SYSCTL rose_register_sysctl(); #endif rose_loopback_init(); rose_add_loopback_neigh(); proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops); proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops); proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops); proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops); out: return rc; fail: while (--i >= 0) { unregister_netdev(dev_rose[i]); free_netdev(dev_rose[i]); } kfree(dev_rose); out_proto_unregister: proto_unregister(&rose_proto); goto out; } module_init(rose_proto_init); module_param(rose_ndevs, int, 0); MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices"); MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_ROSE); static void __exit rose_exit(void) { int i; proc_net_remove(&init_net, "rose"); proc_net_remove(&init_net, "rose_neigh"); proc_net_remove(&init_net, "rose_nodes"); proc_net_remove(&init_net, "rose_routes"); rose_loopback_clear(); rose_rt_free(); ax25_protocol_release(AX25_P_ROSE); ax25_linkfail_release(&rose_linkfail_notifier); if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) ax25_listen_release(&rose_callsign, NULL); #ifdef CONFIG_SYSCTL rose_unregister_sysctl(); #endif unregister_netdevice_notifier(&rose_dev_notifier); 
sock_unregister(PF_ROSE);

	/* Unregister and free every ROSE network device created at init. */
	for (i = 0; i < rose_ndevs; i++) {
		struct net_device *dev = dev_rose[i];

		if (dev) {
			unregister_netdev(dev);
			free_netdev(dev);
		}
	}

	kfree(dev_rose);
	proto_unregister(&rose_proto);
}

module_exit(rose_exit);
gpl-2.0
Scorpiion/Beagleboard-xM-Linux-Kernel
drivers/usb/host/hwa-hc.c
544
26056
/* * Host Wire Adapter: * Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * The HWA driver is a simple layer that forwards requests to the WAHC * (Wire Adater Host Controller) or WUSBHC (Wireless USB Host * Controller) layers. * * Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB * Host Controller that is connected to your system via USB (a USB * dongle that implements a USB host...). There is also a Device Wired * Adaptor, DWA (Wireless USB hub) that uses the same mechanism for * transferring data (it is after all a USB host connected via * Wireless USB), we have a common layer called Wire Adapter Host * Controller that does all the hard work. The WUSBHC (Wireless USB * Host Controller) is the part common to WUSB Host Controllers, the * HWA and the PCI-based one, that is implemented following the WHCI * spec. All these layers are implemented in ../wusbcore. * * The main functions are hwahc_op_urb_{en,de}queue(), that pass the * job of converting a URB to a Wire Adapter * * Entry points: * * hwahc_driver_*() Driver initialization, registration and * teardown. * * hwahc_probe() New device came up, create an instance for * it [from device enumeration]. 
* * hwahc_disconnect() Remove device instance [from device * enumeration]. * * [__]hwahc_op_*() Host-Wire-Adaptor specific functions for * starting/stopping/etc (some might be made also * DWA). */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/workqueue.h> #include <linux/wait.h> #include <linux/completion.h> #include "../wusbcore/wa-hc.h" #include "../wusbcore/wusbhc.h" struct hwahc { struct wusbhc wusbhc; /* has to be 1st */ struct wahc wa; }; /* * FIXME should be wusbhc * * NOTE: we need to cache the Cluster ID because later...there is no * way to get it :) */ static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id) { int result; struct wusbhc *wusbhc = &hwahc->wusbhc; struct wahc *wa = &hwahc->wa; struct device *dev = &wa->usb_iface->dev; result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_SET_CLUSTER_ID, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, cluster_id, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, NULL, 0, 1000 /* FIXME: arbitrary */); if (result < 0) dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n", cluster_id, result); else wusbhc->cluster_id = cluster_id; dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n", cluster_id); return result; } static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots) { struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_SET_NUM_DNTS, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, interval << 8 | slots, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, NULL, 0, 1000 /* FIXME: arbitrary */); } /* * Reset a WUSB host controller and wait for it to complete doing it. * * @usb_hcd: Pointer to WUSB Host Controller instance. 
* */ static int hwahc_op_reset(struct usb_hcd *usb_hcd) { int result; struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct device *dev = &hwahc->wa.usb_iface->dev; mutex_lock(&wusbhc->mutex); wa_nep_disarm(&hwahc->wa); result = __wa_set_feature(&hwahc->wa, WA_RESET); if (result < 0) { dev_err(dev, "error commanding HC to reset: %d\n", result); goto error_unlock; } result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0); if (result < 0) { dev_err(dev, "error waiting for HC to reset: %d\n", result); goto error_unlock; } error_unlock: mutex_unlock(&wusbhc->mutex); return result; } /* * FIXME: break this function up */ static int hwahc_op_start(struct usb_hcd *usb_hcd) { u8 addr; int result; struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); result = -ENOSPC; mutex_lock(&wusbhc->mutex); addr = wusb_cluster_id_get(); if (addr == 0) goto error_cluster_id_get; result = __hwahc_set_cluster_id(hwahc, addr); if (result < 0) goto error_set_cluster_id; usb_hcd->uses_new_polling = 1; usb_hcd->poll_rh = 1; usb_hcd->state = HC_STATE_RUNNING; result = 0; out: mutex_unlock(&wusbhc->mutex); return result; error_set_cluster_id: wusb_cluster_id_put(wusbhc->cluster_id); error_cluster_id_get: goto out; } /* * No need to abort pipes, as when this is called, all the children * has been disconnected and that has done it [through * usb_disable_interface() -> usb_disable_endpoint() -> * hwahc_op_ep_disable() - >rpipe_ep_disable()]. 
*/
static void hwahc_op_stop(struct usb_hcd *usb_hcd)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);

	/* Return our WUSB cluster ID to the allocator under the HC mutex. */
	mutex_lock(&wusbhc->mutex);
	wusb_cluster_id_put(wusbhc->cluster_id);
	mutex_unlock(&wusbhc->mutex);
}

/* Frame numbers are not implemented by this HWA glue; always -ENOSYS. */
static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);

	dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
		usb_hcd, hwahc);
	return -ENOSYS;
}

/* Forward URB submission to the Wire Adapter layer (wa_urb_enqueue). */
static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
				gfp_t gfp)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);

	return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
}

/*
 * Forward URB cancellation to the Wire Adapter layer; note 'status' is
 * accepted for the hc_driver signature but not passed down.
 */
static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb,
				int status)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);

	return wa_urb_dequeue(&hwahc->wa, urb);
}

/*
 * Release resources allocated for an endpoint
 *
 * If there is an associated rpipe to this endpoint, go ahead and put it.
*/ static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); rpipe_ep_disable(&hwahc->wa, ep); } static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc) { int result; struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct device *dev = &hwahc->wa.usb_iface->dev; result = __wa_set_feature(&hwahc->wa, WA_ENABLE); if (result < 0) { dev_err(dev, "error commanding HC to start: %d\n", result); goto error_stop; } result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE); if (result < 0) { dev_err(dev, "error waiting for HC to start: %d\n", result); goto error_stop; } result = wa_nep_arm(&hwahc->wa, GFP_KERNEL); if (result < 0) { dev_err(dev, "cannot listen to notifications: %d\n", result); goto error_stop; } return result; error_stop: __wa_clear_feature(&hwahc->wa, WA_ENABLE); return result; } static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay) { struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; int ret; ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_CHAN_STOP, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, delay * 1000, iface_no, NULL, 0, 1000 /* FIXME: arbitrary */); if (ret == 0) msleep(delay); wa_nep_disarm(&hwahc->wa); __wa_stop(&hwahc->wa); } /* * Set the UWB MAS allocation for the WUSB cluster * * @stream_index: stream to use (-1 for cancelling the allocation) * @mas: mas bitmap to use */ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas) { int result; struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; struct device *dev = &wa->usb_iface->dev; u8 mas_le[UWB_NUM_MAS/8]; /* Set the stream index */ result = usb_control_msg(wa->usb_dev, 
usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_SET_STREAM_IDX, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, stream_index, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, NULL, 0, 1000 /* FIXME: arbitrary */); if (result < 0) { dev_err(dev, "Cannot set WUSB stream index: %d\n", result); goto out; } uwb_mas_bm_copy_le(mas_le, mas); /* Set the MAS allocation */ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_SET_WUSB_MAS, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, mas_le, 32, 1000 /* FIXME: arbitrary */); if (result < 0) dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result); out: return result; } /* * Add an IE to the host's MMC * * @interval: See WUSB1.0[8.5.3.1] * @repeat_cnt: See WUSB1.0[8.5.3.1] * @handle: See WUSB1.0[8.5.3.1] * @wuie: Pointer to the header of the WUSB IE data to add. * MUST BE allocated in a kmalloc buffer (no stack or * vmalloc). * * NOTE: the format of the WUSB IEs for MMCs are different to the * normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length + * Id in WUSB IEs). Standards...you gotta love'em. 
*/ static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, u8 handle, struct wuie_hdr *wuie) { struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_ADD_MMC_IE, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, interval << 8 | repeat_cnt, handle << 8 | iface_no, wuie, wuie->bLength, 1000 /* FIXME: arbitrary */); } /* * Remove an IE to the host's MMC * * @handle: See WUSB1.0[8.5.3.1] */ static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle) { struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_REMOVE_MMC_IE, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, handle << 8 | iface_no, NULL, 0, 1000 /* FIXME: arbitrary */); } /* * Update device information for a given fake port * * @port_idx: Fake port to which device is connected (wusbhc index, not * USB port number). */ static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; struct hwa_dev_info *dev_info; int ret; /* fill out the Device Info buffer and send it */ dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL); if (!dev_info) return -ENOMEM; uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability, &wusb_dev->availability); dev_info->bDeviceAddress = wusb_dev->addr; /* * If the descriptors haven't been read yet, use a default PHY * rate of 53.3 Mbit/s only. 
The correct value will be used * when this will be called again as part of the * authentication process (which occurs after the descriptors * have been read). */ if (wusb_dev->wusb_cap_descr) dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates; else dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53); ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_SET_DEV_INFO, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, wusb_dev->port_idx << 8 | iface_no, dev_info, sizeof(struct hwa_dev_info), 1000 /* FIXME: arbitrary */); kfree(dev_info); return ret; } /* * Set host's idea of which encryption (and key) method to use when * talking to ad evice on a given port. * * If key is NULL, it means disable encryption for that "virtual port" * (used when we disconnect). */ static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, const void *key, size_t key_size, u8 key_idx) { int result = -ENOMEM; struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; struct usb_key_descriptor *keyd; size_t keyd_len; keyd_len = sizeof(*keyd) + key_size; keyd = kzalloc(keyd_len, GFP_KERNEL); if (keyd == NULL) return -ENOMEM; keyd->bLength = keyd_len; keyd->bDescriptorType = USB_DT_KEY; keyd->tTKID[0] = (tkid >> 0) & 0xff; keyd->tTKID[1] = (tkid >> 8) & 0xff; keyd->tTKID[2] = (tkid >> 16) & 0xff; memcpy(keyd->bKeyData, key, key_size); result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), USB_REQ_SET_DESCRIPTOR, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, USB_DT_KEY << 8 | key_idx, port_idx << 8 | iface_no, keyd, keyd_len, 1000 /* FIXME: arbitrary */); kzfree(keyd); /* clear keys etc. */ return result; } /* * Set host's idea of which encryption (and key) method to use when * talking to ad evice on a given port. * * If key is NULL, it means disable encryption for that "virtual port" * (used when we disconnect). 
*/ static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, const void *key, size_t key_size) { int result = -ENOMEM; struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; u8 encryption_value; /* Tell the host which key to use to talk to the device */ if (key) { u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK, WUSB_KEY_INDEX_ORIGINATOR_HOST); result = __hwahc_dev_set_key(wusbhc, port_idx, tkid, key, key_size, key_idx); if (result < 0) goto error_set_key; encryption_value = wusbhc->ccm1_etd->bEncryptionValue; } else { /* FIXME: this should come from wusbhc->etd[UNSECURE].value */ encryption_value = 0; } /* Set the encryption type for commmunicating with the device */ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), USB_REQ_SET_ENCRYPTION, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, encryption_value, port_idx << 8 | iface_no, NULL, 0, 1000 /* FIXME: arbitrary */); if (result < 0) dev_err(wusbhc->dev, "Can't set host's WUSB encryption for " "port index %u to %s (value %d): %d\n", port_idx, wusb_et_name(wusbhc->ccm1_etd->bEncryptionType), wusbhc->ccm1_etd->bEncryptionValue, result); error_set_key: return result; } /* * Set host's GTK key */ static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid, const void *key, size_t key_size) { u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST); return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx); } /* * Get the Wire Adapter class-specific descriptor * * NOTE: this descriptor comes with the big bundled configuration * descriptor that includes the interfaces' and endpoints', so * we just look for it in the cached copy kept by the USB stack. * * NOTE2: We convert LE fields to CPU order. 
*/ static int wa_fill_descr(struct wahc *wa) { int result; struct device *dev = &wa->usb_iface->dev; char *itr; struct usb_device *usb_dev = wa->usb_dev; struct usb_descriptor_header *hdr; struct usb_wa_descriptor *wa_descr; size_t itr_size, actconfig_idx; actconfig_idx = (usb_dev->actconfig - usb_dev->config) / sizeof(usb_dev->config[0]); itr = usb_dev->rawdescriptors[actconfig_idx]; itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); while (itr_size >= sizeof(*hdr)) { hdr = (struct usb_descriptor_header *) itr; dev_dbg(dev, "Extra device descriptor: " "type %02x/%u bytes @ %zu (%zu left)\n", hdr->bDescriptorType, hdr->bLength, (itr - usb_dev->rawdescriptors[actconfig_idx]), itr_size); if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER) goto found; itr += hdr->bLength; itr_size -= hdr->bLength; } dev_err(dev, "cannot find Wire Adapter Class descriptor\n"); return -ENODEV; found: result = -EINVAL; if (hdr->bLength > itr_size) { /* is it available? */ dev_err(dev, "incomplete Wire Adapter Class descriptor " "(%zu bytes left, %u needed)\n", itr_size, hdr->bLength); goto error; } if (hdr->bLength < sizeof(*wa->wa_descr)) { dev_err(dev, "short Wire Adapter Class descriptor\n"); goto error; } wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr; /* Make LE fields CPU order */ wa_descr->bcdWAVersion = le16_to_cpu(wa_descr->bcdWAVersion); wa_descr->wNumRPipes = le16_to_cpu(wa_descr->wNumRPipes); wa_descr->wRPipeMaxBlock = le16_to_cpu(wa_descr->wRPipeMaxBlock); if (wa_descr->bcdWAVersion > 0x0100) dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n", wa_descr->bcdWAVersion & 0xff00 >> 8, wa_descr->bcdWAVersion & 0x00ff); result = 0; error: return result; } static struct hc_driver hwahc_hc_driver = { .description = "hwa-hcd", .product_desc = "Wireless USB HWA host controller", .hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd), .irq = NULL, /* FIXME */ .flags = HCD_USB2, /* FIXME */ .reset = hwahc_op_reset, .start = hwahc_op_start, .stop = 
hwahc_op_stop, .get_frame_number = hwahc_op_get_frame_number, .urb_enqueue = hwahc_op_urb_enqueue, .urb_dequeue = hwahc_op_urb_dequeue, .endpoint_disable = hwahc_op_endpoint_disable, .hub_status_data = wusbhc_rh_status_data, .hub_control = wusbhc_rh_control, .bus_suspend = wusbhc_rh_suspend, .bus_resume = wusbhc_rh_resume, .start_port_reset = wusbhc_rh_start_port_reset, }; static int hwahc_security_create(struct hwahc *hwahc) { int result; struct wusbhc *wusbhc = &hwahc->wusbhc; struct usb_device *usb_dev = hwahc->wa.usb_dev; struct device *dev = &usb_dev->dev; struct usb_security_descriptor *secd; struct usb_encryption_descriptor *etd; void *itr, *top; size_t itr_size, needed, bytes; u8 index; char buf[64]; /* Find the host's security descriptors in the config descr bundle */ index = (usb_dev->actconfig - usb_dev->config) / sizeof(usb_dev->config[0]); itr = usb_dev->rawdescriptors[index]; itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); top = itr + itr_size; result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index], le16_to_cpu(usb_dev->actconfig->desc.wTotalLength), USB_DT_SECURITY, (void **) &secd); if (result == -1) { dev_warn(dev, "BUG? WUSB host has no security descriptors\n"); return 0; } needed = sizeof(*secd); if (top - (void *)secd < needed) { dev_err(dev, "BUG? Not enough data to process security " "descriptor header (%zu bytes left vs %zu needed)\n", top - (void *) secd, needed); return 0; } needed = le16_to_cpu(secd->wTotalLength); if (top - (void *)secd < needed) { dev_err(dev, "BUG? 
Not enough data to process security " "descriptors (%zu bytes left vs %zu needed)\n", top - (void *) secd, needed); return 0; } /* Walk over the sec descriptors and store CCM1's on wusbhc */ itr = (void *) secd + sizeof(*secd); top = (void *) secd + le16_to_cpu(secd->wTotalLength); index = 0; bytes = 0; while (itr < top) { etd = itr; if (top - itr < sizeof(*etd)) { dev_err(dev, "BUG: bad host security descriptor; " "not enough data (%zu vs %zu left)\n", top - itr, sizeof(*etd)); break; } if (etd->bLength < sizeof(*etd)) { dev_err(dev, "BUG: bad host encryption descriptor; " "descriptor is too short " "(%zu vs %zu needed)\n", (size_t)etd->bLength, sizeof(*etd)); break; } itr += etd->bLength; bytes += snprintf(buf + bytes, sizeof(buf) - bytes, "%s (0x%02x) ", wusb_et_name(etd->bEncryptionType), etd->bEncryptionValue); wusbhc->ccm1_etd = etd; } dev_info(dev, "supported encryption types: %s\n", buf); if (wusbhc->ccm1_etd == NULL) { dev_err(dev, "E: host doesn't support CCM-1 crypto\n"); return 0; } /* Pretty print what we support */ return 0; } static void hwahc_security_release(struct hwahc *hwahc) { /* nothing to do here so far... 
*/ } static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface) { int result; struct device *dev = &iface->dev; struct wusbhc *wusbhc = &hwahc->wusbhc; struct wahc *wa = &hwahc->wa; struct usb_device *usb_dev = interface_to_usbdev(iface); wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */ wa->usb_iface = usb_get_intf(iface); wusbhc->dev = dev; wusbhc->uwb_rc = uwb_rc_get_by_grandpa(iface->dev.parent); if (wusbhc->uwb_rc == NULL) { result = -ENODEV; dev_err(dev, "Cannot get associated UWB Host Controller\n"); goto error_rc_get; } result = wa_fill_descr(wa); /* Get the device descriptor */ if (result < 0) goto error_fill_descriptor; if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) { dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB " "adapter (%u ports)\n", wa->wa_descr->bNumPorts); wusbhc->ports_max = USB_MAXCHILDREN; } else { wusbhc->ports_max = wa->wa_descr->bNumPorts; } wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs; wusbhc->start = __hwahc_op_wusbhc_start; wusbhc->stop = __hwahc_op_wusbhc_stop; wusbhc->mmcie_add = __hwahc_op_mmcie_add; wusbhc->mmcie_rm = __hwahc_op_mmcie_rm; wusbhc->dev_info_set = __hwahc_op_dev_info_set; wusbhc->bwa_set = __hwahc_op_bwa_set; wusbhc->set_num_dnts = __hwahc_op_set_num_dnts; wusbhc->set_ptk = __hwahc_op_set_ptk; wusbhc->set_gtk = __hwahc_op_set_gtk; result = hwahc_security_create(hwahc); if (result < 0) { dev_err(dev, "Can't initialize security: %d\n", result); goto error_security_create; } wa->wusb = wusbhc; /* FIXME: ugly, need to fix */ result = wusbhc_create(&hwahc->wusbhc); if (result < 0) { dev_err(dev, "Can't create WUSB HC structures: %d\n", result); goto error_wusbhc_create; } result = wa_create(&hwahc->wa, iface); if (result < 0) goto error_wa_create; return 0; error_wa_create: wusbhc_destroy(&hwahc->wusbhc); error_wusbhc_create: /* WA Descr fill allocs no resources */ error_security_create: error_fill_descriptor: uwb_rc_put(wusbhc->uwb_rc); error_rc_get: usb_put_intf(iface); 
usb_put_dev(usb_dev);
	return result;
}

/*
 * hwahc_destroy - undo hwahc_create() under the wusbhc mutex.
 * Releases the wa, the wusbhc structures, the UWB RC reference and the
 * USB interface/device references, in that order.
 */
static void hwahc_destroy(struct hwahc *hwahc)
{
	struct wusbhc *wusbhc = &hwahc->wusbhc;

	mutex_lock(&wusbhc->mutex);
	__wa_destroy(&hwahc->wa);
	wusbhc_destroy(&hwahc->wusbhc);
	hwahc_security_release(hwahc);
	hwahc->wusbhc.dev = NULL;
	uwb_rc_put(wusbhc->uwb_rc);
	usb_put_intf(hwahc->wa.usb_iface);
	usb_put_dev(hwahc->wa.usb_dev);
	mutex_unlock(&wusbhc->mutex);
}

/* Minimal pre-create initialization of the embedded wa */
static void hwahc_init(struct hwahc *hwahc)
{
	wa_init(&hwahc->wa);
}

/*
 * hwahc_probe - USB probe entry point.
 * Allocates the HCD (the hwahc lives in its private area, see
 * .hcd_priv_size), creates the internals, registers the HCD with the
 * USB core and finishes with phase B of the WUSB HC setup. Each failure
 * unwinds exactly the steps completed so far.
 */
static int hwahc_probe(struct usb_interface *usb_iface,
		       const struct usb_device_id *id)
{
	int result;
	struct usb_hcd *usb_hcd;
	struct wusbhc *wusbhc;
	struct hwahc *hwahc;
	struct device *dev = &usb_iface->dev;

	result = -ENOMEM;
	usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev,
				 "wusb-hwa");
	if (usb_hcd == NULL) {
		dev_err(dev, "unable to allocate instance\n");
		goto error_alloc;
	}
	usb_hcd->wireless = 1;
	usb_hcd->flags |= HCD_FLAG_SAW_IRQ;
	/* hwahc is embedded in the HCD's private area */
	wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	hwahc = container_of(wusbhc, struct hwahc, wusbhc);
	hwahc_init(hwahc);
	result = hwahc_create(hwahc, usb_iface);
	if (result < 0) {
		dev_err(dev, "Cannot initialize internals: %d\n", result);
		goto error_hwahc_create;
	}
	result = usb_add_hcd(usb_hcd, 0, 0);
	if (result < 0) {
		dev_err(dev, "Cannot add HCD: %d\n", result);
		goto error_add_hcd;
	}
	result = wusbhc_b_create(&hwahc->wusbhc);
	if (result < 0) {
		dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
		goto error_wusbhc_b_create;
	}
	return 0;

error_wusbhc_b_create:
	usb_remove_hcd(usb_hcd);
error_add_hcd:
	hwahc_destroy(hwahc);
error_hwahc_create:
	usb_put_hcd(usb_hcd);
error_alloc:
	return result;
}

/* USB disconnect entry point: tear down in reverse order of probe */
static void hwahc_disconnect(struct usb_interface *usb_iface)
{
	struct usb_hcd *usb_hcd;
	struct wusbhc *wusbhc;
	struct hwahc *hwahc;

	usb_hcd = usb_get_intfdata(usb_iface);
	wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	hwahc = container_of(wusbhc, struct hwahc, wusbhc);
	wusbhc_b_destroy(&hwahc->wusbhc);
	usb_remove_hcd(usb_hcd);
	hwahc_destroy(hwahc);
	usb_put_hcd(usb_hcd);
}

/* Match any Wireless (0xe0) / Wireless USB WA (0x02) / HWA (0x01) iface */
static struct usb_device_id hwahc_id_table[] = {
	/* FIXME: use class labels for this */
	{ USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
	{},
};
MODULE_DEVICE_TABLE(usb, hwahc_id_table);

static struct usb_driver hwahc_driver = {
	.name = "hwa-hc",
	.probe = hwahc_probe,
	.disconnect = hwahc_disconnect,
	.id_table = hwahc_id_table,
};

static int __init hwahc_driver_init(void)
{
	return usb_register(&hwahc_driver);
}
module_init(hwahc_driver_init);

static void __exit hwahc_driver_exit(void)
{
	usb_deregister(&hwahc_driver);
}
module_exit(hwahc_driver_exit);

MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver");
MODULE_LICENSE("GPL");
gpl-2.0
Broadcom/cygnus-linux
arch/mips/alchemy/devboards/db1xxx.c
800
2902
// SPDX-License-Identifier: GPL-2.0 /* * Alchemy DB/PB1xxx board support. */ #include <asm/prom.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-db1x00/bcsr.h> int __init db1000_board_setup(void); int __init db1000_dev_setup(void); int __init db1500_pci_setup(void); int __init db1200_board_setup(void); int __init db1200_dev_setup(void); int __init db1300_board_setup(void); int __init db1300_dev_setup(void); int __init db1550_board_setup(void); int __init db1550_dev_setup(void); int __init db1550_pci_setup(int); static const char *board_type_str(void) { switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) { case BCSR_WHOAMI_DB1000: return "DB1000"; case BCSR_WHOAMI_DB1500: return "DB1500"; case BCSR_WHOAMI_DB1100: return "DB1100"; case BCSR_WHOAMI_PB1500: case BCSR_WHOAMI_PB1500R2: return "PB1500"; case BCSR_WHOAMI_PB1100: return "PB1100"; case BCSR_WHOAMI_PB1200_DDR1: case BCSR_WHOAMI_PB1200_DDR2: return "PB1200"; case BCSR_WHOAMI_DB1200: return "DB1200"; case BCSR_WHOAMI_DB1300: return "DB1300"; case BCSR_WHOAMI_DB1550: return "DB1550"; case BCSR_WHOAMI_PB1550_SDR: case BCSR_WHOAMI_PB1550_DDR: return "PB1550"; default: return "(unknown)"; } } const char *get_system_type(void) { return board_type_str(); } void __init board_setup(void) { int ret; switch (alchemy_get_cputype()) { case ALCHEMY_CPU_AU1000: case ALCHEMY_CPU_AU1500: case ALCHEMY_CPU_AU1100: ret = db1000_board_setup(); break; case ALCHEMY_CPU_AU1550: ret = db1550_board_setup(); break; case ALCHEMY_CPU_AU1200: ret = db1200_board_setup(); break; case ALCHEMY_CPU_AU1300: ret = db1300_board_setup(); break; default: pr_err("unsupported CPU on board\n"); ret = -ENODEV; } if (ret) panic("cannot initialize board support"); } static int __init db1xxx_arch_init(void) { int id = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)); if (id == BCSR_WHOAMI_DB1550) return db1550_pci_setup(0); else if ((id == BCSR_WHOAMI_PB1550_SDR) || (id == BCSR_WHOAMI_PB1550_DDR)) return db1550_pci_setup(1); else if ((id == 
BCSR_WHOAMI_DB1500) || (id == BCSR_WHOAMI_PB1500) || (id == BCSR_WHOAMI_PB1500R2)) return db1500_pci_setup(); return 0; } arch_initcall(db1xxx_arch_init); static int __init db1xxx_dev_init(void) { mips_set_machine_name(board_type_str()); switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) { case BCSR_WHOAMI_DB1000: case BCSR_WHOAMI_DB1500: case BCSR_WHOAMI_DB1100: case BCSR_WHOAMI_PB1500: case BCSR_WHOAMI_PB1500R2: case BCSR_WHOAMI_PB1100: return db1000_dev_setup(); case BCSR_WHOAMI_PB1200_DDR1: case BCSR_WHOAMI_PB1200_DDR2: case BCSR_WHOAMI_DB1200: return db1200_dev_setup(); case BCSR_WHOAMI_DB1300: return db1300_dev_setup(); case BCSR_WHOAMI_DB1550: case BCSR_WHOAMI_PB1550_SDR: case BCSR_WHOAMI_PB1550_DDR: return db1550_dev_setup(); } return 0; } device_initcall(db1xxx_dev_init);
gpl-2.0
smaeul/kernel_samsung_tuna
drivers/staging/rtl8712/rtl871x_mlme.c
4896
55627
/******************************************************************************
 * rtl871x_mlme.c
 *
 * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
 * Linux device driver for RTL8192SU
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * Modifications for inclusion into the Linux staging tree are
 * Copyright(c) 2010 Larry Finger. All rights reserved.
 *
 * Contact information:
 * WLAN FAE <wlanfae@realtek.com>
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 ******************************************************************************/

#define _RTL871X_MLME_C_

#include "osdep_service.h"
#include "drv_types.h"
#include "recv_osdep.h"
#include "xmit_osdep.h"
#include "mlme_osdep.h"
#include "sta_info.h"
#include "wifi.h"
#include "wlan_bssdef.h"

static void update_ht_cap(struct _adapter *padapter, u8 *pie, uint ie_len);

/*
 * Initialize the mlme_priv embedded in the adapter: locks, queues and a
 * pre-allocated pool of MAX_BSS_CNT wlan_network entries for scan results.
 * Returns _SUCCESS, or _FAIL if the pool allocation fails.
 */
static sint _init_mlme_priv(struct _adapter *padapter)
{
	sint i;
	u8 *pbuf;
	struct wlan_network *pnetwork;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	memset((u8 *)pmlmepriv, 0, sizeof(struct mlme_priv));
	pmlmepriv->nic_hdl = (u8 *)padapter;
	pmlmepriv->pscanned = NULL;
	pmlmepriv->fw_state = 0;
	pmlmepriv->cur_network.network.InfrastructureMode =
				 Ndis802_11AutoUnknown;
	/* Maybe someday we should rename this variable to "active_mode"(Jeff)*/
	pmlmepriv->passive_mode = 1; /* 1: active, 0: passive. */
	spin_lock_init(&(pmlmepriv->lock));
	spin_lock_init(&(pmlmepriv->lock2));
	_init_queue(&(pmlmepriv->free_bss_pool));
	_init_queue(&(pmlmepriv->scanned_queue));
	set_scanned_network_val(pmlmepriv, 0);
	memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
	/* one contiguous buffer, carved into list nodes below */
	pbuf = _malloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
	if (pbuf == NULL)
		return _FAIL;
	pmlmepriv->free_bss_buf = pbuf;
	pnetwork = (struct wlan_network *)pbuf;
	for (i = 0; i < MAX_BSS_CNT; i++) {
		_init_listhead(&(pnetwork->list));
		list_insert_tail(&(pnetwork->list),
				 &(pmlmepriv->free_bss_pool.queue));
		pnetwork++;
	}
	pmlmepriv->sitesurveyctrl.last_rx_pkts = 0;
	pmlmepriv->sitesurveyctrl.last_tx_pkts = 0;
	pmlmepriv->sitesurveyctrl.traffic_busy = false;
	/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
	r8712_init_mlme_timer(padapter);
	return _SUCCESS;
}

/*
 * Pop one wlan_network from the free pool, stamp it with jiffies and
 * bump num_of_scanned. Returns NULL when the pool is empty.
 *
 * NOTE(review): the emptiness test happens before the lock is taken, so
 * a concurrent caller could drain the pool in between -- verify callers
 * serialize on pmlmepriv->lock.
 */
struct wlan_network *_r8712_alloc_network(struct mlme_priv *pmlmepriv)
{
	unsigned long irqL;
	struct wlan_network *pnetwork;
	struct  __queue *free_queue = &pmlmepriv->free_bss_pool;
	struct list_head *plist = NULL;

	if (_queue_empty(free_queue) == true)
		return NULL;
	spin_lock_irqsave(&free_queue->lock, irqL);
	plist = get_next(&(free_queue->queue));
	pnetwork = LIST_CONTAINOR(plist , struct wlan_network, list);
	list_delete(&pnetwork->list);
	pnetwork->last_scanned = jiffies;
	pmlmepriv->num_of_scanned++;
	spin_unlock_irqrestore(&free_queue->lock, irqL);
	return pnetwork;
}

/*
 * Return an expired, non-fixed scan entry to the free pool. Entries
 * scanned less than SCANQUEUE_LIFETIME seconds ago are left alone.
 */
static void _free_network(struct mlme_priv *pmlmepriv,
			  struct wlan_network *pnetwork)
{
	u32 curr_time, delta_time;
	unsigned long irqL;
	struct  __queue *free_queue = &(pmlmepriv->free_bss_pool);

	if (pnetwork == NULL)
		return;
	if (pnetwork->fixed == true)
		return;
	curr_time = jiffies;
	/* age in seconds since the entry was last seen in a scan */
	delta_time = (curr_time - (u32)pnetwork->last_scanned) / HZ;
	if (delta_time < SCANQUEUE_LIFETIME)
		return;
	spin_lock_irqsave(&free_queue->lock, irqL);
	list_delete(&pnetwork->list);
	list_insert_tail(&pnetwork->list, &free_queue->queue);
	pmlmepriv->num_of_scanned--;
	spin_unlock_irqrestore(&free_queue->lock, irqL);
}

/* Same as _free_network() but unconditional and without locking:
 * the caller must already hold the relevant lock. */
static void _free_network_nolock(struct mlme_priv *pmlmepriv,
			  struct wlan_network *pnetwork)
{
	struct  __queue *free_queue = &pmlmepriv->free_bss_pool;

	if (pnetwork == NULL)
		return;
	if (pnetwork->fixed == true)
		return;
	list_delete(&pnetwork->list);
	list_insert_tail(&pnetwork->list, get_list_head(free_queue));
	pmlmepriv->num_of_scanned--;
}

/* return the wlan_network with the matching addr
 Shall be called under atomic context...
 to avoid possible racing condition... */
static struct wlan_network *_r8712_find_network(struct  __queue *scanned_queue,
					 u8 *addr)
{
	unsigned long irqL;
	struct list_head *phead, *plist;
	struct wlan_network *pnetwork = NULL;
	u8 zero_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	/* all-zero MAC can never match a real BSSID */
	if (!memcmp(zero_addr, addr, ETH_ALEN))
		return NULL;
	spin_lock_irqsave(&scanned_queue->lock, irqL);
	phead = get_list_head(scanned_queue);
	plist = get_next(phead);
	while (plist != phead) {
		pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
		plist = get_next(plist);
		if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN))
			break;
	}
	spin_unlock_irqrestore(&scanned_queue->lock, irqL);
	return pnetwork;
}

/* Sweep the scanned queue, retiring every expired non-fixed entry */
static void _free_network_queue(struct _adapter *padapter)
{
	unsigned long irqL;
	struct list_head *phead, *plist;
	struct wlan_network *pnetwork;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct  __queue *scanned_queue = &pmlmepriv->scanned_queue;

	spin_lock_irqsave(&scanned_queue->lock, irqL);
	phead = get_list_head(scanned_queue);
	plist = get_next(phead);
	while (end_of_queue_search(phead, plist) == false) {
		pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
		plist = get_next(plist);
		_free_network(pmlmepriv, pnetwork);
	}
	spin_unlock_irqrestore(&scanned_queue->lock, irqL);
}

/* true when the driver is running and the firmware reports a link */
sint r8712_if_up(struct _adapter *padapter)
{
	sint res;

	if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
	    (check_fwstate(&padapter->mlmepriv, _FW_LINKED) == false)) {
		res = false;
	} else
		res = true;
	return res;
}

/*
 * Build a locally-administered random BSSID for ad-hoc mode:
 * fixed 02:11:87 prefix, low three bytes taken from jiffies.
 */
void r8712_generate_random_ibss(u8 *pibss)
{
	u32 curtime = jiffies;
	pibss[0] = 0x02; /*in ad-hoc mode bit1 must set to 1 */
	pibss[1] = 0x11;
	pibss[2] = 0x87;
	pibss[3] = (u8)(curtime & 0xff);
	pibss[4] = (u8)((curtime>>8) & 0xff);
	pibss[5] = (u8)((curtime>>16) & 0xff);
}

/* Size in bytes of a ndis_wlan_bssid_ex including its variable IEs */
uint r8712_get_ndis_wlan_bssid_ex_sz(struct ndis_wlan_bssid_ex *bss)
{
	uint t_len;

	t_len = sizeof(u32) + 6 * sizeof(unsigned long) + 2 +
			sizeof(struct ndis_802_11_ssid) + sizeof(u32) +
			sizeof(s32) +
			sizeof(enum NDIS_802_11_NETWORK_TYPE) +
			sizeof(struct NDIS_802_11_CONFIGURATION) +
			sizeof(enum NDIS_802_11_NETWORK_INFRASTRUCTURE) +
			sizeof(NDIS_802_11_RATES_EX) +
			sizeof(u32) + bss->IELength;
	return t_len;
}

/* Capability field follows the 8-byte timestamp and 2-byte beacon
 * interval in the fixed IEs. */
u8 *r8712_get_capability_from_ie(u8 *ie)
{
	return ie + 8 + 2;
}

int r8712_init_mlme_priv(struct _adapter *padapter)
{
	return _init_mlme_priv(padapter);
}

void r8712_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
	kfree(pmlmepriv->free_bss_buf);
}

static struct wlan_network *alloc_network(struct mlme_priv *pmlmepriv)
{
	return _r8712_alloc_network(pmlmepriv);
}

static void free_network_nolock(struct mlme_priv *pmlmepriv,
			 struct wlan_network *pnetwork)
{
	_free_network_nolock(pmlmepriv, pnetwork);
}

void r8712_free_network_queue(struct _adapter *dev)
{
	_free_network_queue(dev);
}

/*
	return the wlan_network with the matching addr

	Shall be called under atomic context...
	to avoid possible racing condition...
*/
static struct wlan_network *r8712_find_network(struct  __queue *scanned_queue,
					       u8 *addr)
{
	struct wlan_network *pnetwork = _r8712_find_network(scanned_queue,
				       addr);

	return pnetwork;
}

/* true unless our privacy setting and the BSS's privacy bit disagree */
int r8712_is_same_ibss(struct _adapter *adapter, struct wlan_network *pnetwork)
{
	int ret = true;
	struct security_priv *psecuritypriv = &adapter->securitypriv;

	if ((psecuritypriv->PrivacyAlgrthm != _NO_PRIVACY_) &&
		    (pnetwork->network.Privacy == 0))
		ret = false;
	else if ((psecuritypriv->PrivacyAlgrthm == _NO_PRIVACY_) &&
		 (pnetwork->network.Privacy == 1))
		ret = false;
	else
		ret = true;
	return ret;
}

/*
 * Two BSS descriptions refer to the same network when SSID, channel
 * (DSConfig), BSSID and the IBSS/BSS capability bits all match.
 * Capability is memcpy'd out of the IEs to avoid unaligned access.
 */
static int is_same_network(struct ndis_wlan_bssid_ex *src,
			   struct ndis_wlan_bssid_ex *dst)
{
	u16 s_cap, d_cap;

	memcpy((u8 *)&s_cap, r8712_get_capability_from_ie(src->IEs), 2);
	memcpy((u8 *)&d_cap, r8712_get_capability_from_ie(dst->IEs), 2);
	return (src->Ssid.SsidLength == dst->Ssid.SsidLength) &&
	       (src->Configuration.DSConfig == dst->Configuration.DSConfig) &&
	       ((!memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN))) &&
	       ((!memcmp(src->Ssid.Ssid, dst->Ssid.Ssid,
	          src->Ssid.SsidLength))) &&
	       ((s_cap & WLAN_CAPABILITY_IBSS) ==
	       (d_cap & WLAN_CAPABILITY_IBSS)) &&
	       ((s_cap & WLAN_CAPABILITY_BSS) ==
	       (d_cap & WLAN_CAPABILITY_BSS));
}

/* Oldest (smallest last_scanned) non-fixed entry, or NULL if none */
struct wlan_network *r8712_get_oldest_wlan_network(
				struct  __queue *scanned_queue)
{
	struct list_head *plist, *phead;
	struct wlan_network *pwlan = NULL;
	struct wlan_network *oldest = NULL;

	phead = get_list_head(scanned_queue);
	plist = get_next(phead);
	while (1) {
		if (end_of_queue_search(phead, plist) == true)
			break;
		pwlan = LIST_CONTAINOR(plist, struct wlan_network, list);
		if (pwlan->fixed != true) {
			if (oldest == NULL ||
			    time_after((unsigned long)oldest->last_scanned,
				       (unsigned long)pwlan->last_scanned))
				oldest = pwlan;
		}
		plist = get_next(plist);
	}
	return oldest;
}

/*
 * Merge a freshly scanned BSS (src) into a stored one (dst). When src
 * is the currently linked network, its RSSI is folded into a sliding
 * window average that also feeds recvpriv.signal (the UI percentage);
 * otherwise the two RSSI values are simply averaged. Finally dst is
 * overwritten wholesale with src.
 */
static void update_network(struct ndis_wlan_bssid_ex *dst,
			   struct ndis_wlan_bssid_ex *src,
			   struct _adapter *padapter)
{
	u32 last_evm = 0, tmpVal;

	if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) &&
	    is_same_network(&(padapter->mlmepriv.cur_network.network), src)) {
		/* window full: drop the sample being replaced */
		if (padapter->recvpriv.signal_qual_data.total_num++ >=
		    PHY_LINKQUALITY_SLID_WIN_MAX) {
			padapter->recvpriv.signal_qual_data.total_num =
				   PHY_LINKQUALITY_SLID_WIN_MAX;
			last_evm = padapter->recvpriv.signal_qual_data.
				   elements[padapter->recvpriv.
				   signal_qual_data.index];
			padapter->recvpriv.signal_qual_data.total_val -=
				 last_evm;
		}
		padapter->recvpriv.signal_qual_data.total_val += src->Rssi;
		padapter->recvpriv.signal_qual_data.
			  elements[padapter->recvpriv.signal_qual_data.
			  index++] = src->Rssi;
		if (padapter->recvpriv.signal_qual_data.index >=
		    PHY_LINKQUALITY_SLID_WIN_MAX)
			padapter->recvpriv.signal_qual_data.index = 0;
		/* <1> Showed on UI for user, in percentage. */
		tmpVal = padapter->recvpriv.signal_qual_data.total_val /
			 padapter->recvpriv.signal_qual_data.total_num;
		padapter->recvpriv.signal = (u8)tmpVal;
		src->Rssi = padapter->recvpriv.signal;
	} else
		src->Rssi = (src->Rssi + dst->Rssi) / 2;
	memcpy((u8 *)dst, (u8 *)src, r8712_get_ndis_wlan_bssid_ex_sz(src));
}

/* If the scanned BSS is the one we are on, refresh cur_network and the
 * protection (ERP) settings derived from its IEs. */
static void update_current_network(struct _adapter *adapter,
				   struct ndis_wlan_bssid_ex *pnetwork)
{
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;

	if (is_same_network(&(pmlmepriv->cur_network.network), pnetwork)) {
		update_network(&(pmlmepriv->cur_network.network),
			       pnetwork, adapter);
		r8712_update_protection(adapter,
			(pmlmepriv->cur_network.network.IEs) +
			sizeof(struct NDIS_802_11_FIXED_IEs),
			pmlmepriv->cur_network.network.IELength);
	}
}

/* Caller must hold pmlmepriv->lock first. */
static void update_scanned_network(struct _adapter *adapter,
			    struct ndis_wlan_bssid_ex *target)
{
	struct list_head *plist, *phead;
	u32 bssid_ex_sz;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
	struct  __queue *queue = &pmlmepriv->scanned_queue;
	struct wlan_network *pnetwork = NULL;
	struct wlan_network *oldest = NULL;

	phead = get_list_head(queue);
	plist = get_next(phead);

	/* look for an existing entry; remember the oldest for recycling */
	while (1) {
		if (end_of_queue_search(phead, plist) == true)
			break;
		pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
		if (is_same_network(&pnetwork->network, target))
			break;
		if ((oldest == ((struct wlan_network *)0)) ||
		    time_after((unsigned long)oldest->last_scanned,
			       (unsigned long)pnetwork->last_scanned))
			oldest = pnetwork;
		plist = get_next(plist);
	}

	/* If we didn't find a match, then get a new network slot to initialize
	 * with this beacon's information */
	if (end_of_queue_search(phead, plist) == true) {
		if (_queue_empty(&pmlmepriv->free_bss_pool) == true) {
			/* If there are no more slots, expire the oldest */
			/* NOTE(review): if the scanned queue is empty too,
			 * oldest is NULL here -- presumably both pools can't
			 * be empty at once; confirm. */
			pnetwork = oldest;
			target->Rssi = (pnetwork->network.Rssi +
					target->Rssi) / 2;
			memcpy(&pnetwork->network, target,
				r8712_get_ndis_wlan_bssid_ex_sz(target));
			pnetwork->last_scanned = jiffies;
		} else {
			/* Otherwise just pull from the free list */
			/* update scan_time */
			pnetwork = alloc_network(pmlmepriv);
			if (pnetwork == NULL)
				return;
			bssid_ex_sz = r8712_get_ndis_wlan_bssid_ex_sz(target);
			target->Length = bssid_ex_sz;
			memcpy(&pnetwork->network, target, bssid_ex_sz);
			list_insert_tail(&pnetwork->list, &queue->queue);
		}
	} else {
		/* we have an entry and we are going to update it. But
		 * this entry may be already expired. In this case we
		 * do the same as we found a new net and call the new_net
		 * handler */
		update_network(&pnetwork->network, target, adapter);
		pnetwork->last_scanned = jiffies;
	}
}

/* Take the scanned-queue lock, then refresh both the current network
 * and the scanned list with this beacon/probe response. */
static void rtl8711_add_network(struct _adapter *adapter,
			 struct ndis_wlan_bssid_ex *pnetwork)
{
	unsigned long irqL;
	struct mlme_priv *pmlmepriv = &(((struct _adapter *)adapter)->mlmepriv);
	struct  __queue *queue = &pmlmepriv->scanned_queue;

	spin_lock_irqsave(&queue->lock, irqL);
	update_current_network(adapter, pnetwork);
	update_scanned_network(adapter, pnetwork);
	spin_unlock_irqrestore(&queue->lock, irqL);
}

/*select the desired network based on the capability of the (i)bss.
 * check items:		(1) security
 *			(2) network_type
 *			(3) WMM
 *			(4) HT
 *			(5) others
 */
static int is_desired_network(struct _adapter *adapter,
				struct wlan_network *pnetwork)
{
	u8 wps_ie[512];
	uint wps_ielen;
	int bselected = true;
	struct security_priv *psecuritypriv = &adapter->securitypriv;

	/* during WPS, only networks carrying a WPS IE are acceptable */
	if (psecuritypriv->wps_phase == true) {
		if (r8712_get_wps_ie(pnetwork->network.IEs,
		    pnetwork->network.IELength, wps_ie,
		    &wps_ielen) == true)
			return true;
		else
			return false;
	}
	if ((psecuritypriv->PrivacyAlgrthm != _NO_PRIVACY_) &&
		    (pnetwork->network.Privacy == 0))
		bselected = false;
	if (check_fwstate(&adapter->mlmepriv, WIFI_ADHOC_STATE) == true) {
		if (pnetwork->network.InfrastructureMode !=
			adapter->mlmepriv.cur_network.network.
InfrastructureMode)
			bselected = false;
	}
	return bselected;
}

/* TODO: Perry : For Power Management */
void r8712_atimdone_event_callback(struct _adapter *adapter , u8 *pbuf)
{
}

/*
 * Firmware "survey" event: one scanned BSS arrives in pbuf. Convert it
 * to CPU endianness, sanity-check its size, refresh the IBSS timestamp
 * if it is our own IBSS, and otherwise add it to the scanned queue
 * (hidden SSIDs are stored as "<hidden>"). Serialized on lock2.
 */
void r8712_survey_event_callback(struct _adapter *adapter, u8 *pbuf)
{
	unsigned long flags;
	u32 len;
	struct ndis_wlan_bssid_ex *pnetwork;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;

	pnetwork = (struct ndis_wlan_bssid_ex *)pbuf;
#ifdef __BIG_ENDIAN
	/* endian_convert */
	pnetwork->Length = le32_to_cpu(pnetwork->Length);
	pnetwork->Ssid.SsidLength = le32_to_cpu(pnetwork->Ssid.SsidLength);
	pnetwork->Privacy = le32_to_cpu(pnetwork->Privacy);
	pnetwork->Rssi = le32_to_cpu(pnetwork->Rssi);
	pnetwork->NetworkTypeInUse = le32_to_cpu(pnetwork->NetworkTypeInUse);
	pnetwork->Configuration.ATIMWindow =
		 le32_to_cpu(pnetwork->Configuration.ATIMWindow);
	pnetwork->Configuration.BeaconPeriod =
		 le32_to_cpu(pnetwork->Configuration.BeaconPeriod);
	pnetwork->Configuration.DSConfig =
		 le32_to_cpu(pnetwork->Configuration.DSConfig);
	pnetwork->Configuration.FHConfig.DwellTime =
		 le32_to_cpu(pnetwork->Configuration.FHConfig.DwellTime);
	pnetwork->Configuration.FHConfig.HopPattern =
		 le32_to_cpu(pnetwork->Configuration.FHConfig.HopPattern);
	pnetwork->Configuration.FHConfig.HopSet =
		 le32_to_cpu(pnetwork->Configuration.FHConfig.HopSet);
	pnetwork->Configuration.FHConfig.Length =
		 le32_to_cpu(pnetwork->Configuration.FHConfig.Length);
	pnetwork->Configuration.Length =
		 le32_to_cpu(pnetwork->Configuration.Length);
	pnetwork->InfrastructureMode =
		 le32_to_cpu(pnetwork->InfrastructureMode);
	pnetwork->IELength = le32_to_cpu(pnetwork->IELength);
#endif
	/* drop oversized entries instead of overflowing the pool slots */
	len = r8712_get_ndis_wlan_bssid_ex_sz(pnetwork);
	if (len > sizeof(struct wlan_bssid_ex))
		return;
	spin_lock_irqsave(&pmlmepriv->lock2, flags);
	/* update IBSS_network 's timestamp */
	if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
		if (!memcmp(&(pmlmepriv->cur_network.network.MacAddress),
		    pnetwork->MacAddress, ETH_ALEN)) {
			struct wlan_network *ibss_wlan = NULL;

			/* first 8 IE bytes hold the TSF timestamp */
			memcpy(pmlmepriv->cur_network.network.IEs,
				pnetwork->IEs, 8);
			ibss_wlan = r8712_find_network(
						&pmlmepriv->scanned_queue,
						pnetwork->MacAddress);
			if (ibss_wlan) {
				memcpy(ibss_wlan->network.IEs,
					pnetwork->IEs, 8);
				goto exit;
			}
		}
	}
	/* lock pmlmepriv->lock when you accessing network_q */
	if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == false) {
		if (pnetwork->Ssid.Ssid[0] != 0)
			rtl8711_add_network(adapter, pnetwork);
		else {
			pnetwork->Ssid.SsidLength = 8;
			memcpy(pnetwork->Ssid.Ssid, "<hidden>", 8);
			rtl8711_add_network(adapter, pnetwork);
		}
	}
exit:
	spin_unlock_irqrestore(&pmlmepriv->lock2, flags);
}

/*
 * Firmware "survey done" event: cancel the scan timeout and, when a
 * join is pending, pick a network from the scan results. For ad-hoc,
 * a failed selection falls back to creating our own IBSS with a random
 * BSSID; for infrastructure it simply clears the linking state.
 */
void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
{
	unsigned long irqL;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;

	spin_lock_irqsave(&pmlmepriv->lock, irqL);
	if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
		u8 timer_cancelled;

		_cancel_timer(&pmlmepriv->scan_to_timer, &timer_cancelled);
		_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
	}
	if (pmlmepriv->to_join == true) {
		if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) {
			if (check_fwstate(pmlmepriv, _FW_LINKED) == false) {
				set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
				if (r8712_select_and_join_from_scan(pmlmepriv)
				    == _SUCCESS)
					_set_timer(&pmlmepriv->assoc_timer,
						   MAX_JOIN_TIMEOUT);
				else {
					/* no joinable IBSS: become the
					 * IBSS master ourselves */
					struct wlan_bssid_ex *pdev_network =
					  &(adapter->registrypriv.dev_network);
					u8 *pibss =
						 adapter->registrypriv.
							dev_network.MacAddress;
					pmlmepriv->fw_state ^=
						 _FW_UNDER_SURVEY;
					memset(&pdev_network->Ssid, 0,
						sizeof(struct
						ndis_802_11_ssid));
					memcpy(&pdev_network->Ssid,
						&pmlmepriv->assoc_ssid,
						sizeof(struct
						ndis_802_11_ssid));
					r8712_update_registrypriv_dev_network
						(adapter);
					r8712_generate_random_ibss(pibss);
					pmlmepriv->fw_state =
						 WIFI_ADHOC_MASTER_STATE;
					pmlmepriv->to_join = false;
				}
			}
		} else {
			pmlmepriv->to_join = false;
			set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
			if (r8712_select_and_join_from_scan(pmlmepriv) ==
			    _SUCCESS)
				_set_timer(&pmlmepriv->assoc_timer,
					   MAX_JOIN_TIMEOUT);
			else
				_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
		}
	}
	spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
}

/*
 *r8712_free_assoc_resources: the caller has to lock pmlmepriv->lock
 */
void r8712_free_assoc_resources(struct _adapter *adapter)
{
	unsigned long irqL;
	struct wlan_network *pwlan = NULL;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
	struct sta_priv *pstapriv = &adapter->stapriv;
	struct wlan_network *tgt_network = &pmlmepriv->cur_network;

	pwlan = r8712_find_network(&pmlmepriv->scanned_queue,
				   tgt_network->network.MacAddress);
	/* drop the station entry for the AP / peer we were linked to */
	if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_AP_STATE)) {
		struct sta_info *psta;

		psta = r8712_get_stainfo(&adapter->stapriv,
					 tgt_network->network.MacAddress);
		spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL);
		r8712_free_stainfo(adapter, psta);
		spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL);
	}
	if (check_fwstate(pmlmepriv,
	    WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE))
		r8712_free_all_stainfo(adapter);
	if (pwlan)
		pwlan->fixed = false;
	if (((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) &&
	     (adapter->stapriv.asoc_sta_count == 1)))
		free_network_nolock(pmlmepriv, pwlan);
}

/*
 *r8712_indicate_connect: the caller has to lock pmlmepriv->lock
 */
void r8712_indicate_connect(struct _adapter *padapter)
{
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	pmlmepriv->to_join = false;
	set_fwstate(pmlmepriv, _FW_LINKED);
	padapter->ledpriv.LedControlHandler(padapter, LED_CTL_LINK);
	r8712_os_indicate_connect(padapter);
	/* defer power save until DHCP has had a chance to finish */
	if (padapter->registrypriv.power_mgnt > PS_MODE_ACTIVE)
		_set_timer(&pmlmepriv->dhcp_timer, 60000);
}

/*
 *r8712_ind_disconnect: the caller has to lock pmlmepriv->lock
 */
void r8712_ind_disconnect(struct _adapter *padapter)
{
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
		_clr_fwstate_(pmlmepriv, _FW_LINKED);
		padapter->ledpriv.LedControlHandler(padapter, LED_CTL_NO_LINK);
		r8712_os_indicate_disconnect(padapter);
	}
	/* restore the configured power-save mode if it was overridden */
	if (padapter->pwrctrlpriv.pwr_mode !=
	    padapter->registrypriv.power_mgnt) {
		_cancel_timer_ex(&pmlmepriv->dhcp_timer);
		r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt,
				  padapter->registrypriv.smart_ps);
	}
}

/*Notes:
 *pnetwork : returns from r8712_joinbss_event_callback
 *ptarget_wlan: found from scanned_queue
 *if join_res > 0, for (fw_state==WIFI_STATION_STATE), we check if
 * "ptarget_sta" & "ptarget_wlan" exist.
 *if join_res > 0, for (fw_state==WIFI_ADHOC_STATE), we only check
 * if "ptarget_wlan" exist.
 *if join_res > 0, update "cur_network->network" from
 * "pnetwork->network" if (ptarget_wlan !=NULL).
*/ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf) { unsigned long irqL = 0, irqL2; u8 timer_cancelled; struct sta_info *ptarget_sta = NULL, *pcur_sta = NULL; struct sta_priv *pstapriv = &adapter->stapriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct wlan_network *cur_network = &pmlmepriv->cur_network; struct wlan_network *pcur_wlan = NULL, *ptarget_wlan = NULL; unsigned int the_same_macaddr = false; struct wlan_network *pnetwork; if (sizeof(struct list_head) == 4 * sizeof(u32)) { pnetwork = (struct wlan_network *) _malloc(sizeof(struct wlan_network)); memcpy((u8 *)pnetwork+16, (u8 *)pbuf + 8, sizeof(struct wlan_network) - 16); } else pnetwork = (struct wlan_network *)pbuf; #ifdef __BIG_ENDIAN /* endian_convert */ pnetwork->join_res = le32_to_cpu(pnetwork->join_res); pnetwork->network_type = le32_to_cpu(pnetwork->network_type); pnetwork->network.Length = le32_to_cpu(pnetwork->network.Length); pnetwork->network.Ssid.SsidLength = le32_to_cpu(pnetwork->network.Ssid.SsidLength); pnetwork->network.Privacy = le32_to_cpu(pnetwork->network.Privacy); pnetwork->network.Rssi = le32_to_cpu(pnetwork->network.Rssi); pnetwork->network.NetworkTypeInUse = le32_to_cpu(pnetwork->network.NetworkTypeInUse); pnetwork->network.Configuration.ATIMWindow = le32_to_cpu(pnetwork->network.Configuration.ATIMWindow); pnetwork->network.Configuration.BeaconPeriod = le32_to_cpu(pnetwork->network.Configuration.BeaconPeriod); pnetwork->network.Configuration.DSConfig = le32_to_cpu(pnetwork->network.Configuration.DSConfig); pnetwork->network.Configuration.FHConfig.DwellTime = le32_to_cpu(pnetwork->network.Configuration.FHConfig. DwellTime); pnetwork->network.Configuration.FHConfig.HopPattern = le32_to_cpu(pnetwork->network.Configuration. 
FHConfig.HopPattern); pnetwork->network.Configuration.FHConfig.HopSet = le32_to_cpu(pnetwork->network.Configuration.FHConfig.HopSet); pnetwork->network.Configuration.FHConfig.Length = le32_to_cpu(pnetwork->network.Configuration.FHConfig.Length); pnetwork->network.Configuration.Length = le32_to_cpu(pnetwork->network.Configuration.Length); pnetwork->network.InfrastructureMode = le32_to_cpu(pnetwork->network.InfrastructureMode); pnetwork->network.IELength = le32_to_cpu(pnetwork->network.IELength); #endif the_same_macaddr = !memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN); pnetwork->network.Length = r8712_get_ndis_wlan_bssid_ex_sz(&pnetwork->network); spin_lock_irqsave(&pmlmepriv->lock, irqL); if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex)) goto ignore_joinbss_callback; if (pnetwork->join_res > 0) { if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) { /*s1. find ptarget_wlan*/ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) { if (the_same_macaddr == true) ptarget_wlan = r8712_find_network(&pmlmepriv-> scanned_queue, cur_network->network.MacAddress); else { pcur_wlan = r8712_find_network(&pmlmepriv-> scanned_queue, cur_network->network.MacAddress); pcur_wlan->fixed = false; pcur_sta = r8712_get_stainfo(pstapriv, cur_network->network.MacAddress); spin_lock_irqsave(&pstapriv-> sta_hash_lock, irqL2); r8712_free_stainfo(adapter, pcur_sta); spin_unlock_irqrestore(&(pstapriv-> sta_hash_lock), irqL2); ptarget_wlan = r8712_find_network(&pmlmepriv-> scanned_queue, pnetwork->network. MacAddress); if (ptarget_wlan) ptarget_wlan->fixed = true; } } else { ptarget_wlan = r8712_find_network(&pmlmepriv-> scanned_queue, pnetwork->network.MacAddress); if (ptarget_wlan) ptarget_wlan->fixed = true; } if (ptarget_wlan == NULL) { if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) pmlmepriv->fw_state ^= _FW_UNDER_LINKING; goto ignore_joinbss_callback; } /*s2. 
find ptarget_sta & update ptarget_sta*/ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) { if (the_same_macaddr == true) { ptarget_sta = r8712_get_stainfo(pstapriv, pnetwork->network.MacAddress); if (ptarget_sta == NULL) ptarget_sta = r8712_alloc_stainfo(pstapriv, pnetwork->network.MacAddress); } else ptarget_sta = r8712_alloc_stainfo(pstapriv, pnetwork->network.MacAddress); if (ptarget_sta) /*update ptarget_sta*/ { ptarget_sta->aid = pnetwork->join_res; ptarget_sta->qos_option = 1; ptarget_sta->mac_id = 5; if (adapter->securitypriv. AuthAlgrthm == 2) { adapter->securitypriv. binstallGrpkey = false; adapter->securitypriv. busetkipkey = false; adapter->securitypriv. bgrpkey_handshake = false; ptarget_sta->ieee8021x_blocked = true; ptarget_sta->XPrivacy = adapter->securitypriv. PrivacyAlgrthm; memset((u8 *)&ptarget_sta-> x_UncstKey, 0, sizeof(union Keytype)); memset((u8 *)&ptarget_sta-> tkiprxmickey, 0, sizeof(union Keytype)); memset((u8 *)&ptarget_sta-> tkiptxmickey, 0, sizeof(union Keytype)); memset((u8 *)&ptarget_sta-> txpn, 0, sizeof(union pn48)); memset((u8 *)&ptarget_sta-> rxpn, 0, sizeof(union pn48)); } } else { if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) pmlmepriv->fw_state ^= _FW_UNDER_LINKING; goto ignore_joinbss_callback; } } /*s3. 
update cur_network & indicate connect*/ memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length); cur_network->aid = pnetwork->join_res; /*update fw_state will clr _FW_UNDER_LINKING*/ switch (pnetwork->network.InfrastructureMode) { case Ndis802_11Infrastructure: pmlmepriv->fw_state = WIFI_STATION_STATE; break; case Ndis802_11IBSS: pmlmepriv->fw_state = WIFI_ADHOC_STATE; break; default: pmlmepriv->fw_state = WIFI_NULL_STATE; break; } r8712_update_protection(adapter, (cur_network->network.IEs) + sizeof(struct NDIS_802_11_FIXED_IEs), (cur_network->network.IELength)); /*TODO: update HT_Capability*/ update_ht_cap(adapter, cur_network->network.IEs, cur_network->network.IELength); /*indicate connect*/ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) r8712_indicate_connect(adapter); _cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled); } else goto ignore_joinbss_callback; } else { if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) { _set_timer(&pmlmepriv->assoc_timer, 1); _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); } } ignore_joinbss_callback: spin_unlock_irqrestore(&pmlmepriv->lock, irqL); if (sizeof(struct list_head) == 4 * sizeof(u32)) kfree((u8 *)pnetwork); } void r8712_stassoc_event_callback(struct _adapter *adapter, u8 *pbuf) { unsigned long irqL; struct sta_info *psta; struct mlme_priv *pmlmepriv = &(adapter->mlmepriv); struct stassoc_event *pstassoc = (struct stassoc_event *)pbuf; /* to do: */ if (r8712_access_ctrl(&adapter->acl_list, pstassoc->macaddr) == false) return; psta = r8712_get_stainfo(&adapter->stapriv, pstassoc->macaddr); if (psta != NULL) { /*the sta have been in sta_info_queue => do nothing *(between drv has received this event before and * fw have not yet to set key to CAM_ENTRY) */ return; } psta = r8712_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr); if (psta == NULL) return; /* to do : init sta_info variable */ psta->qos_option = 0; psta->mac_id = le32_to_cpu((uint)pstassoc->cam_id); /* psta->aid = 
(uint)pstassoc->cam_id; */ if (adapter->securitypriv.AuthAlgrthm == 2) psta->XPrivacy = adapter->securitypriv.PrivacyAlgrthm; psta->ieee8021x_blocked = false; spin_lock_irqsave(&pmlmepriv->lock, irqL); if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) || (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) { if (adapter->stapriv.asoc_sta_count == 2) { /* a sta + bc/mc_stainfo (not Ibss_stainfo) */ r8712_indicate_connect(adapter); } } spin_unlock_irqrestore(&pmlmepriv->lock, irqL); } void r8712_stadel_event_callback(struct _adapter *adapter, u8 *pbuf) { unsigned long irqL, irqL2; struct sta_info *psta; struct wlan_network *pwlan = NULL; struct wlan_bssid_ex *pdev_network = NULL; u8 *pibss = NULL; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct stadel_event *pstadel = (struct stadel_event *)pbuf; struct sta_priv *pstapriv = &adapter->stapriv; struct wlan_network *tgt_network = &pmlmepriv->cur_network; spin_lock_irqsave(&pmlmepriv->lock, irqL2); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) { r8712_ind_disconnect(adapter); r8712_free_assoc_resources(adapter); } if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE)) { psta = r8712_get_stainfo(&adapter->stapriv, pstadel->macaddr); spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL); r8712_free_stainfo(adapter, psta); spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL); if (adapter->stapriv.asoc_sta_count == 1) { /*a sta + bc/mc_stainfo (not Ibss_stainfo) */ pwlan = r8712_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress); if (pwlan) { pwlan->fixed = false; free_network_nolock(pmlmepriv, pwlan); } /*re-create ibss*/ pdev_network = &(adapter->registrypriv.dev_network); pibss = adapter->registrypriv.dev_network.MacAddress; memcpy(pdev_network, &tgt_network->network, r8712_get_ndis_wlan_bssid_ex_sz(&tgt_network-> network)); memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid)); memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, 
sizeof(struct ndis_802_11_ssid)); r8712_update_registrypriv_dev_network(adapter); r8712_generate_random_ibss(pibss); if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { _clr_fwstate_(pmlmepriv, WIFI_ADHOC_STATE); set_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE); } } } spin_unlock_irqrestore(&pmlmepriv->lock, irqL2); } void r8712_cpwm_event_callback(struct _adapter *adapter, u8 *pbuf) { struct reportpwrstate_parm *preportpwrstate = (struct reportpwrstate_parm *)pbuf; preportpwrstate->state |= (u8)(adapter->pwrctrlpriv.cpwm_tog + 0x80); r8712_cpwm_int_hdl(adapter, preportpwrstate); } /* When the Netgear 3500 AP is with WPA2PSK-AES mode, it will send * the ADDBA req frame with start seq control = 0 to wifi client after * the WPA handshake and the seqence number of following data packet * will be 0. In this case, the Rx reorder sequence is not longer than 0 * and the WiFi client will drop the data with seq number 0. * So, the 8712 firmware has to inform driver with receiving the * ADDBA-Req frame so that the driver can reset the * sequence value of Rx reorder contorl. */ void r8712_got_addbareq_event_callback(struct _adapter *adapter, u8 *pbuf) { struct ADDBA_Req_Report_parm *pAddbareq_pram = (struct ADDBA_Req_Report_parm *)pbuf; struct sta_info *psta; struct sta_priv *pstapriv = &adapter->stapriv; struct recv_reorder_ctrl *precvreorder_ctrl = NULL; printk(KERN_INFO "r8712u: [%s] mac = %pM, seq = %d, tid = %d\n", __func__, pAddbareq_pram->MacAddress, pAddbareq_pram->StartSeqNum, pAddbareq_pram->tid); psta = r8712_get_stainfo(pstapriv, pAddbareq_pram->MacAddress); if (psta) { precvreorder_ctrl = &psta->recvreorder_ctrl[pAddbareq_pram->tid]; /* set the indicate_seq to 0xffff so that the rx reorder * can store any following data packet. 
		 */
		precvreorder_ctrl->indicate_seq = 0xffff;
	}
}

/*
 * WPS hardware push-button event: latch the "pressed" flag.  It is a
 * one-shot latch; userspace is expected to read and clear it elsewhere.
 */
void r8712_wpspbc_event_callback(struct _adapter *adapter, u8 *pbuf)
{
	if (adapter->securitypriv.wps_hw_pbc_pressed == false)
		adapter->securitypriv.wps_hw_pbc_pressed = true;
}

/*
 * Periodic site-survey throttle: compare tx/rx packet deltas since the
 * last invocation against busy_thresh and record whether traffic is
 * currently "busy" (used to decide whether scanning would be disruptive).
 */
void _r8712_sitesurvey_ctrl_handler(struct _adapter *adapter)
{
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
	struct sitesurvey_ctrl *psitesurveyctrl = &pmlmepriv->sitesurveyctrl;
	struct registry_priv *pregistrypriv = &adapter->registrypriv;
	u64 current_tx_pkts;
	uint current_rx_pkts;

	current_tx_pkts = (adapter->xmitpriv.tx_pkts) -
			  (psitesurveyctrl->last_tx_pkts);
	current_rx_pkts = (adapter->recvpriv.rx_pkts) -
			  (psitesurveyctrl->last_rx_pkts);
	psitesurveyctrl->last_tx_pkts = adapter->xmitpriv.tx_pkts;
	psitesurveyctrl->last_rx_pkts = adapter->recvpriv.rx_pkts;
	if ((current_tx_pkts > pregistrypriv->busy_thresh) ||
	    (current_rx_pkts > pregistrypriv->busy_thresh))
		psitesurveyctrl->traffic_busy = true;
	else
		psitesurveyctrl->traffic_busy = false;
}

/*
 * Association attempt timed out: drop the under-linking state, report a
 * disconnect to the OS if we believed we were linked, and fall back to
 * the user-configured power-save mode.  No-op if the driver is stopping.
 */
void _r8712_join_timeout_handler(struct _adapter *adapter)
{
	unsigned long irqL;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;

	if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
		return;
	spin_lock_irqsave(&pmlmepriv->lock, irqL);
	_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
	pmlmepriv->to_join = false;
	if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
		r8712_os_indicate_disconnect(adapter);
		_clr_fwstate_(pmlmepriv, _FW_LINKED);
	}
	if (adapter->pwrctrlpriv.pwr_mode !=
	    adapter->registrypriv.power_mgnt) {
		r8712_set_ps_mode(adapter, adapter->registrypriv.power_mgnt,
				  adapter->registrypriv.smart_ps);
	}
	spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
}

/*
 * Scan timed out: clear the survey state and abandon any pending join.
 */
void r8712_scan_timeout_handler (struct _adapter *adapter)
{
	unsigned long irqL;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;

	spin_lock_irqsave(&pmlmepriv->lock, irqL);
	_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
	pmlmepriv->to_join = false; /* scan fail, so clear to_join flag */
	spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
}

void
_r8712_dhcp_timeout_handler (struct _adapter *adapter)
{
	/* DHCP grace period (armed on connect) has elapsed: it is now safe
	 * to enter the user-configured power-save mode. */
	if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
		return;
	if (adapter->pwrctrlpriv.pwr_mode != adapter->registrypriv.power_mgnt)
		r8712_set_ps_mode(adapter, adapter->registrypriv.power_mgnt,
				  adapter->registrypriv.smart_ps);
}

/* Watchdog tick: queue the periodic watchdog work command. */
void _r8712_wdg_timeout_handler(struct _adapter *adapter)
{
	r8712_wdg_wk_cmd(adapter);
}

/*
 * r8712_select_and_join_from_scan - pick a BSS from the scanned queue and
 * issue a join for it.
 *
 * Selection order: by exact BSSID when assoc_by_bssid is set; otherwise by
 * SSID match (optionally keeping the candidate with the best RSSI when
 * assoc_by_rssi is set); a zero-length target SSID matches the first entry.
 *
 * Returns the r8712_joinbss_cmd() result, 2 when already associated to the
 * requested network (connect indication is simply re-issued), or _FAIL when
 * no suitable network is found.
 */
int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv)
{
	struct list_head *phead;
	unsigned char *dst_ssid, *src_ssid;
	struct _adapter *adapter;
	struct __queue *queue = NULL;
	struct wlan_network *pnetwork = NULL;
	struct wlan_network *pnetwork_max_rssi = NULL;

	adapter = (struct _adapter *)pmlmepriv->nic_hdl;
	queue = &pmlmepriv->scanned_queue;
	phead = get_list_head(queue);
	pmlmepriv->pscanned = get_next(phead);
	while (1) {
		if (end_of_queue_search(phead, pmlmepriv->pscanned) == true) {
			/* End of list: fall back to the best-RSSI candidate
			 * collected during the walk, if any. */
			if ((pmlmepriv->assoc_by_rssi == true) &&
			    (pnetwork_max_rssi != NULL)) {
				pnetwork = pnetwork_max_rssi;
				goto ask_for_joinbss;
			}
			return _FAIL;
		}
		pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
					  struct wlan_network, list);
		if (pnetwork == NULL)
			return _FAIL;
		pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
		if (pmlmepriv->assoc_by_bssid == true) {
			dst_ssid = pnetwork->network.MacAddress;
			src_ssid = pmlmepriv->assoc_bssid;
			if (!memcmp(dst_ssid, src_ssid, ETH_ALEN)) {
				if (check_fwstate(pmlmepriv, _FW_LINKED)) {
					if (is_same_network(&pmlmepriv->
					    cur_network.network,
					    &pnetwork->network)) {
						_clr_fwstate_(pmlmepriv,
							_FW_UNDER_LINKING);
						/* r8712_indicate_connect
						 * again */
						r8712_indicate_connect(
							adapter);
						return 2;
					}
					/* Linked to a different BSS: tear the
					 * old association down first. */
					r8712_disassoc_cmd(adapter);
					r8712_ind_disconnect(adapter);
					r8712_free_assoc_resources(adapter);
				}
				goto ask_for_joinbss;
			}
		} else if (pmlmepriv->assoc_ssid.SsidLength == 0)
			goto ask_for_joinbss;
		dst_ssid = pnetwork->network.Ssid.Ssid;
		src_ssid = pmlmepriv->assoc_ssid.Ssid;
		if ((pnetwork->network.Ssid.SsidLength ==
		    pmlmepriv->assoc_ssid.SsidLength) &&
		    (!memcmp(dst_ssid, src_ssid,
	     pmlmepriv->assoc_ssid.SsidLength))) {
			if (pmlmepriv->assoc_by_rssi == true) {
				/* if the ssid is the same, select the bss
				 * which has the max rssi */
				if (pnetwork_max_rssi) {
					if (pnetwork->network.Rssi >
					    pnetwork_max_rssi->network.Rssi)
						pnetwork_max_rssi = pnetwork;
				} else
					pnetwork_max_rssi = pnetwork;
			} else if (is_desired_network(adapter, pnetwork)) {
				if (check_fwstate(pmlmepriv, _FW_LINKED)) {
					r8712_disassoc_cmd(adapter);
					r8712_free_assoc_resources(adapter);
				}
				goto ask_for_joinbss;
			}
		}
	}
	return _FAIL;
ask_for_joinbss:
	return r8712_joinbss_cmd(adapter, pnetwork);
}

/*
 * r8712_set_auth - queue a firmware command selecting the authentication
 * algorithm (psecuritypriv->AuthAlgrthm).
 *
 * Allocates a cmd_obj plus parameter block and enqueues it; the command
 * thread owns and frees both afterwards.  Returns _SUCCESS, or _FAIL on
 * allocation failure.
 */
sint r8712_set_auth(struct _adapter *adapter,
		    struct security_priv *psecuritypriv)
{
	struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
	struct cmd_obj *pcmd;
	struct setauth_parm *psetauthparm;
	sint ret = _SUCCESS;

	pcmd = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
	if (pcmd == NULL)
		return _FAIL;
	psetauthparm = (struct setauth_parm *)_malloc(
			sizeof(struct setauth_parm));
	if (psetauthparm == NULL) {
		kfree((unsigned char *)pcmd);
		return _FAIL;
	}
	memset(psetauthparm, 0, sizeof(struct setauth_parm));
	psetauthparm->mode = (u8)psecuritypriv->AuthAlgrthm;
	pcmd->cmdcode = _SetAuth_CMD_;
	pcmd->parmbuf = (unsigned char *)psetauthparm;
	pcmd->cmdsz = sizeof(struct setauth_parm);
	pcmd->rsp = NULL;
	pcmd->rspsz = 0;
	_init_listhead(&pcmd->list);
	r8712_enqueue_cmd(pcmdpriv, pcmd);
	return ret;
}

/*
 * r8712_set_key - queue a firmware command installing key `keyid`.
 *
 * For 802.1X (AuthAlgrthm == 2) the group cipher (XGrpPrivacy) is used,
 * otherwise the pairwise/WEP cipher (PrivacyAlgrthm).  Returns _SUCCESS
 * once the command is enqueued, _FAIL on allocation failure or an
 * unusable keyid/algorithm.
 */
sint r8712_set_key(struct _adapter *adapter,
		   struct security_priv *psecuritypriv,
		   sint keyid)
{
	struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
	struct cmd_obj *pcmd;
	struct setkey_parm *psetkeyparm;
	u8 keylen;

	pcmd = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
	if (pcmd == NULL)
		return _FAIL;
	psetkeyparm = (struct setkey_parm *)_malloc(
			sizeof(struct setkey_parm));
	if (psetkeyparm == NULL) {
		kfree((unsigned char *)pcmd);
		return _FAIL;
	}
	memset(psetkeyparm, 0, sizeof(struct setkey_parm));
	if (psecuritypriv->AuthAlgrthm == 2) { /* 802.1X */
		psetkeyparm->algorithm =
			 (u8)psecuritypriv->XGrpPrivacy;
	} else { /* WEP */
psetkeyparm->algorithm = (u8)psecuritypriv->PrivacyAlgrthm; } psetkeyparm->keyid = (u8)keyid; switch (psetkeyparm->algorithm) { case _WEP40_: keylen = 5; memcpy(psetkeyparm->key, psecuritypriv->DefKey[keyid].skey, keylen); break; case _WEP104_: keylen = 13; memcpy(psetkeyparm->key, psecuritypriv->DefKey[keyid].skey, keylen); break; case _TKIP_: if (keyid < 1 || keyid > 2) return _FAIL; keylen = 16; memcpy(psetkeyparm->key, &psecuritypriv->XGrpKey[keyid - 1], keylen); psetkeyparm->grpkey = 1; break; case _AES_: if (keyid < 1 || keyid > 2) return _FAIL; keylen = 16; memcpy(psetkeyparm->key, &psecuritypriv->XGrpKey[keyid - 1], keylen); psetkeyparm->grpkey = 1; break; default: return _FAIL; } pcmd->cmdcode = _SetKey_CMD_; pcmd->parmbuf = (u8 *)psetkeyparm; pcmd->cmdsz = (sizeof(struct setkey_parm)); pcmd->rsp = NULL; pcmd->rspsz = 0; _init_listhead(&pcmd->list); r8712_enqueue_cmd(pcmdpriv, pcmd); return _SUCCESS; } /* adjust IEs for r8712_joinbss_cmd in WMM */ int r8712_restruct_wmm_ie(struct _adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len, uint initial_out_len) { unsigned int ielength = 0; unsigned int i, j; i = 12; /* after the fixed IE */ while (i < in_len) { ielength = initial_out_len; if (in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 && in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 && in_ie[i + 5] == 0x02 && i + 5 < in_len) { /*WMM element ID and OUI*/ for (j = i; j < i + 9; j++) { out_ie[ielength] = in_ie[j]; ielength++; } out_ie[initial_out_len + 1] = 0x07; out_ie[initial_out_len + 6] = 0x00; out_ie[initial_out_len + 8] = 0x00; break; } i += (in_ie[i + 1] + 2); /* to the next IE element */ } return ielength; } /* * Ported from 8185: IsInPreAuthKeyList(). 
* * Search by BSSID, * Return Value: * -1 :if there is no pre-auth key in the table * >=0 :if there is pre-auth key, and return the entry id */ static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid) { struct security_priv *psecuritypriv = &Adapter->securitypriv; int i = 0; do { if (psecuritypriv->PMKIDList[i].bUsed && (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN))) break; else i++; } while (i < NUM_PMKID_CACHE); if (i == NUM_PMKID_CACHE) { i = -1; /* Could not find. */ } else { ; /* There is one Pre-Authentication Key for the * specific BSSID. */ } return i; } sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len) { u8 authmode = 0, securitytype, match; u8 sec_ie[255], uncst_oui[4], bkup_ie[255]; u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01}; uint ielength, cnt, remove_cnt; int iEntry; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct security_priv *psecuritypriv = &adapter->securitypriv; uint ndisauthmode = psecuritypriv->ndisauthtype; uint ndissecuritytype = psecuritypriv->ndisencryptstatus; if ((ndisauthmode == Ndis802_11AuthModeWPA) || (ndisauthmode == Ndis802_11AuthModeWPAPSK)) { authmode = _WPA_IE_ID_; uncst_oui[0] = 0x0; uncst_oui[1] = 0x50; uncst_oui[2] = 0xf2; } if ((ndisauthmode == Ndis802_11AuthModeWPA2) || (ndisauthmode == Ndis802_11AuthModeWPA2PSK)) { authmode = _WPA2_IE_ID_; uncst_oui[0] = 0x0; uncst_oui[1] = 0x0f; uncst_oui[2] = 0xac; } switch (ndissecuritytype) { case Ndis802_11Encryption1Enabled: case Ndis802_11Encryption1KeyAbsent: securitytype = _WEP40_; uncst_oui[3] = 0x1; break; case Ndis802_11Encryption2Enabled: case Ndis802_11Encryption2KeyAbsent: securitytype = _TKIP_; uncst_oui[3] = 0x2; break; case Ndis802_11Encryption3Enabled: case Ndis802_11Encryption3KeyAbsent: securitytype = _AES_; uncst_oui[3] = 0x4; break; default: securitytype = _NO_PRIVACY_; break; } /*Search required WPA or WPA2 IE and copy to sec_ie[] */ cnt = 12; match = false; while (cnt < in_len) { if (in_ie[cnt] == 
authmode) { if ((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) { memcpy(&sec_ie[0], &in_ie[cnt], in_ie[cnt + 1] + 2); match = true; break; } if (authmode == _WPA2_IE_ID_) { memcpy(&sec_ie[0], &in_ie[cnt], in_ie[cnt + 1] + 2); match = true; break; } if (((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt + 2], &wpa_oui[0], 4))) || (authmode == _WPA2_IE_ID_)) memcpy(&bkup_ie[0], &in_ie[cnt], in_ie[cnt + 1] + 2); } cnt += in_ie[cnt+1] + 2; /*get next*/ } /*restruct WPA IE or WPA2 IE in sec_ie[] */ if (match == true) { if (sec_ie[0] == _WPA_IE_ID_) { /* parsing SSN IE to select required encryption * algorithm, and set the bc/mc encryption algorithm */ while (true) { /*check wpa_oui tag*/ if (memcmp(&sec_ie[2], &wpa_oui[0], 4)) { match = false; break; } if ((sec_ie[6] != 0x01) || (sec_ie[7] != 0x0)) { /*IE Ver error*/ match = false; break; } if (!memcmp(&sec_ie[8], &wpa_oui[0], 3)) { /* get bc/mc encryption type (group * key type)*/ switch (sec_ie[11]) { case 0x0: /*none*/ psecuritypriv->XGrpPrivacy = _NO_PRIVACY_; break; case 0x1: /*WEP_40*/ psecuritypriv->XGrpPrivacy = _WEP40_; break; case 0x2: /*TKIP*/ psecuritypriv->XGrpPrivacy = _TKIP_; break; case 0x3: /*AESCCMP*/ case 0x4: psecuritypriv->XGrpPrivacy = _AES_; break; case 0x5: /*WEP_104*/ psecuritypriv->XGrpPrivacy = _WEP104_; break; } } else { match = false; break; } if (sec_ie[12] == 0x01) { /*check the unicast encryption type*/ if (memcmp(&sec_ie[14], &uncst_oui[0], 4)) { match = false; break; } /*else the uncst_oui is match*/ } else { /*mixed mode, unicast_enc_type > 1*/ /*select the uncst_oui and remove * the other uncst_oui*/ cnt = sec_ie[12]; remove_cnt = (cnt-1) * 4; sec_ie[12] = 0x01; memcpy(&sec_ie[14], &uncst_oui[0], 4); /*remove the other unicast suit*/ memcpy(&sec_ie[18], &sec_ie[18 + remove_cnt], sec_ie[1] - 18 + 2 - remove_cnt); sec_ie[1] = sec_ie[1] - remove_cnt; } break; } } if (authmode == _WPA2_IE_ID_) { /* parsing RSN IE to select required encryption * algorithm, and set 
the bc/mc encryption algorithm */ while (true) { if ((sec_ie[2] != 0x01) || (sec_ie[3] != 0x0)) { /*IE Ver error*/ match = false; break; } if (!memcmp(&sec_ie[4], &uncst_oui[0], 3)) { /*get bc/mc encryption type*/ switch (sec_ie[7]) { case 0x1: /*WEP_40*/ psecuritypriv->XGrpPrivacy = _WEP40_; break; case 0x2: /*TKIP*/ psecuritypriv->XGrpPrivacy = _TKIP_; break; case 0x4: /*AESWRAP*/ psecuritypriv->XGrpPrivacy = _AES_; break; case 0x5: /*WEP_104*/ psecuritypriv->XGrpPrivacy = _WEP104_; break; default: /*one*/ psecuritypriv->XGrpPrivacy = _NO_PRIVACY_; break; } } else { match = false; break; } if (sec_ie[8] == 0x01) { /*check the unicast encryption type*/ if (memcmp(&sec_ie[10], &uncst_oui[0], 4)) { match = false; break; } /*else the uncst_oui is match*/ } else { /*mixed mode, unicast_enc_type > 1*/ /*select the uncst_oui and remove the * other uncst_oui*/ cnt = sec_ie[8]; remove_cnt = (cnt-1)*4; sec_ie[8] = 0x01; memcpy(&sec_ie[10], &uncst_oui[0], 4); /*remove the other unicast suit*/ memcpy(&sec_ie[14], &sec_ie[14 + remove_cnt], (sec_ie[1] - 14 + 2 - remove_cnt)); sec_ie[1] = sec_ie[1]-remove_cnt; } break; } } } if ((authmode == _WPA_IE_ID_) || (authmode == _WPA2_IE_ID_)) { /*copy fixed ie*/ memcpy(out_ie, in_ie, 12); ielength = 12; /*copy RSN or SSN*/ if (match == true) { memcpy(&out_ie[ielength], &sec_ie[0], sec_ie[1]+2); ielength += sec_ie[1] + 2; if (authmode == _WPA2_IE_ID_) { /*the Pre-Authentication bit should be zero*/ out_ie[ielength - 1] = 0; out_ie[ielength - 2] = 0; } r8712_report_sec_ie(adapter, authmode, sec_ie); } } else { /*copy fixed ie only*/ memcpy(out_ie, in_ie, 12); ielength = 12; if (psecuritypriv->wps_phase == true) { memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len); ielength += psecuritypriv->wps_ie_len; } } iEntry = SecIsInPMKIDList(adapter, pmlmepriv->assoc_bssid); if (iEntry < 0) return ielength; else { if (authmode == _WPA2_IE_ID_) { out_ie[ielength] = 1; ielength++; out_ie[ielength] = 0; /*PMKID count = 0x0100*/ 
ielength++; memcpy(&out_ie[ielength], &psecuritypriv->PMKIDList[iEntry].PMKID, 16); ielength += 16; out_ie[13] += 18;/*PMKID length = 2+16*/ } } return ielength; } void r8712_init_registrypriv_dev_network(struct _adapter *adapter) { struct registry_priv *pregistrypriv = &adapter->registrypriv; struct eeprom_priv *peepriv = &adapter->eeprompriv; struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network; u8 *myhwaddr = myid(peepriv); memcpy(pdev_network->MacAddress, myhwaddr, ETH_ALEN); memcpy(&pdev_network->Ssid, &pregistrypriv->ssid, sizeof(struct ndis_802_11_ssid)); pdev_network->Configuration.Length = sizeof(struct NDIS_802_11_CONFIGURATION); pdev_network->Configuration.BeaconPeriod = 100; pdev_network->Configuration.FHConfig.Length = 0; pdev_network->Configuration.FHConfig.HopPattern = 0; pdev_network->Configuration.FHConfig.HopSet = 0; pdev_network->Configuration.FHConfig.DwellTime = 0; } void r8712_update_registrypriv_dev_network(struct _adapter *adapter) { int sz = 0; struct registry_priv *pregistrypriv = &adapter->registrypriv; struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network; struct security_priv *psecuritypriv = &adapter->securitypriv; struct wlan_network *cur_network = &adapter->mlmepriv.cur_network; pdev_network->Privacy = cpu_to_le32(psecuritypriv->PrivacyAlgrthm > 0 ? 
1 : 0) ; /* adhoc no 802.1x */ pdev_network->Rssi = 0; switch (pregistrypriv->wireless_mode) { case WIRELESS_11B: pdev_network->NetworkTypeInUse = cpu_to_le32(Ndis802_11DS); break; case WIRELESS_11G: case WIRELESS_11BG: pdev_network->NetworkTypeInUse = cpu_to_le32(Ndis802_11OFDM24); break; case WIRELESS_11A: pdev_network->NetworkTypeInUse = cpu_to_le32(Ndis802_11OFDM5); break; default: /* TODO */ break; } pdev_network->Configuration.DSConfig = cpu_to_le32( pregistrypriv->channel); if (cur_network->network.InfrastructureMode == Ndis802_11IBSS) pdev_network->Configuration.ATIMWindow = cpu_to_le32(3); pdev_network->InfrastructureMode = cpu_to_le32( cur_network->network.InfrastructureMode); /* 1. Supported rates * 2. IE */ sz = r8712_generate_ie(pregistrypriv); pdev_network->IELength = sz; pdev_network->Length = r8712_get_ndis_wlan_bssid_ex_sz( (struct ndis_wlan_bssid_ex *)pdev_network); } /*the function is at passive_level*/ void r8712_joinbss_reset(struct _adapter *padapter) { int i; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; /* todo: if you want to do something io/reg/hw setting before join_bss, * please add code here */ phtpriv->ampdu_enable = false;/*reset to disabled*/ for (i = 0; i < 16; i++) phtpriv->baddbareq_issued[i] = false;/*reset it*/ if (phtpriv->ht_option) { /* validate usb rx aggregation */ r8712_write8(padapter, 0x102500D9, 48);/*TH = 48 pages, 6k*/ } else { /* invalidate usb rx aggregation */ /* TH=1 => means that invalidate usb rx aggregation */ r8712_write8(padapter, 0x102500D9, 1); } } /*the function is >= passive_level*/ unsigned int r8712_restructure_ht_ie(struct _adapter *padapter, u8 *in_ie, u8 *out_ie, uint in_len, uint *pout_len) { u32 ielen, out_len; unsigned char *p, *pframe; struct ieee80211_ht_cap ht_capie; unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00}; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct qos_priv *pqospriv = &pmlmepriv->qospriv; struct 
ht_priv *phtpriv = &pmlmepriv->htpriv; phtpriv->ht_option = 0; p = r8712_get_ie(in_ie+12, _HT_CAPABILITY_IE_, &ielen, in_len-12); if (p && (ielen > 0)) { if (pqospriv->qos_option == 0) { out_len = *pout_len; pframe = r8712_set_ie(out_ie+out_len, _VENDOR_SPECIFIC_IE_, _WMM_IE_Length_, WMM_IE, pout_len); pqospriv->qos_option = 1; } out_len = *pout_len; memset(&ht_capie, 0, sizeof(struct ieee80211_ht_cap)); ht_capie.cap_info = IEEE80211_HT_CAP_SUP_WIDTH | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_MAX_AMSDU | IEEE80211_HT_CAP_DSSSCCK40; ht_capie.ampdu_params_info = (IEEE80211_HT_CAP_AMPDU_FACTOR & 0x03) | (IEEE80211_HT_CAP_AMPDU_DENSITY & 0x00); pframe = r8712_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_, sizeof(struct ieee80211_ht_cap), (unsigned char *)&ht_capie, pout_len); phtpriv->ht_option = 1; } return phtpriv->ht_option; } /* the function is > passive_level (in critical_section) */ static void update_ht_cap(struct _adapter *padapter, u8 *pie, uint ie_len) { u8 *p, max_ampdu_sz; int i, len; struct sta_info *bmc_sta, *psta; struct ieee80211_ht_cap *pht_capie; struct ieee80211_ht_addt_info *pht_addtinfo; struct recv_reorder_ctrl *preorder_ctrl; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; struct registry_priv *pregistrypriv = &padapter->registrypriv; struct wlan_network *pcur_network = &(pmlmepriv->cur_network); if (!phtpriv->ht_option) return; /* maybe needs check if ap supports rx ampdu. 
 */
	if ((phtpriv->ampdu_enable == false) &&
	    (pregistrypriv->ampdu_enable == 1))
		phtpriv->ampdu_enable = true;
	/* check Max Rx A-MPDU Size from the peer's HT capability IE */
	len = 0;
	p = r8712_get_ie(pie + sizeof(struct NDIS_802_11_FIXED_IEs),
			 _HT_CAPABILITY_IE_, &len,
			 ie_len - sizeof(struct NDIS_802_11_FIXED_IEs));
	if (p && len > 0) {
		pht_capie = (struct ieee80211_ht_cap *)(p + 2);
		max_ampdu_sz = (pht_capie->ampdu_params_info &
				IEEE80211_HT_CAP_AMPDU_FACTOR);
		/* max_ampdu_sz (kbytes); the factor field is an exponent:
		 * size = 2^(factor + 3) */
		max_ampdu_sz = 1 << (max_ampdu_sz + 3);
		phtpriv->rx_ampdu_maxlen = max_ampdu_sz;
	}
	/* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info
	 * if A-MPDU Rx is enabled, reseting rx_ordering_ctrl
	 * wstart_b(indicate_seq) to default value=0xffff
	 * todo: check if AP can send A-MPDU packets
	 */
	bmc_sta = r8712_get_bcmc_stainfo(padapter);
	if (bmc_sta) {
		/* one reorder control per TID (16 of them) */
		for (i = 0; i < 16; i++) {
			preorder_ctrl = &bmc_sta->recvreorder_ctrl[i];
			preorder_ctrl->indicate_seq = 0xffff;
			preorder_ctrl->wend_b = 0xffff;
		}
	}
	psta = r8712_get_stainfo(&padapter->stapriv,
				 pcur_network->network.MacAddress);
	if (psta) {
		for (i = 0; i < 16 ; i++) {
			preorder_ctrl = &psta->recvreorder_ctrl[i];
			preorder_ctrl->indicate_seq = 0xffff;
			preorder_ctrl->wend_b = 0xffff;
		}
	}
	len = 0;
	p = r8712_get_ie(pie + sizeof(struct NDIS_802_11_FIXED_IEs),
			 _HT_ADD_INFO_IE_, &len,
			 ie_len - sizeof(struct NDIS_802_11_FIXED_IEs));
	/* NOTE(review): pht_addtinfo is assigned but never used here —
	 * presumably a placeholder for future HT-operation handling. */
	if (p && len > 0)
		pht_addtinfo = (struct ieee80211_ht_addt_info *)(p + 2);
}

/*
 * r8712_issue_addbareq_cmd - issue an ADDBA request for the given TID
 * (priority) once HT is active and A-MPDU is enabled.  Each TID is
 * requested at most once per association (tracked in baddbareq_issued).
 */
void r8712_issue_addbareq_cmd(struct _adapter *padapter, int priority)
{
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct ht_priv *phtpriv = &pmlmepriv->htpriv;

	if ((phtpriv->ht_option == 1) &&
	    (phtpriv->ampdu_enable == true)) {
		if (phtpriv->baddbareq_issued[priority] == false) {
			r8712_addbareq_cmd(padapter, (u8)priority);
			phtpriv->baddbareq_issued[priority] = true;
		}
	}
}
gpl-2.0
temasek/android_kernel_samsung_hlte
drivers/staging/comedi/drivers/amplc_dio200.c
4896
40849
/* comedi/drivers/amplc_dio200.c Driver for Amplicon PC272E and PCI272 DIO boards. (Support for other boards in Amplicon 200 series may be added at a later date, e.g. PCI215.) Copyright (C) 2005 MEV Ltd. <http://www.mev.co.uk/> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: amplc_dio200 Description: Amplicon 200 Series Digital I/O Author: Ian Abbott <abbotti@mev.co.uk> Devices: [Amplicon] PC212E (pc212e), PC214E (pc214e), PC215E (pc215e), PCI215 (pci215 or amplc_dio200), PC218E (pc218e), PC272E (pc272e), PCI272 (pci272 or amplc_dio200) Updated: Wed, 22 Oct 2008 13:36:02 +0100 Status: works Configuration options - PC212E, PC214E, PC215E, PC218E, PC272E: [0] - I/O port base address [1] - IRQ (optional, but commands won't work without it) Configuration options - PCI215, PCI272: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first available PCI device will be used. Passing a zero for an option is the same as leaving it unspecified. 
SUBDEVICES PC218E PC212E PC215E/PCI215 ------------- ------------- ------------- Subdevices 7 6 5 0 CTR-X1 PPI-X PPI-X 1 CTR-X2 CTR-Y1 PPI-Y 2 CTR-Y1 CTR-Y2 CTR-Z1 3 CTR-Y2 CTR-Z1 CTR-Z2 4 CTR-Z1 CTR-Z2 INTERRUPT 5 CTR-Z2 INTERRUPT 6 INTERRUPT PC214E PC272E/PCI272 ------------- ------------- Subdevices 4 4 0 PPI-X PPI-X 1 PPI-Y PPI-Y 2 CTR-Z1* PPI-Z 3 INTERRUPT* INTERRUPT Each PPI is a 8255 chip providing 24 DIO channels. The DIO channels are configurable as inputs or outputs in four groups: Port A - channels 0 to 7 Port B - channels 8 to 15 Port CL - channels 16 to 19 Port CH - channels 20 to 23 Only mode 0 of the 8255 chips is supported. Each CTR is a 8254 chip providing 3 16-bit counter channels. Each channel is configured individually with INSN_CONFIG instructions. The specific type of configuration instruction is specified in data[0]. Some configuration instructions expect an additional parameter in data[1]; others return a value in data[1]. The following configuration instructions are supported: INSN_CONFIG_SET_COUNTER_MODE. Sets the counter channel's mode and BCD/binary setting specified in data[1]. INSN_CONFIG_8254_READ_STATUS. Reads the status register value for the counter channel into data[1]. INSN_CONFIG_SET_CLOCK_SRC. Sets the counter channel's clock source as specified in data[1] (this is a hardware-specific value). Not supported on PC214E. For the other boards, valid clock sources are 0 to 7 as follows: 0. CLK n, the counter channel's dedicated CLK input from the SK1 connector. (N.B. for other values, the counter channel's CLKn pin on the SK1 connector is an output!) 1. Internal 10 MHz clock. 2. Internal 1 MHz clock. 3. Internal 100 kHz clock. 4. Internal 10 kHz clock. 5. Internal 1 kHz clock. 6. OUT n-1, the output of counter channel n-1 (see note 1 below). 7. Ext Clock, the counter chip's dedicated Ext Clock input from the SK1 connector. This pin is shared by all three counter channels on the chip. INSN_CONFIG_GET_CLOCK_SRC. 
Returns the counter channel's current clock source in data[1]. For internal clock sources, data[2] is set to the period in ns. INSN_CONFIG_SET_GATE_SRC. Sets the counter channel's gate source as specified in data[2] (this is a hardware-specific value). Not supported on PC214E. For the other boards, valid gate sources are 0 to 7 as follows: 0. VCC (internal +5V d.c.), i.e. gate permanently enabled. 1. GND (internal 0V d.c.), i.e. gate permanently disabled. 2. GAT n, the counter channel's dedicated GAT input from the SK1 connector. (N.B. for other values, the counter channel's GATn pin on the SK1 connector is an output!) 3. /OUT n-2, the inverted output of counter channel n-2 (see note 2 below). 4. Reserved. 5. Reserved. 6. Reserved. 7. Reserved. INSN_CONFIG_GET_GATE_SRC. Returns the counter channel's current gate source in data[2]. Clock and gate interconnection notes: 1. Clock source OUT n-1 is the output of the preceding channel on the same counter subdevice if n > 0, or the output of channel 2 on the preceding counter subdevice (see note 3) if n = 0. 2. Gate source /OUT n-2 is the inverted output of channel 0 on the same counter subdevice if n = 2, or the inverted output of channel n+1 on the preceding counter subdevice (see note 3) if n < 2. 3. The counter subdevices are connected in a ring, so the highest counter subdevice precedes the lowest. The 'INTERRUPT' subdevice pretends to be a digital input subdevice. The digital inputs come from the interrupt status register. The number of channels matches the number of interrupt sources. The PC214E does not have an interrupt status register; see notes on 'INTERRUPT SOURCES' below. 
INTERRUPT SOURCES PC218E PC212E PC215E/PCI215 ------------- ------------- ------------- Sources 6 6 6 0 CTR-X1-OUT PPI-X-C0 PPI-X-C0 1 CTR-X2-OUT PPI-X-C3 PPI-X-C3 2 CTR-Y1-OUT CTR-Y1-OUT PPI-Y-C0 3 CTR-Y2-OUT CTR-Y2-OUT PPI-Y-C3 4 CTR-Z1-OUT CTR-Z1-OUT CTR-Z1-OUT 5 CTR-Z2-OUT CTR-Z2-OUT CTR-Z2-OUT PC214E PC272E/PCI272 ------------- ------------- Sources 1 6 0 JUMPER-J5 PPI-X-C0 1 PPI-X-C3 2 PPI-Y-C0 3 PPI-Y-C3 4 PPI-Z-C0 5 PPI-Z-C3 When an interrupt source is enabled in the interrupt source enable register, a rising edge on the source signal latches the corresponding bit to 1 in the interrupt status register. When the interrupt status register value as a whole (actually, just the 6 least significant bits) goes from zero to non-zero, the board will generate an interrupt. For level-triggered hardware interrupts (PCI card), the interrupt will remain asserted until the interrupt status register is cleared to zero. For edge-triggered hardware interrupts (ISA card), no further interrupts will occur until the interrupt status register is cleared to zero. To clear a bit to zero in the interrupt status register, the corresponding interrupt source must be disabled in the interrupt source enable register (there is no separate interrupt clear register). The PC214E does not have an interrupt source enable register or an interrupt status register; its 'INTERRUPT' subdevice has a single channel and its interrupt source is selected by the position of jumper J5. COMMANDS The driver supports a read streaming acquisition command on the 'INTERRUPT' subdevice. The channel list selects the interrupt sources to be enabled. All channels will be sampled together (convert_src == TRIG_NOW). The scan begins a short time after the hardware interrupt occurs, subject to interrupt latencies (scan_begin_src == TRIG_EXT, scan_begin_arg == 0). The value read from the interrupt status register is packed into a short value, one bit per requested channel, in the order they appear in the channel list. 
 */

#include <linux/interrupt.h>
#include <linux/slab.h>

#include "../comedidev.h"

#include "comedi_pci.h"

#include "8255.h"
#include "8253.h"

#define DIO200_DRIVER_NAME	"amplc_dio200"

/* PCI IDs */
#define PCI_VENDOR_ID_AMPLICON 0x14dc
#define PCI_DEVICE_ID_AMPLICON_PCI272 0x000a
#define PCI_DEVICE_ID_AMPLICON_PCI215 0x000b
#define PCI_DEVICE_ID_INVALID 0xffff	/* wildcard devid for the "any PCI model" board entry */

/* 200 series registers */
#define DIO200_IO_SIZE		0x20	/* size of the board's I/O region */
#define DIO200_XCLK_SCE		0x18	/* Group X clock selection register */
#define DIO200_YCLK_SCE		0x19	/* Group Y clock selection register */
#define DIO200_ZCLK_SCE		0x1a	/* Group Z clock selection register */
#define DIO200_XGAT_SCE		0x1b	/* Group X gate selection register */
#define DIO200_YGAT_SCE		0x1c	/* Group Y gate selection register */
#define DIO200_ZGAT_SCE		0x1d	/* Group Z gate selection register */
#define DIO200_INT_SCE		0x1e	/* Interrupt enable/status register */

/*
 * Macros for constructing value for DIO_200_?CLK_SCE and
 * DIO_200_?GAT_SCE registers:
 *
 * 'which' is: 0 for CTR-X1, CTR-Y1, CTR-Z1; 1 for CTR-X2, CTR-Y2 or CTR-Z2.
 * 'chan' is the channel: 0, 1 or 2.
 * 'source' is the signal source: 0 to 7.
 */
#define CLK_SCE(which, chan, source) (((which) << 5) | ((chan) << 3) | (source))
#define GAT_SCE(which, chan, source) (((which) << 5) | ((chan) << 3) | (source))

/*
 * Periods of the internal clock sources in nanoseconds.
 * Indexed by clock source number; 0 means "not an internal clock".
 */
static const unsigned clock_period[8] = {
	0,			/* dedicated clock input/output pin */
	100,			/* 10 MHz */
	1000,			/* 1 MHz */
	10000,			/* 100 kHz */
	100000,			/* 10 kHz */
	1000000,		/* 1 kHz */
	0,			/* OUT N-1 */
	0			/* group clock input pin */
};

/*
 * Board descriptions.
 */

enum dio200_bustype { isa_bustype, pci_bustype };

enum dio200_model {
	pc212e_model,
	pc214e_model,
	pc215e_model,
	pci215_model,
	pc218e_model,
	pc272e_model,
	pci272_model,
	anypci_model
};

enum dio200_layout {
	pc212_layout,
	pc214_layout,
	pc215_layout,
	pc218_layout,
	pc272_layout
};

/* Static description of one supported board model. */
struct dio200_board {
	const char *name;
	unsigned short devid;	/* PCI device ID (PCI boards only) */
	enum dio200_bustype bustype;
	enum dio200_model model;
	enum dio200_layout layout;	/* index into dio200_layouts[] */
};

static const struct dio200_board dio200_boards[] = {
	{
	 .name = "pc212e",
	 .bustype = isa_bustype,
	 .model = pc212e_model,
	 .layout = pc212_layout,
	 },
	{
	 .name = "pc214e",
	 .bustype = isa_bustype,
	 .model = pc214e_model,
	 .layout = pc214_layout,
	 },
	{
	 .name = "pc215e",
	 .bustype = isa_bustype,
	 .model = pc215e_model,
	 .layout = pc215_layout,
	 },
#ifdef CONFIG_COMEDI_PCI
	{
	 .name = "pci215",
	 .devid = PCI_DEVICE_ID_AMPLICON_PCI215,
	 .bustype = pci_bustype,
	 .model = pci215_model,
	 .layout = pc215_layout,
	 },
#endif
	{
	 .name = "pc218e",
	 .bustype = isa_bustype,
	 .model = pc218e_model,
	 .layout = pc218_layout,
	 },
	{
	 .name = "pc272e",
	 .bustype = isa_bustype,
	 .model = pc272e_model,
	 .layout = pc272_layout,
	 },
#ifdef CONFIG_COMEDI_PCI
	{
	 .name = "pci272",
	 .devid = PCI_DEVICE_ID_AMPLICON_PCI272,
	 .bustype = pci_bustype,
	 .model = pci272_model,
	 .layout = pc272_layout,
	 },
#endif
#ifdef CONFIG_COMEDI_PCI
	{
	 .name = DIO200_DRIVER_NAME,
	 .devid = PCI_DEVICE_ID_INVALID,
	 .bustype = pci_bustype,
	 .model = anypci_model,	/* wildcard */
	 },
#endif
};

/*
 * Layout descriptions - some ISA and PCI board descriptions share the same
 * layout.
*/ enum dio200_sdtype { sd_none, sd_intr, sd_8255, sd_8254 }; #define DIO200_MAX_SUBDEVS 7 #define DIO200_MAX_ISNS 6 struct dio200_layout_struct { unsigned short n_subdevs; /* number of subdevices */ unsigned char sdtype[DIO200_MAX_SUBDEVS]; /* enum dio200_sdtype */ unsigned char sdinfo[DIO200_MAX_SUBDEVS]; /* depends on sdtype */ char has_int_sce; /* has interrupt enable/status register */ char has_clk_gat_sce; /* has clock/gate selection registers */ }; static const struct dio200_layout_struct dio200_layouts[] = { [pc212_layout] = { .n_subdevs = 6, .sdtype = {sd_8255, sd_8254, sd_8254, sd_8254, sd_8254, sd_intr}, .sdinfo = {0x00, 0x08, 0x0C, 0x10, 0x14, 0x3F}, .has_int_sce = 1, .has_clk_gat_sce = 1, }, [pc214_layout] = { .n_subdevs = 4, .sdtype = {sd_8255, sd_8255, sd_8254, sd_intr}, .sdinfo = {0x00, 0x08, 0x10, 0x01}, .has_int_sce = 0, .has_clk_gat_sce = 0, }, [pc215_layout] = { .n_subdevs = 5, .sdtype = {sd_8255, sd_8255, sd_8254, sd_8254, sd_intr}, .sdinfo = {0x00, 0x08, 0x10, 0x14, 0x3F}, .has_int_sce = 1, .has_clk_gat_sce = 1, }, [pc218_layout] = { .n_subdevs = 7, .sdtype = {sd_8254, sd_8254, sd_8255, sd_8254, sd_8254, sd_intr}, .sdinfo = {0x00, 0x04, 0x08, 0x0C, 0x10, 0x14, 0x3F}, .has_int_sce = 1, .has_clk_gat_sce = 1, }, [pc272_layout] = { .n_subdevs = 4, .sdtype = {sd_8255, sd_8255, sd_8255, sd_intr}, .sdinfo = {0x00, 0x08, 0x10, 0x3F}, .has_int_sce = 1, .has_clk_gat_sce = 0, }, }; /* * PCI driver table. 
 */

#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(dio200_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI215) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI272) },
	{0}
};

MODULE_DEVICE_TABLE(pci, dio200_pci_table);
#endif /* CONFIG_COMEDI_PCI */

/*
 * Useful for shorthand access to the particular board structure
 */
#define thisboard ((const struct dio200_board *)dev->board_ptr)
#define thislayout (&dio200_layouts[((struct dio200_board *) \
	dev->board_ptr)->layout])

/* this structure is for data unique to this hardware driver.  If
   several hardware drivers keep similar information in this structure,
   feel free to suggest moving the variable to the struct comedi_device
   struct.  */
struct dio200_private {
#ifdef CONFIG_COMEDI_PCI
	struct pci_dev *pci_dev;	/* PCI device */
#endif
	int intr_sd;		/* index of the 'INTERRUPT' subdevice, or -1 */
};

#define devpriv ((struct dio200_private *)dev->private)

/* Per-subdevice state for an '8254' counter subdevice. */
struct dio200_subdev_8254 {
	unsigned long iobase;	/* Counter base address */
	unsigned long clk_sce_iobase;	/* CLK_SCE base address */
	unsigned long gat_sce_iobase;	/* GAT_SCE base address */
	int which;		/* Bit 5 of CLK_SCE or GAT_SCE */
	int has_clk_gat_sce;
	unsigned clock_src[3];	/* Current clock sources */
	unsigned gate_src[3];	/* Current gate sources */
	spinlock_t spinlock;	/* serializes access to the chip registers */
};

/* Per-subdevice state for an 'INTERRUPT' subdevice. */
struct dio200_subdev_intr {
	unsigned long iobase;
	spinlock_t spinlock;	/* protects the fields below and the register */
	int active;		/* non-zero while an async command is running */
	int has_int_sce;
	unsigned int valid_isns;	/* mask of supported interrupt sources */
	unsigned int enabled_isns;	/* mask of currently enabled sources */
	unsigned int stopcount;	/* scans remaining (stop_src == TRIG_COUNT) */
	int continuous;		/* non-zero for stop_src == TRIG_NONE */
};

/*
 * The struct comedi_driver structure tells the Comedi core module
 * which functions to call to configure/deconfigure (attach/detach)
 * the board, and also about the kernel module that contains
 * the device code.
 */
static int dio200_attach(struct comedi_device *dev,
			 struct comedi_devconfig *it);
static int dio200_detach(struct comedi_device *dev);

static struct comedi_driver driver_amplc_dio200 = {
	.driver_name = DIO200_DRIVER_NAME,
	.module = THIS_MODULE,
	.attach = dio200_attach,
	.detach = dio200_detach,
	.board_name = &dio200_boards[0].name,
	.offset = sizeof(struct dio200_board),
	.num_names = ARRAY_SIZE(dio200_boards),
};

#ifdef CONFIG_COMEDI_PCI
static int __devinit driver_amplc_dio200_pci_probe(struct pci_dev *dev,
						   const struct pci_device_id
						   *ent)
{
	return comedi_pci_auto_config(dev, driver_amplc_dio200.driver_name);
}

static void __devexit driver_amplc_dio200_pci_remove(struct pci_dev *dev)
{
	comedi_pci_auto_unconfig(dev);
}

static struct pci_driver driver_amplc_dio200_pci_driver = {
	.id_table = dio200_pci_table,
	.probe = &driver_amplc_dio200_pci_probe,
	.remove = __devexit_p(&driver_amplc_dio200_pci_remove)
};

/* Register both the comedi driver and the PCI driver (PCI builds). */
static int __init driver_amplc_dio200_init_module(void)
{
	int retval;

	retval = comedi_driver_register(&driver_amplc_dio200);
	if (retval < 0)
		return retval;

	driver_amplc_dio200_pci_driver.name =
	    (char *)driver_amplc_dio200.driver_name;
	return pci_register_driver(&driver_amplc_dio200_pci_driver);
}

static void __exit driver_amplc_dio200_cleanup_module(void)
{
	pci_unregister_driver(&driver_amplc_dio200_pci_driver);
	comedi_driver_unregister(&driver_amplc_dio200);
}

module_init(driver_amplc_dio200_init_module);
module_exit(driver_amplc_dio200_cleanup_module);
#else
/* ISA-only build: just register the comedi driver. */
static int __init driver_amplc_dio200_init_module(void)
{
	return comedi_driver_register(&driver_amplc_dio200);
}

static void __exit driver_amplc_dio200_cleanup_module(void)
{
	comedi_driver_unregister(&driver_amplc_dio200);
}

module_init(driver_amplc_dio200_init_module);
module_exit(driver_amplc_dio200_cleanup_module);
#endif

/*
 * This function looks for a PCI device matching the requested board name,
 * bus and slot.
*/ #ifdef CONFIG_COMEDI_PCI static int dio200_find_pci(struct comedi_device *dev, int bus, int slot, struct pci_dev **pci_dev_p) { struct pci_dev *pci_dev = NULL; *pci_dev_p = NULL; /* Look for matching PCI device. */ for (pci_dev = pci_get_device(PCI_VENDOR_ID_AMPLICON, PCI_ANY_ID, NULL); pci_dev != NULL; pci_dev = pci_get_device(PCI_VENDOR_ID_AMPLICON, PCI_ANY_ID, pci_dev)) { /* If bus/slot specified, check them. */ if (bus || slot) { if (bus != pci_dev->bus->number || slot != PCI_SLOT(pci_dev->devfn)) continue; } if (thisboard->model == anypci_model) { /* Match any supported model. */ int i; for (i = 0; i < ARRAY_SIZE(dio200_boards); i++) { if (dio200_boards[i].bustype != pci_bustype) continue; if (pci_dev->device == dio200_boards[i].devid) { /* Change board_ptr to matched board. */ dev->board_ptr = &dio200_boards[i]; break; } } if (i == ARRAY_SIZE(dio200_boards)) continue; } else { /* Match specific model name. */ if (pci_dev->device != thisboard->devid) continue; } /* Found a match. */ *pci_dev_p = pci_dev; return 0; } /* No match found. */ if (bus || slot) { printk(KERN_ERR "comedi%d: error! no %s found at pci %02x:%02x!\n", dev->minor, thisboard->name, bus, slot); } else { printk(KERN_ERR "comedi%d: error! no %s found!\n", dev->minor, thisboard->name); } return -EIO; } #endif /* * This function checks and requests an I/O region, reporting an error * if there is a conflict. */ static int dio200_request_region(unsigned minor, unsigned long from, unsigned long extent) { if (!from || !request_region(from, extent, DIO200_DRIVER_NAME)) { printk(KERN_ERR "comedi%d: I/O port conflict (%#lx,%lu)!\n", minor, from, extent); return -EIO; } return 0; } /* * 'insn_bits' function for an 'INTERRUPT' subdevice. */ static int dio200_subdev_intr_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct dio200_subdev_intr *subpriv = s->private; if (subpriv->has_int_sce) { /* Just read the interrupt status register. 
*/ data[1] = inb(subpriv->iobase) & subpriv->valid_isns; } else { /* No interrupt status register. */ data[0] = 0; } return 2; } /* * Called to stop acquisition for an 'INTERRUPT' subdevice. */ static void dio200_stop_intr(struct comedi_device *dev, struct comedi_subdevice *s) { struct dio200_subdev_intr *subpriv = s->private; subpriv->active = 0; subpriv->enabled_isns = 0; if (subpriv->has_int_sce) outb(0, subpriv->iobase); } /* * Called to start acquisition for an 'INTERRUPT' subdevice. */ static int dio200_start_intr(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned int n; unsigned isn_bits; struct dio200_subdev_intr *subpriv = s->private; struct comedi_cmd *cmd = &s->async->cmd; int retval = 0; if (!subpriv->continuous && subpriv->stopcount == 0) { /* An empty acquisition! */ s->async->events |= COMEDI_CB_EOA; subpriv->active = 0; retval = 1; } else { /* Determine interrupt sources to enable. */ isn_bits = 0; if (cmd->chanlist) { for (n = 0; n < cmd->chanlist_len; n++) isn_bits |= (1U << CR_CHAN(cmd->chanlist[n])); } isn_bits &= subpriv->valid_isns; /* Enable interrupt sources. */ subpriv->enabled_isns = isn_bits; if (subpriv->has_int_sce) outb(isn_bits, subpriv->iobase); } return retval; } /* * Internal trigger function to start acquisition for an 'INTERRUPT' subdevice. */ static int dio200_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum) { struct dio200_subdev_intr *subpriv; unsigned long flags; int event = 0; if (trignum != 0) return -EINVAL; subpriv = s->private; spin_lock_irqsave(&subpriv->spinlock, flags); s->async->inttrig = NULL; if (subpriv->active) event = dio200_start_intr(dev, s); spin_unlock_irqrestore(&subpriv->spinlock, flags); if (event) comedi_event(dev, s); return 1; } /* * This is called from the interrupt service routine to handle a read * scan on an 'INTERRUPT' subdevice. 
 */
static int dio200_handle_read_intr(struct comedi_device *dev,
				   struct comedi_subdevice *s)
{
	struct dio200_subdev_intr *subpriv = s->private;
	unsigned triggered;	/* accumulated mask of sources seen to fire */
	unsigned intstat;
	unsigned cur_enabled;
	unsigned int oldevents;
	unsigned long flags;

	triggered = 0;

	spin_lock_irqsave(&subpriv->spinlock, flags);
	oldevents = s->async->events;
	if (subpriv->has_int_sce) {
		/*
		 * Collect interrupt sources that have triggered and disable
		 * them temporarily.  Loop around until no extra interrupt
		 * sources have triggered, at which point, the valid part of
		 * the interrupt status register will read zero, clearing the
		 * cause of the interrupt.
		 *
		 * Mask off interrupt sources already seen to avoid infinite
		 * loop in case of misconfiguration.
		 */
		cur_enabled = subpriv->enabled_isns;
		while ((intstat = (inb(subpriv->iobase) & subpriv->valid_isns
				   & ~triggered)) != 0) {
			triggered |= intstat;
			cur_enabled &= ~triggered;
			outb(cur_enabled, subpriv->iobase);
		}
	} else {
		/*
		 * No interrupt status register.  Assume the single interrupt
		 * source has triggered.
		 */
		triggered = subpriv->enabled_isns;
	}

	if (triggered) {
		/*
		 * Some interrupt sources have triggered and have been
		 * temporarily disabled to clear the cause of the interrupt.
		 *
		 * Reenable them NOW to minimize the time they are disabled.
		 */
		cur_enabled = subpriv->enabled_isns;
		if (subpriv->has_int_sce)
			outb(cur_enabled, subpriv->iobase);

		if (subpriv->active) {
			/*
			 * The command is still active.
			 *
			 * Ignore interrupt sources that the command isn't
			 * interested in (just in case there's a race
			 * condition).
			 */
			if (triggered & subpriv->enabled_isns) {
				/* Collect scan data. */
				short val;
				unsigned int n, ch, len;

				/* Pack one bit per chanlist entry, in
				 * chanlist order. */
				val = 0;
				len = s->async->cmd.chanlist_len;
				for (n = 0; n < len; n++) {
					ch = CR_CHAN(s->async->cmd.chanlist[n]);
					if (triggered & (1U << ch))
						val |= (1U << n);
				}
				/* Write the scan to the buffer. */
				if (comedi_buf_put(s->async, val)) {
					s->async->events |= (COMEDI_CB_BLOCK |
							     COMEDI_CB_EOS);
				} else {
					/* Error!  Stop acquisition.  */
					dio200_stop_intr(dev, s);
					s->async->events |= COMEDI_CB_ERROR
					    | COMEDI_CB_OVERFLOW;
					comedi_error(dev, "buffer overflow");
				}

				/* Check for end of acquisition. */
				if (!subpriv->continuous) {
					/* stop_src == TRIG_COUNT */
					if (subpriv->stopcount > 0) {
						subpriv->stopcount--;
						if (subpriv->stopcount == 0) {
							s->async->events |=
							    COMEDI_CB_EOA;
							dio200_stop_intr(dev,
									 s);
						}
					}
				}
			}
		}
	}
	spin_unlock_irqrestore(&subpriv->spinlock, flags);

	/* Only notify the core if something actually happened. */
	if (oldevents != s->async->events)
		comedi_event(dev, s);

	return (triggered != 0);
}

/*
 * 'cancel' function for an 'INTERRUPT' subdevice.
 */
static int dio200_subdev_intr_cancel(struct comedi_device *dev,
				     struct comedi_subdevice *s)
{
	struct dio200_subdev_intr *subpriv = s->private;
	unsigned long flags;

	spin_lock_irqsave(&subpriv->spinlock, flags);
	if (subpriv->active)
		dio200_stop_intr(dev, s);

	spin_unlock_irqrestore(&subpriv->spinlock, flags);

	return 0;
}

/*
 * 'do_cmdtest' function for an 'INTERRUPT' subdevice.
 * Follows the standard comedi five-step cmdtest protocol.
 */
static int
dio200_subdev_intr_cmdtest(struct comedi_device *dev,
			   struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
	int err = 0;
	unsigned int tmp;

	/* step 1: make sure trigger sources are trivially valid */

	tmp = cmd->start_src;
	cmd->start_src &= (TRIG_NOW | TRIG_INT);
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;

	tmp = cmd->scan_begin_src;
	cmd->scan_begin_src &= TRIG_EXT;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;

	tmp = cmd->convert_src;
	cmd->convert_src &= TRIG_NOW;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;

	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;

	tmp = cmd->stop_src;
	cmd->stop_src &= (TRIG_COUNT | TRIG_NONE);
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;

	if (err)
		return 1;

	/* step 2: make sure trigger sources are unique and mutually
	   compatible */

	/* these tests are true if more than one _src bit is set */
	if ((cmd->start_src & (cmd->start_src - 1)) != 0)
		err++;
	if ((cmd->scan_begin_src & (cmd->scan_begin_src - 1)) != 0)
		err++;
	if ((cmd->convert_src & (cmd->convert_src - 1)) != 0)
		err++;
	if ((cmd->scan_end_src & (cmd->scan_end_src - 1)) != 0)
		err++;
	if ((cmd->stop_src & (cmd->stop_src - 1)) != 0)
		err++;

	if (err)
		return 2;

	/* step 3: make sure arguments are trivially compatible */

	/* cmd->start_src == TRIG_NOW || cmd->start_src == TRIG_INT */
	if (cmd->start_arg != 0) {
		cmd->start_arg = 0;
		err++;
	}

	/* cmd->scan_begin_src == TRIG_EXT */
	if (cmd->scan_begin_arg != 0) {
		cmd->scan_begin_arg = 0;
		err++;
	}

	/* cmd->convert_src == TRIG_NOW */
	if (cmd->convert_arg != 0) {
		cmd->convert_arg = 0;
		err++;
	}

	/* cmd->scan_end_src == TRIG_COUNT */
	if (cmd->scan_end_arg != cmd->chanlist_len) {
		cmd->scan_end_arg = cmd->chanlist_len;
		err++;
	}

	switch (cmd->stop_src) {
	case TRIG_COUNT:
		/* any count allowed */
		break;
	case TRIG_NONE:
		if (cmd->stop_arg != 0) {
			cmd->stop_arg = 0;
			err++;
		}
		break;
	default:
		break;
	}

	if (err)
		return 3;

	/* step 4: fix up any arguments */

	/* if (err) return 4; */

	return 0;
}

/*
 * 'do_cmd' function for an 'INTERRUPT' subdevice.
 */
static int dio200_subdev_intr_cmd(struct comedi_device *dev,
				  struct comedi_subdevice *s)
{
	struct comedi_cmd *cmd = &s->async->cmd;
	struct dio200_subdev_intr *subpriv = s->private;
	unsigned long flags;
	int event = 0;

	spin_lock_irqsave(&subpriv->spinlock, flags);
	subpriv->active = 1;

	/* Set up end of acquisition. */
	switch (cmd->stop_src) {
	case TRIG_COUNT:
		subpriv->continuous = 0;
		subpriv->stopcount = cmd->stop_arg;
		break;
	default:
		/* TRIG_NONE */
		subpriv->continuous = 1;
		subpriv->stopcount = 0;
		break;
	}

	/* Set up start of acquisition. */
	switch (cmd->start_src) {
	case TRIG_INT:
		/* defer the start until the internal trigger fires */
		s->async->inttrig = dio200_inttrig_start_intr;
		break;
	default:
		/* TRIG_NOW */
		event = dio200_start_intr(dev, s);
		break;
	}
	spin_unlock_irqrestore(&subpriv->spinlock, flags);

	if (event)
		comedi_event(dev, s);

	return 0;
}

/*
 * This function initializes an 'INTERRUPT' subdevice.
 */
static int
dio200_subdev_intr_init(struct comedi_device *dev, struct comedi_subdevice *s,
			unsigned long iobase, unsigned valid_isns,
			int has_int_sce)
{
	struct dio200_subdev_intr *subpriv;

	subpriv = kzalloc(sizeof(*subpriv), GFP_KERNEL);
	if (!subpriv) {
		printk(KERN_ERR "comedi%d: error! out of memory!\n",
		       dev->minor);
		return -ENOMEM;
	}
	subpriv->iobase = iobase;
	subpriv->has_int_sce = has_int_sce;
	subpriv->valid_isns = valid_isns;
	spin_lock_init(&subpriv->spinlock);

	if (has_int_sce)
		outb(0, subpriv->iobase);	/* Disable interrupt sources. */

	s->private = subpriv;
	/* The 'INTERRUPT' subdevice pretends to be a digital input. */
	s->type = COMEDI_SUBD_DI;
	s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
	if (has_int_sce) {
		s->n_chan = DIO200_MAX_ISNS;
		s->len_chanlist = DIO200_MAX_ISNS;
	} else {
		/* No interrupt source register.  Support single channel. */
		s->n_chan = 1;
		s->len_chanlist = 1;
	}
	s->range_table = &range_digital;
	s->maxdata = 1;
	s->insn_bits = dio200_subdev_intr_insn_bits;
	s->do_cmdtest = dio200_subdev_intr_cmdtest;
	s->do_cmd = dio200_subdev_intr_cmd;
	s->cancel = dio200_subdev_intr_cancel;

	return 0;
}

/*
 * This function cleans up an 'INTERRUPT' subdevice.
 */
static void
dio200_subdev_intr_cleanup(struct comedi_device *dev,
			   struct comedi_subdevice *s)
{
	struct dio200_subdev_intr *subpriv = s->private;
	kfree(subpriv);
}

/*
 * Interrupt service routine.
 */
static irqreturn_t dio200_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	int handled;

	/* May share the IRQ line; bail out if we're not fully attached. */
	if (!dev->attached)
		return IRQ_NONE;

	if (devpriv->intr_sd >= 0) {
		handled = dio200_handle_read_intr(dev,
						  dev->subdevices +
						  devpriv->intr_sd);
	} else {
		handled = 0;
	}

	return IRQ_RETVAL(handled);
}

/*
 * Handle 'insn_read' for an '8254' counter subdevice.
 */
static int dio200_subdev_8254_read(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   struct comedi_insn *insn,
				   unsigned int *data)
{
	struct dio200_subdev_8254 *subpriv = s->private;
	int chan = CR_CHAN(insn->chanspec);
	unsigned long flags;

	/* Lock out concurrent access to the counter chip registers. */
	spin_lock_irqsave(&subpriv->spinlock, flags);
	data[0] = i8254_read(subpriv->iobase, 0, chan);
	spin_unlock_irqrestore(&subpriv->spinlock, flags);

	return 1;
}

/*
 * Handle 'insn_write' for an '8254' counter subdevice.
 */
static int dio200_subdev_8254_write(struct comedi_device *dev,
				    struct comedi_subdevice *s,
				    struct comedi_insn *insn,
				    unsigned int *data)
{
	struct dio200_subdev_8254 *subpriv = s->private;
	int chan = CR_CHAN(insn->chanspec);
	unsigned long flags;

	spin_lock_irqsave(&subpriv->spinlock, flags);
	i8254_write(subpriv->iobase, 0, chan, data[0]);
	spin_unlock_irqrestore(&subpriv->spinlock, flags);

	return 1;
}

/*
 * Set gate source for an '8254' counter subdevice channel.
 * Returns 0 on success, -1 on invalid arguments or unsupported hardware.
 */
static int
dio200_set_gate_src(struct dio200_subdev_8254 *subpriv,
		    unsigned int counter_number, unsigned int gate_src)
{
	unsigned char byte;

	if (!subpriv->has_clk_gat_sce)
		return -1;
	if (counter_number > 2)
		return -1;
	if (gate_src > 7)
		return -1;

	/* Remember the selection; the register is write-only. */
	subpriv->gate_src[counter_number] = gate_src;
	byte = GAT_SCE(subpriv->which, counter_number, gate_src);
	outb(byte, subpriv->gat_sce_iobase);

	return 0;
}

/*
 * Get gate source for an '8254' counter subdevice channel.
 * Returns the cached gate source, or -1 if unsupported/invalid.
 */
static int
dio200_get_gate_src(struct dio200_subdev_8254 *subpriv,
		    unsigned int counter_number)
{
	if (!subpriv->has_clk_gat_sce)
		return -1;
	if (counter_number > 2)
		return -1;

	return subpriv->gate_src[counter_number];
}

/*
 * Set clock source for an '8254' counter subdevice channel.
*/ static int dio200_set_clock_src(struct dio200_subdev_8254 *subpriv, unsigned int counter_number, unsigned int clock_src) { unsigned char byte; if (!subpriv->has_clk_gat_sce) return -1; if (counter_number > 2) return -1; if (clock_src > 7) return -1; subpriv->clock_src[counter_number] = clock_src; byte = CLK_SCE(subpriv->which, counter_number, clock_src); outb(byte, subpriv->clk_sce_iobase); return 0; } /* * Get clock source for an '8254' counter subdevice channel. */ static int dio200_get_clock_src(struct dio200_subdev_8254 *subpriv, unsigned int counter_number, unsigned int *period_ns) { unsigned clock_src; if (!subpriv->has_clk_gat_sce) return -1; if (counter_number > 2) return -1; clock_src = subpriv->clock_src[counter_number]; *period_ns = clock_period[clock_src]; return clock_src; } /* * Handle 'insn_config' for an '8254' counter subdevice. */ static int dio200_subdev_8254_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct dio200_subdev_8254 *subpriv = s->private; int ret = 0; int chan = CR_CHAN(insn->chanspec); unsigned long flags; spin_lock_irqsave(&subpriv->spinlock, flags); switch (data[0]) { case INSN_CONFIG_SET_COUNTER_MODE: ret = i8254_set_mode(subpriv->iobase, 0, chan, data[1]); if (ret < 0) ret = -EINVAL; break; case INSN_CONFIG_8254_READ_STATUS: data[1] = i8254_status(subpriv->iobase, 0, chan); break; case INSN_CONFIG_SET_GATE_SRC: ret = dio200_set_gate_src(subpriv, chan, data[2]); if (ret < 0) ret = -EINVAL; break; case INSN_CONFIG_GET_GATE_SRC: ret = dio200_get_gate_src(subpriv, chan); if (ret < 0) { ret = -EINVAL; break; } data[2] = ret; break; case INSN_CONFIG_SET_CLOCK_SRC: ret = dio200_set_clock_src(subpriv, chan, data[1]); if (ret < 0) ret = -EINVAL; break; case INSN_CONFIG_GET_CLOCK_SRC: ret = dio200_get_clock_src(subpriv, chan, &data[2]); if (ret < 0) { ret = -EINVAL; break; } data[1] = ret; break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&subpriv->spinlock, 
flags); return ret < 0 ? ret : insn->n; } /* * This function initializes an '8254' counter subdevice. * * Note: iobase is the base address of the board, not the subdevice; * offset is the offset to the 8254 chip. */ static int dio200_subdev_8254_init(struct comedi_device *dev, struct comedi_subdevice *s, unsigned long iobase, unsigned offset, int has_clk_gat_sce) { struct dio200_subdev_8254 *subpriv; unsigned int chan; subpriv = kzalloc(sizeof(*subpriv), GFP_KERNEL); if (!subpriv) { printk(KERN_ERR "comedi%d: error! out of memory!\n", dev->minor); return -ENOMEM; } s->private = subpriv; s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = 3; s->maxdata = 0xFFFF; s->insn_read = dio200_subdev_8254_read; s->insn_write = dio200_subdev_8254_write; s->insn_config = dio200_subdev_8254_config; spin_lock_init(&subpriv->spinlock); subpriv->iobase = offset + iobase; subpriv->has_clk_gat_sce = has_clk_gat_sce; if (has_clk_gat_sce) { /* Derive CLK_SCE and GAT_SCE register offsets from * 8254 offset. */ subpriv->clk_sce_iobase = DIO200_XCLK_SCE + (offset >> 3) + iobase; subpriv->gat_sce_iobase = DIO200_XGAT_SCE + (offset >> 3) + iobase; subpriv->which = (offset >> 2) & 1; } /* Initialize channels. */ for (chan = 0; chan < 3; chan++) { i8254_set_mode(subpriv->iobase, 0, chan, I8254_MODE0 | I8254_BINARY); if (subpriv->has_clk_gat_sce) { /* Gate source 0 is VCC (logic 1). */ dio200_set_gate_src(subpriv, chan, 0); /* Clock source 0 is the dedicated clock input. */ dio200_set_clock_src(subpriv, chan, 0); } } return 0; } /* * This function cleans up an '8254' counter subdevice. */ static void dio200_subdev_8254_cleanup(struct comedi_device *dev, struct comedi_subdevice *s) { struct dio200_subdev_intr *subpriv = s->private; kfree(subpriv); } /* * Attach is called by the Comedi core to configure the driver * for a particular board. If you specified a board_name array * in the driver structure, dev->board_ptr contains that * address. 
 */
static int dio200_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	struct comedi_subdevice *s;
	unsigned long iobase = 0;
	unsigned int irq = 0;
#ifdef CONFIG_COMEDI_PCI
	struct pci_dev *pci_dev = NULL;
	int bus = 0, slot = 0;
#endif
	const struct dio200_layout_struct *layout;
	int share_irq = 0;
	int sdx;
	unsigned n;
	int ret;

	printk(KERN_DEBUG "comedi%d: %s: attach\n", dev->minor,
	       DIO200_DRIVER_NAME);

	ret = alloc_private(dev, sizeof(struct dio200_private));
	if (ret < 0) {
		printk(KERN_ERR "comedi%d: error! out of memory!\n",
		       dev->minor);
		return ret;
	}

	/* Process options.  Option meanings depend on the bus type. */
	switch (thisboard->bustype) {
	case isa_bustype:
		iobase = it->options[0];
		irq = it->options[1];
		share_irq = 0;	/* edge-triggered ISA IRQ: not shareable */
		break;
#ifdef CONFIG_COMEDI_PCI
	case pci_bustype:
		bus = it->options[0];
		slot = it->options[1];
		share_irq = 1;	/* level-triggered PCI IRQ: shareable */

		ret = dio200_find_pci(dev, bus, slot, &pci_dev);
		if (ret < 0)
			return ret;
		devpriv->pci_dev = pci_dev;
		break;
#endif
	default:
		printk(KERN_ERR
		       "comedi%d: %s: BUG! cannot determine board type!\n",
		       dev->minor, DIO200_DRIVER_NAME);
		return -EINVAL;
		break;
	}

	devpriv->intr_sd = -1;

	/* Enable device and reserve I/O spaces. */
#ifdef CONFIG_COMEDI_PCI
	if (pci_dev) {
		ret = comedi_pci_enable(pci_dev, DIO200_DRIVER_NAME);
		if (ret < 0) {
			printk(KERN_ERR
			       "comedi%d: error! cannot enable PCI device and request regions!\n",
			       dev->minor);
			return ret;
		}
		iobase = pci_resource_start(pci_dev, 2);
		irq = pci_dev->irq;
	} else
#endif
	{
		ret = dio200_request_region(dev->minor, iobase, DIO200_IO_SIZE);
		if (ret < 0)
			return ret;
	}
	dev->iobase = iobase;

	layout = thislayout;

	ret = alloc_subdevices(dev, layout->n_subdevs);
	if (ret < 0) {
		printk(KERN_ERR "comedi%d: error! out of memory!\n",
		       dev->minor);
		return ret;
	}

	/* Set up each subdevice according to the board layout. */
	for (n = 0; n < dev->n_subdevices; n++) {
		s = &dev->subdevices[n];
		switch (layout->sdtype[n]) {
		case sd_8254:
			/* counter subdevice (8254) */
			ret = dio200_subdev_8254_init(dev, s, iobase,
						      layout->sdinfo[n],
						      layout->has_clk_gat_sce);
			if (ret < 0)
				return ret;

			break;
		case sd_8255:
			/* digital i/o subdevice (8255) */
			ret = subdev_8255_init(dev, s, NULL,
					       iobase + layout->sdinfo[n]);
			if (ret < 0)
				return ret;

			break;
		case sd_intr:
			/* 'INTERRUPT' subdevice; useless without an IRQ */
			if (irq) {
				ret = dio200_subdev_intr_init(dev, s,
							      iobase +
							      DIO200_INT_SCE,
							      layout->sdinfo[n],
							      layout->
							      has_int_sce);
				if (ret < 0)
					return ret;

				devpriv->intr_sd = n;
			} else {
				s->type = COMEDI_SUBD_UNUSED;
			}
			break;
		default:
			s->type = COMEDI_SUBD_UNUSED;
			break;
		}
	}

	sdx = devpriv->intr_sd;
	if (sdx >= 0 && sdx < dev->n_subdevices)
		dev->read_subdev = &dev->subdevices[sdx];

	dev->board_name = thisboard->name;

	if (irq) {
		unsigned long flags = share_irq ? IRQF_SHARED : 0;

		/* IRQ failure is non-fatal; commands just won't work. */
		if (request_irq(irq, dio200_interrupt, flags,
				DIO200_DRIVER_NAME, dev) >= 0) {
			dev->irq = irq;
		} else {
			printk(KERN_WARNING
			       "comedi%d: warning! irq %u unavailable!\n",
			       dev->minor, irq);
		}
	}

	printk(KERN_INFO "comedi%d: %s ", dev->minor, dev->board_name);
	if (thisboard->bustype == isa_bustype) {
		printk("(base %#lx) ", iobase);
	} else {
#ifdef CONFIG_COMEDI_PCI
		printk("(pci %s) ", pci_name(pci_dev));
#endif
	}
	if (irq)
		printk("(irq %u%s) ", irq, (dev->irq ? "" : " UNAVAILABLE"));
	else
		printk("(no irq) ");

	printk("attached\n");

	return 1;
}

/*
 * _detach is called to deconfigure a device.  It should deallocate
 * resources.
 * This function is also called when _attach() fails, so it should be
 * careful not to release resources that were not necessarily
 * allocated by _attach().  dev->private and dev->subdevices are
 * deallocated automatically by the core.
*/ static int dio200_detach(struct comedi_device *dev) { const struct dio200_layout_struct *layout; unsigned n; printk(KERN_DEBUG "comedi%d: %s: detach\n", dev->minor, DIO200_DRIVER_NAME); if (dev->irq) free_irq(dev->irq, dev); if (dev->subdevices) { layout = thislayout; for (n = 0; n < dev->n_subdevices; n++) { struct comedi_subdevice *s = &dev->subdevices[n]; switch (layout->sdtype[n]) { case sd_8254: dio200_subdev_8254_cleanup(dev, s); break; case sd_8255: subdev_8255_cleanup(dev, s); break; case sd_intr: dio200_subdev_intr_cleanup(dev, s); break; default: break; } } } if (devpriv) { #ifdef CONFIG_COMEDI_PCI if (devpriv->pci_dev) { if (dev->iobase) comedi_pci_disable(devpriv->pci_dev); pci_dev_put(devpriv->pci_dev); } else #endif { if (dev->iobase) release_region(dev->iobase, DIO200_IO_SIZE); } } if (dev->board_name) printk(KERN_INFO "comedi%d: %s removed\n", dev->minor, dev->board_name); return 0; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
Schischu/android_kernel_samsung_chagallwifi
drivers/staging/comedi/drivers/cb_pcidas.c
4896
57863
/* comedi/drivers/cb_pcidas.c Developed by Ivan Martinez and Frank Mori Hess, with valuable help from David Schleef and the rest of the Comedi developers comunity. Copyright (C) 2001-2003 Ivan Martinez <imr@oersted.dtu.dk> Copyright (C) 2001,2002 Frank Mori Hess <fmhess@users.sourceforge.net> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1997-8 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* Driver: cb_pcidas Description: MeasurementComputing PCI-DAS series with the AMCC S5933 PCI controller Author: Ivan Martinez <imr@oersted.dtu.dk>, Frank Mori Hess <fmhess@users.sourceforge.net> Updated: 2003-3-11 Devices: [Measurement Computing] PCI-DAS1602/16 (cb_pcidas), PCI-DAS1602/16jr, PCI-DAS1602/12, PCI-DAS1200, PCI-DAS1200jr, PCI-DAS1000, PCI-DAS1001, PCI_DAS1002 Status: There are many reports of the driver being used with most of the supported cards. Despite no detailed log is maintained, it can be said that the driver is quite tested and stable. The boards may be autocalibrated using the comedi_calibrate utility. Configuration options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first supported PCI device found will be used. 
For commands, the scanned channels must be consecutive (i.e. 4-5-6-7, 2-3-4,...), and must all have the same range and aref. AI Triggering: For start_src == TRIG_EXT, the A/D EXTERNAL TRIGGER IN (pin 45) is used. For 1602 series, the start_arg is interpreted as follows: start_arg == 0 => gated triger (level high) start_arg == CR_INVERT => gated triger (level low) start_arg == CR_EDGE => Rising edge start_arg == CR_EDGE | CR_INVERT => Falling edge For the other boards the trigger will be done on rising edge */ /* TODO: analog triggering on 1602 series */ #include "../comedidev.h" #include <linux/delay.h> #include <linux/interrupt.h> #include "8253.h" #include "8255.h" #include "amcc_s5933.h" #include "comedi_pci.h" #include "comedi_fc.h" #undef CB_PCIDAS_DEBUG /* disable debugging code */ /* #define CB_PCIDAS_DEBUG enable debugging code */ /* PCI vendor number of ComputerBoards/MeasurementComputing */ #define PCI_VENDOR_ID_CB 0x1307 #define TIMER_BASE 100 /* 10MHz master clock */ #define AI_BUFFER_SIZE 1024 /* maximum fifo size of any supported board */ #define AO_BUFFER_SIZE 1024 /* maximum fifo size of any supported board */ #define NUM_CHANNELS_8800 8 #define NUM_CHANNELS_7376 1 #define NUM_CHANNELS_8402 2 #define NUM_CHANNELS_DAC08 1 /* PCI-DAS base addresses */ /* indices of base address regions */ #define S5933_BADRINDEX 0 #define CONT_STAT_BADRINDEX 1 #define ADC_FIFO_BADRINDEX 2 #define PACER_BADRINDEX 3 #define AO_BADRINDEX 4 /* sizes of io regions */ #define CONT_STAT_SIZE 10 #define ADC_FIFO_SIZE 4 #define PACER_SIZE 12 #define AO_SIZE 4 /* Control/Status registers */ #define INT_ADCFIFO 0 /* INTERRUPT / ADC FIFO register */ #define INT_EOS 0x1 /* interrupt end of scan */ #define INT_FHF 0x2 /* interrupt fifo half full */ #define INT_FNE 0x3 /* interrupt fifo not empty */ #define INT_MASK 0x3 /* mask of interrupt select bits */ #define INTE 0x4 /* interrupt enable */ #define DAHFIE 0x8 /* dac half full interrupt enable */ #define EOAIE 0x10 /* end of 
acquisition interrupt enable */ #define DAHFI 0x20 /* dac half full read status / write interrupt clear */ #define EOAI 0x40 /* read end of acq. interrupt status / write clear */ #define INT 0x80 /* read interrupt status / write clear */ #define EOBI 0x200 /* read end of burst interrupt status */ #define ADHFI 0x400 /* read half-full interrupt status */ #define ADNEI 0x800 /* read fifo not empty interrupt latch status */ #define ADNE 0x1000 /* read, fifo not empty (realtime, not latched) status */ #define DAEMIE 0x1000 /* write, dac empty interrupt enable */ #define LADFUL 0x2000 /* read fifo overflow / write clear */ #define DAEMI 0x4000 /* dac fifo empty interrupt status / write clear */ #define ADCMUX_CONT 2 /* ADC CHANNEL MUX AND CONTROL register */ #define BEGIN_SCAN(x) ((x) & 0xf) #define END_SCAN(x) (((x) & 0xf) << 4) #define GAIN_BITS(x) (((x) & 0x3) << 8) #define UNIP 0x800 /* Analog front-end unipolar for range */ #define SE 0x400 /* Inputs in single-ended mode */ #define PACER_MASK 0x3000 /* pacer source bits */ #define PACER_INT 0x1000 /* internal pacer */ #define PACER_EXT_FALL 0x2000 /* external falling edge */ #define PACER_EXT_RISE 0x3000 /* external rising edge */ #define EOC 0x4000 /* adc not busy */ #define TRIG_CONTSTAT 4 /* TRIGGER CONTROL/STATUS register */ #define SW_TRIGGER 0x1 /* software start trigger */ #define EXT_TRIGGER 0x2 /* external start trigger */ #define ANALOG_TRIGGER 0x3 /* external analog trigger */ #define TRIGGER_MASK 0x3 /* mask of bits that determine start trigger */ #define TGPOL 0x04 /* invert the edge/level of the external trigger (1602 only) */ #define TGSEL 0x08 /* if set edge triggered, otherwise level trigerred (1602 only) */ #define TGEN 0x10 /* enable external start trigger */ #define BURSTE 0x20 /* burst mode enable */ #define XTRCL 0x80 /* clear external trigger */ #define CALIBRATION_REG 6 /* CALIBRATION register */ #define SELECT_8800_BIT 0x100 /* select 8800 caldac */ #define SELECT_TRIMPOT_BIT 0x200 /* 
select ad7376 trim pot */ #define SELECT_DAC08_BIT 0x400 /* select dac08 caldac */ #define CAL_SRC_BITS(x) (((x) & 0x7) << 11) #define CAL_EN_BIT 0x4000 /* read calibration source instead of analog input channel 0 */ #define SERIAL_DATA_IN_BIT 0x8000 /* serial data stream going to 8800 and 7376 */ #define DAC_CSR 0x8 /* dac control and status register */ enum dac_csr_bits { DACEN = 0x2, /* dac enable */ DAC_MODE_UPDATE_BOTH = 0x80, /* update both dacs when dac0 is written */ }; static inline unsigned int DAC_RANGE(unsigned int channel, unsigned int range) { return (range & 0x3) << (8 + 2 * (channel & 0x1)); } static inline unsigned int DAC_RANGE_MASK(unsigned int channel) { return 0x3 << (8 + 2 * (channel & 0x1)); }; /* bits for 1602 series only */ enum dac_csr_bits_1602 { DAC_EMPTY = 0x1, /* dac fifo empty, read, write clear */ DAC_START = 0x4, /* start/arm dac fifo operations */ DAC_PACER_MASK = 0x18, /* bits that set dac pacer source */ DAC_PACER_INT = 0x8, /* dac internal pacing */ DAC_PACER_EXT_FALL = 0x10, /* dac external pacing, falling edge */ DAC_PACER_EXT_RISE = 0x18, /* dac external pacing, rising edge */ }; static inline unsigned int DAC_CHAN_EN(unsigned int channel) { return 1 << (5 + (channel & 0x1)); /* enable channel 0 or 1 */ }; /* analog input fifo */ #define ADCDATA 0 /* ADC DATA register */ #define ADCFIFOCLR 2 /* ADC FIFO CLEAR */ /* pacer, counter, dio registers */ #define ADC8254 0 #define DIO_8255 4 #define DAC8254 8 /* analog output registers for 100x, 1200 series */ static inline unsigned int DAC_DATA_REG(unsigned int channel) { return 2 * (channel & 0x1); } /* analog output registers for 1602 series*/ #define DACDATA 0 /* DAC DATA register */ #define DACFIFOCLR 2 /* DAC FIFO CLEAR */ /* bit in hexadecimal representation of range index that indicates unipolar input range */ #define IS_UNIPOLAR 0x4 /* analog input ranges for most boards */ static const struct comedi_lrange cb_pcidas_ranges = { 8, { BIP_RANGE(10), BIP_RANGE(5), 
BIP_RANGE(2.5), BIP_RANGE(1.25), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25) } }; /* pci-das1001 input ranges */ static const struct comedi_lrange cb_pcidas_alt_ranges = { 8, { BIP_RANGE(10), BIP_RANGE(1), BIP_RANGE(0.1), BIP_RANGE(0.01), UNI_RANGE(10), UNI_RANGE(1), UNI_RANGE(0.1), UNI_RANGE(0.01) } }; /* analog output ranges */ static const struct comedi_lrange cb_pcidas_ao_ranges = { 4, { BIP_RANGE(5), BIP_RANGE(10), UNI_RANGE(5), UNI_RANGE(10), } }; enum trimpot_model { AD7376, AD8402, }; struct cb_pcidas_board { const char *name; unsigned short device_id; int ai_se_chans; /* Inputs in single-ended mode */ int ai_diff_chans; /* Inputs in differential mode */ int ai_bits; /* analog input resolution */ int ai_speed; /* fastest conversion period in ns */ int ao_nchan; /* number of analog out channels */ int has_ao_fifo; /* analog output has fifo */ int ao_scan_speed; /* analog output speed for 1602 series (for a scan, not conversion) */ int fifo_size; /* number of samples fifo can hold */ const struct comedi_lrange *ranges; enum trimpot_model trimpot; unsigned has_dac08:1; unsigned has_ai_trig_gated:1; /* Tells if the AI trigger can be gated */ unsigned has_ai_trig_invert:1; /* Tells if the AI trigger can be inverted */ }; static const struct cb_pcidas_board cb_pcidas_boards[] = { { .name = "pci-das1602/16", .device_id = 0x1, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 2, .has_ao_fifo = 1, .ao_scan_speed = 10000, .fifo_size = 512, .ranges = &cb_pcidas_ranges, .trimpot = AD8402, .has_dac08 = 1, .has_ai_trig_gated = 1, .has_ai_trig_invert = 1, }, { .name = "pci-das1200", .device_id = 0xF, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 3200, .ao_nchan = 2, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, { .name = "pci-das1602/12", .device_id = 0x10, .ai_se_chans = 16, .ai_diff_chans = 8, 
.ai_bits = 12, .ai_speed = 3200, .ao_nchan = 2, .has_ao_fifo = 1, .ao_scan_speed = 4000, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 1, .has_ai_trig_invert = 1, }, { .name = "pci-das1200/jr", .device_id = 0x19, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 3200, .ao_nchan = 0, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, { .name = "pci-das1602/16/jr", .device_id = 0x1C, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 0, .has_ao_fifo = 0, .fifo_size = 512, .ranges = &cb_pcidas_ranges, .trimpot = AD8402, .has_dac08 = 1, .has_ai_trig_gated = 1, .has_ai_trig_invert = 1, }, { .name = "pci-das1000", .device_id = 0x4C, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 4000, .ao_nchan = 0, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, { .name = "pci-das1001", .device_id = 0x1a, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 6800, .ao_nchan = 2, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_alt_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, { .name = "pci-das1002", .device_id = 0x1b, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 6800, .ao_nchan = 2, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, }; static DEFINE_PCI_DEVICE_TABLE(cb_pcidas_pci_table) = { { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0001) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x000f) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0010) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0019) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x001c) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x004c) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 
0x001a) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x001b) }, { 0 } }; MODULE_DEVICE_TABLE(pci, cb_pcidas_pci_table); /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct cb_pcidas_board *)dev->board_ptr) /* this structure is for data unique to this hardware driver. If several hardware drivers keep similar information in this structure, feel free to suggest moving the variable to the struct comedi_device struct. */ struct cb_pcidas_private { /* would be useful for a PCI device */ struct pci_dev *pci_dev; /* base addresses */ unsigned long s5933_config; unsigned long control_status; unsigned long adc_fifo; unsigned long pacer_counter_dio; unsigned long ao_registers; /* divisors of master clock for analog input pacing */ unsigned int divisor1; unsigned int divisor2; volatile unsigned int count; /* number of analog input samples remaining */ volatile unsigned int adc_fifo_bits; /* bits to write to interrupt/adcfifo register */ volatile unsigned int s5933_intcsr_bits; /* bits to write to amcc s5933 interrupt control/status register */ volatile unsigned int ao_control_bits; /* bits to write to ao control and status register */ short ai_buffer[AI_BUFFER_SIZE]; short ao_buffer[AO_BUFFER_SIZE]; /* divisors of master clock for analog output pacing */ unsigned int ao_divisor1; unsigned int ao_divisor2; volatile unsigned int ao_count; /* number of analog output samples remaining */ int ao_value[2]; /* remember what the analog outputs are set to, to allow readback */ unsigned int caldac_value[NUM_CHANNELS_8800]; /* for readback of caldac */ unsigned int trimpot_value[NUM_CHANNELS_8402]; /* for readback of trimpot */ unsigned int dac08_value; unsigned int calibration_source; }; /* * most drivers define the following macro to make it easy to * access the private structure. 
*/ #define devpriv ((struct cb_pcidas_private *)dev->private) /* * The struct comedi_driver structure tells the Comedi core module * which functions to call to configure/deconfigure (attach/detach) * the board, and also about the kernel module that contains * the device code. */ static int cb_pcidas_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int cb_pcidas_detach(struct comedi_device *dev); static struct comedi_driver driver_cb_pcidas = { .driver_name = "cb_pcidas", .module = THIS_MODULE, .attach = cb_pcidas_attach, .detach = cb_pcidas_detach, }; static int cb_pcidas_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ai_config_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int cb_pcidas_ao_nofifo_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int cb_pcidas_ao_fifo_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int cb_pcidas_ao_readback_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int cb_pcidas_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int cb_pcidas_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int cb_pcidas_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int cb_pcidas_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *subdev, unsigned int trig_num); static int cb_pcidas_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static irqreturn_t cb_pcidas_interrupt(int irq, void *d); static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status); static int cb_pcidas_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int 
cb_pcidas_ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static void cb_pcidas_load_counters(struct comedi_device *dev, unsigned int *ns, int round_flags); static int eeprom_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int caldac_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int caldac_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int trimpot_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int cb_pcidas_trimpot_write(struct comedi_device *dev, unsigned int channel, unsigned int value); static int trimpot_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dac08_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dac08_write(struct comedi_device *dev, unsigned int value); static int dac08_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int caldac_8800_write(struct comedi_device *dev, unsigned int address, uint8_t value); static int trimpot_7376_write(struct comedi_device *dev, uint8_t value); static int trimpot_8402_write(struct comedi_device *dev, unsigned int channel, uint8_t value); static int nvram_read(struct comedi_device *dev, unsigned int address, uint8_t *data); static inline unsigned int cal_enable_bits(struct comedi_device *dev) { return CAL_EN_BIT | CAL_SRC_BITS(devpriv->calibration_source); } /* * Attach is called by the Comedi core to configure the driver * for a particular board. 
*/ static int cb_pcidas_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; struct pci_dev *pcidev = NULL; int index; int i; /* * Allocate the private structure area. */ if (alloc_private(dev, sizeof(struct cb_pcidas_private)) < 0) return -ENOMEM; /* * Probe the device to determine what device in the series it is. */ for_each_pci_dev(pcidev) { /* is it not a computer boards card? */ if (pcidev->vendor != PCI_VENDOR_ID_CB) continue; /* loop through cards supported by this driver */ for (index = 0; index < ARRAY_SIZE(cb_pcidas_boards); index++) { if (cb_pcidas_boards[index].device_id != pcidev->device) continue; /* was a particular bus/slot requested? */ if (it->options[0] || it->options[1]) { /* are we on the wrong bus/slot? */ if (pcidev->bus->number != it->options[0] || PCI_SLOT(pcidev->devfn) != it->options[1]) { continue; } } devpriv->pci_dev = pcidev; dev->board_ptr = cb_pcidas_boards + index; goto found; } } dev_err(dev->hw_dev, "No supported ComputerBoards/MeasurementComputing card found on requested position\n"); return -EIO; found: dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n", cb_pcidas_boards[index].name, pcidev->bus->number, PCI_SLOT(pcidev->devfn)); /* * Enable PCI device and reserve I/O ports. */ if (comedi_pci_enable(pcidev, "cb_pcidas")) { dev_err(dev->hw_dev, "Failed to enable PCI device and request regions\n"); return -EIO; } /* * Initialize devpriv->control_status and devpriv->adc_fifo to point to * their base address. 
*/ devpriv->s5933_config = pci_resource_start(devpriv->pci_dev, S5933_BADRINDEX); devpriv->control_status = pci_resource_start(devpriv->pci_dev, CONT_STAT_BADRINDEX); devpriv->adc_fifo = pci_resource_start(devpriv->pci_dev, ADC_FIFO_BADRINDEX); devpriv->pacer_counter_dio = pci_resource_start(devpriv->pci_dev, PACER_BADRINDEX); if (thisboard->ao_nchan) { devpriv->ao_registers = pci_resource_start(devpriv->pci_dev, AO_BADRINDEX); } /* disable and clear interrupts on amcc s5933 */ outl(INTCSR_INBOX_INTR_STATUS, devpriv->s5933_config + AMCC_OP_REG_INTCSR); /* get irq */ if (request_irq(devpriv->pci_dev->irq, cb_pcidas_interrupt, IRQF_SHARED, "cb_pcidas", dev)) { dev_dbg(dev->hw_dev, "unable to allocate irq %d\n", devpriv->pci_dev->irq); return -EINVAL; } dev->irq = devpriv->pci_dev->irq; /* Initialize dev->board_name */ dev->board_name = thisboard->name; /* * Allocate the subdevice structures. */ if (alloc_subdevices(dev, 7) < 0) return -ENOMEM; s = dev->subdevices + 0; /* analog input subdevice */ dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ; /* WARNING: Number of inputs in differential mode is ignored */ s->n_chan = thisboard->ai_se_chans; s->len_chanlist = thisboard->ai_se_chans; s->maxdata = (1 << thisboard->ai_bits) - 1; s->range_table = thisboard->ranges; s->insn_read = cb_pcidas_ai_rinsn; s->insn_config = ai_config_insn; s->do_cmd = cb_pcidas_ai_cmd; s->do_cmdtest = cb_pcidas_ai_cmdtest; s->cancel = cb_pcidas_cancel; /* analog output subdevice */ s = dev->subdevices + 1; if (thisboard->ao_nchan) { s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND; s->n_chan = thisboard->ao_nchan; /* analog out resolution is the same as analog input resolution, so use ai_bits */ s->maxdata = (1 << thisboard->ai_bits) - 1; s->range_table = &cb_pcidas_ao_ranges; s->insn_read = cb_pcidas_ao_readback_insn; if (thisboard->has_ao_fifo) { dev->write_subdev = s; s->subdev_flags |= 
SDF_CMD_WRITE; s->insn_write = cb_pcidas_ao_fifo_winsn; s->do_cmdtest = cb_pcidas_ao_cmdtest; s->do_cmd = cb_pcidas_ao_cmd; s->cancel = cb_pcidas_ao_cancel; } else { s->insn_write = cb_pcidas_ao_nofifo_winsn; } } else { s->type = COMEDI_SUBD_UNUSED; } /* 8255 */ s = dev->subdevices + 2; subdev_8255_init(dev, s, NULL, devpriv->pacer_counter_dio + DIO_8255); /* serial EEPROM, */ s = dev->subdevices + 3; s->type = COMEDI_SUBD_MEMORY; s->subdev_flags = SDF_READABLE | SDF_INTERNAL; s->n_chan = 256; s->maxdata = 0xff; s->insn_read = eeprom_read_insn; /* 8800 caldac */ s = dev->subdevices + 4; s->type = COMEDI_SUBD_CALIB; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; s->n_chan = NUM_CHANNELS_8800; s->maxdata = 0xff; s->insn_read = caldac_read_insn; s->insn_write = caldac_write_insn; for (i = 0; i < s->n_chan; i++) caldac_8800_write(dev, i, s->maxdata / 2); /* trim potentiometer */ s = dev->subdevices + 5; s->type = COMEDI_SUBD_CALIB; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; if (thisboard->trimpot == AD7376) { s->n_chan = NUM_CHANNELS_7376; s->maxdata = 0x7f; } else { s->n_chan = NUM_CHANNELS_8402; s->maxdata = 0xff; } s->insn_read = trimpot_read_insn; s->insn_write = trimpot_write_insn; for (i = 0; i < s->n_chan; i++) cb_pcidas_trimpot_write(dev, i, s->maxdata / 2); /* dac08 caldac */ s = dev->subdevices + 6; if (thisboard->has_dac08) { s->type = COMEDI_SUBD_CALIB; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; s->n_chan = NUM_CHANNELS_DAC08; s->insn_read = dac08_read_insn; s->insn_write = dac08_write_insn; s->maxdata = 0xff; dac08_write(dev, s->maxdata / 2); } else s->type = COMEDI_SUBD_UNUSED; /* make sure mailbox 4 is empty */ inl(devpriv->s5933_config + AMCC_OP_REG_IMB4); /* Set bits to enable incoming mailbox interrupts on amcc s5933. 
*/ devpriv->s5933_intcsr_bits = INTCSR_INBOX_BYTE(3) | INTCSR_INBOX_SELECT(3) | INTCSR_INBOX_FULL_INT; /* clear and enable interrupt on amcc s5933 */ outl(devpriv->s5933_intcsr_bits | INTCSR_INBOX_INTR_STATUS, devpriv->s5933_config + AMCC_OP_REG_INTCSR); return 1; } /* * cb_pcidas_detach is called to deconfigure a device. It should deallocate * resources. * This function is also called when _attach() fails, so it should be * careful not to release resources that were not necessarily * allocated by _attach(). dev->private and dev->subdevices are * deallocated automatically by the core. */ static int cb_pcidas_detach(struct comedi_device *dev) { if (devpriv) { if (devpriv->s5933_config) { /* disable and clear interrupts on amcc s5933 */ outl(INTCSR_INBOX_INTR_STATUS, devpriv->s5933_config + AMCC_OP_REG_INTCSR); #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "detaching, incsr is 0x%x\n", inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR)); #endif } } if (dev->irq) free_irq(dev->irq, dev); if (dev->subdevices) subdev_8255_cleanup(dev, dev->subdevices + 2); if (devpriv && devpriv->pci_dev) { if (devpriv->s5933_config) comedi_pci_disable(devpriv->pci_dev); pci_dev_put(devpriv->pci_dev); } return 0; } /* * "instructions" read/write data in "one-shot" or "software-triggered" * mode. 
*/ static int cb_pcidas_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n, i; unsigned int bits; static const int timeout = 10000; int channel; /* enable calibration input if appropriate */ if (insn->chanspec & CR_ALT_SOURCE) { outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG); channel = 0; } else { outw(0, devpriv->control_status + CALIBRATION_REG); channel = CR_CHAN(insn->chanspec); } /* set mux limits and gain */ bits = BEGIN_SCAN(channel) | END_SCAN(channel) | GAIN_BITS(CR_RANGE(insn->chanspec)); /* set unipolar/bipolar */ if (CR_RANGE(insn->chanspec) & IS_UNIPOLAR) bits |= UNIP; /* set singleended/differential */ if (CR_AREF(insn->chanspec) != AREF_DIFF) bits |= SE; outw(bits, devpriv->control_status + ADCMUX_CONT); /* clear fifo */ outw(0, devpriv->adc_fifo + ADCFIFOCLR); /* convert n samples */ for (n = 0; n < insn->n; n++) { /* trigger conversion */ outw(0, devpriv->adc_fifo + ADCDATA); /* wait for conversion to end */ /* return -ETIMEDOUT if there is a timeout */ for (i = 0; i < timeout; i++) { if (inw(devpriv->control_status + ADCMUX_CONT) & EOC) break; } if (i == timeout) return -ETIMEDOUT; /* read data */ data[n] = inw(devpriv->adc_fifo + ADCDATA); } /* return the number of samples read/written */ return n; } static int ai_config_calibration_source(struct comedi_device *dev, unsigned int *data) { static const int num_calibration_sources = 8; unsigned int source = data[1]; if (source >= num_calibration_sources) { dev_err(dev->hw_dev, "invalid calibration source: %i\n", source); return -EINVAL; } devpriv->calibration_source = source; return 2; } static int ai_config_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int id = data[0]; switch (id) { case INSN_CONFIG_ALT_SOURCE: return ai_config_calibration_source(dev, data); break; default: return -EINVAL; break; } return -EINVAL; } /* analog output insn for 
pcidas-1000 and 1200 series */ static int cb_pcidas_ao_nofifo_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int channel; unsigned long flags; /* set channel and range */ channel = CR_CHAN(insn->chanspec); spin_lock_irqsave(&dev->spinlock, flags); devpriv->ao_control_bits &= ~DAC_MODE_UPDATE_BOTH & ~DAC_RANGE_MASK(channel); devpriv->ao_control_bits |= DACEN | DAC_RANGE(channel, CR_RANGE(insn->chanspec)); outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR); spin_unlock_irqrestore(&dev->spinlock, flags); /* remember value for readback */ devpriv->ao_value[channel] = data[0]; /* send data */ outw(data[0], devpriv->ao_registers + DAC_DATA_REG(channel)); return 1; } /* analog output insn for pcidas-1602 series */ static int cb_pcidas_ao_fifo_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int channel; unsigned long flags; /* clear dac fifo */ outw(0, devpriv->ao_registers + DACFIFOCLR); /* set channel and range */ channel = CR_CHAN(insn->chanspec); spin_lock_irqsave(&dev->spinlock, flags); devpriv->ao_control_bits &= ~DAC_CHAN_EN(0) & ~DAC_CHAN_EN(1) & ~DAC_RANGE_MASK(channel) & ~DAC_PACER_MASK; devpriv->ao_control_bits |= DACEN | DAC_RANGE(channel, CR_RANGE(insn-> chanspec)) | DAC_CHAN_EN(channel) | DAC_START; outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR); spin_unlock_irqrestore(&dev->spinlock, flags); /* remember value for readback */ devpriv->ao_value[channel] = data[0]; /* send data */ outw(data[0], devpriv->ao_registers + DACDATA); return 1; } /* analog output readback insn */ /* XXX loses track of analog output value back after an analog ouput command is executed */ static int cb_pcidas_ao_readback_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = devpriv->ao_value[CR_CHAN(insn->chanspec)]; return 1; } static int eeprom_read_insn(struct 
comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { uint8_t nvram_data; int retval; retval = nvram_read(dev, CR_CHAN(insn->chanspec), &nvram_data); if (retval < 0) return retval; data[0] = nvram_data; return 1; } static int caldac_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { const unsigned int channel = CR_CHAN(insn->chanspec); return caldac_8800_write(dev, channel, data[0]); } static int caldac_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = devpriv->caldac_value[CR_CHAN(insn->chanspec)]; return 1; } /* 1602/16 pregain offset */ static int dac08_write(struct comedi_device *dev, unsigned int value) { if (devpriv->dac08_value == value) return 1; devpriv->dac08_value = value; outw(cal_enable_bits(dev) | (value & 0xff), devpriv->control_status + CALIBRATION_REG); udelay(1); outw(cal_enable_bits(dev) | SELECT_DAC08_BIT | (value & 0xff), devpriv->control_status + CALIBRATION_REG); udelay(1); outw(cal_enable_bits(dev) | (value & 0xff), devpriv->control_status + CALIBRATION_REG); udelay(1); return 1; } static int dac08_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { return dac08_write(dev, data[0]); } static int dac08_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = devpriv->dac08_value; return 1; } static int cb_pcidas_trimpot_write(struct comedi_device *dev, unsigned int channel, unsigned int value) { if (devpriv->trimpot_value[channel] == value) return 1; devpriv->trimpot_value[channel] = value; switch (thisboard->trimpot) { case AD7376: trimpot_7376_write(dev, value); break; case AD8402: trimpot_8402_write(dev, channel, value); break; default: comedi_error(dev, "driver bug?"); return -1; break; } return 1; } static int trimpot_write_insn(struct 
comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int channel = CR_CHAN(insn->chanspec); return cb_pcidas_trimpot_write(dev, channel, data[0]); } static int trimpot_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int channel = CR_CHAN(insn->chanspec); data[0] = devpriv->trimpot_value[channel]; return 1; } static int cb_pcidas_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; int i, gain, start_chan; /* cmdtest tests a particular command to see if it is valid. * Using the cmdtest ioctl, a user can create a valid cmd * and then have it executes by the cmd ioctl. * * cmdtest returns 1,2,3,4 or 0, depending on which tests * the command passes. */ /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW | TRIG_EXT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_TIMER | TRIG_NOW | TRIG_EXT; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT) err++; if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_EXT) err++; if (cmd->convert_src != TRIG_TIMER && cmd->convert_src != TRIG_EXT && cmd->convert_src != TRIG_NOW) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; /* make 
sure trigger sources are compatible with each other */ if (cmd->scan_begin_src == TRIG_FOLLOW && cmd->convert_src == TRIG_NOW) err++; if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->convert_src != TRIG_NOW) err++; if (cmd->start_src == TRIG_EXT && (cmd->convert_src == TRIG_EXT || cmd->scan_begin_src == TRIG_EXT)) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ switch (cmd->start_src) { case TRIG_EXT: /* External trigger, only CR_EDGE and CR_INVERT flags allowed */ if ((cmd->start_arg & (CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT))) != 0) { cmd->start_arg &= ~(CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT)); err++; } if (!thisboard->has_ai_trig_invert && (cmd->start_arg & CR_INVERT)) { cmd->start_arg &= (CR_FLAGS_MASK & ~CR_INVERT); err++; } break; default: if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } break; } if (cmd->scan_begin_src == TRIG_TIMER) { if (cmd->scan_begin_arg < thisboard->ai_speed * cmd->chanlist_len) { cmd->scan_begin_arg = thisboard->ai_speed * cmd->chanlist_len; err++; } } if (cmd->convert_src == TRIG_TIMER) { if (cmd->convert_arg < thisboard->ai_speed) { cmd->convert_arg = thisboard->ai_speed; err++; } } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_NONE) { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; /* step 4: fix up any arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { tmp = cmd->scan_begin_arg; i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd->scan_begin_arg), cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->scan_begin_arg) err++; } if (cmd->convert_src == TRIG_TIMER) { tmp = cmd->convert_arg; i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd->convert_arg), cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->convert_arg) err++; } if (err) return 4; /* check channel/gain list against card's limitations */ if 
(cmd->chanlist) { gain = CR_RANGE(cmd->chanlist[0]); start_chan = CR_CHAN(cmd->chanlist[0]); for (i = 1; i < cmd->chanlist_len; i++) { if (CR_CHAN(cmd->chanlist[i]) != (start_chan + i) % s->n_chan) { comedi_error(dev, "entries in chanlist must be consecutive channels, counting upwards\n"); err++; } if (CR_RANGE(cmd->chanlist[i]) != gain) { comedi_error(dev, "entries in chanlist must all have the same gain\n"); err++; } } } if (err) return 5; return 0; } static int cb_pcidas_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int bits; unsigned long flags; /* make sure CAL_EN_BIT is disabled */ outw(0, devpriv->control_status + CALIBRATION_REG); /* initialize before settings pacer source and count values */ outw(0, devpriv->control_status + TRIG_CONTSTAT); /* clear fifo */ outw(0, devpriv->adc_fifo + ADCFIFOCLR); /* set mux limits, gain and pacer source */ bits = BEGIN_SCAN(CR_CHAN(cmd->chanlist[0])) | END_SCAN(CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1])) | GAIN_BITS(CR_RANGE(cmd->chanlist[0])); /* set unipolar/bipolar */ if (CR_RANGE(cmd->chanlist[0]) & IS_UNIPOLAR) bits |= UNIP; /* set singleended/differential */ if (CR_AREF(cmd->chanlist[0]) != AREF_DIFF) bits |= SE; /* set pacer source */ if (cmd->convert_src == TRIG_EXT || cmd->scan_begin_src == TRIG_EXT) bits |= PACER_EXT_RISE; else bits |= PACER_INT; outw(bits, devpriv->control_status + ADCMUX_CONT); #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "comedi: sent 0x%x to adcmux control\n", bits); #endif /* load counters */ if (cmd->convert_src == TRIG_TIMER) cb_pcidas_load_counters(dev, &cmd->convert_arg, cmd->flags & TRIG_ROUND_MASK); else if (cmd->scan_begin_src == TRIG_TIMER) cb_pcidas_load_counters(dev, &cmd->scan_begin_arg, cmd->flags & TRIG_ROUND_MASK); /* set number of conversions */ if (cmd->stop_src == TRIG_COUNT) devpriv->count = cmd->chanlist_len * cmd->stop_arg; /* enable interrupts */ 
spin_lock_irqsave(&dev->spinlock, flags); devpriv->adc_fifo_bits |= INTE; devpriv->adc_fifo_bits &= ~INT_MASK; if (cmd->flags & TRIG_WAKE_EOS) { if (cmd->convert_src == TRIG_NOW && cmd->chanlist_len > 1) devpriv->adc_fifo_bits |= INT_EOS; /* interrupt end of burst */ else devpriv->adc_fifo_bits |= INT_FNE; /* interrupt fifo not empty */ } else { devpriv->adc_fifo_bits |= INT_FHF; /* interrupt fifo half full */ } #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "comedi: adc_fifo_bits are 0x%x\n", devpriv->adc_fifo_bits); #endif /* enable (and clear) interrupts */ outw(devpriv->adc_fifo_bits | EOAI | INT | LADFUL, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); /* set start trigger and burst mode */ bits = 0; if (cmd->start_src == TRIG_NOW) bits |= SW_TRIGGER; else if (cmd->start_src == TRIG_EXT) { bits |= EXT_TRIGGER | TGEN | XTRCL; if (thisboard->has_ai_trig_invert && (cmd->start_arg & CR_INVERT)) bits |= TGPOL; if (thisboard->has_ai_trig_gated && (cmd->start_arg & CR_EDGE)) bits |= TGSEL; } else { comedi_error(dev, "bug!"); return -1; } if (cmd->convert_src == TRIG_NOW && cmd->chanlist_len > 1) bits |= BURSTE; outw(bits, devpriv->control_status + TRIG_CONTSTAT); #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "comedi: sent 0x%x to trig control\n", bits); #endif return 0; } static int cb_pcidas_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; /* cmdtest tests a particular command to see if it is valid. * Using the cmdtest ioctl, a user can create a valid cmd * and then have it executes by the cmd ioctl. * * cmdtest returns 1,2,3,4 or 0, depending on which tests * the command passes. 
*/ /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_INT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_TIMER | TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_NOW; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ if (cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_EXT) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_src == TRIG_TIMER) { if (cmd->scan_begin_arg < thisboard->ao_scan_speed) { cmd->scan_begin_arg = thisboard->ao_scan_speed; err++; } } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_NONE) { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; /* step 4: fix up any arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { tmp = cmd->scan_begin_arg; i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->ao_divisor1), &(devpriv->ao_divisor2), &(cmd->scan_begin_arg), cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->scan_begin_arg) err++; } if (err) return 4; /* check channel/gain list against card's limitations */ if (cmd->chanlist && cmd->chanlist_len > 1) { if (CR_CHAN(cmd->chanlist[0]) != 0 || CR_CHAN(cmd->chanlist[1]) != 1) { comedi_error(dev, "channels must be ordered channel 0, channel 1 in chanlist\n"); err++; } } if (err) return 5; return 
0; } static int cb_pcidas_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int i; unsigned long flags; /* set channel limits, gain */ spin_lock_irqsave(&dev->spinlock, flags); for (i = 0; i < cmd->chanlist_len; i++) { /* enable channel */ devpriv->ao_control_bits |= DAC_CHAN_EN(CR_CHAN(cmd->chanlist[i])); /* set range */ devpriv->ao_control_bits |= DAC_RANGE(CR_CHAN(cmd->chanlist[i]), CR_RANGE(cmd-> chanlist[i])); } /* disable analog out before settings pacer source and count values */ outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR); spin_unlock_irqrestore(&dev->spinlock, flags); /* clear fifo */ outw(0, devpriv->ao_registers + DACFIFOCLR); /* load counters */ if (cmd->scan_begin_src == TRIG_TIMER) { i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->ao_divisor1), &(devpriv->ao_divisor2), &(cmd->scan_begin_arg), cmd->flags); /* Write the values of ctr1 and ctr2 into counters 1 and 2 */ i8254_load(devpriv->pacer_counter_dio + DAC8254, 0, 1, devpriv->ao_divisor1, 2); i8254_load(devpriv->pacer_counter_dio + DAC8254, 0, 2, devpriv->ao_divisor2, 2); } /* set number of conversions */ if (cmd->stop_src == TRIG_COUNT) devpriv->ao_count = cmd->chanlist_len * cmd->stop_arg; /* set pacer source */ spin_lock_irqsave(&dev->spinlock, flags); switch (cmd->scan_begin_src) { case TRIG_TIMER: devpriv->ao_control_bits |= DAC_PACER_INT; break; case TRIG_EXT: devpriv->ao_control_bits |= DAC_PACER_EXT_RISE; break; default: spin_unlock_irqrestore(&dev->spinlock, flags); comedi_error(dev, "error setting dac pacer source"); return -1; break; } spin_unlock_irqrestore(&dev->spinlock, flags); async->inttrig = cb_pcidas_ao_inttrig; return 0; } static int cb_pcidas_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trig_num) { unsigned int num_bytes, num_points = thisboard->fifo_size; struct comedi_async *async = s->async; struct comedi_cmd *cmd = 
&s->async->cmd; unsigned long flags; if (trig_num != 0) return -EINVAL; /* load up fifo */ if (cmd->stop_src == TRIG_COUNT && devpriv->ao_count < num_points) num_points = devpriv->ao_count; num_bytes = cfc_read_array_from_buffer(s, devpriv->ao_buffer, num_points * sizeof(short)); num_points = num_bytes / sizeof(short); if (cmd->stop_src == TRIG_COUNT) devpriv->ao_count -= num_points; /* write data to board's fifo */ outsw(devpriv->ao_registers + DACDATA, devpriv->ao_buffer, num_bytes); /* enable dac half-full and empty interrupts */ spin_lock_irqsave(&dev->spinlock, flags); devpriv->adc_fifo_bits |= DAEMIE | DAHFIE; #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "comedi: adc_fifo_bits are 0x%x\n", devpriv->adc_fifo_bits); #endif /* enable and clear interrupts */ outw(devpriv->adc_fifo_bits | DAEMI | DAHFI, devpriv->control_status + INT_ADCFIFO); /* start dac */ devpriv->ao_control_bits |= DAC_START | DACEN | DAC_EMPTY; outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR); #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "comedi: sent 0x%x to dac control\n", devpriv->ao_control_bits); #endif spin_unlock_irqrestore(&dev->spinlock, flags); async->inttrig = NULL; return 0; } static irqreturn_t cb_pcidas_interrupt(int irq, void *d) { struct comedi_device *dev = (struct comedi_device *)d; struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async; int status, s5933_status; int half_fifo = thisboard->fifo_size / 2; unsigned int num_samples, i; static const int timeout = 10000; unsigned long flags; if (dev->attached == 0) return IRQ_NONE; async = s->async; async->events = 0; s5933_status = inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR); #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "intcsr 0x%x\n", s5933_status); dev_dbg(dev->hw_dev, "mbef 0x%x\n", inl(devpriv->s5933_config + AMCC_OP_REG_MBEF)); #endif if ((INTCSR_INTR_ASSERTED & s5933_status) == 0) return IRQ_NONE; /* make sure mailbox 4 is empty */ inl_p(devpriv->s5933_config + AMCC_OP_REG_IMB4); /* 
clear interrupt on amcc s5933 */ outl(devpriv->s5933_intcsr_bits | INTCSR_INBOX_INTR_STATUS, devpriv->s5933_config + AMCC_OP_REG_INTCSR); status = inw(devpriv->control_status + INT_ADCFIFO); #ifdef CB_PCIDAS_DEBUG if ((status & (INT | EOAI | LADFUL | DAHFI | DAEMI)) == 0) comedi_error(dev, "spurious interrupt"); #endif /* check for analog output interrupt */ if (status & (DAHFI | DAEMI)) handle_ao_interrupt(dev, status); /* check for analog input interrupts */ /* if fifo half-full */ if (status & ADHFI) { /* read data */ num_samples = half_fifo; if (async->cmd.stop_src == TRIG_COUNT && num_samples > devpriv->count) { num_samples = devpriv->count; } insw(devpriv->adc_fifo + ADCDATA, devpriv->ai_buffer, num_samples); cfc_write_array_to_buffer(s, devpriv->ai_buffer, num_samples * sizeof(short)); devpriv->count -= num_samples; if (async->cmd.stop_src == TRIG_COUNT && devpriv->count == 0) { async->events |= COMEDI_CB_EOA; cb_pcidas_cancel(dev, s); } /* clear half-full interrupt latch */ spin_lock_irqsave(&dev->spinlock, flags); outw(devpriv->adc_fifo_bits | INT, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); /* else if fifo not empty */ } else if (status & (ADNEI | EOBI)) { for (i = 0; i < timeout; i++) { /* break if fifo is empty */ if ((ADNE & inw(devpriv->control_status + INT_ADCFIFO)) == 0) break; cfc_write_to_buffer(s, inw(devpriv->adc_fifo)); if (async->cmd.stop_src == TRIG_COUNT && --devpriv->count == 0) { /* end of acquisition */ cb_pcidas_cancel(dev, s); async->events |= COMEDI_CB_EOA; break; } } /* clear not-empty interrupt latch */ spin_lock_irqsave(&dev->spinlock, flags); outw(devpriv->adc_fifo_bits | INT, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); } else if (status & EOAI) { comedi_error(dev, "bug! 
encountered end of acquisition interrupt?"); /* clear EOA interrupt latch */ spin_lock_irqsave(&dev->spinlock, flags); outw(devpriv->adc_fifo_bits | EOAI, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); } /* check for fifo overflow */ if (status & LADFUL) { comedi_error(dev, "fifo overflow"); /* clear overflow interrupt latch */ spin_lock_irqsave(&dev->spinlock, flags); outw(devpriv->adc_fifo_bits | LADFUL, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); cb_pcidas_cancel(dev, s); async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; } comedi_event(dev, s); return IRQ_HANDLED; } static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status) { struct comedi_subdevice *s = dev->write_subdev; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int half_fifo = thisboard->fifo_size / 2; unsigned int num_points; unsigned long flags; async->events = 0; if (status & DAEMI) { /* clear dac empty interrupt latch */ spin_lock_irqsave(&dev->spinlock, flags); outw(devpriv->adc_fifo_bits | DAEMI, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); if (inw(devpriv->ao_registers + DAC_CSR) & DAC_EMPTY) { if (cmd->stop_src == TRIG_NONE || (cmd->stop_src == TRIG_COUNT && devpriv->ao_count)) { comedi_error(dev, "dac fifo underflow"); cb_pcidas_ao_cancel(dev, s); async->events |= COMEDI_CB_ERROR; } async->events |= COMEDI_CB_EOA; } } else if (status & DAHFI) { unsigned int num_bytes; /* figure out how many points we are writing to fifo */ num_points = half_fifo; if (cmd->stop_src == TRIG_COUNT && devpriv->ao_count < num_points) num_points = devpriv->ao_count; num_bytes = cfc_read_array_from_buffer(s, devpriv->ao_buffer, num_points * sizeof(short)); num_points = num_bytes / sizeof(short); if (async->cmd.stop_src == TRIG_COUNT) devpriv->ao_count -= num_points; /* write data to board's fifo */ outsw(devpriv->ao_registers + DACDATA, 
devpriv->ao_buffer, num_points); /* clear half-full interrupt latch */ spin_lock_irqsave(&dev->spinlock, flags); outw(devpriv->adc_fifo_bits | DAHFI, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); } comedi_event(dev, s); } /* cancel analog input command */ static int cb_pcidas_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; spin_lock_irqsave(&dev->spinlock, flags); /* disable interrupts */ devpriv->adc_fifo_bits &= ~INTE & ~EOAIE; outw(devpriv->adc_fifo_bits, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); /* disable start trigger source and burst mode */ outw(0, devpriv->control_status + TRIG_CONTSTAT); /* software pacer source */ outw(0, devpriv->control_status + ADCMUX_CONT); return 0; } /* cancel analog output command */ static int cb_pcidas_ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; spin_lock_irqsave(&dev->spinlock, flags); /* disable interrupts */ devpriv->adc_fifo_bits &= ~DAHFIE & ~DAEMIE; outw(devpriv->adc_fifo_bits, devpriv->control_status + INT_ADCFIFO); /* disable output */ devpriv->ao_control_bits &= ~DACEN & ~DAC_PACER_MASK; outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR); spin_unlock_irqrestore(&dev->spinlock, flags); return 0; } static void cb_pcidas_load_counters(struct comedi_device *dev, unsigned int *ns, int rounding_flags) { i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1), &(devpriv->divisor2), ns, rounding_flags & TRIG_ROUND_MASK); /* Write the values of ctr1 and ctr2 into counters 1 and 2 */ i8254_load(devpriv->pacer_counter_dio + ADC8254, 0, 1, devpriv->divisor1, 2); i8254_load(devpriv->pacer_counter_dio + ADC8254, 0, 2, devpriv->divisor2, 2); } static void write_calibration_bitstream(struct comedi_device *dev, unsigned int register_bits, unsigned int bitstream, unsigned int bitstream_length) { static const int write_delay = 1; unsigned int bit; for 
(bit = 1 << (bitstream_length - 1); bit; bit >>= 1) { if (bitstream & bit) register_bits |= SERIAL_DATA_IN_BIT; else register_bits &= ~SERIAL_DATA_IN_BIT; udelay(write_delay); outw(register_bits, devpriv->control_status + CALIBRATION_REG); } } static int caldac_8800_write(struct comedi_device *dev, unsigned int address, uint8_t value) { static const int num_caldac_channels = 8; static const int bitstream_length = 11; unsigned int bitstream = ((address & 0x7) << 8) | value; static const int caldac_8800_udelay = 1; if (address >= num_caldac_channels) { comedi_error(dev, "illegal caldac channel"); return -1; } if (value == devpriv->caldac_value[address]) return 1; devpriv->caldac_value[address] = value; write_calibration_bitstream(dev, cal_enable_bits(dev), bitstream, bitstream_length); udelay(caldac_8800_udelay); outw(cal_enable_bits(dev) | SELECT_8800_BIT, devpriv->control_status + CALIBRATION_REG); udelay(caldac_8800_udelay); outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG); return 1; } static int trimpot_7376_write(struct comedi_device *dev, uint8_t value) { static const int bitstream_length = 7; unsigned int bitstream = value & 0x7f; unsigned int register_bits; static const int ad7376_udelay = 1; register_bits = cal_enable_bits(dev) | SELECT_TRIMPOT_BIT; udelay(ad7376_udelay); outw(register_bits, devpriv->control_status + CALIBRATION_REG); write_calibration_bitstream(dev, register_bits, bitstream, bitstream_length); udelay(ad7376_udelay); outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG); return 0; } /* For 1602/16 only * ch 0 : adc gain * ch 1 : adc postgain offset */ static int trimpot_8402_write(struct comedi_device *dev, unsigned int channel, uint8_t value) { static const int bitstream_length = 10; unsigned int bitstream = ((channel & 0x3) << 8) | (value & 0xff); unsigned int register_bits; static const int ad8402_udelay = 1; register_bits = cal_enable_bits(dev) | SELECT_TRIMPOT_BIT; udelay(ad8402_udelay); 
outw(register_bits, devpriv->control_status + CALIBRATION_REG); write_calibration_bitstream(dev, register_bits, bitstream, bitstream_length); udelay(ad8402_udelay); outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG); return 0; } static int wait_for_nvram_ready(unsigned long s5933_base_addr) { static const int timeout = 1000; unsigned int i; for (i = 0; i < timeout; i++) { if ((inb(s5933_base_addr + AMCC_OP_REG_MCSR_NVCMD) & MCSR_NV_BUSY) == 0) return 0; udelay(1); } return -1; } static int nvram_read(struct comedi_device *dev, unsigned int address, uint8_t *data) { unsigned long iobase = devpriv->s5933_config; if (wait_for_nvram_ready(iobase) < 0) return -ETIMEDOUT; outb(MCSR_NV_ENABLE | MCSR_NV_LOAD_LOW_ADDR, iobase + AMCC_OP_REG_MCSR_NVCMD); outb(address & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA); outb(MCSR_NV_ENABLE | MCSR_NV_LOAD_HIGH_ADDR, iobase + AMCC_OP_REG_MCSR_NVCMD); outb((address >> 8) & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA); outb(MCSR_NV_ENABLE | MCSR_NV_READ, iobase + AMCC_OP_REG_MCSR_NVCMD); if (wait_for_nvram_ready(iobase) < 0) return -ETIMEDOUT; *data = inb(iobase + AMCC_OP_REG_MCSR_NVDATA); return 0; } /* * A convenient macro that defines init_module() and cleanup_module(), * as necessary. 
*/ static int __devinit driver_cb_pcidas_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_cb_pcidas.driver_name); } static void __devexit driver_cb_pcidas_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_cb_pcidas_pci_driver = { .id_table = cb_pcidas_pci_table, .probe = &driver_cb_pcidas_pci_probe, .remove = __devexit_p(&driver_cb_pcidas_pci_remove) }; static int __init driver_cb_pcidas_init_module(void) { int retval; retval = comedi_driver_register(&driver_cb_pcidas); if (retval < 0) return retval; driver_cb_pcidas_pci_driver.name = (char *)driver_cb_pcidas.driver_name; return pci_register_driver(&driver_cb_pcidas_pci_driver); } static void __exit driver_cb_pcidas_cleanup_module(void) { pci_unregister_driver(&driver_cb_pcidas_pci_driver); comedi_driver_unregister(&driver_cb_pcidas); } module_init(driver_cb_pcidas_init_module); module_exit(driver_cb_pcidas_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
mahound/bricked-S7275-kernel
drivers/input/keyboard/tc3589x-keypad.c
4896
11893
/* * Copyright (C) ST-Ericsson SA 2010 * * Author: Jayeeta Banerjee <jayeeta.banerjee@stericsson.com> * Author: Sundar Iyer <sundar.iyer@stericsson.com> * * License Terms: GNU General Public License, version 2 * * TC35893 MFD Keypad Controller driver */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/platform_device.h> #include <linux/input/matrix_keypad.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/mfd/tc3589x.h> /* Maximum supported keypad matrix row/columns size */ #define TC3589x_MAX_KPROW 8 #define TC3589x_MAX_KPCOL 12 /* keypad related Constants */ #define TC3589x_MAX_DEBOUNCE_SETTLE 0xFF #define DEDICATED_KEY_VAL 0xFF /* Pull up/down masks */ #define TC3589x_NO_PULL_MASK 0x0 #define TC3589x_PULL_DOWN_MASK 0x1 #define TC3589x_PULL_UP_MASK 0x2 #define TC3589x_PULLUP_ALL_MASK 0xAA #define TC3589x_IO_PULL_VAL(index, mask) ((mask)<<((index)%4)*2)) /* Bit masks for IOCFG register */ #define IOCFG_BALLCFG 0x01 #define IOCFG_IG 0x08 #define KP_EVCODE_COL_MASK 0x0F #define KP_EVCODE_ROW_MASK 0x70 #define KP_RELEASE_EVT_MASK 0x80 #define KP_ROW_SHIFT 4 #define KP_NO_VALID_KEY_MASK 0x7F /* bit masks for RESTCTRL register */ #define TC3589x_KBDRST 0x2 #define TC3589x_IRQRST 0x10 #define TC3589x_RESET_ALL 0x1B /* KBDMFS register bit mask */ #define TC3589x_KBDMFS_EN 0x1 /* CLKEN register bitmask */ #define KPD_CLK_EN 0x1 /* RSTINTCLR register bit mask */ #define IRQ_CLEAR 0x1 /* bit masks for keyboard interrupts*/ #define TC3589x_EVT_LOSS_INT 0x8 #define TC3589x_EVT_INT 0x4 #define TC3589x_KBD_LOSS_INT 0x2 #define TC3589x_KBD_INT 0x1 /* bit masks for keyboard interrupt clear*/ #define TC3589x_EVT_INT_CLR 0x2 #define TC3589x_KBD_INT_CLR 0x1 #define TC3589x_KBD_KEYMAP_SIZE 64 /** * struct tc_keypad - data structure used by keypad driver * @tc3589x: pointer to tc35893 * @input: pointer to input device object * @board: keypad platform device * @krow: number of rows * @kcol: number of 
coloumns * @keymap: matrix scan code table for keycodes * @keypad_stopped: holds keypad status */ struct tc_keypad { struct tc3589x *tc3589x; struct input_dev *input; const struct tc3589x_keypad_platform_data *board; unsigned int krow; unsigned int kcol; unsigned short keymap[TC3589x_KBD_KEYMAP_SIZE]; bool keypad_stopped; }; static int tc3589x_keypad_init_key_hardware(struct tc_keypad *keypad) { int ret; struct tc3589x *tc3589x = keypad->tc3589x; u8 settle_time = keypad->board->settle_time; u8 dbounce_period = keypad->board->debounce_period; u8 rows = keypad->board->krow & 0xf; /* mask out the nibble */ u8 column = keypad->board->kcol & 0xf; /* mask out the nibble */ /* validate platform configurations */ if (keypad->board->kcol > TC3589x_MAX_KPCOL || keypad->board->krow > TC3589x_MAX_KPROW || keypad->board->debounce_period > TC3589x_MAX_DEBOUNCE_SETTLE || keypad->board->settle_time > TC3589x_MAX_DEBOUNCE_SETTLE) return -EINVAL; /* configure KBDSIZE 4 LSbits for cols and 4 MSbits for rows */ ret = tc3589x_reg_write(tc3589x, TC3589x_KBDSIZE, (rows << KP_ROW_SHIFT) | column); if (ret < 0) return ret; /* configure dedicated key config, no dedicated key selected */ ret = tc3589x_reg_write(tc3589x, TC3589x_KBCFG_LSB, DEDICATED_KEY_VAL); if (ret < 0) return ret; ret = tc3589x_reg_write(tc3589x, TC3589x_KBCFG_MSB, DEDICATED_KEY_VAL); if (ret < 0) return ret; /* Configure settle time */ ret = tc3589x_reg_write(tc3589x, TC3589x_KBDSETTLE_REG, settle_time); if (ret < 0) return ret; /* Configure debounce time */ ret = tc3589x_reg_write(tc3589x, TC3589x_KBDBOUNCE, dbounce_period); if (ret < 0) return ret; /* Start of initialise keypad GPIOs */ ret = tc3589x_set_bits(tc3589x, TC3589x_IOCFG, 0x0, IOCFG_IG); if (ret < 0) return ret; /* Configure pull-up resistors for all row GPIOs */ ret = tc3589x_reg_write(tc3589x, TC3589x_IOPULLCFG0_LSB, TC3589x_PULLUP_ALL_MASK); if (ret < 0) return ret; ret = tc3589x_reg_write(tc3589x, TC3589x_IOPULLCFG0_MSB, TC3589x_PULLUP_ALL_MASK); if (ret 
< 0) return ret; /* Configure pull-up resistors for all column GPIOs */ ret = tc3589x_reg_write(tc3589x, TC3589x_IOPULLCFG1_LSB, TC3589x_PULLUP_ALL_MASK); if (ret < 0) return ret; ret = tc3589x_reg_write(tc3589x, TC3589x_IOPULLCFG1_MSB, TC3589x_PULLUP_ALL_MASK); if (ret < 0) return ret; ret = tc3589x_reg_write(tc3589x, TC3589x_IOPULLCFG2_LSB, TC3589x_PULLUP_ALL_MASK); return ret; } #define TC35893_DATA_REGS 4 #define TC35893_KEYCODE_FIFO_EMPTY 0x7f #define TC35893_KEYCODE_FIFO_CLEAR 0xff #define TC35893_KEYPAD_ROW_SHIFT 0x3 static irqreturn_t tc3589x_keypad_irq(int irq, void *dev) { struct tc_keypad *keypad = dev; struct tc3589x *tc3589x = keypad->tc3589x; u8 i, row_index, col_index, kbd_code, up; u8 code; for (i = 0; i < TC35893_DATA_REGS * 2; i++) { kbd_code = tc3589x_reg_read(tc3589x, TC3589x_EVTCODE_FIFO); /* loop till fifo is empty and no more keys are pressed */ if (kbd_code == TC35893_KEYCODE_FIFO_EMPTY || kbd_code == TC35893_KEYCODE_FIFO_CLEAR) continue; /* valid key is found */ col_index = kbd_code & KP_EVCODE_COL_MASK; row_index = (kbd_code & KP_EVCODE_ROW_MASK) >> KP_ROW_SHIFT; code = MATRIX_SCAN_CODE(row_index, col_index, TC35893_KEYPAD_ROW_SHIFT); up = kbd_code & KP_RELEASE_EVT_MASK; input_event(keypad->input, EV_MSC, MSC_SCAN, code); input_report_key(keypad->input, keypad->keymap[code], !up); input_sync(keypad->input); } /* clear IRQ */ tc3589x_set_bits(tc3589x, TC3589x_KBDIC, 0x0, TC3589x_EVT_INT_CLR | TC3589x_KBD_INT_CLR); /* enable IRQ */ tc3589x_set_bits(tc3589x, TC3589x_KBDMSK, 0x0, TC3589x_EVT_LOSS_INT | TC3589x_EVT_INT); return IRQ_HANDLED; } static int tc3589x_keypad_enable(struct tc_keypad *keypad) { struct tc3589x *tc3589x = keypad->tc3589x; int ret; /* pull the keypad module out of reset */ ret = tc3589x_set_bits(tc3589x, TC3589x_RSTCTRL, TC3589x_KBDRST, 0x0); if (ret < 0) return ret; /* configure KBDMFS */ ret = tc3589x_set_bits(tc3589x, TC3589x_KBDMFS, 0x0, TC3589x_KBDMFS_EN); if (ret < 0) return ret; /* enable the keypad clock */ ret = 
/*
 * NOTE(review): this hunk begins inside tc3589x_keypad_enable(); the
 * function header and earlier statements (including the declaration of
 * 'ret') are above this view.  The first call below is checked through
 * 'ret' without an assignment here -- presumably the original line is
 * 'ret = tc3589x_set_bits(...)' with the assignment cut off at the
 * chunk boundary; confirm against the full file.
 */
	tc3589x_set_bits(tc3589x, TC3589x_CLKEN, 0x0, KPD_CLK_EN);
	if (ret < 0)
		return ret;

	/* clear pending IRQs */
	ret = tc3589x_set_bits(tc3589x, TC3589x_RSTINTCLR, 0x0, 0x1);
	if (ret < 0)
		return ret;

	/* enable the IRQs */
	ret = tc3589x_set_bits(tc3589x, TC3589x_KBDMSK, 0x0,
			       TC3589x_EVT_LOSS_INT | TC3589x_EVT_INT);
	if (ret < 0)
		return ret;

	keypad->keypad_stopped = false;

	return ret;
}

/*
 * Power the keypad block down: ack pending events, mask the keypad
 * interrupt sources, gate the keypad clock and hold the module in
 * reset.  Mirror image of the enable path above.
 * Returns 0 on success or a negative error from the register accessors.
 */
static int tc3589x_keypad_disable(struct tc_keypad *keypad)
{
	struct tc3589x *tc3589x = keypad->tc3589x;
	int ret;

	/* clear IRQ */
	ret = tc3589x_set_bits(tc3589x, TC3589x_KBDIC, 0x0,
			       TC3589x_EVT_INT_CLR | TC3589x_KBD_INT_CLR);
	if (ret < 0)
		return ret;

	/* disable all interrupts */
	ret = tc3589x_set_bits(tc3589x, TC3589x_KBDMSK,
			       ~(TC3589x_EVT_LOSS_INT | TC3589x_EVT_INT), 0x0);
	if (ret < 0)
		return ret;

	/* disable the keypad module */
	ret = tc3589x_set_bits(tc3589x, TC3589x_CLKEN, 0x1, 0x0);
	if (ret < 0)
		return ret;

	/* put the keypad module into reset */
	ret = tc3589x_set_bits(tc3589x, TC3589x_RSTCTRL, TC3589x_KBDRST, 0x1);

	keypad->keypad_stopped = true;

	return ret;
}

/*
 * input_dev open() callback: power up the keypad and program the scan
 * hardware.  The keypad is only powered while a user holds the device
 * open.
 */
static int tc3589x_keypad_open(struct input_dev *input)
{
	int error;
	struct tc_keypad *keypad = input_get_drvdata(input);

	/* enable the keypad module */
	error = tc3589x_keypad_enable(keypad);
	if (error < 0) {
		dev_err(&input->dev, "failed to enable keypad module\n");
		return error;
	}

	error = tc3589x_keypad_init_key_hardware(keypad);
	if (error < 0) {
		dev_err(&input->dev, "failed to configure keypad module\n");
		return error;
	}

	return 0;
}

/* input_dev close() callback: counterpart of open(), powers the keypad
 * down again. */
static void tc3589x_keypad_close(struct input_dev *input)
{
	struct tc_keypad *keypad = input_get_drvdata(input);

	/* disable the keypad module */
	tc3589x_keypad_disable(keypad);
}

/*
 * Bind the keypad function of the TC3589x MFD: allocate driver state
 * and an input device, build the keymap from platform data, request the
 * threaded keypad IRQ and register the input device.  Power-up itself
 * is deferred to the input core's open() callback.
 */
static int __devinit tc3589x_keypad_probe(struct platform_device *pdev)
{
	struct tc3589x *tc3589x = dev_get_drvdata(pdev->dev.parent);
	struct tc_keypad *keypad;
	struct input_dev *input;
	const struct tc3589x_keypad_platform_data *plat;
	int error, irq;

	plat = tc3589x->pdata->keypad;
	if (!plat) {
		dev_err(&pdev->dev, "invalid keypad platform data\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	keypad = kzalloc(sizeof(struct tc_keypad), GFP_KERNEL);
	input = input_allocate_device();
	if (!keypad || !input) {
		dev_err(&pdev->dev, "failed to allocate keypad memory\n");
		error = -ENOMEM;
		goto err_free_mem;
	}

	keypad->board = plat;
	keypad->input = input;
	keypad->tc3589x = tc3589x;

	input->id.bustype = BUS_I2C;
	input->name = pdev->name;
	input->dev.parent = &pdev->dev;

	input->keycode = keypad->keymap;
	input->keycodesize = sizeof(keypad->keymap[0]);
	input->keycodemax = ARRAY_SIZE(keypad->keymap);

	input->open = tc3589x_keypad_open;
	input->close = tc3589x_keypad_close;

	input_set_drvdata(input, keypad);

	input_set_capability(input, EV_MSC, MSC_SCAN);

	__set_bit(EV_KEY, input->evbit);
	if (!plat->no_autorepeat)
		__set_bit(EV_REP, input->evbit);

	matrix_keypad_build_keymap(plat->keymap_data, 0x3,
				   input->keycode, input->keybit);

	/* hard IRQ handler is NULL: the chip sits on a slow bus, so the
	 * event FIFO must be drained from thread context */
	error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq,
				     plat->irqtype, "tc3589x-keypad", keypad);
	if (error < 0) {
		dev_err(&pdev->dev,
			"Could not allocate irq %d,error %d\n",
			irq, error);
		goto err_free_mem;
	}

	error = input_register_device(input);
	if (error) {
		dev_err(&pdev->dev, "Could not register input device\n");
		goto err_free_irq;
	}

	/* let platform decide if keypad is a wakeup source or not */
	device_init_wakeup(&pdev->dev, plat->enable_wakeup);
	device_set_wakeup_capable(&pdev->dev, plat->enable_wakeup);

	platform_set_drvdata(pdev, keypad);

	return 0;

err_free_irq:
	free_irq(irq, keypad);
err_free_mem:
	/* input_free_device(NULL) and kfree(NULL) are both safe, so this
	 * label also covers the partial-allocation case */
	input_free_device(input);
	kfree(keypad);
	return error;
}

/* Unbind: make sure the keypad is powered down, then release the IRQ,
 * the input device and the driver state. */
static int __devexit tc3589x_keypad_remove(struct platform_device *pdev)
{
	struct tc_keypad *keypad = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	if (!keypad->keypad_stopped)
		tc3589x_keypad_disable(keypad);

	free_irq(irq, keypad);

	input_unregister_device(keypad->input);

	kfree(keypad);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: if the keypad may wake the system, keep it running
 * and arm its IRQ for wakeup; otherwise power it down to save power.
 */
static int tc3589x_keypad_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tc_keypad *keypad = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	/* keypad is already off; we do nothing */
	if (keypad->keypad_stopped)
		return 0;

	/* if device is not a wakeup source, disable it for powersave */
	if (!device_may_wakeup(&pdev->dev))
		tc3589x_keypad_disable(keypad);
	else
		enable_irq_wake(irq);

	return 0;
}

/* System resume: undo whatever tc3589x_keypad_suspend() did. */
static int tc3589x_keypad_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tc_keypad *keypad = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	if (!keypad->keypad_stopped)
		return 0;

	/* enable the device to resume normal operations */
	if (!device_may_wakeup(&pdev->dev))
		tc3589x_keypad_enable(keypad);
	else
		disable_irq_wake(irq);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(tc3589x_keypad_dev_pm_ops,
			 tc3589x_keypad_suspend, tc3589x_keypad_resume);

static struct platform_driver tc3589x_keypad_driver = {
	.driver	= {
		.name	= "tc3589x-keypad",
		.owner	= THIS_MODULE,
		.pm	= &tc3589x_keypad_dev_pm_ops,
	},
	.probe	= tc3589x_keypad_probe,
	.remove	= __devexit_p(tc3589x_keypad_remove),
};
module_platform_driver(tc3589x_keypad_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jayeeta Banerjee/Sundar Iyer");
MODULE_DESCRIPTION("TC35893 Keypad Driver");
MODULE_ALIAS("platform:tc3589x-keypad");
gpl-2.0
ak-67/kernel_mediatek_wiko
drivers/staging/comedi/drivers/adl_pci8164.c
4896
11579
/*
    comedi/drivers/adl_pci8164.c

    Hardware comedi driver for PCI-8164 Adlink card
    Copyright (C) 2004 Michel Lachine <mike@mikelachaine.ca>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: adl_pci8164
Description: Driver for the Adlink PCI-8164 4 Axes Motion Control board
Devices: [ADLink] PCI-8164 (adl_pci8164)
Author: Michel Lachaine <mike@mikelachaine.ca>
Status: experimental
Updated: Mon, 14 Apr 2008 15:10:32 +0100

Configuration Options:
  [0] - PCI bus of device (optional)
  [1] - PCI slot of device (optional)
  If bus/slot is not specified, the first supported
  PCI device found will be used.
*/

#include "../comedidev.h"
#include <linux/kernel.h>
#include <linux/delay.h>
#include "comedi_fc.h"
#include "comedi_pci.h"
#include "8253.h"

/* per-axis register bank offsets from the I/O base */
#define PCI8164_AXIS_X  0x00
#define PCI8164_AXIS_Y  0x08
#define PCI8164_AXIS_Z  0x10
#define PCI8164_AXIS_U  0x18

/* read-side registers within an axis bank */
#define PCI8164_MSTS	0x00
#define PCI8164_SSTS	0x02
#define PCI8164_BUF0	0x04
#define PCI8164_BUF1	0x06

/* write-side registers within an axis bank */
#define PCI8164_CMD	0x00
#define PCI8164_OTP	0x02

#define PCI_DEVICE_ID_PCI8164 0x8164

static DEFINE_PCI_DEVICE_TABLE(adl_pci8164_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI8164) },
	{0}
};

MODULE_DEVICE_TABLE(pci, adl_pci8164_pci_table);

struct adl_pci8164_private {
	int data;			/* scratch word (unused) */
	struct pci_dev *pci_dev;	/* enabled PCI device; put in detach */
};

#define devpriv ((struct adl_pci8164_private *)dev->private)

static int adl_pci8164_attach(struct comedi_device *dev,
			      struct comedi_devconfig *it);
static int adl_pci8164_detach(struct comedi_device *dev);
static struct comedi_driver driver_adl_pci8164 = {
	.driver_name = "adl_pci8164",
	.module = THIS_MODULE,
	.attach = adl_pci8164_attach,
	.detach = adl_pci8164_detach,
};

static int adl_pci8164_insn_read_msts(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data);

static int adl_pci8164_insn_read_ssts(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data);

static int adl_pci8164_insn_read_buf0(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data);

static int adl_pci8164_insn_read_buf1(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data);

static int adl_pci8164_insn_write_cmd(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data);

static int adl_pci8164_insn_write_otp(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data);

static int adl_pci8164_insn_write_buf0(struct comedi_device *dev,
				       struct comedi_subdevice *s,
				       struct comedi_insn *insn,
				       unsigned int *data);

static int adl_pci8164_insn_write_buf1(struct comedi_device *dev,
				       struct comedi_subdevice *s,
				       struct comedi_insn *insn,
				       unsigned int *data);

/*
 * Find and enable the (optionally bus/slot-selected) PCI-8164, then
 * expose four PROC subdevices, one per register pair (MSTS/CMD,
 * SSTS/OTP, BUF0/BUF0, BUF1/BUF1).  Returns 1 on success, negative
 * errno on failure.
 */
static int adl_pci8164_attach(struct comedi_device *dev,
			      struct comedi_devconfig *it)
{
	struct pci_dev *pcidev = NULL;
	struct comedi_subdevice *s;
	int bus, slot;

	printk(KERN_INFO "comedi: attempt to attach...\n");
	printk(KERN_INFO "comedi%d: adl_pci8164\n", dev->minor);

	dev->board_name = "pci8164";
	bus = it->options[0];
	slot = it->options[1];

	if (alloc_private(dev, sizeof(struct adl_pci8164_private)) < 0)
		return -ENOMEM;

	if (alloc_subdevices(dev, 4) < 0)
		return -ENOMEM;

	for_each_pci_dev(pcidev) {
		if (pcidev->vendor == PCI_VENDOR_ID_ADLINK &&
		    pcidev->device == PCI_DEVICE_ID_PCI8164) {
			if (bus || slot) {
				/* requested particular bus/slot */
				if (pcidev->bus->number != bus ||
				    PCI_SLOT(pcidev->devfn) != slot)
					continue;
			}
			devpriv->pci_dev = pcidev;
			if (comedi_pci_enable(pcidev, "adl_pci8164") < 0) {
				printk(KERN_ERR "comedi%d: Failed to enable "
				       "PCI device and request regions\n",
				       dev->minor);
				return -EIO;
			}
			dev->iobase = pci_resource_start(pcidev, 2);
			printk(KERN_DEBUG "comedi: base addr %4lx\n",
			       dev->iobase);

			s = dev->subdevices + 0;
			s->type = COMEDI_SUBD_PROC;
			s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
			s->n_chan = 4;
			s->maxdata = 0xffff;
			s->len_chanlist = 4;
			/* s->range_table = &range_axis; */
			s->insn_read = adl_pci8164_insn_read_msts;
			s->insn_write = adl_pci8164_insn_write_cmd;

			s = dev->subdevices + 1;
			s->type = COMEDI_SUBD_PROC;
			s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
			s->n_chan = 4;
			s->maxdata = 0xffff;
			s->len_chanlist = 4;
			/* s->range_table = &range_axis; */
			s->insn_read = adl_pci8164_insn_read_ssts;
			s->insn_write = adl_pci8164_insn_write_otp;

			s = dev->subdevices + 2;
			s->type = COMEDI_SUBD_PROC;
			s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
			s->n_chan = 4;
			s->maxdata = 0xffff;
			s->len_chanlist = 4;
			/* s->range_table = &range_axis; */
			s->insn_read = adl_pci8164_insn_read_buf0;
			s->insn_write = adl_pci8164_insn_write_buf0;

			s = dev->subdevices + 3;
			s->type = COMEDI_SUBD_PROC;
			s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
			s->n_chan = 4;
			s->maxdata = 0xffff;
			s->len_chanlist = 4;
			/* s->range_table = &range_axis; */
			s->insn_read = adl_pci8164_insn_read_buf1;
			s->insn_write = adl_pci8164_insn_write_buf1;

			printk(KERN_INFO "comedi: attached\n");

			return 1;
		}
	}

	printk(KERN_ERR "comedi%d: no supported board found!"
	       "(req. bus/slot : %d/%d)\n", dev->minor, bus, slot);
	return -EIO;
}

/* Undo attach: release PCI regions and drop the device reference taken
 * by for_each_pci_dev(). */
static int adl_pci8164_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: pci8164: remove\n", dev->minor);

	if (devpriv && devpriv->pci_dev) {
		if (dev->iobase)
			comedi_pci_disable(devpriv->pci_dev);
		pci_dev_put(devpriv->pci_dev);
	}

	return 0;
}

/*
 all the read commands are the same except for the addition a constant
 * const to the data for inw()
 */
static void adl_pci8164_insn_read(struct comedi_device *dev,
				  struct comedi_subdevice *s,
				  struct comedi_insn *insn,
				  unsigned int *data,
				  char *action, unsigned short offset)
{
	int axis, axis_reg;

	char *axisname;

	axis = CR_CHAN(insn->chanspec);

	switch (axis) {
	case 0:
		axis_reg = PCI8164_AXIS_X;
		axisname = "X";
		break;
	case 1:
		axis_reg = PCI8164_AXIS_Y;
		axisname = "Y";
		break;
	case 2:
		axis_reg = PCI8164_AXIS_Z;
		axisname = "Z";
		break;
	case 3:
		axis_reg = PCI8164_AXIS_U;
		axisname = "U";
		break;
	default:
		/* out-of-range channel falls back to axis X */
		axis_reg = PCI8164_AXIS_X;
		axisname = "X";
		break;
	}

	data[0] = inw(dev->iobase + axis_reg + offset);
	/* Only data[0] is written by this helper; the original debug
	 * message also printed data[1], leaking an uninitialized word
	 * from the comedi data buffer into the kernel log. */
	printk(KERN_DEBUG "comedi: pci8164 %s read -> "
	       "%04X on axis %s\n", action, data[0], axisname);
}

static int adl_pci8164_insn_read_msts(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data)
{
	adl_pci8164_insn_read(dev, s, insn, data, "MSTS", PCI8164_MSTS);
	return 2;
}

static int adl_pci8164_insn_read_ssts(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data)
{
	adl_pci8164_insn_read(dev, s, insn, data, "SSTS", PCI8164_SSTS);
	return 2;
}

static int adl_pci8164_insn_read_buf0(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data)
{
	adl_pci8164_insn_read(dev, s, insn, data, "BUF0", PCI8164_BUF0);
	return 2;
}

static int adl_pci8164_insn_read_buf1(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data)
{
	adl_pci8164_insn_read(dev, s, insn, data, "BUF1", PCI8164_BUF1);
	return 2;
}

/*
 all the write commands are the same except for the addition a constant
 * const to the data for outw()
 */
static void adl_pci8164_insn_out(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn,
				 unsigned int *data,
				 char *action, unsigned short offset)
{
	unsigned int axis, axis_reg;

	char *axisname;

	axis = CR_CHAN(insn->chanspec);

	switch (axis) {
	case 0:
		axis_reg = PCI8164_AXIS_X;
		axisname = "X";
		break;
	case 1:
		axis_reg = PCI8164_AXIS_Y;
		axisname = "Y";
		break;
	case 2:
		axis_reg = PCI8164_AXIS_Z;
		axisname = "Z";
		break;
	case 3:
		axis_reg = PCI8164_AXIS_U;
		axisname = "U";
		break;
	default:
		/* out-of-range channel falls back to axis X */
		axis_reg = PCI8164_AXIS_X;
		axisname = "X";
		break;
	}

	outw(data[0], dev->iobase + axis_reg + offset);
	/* Only data[0] is consumed; data[1] may not even exist when
	 * insn->n == 1, so do not touch it (the original code printed
	 * it, reading past the valid data). */
	printk(KERN_DEBUG "comedi: pci8164 %s write -> "
	       "%04X on axis %s\n", action, data[0], axisname);
}

static int adl_pci8164_insn_write_cmd(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data)
{
	adl_pci8164_insn_out(dev, s, insn, data, "CMD", PCI8164_CMD);
	return 2;
}

static int adl_pci8164_insn_write_otp(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data)
{
	adl_pci8164_insn_out(dev, s, insn, data, "OTP", PCI8164_OTP);
	return 2;
}

static int adl_pci8164_insn_write_buf0(struct comedi_device *dev,
				       struct comedi_subdevice *s,
				       struct comedi_insn *insn,
				       unsigned int *data)
{
	adl_pci8164_insn_out(dev, s, insn, data, "BUF0", PCI8164_BUF0);
	return 2;
}

static int adl_pci8164_insn_write_buf1(struct comedi_device *dev,
				       struct comedi_subdevice *s,
				       struct comedi_insn *insn,
				       unsigned int *data)
{
	adl_pci8164_insn_out(dev, s, insn, data, "BUF1", PCI8164_BUF1);
	return 2;
}

/* PCI core glue: auto-configure/unconfigure through the comedi core. */
static int __devinit driver_adl_pci8164_pci_probe(struct pci_dev *dev,
						  const struct pci_device_id
						  *ent)
{
	return comedi_pci_auto_config(dev, driver_adl_pci8164.driver_name);
}

static void __devexit driver_adl_pci8164_pci_remove(struct pci_dev *dev)
{
	comedi_pci_auto_unconfig(dev);
}

static struct pci_driver driver_adl_pci8164_pci_driver = {
	.id_table = adl_pci8164_pci_table,
	.probe = &driver_adl_pci8164_pci_probe,
	.remove = __devexit_p(&driver_adl_pci8164_pci_remove)
};

static int __init driver_adl_pci8164_init_module(void)
{
	int retval;

	retval = comedi_driver_register(&driver_adl_pci8164);
	if (retval < 0)
		return retval;

	driver_adl_pci8164_pci_driver.name =
	    (char *)driver_adl_pci8164.driver_name;
	return pci_register_driver(&driver_adl_pci8164_pci_driver);
}

static void __exit driver_adl_pci8164_cleanup_module(void)
{
	pci_unregister_driver(&driver_adl_pci8164_pci_driver);
	comedi_driver_unregister(&driver_adl_pci8164);
}

module_init(driver_adl_pci8164_init_module);
module_exit(driver_adl_pci8164_cleanup_module);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
ZdrowyGosciu/kernel_d802
sound/pci/sis7019.c
4896
41069
/* * Driver for SiS7019 Audio Accelerator * * Copyright (C) 2004-2007, David Dillow * Written by David Dillow <dave@thedillows.org> * Inspired by the Trident 4D-WaveDX/NX driver. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/pci.h> #include <linux/time.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <sound/core.h> #include <sound/ac97_codec.h> #include <sound/initval.h> #include "sis7019.h" MODULE_AUTHOR("David Dillow <dave@thedillows.org>"); MODULE_DESCRIPTION("SiS7019"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}"); static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */ static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ static bool enable = 1; static int codecs = 1; module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator."); module_param(enable, bool, 0444); MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator."); module_param(codecs, int, 0444); MODULE_PARM_DESC(codecs, "Set bit to indicate that codec number is expected to be present (default 1)"); static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) }, { 0, } }; 
MODULE_DEVICE_TABLE(pci, snd_sis7019_ids); /* There are three timing modes for the voices. * * For both playback and capture, when the buffer is one or two periods long, * we use the hardware's built-in Mid-Loop Interrupt and End-Loop Interrupt * to let us know when the periods have ended. * * When performing playback with more than two periods per buffer, we set * the "Stop Sample Offset" and tell the hardware to interrupt us when we * reach it. We then update the offset and continue on until we are * interrupted for the next period. * * Capture channels do not have a SSO, so we allocate a playback channel to * use as a timer for the capture periods. We use the SSO on the playback * channel to clock out virtual periods, and adjust the virtual period length * to maintain synchronization. This algorithm came from the Trident driver. * * FIXME: It'd be nice to make use of some of the synth features in the * hardware, but a woeful lack of documentation is a significant roadblock. */ struct voice { u16 flags; #define VOICE_IN_USE 1 #define VOICE_CAPTURE 2 #define VOICE_SSO_TIMING 4 #define VOICE_SYNC_TIMING 8 u16 sync_cso; u16 period_size; u16 buffer_size; u16 sync_period_size; u16 sync_buffer_size; u32 sso; u32 vperiod; struct snd_pcm_substream *substream; struct voice *timing; void __iomem *ctrl_base; void __iomem *wave_base; void __iomem *sync_base; int num; }; /* We need four pages to store our wave parameters during a suspend. If * we're not doing power management, we still need to allocate a page * for the silence buffer. 
*/ #ifdef CONFIG_PM #define SIS_SUSPEND_PAGES 4 #else #define SIS_SUSPEND_PAGES 1 #endif struct sis7019 { unsigned long ioport; void __iomem *ioaddr; int irq; int codecs_present; struct pci_dev *pci; struct snd_pcm *pcm; struct snd_card *card; struct snd_ac97 *ac97[3]; /* Protect against more than one thread hitting the AC97 * registers (in a more polite manner than pounding the hardware * semaphore) */ struct mutex ac97_mutex; /* voice_lock protects allocation/freeing of the voice descriptions */ spinlock_t voice_lock; struct voice voices[64]; struct voice capture_voice; /* Allocate pages to store the internal wave state during * suspends. When we're operating, this can be used as a silence * buffer for a timing channel. */ void *suspend_state[SIS_SUSPEND_PAGES]; int silence_users; dma_addr_t silence_dma_addr; }; /* These values are also used by the module param 'codecs' to indicate * which codecs should be present. */ #define SIS_PRIMARY_CODEC_PRESENT 0x0001 #define SIS_SECONDARY_CODEC_PRESENT 0x0002 #define SIS_TERTIARY_CODEC_PRESENT 0x0004 /* The HW offset parameters (Loop End, Stop Sample, End Sample) have a * documented range of 8-0xfff8 samples. Given that they are 0-based, * that places our period/buffer range at 9-0xfff9 samples. That makes the * max buffer size 0xfff9 samples * 2 channels * 2 bytes per sample, and * max samples / min samples gives us the max periods in a buffer. * * We'll add a constraint upon open that limits the period and buffer sample * size to values that are legal for the hardware. 
*/ static struct snd_pcm_hardware sis_playback_hw_info = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_SYNC_START | SNDRV_PCM_INFO_RESUME), .formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE), .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_CONTINUOUS, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (0xfff9 * 4), .period_bytes_min = 9, .period_bytes_max = (0xfff9 * 4), .periods_min = 1, .periods_max = (0xfff9 / 9), }; static struct snd_pcm_hardware sis_capture_hw_info = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_SYNC_START | SNDRV_PCM_INFO_RESUME), .formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE), .rates = SNDRV_PCM_RATE_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (0xfff9 * 4), .period_bytes_min = 9, .period_bytes_max = (0xfff9 * 4), .periods_min = 1, .periods_max = (0xfff9 / 9), }; static void sis_update_sso(struct voice *voice, u16 period) { void __iomem *base = voice->ctrl_base; voice->sso += period; if (voice->sso >= voice->buffer_size) voice->sso -= voice->buffer_size; /* Enforce the documented hardware minimum offset */ if (voice->sso < 8) voice->sso = 8; /* The SSO is in the upper 16 bits of the register. */ writew(voice->sso & 0xffff, base + SIS_PLAY_DMA_SSO_ESO + 2); } static void sis_update_voice(struct voice *voice) { if (voice->flags & VOICE_SSO_TIMING) { sis_update_sso(voice, voice->period_size); } else if (voice->flags & VOICE_SYNC_TIMING) { int sync; /* If we've not hit the end of the virtual period, update * our records and keep going. 
*/ if (voice->vperiod > voice->period_size) { voice->vperiod -= voice->period_size; if (voice->vperiod < voice->period_size) sis_update_sso(voice, voice->vperiod); else sis_update_sso(voice, voice->period_size); return; } /* Calculate our relative offset between the target and * the actual CSO value. Since we're operating in a loop, * if the value is more than half way around, we can * consider ourselves wrapped. */ sync = voice->sync_cso; sync -= readw(voice->sync_base + SIS_CAPTURE_DMA_FORMAT_CSO); if (sync > (voice->sync_buffer_size / 2)) sync -= voice->sync_buffer_size; /* If sync is positive, then we interrupted too early, and * we'll need to come back in a few samples and try again. * There's a minimum wait, as it takes some time for the DMA * engine to startup, etc... */ if (sync > 0) { if (sync < 16) sync = 16; sis_update_sso(voice, sync); return; } /* Ok, we interrupted right on time, or (hopefully) just * a bit late. We'll adjst our next waiting period based * on how close we got. * * We need to stay just behind the actual channel to ensure * it really is past a period when we get our interrupt -- * otherwise we'll fall into the early code above and have * a minimum wait time, which makes us quite late here, * eating into the user's time to refresh the buffer, esp. * if using small periods. * * If we're less than 9 samples behind, we're on target. * Otherwise, shorten the next vperiod by the amount we've * been delayed. 
*/ if (sync > -9) voice->vperiod = voice->sync_period_size + 1; else voice->vperiod = voice->sync_period_size + sync + 10; if (voice->vperiod < voice->buffer_size) { sis_update_sso(voice, voice->vperiod); voice->vperiod = 0; } else sis_update_sso(voice, voice->period_size); sync = voice->sync_cso + voice->sync_period_size; if (sync >= voice->sync_buffer_size) sync -= voice->sync_buffer_size; voice->sync_cso = sync; } snd_pcm_period_elapsed(voice->substream); } static void sis_voice_irq(u32 status, struct voice *voice) { int bit; while (status) { bit = __ffs(status); status >>= bit + 1; voice += bit; sis_update_voice(voice); voice++; } } static irqreturn_t sis_interrupt(int irq, void *dev) { struct sis7019 *sis = dev; unsigned long io = sis->ioport; struct voice *voice; u32 intr, status; /* We only use the DMA interrupts, and we don't enable any other * source of interrupts. But, it is possible to see an interrupt * status that didn't actually interrupt us, so eliminate anything * we're not expecting to avoid falsely claiming an IRQ, and an * ensuing endless loop. */ intr = inl(io + SIS_GISR); intr &= SIS_GISR_AUDIO_PLAY_DMA_IRQ_STATUS | SIS_GISR_AUDIO_RECORD_DMA_IRQ_STATUS; if (!intr) return IRQ_NONE; do { status = inl(io + SIS_PISR_A); if (status) { sis_voice_irq(status, sis->voices); outl(status, io + SIS_PISR_A); } status = inl(io + SIS_PISR_B); if (status) { sis_voice_irq(status, &sis->voices[32]); outl(status, io + SIS_PISR_B); } status = inl(io + SIS_RISR); if (status) { voice = &sis->capture_voice; if (!voice->timing) snd_pcm_period_elapsed(voice->substream); outl(status, io + SIS_RISR); } outl(intr, io + SIS_GISR); intr = inl(io + SIS_GISR); intr &= SIS_GISR_AUDIO_PLAY_DMA_IRQ_STATUS | SIS_GISR_AUDIO_RECORD_DMA_IRQ_STATUS; } while (intr); return IRQ_HANDLED; } static u32 sis_rate_to_delta(unsigned int rate) { u32 delta; /* This was copied from the trident driver, but it seems its gotten * around a bit... nevertheless, it works well. 
* * We special case 44100 and 8000 since rounding with the equation * does not give us an accurate enough value. For 11025 and 22050 * the equation gives us the best answer. All other frequencies will * also use the equation. JDW */ if (rate == 44100) delta = 0xeb3; else if (rate == 8000) delta = 0x2ab; else if (rate == 48000) delta = 0x1000; else delta = (((rate << 12) + 24000) / 48000) & 0x0000ffff; return delta; } static void __sis_map_silence(struct sis7019 *sis) { /* Helper function: must hold sis->voice_lock on entry */ if (!sis->silence_users) sis->silence_dma_addr = pci_map_single(sis->pci, sis->suspend_state[0], 4096, PCI_DMA_TODEVICE); sis->silence_users++; } static void __sis_unmap_silence(struct sis7019 *sis) { /* Helper function: must hold sis->voice_lock on entry */ sis->silence_users--; if (!sis->silence_users) pci_unmap_single(sis->pci, sis->silence_dma_addr, 4096, PCI_DMA_TODEVICE); } static void sis_free_voice(struct sis7019 *sis, struct voice *voice) { unsigned long flags; spin_lock_irqsave(&sis->voice_lock, flags); if (voice->timing) { __sis_unmap_silence(sis); voice->timing->flags &= ~(VOICE_IN_USE | VOICE_SSO_TIMING | VOICE_SYNC_TIMING); voice->timing = NULL; } voice->flags &= ~(VOICE_IN_USE | VOICE_SSO_TIMING | VOICE_SYNC_TIMING); spin_unlock_irqrestore(&sis->voice_lock, flags); } static struct voice *__sis_alloc_playback_voice(struct sis7019 *sis) { /* Must hold the voice_lock on entry */ struct voice *voice; int i; for (i = 0; i < 64; i++) { voice = &sis->voices[i]; if (voice->flags & VOICE_IN_USE) continue; voice->flags |= VOICE_IN_USE; goto found_one; } voice = NULL; found_one: return voice; } static struct voice *sis_alloc_playback_voice(struct sis7019 *sis) { struct voice *voice; unsigned long flags; spin_lock_irqsave(&sis->voice_lock, flags); voice = __sis_alloc_playback_voice(sis); spin_unlock_irqrestore(&sis->voice_lock, flags); return voice; } static int sis_alloc_timing_voice(struct snd_pcm_substream *substream, struct 
snd_pcm_hw_params *hw_params) { struct sis7019 *sis = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct voice *voice = runtime->private_data; unsigned int period_size, buffer_size; unsigned long flags; int needed; /* If there are one or two periods per buffer, we don't need a * timing voice, as we can use the capture channel's interrupts * to clock out the periods. */ period_size = params_period_size(hw_params); buffer_size = params_buffer_size(hw_params); needed = (period_size != buffer_size && period_size != (buffer_size / 2)); if (needed && !voice->timing) { spin_lock_irqsave(&sis->voice_lock, flags); voice->timing = __sis_alloc_playback_voice(sis); if (voice->timing) __sis_map_silence(sis); spin_unlock_irqrestore(&sis->voice_lock, flags); if (!voice->timing) return -ENOMEM; voice->timing->substream = substream; } else if (!needed && voice->timing) { sis_free_voice(sis, voice); voice->timing = NULL; } return 0; } static int sis_playback_open(struct snd_pcm_substream *substream) { struct sis7019 *sis = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct voice *voice; voice = sis_alloc_playback_voice(sis); if (!voice) return -EAGAIN; voice->substream = substream; runtime->private_data = voice; runtime->hw = sis_playback_hw_info; snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 9, 0xfff9); snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 9, 0xfff9); snd_pcm_set_sync(substream); return 0; } static int sis_substream_close(struct snd_pcm_substream *substream) { struct sis7019 *sis = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct voice *voice = runtime->private_data; sis_free_voice(sis, voice); return 0; } static int sis_playback_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } 
/* Release the DMA buffer allocated in hw_params; shared by playback
 * and capture streams.
 */
static int sis_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}

/* Program the playback DMA channel and wave engine registers from the
 * negotiated runtime parameters (format, channels, buffer/period layout,
 * rate). Always returns 0.
 */
static int sis_pcm_playback_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct voice *voice = runtime->private_data;
	void __iomem *ctrl_base = voice->ctrl_base;
	void __iomem *wave_base = voice->wave_base;
	u32 format, dma_addr, control, sso_eso, delta, reg;
	u16 leo;

	/* We rely on the PCM core to ensure that the parameters for this
	 * substream do not change on us while we're programming the HW.
	 */
	format = 0;
	if (snd_pcm_format_width(runtime->format) == 8)
		format |= SIS_PLAY_DMA_FORMAT_8BIT;
	if (!snd_pcm_format_signed(runtime->format))
		format |= SIS_PLAY_DMA_FORMAT_UNSIGNED;
	if (runtime->channels == 1)
		format |= SIS_PLAY_DMA_FORMAT_MONO;

	/* The baseline setup is for a single period per buffer, and
	 * we add bells and whistles as needed from there.
	 */
	dma_addr = runtime->dma_addr;
	leo = runtime->buffer_size - 1;
	control = leo | SIS_PLAY_DMA_LOOP | SIS_PLAY_DMA_INTR_AT_LEO;
	sso_eso = leo;

	if (runtime->period_size == (runtime->buffer_size / 2)) {
		/* Two periods per buffer: also interrupt at the midpoint. */
		control |= SIS_PLAY_DMA_INTR_AT_MLP;
	} else if (runtime->period_size != runtime->buffer_size) {
		/* More than two periods: switch to SSO-based timing and
		 * interrupt at the SSO point instead of the buffer end.
		 */
		voice->flags |= VOICE_SSO_TIMING;
		voice->sso = runtime->period_size - 1;
		voice->period_size = runtime->period_size;
		voice->buffer_size = runtime->buffer_size;

		control &= ~SIS_PLAY_DMA_INTR_AT_LEO;
		control |= SIS_PLAY_DMA_INTR_AT_SSO;
		sso_eso |= (runtime->period_size - 1) << 16;
	}

	delta = sis_rate_to_delta(runtime->rate);

	/* Ok, we're ready to go, set up the channel.
*/ writel(format, ctrl_base + SIS_PLAY_DMA_FORMAT_CSO); writel(dma_addr, ctrl_base + SIS_PLAY_DMA_BASE); writel(control, ctrl_base + SIS_PLAY_DMA_CONTROL); writel(sso_eso, ctrl_base + SIS_PLAY_DMA_SSO_ESO); for (reg = 0; reg < SIS_WAVE_SIZE; reg += 4) writel(0, wave_base + reg); writel(SIS_WAVE_GENERAL_WAVE_VOLUME, wave_base + SIS_WAVE_GENERAL); writel(delta << 16, wave_base + SIS_WAVE_GENERAL_ARTICULATION); writel(SIS_WAVE_CHANNEL_CONTROL_FIRST_SAMPLE | SIS_WAVE_CHANNEL_CONTROL_AMP_ENABLE | SIS_WAVE_CHANNEL_CONTROL_INTERPOLATE_ENABLE, wave_base + SIS_WAVE_CHANNEL_CONTROL); /* Force PCI writes to post. */ readl(ctrl_base); return 0; } static int sis_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct sis7019 *sis = snd_pcm_substream_chip(substream); unsigned long io = sis->ioport; struct snd_pcm_substream *s; struct voice *voice; void *chip; int starting; u32 record = 0; u32 play[2] = { 0, 0 }; /* No locks needed, as the PCM core will hold the locks on the * substreams, and the HW will only start/stop the indicated voices * without changing the state of the others. */ switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: case SNDRV_PCM_TRIGGER_RESUME: starting = 1; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_SUSPEND: starting = 0; break; default: return -EINVAL; } snd_pcm_group_for_each_entry(s, substream) { /* Make sure it is for us... */ chip = snd_pcm_substream_chip(s); if (chip != sis) continue; voice = s->runtime->private_data; if (voice->flags & VOICE_CAPTURE) { record |= 1 << voice->num; voice = voice->timing; } /* voice could be NULL if this a recording stream, and it * doesn't have an external timing channel. 
*/ if (voice) play[voice->num / 32] |= 1 << (voice->num & 0x1f); snd_pcm_trigger_done(s, substream); } if (starting) { if (record) outl(record, io + SIS_RECORD_START_REG); if (play[0]) outl(play[0], io + SIS_PLAY_START_A_REG); if (play[1]) outl(play[1], io + SIS_PLAY_START_B_REG); } else { if (record) outl(record, io + SIS_RECORD_STOP_REG); if (play[0]) outl(play[0], io + SIS_PLAY_STOP_A_REG); if (play[1]) outl(play[1], io + SIS_PLAY_STOP_B_REG); } return 0; } static snd_pcm_uframes_t sis_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct voice *voice = runtime->private_data; u32 cso; cso = readl(voice->ctrl_base + SIS_PLAY_DMA_FORMAT_CSO); cso &= 0xffff; return cso; } static int sis_capture_open(struct snd_pcm_substream *substream) { struct sis7019 *sis = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct voice *voice = &sis->capture_voice; unsigned long flags; /* FIXME: The driver only supports recording from one channel * at the moment, but it could support more. 
*/ spin_lock_irqsave(&sis->voice_lock, flags); if (voice->flags & VOICE_IN_USE) voice = NULL; else voice->flags |= VOICE_IN_USE; spin_unlock_irqrestore(&sis->voice_lock, flags); if (!voice) return -EAGAIN; voice->substream = substream; runtime->private_data = voice; runtime->hw = sis_capture_hw_info; runtime->hw.rates = sis->ac97[0]->rates[AC97_RATES_ADC]; snd_pcm_limit_hw_rates(runtime); snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 9, 0xfff9); snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 9, 0xfff9); snd_pcm_set_sync(substream); return 0; } static int sis_capture_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct sis7019 *sis = snd_pcm_substream_chip(substream); int rc; rc = snd_ac97_set_rate(sis->ac97[0], AC97_PCM_LR_ADC_RATE, params_rate(hw_params)); if (rc) goto out; rc = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (rc < 0) goto out; rc = sis_alloc_timing_voice(substream, hw_params); out: return rc; } static void sis_prepare_timing_voice(struct voice *voice, struct snd_pcm_substream *substream) { struct sis7019 *sis = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct voice *timing = voice->timing; void __iomem *play_base = timing->ctrl_base; void __iomem *wave_base = timing->wave_base; u16 buffer_size, period_size; u32 format, control, sso_eso, delta; u32 vperiod, sso, reg; /* Set our initial buffer and period as large as we can given a * single page of silence. */ buffer_size = 4096 / runtime->channels; buffer_size /= snd_pcm_format_size(runtime->format, 1); period_size = buffer_size; /* Initially, we want to interrupt just a bit behind the end of * the period we're clocking out. 12 samples seems to give a good * delay. 
* * We want to spread our interrupts throughout the virtual period, * so that we don't end up with two interrupts back to back at the * end -- this helps minimize the effects of any jitter. Adjust our * clocking period size so that the last period is at least a fourth * of a full period. * * This is all moot if we don't need to use virtual periods. */ vperiod = runtime->period_size + 12; if (vperiod > period_size) { u16 tail = vperiod % period_size; u16 quarter_period = period_size / 4; if (tail && tail < quarter_period) { u16 loops = vperiod / period_size; tail = quarter_period - tail; tail += loops - 1; tail /= loops; period_size -= tail; } sso = period_size - 1; } else { /* The initial period will fit inside the buffer, so we * don't need to use virtual periods -- disable them. */ period_size = runtime->period_size; sso = vperiod - 1; vperiod = 0; } /* The interrupt handler implements the timing synchronization, so * setup its state. */ timing->flags |= VOICE_SYNC_TIMING; timing->sync_base = voice->ctrl_base; timing->sync_cso = runtime->period_size; timing->sync_period_size = runtime->period_size; timing->sync_buffer_size = runtime->buffer_size; timing->period_size = period_size; timing->buffer_size = buffer_size; timing->sso = sso; timing->vperiod = vperiod; /* Using unsigned samples with the all-zero silence buffer * forces the output to the lower rail, killing playback. * So ignore unsigned vs signed -- it doesn't change the timing. */ format = 0; if (snd_pcm_format_width(runtime->format) == 8) format = SIS_CAPTURE_DMA_FORMAT_8BIT; if (runtime->channels == 1) format |= SIS_CAPTURE_DMA_FORMAT_MONO; control = timing->buffer_size - 1; control |= SIS_PLAY_DMA_LOOP | SIS_PLAY_DMA_INTR_AT_SSO; sso_eso = timing->buffer_size - 1; sso_eso |= timing->sso << 16; delta = sis_rate_to_delta(runtime->rate); /* We've done the math, now configure the channel. 
*/ writel(format, play_base + SIS_PLAY_DMA_FORMAT_CSO); writel(sis->silence_dma_addr, play_base + SIS_PLAY_DMA_BASE); writel(control, play_base + SIS_PLAY_DMA_CONTROL); writel(sso_eso, play_base + SIS_PLAY_DMA_SSO_ESO); for (reg = 0; reg < SIS_WAVE_SIZE; reg += 4) writel(0, wave_base + reg); writel(SIS_WAVE_GENERAL_WAVE_VOLUME, wave_base + SIS_WAVE_GENERAL); writel(delta << 16, wave_base + SIS_WAVE_GENERAL_ARTICULATION); writel(SIS_WAVE_CHANNEL_CONTROL_FIRST_SAMPLE | SIS_WAVE_CHANNEL_CONTROL_AMP_ENABLE | SIS_WAVE_CHANNEL_CONTROL_INTERPOLATE_ENABLE, wave_base + SIS_WAVE_CHANNEL_CONTROL); } static int sis_pcm_capture_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct voice *voice = runtime->private_data; void __iomem *rec_base = voice->ctrl_base; u32 format, dma_addr, control; u16 leo; /* We rely on the PCM core to ensure that the parameters for this * substream do not change on us while we're programming the HW. */ format = 0; if (snd_pcm_format_width(runtime->format) == 8) format = SIS_CAPTURE_DMA_FORMAT_8BIT; if (!snd_pcm_format_signed(runtime->format)) format |= SIS_CAPTURE_DMA_FORMAT_UNSIGNED; if (runtime->channels == 1) format |= SIS_CAPTURE_DMA_FORMAT_MONO; dma_addr = runtime->dma_addr; leo = runtime->buffer_size - 1; control = leo | SIS_CAPTURE_DMA_LOOP; /* If we've got more than two periods per buffer, then we have * use a timing voice to clock out the periods. Otherwise, we can * use the capture channel's interrupts. */ if (voice->timing) { sis_prepare_timing_voice(voice, substream); } else { control |= SIS_CAPTURE_DMA_INTR_AT_LEO; if (runtime->period_size != runtime->buffer_size) control |= SIS_CAPTURE_DMA_INTR_AT_MLP; } writel(format, rec_base + SIS_CAPTURE_DMA_FORMAT_CSO); writel(dma_addr, rec_base + SIS_CAPTURE_DMA_BASE); writel(control, rec_base + SIS_CAPTURE_DMA_CONTROL); /* Force the writes to post. 
*/ readl(rec_base); return 0; } static struct snd_pcm_ops sis_playback_ops = { .open = sis_playback_open, .close = sis_substream_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = sis_playback_hw_params, .hw_free = sis_hw_free, .prepare = sis_pcm_playback_prepare, .trigger = sis_pcm_trigger, .pointer = sis_pcm_pointer, }; static struct snd_pcm_ops sis_capture_ops = { .open = sis_capture_open, .close = sis_substream_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = sis_capture_hw_params, .hw_free = sis_hw_free, .prepare = sis_pcm_capture_prepare, .trigger = sis_pcm_trigger, .pointer = sis_pcm_pointer, }; static int __devinit sis_pcm_create(struct sis7019 *sis) { struct snd_pcm *pcm; int rc; /* We have 64 voices, and the driver currently records from * only one channel, though that could change in the future. */ rc = snd_pcm_new(sis->card, "SiS7019", 0, 64, 1, &pcm); if (rc) return rc; pcm->private_data = sis; strcpy(pcm->name, "SiS7019"); sis->pcm = pcm; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &sis_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &sis_capture_ops); /* Try to preallocate some memory, but it's not the end of the * world if this fails. */ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(sis->pci), 64*1024, 128*1024); return 0; } static unsigned short sis_ac97_rw(struct sis7019 *sis, int codec, u32 cmd) { unsigned long io = sis->ioport; unsigned short val = 0xffff; u16 status; u16 rdy; int count; static const u16 codec_ready[3] = { SIS_AC97_STATUS_CODEC_READY, SIS_AC97_STATUS_CODEC2_READY, SIS_AC97_STATUS_CODEC3_READY, }; rdy = codec_ready[codec]; /* Get the AC97 semaphore -- software first, so we don't spin * pounding out IO reads on the hardware semaphore... */ mutex_lock(&sis->ac97_mutex); count = 0xffff; while ((inw(io + SIS_AC97_SEMA) & SIS_AC97_SEMA_BUSY) && --count) udelay(1); if (!count) goto timeout; /* ... and wait for any outstanding commands to complete ... 
*/ count = 0xffff; do { status = inw(io + SIS_AC97_STATUS); if ((status & rdy) && !(status & SIS_AC97_STATUS_BUSY)) break; udelay(1); } while (--count); if (!count) goto timeout_sema; /* ... before sending our command and waiting for it to finish ... */ outl(cmd, io + SIS_AC97_CMD); udelay(10); count = 0xffff; while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count) udelay(1); /* ... and reading the results (if any). */ val = inl(io + SIS_AC97_CMD) >> 16; timeout_sema: outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA); timeout: mutex_unlock(&sis->ac97_mutex); if (!count) { dev_err(&sis->pci->dev, "ac97 codec %d timeout cmd 0x%08x\n", codec, cmd); } return val; } static void sis_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { static const u32 cmd[3] = { SIS_AC97_CMD_CODEC_WRITE, SIS_AC97_CMD_CODEC2_WRITE, SIS_AC97_CMD_CODEC3_WRITE, }; sis_ac97_rw(ac97->private_data, ac97->num, (val << 16) | (reg << 8) | cmd[ac97->num]); } static unsigned short sis_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { static const u32 cmd[3] = { SIS_AC97_CMD_CODEC_READ, SIS_AC97_CMD_CODEC2_READ, SIS_AC97_CMD_CODEC3_READ, }; return sis_ac97_rw(ac97->private_data, ac97->num, (reg << 8) | cmd[ac97->num]); } static int __devinit sis_mixer_create(struct sis7019 *sis) { struct snd_ac97_bus *bus; struct snd_ac97_template ac97; static struct snd_ac97_bus_ops ops = { .write = sis_ac97_write, .read = sis_ac97_read, }; int rc; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = sis; rc = snd_ac97_bus(sis->card, 0, &ops, NULL, &bus); if (!rc && sis->codecs_present & SIS_PRIMARY_CODEC_PRESENT) rc = snd_ac97_mixer(bus, &ac97, &sis->ac97[0]); ac97.num = 1; if (!rc && (sis->codecs_present & SIS_SECONDARY_CODEC_PRESENT)) rc = snd_ac97_mixer(bus, &ac97, &sis->ac97[1]); ac97.num = 2; if (!rc && (sis->codecs_present & SIS_TERTIARY_CODEC_PRESENT)) rc = snd_ac97_mixer(bus, &ac97, &sis->ac97[2]); /* If we return an error here, then snd_card_free() should * free up 
any ac97 codecs that got created, as well as the bus. */ return rc; } static void sis_free_suspend(struct sis7019 *sis) { int i; for (i = 0; i < SIS_SUSPEND_PAGES; i++) kfree(sis->suspend_state[i]); } static int sis_chip_free(struct sis7019 *sis) { /* Reset the chip, and disable all interrputs. */ outl(SIS_GCR_SOFTWARE_RESET, sis->ioport + SIS_GCR); udelay(25); outl(0, sis->ioport + SIS_GCR); outl(0, sis->ioport + SIS_GIER); /* Now, free everything we allocated. */ if (sis->irq >= 0) free_irq(sis->irq, sis); if (sis->ioaddr) iounmap(sis->ioaddr); pci_release_regions(sis->pci); pci_disable_device(sis->pci); sis_free_suspend(sis); return 0; } static int sis_dev_free(struct snd_device *dev) { struct sis7019 *sis = dev->device_data; return sis_chip_free(sis); } static int sis_chip_init(struct sis7019 *sis) { unsigned long io = sis->ioport; void __iomem *ioaddr = sis->ioaddr; unsigned long timeout; u16 status; int count; int i; /* Reset the audio controller */ outl(SIS_GCR_SOFTWARE_RESET, io + SIS_GCR); udelay(25); outl(0, io + SIS_GCR); /* Get the AC-link semaphore, and reset the codecs */ count = 0xffff; while ((inw(io + SIS_AC97_SEMA) & SIS_AC97_SEMA_BUSY) && --count) udelay(1); if (!count) return -EIO; outl(SIS_AC97_CMD_CODEC_COLD_RESET, io + SIS_AC97_CMD); udelay(250); count = 0xffff; while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count) udelay(1); /* Command complete, we can let go of the semaphore now. */ outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA); if (!count) return -EIO; /* Now that we've finished the reset, find out what's attached. * There are some codec/board combinations that take an extremely * long time to come up. 350+ ms has been observed in the field, * so we'll give them up to 500ms. 
*/ sis->codecs_present = 0; timeout = msecs_to_jiffies(500) + jiffies; while (time_before_eq(jiffies, timeout)) { status = inl(io + SIS_AC97_STATUS); if (status & SIS_AC97_STATUS_CODEC_READY) sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT; if (status & SIS_AC97_STATUS_CODEC2_READY) sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT; if (status & SIS_AC97_STATUS_CODEC3_READY) sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT; if (sis->codecs_present == codecs) break; msleep(1); } /* All done, check for errors. */ if (!sis->codecs_present) { dev_err(&sis->pci->dev, "could not find any codecs\n"); return -EIO; } if (sis->codecs_present != codecs) { dev_warn(&sis->pci->dev, "missing codecs, found %0x, expected %0x\n", sis->codecs_present, codecs); } /* Let the hardware know that the audio driver is alive, * and enable PCM slots on the AC-link for L/R playback (3 & 4) and * record channels. We're going to want to use Variable Rate Audio * for recording, to avoid needlessly resampling from 48kHZ. */ outl(SIS_AC97_CONF_AUDIO_ALIVE, io + SIS_AC97_CONF); outl(SIS_AC97_CONF_AUDIO_ALIVE | SIS_AC97_CONF_PCM_LR_ENABLE | SIS_AC97_CONF_PCM_CAP_MIC_ENABLE | SIS_AC97_CONF_PCM_CAP_LR_ENABLE | SIS_AC97_CONF_CODEC_VRA_ENABLE, io + SIS_AC97_CONF); /* All AC97 PCM slots should be sourced from sub-mixer 0. */ outl(0, io + SIS_AC97_PSR); /* There is only one valid DMA setup for a PCI environment. */ outl(SIS_DMA_CSR_PCI_SETTINGS, io + SIS_DMA_CSR); /* Reset the synchronization groups for all of the channels * to be asyncronous. If we start doing SPDIF or 5.1 sound, etc. * we'll need to change how we handle these. Until then, we just * assign sub-mixer 0 to all playback channels, and avoid any * attenuation on the audio. 
*/ outl(0, io + SIS_PLAY_SYNC_GROUP_A); outl(0, io + SIS_PLAY_SYNC_GROUP_B); outl(0, io + SIS_PLAY_SYNC_GROUP_C); outl(0, io + SIS_PLAY_SYNC_GROUP_D); outl(0, io + SIS_MIXER_SYNC_GROUP); for (i = 0; i < 64; i++) { writel(i, SIS_MIXER_START_ADDR(ioaddr, i)); writel(SIS_MIXER_RIGHT_NO_ATTEN | SIS_MIXER_LEFT_NO_ATTEN | SIS_MIXER_DEST_0, SIS_MIXER_ADDR(ioaddr, i)); } /* Don't attenuate any audio set for the wave amplifier. * * FIXME: Maximum attenuation is set for the music amp, which will * need to change if we start using the synth engine. */ outl(0xffff0000, io + SIS_WEVCR); /* Ensure that the wave engine is in normal operating mode. */ outl(0, io + SIS_WECCR); /* Go ahead and enable the DMA interrupts. They won't go live * until we start a channel. */ outl(SIS_GIER_AUDIO_PLAY_DMA_IRQ_ENABLE | SIS_GIER_AUDIO_RECORD_DMA_IRQ_ENABLE, io + SIS_GIER); return 0; } #ifdef CONFIG_PM static int sis_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct sis7019 *sis = card->private_data; void __iomem *ioaddr = sis->ioaddr; int i; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(sis->pcm); if (sis->codecs_present & SIS_PRIMARY_CODEC_PRESENT) snd_ac97_suspend(sis->ac97[0]); if (sis->codecs_present & SIS_SECONDARY_CODEC_PRESENT) snd_ac97_suspend(sis->ac97[1]); if (sis->codecs_present & SIS_TERTIARY_CODEC_PRESENT) snd_ac97_suspend(sis->ac97[2]); /* snd_pcm_suspend_all() stopped all channels, so we're quiescent. 
*/ if (sis->irq >= 0) { free_irq(sis->irq, sis); sis->irq = -1; } /* Save the internal state away */ for (i = 0; i < 4; i++) { memcpy_fromio(sis->suspend_state[i], ioaddr, 4096); ioaddr += 4096; } pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int sis_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct sis7019 *sis = card->private_data; void __iomem *ioaddr = sis->ioaddr; int i; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { dev_err(&pci->dev, "unable to re-enable device\n"); goto error; } if (sis_chip_init(sis)) { dev_err(&pci->dev, "unable to re-init controller\n"); goto error; } if (request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME, sis)) { dev_err(&pci->dev, "unable to regain IRQ %d\n", pci->irq); goto error; } /* Restore saved state, then clear out the page we use for the * silence buffer. */ for (i = 0; i < 4; i++) { memcpy_toio(ioaddr, sis->suspend_state[i], 4096); ioaddr += 4096; } memset(sis->suspend_state[0], 0, 4096); sis->irq = pci->irq; pci_set_master(pci); if (sis->codecs_present & SIS_PRIMARY_CODEC_PRESENT) snd_ac97_resume(sis->ac97[0]); if (sis->codecs_present & SIS_SECONDARY_CODEC_PRESENT) snd_ac97_resume(sis->ac97[1]); if (sis->codecs_present & SIS_TERTIARY_CODEC_PRESENT) snd_ac97_resume(sis->ac97[2]); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; error: snd_card_disconnect(card); return -EIO; } #endif /* CONFIG_PM */ static int sis_alloc_suspend(struct sis7019 *sis) { int i; /* We need 16K to store the internal wave engine state during a * suspend, but we don't need it to be contiguous, so play nice * with the memory system. We'll also use this area for a silence * buffer. 
*/ for (i = 0; i < SIS_SUSPEND_PAGES; i++) { sis->suspend_state[i] = kmalloc(4096, GFP_KERNEL); if (!sis->suspend_state[i]) return -ENOMEM; } memset(sis->suspend_state[0], 0, 4096); return 0; } static int __devinit sis_chip_create(struct snd_card *card, struct pci_dev *pci) { struct sis7019 *sis = card->private_data; struct voice *voice; static struct snd_device_ops ops = { .dev_free = sis_dev_free, }; int rc; int i; rc = pci_enable_device(pci); if (rc) goto error_out; if (pci_set_dma_mask(pci, DMA_BIT_MASK(30)) < 0) { dev_err(&pci->dev, "architecture does not support 30-bit PCI busmaster DMA"); goto error_out_enabled; } memset(sis, 0, sizeof(*sis)); mutex_init(&sis->ac97_mutex); spin_lock_init(&sis->voice_lock); sis->card = card; sis->pci = pci; sis->irq = -1; sis->ioport = pci_resource_start(pci, 0); rc = pci_request_regions(pci, "SiS7019"); if (rc) { dev_err(&pci->dev, "unable request regions\n"); goto error_out_enabled; } rc = -EIO; sis->ioaddr = ioremap_nocache(pci_resource_start(pci, 1), 0x4000); if (!sis->ioaddr) { dev_err(&pci->dev, "unable to remap MMIO, aborting\n"); goto error_out_cleanup; } rc = sis_alloc_suspend(sis); if (rc < 0) { dev_err(&pci->dev, "unable to allocate state storage\n"); goto error_out_cleanup; } rc = sis_chip_init(sis); if (rc) goto error_out_cleanup; if (request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME, sis)) { dev_err(&pci->dev, "unable to allocate irq %d\n", sis->irq); goto error_out_cleanup; } sis->irq = pci->irq; pci_set_master(pci); for (i = 0; i < 64; i++) { voice = &sis->voices[i]; voice->num = i; voice->ctrl_base = SIS_PLAY_DMA_ADDR(sis->ioaddr, i); voice->wave_base = SIS_WAVE_ADDR(sis->ioaddr, i); } voice = &sis->capture_voice; voice->flags = VOICE_CAPTURE; voice->num = SIS_CAPTURE_CHAN_AC97_PCM_IN; voice->ctrl_base = SIS_CAPTURE_DMA_ADDR(sis->ioaddr, voice->num); rc = snd_device_new(card, SNDRV_DEV_LOWLEVEL, sis, &ops); if (rc) goto error_out_cleanup; snd_card_set_dev(card, &pci->dev); return 0; 
error_out_cleanup: sis_chip_free(sis); error_out_enabled: pci_disable_device(pci); error_out: return rc; } static int __devinit snd_sis7019_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct snd_card *card; struct sis7019 *sis; int rc; rc = -ENOENT; if (!enable) goto error_out; /* The user can specify which codecs should be present so that we * can wait for them to show up if they are slow to recover from * the AC97 cold reset. We default to a single codec, the primary. * * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2. */ codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT | SIS_TERTIARY_CODEC_PRESENT; if (!codecs) codecs = SIS_PRIMARY_CODEC_PRESENT; rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card); if (rc < 0) goto error_out; strcpy(card->driver, "SiS7019"); strcpy(card->shortname, "SiS7019"); rc = sis_chip_create(card, pci); if (rc) goto card_error_out; sis = card->private_data; rc = sis_mixer_create(sis); if (rc) goto card_error_out; rc = sis_pcm_create(sis); if (rc) goto card_error_out; snprintf(card->longname, sizeof(card->longname), "%s Audio Accelerator with %s at 0x%lx, irq %d", card->shortname, snd_ac97_get_short_name(sis->ac97[0]), sis->ioport, sis->irq); rc = snd_card_register(card); if (rc) goto card_error_out; pci_set_drvdata(pci, card); return 0; card_error_out: snd_card_free(card); error_out: return rc; } static void __devexit snd_sis7019_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver sis7019_driver = { .name = KBUILD_MODNAME, .id_table = snd_sis7019_ids, .probe = snd_sis7019_probe, .remove = __devexit_p(snd_sis7019_remove), #ifdef CONFIG_PM .suspend = sis_suspend, .resume = sis_resume, #endif }; static int __init sis7019_init(void) { return pci_register_driver(&sis7019_driver); } static void __exit sis7019_exit(void) { pci_unregister_driver(&sis7019_driver); } module_init(sis7019_init); module_exit(sis7019_exit);
gpl-2.0
agrawa39/linaro-visualize-mem-pages
drivers/gpu/drm/gma500/mmu.c
7712
18536
/************************************************************************** * Copyright (c) 2007, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * **************************************************************************/ #include <drm/drmP.h> #include "psb_drv.h" #include "psb_reg.h" /* * Code for the SGX MMU: */ /* * clflush on one processor only: * clflush should apparently flush the cache line on all processors in an * SMP system. */ /* * kmap atomic: * The usage of the slots must be completely encapsulated within a spinlock, and * no other functions that may be using the locks for other purposed may be * called from within the locked region. * Since the slots are per processor, this will guarantee that we are the only * user. */ /* * TODO: Inserting ptes from an interrupt handler: * This may be desirable for some SGX functionality where the GPU can fault in * needed pages. For that, we need to make an atomic insert_pages function, that * may fail. * If it fails, the caller need to insert the page using a workqueue function, * but on average it should be fast. */ struct psb_mmu_driver { /* protects driver- and pd structures. Always take in read mode * before taking the page table spinlock. */ struct rw_semaphore sem; /* protects page tables, directory tables and pt tables. * and pt structures. 
*/
	spinlock_t lock;

	/* Set when PD/PT updates require a TLB flush before the next use. */
	atomic_t needs_tlbflush;

	uint8_t __iomem *register_map;
	struct psb_mmu_pd *default_pd;
	/*uint32_t bif_ctrl;*/
	int has_clflush;		/* CPU supports clflush */
	int clflush_add;		/* cache-line stride for clflush loops */
	unsigned long clflush_mask;

	struct drm_psb_private *dev_priv;
};

struct psb_mmu_pd;

/* One page table: 1024 PTEs stored in a single page. */
struct psb_mmu_pt {
	struct psb_mmu_pd *pd;		/* owning page directory */
	uint32_t index;			/* slot in the PD */
	uint32_t count;			/* number of valid PTEs */
	struct page *p;			/* backing page */
	uint32_t *v;			/* kmap_atomic mapping while locked */
};

/* One page directory plus the dummy pages used for invalid entries. */
struct psb_mmu_pd {
	struct psb_mmu_driver *driver;
	int hw_context;			/* -1 when not bound to HW */
	struct psb_mmu_pt **tables;	/* 1024 PT slots */
	struct page *p;			/* the PD page itself */
	struct page *dummy_pt;		/* PT used for invalid PDEs */
	struct page *dummy_page;	/* page used for invalid PTEs */
	uint32_t pd_mask;
	uint32_t invalid_pde;
	uint32_t invalid_pte;
};

/* PTE slot within a page table for a virtual offset. */
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

/* PDE slot within the page directory for a virtual offset. */
static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	return offset >> PSB_PDE_SHIFT;
}

/* Flush one cache line back to memory (x86 clflush). */
static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

/* clflush a single address, fenced, if the CPU supports it. */
static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
				   void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}

/* clflush an entire page, one cache line at a time. */
static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
{
	uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	int i;
	uint8_t *clf;

	clf = kmap_atomic(page);
	mb();
	for (i = 0; i < clflush_count; ++i) {
		psb_clflush(clf);
		clf += clflush_add;
	}
	mb();
	/* NOTE(review): clf has been advanced past the mapped range here;
	 * kunmap_atomic() with the advanced pointer looks suspect -- verify
	 * against the highmem API.
	 */
	kunmap_atomic(clf);
}

/* clflush an array of pages; no-op when the CPU lacks clflush. */
static void psb_pages_clflush(struct psb_mmu_driver *driver,
				struct page *page[], unsigned long num_pages)
{
	int i;

	if (!driver->has_clflush)
		return ;

	for (i = 0; i < num_pages; i++)
		psb_page_clflush(driver, *page++);
}

/* Clear the pending-TLB-flush flag; caller holds the driver semaphore. */
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
				    int force)
{
	atomic_set(&driver->needs_tlbflush, 0);
}

/* Locked wrapper around psb_mmu_flush_pd_locked(). */
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}

/* Flush the MMU; rc_prot selects whether to take the driver semaphore.
 * (Body continues past this chunk.)
 */
void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
{
	if (rc_prot)
		down_write(&driver->sem);
	if
(rc_prot) up_write(&driver->sem); } void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context) { /*ttm_tt_cache_flush(&pd->p, 1);*/ psb_pages_clflush(pd->driver, &pd->p, 1); down_write(&pd->driver->sem); wmb(); psb_mmu_flush_pd_locked(pd->driver, 1); pd->hw_context = hw_context; up_write(&pd->driver->sem); } static inline unsigned long psb_pd_addr_end(unsigned long addr, unsigned long end) { addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK; return (addr < end) ? addr : end; } static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type) { uint32_t mask = PSB_PTE_VALID; if (type & PSB_MMU_CACHED_MEMORY) mask |= PSB_PTE_CACHED; if (type & PSB_MMU_RO_MEMORY) mask |= PSB_PTE_RO; if (type & PSB_MMU_WO_MEMORY) mask |= PSB_PTE_WO; return (pfn << PAGE_SHIFT) | mask; } struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver, int trap_pagefaults, int invalid_type) { struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL); uint32_t *v; int i; if (!pd) return NULL; pd->p = alloc_page(GFP_DMA32); if (!pd->p) goto out_err1; pd->dummy_pt = alloc_page(GFP_DMA32); if (!pd->dummy_pt) goto out_err2; pd->dummy_page = alloc_page(GFP_DMA32); if (!pd->dummy_page) goto out_err3; if (!trap_pagefaults) { pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), invalid_type); pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page), invalid_type); } else { pd->invalid_pde = 0; pd->invalid_pte = 0; } v = kmap(pd->dummy_pt); for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) v[i] = pd->invalid_pte; kunmap(pd->dummy_pt); v = kmap(pd->p); for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) v[i] = pd->invalid_pde; kunmap(pd->p); clear_page(kmap(pd->dummy_page)); kunmap(pd->dummy_page); pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024); if (!pd->tables) goto out_err4; pd->hw_context = -1; pd->pd_mask = PSB_PTE_VALID; pd->driver = driver; return pd; out_err4: __free_page(pd->dummy_page); out_err3: __free_page(pd->dummy_pt); out_err2: 
__free_page(pd->p); out_err1: kfree(pd); return NULL; } static void psb_mmu_free_pt(struct psb_mmu_pt *pt) { __free_page(pt->p); kfree(pt); } void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) { struct psb_mmu_driver *driver = pd->driver; struct psb_mmu_pt *pt; int i; down_write(&driver->sem); if (pd->hw_context != -1) psb_mmu_flush_pd_locked(driver, 1); /* Should take the spinlock here, but we don't need to do that since we have the semaphore in write mode. */ for (i = 0; i < 1024; ++i) { pt = pd->tables[i]; if (pt) psb_mmu_free_pt(pt); } vfree(pd->tables); __free_page(pd->dummy_page); __free_page(pd->dummy_pt); __free_page(pd->p); kfree(pd); up_write(&driver->sem); } static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) { struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL); void *v; uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; uint32_t clflush_count = PAGE_SIZE / clflush_add; spinlock_t *lock = &pd->driver->lock; uint8_t *clf; uint32_t *ptes; int i; if (!pt) return NULL; pt->p = alloc_page(GFP_DMA32); if (!pt->p) { kfree(pt); return NULL; } spin_lock(lock); v = kmap_atomic(pt->p); clf = (uint8_t *) v; ptes = (uint32_t *) v; for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) *ptes++ = pd->invalid_pte; if (pd->driver->has_clflush && pd->hw_context != -1) { mb(); for (i = 0; i < clflush_count; ++i) { psb_clflush(clf); clf += clflush_add; } mb(); } kunmap_atomic(v); spin_unlock(lock); pt->count = 0; pt->pd = pd; pt->index = 0; return pt; } static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, unsigned long addr) { uint32_t index = psb_mmu_pd_index(addr); struct psb_mmu_pt *pt; uint32_t *v; spinlock_t *lock = &pd->driver->lock; spin_lock(lock); pt = pd->tables[index]; while (!pt) { spin_unlock(lock); pt = psb_mmu_alloc_pt(pd); if (!pt) return NULL; spin_lock(lock); if (pd->tables[index]) { spin_unlock(lock); psb_mmu_free_pt(pt); spin_lock(lock); pt = pd->tables[index]; continue; } v = kmap_atomic(pd->p); 
pd->tables[index] = pt; v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; pt->index = index; kunmap_atomic((void *) v); if (pd->hw_context != -1) { psb_mmu_clflush(pd->driver, (void *) &v[index]); atomic_set(&pd->driver->needs_tlbflush, 1); } } pt->v = kmap_atomic(pt->p); return pt; } static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd, unsigned long addr) { uint32_t index = psb_mmu_pd_index(addr); struct psb_mmu_pt *pt; spinlock_t *lock = &pd->driver->lock; spin_lock(lock); pt = pd->tables[index]; if (!pt) { spin_unlock(lock); return NULL; } pt->v = kmap_atomic(pt->p); return pt; } static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt) { struct psb_mmu_pd *pd = pt->pd; uint32_t *v; kunmap_atomic(pt->v); if (pt->count == 0) { v = kmap_atomic(pd->p); v[pt->index] = pd->invalid_pde; pd->tables[pt->index] = NULL; if (pd->hw_context != -1) { psb_mmu_clflush(pd->driver, (void *) &v[pt->index]); atomic_set(&pd->driver->needs_tlbflush, 1); } kunmap_atomic(pt->v); spin_unlock(&pd->driver->lock); psb_mmu_free_pt(pt); return; } spin_unlock(&pd->driver->lock); } static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr, uint32_t pte) { pt->v[psb_mmu_pt_index(addr)] = pte; } static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt, unsigned long addr) { pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; } void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset, uint32_t gtt_start, uint32_t gtt_pages) { uint32_t *v; uint32_t start = psb_mmu_pd_index(mmu_offset); struct psb_mmu_driver *driver = pd->driver; int num_pages = gtt_pages; down_read(&driver->sem); spin_lock(&driver->lock); v = kmap_atomic(pd->p); v += start; while (gtt_pages--) { *v++ = gtt_start | pd->pd_mask; gtt_start += PAGE_SIZE; } /*ttm_tt_cache_flush(&pd->p, num_pages);*/ psb_pages_clflush(pd->driver, &pd->p, num_pages); kunmap_atomic(v); spin_unlock(&driver->lock); if (pd->hw_context != -1) atomic_set(&pd->driver->needs_tlbflush, 1); 
up_read(&pd->driver->sem); psb_mmu_flush_pd(pd->driver, 0); } struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver) { struct psb_mmu_pd *pd; /* down_read(&driver->sem); */ pd = driver->default_pd; /* up_read(&driver->sem); */ return pd; } void psb_mmu_driver_takedown(struct psb_mmu_driver *driver) { psb_mmu_free_pagedir(driver->default_pd); kfree(driver); } struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers, int trap_pagefaults, int invalid_type, struct drm_psb_private *dev_priv) { struct psb_mmu_driver *driver; driver = kmalloc(sizeof(*driver), GFP_KERNEL); if (!driver) return NULL; driver->dev_priv = dev_priv; driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults, invalid_type); if (!driver->default_pd) goto out_err1; spin_lock_init(&driver->lock); init_rwsem(&driver->sem); down_write(&driver->sem); driver->register_map = registers; atomic_set(&driver->needs_tlbflush, 1); driver->has_clflush = 0; if (boot_cpu_has(X86_FEATURE_CLFLSH)) { uint32_t tfms, misc, cap0, cap4, clflush_size; /* * clflush size is determined at kernel setup for x86_64 * but not for i386. We have to do it here. 
*/ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4); clflush_size = ((misc >> 8) & 0xff) * 8; driver->has_clflush = 1; driver->clflush_add = PAGE_SIZE * clflush_size / sizeof(uint32_t); driver->clflush_mask = driver->clflush_add - 1; driver->clflush_mask = ~driver->clflush_mask; } up_write(&driver->sem); return driver; out_err1: kfree(driver); return NULL; } static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride) { struct psb_mmu_pt *pt; uint32_t rows = 1; uint32_t i; unsigned long addr; unsigned long end; unsigned long next; unsigned long add; unsigned long row_add; unsigned long clflush_add = pd->driver->clflush_add; unsigned long clflush_mask = pd->driver->clflush_mask; if (!pd->driver->has_clflush) { /*ttm_tt_cache_flush(&pd->p, num_pages);*/ psb_pages_clflush(pd->driver, &pd->p, num_pages); return; } if (hw_tile_stride) rows = num_pages / desired_tile_stride; else desired_tile_stride = num_pages; add = desired_tile_stride << PAGE_SHIFT; row_add = hw_tile_stride << PAGE_SHIFT; mb(); for (i = 0; i < rows; ++i) { addr = address; end = addr + add; do { next = psb_pd_addr_end(addr, end); pt = psb_mmu_pt_map_lock(pd, addr); if (!pt) continue; do { psb_clflush(&pt->v [psb_mmu_pt_index(addr)]); } while (addr += clflush_add, (addr & clflush_mask) < next); psb_mmu_pt_unmap_unlock(pt); } while (addr = next, next != end); address += row_add; } mb(); } void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages) { struct psb_mmu_pt *pt; unsigned long addr; unsigned long end; unsigned long next; unsigned long f_address = address; down_read(&pd->driver->sem); addr = address; end = addr + (num_pages << PAGE_SHIFT); do { next = psb_pd_addr_end(addr, end); pt = psb_mmu_pt_alloc_map_lock(pd, addr); if (!pt) goto out; do { psb_mmu_invalidate_pte(pt, addr); --pt->count; } while (addr += PAGE_SIZE, addr < next); psb_mmu_pt_unmap_unlock(pt); } while 
(addr = next, next != end); out: if (pd->hw_context != -1) psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); up_read(&pd->driver->sem); if (pd->hw_context != -1) psb_mmu_flush(pd->driver, 0); return; } void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride) { struct psb_mmu_pt *pt; uint32_t rows = 1; uint32_t i; unsigned long addr; unsigned long end; unsigned long next; unsigned long add; unsigned long row_add; unsigned long f_address = address; if (hw_tile_stride) rows = num_pages / desired_tile_stride; else desired_tile_stride = num_pages; add = desired_tile_stride << PAGE_SHIFT; row_add = hw_tile_stride << PAGE_SHIFT; /* down_read(&pd->driver->sem); */ /* Make sure we only need to flush this processor's cache */ for (i = 0; i < rows; ++i) { addr = address; end = addr + add; do { next = psb_pd_addr_end(addr, end); pt = psb_mmu_pt_map_lock(pd, addr); if (!pt) continue; do { psb_mmu_invalidate_pte(pt, addr); --pt->count; } while (addr += PAGE_SIZE, addr < next); psb_mmu_pt_unmap_unlock(pt); } while (addr = next, next != end); address += row_add; } if (pd->hw_context != -1) psb_mmu_flush_ptes(pd, f_address, num_pages, desired_tile_stride, hw_tile_stride); /* up_read(&pd->driver->sem); */ if (pd->hw_context != -1) psb_mmu_flush(pd->driver, 0); } int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, unsigned long address, uint32_t num_pages, int type) { struct psb_mmu_pt *pt; uint32_t pte; unsigned long addr; unsigned long end; unsigned long next; unsigned long f_address = address; int ret = 0; down_read(&pd->driver->sem); addr = address; end = addr + (num_pages << PAGE_SHIFT); do { next = psb_pd_addr_end(addr, end); pt = psb_mmu_pt_alloc_map_lock(pd, addr); if (!pt) { ret = -ENOMEM; goto out; } do { pte = psb_mmu_mask_pte(start_pfn++, type); psb_mmu_set_pte(pt, addr, pte); pt->count++; } while (addr += PAGE_SIZE, addr < next); psb_mmu_pt_unmap_unlock(pt); 
} while (addr = next, next != end); out: if (pd->hw_context != -1) psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); up_read(&pd->driver->sem); if (pd->hw_context != -1) psb_mmu_flush(pd->driver, 1); return ret; } int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride, int type) { struct psb_mmu_pt *pt; uint32_t rows = 1; uint32_t i; uint32_t pte; unsigned long addr; unsigned long end; unsigned long next; unsigned long add; unsigned long row_add; unsigned long f_address = address; int ret = 0; if (hw_tile_stride) { if (num_pages % desired_tile_stride != 0) return -EINVAL; rows = num_pages / desired_tile_stride; } else { desired_tile_stride = num_pages; } add = desired_tile_stride << PAGE_SHIFT; row_add = hw_tile_stride << PAGE_SHIFT; down_read(&pd->driver->sem); for (i = 0; i < rows; ++i) { addr = address; end = addr + add; do { next = psb_pd_addr_end(addr, end); pt = psb_mmu_pt_alloc_map_lock(pd, addr); if (!pt) { ret = -ENOMEM; goto out; } do { pte = psb_mmu_mask_pte(page_to_pfn(*pages++), type); psb_mmu_set_pte(pt, addr, pte); pt->count++; } while (addr += PAGE_SIZE, addr < next); psb_mmu_pt_unmap_unlock(pt); } while (addr = next, next != end); address += row_add; } out: if (pd->hw_context != -1) psb_mmu_flush_ptes(pd, f_address, num_pages, desired_tile_stride, hw_tile_stride); up_read(&pd->driver->sem); if (pd->hw_context != -1) psb_mmu_flush(pd->driver, 1); return ret; } int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, unsigned long *pfn) { int ret; struct psb_mmu_pt *pt; uint32_t tmp; spinlock_t *lock = &pd->driver->lock; down_read(&pd->driver->sem); pt = psb_mmu_pt_map_lock(pd, virtual); if (!pt) { uint32_t *v; spin_lock(lock); v = kmap_atomic(pd->p); tmp = v[psb_mmu_pd_index(virtual)]; kunmap_atomic(v); spin_unlock(lock); if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || !(pd->invalid_pte & PSB_PTE_VALID)) { ret = 
-EINVAL; goto out; } ret = 0; *pfn = pd->invalid_pte >> PAGE_SHIFT; goto out; } tmp = pt->v[psb_mmu_pt_index(virtual)]; if (!(tmp & PSB_PTE_VALID)) { ret = -EINVAL; } else { ret = 0; *pfn = tmp >> PAGE_SHIFT; } psb_mmu_pt_unmap_unlock(pt); out: up_read(&pd->driver->sem); return ret; }
gpl-2.0
kazukioishi/android_kernel_samsung_klte
lib/ioremap.c
8480
2195
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

/*
 * Populate the PTEs covering [addr, end) so they map the physical range
 * starting at phys_addr with protection bits 'prot'.
 *
 * Returns 0 on success, -ENOMEM if a PTE page cannot be allocated.
 */
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		/*
		 * ioremap targets virgin vmalloc address space: a live PTE
		 * here means the caller's range overlaps an existing mapping,
		 * which would be silently clobbered -- trap instead.
		 */
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

/*
 * Walk/allocate the PMD level for [addr, end), descending into
 * ioremap_pte_range() for each PMD-sized slice.
 *
 * Note the 'phys_addr -= addr' trick: after it, 'phys_addr + addr' yields
 * the correct physical address for any addr in the range, so each
 * recursion level can simply pass 'phys_addr + addr' as the slice base.
 */
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

/*
 * Walk/allocate the PUD level for [addr, end); same structure and
 * phys_addr bias trick as ioremap_pmd_range() one level up.
 */
static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Map the physical range starting at phys_addr into the kernel virtual
 * range [addr, end) with protection 'prot', allocating intermediate page
 * tables as needed.
 *
 * Returns 0 on success or a negative errno from the level walkers.
 * On partial failure the already-written entries are left in place;
 * callers (ioremap implementations) unmap the region on error.
 */
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;	/* bias so phys_addr + addr is the target PA */
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	/* flush any cached stale translations for the new virtual range */
	flush_cache_vmap(start, end);

	return err;
}
EXPORT_SYMBOL_GPL(ioremap_page_range);
gpl-2.0
RickHutten/paparazzi
sw/airborne/subsystems/gps/gps_mtk.c
33
13498
/*
 * Copyright (C) 2011 The Paparazzi Team
 *
 * This file is part of paparazzi.
 *
 * paparazzi is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * paparazzi is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with paparazzi; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

/** @file gps_mtk.c
 * @brief Mediatek MT3329 specific code
 *
 * supports:
 *   DIYDrones V1.4 protocol (AXN1.30_2278)
 *   DIYDrones V1.6 protocol (AXN1.30_2389)
 *
 * documentation is partly incorrect, see mtk.xml for what seems
 * to be "real"
 */

#include "gps_mtk.h"
#include "subsystems/abi.h"
#include "led.h"
#include "mcu_periph/sys_time.h"
#include "pprzlink/pprzlink_device.h"

#ifndef MTK_GPS_LINK
#error "MTK_GPS_LINK not set"
#endif

#define MTK_DIY_OUTPUT_RATE MTK_DIY_OUTPUT_4HZ
#define OUTPUT_RATE 4

/* parser status */
#define UNINIT        0
#define GOT_SYNC1_14  1
#define GOT_SYNC2_14  2
#define GOT_CLASS_14  3
#define GOT_SYNC1_16  4
#define GOT_SYNC2_16  5
#define GOT_ID        6
#define GOT_PAYLOAD   7
#define GOT_CHECKSUM1 8

/* last error type */
#define GPS_MTK_ERR_NONE         0
#define GPS_MTK_ERR_OVERRUN      1
#define GPS_MTK_ERR_MSG_TOO_LONG 2
#define GPS_MTK_ERR_CHECKSUM     3
#define GPS_MTK_ERR_UNEXPECTED   4
#define GPS_MTK_ERR_OUT_OF_SYNC  5

/* mediatek gps fix mask */
#define MTK_DIY_FIX_3D   3
#define MTK_DIY_FIX_2D   2
#define MTK_DIY_FIX_NONE 1

/* defines for UTC-GPS time conversion */
#define SECS_MINUTE (60)
#define SECS_HOUR   (60*60)
#define SECS_DAY    (60*60*24)
#define SECS_WEEK   (60*60*24*7)

#define isleap(x) ((((x)%400)==0) || (!(((x)%100)==0) && (((x)%4)==0)))

/* days per month, 0-based index (0 = January); February without leap day */
const int8_t DAYS_MONTH[12] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };

struct GpsMtk gps_mtk;

#ifdef GPS_CONFIGURE
#define MTK_DIY_SET_BINARY  "$PGCMD,16,0,0,0,0,0*6A\r\n"
#define MTK_DIY_SET_NMEA    "$PGCMD,16,1,1,1,1,1*6B\r\n"
#define MTK_DIY_OUTPUT_1HZ  "$PMTK220,1000*1F\r\n"
#define MTK_DIY_OUTPUT_2HZ  "$PMTK220,500*2B\r\n"
#define MTK_DIY_OUTPUT_4HZ  "$PMTK220,250*29\r\n"
/* NOTE(review): macro name typo ("OTUPUT") kept as-is in case external
 * config files reference it; it is unused within this file. */
#define MTK_DIY_OTUPUT_5HZ  "$PMTK220,200*2C\r\n"
#define MTK_DIY_OUTPUT_10HZ "$PMTK220,100*2F\r\n"
#define MTK_BAUD_RATE_38400 "$PMTK251,38400*27\r\n"
#define MTK_DIY_SBAS_ON     "$PMTK313,1*2E\r\n"
#define MTK_DIY_SBAS_OFF    "$PMTK313,0*2F\r\n"
#define MTK_DIY_WAAS_ON     "$PSRF151,1*3F\r\n"
#define MTK_DIY_WAAS_OFF    "$PSRF151,0*3E\r\n"
bool gps_configuring;
static uint8_t gps_status_config;
#endif

void gps_mtk_read_message(void);
void gps_mtk_parse(uint8_t c);
void gps_mtk_msg(void);

/** Reset parser state and error bookkeeping. */
void gps_mtk_init(void)
{
  gps_mtk.status = UNINIT;
  gps_mtk.msg_available = false;
  gps_mtk.error_cnt = 0;
  gps_mtk.error_last = GPS_MTK_ERR_NONE;

#ifdef GPS_CONFIGURE
  gps_status_config = 0;
  gps_configuring = true;
#endif
}

/** Drain all pending bytes from the GPS link and process complete messages. */
void gps_mtk_event(void)
{
  struct link_device *dev = &((MTK_GPS_LINK).device);

  while (dev->char_available(dev->periph)) {
    gps_mtk_parse(dev->get_byte(dev->periph));
    if (gps_mtk.msg_available) {
      gps_mtk_msg();
    }
    GpsConfigure();
  }
}

/** Decode the buffered message, update gps_mtk.state and publish it via ABI. */
void gps_mtk_msg(void)
{
  // current timestamp
  uint32_t now_ts = get_sys_time_usec();

  gps_mtk.state.last_msg_ticks = sys_time.nb_sec_rem;
  gps_mtk.state.last_msg_time = sys_time.nb_sec;
  gps_mtk_read_message();

  /* both protocol versions carry the same NAV payload semantics,
   * so publish identically for either one */
  if ((gps_mtk.msg_class == MTK_DIY14_ID && gps_mtk.msg_id == MTK_DIY14_NAV_ID) ||
      (gps_mtk.msg_class == MTK_DIY16_ID && gps_mtk.msg_id == MTK_DIY16_NAV_ID)) {
    if (gps_mtk.state.fix == GPS_FIX_3D) {
      gps_mtk.state.last_3dfix_ticks = sys_time.nb_sec_rem;
      gps_mtk.state.last_3dfix_time = sys_time.nb_sec;
    }
    AbiSendMsgGPS(GPS_MTK_ID, now_ts, &gps_mtk.state);
  }
  gps_mtk.msg_available = false;
}

/**
 * Convert UTC date (ddmmyy) and time (hhmmssmmm) to GPS week and
 * time-of-week in ms. Leap seconds are ignored. On invalid input both
 * outputs are left at 0.
 */
static void gps_mtk_time2itow(uint32_t gps_date, uint32_t gps_time,
                              uint16_t *gps_week, uint32_t *gps_itow)
{
  /* convert UTC date/time to GPS week/itow, we have no idea about GPS
     leap seconds for now */
  uint16_t gps_msecond = gps_time % 1000;
  uint8_t gps_second = (gps_time / 1000) % 100;
  uint8_t gps_minute = (gps_time / 100000) % 100;
  uint8_t gps_hour = (gps_time / 10000000) % 100;
  uint16_t gps_year = 2000 + (gps_date % 100);
  uint8_t gps_month = (gps_date / 100) % 100;
  uint8_t gps_day = (gps_date / 10000) % 100;
  int32_t i, days;

  *gps_week = 0;
  *gps_itow = 0;

  /* sanity checks */
  if (gps_month < 1 || gps_month > 12) { return; }
  /* gps_month is 1-based but DAYS_MONTH is 0-based: index with
   * gps_month - 1 and add the leap day for February (month 2).
   * (The previous code indexed DAYS_MONTH[gps_month], reading out of
   * bounds for December and leap-checking January.) */
  if (gps_day > (DAYS_MONTH[gps_month - 1] +
                 ((gps_month == 2) ? isleap(gps_year) : 0))) { return; }
  if (gps_hour > 23) { return; }
  if (gps_minute > 59) { return; }
  if (gps_second > 59) { return; }

  /* days since 6-JAN-1980 */
  days = -6;
  for (i = 1980; i < gps_year; i++) { days += (365 + isleap(i)); }

  /* add days in gps_year */
  for (i = 0; i < gps_month - 1; i++) {
    days += DAYS_MONTH[i] + ((i == 1) ? isleap(gps_year) : 0);
  }
  days += gps_day;

  /* convert */
  *gps_week = (uint16_t)(days / 7);
  *gps_itow = ((days % 7) * SECS_DAY +
               gps_hour * SECS_HOUR +
               gps_minute * SECS_MINUTE +
               gps_second) * 1000 + gps_msecond;
}

/** Decode the NAV payload in gps_mtk.msg_buf into gps_mtk.state. */
void gps_mtk_read_message(void)
{
  if (gps_mtk.msg_class == MTK_DIY14_ID) {
    if (gps_mtk.msg_id == MTK_DIY14_NAV_ID) {
      /* get hardware clock ticks */
      gps_mtk.state.lla_pos.lat = MTK_DIY14_NAV_LAT(gps_mtk.msg_buf) * 10;
      gps_mtk.state.lla_pos.lon = MTK_DIY14_NAV_LON(gps_mtk.msg_buf) * 10;
      SetBit(gps_mtk.state.valid_fields, GPS_VALID_POS_LLA_BIT);
      // FIXME: with MTK you do not receive vertical speed
      if (sys_time.nb_sec - gps_mtk.state.last_3dfix_time < 2) {
        /* derive climb rate from altitude difference at OUTPUT_RATE Hz */
        gps_mtk.state.ned_vel.z = ((gps_mtk.state.hmsl -
                                    MTK_DIY14_NAV_HEIGHT(gps_mtk.msg_buf) * 10) * OUTPUT_RATE) / 10;
      } else {
        gps_mtk.state.ned_vel.z = 0;
      }
      gps_mtk.state.hmsl = MTK_DIY14_NAV_HEIGHT(gps_mtk.msg_buf) * 10;
      SetBit(gps_mtk.state.valid_fields, GPS_VALID_HMSL_BIT);
      // FIXME: with MTK you do not receive ellipsoid altitude
      gps_mtk.state.lla_pos.alt = gps_mtk.state.hmsl;
      gps_mtk.state.gspeed = MTK_DIY14_NAV_GSpeed(gps_mtk.msg_buf);
      // FIXME: with MTK you do not receive speed 3D
      gps_mtk.state.speed_3d = gps_mtk.state.gspeed;
      gps_mtk.state.course = (RadOfDeg(MTK_DIY14_NAV_Heading(gps_mtk.msg_buf))) * 10;
      SetBit(gps_mtk.state.valid_fields, GPS_VALID_COURSE_BIT);
      gps_mtk.state.num_sv = MTK_DIY14_NAV_numSV(gps_mtk.msg_buf);
      switch (MTK_DIY14_NAV_GPSfix(gps_mtk.msg_buf)) {
        case MTK_DIY_FIX_3D:
          gps_mtk.state.fix = GPS_FIX_3D;
          break;
        case MTK_DIY_FIX_2D:
          gps_mtk.state.fix = GPS_FIX_2D;
          break;
        default:
          gps_mtk.state.fix = GPS_FIX_NONE;
      }
      gps_mtk.state.tow = MTK_DIY14_NAV_ITOW(gps_mtk.msg_buf);
      // FIXME: with MTK DIY 1.4 you do not receive GPS week
      gps_mtk.state.week = 0;

#ifdef GPS_LED
      if (gps_mtk.state.fix == GPS_FIX_3D) {
        LED_ON(GPS_LED);
      } else {
        LED_TOGGLE(GPS_LED);
      }
#endif
    }
  }

  if (gps_mtk.msg_class == MTK_DIY16_ID) {
    if (gps_mtk.msg_id == MTK_DIY16_NAV_ID) {
      uint32_t gps_date, gps_time;
      gps_date = MTK_DIY16_NAV_UTC_DATE(gps_mtk.msg_buf);
      gps_time = MTK_DIY16_NAV_UTC_TIME(gps_mtk.msg_buf);
      gps_mtk_time2itow(gps_date, gps_time, &gps_mtk.state.week, &gps_mtk.state.tow);
#ifdef GPS_TIMESTAMP
      /* get hardware clock ticks */
      SysTimeTimerStart(gps_mtk.state.t0);
      gps_mtk.state.t0_tow = gps_mtk.state.tow;
      gps_mtk.state.t0_tow_frac = 0;
#endif
      gps_mtk.state.lla_pos.lat = MTK_DIY16_NAV_LAT(gps_mtk.msg_buf) * 10;
      gps_mtk.state.lla_pos.lon = MTK_DIY16_NAV_LON(gps_mtk.msg_buf) * 10;
      /* NOTE(review): unlike the DIY14 branch, GPS_VALID_POS_LLA_BIT is
       * not set here -- looks like an omission, but left unchanged;
       * confirm against consumers of valid_fields before adding it. */
      // FIXME: with MTK you do not receive vertical speed
      if (sys_time.nb_sec - gps_mtk.state.last_3dfix_time < 2) {
        gps_mtk.state.ned_vel.z = ((gps_mtk.state.hmsl -
                                    MTK_DIY16_NAV_HEIGHT(gps_mtk.msg_buf) * 10) * OUTPUT_RATE) / 10;
      } else {
        gps_mtk.state.ned_vel.z = 0;
      }
      gps_mtk.state.hmsl = MTK_DIY16_NAV_HEIGHT(gps_mtk.msg_buf) * 10;
      SetBit(gps_mtk.state.valid_fields, GPS_VALID_HMSL_BIT);
      // FIXME: with MTK you do not receive ellipsoid altitude
      gps_mtk.state.lla_pos.alt = gps_mtk.state.hmsl;
      gps_mtk.state.gspeed = MTK_DIY16_NAV_GSpeed(gps_mtk.msg_buf);
      // FIXME: with MTK you do not receive speed 3D
      gps_mtk.state.speed_3d = gps_mtk.state.gspeed;
      gps_mtk.state.course = (RadOfDeg(MTK_DIY16_NAV_Heading(gps_mtk.msg_buf) * 10000)) * 10;
      SetBit(gps_mtk.state.valid_fields, GPS_VALID_COURSE_BIT);
      gps_mtk.state.num_sv = MTK_DIY16_NAV_numSV(gps_mtk.msg_buf);
      switch (MTK_DIY16_NAV_GPSfix(gps_mtk.msg_buf)) {
        case MTK_DIY_FIX_3D:
          gps_mtk.state.fix = GPS_FIX_3D;
          break;
        case MTK_DIY_FIX_2D:
          gps_mtk.state.fix = GPS_FIX_2D;
          break;
        default:
          gps_mtk.state.fix = GPS_FIX_NONE;
      }
      /* HDOP? */

#ifdef GPS_LED
      if (gps_mtk.state.fix == GPS_FIX_3D) {
        LED_ON(GPS_LED);
      } else {
        LED_TOGGLE(GPS_LED);
      }
#endif
    }
  }
}

/* byte parsing */
void gps_mtk_parse(uint8_t c)
{
  /* Fletcher-style running checksum over everything after the sync bytes
   * (counters are zeroed when a sync byte is recognized below). */
  if (gps_mtk.status < GOT_PAYLOAD) {
    gps_mtk.ck_a += c;
    gps_mtk.ck_b += gps_mtk.ck_a;
  }
  switch (gps_mtk.status) {
    case UNINIT:
      if (c == MTK_DIY14_SYNC1) {
        gps_mtk.status = GOT_SYNC1_14;
      }
      /* BUGFIX: the GOT_SYNC1_16 transition must happen only when the
       * DIY-1.6 sync byte is seen. Previously the assignment sat outside
       * this if-block, so EVERY byte forced the parser into GOT_SYNC1_16,
       * clobbering the DIY-1.4 transition above and flagging an
       * out-of-sync error on nearly every incoming byte. */
      if (c == MTK_DIY16_ID) {
        gps_mtk.msg_class = c;
        gps_mtk.status = GOT_SYNC1_16;
      }
      break;
    /* MTK_DIY_VER_14 */
    case GOT_SYNC1_14:
      if (c != MTK_DIY14_SYNC2) {
        gps_mtk.error_last = GPS_MTK_ERR_OUT_OF_SYNC;
        goto error;
      }
      if (gps_mtk.msg_available) {
        /* Previous message has not yet been parsed: discard this one */
        gps_mtk.error_last = GPS_MTK_ERR_OVERRUN;
        goto error;
      }
      gps_mtk.ck_a = 0;
      gps_mtk.ck_b = 0;
      gps_mtk.status++;
      gps_mtk.len = MTK_DIY14_NAV_LENGTH;  /* DIY 1.4 has a fixed length */
      break;
    case GOT_SYNC2_14:
      if (c != MTK_DIY14_ID) {
        gps_mtk.error_last = GPS_MTK_ERR_OUT_OF_SYNC;
        goto error;
      }
      gps_mtk.msg_class = c;
      gps_mtk.msg_idx = 0;
      gps_mtk.status++;
      break;
    case GOT_CLASS_14:
      if (c != MTK_DIY14_NAV_ID) {
        gps_mtk.error_last = GPS_MTK_ERR_OUT_OF_SYNC;
        goto error;
      }
      gps_mtk.msg_id = c;
      gps_mtk.status = GOT_ID;
      break;
    /* MTK_DIY_VER_16 */
    case GOT_SYNC1_16:
      if (c != MTK_DIY16_NAV_ID) {
        gps_mtk.error_last = GPS_MTK_ERR_OUT_OF_SYNC;
        goto error;
      }
      if (gps_mtk.msg_available) {
        /* Previous message has not yet been parsed: discard this one */
        gps_mtk.error_last = GPS_MTK_ERR_OVERRUN;
        goto error;
      }
      gps_mtk.msg_id = c;
      gps_mtk.ck_a = 0;
      gps_mtk.ck_b = 0;
      gps_mtk.status++;
      break;
    case GOT_SYNC2_16:
      gps_mtk.len = c;  /* DIY 1.6 carries the payload length on the wire */
      gps_mtk.msg_idx = 0;
      gps_mtk.status = GOT_ID;
      break;
    case GOT_ID:
      gps_mtk.msg_buf[gps_mtk.msg_idx] = c;
      gps_mtk.msg_idx++;
      if (gps_mtk.msg_idx >= gps_mtk.len) {
        gps_mtk.status++;
      }
      break;
    case GOT_PAYLOAD:
      if (c != gps_mtk.ck_a) {
        gps_mtk.error_last = GPS_MTK_ERR_CHECKSUM;
        goto error;
      }
      gps_mtk.status++;
      break;
    case GOT_CHECKSUM1:
      if (c != gps_mtk.ck_b) {
        gps_mtk.error_last = GPS_MTK_ERR_CHECKSUM;
        goto error;
      }
      gps_mtk.msg_available = true;
      goto restart;
    default:
      gps_mtk.error_last = GPS_MTK_ERR_UNEXPECTED;
      goto error;
  }
  return;
error:
  gps_mtk.error_cnt++;
restart:
  gps_mtk.status = UNINIT;
  return;
}

/*
 *
 *
 * GPS dynamic configuration
 *
 *
 */
#ifdef GPS_CONFIGURE
#include "pprzlink/pprzlink_device.h"

/** Send a NUL-terminated configuration sentence out over the GPS link. */
static void MtkSend_CFG(char *dat)
{
  struct link_device *dev = &((MTK_GPS_LINK).device);
  while (*dat != 0) {
    dev->put_byte(dev->periph, 0, *dat++);
  }
}

void gps_configure_uart(void)
{
}

#ifdef USER_GPS_CONFIGURE
#include USER_GPS_CONFIGURE
#else
/**
 * Default configuration sequence: switch the module to binary protocol,
 * then set the output rate. Returns false once configuration is done.
 */
static bool user_gps_configure(bool cpt)
{
  switch (cpt) {
    case 0:
      MtkSend_CFG(MTK_DIY_SET_BINARY);
      break;
    case 1:
      MtkSend_CFG(MTK_DIY_OUTPUT_RATE);
      return false;
    default:
      break;
  }
  return true; /* Continue, except for the last case */
}
#endif // ! USER_GPS_CONFIGURE

void gps_configure(void)
{
  static uint32_t count = 0;

  /* start configuring after having received 50 bytes */
  if (count++ > 50) {
    gps_configuring = user_gps_configure(gps_status_config++);
  }
}
#endif /* GPS_CONFIGURE */
gpl-2.0
thiz11/kernel_common
drivers/scsi/qla2xxx/qla_mbx.c
33
127732
/* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include "qla_target.h" #include <linux/delay.h> #include <linux/gfp.h> /* * qla2x00_mailbox_command * Issue mailbox command and waits for completion. * * Input: * ha = adapter block pointer. * mcp = driver internal mbx struct pointer. * * Output: * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data. * * Returns: * 0 : QLA_SUCCESS = cmd performed success * 1 : QLA_FUNCTION_FAILED (error encountered) * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered) * * Context: * Kernel context. */ static int qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) { int rval; unsigned long flags = 0; device_reg_t __iomem *reg; uint8_t abort_active; uint8_t io_lock_on; uint16_t command = 0; uint16_t *iptr; uint16_t __iomem *optr; uint32_t cnt; uint32_t mboxes; unsigned long wait_time; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__); if (ha->pdev->error_state > pci_channel_io_frozen) { ql_log(ql_log_warn, vha, 0x1001, "error_state is greater than pci_channel_io_frozen, " "exiting.\n"); return QLA_FUNCTION_TIMEOUT; } if (vha->device_flags & DFLG_DEV_FAILED) { ql_log(ql_log_warn, vha, 0x1002, "Device in failed state, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } reg = ha->iobase; io_lock_on = base_vha->flags.init_done; rval = QLA_SUCCESS; abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); if (ha->flags.pci_channel_io_perm_failure) { ql_log(ql_log_warn, vha, 0x1003, "Perm failure on EEH timeout MBX, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) { /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; ql_log(ql_log_warn, vha, 0x1004, "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); return QLA_FUNCTION_TIMEOUT; } /* * Wait for active mailbox 
commands to finish by waiting at most tov * seconds. This is to serialize actual issuing of mailbox cmds during * non ISP abort time. */ if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { /* Timeout occurred. Return error. */ ql_log(ql_log_warn, vha, 0x1005, "Cmd access timeout, cmd=0x%x, Exiting.\n", mcp->mb[0]); return QLA_FUNCTION_TIMEOUT; } ha->flags.mbox_busy = 1; /* Save mailbox command for debug */ ha->mcp = mcp; ql_dbg(ql_dbg_mbx, vha, 0x1006, "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); spin_lock_irqsave(&ha->hardware_lock, flags); /* Load mailbox registers. */ if (IS_QLA82XX(ha)) optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0]; else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha)) optr = (uint16_t __iomem *)&reg->isp24.mailbox0; else optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0); iptr = mcp->mb; command = mcp->mb[0]; mboxes = mcp->out_mb; for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (IS_QLA2200(ha) && cnt == 8) optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8); if (mboxes & BIT_0) WRT_REG_WORD(optr, *iptr); mboxes >>= 1; optr++; iptr++; } ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111, "Loaded MBX registers (displayed in bytes) =.\n"); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112, (uint8_t *)mcp->mb, 16); ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113, ".\n"); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114, ((uint8_t *)mcp->mb + 0x10), 16); ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115, ".\n"); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116, ((uint8_t *)mcp->mb + 0x20), 8); ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117, "I/O Address = %p.\n", optr); ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e); /* Issue set host interrupt command to send cmd out. */ ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); /* Unlock mbx registers and wait for interrupt */ ql_dbg(ql_dbg_mbx, vha, 0x100f, "Going to unlock irq & waiting for interrupts. 
" "jiffies=%lx.\n", jiffies); /* Wait for mbx cmd completion until timeout */ if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); if (IS_QLA82XX(ha)) { if (RD_REG_DWORD(&reg->isp82.hint) & HINT_MBX_INT_PENDING) { spin_unlock_irqrestore(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; ql_dbg(ql_dbg_mbx, vha, 0x1010, "Pending mailbox timeout, exiting.\n"); rval = QLA_FUNCTION_TIMEOUT; goto premature_exit; } WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING); } else if (IS_FWI2_CAPABLE(ha)) WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); else WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); } else { ql_dbg(ql_dbg_mbx, vha, 0x1011, "Cmd=%x Polling Mode.\n", command); if (IS_QLA82XX(ha)) { if (RD_REG_DWORD(&reg->isp82.hint) & HINT_MBX_INT_PENDING) { spin_unlock_irqrestore(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; ql_dbg(ql_dbg_mbx, vha, 0x1012, "Pending mailbox timeout, exiting.\n"); rval = QLA_FUNCTION_TIMEOUT; goto premature_exit; } WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING); } else if (IS_FWI2_CAPABLE(ha)) WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); else WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ while (!ha->flags.mbox_int) { if (time_after(jiffies, wait_time)) break; /* Check for pending interrupts. 
*/ qla2x00_poll(ha->rsp_q_map[0]); if (!ha->flags.mbox_int && !(IS_QLA2200(ha) && command == MBC_LOAD_RISC_RAM_EXTENDED)) msleep(10); } /* while */ ql_dbg(ql_dbg_mbx, vha, 0x1013, "Waited %d sec.\n", (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); } /* Check whether we timed out */ if (ha->flags.mbox_int) { uint16_t *iptr2; ql_dbg(ql_dbg_mbx, vha, 0x1014, "Cmd=%x completed.\n", command); /* Got interrupt. Clear the flag. */ ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) { ha->flags.mbox_busy = 0; /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; ha->mcp = NULL; rval = QLA_FUNCTION_FAILED; ql_log(ql_log_warn, vha, 0x1015, "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); goto premature_exit; } if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) rval = QLA_FUNCTION_FAILED; /* Load return mailbox registers. */ iptr2 = mcp->mb; iptr = (uint16_t *)&ha->mailbox_out[0]; mboxes = mcp->in_mb; for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (mboxes & BIT_0) *iptr2 = *iptr; mboxes >>= 1; iptr2++; iptr++; } } else { uint16_t mb0; uint32_t ictrl; if (IS_FWI2_CAPABLE(ha)) { mb0 = RD_REG_WORD(&reg->isp24.mailbox0); ictrl = RD_REG_DWORD(&reg->isp24.ictrl); } else { mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0); ictrl = RD_REG_WORD(&reg->isp.ictrl); } ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " "mb[0]=0x%x\n", command, ictrl, jiffies, mb0); ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); /* * Attempt to capture a firmware dump for further analysis * of the current firmware state */ ha->isp_ops->fw_dump(vha, 0); rval = QLA_FUNCTION_TIMEOUT; } ha->flags.mbox_busy = 0; /* Clean up */ ha->mcp = NULL; if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { ql_dbg(ql_dbg_mbx, vha, 0x101a, "Checking for additional resp interrupt.\n"); /* polling mode for non isp_abort commands. 
*/ qla2x00_poll(ha->rsp_q_map[0]); } if (rval == QLA_FUNCTION_TIMEOUT && mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { if (!io_lock_on || (mcp->flags & IOCTL_CMD) || ha->flags.eeh_busy) { /* not in dpc. schedule it for dpc to take over. */ ql_dbg(ql_dbg_mbx, vha, 0x101b, "Timeout, schedule isp_abort_needed.\n"); if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { if (IS_QLA82XX(ha)) { ql_dbg(ql_dbg_mbx, vha, 0x112a, "disabling pause transmit on port " "0 & 1.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0| CRB_NIU_XG_PAUSE_CTL_P1); } ql_log(ql_log_info, base_vha, 0x101c, "Mailbox cmd timeout occurred, cmd=0x%x, " "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " "abort.\n", command, mcp->mb[0], ha->flags.eeh_busy); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } else if (!abort_active) { /* call abort directly since we are in the DPC thread */ ql_dbg(ql_dbg_mbx, vha, 0x101d, "Timeout, calling abort_isp.\n"); if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { if (IS_QLA82XX(ha)) { ql_dbg(ql_dbg_mbx, vha, 0x112b, "disabling pause transmit on port " "0 & 1.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0| CRB_NIU_XG_PAUSE_CTL_P1); } ql_log(ql_log_info, base_vha, 0x101e, "Mailbox cmd timeout occurred, cmd=0x%x, " "mb[0]=0x%x. Scheduling ISP abort ", command, mcp->mb[0]); set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); /* Allow next mbx cmd to come in. */ complete(&ha->mbx_cmd_comp); if (ha->isp_ops->abort_isp(vha)) { /* Failed. retry later. */ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); ql_dbg(ql_dbg_mbx, vha, 0x101f, "Finished abort_isp.\n"); goto mbx_done; } } } premature_exit: /* Allow next mbx cmd to come in. 
 */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval) {
		ql_log(ql_log_warn, base_vha, 0x1020,
		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_load_ram
 *	Load a firmware segment into RISC RAM.  Uses the extended LOAD
 *	RISC RAM mailbox command when the load address needs more than
 *	16 bits or the ISP is FWI2-capable.
 *
 * Input:
 *	vha = adapter block pointer.
 *	req_dma = DMA address of the buffer holding the segment.
 *	risc_addr = RISC RAM load address.
 *	risc_code_size = segment transfer size.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	/* Extended form carries the high word of the RISC address in mb[8]. */
	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	/* 64-bit DMA address split across mb[2,3,6,7]. */
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;

	/* FWI2 parts take a 32-bit transfer size; legacy parts only 16-bit. */
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}

#define	EXTENDED_BB_CREDITS	BIT_0
/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		/* FWI2: 32-bit start address in mb[1]/mb[2]. */
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* Pass through the NVRAM extended BB-credits option. */
			mcp->mb[4] = (nv->enhanced_features &
			    EXTENDED_BB_CREDITS);
		} else
			mcp->mb[4] = 0;
		mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
		/* mb[1] returns the negotiated exchange count. */
		mcp->in_mb |= MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
			    "Done exchanges=%x.\n", mcp->mb[1]);
		} else {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
			    "Done %s.\n", __func__);
		}
	}

	return rval;
}

/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	major:		pointer for major number.
 *	minor:		pointer for minor number.
 *	subminor:	pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	/* 81xx/8031 additionally report MPI/PHY versions in mb[8..13]. */
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	/* FWI2 parts report extended attribute words in mb[15..17]. */
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}
	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
	} else {
		/* Copy returned option words back to the caller. */
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->in_mb |= MBX_1;
	} else {
		/* Legacy parts take additional option words. */
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Always report the completion status word back to the caller. */
	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1030,
		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
*/ int qla2x00_mbx_reg_test(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032, "Entered %s.\n", __func__); mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; mcp->mb[1] = 0xAAAA; mcp->mb[2] = 0x5555; mcp->mb[3] = 0xAA55; mcp->mb[4] = 0x55AA; mcp->mb[5] = 0xA5A5; mcp->mb[6] = 0x5A5A; mcp->mb[7] = 0x2525; mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval == QLA_SUCCESS) { if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 || mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA) rval = QLA_FUNCTION_FAILED; if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A || mcp->mb[7] != 0x2525) rval = QLA_FUNCTION_FAILED; } if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034, "Done %s.\n", __func__); } return rval; } /* * qla2x00_verify_checksum * Verify firmware checksum. * * Input: * ha = adapter block pointer. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		/* FWI2: 32-bit start address split across mb[1]/mb[2]. */
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->out_mb |= MBX_2|MBX_1;
		mcp->in_mb |= MBX_2|MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		mcp->in_mb |= MBX_1;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1036,
		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
	mcp->mb[1] = 0;
	/* 64-bit IOCB buffer address split across mb[2,3,6,7]. */
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_0;
	mcp->tov = tov;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
	} else {
		sts_entry_t *sts_entry = (sts_entry_t *) buffer;

		/* Mask reserved bits. */
		sts_entry->entry_status &= IS_FWI2_CAPABLE(vha->hw) ?
		    RF_MASK_24XX : RF_MASK;

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Convenience wrapper: issue an IOCB with the default mailbox timeout. */
int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}

/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long   flags = 0;
	int		rval;
	uint32_t	handle = 0;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	fc_port_t	*fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
	    "Entered %s.\n", __func__);

	/* Find the outstanding-command handle for this srb. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == req->num_outstanding_cmds) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Issue an ABORT TARGET mailbox command, then a marker IOCB. */
int
qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
{
	int rval, rval2;
	mbx_cmd_t  mc;
	mbx_cmd_t  *mcp = &mc;
	scsi_qla_host_t *vha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* NOTE(review): self-assignment presumably silences an
	 * unused-parameter warning; the LUN is not used here. */
	l = l;
	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
	    "Entered %s.\n", __func__);

	req = vha->hw->req_q_map[0];
	rsp = req->rsp;
	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
		    "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
	    MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1040,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Issue a LUN RESET mailbox command, then a LUN-scoped marker IOCB. */
int
qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
{
	int rval, rval2;
	mbx_cmd_t  mc;
	mbx_cmd_t  *mcp = &mc;
	scsi_qla_host_t *vha;
	struct req_que *req;
	struct rsp_que *rsp;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
	    "Entered %s.\n", __func__);

	req = vha->hw->req_q_map[0];
	rsp = req->rsp;
	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
	    MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1044,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* CNA parts additionally report FCoE VLAN/FCF/VN-port MAC data. */
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

	/* Return data.
	 * NOTE(review): output pointers are filled even when the mailbox
	 * command failed — callers apparently tolerate stale data; confirm.
	 */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain	= LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
		    "Done %s.\n", __func__);

		if (IS_CNA_CAPABLE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			/* VN-port MAC packed big-end-first into mb[11..13]. */
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
	}

	return rval;
}

/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Convert returned data and check our values. */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}

/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
	    "Entered %s.\n", __func__);

	/* 82xx with doorbell-write workaround enabled: ring the doorbell. */
	if (IS_QLA82XX(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
			(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	/* 64-bit init-control-block address split across mb[2,3,6,7]. */
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && ha->ex_init_cb->ex_version) {
		/* Extended ICB: mb[1] BIT_0 flags its presence. */
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}
	/* 1 and 2 should normally be captured. */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha))
		/* mb3 is additional info about the installed SFP.
		 */
		mcp->in_mb |= MBX_3;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104d,
		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_node_name_list
 *      Issue get node name list mailbox command, kmalloc()
 *      and return the resulting list. Caller must kfree() it!
 *
 * Input:
 *      ha = adapter state pointer.
 *      out_data = resulting list
 *      out_len = length of the resulting list
 *
 * Returns:
 *      qla2x00 local function return status code.
 *
 * Context:
 *      Kernel context.
 */
int
qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_port_24xx_data *list = NULL;
	void *pmap;
	mbx_cmd_t mc;
	dma_addr_t pmap_dma;
	ulong dma_size;
	int rval, left;

	/* Retry with a larger buffer until the firmware stops reporting
	 * "buffer too small" (mb[0]=MBS_COMMAND_ERROR, mb[1]=0xA). */
	left = 1;
	while (left > 0) {
		dma_size = left * sizeof(*list);
		pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
		    &pmap_dma, GFP_KERNEL);
		if (!pmap) {
			ql_log(ql_log_warn, vha, 0x113f,
			    "%s(%ld): DMA Alloc failed of %ld\n",
			    __func__, vha->host_no, dma_size);
			rval = QLA_MEMORY_ALLOC_FAILED;
			goto out;
		}

		mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
		mc.mb[1] = BIT_1 | BIT_3;
		mc.mb[2] = MSW(pmap_dma);
		mc.mb[3] = LSW(pmap_dma);
		mc.mb[6] = MSW(MSD(pmap_dma));
		mc.mb[7] = LSW(MSD(pmap_dma));
		/* NOTE(review): dma_size (ulong) is narrowed into a 16-bit
		 * mailbox register here — confirm list size stays < 64K. */
		mc.mb[8] = dma_size;
		mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
		mc.in_mb = MBX_0|MBX_1;
		mc.tov = 30;
		mc.flags = MBX_DMA_IN;

		rval = qla2x00_mailbox_command(vha, &mc);
		if (rval != QLA_SUCCESS) {
			if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
			    (mc.mb[1] == 0xA)) {
				/* NOTE(review): mb[] is already CPU-order;
				 * le16_to_cpu here looks redundant — verify. */
				left += le16_to_cpu(mc.mb[2]) /
				    sizeof(struct qla_port_24xx_data);
				goto restart;
			}
			goto out_free;
		}

		left = 0;

		list = kzalloc(dma_size, GFP_KERNEL);
		if (!list) {
			ql_log(ql_log_warn, vha, 0x1140,
			    "%s(%ld): failed to allocate node names list "
			    "structure.\n", __func__, vha->host_no);
			rval = QLA_MEMORY_ALLOC_FAILED;
			goto out_free;
		}

		memcpy(list, pmap, dma_size);
restart:
		dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
	}

	*out_data = list;
	*out_len = dma_size;

out:
	return rval;

out_free:
	dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
	return rval;
}

/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
	    "Entered %s.\n", __func__);

	pd24 = NULL;
	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd  == NULL) {
		ql_log(ql_log_warn, vha, 0x1050,
		    "Failed to allocate port database structure.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}
	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		uint64_t zero = 0;
		pd24 = (struct port_database_24xx *) pd;

		/* Check for logged in state. */
		if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
		    pd24->last_login_state != PDS_PRLI_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x1051,
			    "Unable to verify login-state (%x/%x) for "
			    "loop_id %x.\n", pd24->current_login_state,
			    pd24->last_login_state, fcport->loop_id);
			rval = QLA_FUNCTION_FAILED;
			goto gpd_error_out;
		}

		/* Port name mismatch against a known fcport means the
		 * remote device changed identity underneath us. */
		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd24->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd24->port_id[0];
		fcport->d_id.b.area = pd24->port_id[1];
		fcport->d_id.b.al_pa = pd24->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
		    FC_COS_CLASS2 : FC_COS_CLASS3;

		if (pd24->prli_svc_param_word_3[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
	} else {
		uint64_t zero = 0;

		/* Check for logged in state. */
		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
			ql_dbg(ql_dbg_mbx, vha, 0x100a,
			    "Unable to verify login-state (%x/%x) - "
			    "portid=%02x%02x%02x.\n", pd->master_state,
			    pd->slave_state, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
			rval = QLA_FUNCTION_FAILED;
			goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

		/* Get port_id of device.
		 * NOTE(review): legacy port_database_t stores the area byte
		 * at index 3 (layout differs from the 24xx structure). */
		fcport->d_id.b.domain = pd->port_id[0];
		fcport->d_id.b.area = pd->port_id[3];
		fcport->d_id.b.al_pa = pd->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd->options & BIT_4) ?
		    FC_COS_CLASS2: FC_COS_CLASS3;
	}

gpd_error_out:
	dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1052,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
		    mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = pointer for firmware state.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
	mcp->out_mb = MBX_0;
	/* FWI2 parts report five state words; legacy parts one. */
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	else
		mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return firmware states. */
	states[0] = mcp->mb[1];
	if (IS_FWI2_CAPABLE(vha->hw)) {
		states[1] = mcp->mb[2];
		states[2] = mcp->mb[3];
		states[3] = mcp->mb[4];
		states[4] = mcp->mb[5];
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_port_name
 *	Issue get port name mailbox command.
 *	Returned name is in big endian format.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop ID of device.
 *	name = pointer for name.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
    uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_PORT_NAME;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = loop_id << 8 | opt;
	}

	mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
	} else {
		if (name != NULL) {
			/* This function returns name in big endian. */
			name[0] = MSB(mcp->mb[2]);
			name[1] = LSB(mcp->mb[2]);
			name[2] = MSB(mcp->mb[3]);
			name[3] = LSB(mcp->mb[3]);
			name[4] = MSB(mcp->mb[6]);
			name[5] = LSB(mcp->mb[6]);
			name[6] = MSB(mcp->mb[7]);
			name[7] = LSB(mcp->mb[7]);
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla24xx_link_initialization
 *	Issue link initialization mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_link_initialize(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
	    "Entered %s.\n", __func__);

	/* Only meaningful on FWI2, non-CNA parts. */
	if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_LINK_INITIALIZATION;
	mcp->mb[1] = BIT_6|BIT_4;
	mcp->mb[2] = 0;
	mcp->mb[3] = 0;
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_lip_reset
 *	Issue LIP reset mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_lip_reset(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
	    "Entered %s.\n", __func__);

	if (IS_CNA_CAPABLE(vha->hw)) {
		/* Logout across all FCFs.
*/ mcp->mb[0] = MBC_LIP_FULL_LOGIN; mcp->mb[1] = BIT_1; mcp->mb[2] = 0; mcp->out_mb = MBX_2|MBX_1|MBX_0; } else if (IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[0] = MBC_LIP_FULL_LOGIN; mcp->mb[1] = BIT_6; mcp->mb[2] = 0; mcp->mb[3] = vha->hw->loop_reset_delay; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; } else { mcp->mb[0] = MBC_LIP_RESET; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) { mcp->mb[1] = 0x00ff; mcp->mb[10] = 0; mcp->out_mb |= MBX_10; } else { mcp->mb[1] = 0xff00; } mcp->mb[2] = vha->hw->loop_reset_delay; mcp->mb[3] = 0; } mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, "Done %s.\n", __func__); } return rval; } /* * qla2x00_send_sns * Send SNS command. * * Input: * ha = adapter block pointer. * sns = pointer for command. * cmd_size = command size. * buf_size = response/command size. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
*/ int qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, uint16_t cmd_size, size_t buf_size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, "Entered %s.\n", __func__); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, "Retry cnt=%d ratov=%d total tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); mcp->mb[0] = MBC_SEND_SNS_COMMAND; mcp->mb[1] = cmd_size; mcp->mb[2] = MSW(sns_phys_address); mcp->mb[3] = LSW(sns_phys_address); mcp->mb[6] = MSW(MSD(sns_phys_address)); mcp->mb[7] = LSW(MSD(sns_phys_address)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0|MBX_1; mcp->buf_size = buf_size; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x105f, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, "Done %s.\n", __func__); } return rval; } int qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) { int rval; struct logio_entry_24xx *lg; dma_addr_t lg_dma; uint32_t iop[2]; struct qla_hw_data *ha = vha->hw; struct req_que *req; struct rsp_que *rsp; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, "Entered %s.\n", __func__); if (ha->flags.cpu_affinity_enabled) req = ha->req_q_map[0]; else req = vha->req; rsp = req->rsp; lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { ql_log(ql_log_warn, vha, 0x1062, "Failed to allocate login IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(lg, 0, sizeof(struct logio_entry_24xx)); lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); 
if (opt & BIT_0) lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI); if (opt & BIT_1) lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI); lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; lg->vp_index = vha->vp_idx; rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, (ha->r_a_tov / 10 * 2) + 2); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1063, "Failed to issue login IOCB (%x).\n", rval); } else if (lg->entry_status != 0) { ql_dbg(ql_dbg_mbx, vha, 0x1064, "Failed to complete IOCB -- error status (%x).\n", lg->entry_status); rval = QLA_FUNCTION_FAILED; } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { iop[0] = le32_to_cpu(lg->io_parameter[0]); iop[1] = le32_to_cpu(lg->io_parameter[1]); ql_dbg(ql_dbg_mbx, vha, 0x1065, "Failed to complete IOCB -- completion status (%x) " "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), iop[0], iop[1]); switch (iop[0]) { case LSC_SCODE_PORTID_USED: mb[0] = MBS_PORT_ID_USED; mb[1] = LSW(iop[1]); break; case LSC_SCODE_NPORT_USED: mb[0] = MBS_LOOP_ID_USED; break; case LSC_SCODE_NOLINK: case LSC_SCODE_NOIOCB: case LSC_SCODE_NOXCB: case LSC_SCODE_CMD_FAILED: case LSC_SCODE_NOFABRIC: case LSC_SCODE_FW_NOT_READY: case LSC_SCODE_NOT_LOGGED_IN: case LSC_SCODE_NOPCB: case LSC_SCODE_ELS_REJECT: case LSC_SCODE_CMD_PARAM_ERR: case LSC_SCODE_NONPORT: case LSC_SCODE_LOGGED_IN: case LSC_SCODE_NOFLOGI_ACC: default: mb[0] = MBS_COMMAND_ERROR; break; } } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, "Done %s.\n", __func__); iop[0] = le32_to_cpu(lg->io_parameter[0]); mb[0] = MBS_COMMAND_COMPLETE; mb[1] = 0; if (iop[0] & BIT_4) { if (iop[0] & BIT_8) mb[1] |= BIT_1; } else mb[1] = BIT_0; /* Passback COS information. */ mb[10] = 0; if (lg->io_parameter[7] || lg->io_parameter[8]) mb[10] |= BIT_0; /* Class 2. */ if (lg->io_parameter[9] || lg->io_parameter[10]) mb[10] |= BIT_1; /* Class 3. 
*/ if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7)) mb[10] |= BIT_7; /* Confirmed Completion * Allowed */ } dma_pool_free(ha->s_dma_pool, lg, lg_dma); return rval; } /* * qla2x00_login_fabric * Issue login fabric port mailbox command. * * Input: * ha = adapter block pointer. * loop_id = device loop ID. * domain = device domain. * area = device area. * al_pa = device AL_PA. * status = pointer for return status. * opt = command options. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, "Entered %s.\n", __func__); mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(ha)) { mcp->mb[1] = loop_id; mcp->mb[10] = opt; mcp->out_mb |= MBX_10; } else { mcp->mb[1] = (loop_id << 8) | opt; } mcp->mb[2] = domain; mcp->mb[3] = area << 8 | al_pa; mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. */ if (mb != NULL) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[2] = mcp->mb[2]; mb[6] = mcp->mb[6]; mb[7] = mcp->mb[7]; /* COS retrieved from Get-Port-Database mailbox command. */ mb[10] = 0; } if (rval != QLA_SUCCESS) { /* RLU tmp code: need to change main mailbox_command function to * return ok even when the mailbox completion value is not * SUCCESS. The caller needs to be responsible to interpret * the return values of this mailbox command if we're not * to change too much of the existing code. 
*/ if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) rval = QLA_SUCCESS; /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1068, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, "Done %s.\n", __func__); } return rval; } /* * qla2x00_login_local_device * Issue login loop port mailbox command. * * Input: * ha = adapter block pointer. * loop_id = device loop ID. * opt = command options. * * Returns: * Return status code. * * Context: * Kernel context. * */ int qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t *mb_ret, uint8_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, "Entered %s.\n", __func__); if (IS_FWI2_CAPABLE(ha)) return qla24xx_login_fabric(vha, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, mb_ret, opt); mcp->mb[0] = MBC_LOGIN_LOOP_PORT; if (HAS_EXTENDED_IDS(ha)) mcp->mb[1] = fcport->loop_id; else mcp->mb[1] = fcport->loop_id << 8; mcp->mb[2] = opt; mcp->out_mb = MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. */ if (mb_ret != NULL) { mb_ret[0] = mcp->mb[0]; mb_ret[1] = mcp->mb[1]; mb_ret[6] = mcp->mb[6]; mb_ret[7] = mcp->mb[7]; } if (rval != QLA_SUCCESS) { /* AV tmp code: need to change main mailbox_command function to * return ok even when the mailbox completion value is not * SUCCESS. The caller needs to be responsible to interpret * the return values of this mailbox command if we're not * to change too much of the existing code. 
*/ if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) rval = QLA_SUCCESS; ql_dbg(ql_dbg_mbx, vha, 0x106b, "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, "Done %s.\n", __func__); } return (rval); } int qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint8_t area, uint8_t al_pa) { int rval; struct logio_entry_24xx *lg; dma_addr_t lg_dma; struct qla_hw_data *ha = vha->hw; struct req_que *req; struct rsp_que *rsp; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, "Entered %s.\n", __func__); lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { ql_log(ql_log_warn, vha, 0x106e, "Failed to allocate logout IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(lg, 0, sizeof(struct logio_entry_24xx)); if (ql2xmaxqueues > 1) req = ha->req_q_map[0]; else req = vha->req; rsp = req->rsp; lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| LCF_FREE_NPORT); lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; lg->vp_index = vha->vp_idx; rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, (ha->r_a_tov / 10 * 2) + 2); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x106f, "Failed to issue logout IOCB (%x).\n", rval); } else if (lg->entry_status != 0) { ql_dbg(ql_dbg_mbx, vha, 0x1070, "Failed to complete IOCB -- error status (%x).\n", lg->entry_status); rval = QLA_FUNCTION_FAILED; } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x1071, "Failed to complete IOCB -- completion status (%x) " "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), le32_to_cpu(lg->io_parameter[0]), le32_to_cpu(lg->io_parameter[1])); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, 
"Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, lg, lg_dma); return rval; } /* * qla2x00_fabric_logout * Issue logout fabric port mailbox command. * * Input: * ha = adapter block pointer. * loop_id = device loop ID. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint8_t area, uint8_t al_pa) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, "Entered %s.\n", __func__); mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; mcp->out_mb = MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) { mcp->mb[1] = loop_id; mcp->mb[10] = 0; mcp->out_mb |= MBX_10; } else { mcp->mb[1] = loop_id << 8; } mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1074, "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, "Done %s.\n", __func__); } return rval; } /* * qla2x00_full_login_lip * Issue full login LIP mailbox command. * * Input: * ha = adapter block pointer. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_full_login_lip(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, "Entered %s.\n", __func__); mcp->mb[0] = MBC_LIP_FULL_LOGIN; mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? 
BIT_3 : 0; mcp->mb[2] = 0; mcp->mb[3] = 0; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, "Done %s.\n", __func__); } return rval; } /* * qla2x00_get_id_list * * Input: * ha = adapter block pointer. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, uint16_t *entries) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, "Entered %s.\n", __func__); if (id_list == NULL) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_ID_LIST; mcp->out_mb = MBX_0; if (IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[2] = MSW(id_list_dma); mcp->mb[3] = LSW(id_list_dma); mcp->mb[6] = MSW(MSD(id_list_dma)); mcp->mb[7] = LSW(MSD(id_list_dma)); mcp->mb[8] = 0; mcp->mb[9] = vha->vp_idx; mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; } else { mcp->mb[1] = MSW(id_list_dma); mcp->mb[2] = LSW(id_list_dma); mcp->mb[3] = MSW(MSD(id_list_dma)); mcp->mb[6] = LSW(MSD(id_list_dma)); mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; } mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); } else { *entries = mcp->mb[1]; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, "Done %s.\n", __func__); } return rval; } /* * qla2x00_get_resource_cnts * Get current firmware resource counts. * * Input: * ha = adapter block pointer. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
*/ int qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt, uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; mcp->out_mb = MBX_0; mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) mcp->in_mb |= MBX_12; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x107d, "Failed mb[0]=%x.\n", mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11], mcp->mb[12]); if (cur_xchg_cnt) *cur_xchg_cnt = mcp->mb[3]; if (orig_xchg_cnt) *orig_xchg_cnt = mcp->mb[6]; if (cur_iocb_cnt) *cur_iocb_cnt = mcp->mb[7]; if (orig_iocb_cnt) *orig_iocb_cnt = mcp->mb[10]; if (vha->hw->flags.npiv_supported && max_npiv_vports) *max_npiv_vports = mcp->mb[11]; if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs) *max_fcfs = mcp->mb[12]; } return (rval); } /* * qla2x00_get_fcal_position_map * Get FCAL (LILP) position map using mailbox command * * Input: * ha = adapter state pointer. * pos_map = buffer pointer (can be NULL). * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
*/ int qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; char *pmap; dma_addr_t pmap_dma; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, "Entered %s.\n", __func__); pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); if (pmap == NULL) { ql_log(ql_log_warn, vha, 0x1080, "Memory alloc failed.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(pmap, 0, FCAL_MAP_SIZE); mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; mcp->mb[2] = MSW(pmap_dma); mcp->mb[3] = LSW(pmap_dma); mcp->mb[6] = MSW(MSD(pmap_dma)); mcp->mb[7] = LSW(MSD(pmap_dma)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->buf_size = FCAL_MAP_SIZE; mcp->flags = MBX_DMA_IN; mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); rval = qla2x00_mailbox_command(vha, mcp); if (rval == QLA_SUCCESS) { ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, "mb0/mb1=%x/%X FC/AL position map size (%x).\n", mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, pmap, pmap[0] + 1); if (pos_map) memcpy(pos_map, pmap, FCAL_MAP_SIZE); } dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, "Done %s.\n", __func__); } return rval; } /* * qla2x00_get_link_status * * Input: * ha = adapter block pointer. * loop_id = device loop ID. * ret_buf = pointer to link status return buffer. * * Returns: * 0 = success. * BIT_0 = mem alloc error. * BIT_1 = mailbox error. 
 */
int
qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
    struct link_statistics *stats, dma_addr_t stats_dma)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint32_t *siter, *diter, dwords;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_LINK_STATUS;
	mcp->mb[2] = MSW(stats_dma);
	mcp->mb[3] = LSW(stats_dma);
	mcp->mb[6] = MSW(MSD(stats_dma));
	mcp->mb[7] = LSW(MSD(stats_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	/* Loop-ID placement differs by HBA generation. */
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[4] = 0;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = loop_id << 8;
		mcp->out_mb |= MBX_1;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = IOCTL_CMD;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x1085,
			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
			rval = QLA_FUNCTION_FAILED;
		} else {
			/* Copy over data -- firmware data is LE. */
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
			    "Done %s.\n", __func__);
			dwords = offsetof(struct link_statistics, unused1) / 4;
			siter = diter = &stats->link_fail_cnt;
			while (dwords--)
				*diter++ = le32_to_cpu(*siter++);
		}
	} else {
		/* Failed. */
		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
	}

	return rval;
}

/* Fetch extended ISP24xx+ link statistics (in-place LE->CPU conversion). */
int
qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
    dma_addr_t stats_dma)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint32_t *siter, *diter, dwords;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
	mcp->mb[2] = MSW(stats_dma);
	mcp->mb[3] = LSW(stats_dma);
	mcp->mb[6] = MSW(MSD(stats_dma));
	mcp->mb[7] = LSW(MSD(stats_dma));
	mcp->mb[8] = sizeof(struct link_statistics) / 4;
	mcp->mb[9] = vha->vp_idx;
	mcp->mb[10] = 0;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = IOCTL_CMD;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x1089,
			    "Failed mb[0]=%x.\n", mcp->mb[0]);
			rval = QLA_FUNCTION_FAILED;
		} else {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
			    "Done %s.\n", __func__);
			/* Copy over data -- firmware data is LE. */
			dwords = sizeof(struct link_statistics) / 4;
			siter = diter = &stats->link_fail_cnt;
			while (dwords--)
				*diter++ = le32_to_cpu(*siter++);
		}
	} else {
		/* Failed. */
		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
	}

	return rval;
}

/* Abort an outstanding command on an ISP24xx-class HBA via an abort IOCB. */
int
qla24xx_abort_command(srb_t *sp)
{
	int rval;
	unsigned long flags = 0;

	struct abort_entry_24xx *abt;
	dma_addr_t abt_dma;
	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
	    "Entered %s.\n", __func__);

	/* Find the firmware handle of this sp under the hardware lock. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}

	abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
	if (abt == NULL) {
		ql_log(ql_log_warn, vha, 0x108d,
		    "Failed to allocate abort IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}
	memset(abt, 0, sizeof(struct abort_entry_24xx));

	abt->entry_type = ABORT_IOCB_TYPE;
	abt->entry_count = 1;
	abt->handle = MAKE_HANDLE(req->id, abt->handle);
	abt->nport_handle = cpu_to_le16(fcport->loop_id);
	abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
	abt->port_id[0] = fcport->d_id.b.al_pa;
	abt->port_id[1] = fcport->d_id.b.area;
	abt->port_id[2] = fcport->d_id.b.domain;
	abt->vp_index = fcport->vha->vp_idx;

	abt->req_que_no = cpu_to_le16(req->id);

	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x108e,
		    "Failed to issue IOCB (%x).\n", rval);
	} else if (abt->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x108f,
		    "Failed to complete IOCB -- error status (%x).\n",
		    abt->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
		/* Firmware returns the completion status in nport_handle. */
		ql_dbg(ql_dbg_mbx, vha, 0x1090,
		    "Failed to complete IOCB -- completion status (%x).\n",
		    le16_to_cpu(abt->nport_handle));
		rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, abt, abt_dma);

	return rval;
}

/* TM IOCB and its status response share one DMA-pool allocation. */
struct tsk_mgmt_cmd {
	union {
		struct tsk_mgmt_entry tsk;
		struct sts_entry_24xx sts;
	} p;
};

/*
 * Issue a SCSI task-management IOCB (target or LUN reset) and follow it
 * with a marker IOCB to resynchronize the request queue.
 */
static int
__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
    unsigned int l, int tag)
{
	int rval, rval2;
	struct tsk_mgmt_cmd *tsk;
	struct sts_entry_24xx *sts;
	dma_addr_t tsk_dma;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	vha = fcport->vha;
	ha = vha->hw;
	req = vha->req;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
	    "Entered %s.\n", __func__);

	if (ha->flags.cpu_affinity_enabled)
		rsp = ha->rsp_q_map[tag + 1];
	else
		rsp = req->rsp;
	tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
	if (tsk == NULL) {
		ql_log(ql_log_warn, vha, 0x1093,
		    "Failed to allocate task management IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}
	memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));

	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->p.tsk.entry_count = 1;
	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->p.tsk.control_flags = cpu_to_le32(type);
	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
	if (type == TCF_LUN_RESET) {
		/* LUN resets carry the LUN in FCP (big-endian) byte order. */
		int_to_scsilun(l, &tsk->p.tsk.lun);
		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
		    sizeof(tsk->p.tsk.lun));
	}

	sts = &tsk->p.sts;
	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1094,
		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
	} else if (sts->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x1095,
		    "Failed to complete IOCB -- error status (%x).\n",
		    sts->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1096,
		    "Failed to complete IOCB -- completion status (%x).\n",
		    le16_to_cpu(sts->comp_status));
		rval = QLA_FUNCTION_FAILED;
	} else if (le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
			    "Ignoring inconsistent data length -- not enough "
			    "response info (%d).\n",
			    le32_to_cpu(sts->rsp_data_len));
		} else if (sts->data[3]) {
			/* data[3] holds the FCP response code. */
			ql_dbg(ql_dbg_mbx, vha, 0x1098,
			    "Failed to complete IOCB -- response (%x).\n",
			    sts->data[3]);
			rval = QLA_FUNCTION_FAILED;
		}
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1099,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);

	return rval;
}

/* Target reset; prefers the async TM path when enabled. */
int
qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
		return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);

	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
}

/* LUN reset; prefers the async TM path when enabled. */
int
qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
		return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);

	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
}

/* Ask the firmware to generate a system error (for diagnostics). */
int
qla2x00_system_error(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 5;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/**
 * qla2x00_set_serdes_params() -
 * @ha: HA context
 *
 * Returns
 */
int
qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
    uint16_t sw_em_2g, uint16_t sw_em_4g)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SERDES_PARAMS;
	mcp->mb[1] = BIT_0;
	/* BIT_15 marks each per-rate emphasis value as valid. */
	mcp->mb[2] = sw_em_1g | BIT_15;
	mcp->mb[3] = sw_em_2g | BIT_15;
	mcp->mb[4] = sw_em_4g | BIT_15;
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x109f,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Tell FWI2-capable firmware to stop executing. */
int
qla2x00_stop_firmware(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_STOP_FIRMWARE;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 5;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
		if (mcp->mb[0] == MBS_INVALID_COMMAND)
			rval = QLA_INVALID_COMMAND;
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Enable firmware extended-firmware-trace (EFT) logging into eft_dma. */
int
qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
    uint16_t buffers)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	if (unlikely(pci_channel_offline(vha->hw->pdev)))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_TRACE_CONTROL;
	mcp->mb[1] = TC_EFT_ENABLE;
	mcp->mb[2] = LSW(eft_dma);
	mcp->mb[3] = MSW(eft_dma);
	mcp->mb[4] = LSW(MSD(eft_dma));
	mcp->mb[5] = MSW(MSD(eft_dma));
	mcp->mb[6] = buffers;
	mcp->mb[7] = TC_AEN_DISABLE;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10a5,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Disable firmware extended-firmware-trace (EFT) logging. */
int
qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	if (unlikely(pci_channel_offline(vha->hw->pdev)))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_TRACE_CONTROL;
	mcp->mb[1] = TC_EFT_DISABLE;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10a8,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Enable fibre-channel-event (FCE) tracing into fce_dma. */
int
qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
    uint16_t buffers, uint16_t *mb, uint32_t *dwords)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
	    "Entered %s.\n", __func__);

	/* FCE tracing is only available on these ISP generations. */
	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
	    !IS_QLA83XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	if (unlikely(pci_channel_offline(vha->hw->pdev)))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_TRACE_CONTROL;
	mcp->mb[1] = TC_FCE_ENABLE;
	mcp->mb[2] = LSW(fce_dma);
	mcp->mb[3] = MSW(fce_dma);
	mcp->mb[4] = LSW(MSD(fce_dma));
	mcp->mb[5] = MSW(MSD(fce_dma));
	mcp->mb[6] = buffers;
	mcp->mb[7] = TC_AEN_DISABLE;
	mcp->mb[8] = 0;
	mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
	mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
	    MBX_1|MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10ab,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
		    "Done %s.\n", __func__);

		if (mb)
			memcpy(mb, mcp->mb, 8 * sizeof(*mb));
		if (dwords)
			*dwords = buffers;
	}

	return rval;
}

/* Disable FCE tracing; returns last write/read pointers via wr/rd. */
int
qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	if (unlikely(pci_channel_offline(vha->hw->pdev)))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_TRACE_CONTROL;
	mcp->mb[1] = TC_FCE_DISABLE;
	mcp->mb[2] = TC_FCE_DISABLE_TRACE;
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
	    MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10ae,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
		    "Done %s.\n", __func__);

		/* Reassemble 64-bit pointers from four 16-bit mailboxes. */
		if (wr)
			*wr = (uint64_t) mcp->mb[5] << 48 |
			    (uint64_t) mcp->mb[4] << 32 |
			    (uint64_t) mcp->mb[3] << 16 |
			    (uint64_t) mcp->mb[2];
		if (rd)
			*rd = (uint64_t) mcp->mb[9] << 48 |
			    (uint64_t) mcp->mb[8] << 32 |
			    (uint64_t) mcp->mb[7] << 16 |
			    (uint64_t) mcp->mb[6];
	}

	return rval;
}

/* Query the iIDMA port speed for a logged-in port (continues past view). */
int
qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
    uint16_t *port_speed, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
	    "Entered %s.\n", __func__);

	if (!IS_IIDMA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_PORT_PARAMS;
	mcp->mb[1] = loop_id;
	mcp->mb[2] = mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return mailbox statuses.
*/ if (mb != NULL) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[3] = mcp->mb[3]; } if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, "Done %s.\n", __func__); if (port_speed) *port_speed = mcp->mb[3]; } return rval; } int qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t port_speed, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, "Entered %s.\n", __func__); if (!IS_IIDMA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_PORT_PARAMS; mcp->mb[1] = loop_id; mcp->mb[2] = BIT_0; if (IS_CNA_CAPABLE(vha->hw)) mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); else mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. 
*/ if (mb != NULL) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[3] = mcp->mb[3]; } if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, "Done %s.\n", __func__); } return rval; } void qla24xx_report_id_acquisition(scsi_qla_host_t *vha, struct vp_rpt_id_entry_24xx *rptid_entry) { uint8_t vp_idx; uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp; unsigned long flags; int found; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, "Entered %s.\n", __func__); if (rptid_entry->entry_status != 0) return; if (rptid_entry->format == 0) { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7, "Format 0 : Number of VPs setup %d, number of " "VPs acquired %d.\n", MSB(le16_to_cpu(rptid_entry->vp_count)), LSB(le16_to_cpu(rptid_entry->vp_count))); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8, "Primary port id %02x%02x%02x.\n", rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); } else if (rptid_entry->format == 1) { vp_idx = LSB(stat); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9, "Format 1: VP[%d] enabled - status %d - with " "port id %02x%02x%02x.\n", vp_idx, MSB(stat), rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); vp = vha; if (vp_idx == 0 && (MSB(stat) != 1)) goto reg_needed; if (MSB(stat) != 0 && MSB(stat) != 2) { ql_dbg(ql_dbg_mbx, vha, 0x10ba, "Could not acquire ID for VP[%d].\n", vp_idx); return; } found = 0; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { if (vp_idx == vp->vp_idx) { found = 1; break; } } spin_unlock_irqrestore(&ha->vport_slock, flags); if (!found) return; vp->d_id.b.domain = rptid_entry->port_id[2]; vp->d_id.b.area = rptid_entry->port_id[1]; vp->d_id.b.al_pa = rptid_entry->port_id[0]; /* * Cannot configure here as we are still sitting on the * response queue. Handle it in dpc context. 
*/ set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); reg_needed: set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); set_bit(VP_DPC_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } /* * qla24xx_modify_vp_config * Change VP configuration for vha * * Input: * vha = adapter block pointer. * * Returns: * qla2xxx local function return status code. * * Context: * Kernel context. */ int qla24xx_modify_vp_config(scsi_qla_host_t *vha) { int rval; struct vp_config_entry_24xx *vpmod; dma_addr_t vpmod_dma; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); /* This can be called by the parent */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, "Entered %s.\n", __func__); vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); if (!vpmod) { ql_log(ql_log_warn, vha, 0x10bc, "Failed to allocate modify VP IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(vpmod, 0, sizeof(struct vp_config_entry_24xx)); vpmod->entry_type = VP_CONFIG_IOCB_TYPE; vpmod->entry_count = 1; vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; vpmod->vp_count = 1; vpmod->vp_index1 = vha->vp_idx; vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; qlt_modify_vp_config(vha, vpmod); memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); vpmod->entry_count = 1; rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10bd, "Failed to issue VP config IOCB (%x).\n", rval); } else if (vpmod->comp_status != 0) { ql_dbg(ql_dbg_mbx, vha, 0x10be, "Failed to complete IOCB -- error status (%x).\n", vpmod->comp_status); rval = QLA_FUNCTION_FAILED; } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x10bf, "Failed to complete IOCB -- completion status (%x).\n", le16_to_cpu(vpmod->comp_status)); rval = QLA_FUNCTION_FAILED; } else { /* EMPTY */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, "Done 
%s.\n", __func__); fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); } dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); return rval; } /* * qla24xx_control_vp * Enable a virtual port for given host * * Input: * ha = adapter block pointer. * vhba = virtual adapter (unused) * index = index number for enabled VP * * Returns: * qla2xxx local function return status code. * * Context: * Kernel context. */ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) { int rval; int map, pos; struct vp_ctrl_entry_24xx *vce; dma_addr_t vce_dma; struct qla_hw_data *ha = vha->hw; int vp_index = vha->vp_idx; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1, "Entered %s enabling index %d.\n", __func__, vp_index); if (vp_index == 0 || vp_index >= ha->max_npiv_vports) return QLA_PARAMETER_ERROR; vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma); if (!vce) { ql_log(ql_log_warn, vha, 0x10c2, "Failed to allocate VP control IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx)); vce->entry_type = VP_CTRL_IOCB_TYPE; vce->entry_count = 1; vce->command = cpu_to_le16(cmd); vce->vp_count = __constant_cpu_to_le16(1); /* index map in firmware starts with 1; decrement index * this is ok as we never use index 0 */ map = (vp_index - 1) / 8; pos = (vp_index - 1) & 7; mutex_lock(&ha->vport_lock); vce->vp_idx_map[map] |= 1 << pos; mutex_unlock(&ha->vport_lock); rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10c3, "Failed to issue VP control IOCB (%x).\n", rval); } else if (vce->entry_status != 0) { ql_dbg(ql_dbg_mbx, vha, 0x10c4, "Failed to complete IOCB -- error status (%x).\n", vce->entry_status); rval = QLA_FUNCTION_FAILED; } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x10c5, "Failed to complet IOCB -- completion status (%x).\n", le16_to_cpu(vce->comp_status)); rval = 
QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6, "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, vce, vce_dma); return rval; } /* * qla2x00_send_change_request * Receive or disable RSCN request from fabric controller * * Input: * ha = adapter block pointer * format = registration format: * 0 - Reserved * 1 - Fabric detected registration * 2 - N_port detected registration * 3 - Full registration * FF - clear registration * vp_idx = Virtual port index * * Returns: * qla2x00 local function return status code. * * Context: * Kernel Context */ int qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, uint16_t vp_idx) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; mcp->mb[1] = format; mcp->mb[9] = vp_idx; mcp->out_mb = MBX_9|MBX_1|MBX_0; mcp->in_mb = MBX_0|MBX_1; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval == QLA_SUCCESS) { if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { rval = BIT_1; } } else rval = BIT_1; return rval; } int qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, uint32_t size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, "Entered %s.\n", __func__); if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; mcp->mb[8] = MSW(addr); mcp->out_mb = MBX_8|MBX_0; } else { mcp->mb[0] = MBC_DUMP_RISC_RAM; mcp->out_mb = MBX_0; } mcp->mb[1] = LSW(addr); mcp->mb[2] = MSW(req_dma); mcp->mb[3] = LSW(req_dma); mcp->mb[6] = MSW(MSD(req_dma)); mcp->mb[7] = LSW(MSD(req_dma)); mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; if (IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[4] = MSW(size); mcp->mb[5] = LSW(size); mcp->out_mb |= MBX_5|MBX_4; } else { mcp->mb[4] = LSW(size); mcp->out_mb |= MBX_4; } mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = 
qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1008, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, "Done %s.\n", __func__); } return rval; } /* 84XX Support **************************************************************/ struct cs84xx_mgmt_cmd { union { struct verify_chip_entry_84xx req; struct verify_chip_rsp_84xx rsp; } p; }; int qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) { int rval, retry; struct cs84xx_mgmt_cmd *mn; dma_addr_t mn_dma; uint16_t options; unsigned long flags; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, "Entered %s.\n", __func__); mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); if (mn == NULL) { return QLA_MEMORY_ALLOC_FAILED; } /* Force Update? */ options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; /* Diagnostic firmware? */ /* options |= MENLO_DIAG_FW; */ /* We update the firmware with only one data sequence. */ options |= VCO_END_OF_DATA; do { retry = 0; memset(mn, 0, sizeof(*mn)); mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; mn->p.req.entry_count = 1; mn->p.req.options = cpu_to_le16(options); ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, "Dump of Verify Request.\n"); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, (uint8_t *)mn, sizeof(*mn)); rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10cb, "Failed to issue verify IOCB (%x).\n", rval); goto verify_done; } ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, "Dump of Verify Response.\n"); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, (uint8_t *)mn, sizeof(*mn)); status[0] = le16_to_cpu(mn->p.rsp.comp_status); status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 
le16_to_cpu(mn->p.rsp.failure_code) : 0; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, "cs=%x fc=%x.\n", status[0], status[1]); if (status[0] != CS_COMPLETE) { rval = QLA_FUNCTION_FAILED; if (!(options & VCO_DONT_UPDATE_FW)) { ql_dbg(ql_dbg_mbx, vha, 0x10cf, "Firmware update failed. Retrying " "without update firmware.\n"); options |= VCO_DONT_UPDATE_FW; options &= ~VCO_FORCE_UPDATE; retry = 1; } } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, "Firmware updated to %x.\n", le32_to_cpu(mn->p.rsp.fw_ver)); /* NOTE: we only update OP firmware. */ spin_lock_irqsave(&ha->cs84xx->access_lock, flags); ha->cs84xx->op_fw_version = le32_to_cpu(mn->p.rsp.fw_ver); spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); } } while (retry); verify_done: dma_pool_free(ha->s_dma_pool, mn, mn_dma); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, "Done %s.\n", __func__); } return rval; } int qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) { int rval; unsigned long flags; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct device_reg_25xxmq __iomem *reg; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, "Entered %s.\n", __func__); mcp->mb[0] = MBC_INITIALIZE_MULTIQ; mcp->mb[1] = req->options; mcp->mb[2] = MSW(LSD(req->dma)); mcp->mb[3] = LSW(LSD(req->dma)); mcp->mb[6] = MSW(MSD(req->dma)); mcp->mb[7] = LSW(MSD(req->dma)); mcp->mb[5] = req->length; if (req->rsp) mcp->mb[10] = req->rsp->id; mcp->mb[12] = req->qos; mcp->mb[11] = req->vp_idx; mcp->mb[13] = req->rid; if (IS_QLA83XX(ha)) mcp->mb[15] = 0; reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) + QLA_QUE_PAGE * req->id); mcp->mb[4] = req->id; /* que in ptr index */ mcp->mb[8] = 0; /* que out ptr index */ mcp->mb[9] = 0; mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->flags = MBX_DMA_OUT; 
mcp->tov = MBX_TOV_SECONDS * 2; if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) mcp->in_mb |= MBX_1; if (IS_QLA83XX(ha)) { mcp->out_mb |= MBX_15; /* debug q create issue in SR-IOV */ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; } spin_lock_irqsave(&ha->hardware_lock, flags); if (!(req->options & BIT_0)) { WRT_REG_DWORD(&reg->req_q_in, 0); if (!IS_QLA83XX(ha)) WRT_REG_DWORD(&reg->req_q_out, 0); } req->req_q_in = &reg->req_q_in; req->req_q_out = &reg->req_q_out; spin_unlock_irqrestore(&ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10d4, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, "Done %s.\n", __func__); } return rval; } int qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) { int rval; unsigned long flags; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct device_reg_25xxmq __iomem *reg; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, "Entered %s.\n", __func__); mcp->mb[0] = MBC_INITIALIZE_MULTIQ; mcp->mb[1] = rsp->options; mcp->mb[2] = MSW(LSD(rsp->dma)); mcp->mb[3] = LSW(LSD(rsp->dma)); mcp->mb[6] = MSW(MSD(rsp->dma)); mcp->mb[7] = LSW(MSD(rsp->dma)); mcp->mb[5] = rsp->length; mcp->mb[14] = rsp->msix->entry; mcp->mb[13] = rsp->rid; if (IS_QLA83XX(ha)) mcp->mb[15] = 0; reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) + QLA_QUE_PAGE * rsp->id); mcp->mb[4] = rsp->id; /* que in ptr index */ mcp->mb[8] = 0; /* que out ptr index */ mcp->mb[9] = 0; mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->flags = MBX_DMA_OUT; mcp->tov = MBX_TOV_SECONDS * 2; if (IS_QLA81XX(ha)) { mcp->out_mb |= MBX_12|MBX_11|MBX_10; mcp->in_mb |= MBX_1; } else if (IS_QLA83XX(ha)) { mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; mcp->in_mb |= MBX_1; /* debug q create issue in SR-IOV */ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; } spin_lock_irqsave(&ha->hardware_lock, flags); 
if (!(rsp->options & BIT_0)) { WRT_REG_DWORD(&reg->rsp_q_out, 0); if (!IS_QLA83XX(ha)) WRT_REG_DWORD(&reg->rsp_q_in, 0); } spin_unlock_irqrestore(&ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10d7, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, "Done %s.\n", __func__); } return rval; } int qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, "Entered %s.\n", __func__); mcp->mb[0] = MBC_IDC_ACK; memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10da, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, "Done %s.\n", __func__); } return rval; } int qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, "Entered %s.\n", __func__); if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10dd, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, "Done %s.\n", __func__); *sector_size = mcp->mb[1]; } return rval; } int qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) return QLA_FUNCTION_FAILED; 
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, "Entered %s.\n", __func__); mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : FAC_OPT_CMD_WRITE_PROTECT; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e0, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, "Done %s.\n", __func__); } return rval; } int qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, "Entered %s.\n", __func__); mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; mcp->mb[2] = LSW(start); mcp->mb[3] = MSW(start); mcp->mb[4] = LSW(finish); mcp->mb[5] = MSW(finish); mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e3, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, "Done %s.\n", __func__); } return rval; } int qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) { int rval = 0; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, "Entered %s.\n", __func__); mcp->mb[0] = MBC_RESTART_MPI_FW; mcp->out_mb = MBX_0; mcp->in_mb = MBX_0|MBX_1; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e6, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, "Done %s.\n", __func__); } return 
rval; } static int qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_RNID_PARAMS; mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); *temp = mcp->mb[1]; if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x115a, "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, "Done %s.\n", __func__); } return rval; } int qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; if (len == 1) opt |= BIT_0; mcp->mb[0] = MBC_READ_SFP; mcp->mb[1] = dev; mcp->mb[2] = MSW(sfp_dma); mcp->mb[3] = LSW(sfp_dma); mcp->mb[6] = MSW(MSD(sfp_dma)); mcp->mb[7] = LSW(MSD(sfp_dma)); mcp->mb[8] = len; mcp->mb[9] = off; mcp->mb[10] = opt; mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (opt & BIT_0) *sfp = mcp->mb[1]; if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e9, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, "Done %s.\n", __func__); } return rval; } int qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 
"Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; if (len == 1) opt |= BIT_0; if (opt & BIT_0) len = *sfp; mcp->mb[0] = MBC_WRITE_SFP; mcp->mb[1] = dev; mcp->mb[2] = MSW(sfp_dma); mcp->mb[3] = LSW(sfp_dma); mcp->mb[6] = MSW(MSD(sfp_dma)); mcp->mb[7] = LSW(MSD(sfp_dma)); mcp->mb[8] = len; mcp->mb[9] = off; mcp->mb[10] = opt; mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10ec, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, "Done %s.\n", __func__); } return rval; } int qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, uint16_t size_in_bytes, uint16_t *actual_size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, "Entered %s.\n", __func__); if (!IS_CNA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_XGMAC_STATS; mcp->mb[2] = MSW(stats_dma); mcp->mb[3] = LSW(stats_dma); mcp->mb[6] = MSW(MSD(stats_dma)); mcp->mb[7] = LSW(MSD(stats_dma)); mcp->mb[8] = size_in_bytes >> 2; mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10ef, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, "Done %s.\n", __func__); *actual_size = mcp->mb[2] << 2; } return rval; } int qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, uint16_t size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, "Entered %s.\n", __func__); if (!IS_CNA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_DCBX_PARAMS; mcp->mb[1] = 
0; mcp->mb[2] = MSW(tlv_dma); mcp->mb[3] = LSW(tlv_dma); mcp->mb[6] = MSW(MSD(tlv_dma)); mcp->mb[7] = LSW(MSD(tlv_dma)); mcp->mb[8] = size; mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10f2, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, "Done %s.\n", __func__); } return rval; } int qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_READ_RAM_EXTENDED; mcp->mb[1] = LSW(risc_addr); mcp->mb[8] = MSW(risc_addr); mcp->out_mb = MBX_8|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_2|MBX_0; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10f5, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, "Done %s.\n", __func__); *data = mcp->mb[3] << 16 | mcp->mb[2]; } return rval; } int qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing /* transfer count */ mcp->mb[10] = LSW(mreq->transfer_size); mcp->mb[11] = MSW(mreq->transfer_size); /* send data address */ mcp->mb[14] = LSW(mreq->send_dma); mcp->mb[15] = MSW(mreq->send_dma); mcp->mb[20] = LSW(MSD(mreq->send_dma)); mcp->mb[21] = MSW(MSD(mreq->send_dma)); /* receive data address */ mcp->mb[16] = 
LSW(mreq->rcv_dma); mcp->mb[17] = MSW(mreq->rcv_dma); mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); /* Iteration count */ mcp->mb[18] = LSW(mreq->iteration_count); mcp->mb[19] = MSW(mreq->iteration_count); mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; if (IS_CNA_CAPABLE(vha->hw)) mcp->out_mb |= MBX_2; mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; mcp->buf_size = mreq->transfer_size; mcp->tov = MBX_TOV_SECONDS; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10f8, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, "Done %s.\n", __func__); } /* Copy mailbox information */ memcpy( mresp, mcp->mb, 64); return rval; } int qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */ if (IS_CNA_CAPABLE(ha)) { mcp->mb[1] |= BIT_15; mcp->mb[2] = vha->fcoe_fcf_idx; } mcp->mb[16] = LSW(mreq->rcv_dma); mcp->mb[17] = MSW(mreq->rcv_dma); mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); mcp->mb[10] = LSW(mreq->transfer_size); mcp->mb[14] = LSW(mreq->send_dma); mcp->mb[15] = MSW(mreq->send_dma); mcp->mb[20] = LSW(MSD(mreq->send_dma)); mcp->mb[21] = MSW(MSD(mreq->send_dma)); mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; if (IS_CNA_CAPABLE(ha)) mcp->out_mb |= MBX_2; mcp->in_mb = MBX_0; if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 
IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) mcp->in_mb |= MBX_1; if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) mcp->in_mb |= MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; mcp->buf_size = mreq->transfer_size; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10fb, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, "Done %s.\n", __func__); } /* Copy mailbox information */ memcpy(mresp, mcp->mb, 64); return rval; } int qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); mcp->mb[0] = MBC_ISP84XX_RESET; mcp->mb[1] = enable_diagnostic; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); else ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, "Done %s.\n", __func__); return rval; } int qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; mcp->mb[1] = LSW(risc_addr); mcp->mb[2] = LSW(data); mcp->mb[3] = MSW(data); mcp->mb[8] = MSW(risc_addr); mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1101, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, "Done %s.\n", __func__); } return rval; } int 
qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	uint32_t stat, timer;
	uint16_t mb0 = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/*
	 * Issue the MBC_WRITE_MPI_REGISTER mailbox command by banging the
	 * mailbox registers directly and polling for completion, instead of
	 * going through qla2x00_mailbox_command().  On success the firmware
	 * completion status (mb0 & MBS_MASK) is returned; on timeout
	 * QLA_FUNCTION_FAILED.
	 *
	 * mb[0..3] are the four data words written to mailbox1..mailbox4;
	 * the array is input-only (only mb[0] is re-read, for logging).
	 */
	rval = QLA_SUCCESS;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
	    "Entered %s.\n", __func__);

	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Write the MBC data to the registers */
	WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
	WRT_REG_WORD(&reg->mailbox1, mb[0]);
	WRT_REG_WORD(&reg->mailbox2, mb[1]);
	WRT_REG_WORD(&reg->mailbox3, mb[2]);
	WRT_REG_WORD(&reg->mailbox4, mb[3]);
	/* Ring the doorbell: tell the RISC a mailbox command is pending. */
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

	/* Poll for MBC interrupt */
	/* NOTE(review): bound is 6000000 iterations with udelay(5), i.e.
	 * roughly a 30 second worst-case busy-wait — confirm acceptable. */
	for (timer = 6000000; timer; timer--) {
		/* Check for pending interrupts. */
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_INT) {
			stat &= 0xff;

			/* Accept only mailbox-completion style interrupt
			 * status codes (0x1/0x2/0x10/0x11). */
			if (stat == 0x1 || stat == 0x2 ||
			    stat == 0x10 || stat == 0x11) {
				set_bit(MBX_INTERRUPT,
				    &ha->mbx_cmd_flags);
				mb0 = RD_REG_WORD(&reg->mailbox0);
				WRT_REG_DWORD(&reg->hccr,
				    HCCRX_CLR_RISC_INT);
				/* Read back to flush the posted write. */
				RD_REG_DWORD(&reg->hccr);
				break;
			}
		}
		udelay(5);
	}

	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
		rval = mb0 & MBS_MASK;
	else
		rval = QLA_FUNCTION_FAILED;

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1104,
		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * Query the current link data rate (MBC_DATA_RATE).  FWI2-capable ISPs
 * only.  On success, mb[1] holds the rate; the caller's else-branch
 * (continued on the following source line) stores it in
 * ha->link_data_rate unless the firmware reports the 0x7 sentinel.
 */
int
qla2x00_get_data_rate(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_DATA_RATE;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	/* ISP83xx returns an extra status word in mailbox 3. */
	if (IS_QLA83XX(ha))
		mcp->in_mb |= MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1107,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, "Done %s.\n", __func__); if (mcp->mb[1] != 0x7) ha->link_data_rate = mcp->mb[1]; } return rval; } int qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, "Entered %s.\n", __func__); if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_PORT_CONFIG; mcp->out_mb = MBX_0; mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x110a, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { /* Copy all bits to preserve original value */ memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, "Done %s.\n", __func__); } return rval; } int qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_PORT_CONFIG; /* Copy all bits to preserve original setting */ memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x110d, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, "Done %s.\n", __func__); return rval; } int qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, "Entered %s.\n", __func__); if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_PORT_PARAMS; mcp->mb[1] = loop_id; if 
(ha->flags.fcp_prio_enabled) mcp->mb[2] = BIT_1; else mcp->mb[2] = BIT_2; mcp->mb[4] = priority & 0xf; mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (mb != NULL) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[3] = mcp->mb[3]; mb[4] = mcp->mb[4]; } if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, "Done %s.\n", __func__); } return rval; } int qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) { int rval = QLA_FUNCTION_FAILED; struct qla_hw_data *ha = vha->hw; uint8_t byte; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca, "Entered %s.\n", __func__); if (ha->thermal_support & THERMAL_SUPPORT_I2C) { rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0); *temp = byte; if (rval == QLA_SUCCESS) goto done; ql_log(ql_log_warn, vha, 0x10c9, "Thermal not supported through I2C bus, trying alternate " "method (ISP access).\n"); ha->thermal_support &= ~THERMAL_SUPPORT_I2C; } if (ha->thermal_support & THERMAL_SUPPORT_ISP) { rval = qla2x00_read_asic_temperature(vha, temp); if (rval == QLA_SUCCESS) goto done; ql_log(ql_log_warn, vha, 0x1019, "Thermal not supported through ISP.\n"); ha->thermal_support &= ~THERMAL_SUPPORT_ISP; } ql_log(ql_log_warn, vha, 0x1150, "Thermal not supported by this card " "(ignoring further requests).\n"); return rval; done: ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018, "Done %s.\n", __func__); return rval; } int qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_TOGGLE_INTERRUPT; mcp->mb[1] = 1; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = 
MBX_0; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1016, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, "Done %s.\n", __func__); } return rval; } int qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, "Entered %s.\n", __func__); if (!IS_QLA82XX(ha)) return QLA_FUNCTION_FAILED; memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_TOGGLE_INTERRUPT; mcp->mb[1] = 0; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x100c, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, "Done %s.\n", __func__); } return rval; } int qla82xx_md_get_template_size(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; int rval = QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[2] = LSW(RQST_TMPLT_SIZE); mcp->mb[3] = MSW(RQST_TMPLT_SIZE); mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; mcp->tov = MBX_TOV_SECONDS; rval = qla2x00_mailbox_command(vha, mcp); /* Always copy back return mailbox values. 
*/ if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1120, "mailbox command FAILED=0x%x, subcode=%x.\n", (mcp->mb[1] << 16) | mcp->mb[0], (mcp->mb[3] << 16) | mcp->mb[2]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, "Done %s.\n", __func__); ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); if (!ha->md_template_size) { ql_dbg(ql_dbg_mbx, vha, 0x1122, "Null template size obtained.\n"); rval = QLA_FUNCTION_FAILED; } } return rval; } int qla82xx_md_get_template(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; int rval = QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, "Entered %s.\n", __func__); ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); if (!ha->md_tmplt_hdr) { ql_log(ql_log_warn, vha, 0x1124, "Unable to allocate memory for Minidump template.\n"); return rval; } memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[2] = LSW(RQST_TMPLT); mcp->mb[3] = MSW(RQST_TMPLT); mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); mcp->mb[8] = LSW(ha->md_template_size); mcp->mb[9] = MSW(ha->md_template_size); mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; mcp->tov = MBX_TOV_SECONDS; mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1125, "mailbox command FAILED=0x%x, subcode=%x.\n", ((mcp->mb[1] << 16) | mcp->mb[0]), ((mcp->mb[3] << 16) | mcp->mb[2])); } else ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, "Done %s.\n", __func__); return rval; } int qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) { int rval; struct 
qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, "Entered %s.\n", __func__); memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_SET_LED_CONFIG; mcp->mb[1] = led_cfg[0]; mcp->mb[2] = led_cfg[1]; if (IS_QLA8031(ha)) { mcp->mb[3] = led_cfg[2]; mcp->mb[4] = led_cfg[3]; mcp->mb[5] = led_cfg[4]; mcp->mb[6] = led_cfg[5]; } mcp->out_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA8031(ha)) mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; mcp->in_mb = MBX_0; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1134, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, "Done %s.\n", __func__); } return rval; } int qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, "Entered %s.\n", __func__); memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_GET_LED_CONFIG; mcp->out_mb = MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA8031(ha)) mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1137, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { led_cfg[0] = mcp->mb[1]; led_cfg[1] = mcp->mb[2]; if (IS_QLA8031(ha)) { led_cfg[2] = mcp->mb[3]; led_cfg[3] = mcp->mb[4]; led_cfg[4] = mcp->mb[5]; led_cfg[5] = mcp->mb[6]; } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, "Done %s.\n", __func__); } return rval; } int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA82XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 
0x1127, "Entered %s.\n", __func__); memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_SET_LED_CONFIG; if (enable) mcp->mb[7] = 0xE; else mcp->mb[7] = 0xD; mcp->out_mb = MBX_7|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1128, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, "Done %s.\n", __func__); } return rval; } int qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA83XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, "Entered %s.\n", __func__); mcp->mb[0] = MBC_WRITE_REMOTE_REG; mcp->mb[1] = LSW(reg); mcp->mb[2] = MSW(reg); mcp->mb[3] = LSW(data); mcp->mb[4] = MSW(data); mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1131, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, "Done %s.\n", __func__); } return rval; } int qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, "Implicit LOGO Unsupported.\n"); return QLA_FUNCTION_FAILED; } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, "Entering %s.\n", __func__); /* Perform Implicit LOGO. 
*/ mcp->mb[0] = MBC_PORT_LOGOUT; mcp->mb[1] = fcport->loop_id; mcp->mb[10] = BIT_15; mcp->out_mb = MBX_10|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) ql_dbg(ql_dbg_mbx, vha, 0x113d, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); else ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, "Done %s.\n", __func__); return rval; } int qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; unsigned long retry_max_time = jiffies + (2 * HZ); if (!IS_QLA83XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); retry_rd_reg: mcp->mb[0] = MBC_READ_REMOTE_REG; mcp->mb[1] = LSW(reg); mcp->mb[2] = MSW(reg); mcp->out_mb = MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x114c, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { *data = (mcp->mb[3] | (mcp->mb[4] << 16)); if (*data == QLA8XXX_BAD_VALUE) { /* * During soft-reset CAMRAM register reads might * return 0xbad0bad0. So retry for MAX of 2 sec * while reading camram registers. */ if (time_after(jiffies, retry_max_time)) { ql_dbg(ql_dbg_mbx, vha, 0x1141, "Failure to read CAMRAM register. 
" "data=0x%x.\n", *data); return QLA_FUNCTION_FAILED; } msleep(100); goto retry_rd_reg; } ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); } return rval; } int qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; if (!IS_QLA83XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; mcp->out_mb = MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1144, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); ha->isp_ops->fw_dump(vha, 0); } else { ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); } return rval; } int qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; uint8_t subcode = (uint8_t)options; struct qla_hw_data *ha = vha->hw; if (!IS_QLA8031(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_ACCESS_CONTROL; mcp->mb[1] = options; mcp->out_mb = MBX_1|MBX_0; if (subcode & BIT_2) { mcp->mb[2] = LSW(start_addr); mcp->mb[3] = MSW(start_addr); mcp->mb[4] = LSW(end_addr); mcp->mb[5] = MSW(end_addr); mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; } mcp->in_mb = MBX_2|MBX_1|MBX_0; if (!(subcode & (BIT_2 | BIT_5))) mcp->in_mb |= MBX_4|MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1147, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[4]); ha->isp_ops->fw_dump(vha, 0); } else { if (subcode & BIT_5) *sector_size = mcp->mb[1]; else if (subcode & (BIT_6 | BIT_7)) { ql_dbg(ql_dbg_mbx, vha, 0x1148, "Driver-lock id=%x%x", 
mcp->mb[4], mcp->mb[3]); } else if (subcode & (BIT_3 | BIT_4)) { ql_dbg(ql_dbg_mbx, vha, 0x1149, "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); } ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); } return rval; } int qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, uint32_t size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_MCTP_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, "Entered %s.\n", __func__); mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; mcp->mb[1] = LSW(addr); mcp->mb[2] = MSW(req_dma); mcp->mb[3] = LSW(req_dma); mcp->mb[4] = MSW(size); mcp->mb[5] = LSW(size); mcp->mb[6] = MSW(MSD(req_dma)); mcp->mb[7] = LSW(MSD(req_dma)); mcp->mb[8] = MSW(addr); /* Setting RAM ID to valid */ mcp->mb[10] |= BIT_7; /* For MCTP RAM ID is 0x40 */ mcp->mb[10] |= 0x40; mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x114e, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d, "Done %s.\n", __func__); } return rval; }
gpl-2.0
fernandolopez/syslinux
gpxe/src/core/serial.c
33
6515
/* * The serial port interface routines implement a simple polled i/o * interface to a standard serial port. Due to the space restrictions * for the boot blocks, no BIOS support is used (since BIOS requires * expensive real/protected mode switches), instead the rudimentary * BIOS support is duplicated here. * * The base address and speed for the i/o port are passed from the * Makefile in the COMCONSOLE and CONSPEED preprocessor macros. The * line control parameters are currently hard-coded to 8 bits, no * parity, 1 stop bit (8N1). This can be changed in init_serial(). */ FILE_LICENCE ( GPL2_OR_LATER ); #include "stddef.h" #include <gpxe/init.h> #include <gpxe/io.h> #include <unistd.h> #include <gpxe/serial.h> #include "config/serial.h" /* Set default values if none specified */ #ifndef COMCONSOLE #define COMCONSOLE 0x3f8 #endif #ifndef COMSPEED #define COMSPEED 9600 #endif #ifndef COMDATA #define COMDATA 8 #endif #ifndef COMPARITY #define COMPARITY 0 #endif #ifndef COMSTOP #define COMSTOP 1 #endif #undef UART_BASE #define UART_BASE ( COMCONSOLE ) #undef UART_BAUD #define UART_BAUD ( COMSPEED ) #if ((115200%UART_BAUD) != 0) #error Bad ttys0 baud rate #endif #define COMBRD (115200/UART_BAUD) /* Line Control Settings */ #define UART_LCS ( ( ( (COMDATA) - 5 ) << 0 ) | \ ( ( (COMPARITY) ) << 3 ) | \ ( ( (COMSTOP) - 1 ) << 2 ) ) /* Data */ #define UART_RBR 0x00 #define UART_TBR 0x00 /* Control */ #define UART_IER 0x01 #define UART_IIR 0x02 #define UART_FCR 0x02 #define UART_LCR 0x03 #define UART_MCR 0x04 #define UART_DLL 0x00 #define UART_DLM 0x01 /* Status */ #define UART_LSR 0x05 #define UART_LSR_TEMPT 0x40 /* Transmitter empty */ #define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ #define UART_LSR_BI 0x10 /* Break interrupt indicator */ #define UART_LSR_FE 0x08 /* Frame error indicator */ #define UART_LSR_PE 0x04 /* Parity error indicator */ #define UART_LSR_OE 0x02 /* Overrun error indicator */ #define UART_LSR_DR 0x01 /* Receiver data ready */ #define 
UART_MSR 0x06 #define UART_SCR 0x07 #if defined(UART_MEM) #define uart_readb(addr) readb((addr)) #define uart_writeb(val,addr) writeb((val),(addr)) #else #define uart_readb(addr) inb((addr)) #define uart_writeb(val,addr) outb((val),(addr)) #endif /* * void serial_putc(int ch); * Write character `ch' to port UART_BASE. */ void serial_putc ( int ch ) { int i; int status; i = 1000; /* timeout */ while(--i > 0) { status = uart_readb(UART_BASE + UART_LSR); if (status & UART_LSR_THRE) { /* TX buffer emtpy */ uart_writeb(ch, UART_BASE + UART_TBR); break; } mdelay(2); } } /* * int serial_getc(void); * Read a character from port UART_BASE. */ int serial_getc ( void ) { int status; int ch; do { status = uart_readb(UART_BASE + UART_LSR); } while((status & 1) == 0); ch = uart_readb(UART_BASE + UART_RBR); /* fetch (first) character */ ch &= 0x7f; /* remove any parity bits we get */ if (ch == 0x7f) { /* Make DEL... look like BS */ ch = 0x08; } return ch; } /* * int serial_ischar(void); * If there is a character in the input buffer of port UART_BASE, * return nonzero; otherwise return 0. */ int serial_ischar ( void ) { int status; status = uart_readb(UART_BASE + UART_LSR); /* line status reg; */ return status & 1; /* rx char available */ } /* * int serial_init(void); * Initialize port UART_BASE to speed COMSPEED, line settings 8N1. */ static void serial_init ( void ) { int status; int divisor, lcs; DBG ( "Serial port %#x initialising\n", UART_BASE ); divisor = COMBRD; lcs = UART_LCS; #ifdef COMPRESERVE lcs = uart_readb(UART_BASE + UART_LCR) & 0x7f; uart_writeb(0x80 | lcs, UART_BASE + UART_LCR); divisor = (uart_readb(UART_BASE + UART_DLM) << 8) | uart_readb(UART_BASE + UART_DLL); uart_writeb(lcs, UART_BASE + UART_LCR); #endif /* Set Baud Rate Divisor to COMSPEED, and test to see if the * serial port appears to be present. 
*/ uart_writeb(0x80 | lcs, UART_BASE + UART_LCR); uart_writeb(0xaa, UART_BASE + UART_DLL); if (uart_readb(UART_BASE + UART_DLL) != 0xaa) { DBG ( "Serial port %#x UART_DLL failed\n", UART_BASE ); goto out; } uart_writeb(0x55, UART_BASE + UART_DLL); if (uart_readb(UART_BASE + UART_DLL) != 0x55) { DBG ( "Serial port %#x UART_DLL failed\n", UART_BASE ); goto out; } uart_writeb(divisor & 0xff, UART_BASE + UART_DLL); if (uart_readb(UART_BASE + UART_DLL) != (divisor & 0xff)) { DBG ( "Serial port %#x UART_DLL failed\n", UART_BASE ); goto out; } uart_writeb(0xaa, UART_BASE + UART_DLM); if (uart_readb(UART_BASE + UART_DLM) != 0xaa) { DBG ( "Serial port %#x UART_DLM failed\n", UART_BASE ); goto out; } uart_writeb(0x55, UART_BASE + UART_DLM); if (uart_readb(UART_BASE + UART_DLM) != 0x55) { DBG ( "Serial port %#x UART_DLM failed\n", UART_BASE ); goto out; } uart_writeb((divisor >> 8) & 0xff, UART_BASE + UART_DLM); if (uart_readb(UART_BASE + UART_DLM) != ((divisor >> 8) & 0xff)) { DBG ( "Serial port %#x UART_DLM failed\n", UART_BASE ); goto out; } uart_writeb(lcs, UART_BASE + UART_LCR); /* disable interrupts */ uart_writeb(0x0, UART_BASE + UART_IER); /* disable fifo's */ uart_writeb(0x00, UART_BASE + UART_FCR); /* Set clear to send, so flow control works... */ uart_writeb((1<<1), UART_BASE + UART_MCR); /* Flush the input buffer. */ do { /* rx buffer reg * throw away (unconditionally the first time) */ (void) uart_readb(UART_BASE + UART_RBR); /* line status reg */ status = uart_readb(UART_BASE + UART_LSR); } while(status & UART_LSR_DR); out: return; } /* * void serial_fini(void); * Cleanup our use of the serial port, in particular flush the * output buffer so we don't accidentially lose characters. */ static void serial_fini ( int flags __unused ) { int i, status; /* Flush the output buffer to avoid dropping characters, * if we are reinitializing the serial port. 
*/ i = 10000; /* timeout */ do { status = uart_readb(UART_BASE + UART_LSR); } while((--i > 0) && !(status & UART_LSR_TEMPT)); /* Don't mark it as disabled; it's still usable */ } /** * Serial driver initialisation function * * Initialise serial port early on so that it is available to capture * early debug messages. */ struct init_fn serial_init_fn __init_fn ( INIT_SERIAL ) = { .initialise = serial_init, }; /** Serial driver startup function */ struct startup_fn serial_startup_fn __startup_fn ( STARTUP_EARLY ) = { .shutdown = serial_fini, };
gpl-2.0
oldstylejoe/vlc-timed
modules/gui/qt4/components/playlist/playlist_model.cpp
33
30024
/***************************************************************************** * playlist_model.cpp : Manage playlist model **************************************************************************** * Copyright (C) 2006-2011 the VideoLAN team * $Id$ * * Authors: Clément Stenac <zorglub@videolan.org> * Ilkka Ollakkka <ileoo (at) videolan dot org> * Jakob Leben <jleben@videolan.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. 
*****************************************************************************/ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "qt4.hpp" #include "components/playlist/playlist_model.hpp" #include "input_manager.hpp" /* THEMIM */ #include "util/qt_dirs.hpp" #include "recents.hpp" /* Open:: */ #include <vlc_intf_strings.h> /* I_DIR */ #include "sorting.h" #include <assert.h> #include <QFont> #include <QAction> /************************************************************************* * Playlist model implementation *************************************************************************/ PLModel::PLModel( playlist_t *_p_playlist, /* THEPL */ intf_thread_t *_p_intf, /* main Qt p_intf */ playlist_item_t * p_root, QObject *parent ) /* Basic Qt parent */ : VLCModel( _p_intf, parent ) { p_playlist = _p_playlist; rootItem = NULL; /* PLItem rootItem, will be set in rebuild( ) */ latestSearch = QString(); rebuild( p_root ); DCONNECT( THEMIM->getIM(), metaChanged( input_item_t *), this, processInputItemUpdate( input_item_t *) ); DCONNECT( THEMIM, inputChanged( input_thread_t * ), this, processInputItemUpdate( ) ); CONNECT( THEMIM, playlistItemAppended( int, int ), this, processItemAppend( int, int ) ); CONNECT( THEMIM, playlistItemRemoved( int ), this, processItemRemoval( int ) ); } PLModel::~PLModel() { delete rootItem; } Qt::DropActions PLModel::supportedDropActions() const { return Qt::CopyAction | Qt::MoveAction; } Qt::ItemFlags PLModel::flags( const QModelIndex &index ) const { Qt::ItemFlags flags = QAbstractItemModel::flags( index ); const PLItem *item = index.isValid() ? 
getItem( index ) : rootItem; if( canEdit() ) { PL_LOCK; playlist_item_t *plItem = playlist_ItemGetById( p_playlist, item->i_playlist_id ); if ( plItem && ( plItem->i_children > -1 ) ) flags |= Qt::ItemIsDropEnabled; PL_UNLOCK; } flags |= Qt::ItemIsDragEnabled; return flags; } QStringList PLModel::mimeTypes() const { QStringList types; types << "vlc/qt-input-items"; return types; } bool modelIndexLessThen( const QModelIndex &i1, const QModelIndex &i2 ) { if( !i1.isValid() || !i2.isValid() ) return false; PLItem *item1 = static_cast<PLItem*>( i1.internalPointer() ); PLItem *item2 = static_cast<PLItem*>( i2.internalPointer() ); if( item1->hasSameParent( item2 ) ) return i1.row() < i2.row(); else return *item1 < *item2; } QMimeData *PLModel::mimeData( const QModelIndexList &indexes ) const { PlMimeData *plMimeData = new PlMimeData(); QModelIndexList list; foreach( const QModelIndex &index, indexes ) { if( index.isValid() && index.column() == 0 ) list.append(index); } qSort(list.begin(), list.end(), modelIndexLessThen); AbstractPLItem *item = NULL; foreach( const QModelIndex &index, list ) { if( item ) { AbstractPLItem *testee = getItem( index ); while( testee->parent() ) { if( testee->parent() == item || testee->parent() == item->parent() ) break; testee = testee->parent(); } if( testee->parent() == item ) continue; item = getItem( index ); } else item = getItem( index ); plMimeData->appendItem( static_cast<PLItem*>(item)->inputItem() ); } return plMimeData; } /* Drop operation */ bool PLModel::dropMimeData( const QMimeData *data, Qt::DropAction action, int row, int, const QModelIndex &parent ) { bool copy = action == Qt::CopyAction; if( !copy && action != Qt::MoveAction ) return true; const PlMimeData *plMimeData = qobject_cast<const PlMimeData*>( data ); if( plMimeData ) { if( copy ) dropAppendCopy( plMimeData, getItem( parent ), row ); else dropMove( plMimeData, getItem( parent ), row ); } return true; } void PLModel::dropAppendCopy( const PlMimeData *plMimeData, 
PLItem *target, int pos ) { PL_LOCK; playlist_item_t *p_parent = playlist_ItemGetByInput( p_playlist, target->inputItem() ); if( !p_parent ) return; if( pos == -1 ) pos = PLAYLIST_END; QList<input_item_t*> inputItems = plMimeData->inputItems(); foreach( input_item_t* p_input, inputItems ) { playlist_item_t *p_item = playlist_ItemGetByInput( p_playlist, p_input ); if( !p_item ) continue; pos = playlist_NodeAddCopy( p_playlist, p_item, p_parent, pos ); } PL_UNLOCK; } void PLModel::dropMove( const PlMimeData * plMimeData, PLItem *target, int row ) { QList<input_item_t*> inputItems = plMimeData->inputItems(); QList<PLItem*> model_items; playlist_item_t **pp_items; pp_items = (playlist_item_t **) calloc( inputItems.count(), sizeof( playlist_item_t* ) ); if ( !pp_items ) return; PL_LOCK; playlist_item_t *p_parent = playlist_ItemGetByInput( p_playlist, target->inputItem() ); if( !p_parent || row > p_parent->i_children ) { PL_UNLOCK; free( pp_items ); return; } int new_pos = row == -1 ? p_parent->i_children : row; int model_pos = new_pos; int i = 0; foreach( input_item_t *p_input, inputItems ) { playlist_item_t *p_item = playlist_ItemGetByInput( p_playlist, p_input ); if( !p_item ) continue; PLItem *item = findByInputId( rootItem, p_input->i_id ); if( !item ) continue; /* Better not try to move a node into itself. Abort the whole operation in that case, because it is ambiguous. 
*/ AbstractPLItem *climber = target; while( climber ) { if( climber == item ) { PL_UNLOCK; free( pp_items ); return; } climber = climber->parent(); } if( item->parent() == target && target->children.indexOf( item ) < new_pos ) model_pos--; model_items.append( item ); pp_items[i] = p_item; i++; } if( model_items.isEmpty() ) { PL_UNLOCK; free( pp_items ); return; } playlist_TreeMoveMany( p_playlist, i, pp_items, p_parent, new_pos ); PL_UNLOCK; foreach( PLItem *item, model_items ) takeItem( item ); insertChildren( target, model_items, model_pos ); free( pp_items ); } void PLModel::activateItem( const QModelIndex &index ) { assert( index.isValid() ); const PLItem *item = getItem( index ); assert( item ); PL_LOCK; playlist_item_t *p_item = playlist_ItemGetById( p_playlist, item->i_playlist_id ); activateItem( p_item ); PL_UNLOCK; } /* Convenient overloaded private version of activateItem * Must be entered with PL lock */ void PLModel::activateItem( playlist_item_t *p_item ) { if( !p_item ) return; playlist_item_t *p_parent = p_item; while( p_parent ) { if( p_parent->i_id == rootItem->id( PLAYLIST_ID ) ) break; p_parent = p_parent->p_parent; } if( p_parent ) playlist_Control( p_playlist, PLAYLIST_VIEWPLAY, pl_Locked, p_parent, p_item ); } /****************** Base model mandatory implementations *****************/ QVariant PLModel::data( const QModelIndex &index, const int role ) const { switch( role ) { case Qt::FontRole: return customFont; default: if( !index.isValid() ) return QVariant(); } PLItem *item = getItem( index ); if( role == Qt::DisplayRole ) { int metadata = columnToMeta( index.column() ); if( metadata == COLUMN_END ) return QVariant(); QString returninfo; if( metadata == COLUMN_NUMBER ) returninfo = QString::number( index.row() + 1 ); else if( metadata == COLUMN_COVER ) { QString artUrl; artUrl = InputManager::decodeArtURL( item->inputItem() ); if( artUrl.isEmpty() ) { for( int i = 0; i < item->childCount(); i++ ) { artUrl = InputManager::decodeArtURL( 
item->child( i )->inputItem() ); if( !artUrl.isEmpty() ) break; } } return artUrl; } else { char *psz = psz_column_meta( item->inputItem(), metadata ); returninfo = qfu( psz ); free( psz ); } return QVariant( returninfo ); } else if( role == Qt::DecorationRole ) { switch( columnToMeta(index.column()) ) { case COLUMN_TITLE: /* Used to segfault here because i_type wasn't always initialized */ return QVariant( icons[item->inputItem()->i_type] ); case COLUMN_COVER: /* !warn: changes tree item line height. Otherwise, override * delegate's sizehint */ return getArtPixmap( index, QSize(16,16) ); default: return QVariant(); } } else if( role == Qt::BackgroundRole && isCurrent( index ) ) { return QVariant( QBrush( Qt::gray ) ); } else if( role == IsCurrentRole ) { return QVariant( isCurrent( index ) ); } else if( role == IsLeafNodeRole ) { return QVariant( isLeaf( index ) ); } else if( role == IsCurrentsParentNodeRole ) { return QVariant( isParent( index, currentIndex() ) ); } return QVariant(); } bool PLModel::setData( const QModelIndex &index, const QVariant & value, int role ) { switch( role ) { case Qt::FontRole: customFont = value.value<QFont>(); return true; default: return VLCModel::setData( index, value, role ); } } /* Seek from current index toward the top and see if index is one of parent nodes */ bool PLModel::isParent( const QModelIndex &index, const QModelIndex &current ) const { if( !index.isValid() ) return false; if( index == current ) return true; if( !current.isValid() || !current.parent().isValid() ) return false; return isParent( index, current.parent() ); } bool PLModel::isLeaf( const QModelIndex &index ) const { bool b_isLeaf = false; PL_LOCK; playlist_item_t *plItem = playlist_ItemGetById( p_playlist, itemId( index, PLAYLIST_ID ) ); if( plItem ) b_isLeaf = plItem->i_children == -1; PL_UNLOCK; return b_isLeaf; } PLItem* PLModel::getItem( const QModelIndex & index ) const { PLItem *item = static_cast<PLItem *>( VLCModel::getItem( index ) ); if ( item == 
NULL ) item = rootItem; return item; } QModelIndex PLModel::index( const int row, const int column, const QModelIndex &parent ) const { PLItem *parentItem = parent.isValid() ? getItem( parent ) : rootItem; PLItem *childItem = static_cast<PLItem*>(parentItem->child( row )); if( childItem ) return createIndex( row, column, childItem ); else return QModelIndex(); } QModelIndex PLModel::indexByPLID( const int i_plid, const int c ) const { return index( findByPLId( rootItem, i_plid ), c ); } QModelIndex PLModel::indexByInputItemID( const int i_inputitem_id, const int c ) const { return index( findByInputId( rootItem, i_inputitem_id ), c ); } QModelIndex PLModel::rootIndex() const { return index( findByPLId( rootItem, rootItem->id( PLAYLIST_ID ) ), 0 ); } bool PLModel::isTree() const { return ( ( rootItem && rootItem->id( PLAYLIST_ID ) != p_playlist->p_playing->i_id ) || var_InheritBool( p_intf, "playlist-tree" ) ); } /* Return the index of a given item */ QModelIndex PLModel::index( PLItem *item, int column ) const { if( !item ) return QModelIndex(); AbstractPLItem *parent = item->parent(); if( parent ) return createIndex( parent->lastIndexOf( item ), column, item ); return QModelIndex(); } QModelIndex PLModel::currentIndex() const { input_thread_t *p_input_thread = THEMIM->getInput(); if( !p_input_thread ) return QModelIndex(); PLItem *item = findByInputId( rootItem, input_GetItem( p_input_thread )->i_id ); return index( item, 0 ); } QModelIndex PLModel::parent( const QModelIndex &index ) const { if( !index.isValid() ) return QModelIndex(); PLItem *childItem = getItem( index ); if( !childItem ) { msg_Err( p_playlist, "Item not found" ); return QModelIndex(); } PLItem *parentItem = static_cast<PLItem*>(childItem->parent()); if( !parentItem || parentItem == rootItem ) return QModelIndex(); if( !parentItem->parent() ) { msg_Err( p_playlist, "No parent found, trying row 0. 
Please report this" ); return createIndex( 0, 0, parentItem ); } return createIndex(parentItem->row(), 0, parentItem); } int PLModel::rowCount( const QModelIndex &parent ) const { PLItem *parentItem = parent.isValid() ? getItem( parent ) : rootItem; return parentItem->childCount(); } /************************* Lookups *****************************/ PLItem *PLModel::findByPLId( PLItem *root, int i_plitemid ) const { return findInner( root, i_plitemid, false ); } PLItem *PLModel::findByInputId( PLItem *root, int i_input_itemid ) const { PLItem *result = findInner( root, i_input_itemid, true ); return result; } PLItem * PLModel::findInner( PLItem *root, int i_id, bool b_isinputid ) const { if( !root ) return NULL; if( !b_isinputid && root->id( PLAYLIST_ID ) == i_id ) return root; else if( b_isinputid && root->id( INPUTITEM_ID ) == i_id ) return root; QList<AbstractPLItem *>::iterator it = root->children.begin(); while ( it != root->children.end() ) { PLItem *item = static_cast<PLItem *>(*it); if( !b_isinputid && item->id( PLAYLIST_ID ) == i_id ) return item; else if( b_isinputid && item->id( INPUTITEM_ID ) == i_id ) return item; if( item->childCount() ) { PLItem *childFound = findInner( item, i_id, b_isinputid ); if( childFound ) return childFound; } ++it; } return NULL; } PLModel::pl_nodetype PLModel::getPLRootType() const { /* can't rely on rootitem as it depends on view / rebuild() */ AbstractPLItem *plitem = rootItem; while( plitem->parent() ) plitem = plitem->parent(); switch( plitem->id( PLAYLIST_ID ) ) { case 2: return ROOTTYPE_CURRENT_PLAYING; case 3: return ROOTTYPE_MEDIA_LIBRARY; default: return ROOTTYPE_OTHER; } } bool PLModel::canEdit() const { return ( rootItem != NULL && ( rootItem->inputItem() == p_playlist->p_playing->p_input || ( p_playlist->p_media_library && rootItem->inputItem() == p_playlist->p_media_library->p_input ) ) ); } /************************* Updates handling *****************************/ /**** Events processing ****/ void 
PLModel::processInputItemUpdate( ) { input_thread_t *p_input = THEMIM->getInput(); if( !p_input ) return; if( p_input ) { PLItem *item = findByInputId( rootItem, input_GetItem( p_input )->i_id ); if( item ) emit currentIndexChanged( index( item, 0 ) ); } processInputItemUpdate( input_GetItem( p_input ) ); } void PLModel::processInputItemUpdate( input_item_t *p_item ) { if( !p_item || p_item->i_id <= 0 ) return; PLItem *item = findByInputId( rootItem, p_item->i_id ); if( item ) updateTreeItem( item ); } void PLModel::processItemRemoval( int i_pl_itemid ) { if( i_pl_itemid <= 0 ) return; removeItem( findByPLId( rootItem, i_pl_itemid ) ); } void PLModel::processItemAppend( int i_pl_itemid, int i_pl_itemidparent ) { playlist_item_t *p_item = NULL; PLItem *newItem = NULL; int pos; /* Find the Parent */ PLItem *nodeParentItem = findByPLId( rootItem, i_pl_itemidparent ); if( !nodeParentItem ) return; /* Search for an already matching children */ foreach( AbstractPLItem *existing, nodeParentItem->children ) if( existing->id( PLAYLIST_ID ) == i_pl_itemid ) return; /* Find the child */ PL_LOCK; p_item = playlist_ItemGetById( p_playlist, i_pl_itemid ); if( !p_item || p_item->i_flags & PLAYLIST_DBL_FLAG ) { PL_UNLOCK; return; } for( pos = p_item->p_parent->i_children - 1; pos >= 0; pos-- ) if( p_item->p_parent->pp_children[pos] == p_item ) break; newItem = new PLItem( p_item, nodeParentItem ); PL_UNLOCK; /* We insert the newItem (children) inside the parent */ beginInsertRows( index( nodeParentItem, 0 ), pos, pos ); nodeParentItem->insertChild( newItem, pos ); endInsertRows(); if ( newItem->inputItem() == THEMIM->currentInputItem() ) emit currentIndexChanged( index( newItem, 0 ) ); if( latestSearch.isEmpty() ) return; filter( latestSearch, index( rootItem, 0), false /*FIXME*/ ); } void PLModel::rebuild( playlist_item_t *p_root ) { beginResetModel(); PL_LOCK; if( rootItem ) rootItem->clearChildren(); if( p_root ) // Can be NULL { if ( rootItem ) delete rootItem; rootItem = new 
PLItem( p_root ); } assert( rootItem ); /* Recreate from root */ updateChildren( rootItem ); PL_UNLOCK; /* And signal the view */ endResetModel(); if( p_root ) emit rootIndexChanged(); } void PLModel::takeItem( PLItem *item ) { assert( item ); PLItem *parent = static_cast<PLItem*>(item->parent()); assert( parent ); int i_index = parent->indexOf( item ); beginRemoveRows( index( parent, 0 ), i_index, i_index ); parent->takeChildAt( i_index ); endRemoveRows(); } void PLModel::insertChildren( PLItem *node, QList<PLItem*>& items, int i_pos ) { assert( node ); int count = items.count(); if( !count ) return; beginInsertRows( index( node, 0 ), i_pos, i_pos + count - 1 ); for( int i = 0; i < count; i++ ) { node->children.insert( i_pos + i, items[i] ); items[i]->parentItem = node; } endInsertRows(); } void PLModel::removeItem( PLItem *item ) { if( !item ) return; if( item->parent() ) { int i = item->parent()->indexOf( item ); beginRemoveRows( index( static_cast<PLItem*>(item->parent()), 0), i, i ); item->parent()->children.removeAt(i); delete item; endRemoveRows(); } else delete item; if(item == rootItem) { rootItem = NULL; rebuild( p_playlist->p_playing ); } } /* This function must be entered WITH the playlist lock */ void PLModel::updateChildren( PLItem *root ) { playlist_item_t *p_node = playlist_ItemGetById( p_playlist, root->id( PLAYLIST_ID ) ); updateChildren( p_node, root ); } /* This function must be entered WITH the playlist lock */ void PLModel::updateChildren( playlist_item_t *p_node, PLItem *root ) { for( int i = 0; i < p_node->i_children ; i++ ) { if( p_node->pp_children[i]->i_flags & PLAYLIST_DBL_FLAG ) continue; PLItem *newItem = new PLItem( p_node->pp_children[i], root ); root->appendChild( newItem ); if( p_node->pp_children[i]->i_children != -1 ) updateChildren( p_node->pp_children[i], newItem ); } } /* Function doesn't need playlist-lock, as we don't touch playlist_item_t stuff here*/ void PLModel::updateTreeItem( PLItem *item ) { if( !item ) return; emit 
dataChanged( index( item, 0 ) , index( item, columnCount( QModelIndex() ) - 1 ) ); } /************************* Actions ******************************/ /** * Deletion, don't delete items childrens if item is going to be * delete allready, so we remove childrens from selection-list. */ void PLModel::doDelete( QModelIndexList selected ) { if( !canEdit() ) return; while( !selected.isEmpty() ) { QModelIndex index = selected[0]; selected.removeAt( 0 ); if( index.column() != 0 ) continue; PLItem *item = getItem( index ); if( item->childCount() ) recurseDelete( item->children, &selected ); PL_LOCK; playlist_DeleteFromInput( p_playlist, item->inputItem(), pl_Locked ); PL_UNLOCK; removeItem( item ); } } void PLModel::recurseDelete( QList<AbstractPLItem*> children, QModelIndexList *fullList ) { for( int i = children.count() - 1; i >= 0 ; i-- ) { PLItem *item = static_cast<PLItem *>(children[i]); if( item->childCount() ) recurseDelete( item->children, fullList ); fullList->removeAll( index( item, 0 ) ); } } /******* Volume III: Sorting and searching ********/ void PLModel::sort( const int column, Qt::SortOrder order ) { sort( QModelIndex(), indexByPLID( rootItem->id( PLAYLIST_ID ), 0 ) , column, order ); } void PLModel::sort( QModelIndex caller, QModelIndex rootIndex, const int column, Qt::SortOrder order ) { msg_Dbg( p_intf, "Sorting by column %i, order %i", column, order ); int meta = columnToMeta( column ); if( meta == COLUMN_END ) return; PLItem *item = ( rootIndex.isValid() ) ? getItem( rootIndex ) : rootItem; if( !item ) return; int i_root_id = item->id( PLAYLIST_ID ); QModelIndex qIndex = index( item, 0 ); int count = item->childCount(); if( count ) { beginRemoveRows( qIndex, 0, count - 1 ); item->clearChildren(); endRemoveRows( ); } PL_LOCK; { playlist_item_t *p_root = playlist_ItemGetById( p_playlist, i_root_id ); if( p_root ) { playlist_RecursiveNodeSort( p_playlist, p_root, i_column_sorting( meta ), order == Qt::AscendingOrder ? 
ORDER_NORMAL : ORDER_REVERSE ); } } if( count ) { beginInsertRows( qIndex, 0, count - 1 ); updateChildren( item ); endInsertRows( ); } PL_UNLOCK; /* if we have popup item, try to make sure that you keep that item visible */ if( caller.isValid() ) emit currentIndexChanged( caller ); else if( currentIndex().isValid() ) emit currentIndexChanged( currentIndex() ); } void PLModel::filter( const QString& search_text, const QModelIndex & idx, bool b_recursive ) { latestSearch = search_text; /** \todo Fire the search with a small delay ? */ PL_LOCK; { playlist_item_t *p_root = playlist_ItemGetById( p_playlist, itemId( idx, PLAYLIST_ID ) ); assert( p_root ); playlist_LiveSearchUpdate( p_playlist, p_root, qtu( search_text ), b_recursive ); if( idx.isValid() ) { PLItem *searchRoot = getItem( idx ); beginRemoveRows( idx, 0, searchRoot->childCount() - 1 ); searchRoot->clearChildren(); endRemoveRows(); beginInsertRows( idx, 0, searchRoot->childCount() - 1 ); updateChildren( searchRoot ); // The PL_LOCK is needed here endInsertRows(); PL_UNLOCK; return; } } PL_UNLOCK; rebuild(); } void PLModel::removeAll() { if( rowCount() < 1 ) return; QModelIndexList l; for( int i = 0; i < rowCount(); i++) { QModelIndex indexrecord = index( i, 0, QModelIndex() ); l.append( indexrecord ); } doDelete(l); } void PLModel::createNode( QModelIndex index, QString name ) { if( name.isEmpty() || !index.isValid() ) return; PL_LOCK; index = index.parent(); if ( !index.isValid() ) index = rootIndex(); playlist_item_t *p_item = playlist_ItemGetById( p_playlist, itemId( index, PLAYLIST_ID ) ); if( p_item ) playlist_NodeCreate( p_playlist, qtu( name ), p_item, PLAYLIST_END, 0, NULL ); PL_UNLOCK; } void PLModel::renameNode( QModelIndex index, QString name ) { if( name.isEmpty() || !index.isValid() ) return; PL_LOCK; if ( !index.isValid() ) index = rootIndex(); input_item_t* p_input = this->getInputItem( index ); input_item_SetName( p_input, qtu( name ) ); playlist_t *p_playlist = THEPL; input_item_WriteMeta( 
VLC_OBJECT(p_playlist), p_input ); PL_UNLOCK; } bool PLModel::action( QAction *action, const QModelIndexList &indexes ) { QModelIndex index; actionsContainerType a = action->data().value<actionsContainerType>(); switch ( a.action ) { case ACTION_PLAY: if ( !indexes.empty() && indexes.first().isValid() ) { activateItem( indexes.first() ); return true; } break; case ACTION_ADDTOPLAYLIST: PL_LOCK; foreach( const QModelIndex &currentIndex, indexes ) { playlist_item_t *p_item = playlist_ItemGetById( THEPL, itemId( currentIndex, PLAYLIST_ID ) ); if( !p_item ) continue; playlist_NodeAddCopy( THEPL, p_item, THEPL->p_playing, PLAYLIST_END ); } PL_UNLOCK; return true; case ACTION_REMOVE: doDelete( indexes ); return true; case ACTION_SORT: if ( indexes.empty() ) break; index = indexes.first().parent(); if( !index.isValid() ) index = rootIndex(); sort( indexes.first(), index, a.column > 0 ? a.column - 1 : -a.column - 1, a.column > 0 ? Qt::AscendingOrder : Qt::DescendingOrder ); return true; case ACTION_CLEAR: removeAll(); return true; case ACTION_ENQUEUEFILE: foreach( const QString &uri, a.uris ) Open::openMRL( p_intf, uri.toLatin1().constData(), false, getPLRootType() == ROOTTYPE_CURRENT_PLAYING ); return true; case ACTION_ENQUEUEDIR: if( a.uris.isEmpty() ) break; Open::openMRL( p_intf, a.uris.first().toLatin1().constData(), false, getPLRootType() == ROOTTYPE_CURRENT_PLAYING ); return true; case ACTION_ENQUEUEGENERIC: foreach( const QString &uri, a.uris ) { QStringList options = a.options.split( " :" ); Open::openMRLwithOptions( p_intf, uri, &options, false ); } return true; default: break; } return false; } bool PLModel::isSupportedAction( actions action, const QModelIndex &index ) const { switch ( action ) { case ACTION_ADDTOPLAYLIST: /* Only if we are not already in Current Playing */ if ( getPLRootType() == ROOTTYPE_CURRENT_PLAYING ) return false; if( index.isValid() && index != rootIndex() ) return ( itemId( index, PLAYLIST_ID ) != THEPL->p_playing->i_id ); case 
ACTION_SORT: return rowCount(); case ACTION_PLAY: case ACTION_STREAM: case ACTION_SAVE: case ACTION_INFO: case ACTION_REMOVE: return index.isValid() && index != rootIndex(); case ACTION_EXPLORE: if( index.isValid() ) return getURI( index ).startsWith( "file://" ); case ACTION_CREATENODE: return ( canEdit() && isTree() ); case ACTION_RENAMENODE: return ( index != rootIndex() ) && !isLeaf( index ); break; case ACTION_CLEAR: return rowCount() && canEdit(); case ACTION_ENQUEUEFILE: case ACTION_ENQUEUEDIR: case ACTION_ENQUEUEGENERIC: return canEdit(); case ACTION_SAVETOPLAYLIST: return rowCount() > 0; default: return false; } return false; } /******************* Drag and Drop helper class ******************/ PlMimeData::~PlMimeData() { foreach( input_item_t *p_item, _inputItems ) vlc_gc_decref( p_item ); } void PlMimeData::appendItem( input_item_t *p_item ) { vlc_gc_incref( p_item ); _inputItems.append( p_item ); } QList<input_item_t*> PlMimeData::inputItems() const { return _inputItems; } QStringList PlMimeData::formats () const { QStringList fmts; fmts << "vlc/qt-input-items"; return fmts; }
gpl-2.0
loongson-community/linux-3A
drivers/hwmon/jz4740-hwmon.c
289
5497
/* * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> * JZ4740 SoC HWMON driver * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/err.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/completion.h> #include <linux/mfd/core.h> #include <linux/hwmon.h> struct jz4740_hwmon { struct resource *mem; void __iomem *base; int irq; struct mfd_cell *cell; struct device *hwmon; struct completion read_completion; struct mutex lock; }; static ssize_t jz4740_hwmon_show_name(struct device *dev, struct device_attribute *dev_attr, char *buf) { return sprintf(buf, "jz4740\n"); } static irqreturn_t jz4740_hwmon_irq(int irq, void *data) { struct jz4740_hwmon *hwmon = data; complete(&hwmon->read_completion); return IRQ_HANDLED; } static ssize_t jz4740_hwmon_read_adcin(struct device *dev, struct device_attribute *dev_attr, char *buf) { struct jz4740_hwmon *hwmon = dev_get_drvdata(dev); struct completion *completion = &hwmon->read_completion; unsigned long t; unsigned long val; int ret; mutex_lock(&hwmon->lock); INIT_COMPLETION(*completion); enable_irq(hwmon->irq); hwmon->cell->enable(to_platform_device(dev)); t = wait_for_completion_interruptible_timeout(completion, HZ); if (t > 0) { val = readw(hwmon->base) & 0xfff; val = (val * 3300) >> 12; ret = sprintf(buf, "%lu\n", val); } else { ret = t ? 
t : -ETIMEDOUT; } hwmon->cell->disable(to_platform_device(dev)); disable_irq(hwmon->irq); mutex_unlock(&hwmon->lock); return ret; } static DEVICE_ATTR(name, S_IRUGO, jz4740_hwmon_show_name, NULL); static DEVICE_ATTR(in0_input, S_IRUGO, jz4740_hwmon_read_adcin, NULL); static struct attribute *jz4740_hwmon_attributes[] = { &dev_attr_name.attr, &dev_attr_in0_input.attr, NULL }; static const struct attribute_group jz4740_hwmon_attr_group = { .attrs = jz4740_hwmon_attributes, }; static int __devinit jz4740_hwmon_probe(struct platform_device *pdev) { int ret; struct jz4740_hwmon *hwmon; hwmon = kmalloc(sizeof(*hwmon), GFP_KERNEL); if (!hwmon) { dev_err(&pdev->dev, "Failed to allocate driver structure\n"); return -ENOMEM; } hwmon->cell = pdev->dev.platform_data; hwmon->irq = platform_get_irq(pdev, 0); if (hwmon->irq < 0) { ret = hwmon->irq; dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret); goto err_free; } hwmon->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!hwmon->mem) { ret = -ENOENT; dev_err(&pdev->dev, "Failed to get platform mmio resource\n"); goto err_free; } hwmon->mem = request_mem_region(hwmon->mem->start, resource_size(hwmon->mem), pdev->name); if (!hwmon->mem) { ret = -EBUSY; dev_err(&pdev->dev, "Failed to request mmio memory region\n"); goto err_free; } hwmon->base = ioremap_nocache(hwmon->mem->start, resource_size(hwmon->mem)); if (!hwmon->base) { ret = -EBUSY; dev_err(&pdev->dev, "Failed to ioremap mmio memory\n"); goto err_release_mem_region; } init_completion(&hwmon->read_completion); mutex_init(&hwmon->lock); platform_set_drvdata(pdev, hwmon); ret = request_irq(hwmon->irq, jz4740_hwmon_irq, 0, pdev->name, hwmon); if (ret) { dev_err(&pdev->dev, "Failed to request irq: %d\n", ret); goto err_iounmap; } disable_irq(hwmon->irq); ret = sysfs_create_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group); if (ret) { dev_err(&pdev->dev, "Failed to create sysfs group: %d\n", ret); goto err_free_irq; } hwmon->hwmon = 
hwmon_device_register(&pdev->dev); if (IS_ERR(hwmon->hwmon)) { ret = PTR_ERR(hwmon->hwmon); goto err_remove_file; } return 0; err_remove_file: sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group); err_free_irq: free_irq(hwmon->irq, hwmon); err_iounmap: platform_set_drvdata(pdev, NULL); iounmap(hwmon->base); err_release_mem_region: release_mem_region(hwmon->mem->start, resource_size(hwmon->mem)); err_free: kfree(hwmon); return ret; } static int __devexit jz4740_hwmon_remove(struct platform_device *pdev) { struct jz4740_hwmon *hwmon = platform_get_drvdata(pdev); hwmon_device_unregister(hwmon->hwmon); sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group); free_irq(hwmon->irq, hwmon); iounmap(hwmon->base); release_mem_region(hwmon->mem->start, resource_size(hwmon->mem)); platform_set_drvdata(pdev, NULL); kfree(hwmon); return 0; } struct platform_driver jz4740_hwmon_driver = { .probe = jz4740_hwmon_probe, .remove = __devexit_p(jz4740_hwmon_remove), .driver = { .name = "jz4740-hwmon", .owner = THIS_MODULE, }, }; static int __init jz4740_hwmon_init(void) { return platform_driver_register(&jz4740_hwmon_driver); } module_init(jz4740_hwmon_init); static void __exit jz4740_hwmon_exit(void) { platform_driver_unregister(&jz4740_hwmon_driver); } module_exit(jz4740_hwmon_exit); MODULE_DESCRIPTION("JZ4740 SoC HWMON driver"); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:jz4740-hwmon");
gpl-2.0
oxforever/linux-4.1
drivers/gpu/drm/bochs/bochs_fbdev.c
545
5548
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include "bochs.h" /* ---------------------------------------------------------------------- */ static int bochsfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct drm_fb_helper *fb_helper = info->par; struct bochs_device *bochs = container_of(fb_helper, struct bochs_device, fb.helper); struct bochs_bo *bo = gem_to_bochs_bo(bochs->fb.gfb.obj); return ttm_fbdev_mmap(vma, &bo->bo); } static struct fb_ops bochsfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = sys_fillrect, .fb_copyarea = sys_copyarea, .fb_imageblit = sys_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, .fb_mmap = bochsfb_mmap, }; static int bochsfb_create_object(struct bochs_device *bochs, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { struct drm_device *dev = bochs->dev; struct drm_gem_object *gobj; u32 size; int ret = 0; size = mode_cmd->pitches[0] * mode_cmd->height; ret = bochs_gem_create(dev, size, true, &gobj); if (ret) return ret; *gobj_p = gobj; return ret; } static int bochsfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct bochs_device *bochs = container_of(helper, struct bochs_device, fb.helper); struct drm_device *dev = bochs->dev; struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd; struct device *device = &dev->pdev->dev; struct drm_gem_object *gobj = NULL; struct bochs_bo *bo = NULL; int size, ret; if (sizes->surface_bpp != 32) return -EINVAL; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp 
+ 7) / 8); mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); size = mode_cmd.pitches[0] * mode_cmd.height; /* alloc, pin & map bo */ ret = bochsfb_create_object(bochs, &mode_cmd, &gobj); if (ret) { DRM_ERROR("failed to create fbcon backing object %d\n", ret); return ret; } bo = gem_to_bochs_bo(gobj); ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); if (ret) return ret; ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL); if (ret) { DRM_ERROR("failed to pin fbcon\n"); ttm_bo_unreserve(&bo->bo); return ret; } ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); if (ret) { DRM_ERROR("failed to kmap fbcon\n"); ttm_bo_unreserve(&bo->bo); return ret; } ttm_bo_unreserve(&bo->bo); /* init fb device */ info = framebuffer_alloc(0, device); if (info == NULL) return -ENOMEM; info->par = &bochs->fb.helper; ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj); if (ret) return ret; bochs->fb.size = size; /* setup helper */ fb = &bochs->fb.gfb.base; bochs->fb.helper.fb = fb; bochs->fb.helper.fbdev = info; strcpy(info->fix.id, "bochsdrmfb"); info->flags = FBINFO_DEFAULT; info->fbops = &bochsfb_ops; drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width, sizes->fb_height); info->screen_base = bo->kmap.virtual; info->screen_size = size; drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node); info->fix.smem_start = 0; info->fix.smem_len = size; ret = fb_alloc_cmap(&info->cmap, 256, 0); if (ret) { DRM_ERROR("%s: can't allocate color map\n", info->fix.id); return -ENOMEM; } return 0; } static int bochs_fbdev_destroy(struct bochs_device *bochs) { struct bochs_framebuffer *gfb = &bochs->fb.gfb; struct fb_info *info; DRM_DEBUG_DRIVER("\n"); if (bochs->fb.helper.fbdev) { info = bochs->fb.helper.fbdev; unregister_framebuffer(info); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } if (gfb->obj) { 
drm_gem_object_unreference_unlocked(gfb->obj); gfb->obj = NULL; } drm_fb_helper_fini(&bochs->fb.helper); drm_framebuffer_unregister_private(&gfb->base); drm_framebuffer_cleanup(&gfb->base); return 0; } void bochs_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) { } void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno) { *red = regno; *green = regno; *blue = regno; } static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = { .gamma_set = bochs_fb_gamma_set, .gamma_get = bochs_fb_gamma_get, .fb_probe = bochsfb_create, }; int bochs_fbdev_init(struct bochs_device *bochs) { int ret; drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper, &bochs_fb_helper_funcs); ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 1, 1); if (ret) return ret; ret = drm_fb_helper_single_add_all_connectors(&bochs->fb.helper); if (ret) goto fini; drm_helper_disable_unused_functions(bochs->dev); ret = drm_fb_helper_initial_config(&bochs->fb.helper, 32); if (ret) goto fini; bochs->fb.initialized = true; return 0; fini: drm_fb_helper_fini(&bochs->fb.helper); return ret; } void bochs_fbdev_fini(struct bochs_device *bochs) { if (!bochs->fb.initialized) return; bochs_fbdev_destroy(bochs); bochs->fb.initialized = false; }
gpl-2.0
leexdon/linux
crypto/lzo.c
1313
2658
/* * Cryptographic API. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/crypto.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/lzo.h> struct lzo_ctx { void *lzo_comp_mem; }; static int lzo_init(struct crypto_tfm *tfm) { struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); if (!ctx->lzo_comp_mem) ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS); if (!ctx->lzo_comp_mem) return -ENOMEM; return 0; } static void lzo_exit(struct crypto_tfm *tfm) { struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); kvfree(ctx->lzo_comp_mem); } static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err; err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem); if (err != LZO_E_OK) return -EINVAL; *dlen = tmp_len; return 0; } static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { int err; size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ err = lzo1x_decompress_safe(src, slen, dst, &tmp_len); if (err != LZO_E_OK) return -EINVAL; *dlen = tmp_len; return 0; } static struct crypto_alg alg = { .cra_name = "lzo", 
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct lzo_ctx), .cra_module = THIS_MODULE, .cra_init = lzo_init, .cra_exit = lzo_exit, .cra_u = { .compress = { .coa_compress = lzo_compress, .coa_decompress = lzo_decompress } } }; static int __init lzo_mod_init(void) { return crypto_register_alg(&alg); } static void __exit lzo_mod_fini(void) { crypto_unregister_alg(&alg); } module_init(lzo_mod_init); module_exit(lzo_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO Compression Algorithm"); MODULE_ALIAS_CRYPTO("lzo");
gpl-2.0
arjen75/lg_p700_kernel
drivers/usb/musb/musb_host.c
2337
65462
/* * MUSB OTG driver host support * * Copyright 2005 Mentor Graphics Corporation * Copyright (C) 2005-2006 by Texas Instruments * Copyright (C) 2006-2007 Nokia Corporation * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> #include <linux/dma-mapping.h> #include "musb_core.h" #include "musb_host.h" /* MUSB HOST status 22-mar-2006 * * - There's still lots of partial code duplication for fault paths, so * they aren't handled as consistently as they need to be. * * - PIO mostly behaved when last tested. * + including ep0, with all usbtest cases 9, 10 * + usbtest 14 (ep0out) doesn't seem to run at all * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest * configurations, but otherwise double buffering passes basic tests. * + for 2.6.N, for N > ~10, needs API changes for hcd framework. * * - DMA (CPPI) ... partially behaves, not currently recommended * + about 1/15 the speed of typical EHCI implementations (PCI) * + RX, all too often reqpkt seems to misbehave after tx * + TX, no known issues (other than evident silicon issue) * * - DMA (Mentor/OMAP) ...has at least toggle update problems * * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet * starvation ... nothing yet for TX, interrupt, or bulk. * * - Not tested with HNP, but some SRP paths seem to behave. * * NOTE 24-August-2006: * * - Bulk traffic finally uses both sides of hardware ep1, freeing up an * extra endpoint for periodic use enabling hub + keybd + mouse. That * mostly works, except that with "usbnet" it's easy to trigger cases * with "ping" where RX loses. (a) ping to davinci, even "ping -f", * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses * although ARP RX wins. (That test was done with a full speed link.) */ /* * NOTE on endpoint usage: * * CONTROL transfers all go through ep0. BULK ones go through dedicated IN * and OUT endpoints ... hardware is dedicated for those "async" queue(s). 
* (Yes, bulk _could_ use more of the endpoints than that, and would even * benefit from it.) * * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints. * So far that scheduling is both dumb and optimistic: the endpoint will be * "claimed" until its software queue is no longer refilled. No multiplexing * of transfers between endpoints, or anything clever. */ static void musb_ep_program(struct musb *musb, u8 epnum, struct urb *urb, int is_out, u8 *buf, u32 offset, u32 len); /* * Clear TX fifo. Needed to avoid BABBLE errors. */ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) { struct musb *musb = ep->musb; void __iomem *epio = ep->regs; u16 csr; u16 lastcsr = 0; int retries = 1000; csr = musb_readw(epio, MUSB_TXCSR); while (csr & MUSB_TXCSR_FIFONOTEMPTY) { if (csr != lastcsr) dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr); lastcsr = csr; csr |= MUSB_TXCSR_FLUSHFIFO; musb_writew(epio, MUSB_TXCSR, csr); csr = musb_readw(epio, MUSB_TXCSR); if (WARN(retries-- < 1, "Could not flush host TX%d fifo: csr: %04x\n", ep->epnum, csr)) return; mdelay(1); } } static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep) { void __iomem *epio = ep->regs; u16 csr; int retries = 5; /* scrub any data left in the fifo */ do { csr = musb_readw(epio, MUSB_TXCSR); if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY))) break; musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO); csr = musb_readw(epio, MUSB_TXCSR); udelay(10); } while (--retries); WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n", ep->epnum, csr); /* and reset for the next transfer */ musb_writew(epio, MUSB_TXCSR, 0); } /* * Start transmit. Caller is responsible for locking shared resources. * musb must be locked. 
*/ static inline void musb_h_tx_start(struct musb_hw_ep *ep) { u16 txcsr; /* NOTE: no locks here; caller should lock and select EP */ if (ep->epnum) { txcsr = musb_readw(ep->regs, MUSB_TXCSR); txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS; musb_writew(ep->regs, MUSB_TXCSR, txcsr); } else { txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY; musb_writew(ep->regs, MUSB_CSR0, txcsr); } } static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep) { u16 txcsr; /* NOTE: no locks here; caller should lock and select EP */ txcsr = musb_readw(ep->regs, MUSB_TXCSR); txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; if (is_cppi_enabled()) txcsr |= MUSB_TXCSR_DMAMODE; musb_writew(ep->regs, MUSB_TXCSR, txcsr); } static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh) { if (is_in != 0 || ep->is_shared_fifo) ep->in_qh = qh; if (is_in == 0 || ep->is_shared_fifo) ep->out_qh = qh; } static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in) { return is_in ? ep->in_qh : ep->out_qh; } /* * Start the URB at the front of an endpoint's queue * end must be claimed from the caller. 
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase =  musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP (always OUT) */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;	/* SETUP packets are exactly 8 bytes */
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	/* the statement expression picks a human-readable type suffix */
	dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf + offset, len);

	/* Configure endpoint: record the qh, then program the hardware */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			/* NOTE(review): enables ALL usb-level irqs, not just
			 * SOF — presumably a workaround; confirm intent */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		/* non-periodic (and periodic that is due now): kick TX */
		dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked.
 * Unlinks the URB from the endpoint and hands it back to the USB core;
 * the lock is dropped around the completion callback.
 */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	dev_dbg(musb->controller,
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}

/* For bulk/interrupt endpoints only: capture the hardware data toggle
 * into the usbcore-maintained toggle state for this device endpoint.
 */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	/* keep the qh "not ready" across the giveback so the completion
	 * callback can't restart it underneath us
	 */
	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;

		if (is_in)
			ep->rx_reinit = 1;
		else
			ep->tx_reinit = 1;

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			/* FALLTHROUGH: non-multiplexed qh is simply freed */

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	/* start the next URB, if any */
	if (qh != NULL && qh->is_ready) {
		dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

/* Flush the RX fifo for @hw_ep; returns the final RXCSR value. */
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 * Returns true when this URB's transfer is complete.
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;

		/* more data in the fifo than this frame descriptor can
		 * hold: record overflow and flush the excess
		 */
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done: buffer filled, short packet seen,
		 * or the urb has already been faulted
		 */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}

/* Set up and start a TX DMA transfer for @urb; returns false (after
 * releasing the channel) if the DMA engine refused the transfer, in
 * which case the caller falls back to PIO.
 */
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#ifdef	CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		/* mode 1: multi-packet DMA */
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		if (qh->hb_mult == 1)
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		/* mode 0: single-packet DMA */
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/*
	 * Ensure the data reaches to main memory before starting
	 * DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		/* DMA refused: release the channel and revert the CSR so
		 * the caller can fall back to PIO
		 */
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;

	dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			/* restore the usbcore-tracked data toggle */
			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok)
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			else
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		/* if DMA takes the transfer, no PIO load is needed */
		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8 *fifo_dest = NULL;
	u16 fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			dev_dbg(musb->controller, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			dev_dbg(musb->controller, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			dev_dbg(musb->controller, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		dev_dbg(musb->controller, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "control NAK timeout\n");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		dev_dbg(musb->controller, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);

		/* NOTE: this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 * That's already done for bulk RX transfers.
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		return;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	/* account for the data moved and decide whether the URB is done */
	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				/* advance to the next iso frame */
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled() || tusb_dma_omap())
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else	if (tx_csr & MUSB_TXCSR_DMAENAB) {
		dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
	musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif

/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr;

	musb_ep_select(mbase, ep->epnum);
	dma = is_dma_capable() ? ep->rx_channel : NULL;

	/* clear nak timeout bit */
	rx_csr = musb_readw(epio, MUSB_RXCSR);
	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
	rx_csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, rx_csr);

	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			/* credit any partial DMA transfer to the URB before
			 * the channel's byte count is discarded
			 */
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, 1, urb);

		/* move cur_qh to end of queue */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);

		/* get the next qh from musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);

		/* set rx_reinit and schedule the next qh */
		ep->rx_reinit = 1;
		musb_start_urb(musb, 1, next_qh);
	}
}

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		dev_dbg(musb->controller, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		dev_dbg(musb->controller, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_rx_nak_timeout(musb, hw_ep);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
				epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */
#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		/* DMA completion path: collect the byte count, then turn off
		 * all DMA-related CSR bits before deciding whether to issue
		 * the next IN token.
		 */
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int d_status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					d_status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (d_status == 0) {
						d_status = -EOVERFLOW;
						urb->error_count++;
					}
					dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = d_status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_DMAENAB;

			/* autoclear shouldn't be set in high bandwidth */
			if (qh->hb_mult == 1)
				val |= MUSB_RXCSR_AUTOCLEAR;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				/* DMA setup failed: fall back to PIO below */
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				/* REVISIT reset CSR */
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			/* Unmap the buffer so that CPU can use it */
			usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	/* account for whatever was moved this pass, then either complete
	 * the URB or wait for the next RX interrupt
	 */
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;
	u8			toggle;
	u8			txtype;
	struct urb		*urb = next_urb(qh);

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	/* best-fit search: pick the free endpoint whose FIFO size exceeds
	 * the qh's (maxpacket * hb_mult) requirement by the least amount
	 */
	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/*
			 * Mentor controller has a bug in that if we schedule
			 * a BULK Tx transfer on an endpoint that had earlier
			 * handled ISOC then the BULK transfer has to start on
			 * a zero toggle.  If the BULK transfer starts on a 1
			 * toggle then this transfer will fail as the mentor
			 * controller starts the Bulk transfer on a 0 toggle
			 * irrespective of the programming of the toggle bits
			 * in the TXCSR register.  Check for this condition
			 * while allocating the EP for a Tx Bulk transfer.  If
			 * so skip this EP.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX NAK timeout scheme when bulk requests are
		 * multiplexed.  This scheme doesn't work in high speed to full
		 * speed scenario as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
		if (is_in && qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}

/* hcd->urb_enqueue hook: link the urb to its endpoint, and if the endpoint
 * has no qh yet, allocate and initialize one and schedule it onto a hardware
 * endpoint.
 */
static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
	qh->type = usb_endpoint_type(epd);

	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
	 * Some musb cores don't support high bandwidth ISO transfers; and
	 * we don't (yet!) support high bandwidth interrupt transfers.
	 */
	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}

	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the  linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}

/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			dev_dbg(musb->controller,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			/* keep whatever DMA already moved */
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}

/* hcd->urb_dequeue hook: unlink an URB.  URBs not currently programmed into
 * the hardware are given back immediately; the one at the head of the active
 * hardware queue gets a synchronous abort via musb_cleanup_urb().
 */
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in  = usb_pipein(urb->pipe);
	int			ret;

	dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			is_in ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}

/* hcd->get_frame_number hook: read the current USB frame number */
static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

/* hcd->start hook: mark the HCD running with a clean root-port status */
static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

/* hcd->stop hook: stop the controller and mark the HCD halted */
static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}

/* hcd->bus_suspend hook: refuse to suspend while transfers are active */
static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u8		devctl;

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->state) {
	case OTG_STATE_A_SUSPEND:
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/* ID could be grounded even if there's no device
		 * on the other end of the cable.  NOTE that the
		 * A_WAIT_VRISE timers are messy with MUSB...
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				otg_state_string(musb->xceiv->state));
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}

/* host controller driver operations table registered with usbcore */
const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};
gpl-2.0
deadlyindian/android_kernel_oneplus_msm8974
drivers/acpi/acpica/nspredef.c
2849
35417
/****************************************************************************** * * Module Name: nspredef - Validation of ACPI predefined methods and objects * $Revision: 1.1 $ * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #define ACPI_CREATE_PREDEFINED_TABLE #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "acpredef.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nspredef") /******************************************************************************* * * This module validates predefined ACPI objects that appear in the namespace, * at the time they are evaluated (via acpi_evaluate_object). The purpose of this * validation is to detect problems with BIOS-exposed predefined ACPI objects * before the results are returned to the ACPI-related drivers. * * There are several areas that are validated: * * 1) The number of input arguments as defined by the method/object in the * ASL is validated against the ACPI specification. * 2) The type of the return object (if any) is validated against the ACPI * specification. * 3) For returned package objects, the count of package elements is * validated, as well as the type of each package element. Nested * packages are supported. * * For any problems found, a warning message is issued. 
* ******************************************************************************/ /* Local prototypes */ static acpi_status acpi_ns_check_package(struct acpi_predefined_data *data, union acpi_operand_object **return_object_ptr); static acpi_status acpi_ns_check_package_list(struct acpi_predefined_data *data, const union acpi_predefined_info *package, union acpi_operand_object **elements, u32 count); static acpi_status acpi_ns_check_package_elements(struct acpi_predefined_data *data, union acpi_operand_object **elements, u8 type1, u32 count1, u8 type2, u32 count2, u32 start_index); static acpi_status acpi_ns_check_object_type(struct acpi_predefined_data *data, union acpi_operand_object **return_object_ptr, u32 expected_btypes, u32 package_index); static acpi_status acpi_ns_check_reference(struct acpi_predefined_data *data, union acpi_operand_object *return_object); static void acpi_ns_get_expected_types(char *buffer, u32 expected_btypes); /* * Names for the types that can be returned by the predefined objects. * Used for warning messages. Must be in the same order as the ACPI_RTYPEs */ static const char *acpi_rtype_names[] = { "/Integer", "/String", "/Buffer", "/Package", "/Reference", }; /******************************************************************************* * * FUNCTION: acpi_ns_check_predefined_names * * PARAMETERS: Node - Namespace node for the method/object * user_param_count - Number of parameters actually passed * return_status - Status from the object evaluation * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status * * DESCRIPTION: Check an ACPI name for a match in the predefined name list. 
 ******************************************************************************/

acpi_status
acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
			       u32 user_param_count,
			       acpi_status return_status,
			       union acpi_operand_object **return_object_ptr)
{
	union acpi_operand_object *return_object = *return_object_ptr;
	acpi_status status = AE_OK;
	const union acpi_predefined_info *predefined;
	char *pathname;
	struct acpi_predefined_data *data;

	/* Match the name for this method/object against the predefined list */

	predefined = acpi_ns_check_for_predefined_name(node);

	/* Get the full pathname to the object, for use in warning messages */

	pathname = acpi_ns_get_external_pathname(node);
	if (!pathname) {
		return AE_OK;	/* Could not get pathname, ignore */
	}

	/*
	 * Check that the parameter count for this method matches the ASL
	 * definition. For predefined names, ensure that both the caller and
	 * the method itself are in accordance with the ACPI specification.
	 */
	acpi_ns_check_parameter_count(pathname, node, user_param_count,
				      predefined);

	/* If not a predefined name, we cannot validate the return object */

	if (!predefined) {
		goto cleanup;
	}

	/*
	 * If the method failed or did not actually return an object, we cannot
	 * validate the return object
	 */
	if ((return_status != AE_OK) && (return_status != AE_CTRL_RETURN_VALUE)) {
		goto cleanup;
	}

	/*
	 * If there is no return value, check if we require a return value for
	 * this predefined name. Either one return value is expected, or none,
	 * for both methods and other objects.
	 *
	 * Exit now if there is no return object. Warning if one was expected.
	 */
	if (!return_object) {
		if ((predefined->info.expected_btypes) &&
		    (!(predefined->info.expected_btypes & ACPI_RTYPE_NONE))) {
			ACPI_WARN_PREDEFINED((AE_INFO, pathname,
					      ACPI_WARN_ALWAYS,
					      "Missing expected return value"));

			status = AE_AML_NO_RETURN_VALUE;
		}
		goto cleanup;
	}

	/*
	 * Return value validation and possible repair.
	 *
	 * 1) Don't perform return value validation/repair if this feature
	 *    has been disabled via a global option.
	 *
	 * 2) We have a return value, but if one wasn't expected, just exit,
	 *    this is not a problem. For example, if the "Implicit Return"
	 *    feature is enabled, methods will always return a value.
	 *
	 * 3) If the return value can be of any type, then we cannot perform
	 *    any validation, just exit.
	 */
	if (acpi_gbl_disable_auto_repair ||
	    (!predefined->info.expected_btypes) ||
	    (predefined->info.expected_btypes == ACPI_RTYPE_ALL)) {
		goto cleanup;
	}

	/* Create the parameter data block for object validation */

	data = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_predefined_data));
	if (!data) {
		goto cleanup;
	}
	data->predefined = predefined;
	data->node = node;
	data->node_flags = node->flags;
	data->pathname = pathname;

	/*
	 * Check that the type of the main return object is what is expected
	 * for this predefined name
	 */
	status = acpi_ns_check_object_type(data, return_object_ptr,
					   predefined->info.expected_btypes,
					   ACPI_NOT_PACKAGE_ELEMENT);
	if (ACPI_FAILURE(status)) {
		goto exit;
	}

	/*
	 * For returned Package objects, check the type of all sub-objects.
	 * Note: Package may have been newly created by call above.
	 */
	if ((*return_object_ptr)->common.type == ACPI_TYPE_PACKAGE) {
		data->parent_package = *return_object_ptr;
		status = acpi_ns_check_package(data, return_object_ptr);
		if (ACPI_FAILURE(status)) {
			goto exit;
		}
	}

	/*
	 * The return object was OK, or it was successfully repaired above.
	 * Now make some additional checks such as verifying that package
	 * objects are sorted correctly (if required) or buffer objects have
	 * the correct data width (bytes vs. dwords). These repairs are
	 * performed on a per-name basis, i.e., the code is specific to
	 * particular predefined names.
	 */
	status = acpi_ns_complex_repairs(data, node, status, return_object_ptr);

exit:
	/*
	 * If the object validation failed or if we successfully repaired one
	 * or more objects, mark the parent node to suppress further warning
	 * messages during the next evaluation of the same method/object.
	 */
	if (ACPI_FAILURE(status) || (data->flags & ACPI_OBJECT_REPAIRED)) {
		node->flags |= ANOBJ_EVALUATED;
	}
	ACPI_FREE(data);

cleanup:
	ACPI_FREE(pathname);
	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_check_parameter_count
 *
 * PARAMETERS:  Pathname        - Full pathname to the node (for error msgs)
 *              Node            - Namespace node for the method/object
 *              user_param_count - Number of args passed in by the caller
 *              Predefined      - Pointer to entry in predefined name table
 *
 * RETURN:      None
 *
 * DESCRIPTION: Check that the declared (in ASL/AML) parameter count for a
 *              predefined name is what is expected (i.e., what is defined in
 *              the ACPI specification for this predefined name.)
 *
 ******************************************************************************/

void
acpi_ns_check_parameter_count(char *pathname,
			      struct acpi_namespace_node *node,
			      u32 user_param_count,
			      const union acpi_predefined_info *predefined)
{
	u32 param_count;
	u32 required_params_current;
	u32 required_params_old;

	/* Methods have 0-7 parameters. All other types have zero. */

	param_count = 0;
	if (node->type == ACPI_TYPE_METHOD) {
		param_count = node->object->method.param_count;
	}

	if (!predefined) {
		/*
		 * Check the parameter count for non-predefined methods/objects.
		 *
		 * Warning if too few or too many arguments have been passed by the
		 * caller. An incorrect number of arguments may not cause the method
		 * to fail. However, the method will fail if there are too few
		 * arguments and the method attempts to use one of the missing ones.
		 */
		if (user_param_count < param_count) {
			ACPI_WARN_PREDEFINED((AE_INFO, pathname,
					      ACPI_WARN_ALWAYS,
					      "Insufficient arguments - needs %u, found %u",
					      param_count, user_param_count));
		} else if (user_param_count > param_count) {
			ACPI_WARN_PREDEFINED((AE_INFO, pathname,
					      ACPI_WARN_ALWAYS,
					      "Excess arguments - needs %u, found %u",
					      param_count, user_param_count));
		}
		return;
	}

	/*
	 * Validate the user-supplied parameter count.
	 * Allow two different legal argument counts (_SCP, etc.)
	 */
	required_params_current = predefined->info.param_count & 0x0F;
	required_params_old = predefined->info.param_count >> 4;

	if (user_param_count != ACPI_UINT32_MAX) {
		if ((user_param_count != required_params_current) &&
		    (user_param_count != required_params_old)) {
			ACPI_WARN_PREDEFINED((AE_INFO, pathname,
					      ACPI_WARN_ALWAYS,
					      "Parameter count mismatch - "
					      "caller passed %u, ACPI requires %u",
					      user_param_count,
					      required_params_current));
		}
	}

	/*
	 * Check that the ASL-defined parameter count is what is expected for
	 * this predefined name (parameter count as defined by the ACPI
	 * specification)
	 */
	if ((param_count != required_params_current) &&
	    (param_count != required_params_old)) {
		ACPI_WARN_PREDEFINED((AE_INFO, pathname, node->flags,
				      "Parameter count mismatch - ASL declared %u, ACPI requires %u",
				      param_count, required_params_current));
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_check_for_predefined_name
 *
 * PARAMETERS:  Node - Namespace node for the method/object
 *
 * RETURN:      Pointer to entry in predefined table. NULL indicates not found.
 *
 * DESCRIPTION: Check an object name against the predefined object list.
* ******************************************************************************/ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct acpi_namespace_node *node) { const union acpi_predefined_info *this_name; /* Quick check for a predefined name, first character must be underscore */ if (node->name.ascii[0] != '_') { return (NULL); } /* Search info table for a predefined method/object name */ this_name = predefined_names; while (this_name->info.name[0]) { if (ACPI_COMPARE_NAME(node->name.ascii, this_name->info.name)) { return (this_name); } /* * Skip next entry in the table if this name returns a Package * (next entry contains the package info) */ if (this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) { this_name++; } this_name++; } return (NULL); /* Not found */ } /******************************************************************************* * * FUNCTION: acpi_ns_check_package * * PARAMETERS: Data - Pointer to validation data structure * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status * * DESCRIPTION: Check a returned package object for the correct count and * correct type of all sub-objects. 
* ******************************************************************************/ static acpi_status acpi_ns_check_package(struct acpi_predefined_data *data, union acpi_operand_object **return_object_ptr) { union acpi_operand_object *return_object = *return_object_ptr; const union acpi_predefined_info *package; union acpi_operand_object **elements; acpi_status status = AE_OK; u32 expected_count; u32 count; u32 i; ACPI_FUNCTION_NAME(ns_check_package); /* The package info for this name is in the next table entry */ package = data->predefined + 1; ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s Validating return Package of Type %X, Count %X\n", data->pathname, package->ret_info.type, return_object->package.count)); /* * For variable-length Packages, we can safely remove all embedded * and trailing NULL package elements */ acpi_ns_remove_null_elements(data, package->ret_info.type, return_object); /* Extract package count and elements array */ elements = return_object->package.elements; count = return_object->package.count; /* The package must have at least one element, else invalid */ if (!count) { ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, "Return Package has no elements (empty)")); return (AE_AML_OPERAND_VALUE); } /* * Decode the type of the expected package contents * * PTYPE1 packages contain no subpackages * PTYPE2 packages contain sub-packages */ switch (package->ret_info.type) { case ACPI_PTYPE1_FIXED: /* * The package count is fixed and there are no sub-packages * * If package is too small, exit. 
* If package is larger than expected, issue warning but continue */ expected_count = package->ret_info.count1 + package->ret_info.count2; if (count < expected_count) { goto package_too_small; } else if (count > expected_count) { ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, "%s: Return Package is larger than needed - " "found %u, expected %u\n", data->pathname, count, expected_count)); } /* Validate all elements of the returned package */ status = acpi_ns_check_package_elements(data, elements, package->ret_info. object_type1, package->ret_info. count1, package->ret_info. object_type2, package->ret_info. count2, 0); break; case ACPI_PTYPE1_VAR: /* * The package count is variable, there are no sub-packages, and all * elements must be of the same type */ for (i = 0; i < count; i++) { status = acpi_ns_check_object_type(data, elements, package->ret_info. object_type1, i); if (ACPI_FAILURE(status)) { return (status); } elements++; } break; case ACPI_PTYPE1_OPTION: /* * The package count is variable, there are no sub-packages. There are * a fixed number of required elements, and a variable number of * optional elements. * * Check if package is at least as large as the minimum required */ expected_count = package->ret_info3.count; if (count < expected_count) { goto package_too_small; } /* Variable number of sub-objects */ for (i = 0; i < count; i++) { if (i < package->ret_info3.count) { /* These are the required package elements (0, 1, or 2) */ status = acpi_ns_check_object_type(data, elements, package-> ret_info3. object_type[i], i); if (ACPI_FAILURE(status)) { return (status); } } else { /* These are the optional package elements */ status = acpi_ns_check_object_type(data, elements, package-> ret_info3. 
tail_object_type, i); if (ACPI_FAILURE(status)) { return (status); } } elements++; } break; case ACPI_PTYPE2_REV_FIXED: /* First element is the (Integer) revision */ status = acpi_ns_check_object_type(data, elements, ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { return (status); } elements++; count--; /* Examine the sub-packages */ status = acpi_ns_check_package_list(data, package, elements, count); break; case ACPI_PTYPE2_PKG_COUNT: /* First element is the (Integer) count of sub-packages to follow */ status = acpi_ns_check_object_type(data, elements, ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { return (status); } /* * Count cannot be larger than the parent package length, but allow it * to be smaller. The >= accounts for the Integer above. */ expected_count = (u32) (*elements)->integer.value; if (expected_count >= count) { goto package_too_small; } count = expected_count; elements++; /* Examine the sub-packages */ status = acpi_ns_check_package_list(data, package, elements, count); break; case ACPI_PTYPE2: case ACPI_PTYPE2_FIXED: case ACPI_PTYPE2_MIN: case ACPI_PTYPE2_COUNT: case ACPI_PTYPE2_FIX_VAR: /* * These types all return a single Package that consists of a * variable number of sub-Packages. * * First, ensure that the first element is a sub-Package. If not, * the BIOS may have incorrectly returned the object as a single * package instead of a Package of Packages (a common error if * there is only one entry). We may be able to repair this by * wrapping the returned Package with a new outer Package. 
*/ if (*elements && ((*elements)->common.type != ACPI_TYPE_PACKAGE)) { /* Create the new outer package and populate it */ status = acpi_ns_wrap_with_package(data, *elements, return_object_ptr); if (ACPI_FAILURE(status)) { return (status); } /* Update locals to point to the new package (of 1 element) */ return_object = *return_object_ptr; elements = return_object->package.elements; count = 1; } /* Examine the sub-packages */ status = acpi_ns_check_package_list(data, package, elements, count); break; default: /* Should not get here if predefined info table is correct */ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, "Invalid internal return type in table entry: %X", package->ret_info.type)); return (AE_AML_INTERNAL); } return (status); package_too_small: /* Error exit for the case with an incorrect package count */ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, "Return Package is too small - found %u elements, expected %u", count, expected_count)); return (AE_AML_OPERAND_VALUE); } /******************************************************************************* * * FUNCTION: acpi_ns_check_package_list * * PARAMETERS: Data - Pointer to validation data structure * Package - Pointer to package-specific info for method * Elements - Element list of parent package. All elements * of this list should be of type Package. * Count - Count of subpackages * * RETURN: Status * * DESCRIPTION: Examine a list of subpackages * ******************************************************************************/ static acpi_status acpi_ns_check_package_list(struct acpi_predefined_data *data, const union acpi_predefined_info *package, union acpi_operand_object **elements, u32 count) { union acpi_operand_object *sub_package; union acpi_operand_object **sub_elements; acpi_status status; u32 expected_count; u32 i; u32 j; /* * Validate each sub-Package in the parent Package * * NOTE: assumes list of sub-packages contains no NULL elements. 
* Any NULL elements should have been removed by earlier call * to acpi_ns_remove_null_elements. */ for (i = 0; i < count; i++) { sub_package = *elements; sub_elements = sub_package->package.elements; data->parent_package = sub_package; /* Each sub-object must be of type Package */ status = acpi_ns_check_object_type(data, &sub_package, ACPI_RTYPE_PACKAGE, i); if (ACPI_FAILURE(status)) { return (status); } /* Examine the different types of expected sub-packages */ data->parent_package = sub_package; switch (package->ret_info.type) { case ACPI_PTYPE2: case ACPI_PTYPE2_PKG_COUNT: case ACPI_PTYPE2_REV_FIXED: /* Each subpackage has a fixed number of elements */ expected_count = package->ret_info.count1 + package->ret_info.count2; if (sub_package->package.count < expected_count) { goto package_too_small; } status = acpi_ns_check_package_elements(data, sub_elements, package->ret_info. object_type1, package->ret_info. count1, package->ret_info. object_type2, package->ret_info. count2, 0); if (ACPI_FAILURE(status)) { return (status); } break; case ACPI_PTYPE2_FIX_VAR: /* * Each subpackage has a fixed number of elements and an * optional element */ expected_count = package->ret_info.count1 + package->ret_info.count2; if (sub_package->package.count < expected_count) { goto package_too_small; } status = acpi_ns_check_package_elements(data, sub_elements, package->ret_info. object_type1, package->ret_info. count1, package->ret_info. object_type2, sub_package->package. count - package->ret_info. count1, 0); if (ACPI_FAILURE(status)) { return (status); } break; case ACPI_PTYPE2_FIXED: /* Each sub-package has a fixed length */ expected_count = package->ret_info2.count; if (sub_package->package.count < expected_count) { goto package_too_small; } /* Check the type of each sub-package element */ for (j = 0; j < expected_count; j++) { status = acpi_ns_check_object_type(data, &sub_elements[j], package-> ret_info2. 
object_type[j], j); if (ACPI_FAILURE(status)) { return (status); } } break; case ACPI_PTYPE2_MIN: /* Each sub-package has a variable but minimum length */ expected_count = package->ret_info.count1; if (sub_package->package.count < expected_count) { goto package_too_small; } /* Check the type of each sub-package element */ status = acpi_ns_check_package_elements(data, sub_elements, package->ret_info. object_type1, sub_package->package. count, 0, 0, 0); if (ACPI_FAILURE(status)) { return (status); } break; case ACPI_PTYPE2_COUNT: /* * First element is the (Integer) count of elements, including * the count field (the ACPI name is num_elements) */ status = acpi_ns_check_object_type(data, sub_elements, ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { return (status); } /* * Make sure package is large enough for the Count and is * is as large as the minimum size */ expected_count = (u32)(*sub_elements)->integer.value; if (sub_package->package.count < expected_count) { goto package_too_small; } if (sub_package->package.count < package->ret_info.count1) { expected_count = package->ret_info.count1; goto package_too_small; } if (expected_count == 0) { /* * Either the num_entries element was originally zero or it was * a NULL element and repaired to an Integer of value zero. * In either case, repair it by setting num_entries to be the * actual size of the subpackage. */ expected_count = sub_package->package.count; (*sub_elements)->integer.value = expected_count; } /* Check the type of each sub-package element */ status = acpi_ns_check_package_elements(data, (sub_elements + 1), package->ret_info. 
object_type1, (expected_count - 1), 0, 0, 1); if (ACPI_FAILURE(status)) { return (status); } break; default: /* Should not get here, type was validated by caller */ return (AE_AML_INTERNAL); } elements++; } return (AE_OK); package_too_small: /* The sub-package count was smaller than required */ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, "Return Sub-Package[%u] is too small - found %u elements, expected %u", i, sub_package->package.count, expected_count)); return (AE_AML_OPERAND_VALUE); } /******************************************************************************* * * FUNCTION: acpi_ns_check_package_elements * * PARAMETERS: Data - Pointer to validation data structure * Elements - Pointer to the package elements array * Type1 - Object type for first group * Count1 - Count for first group * Type2 - Object type for second group * Count2 - Count for second group * start_index - Start of the first group of elements * * RETURN: Status * * DESCRIPTION: Check that all elements of a package are of the correct object * type. Supports up to two groups of different object types. * ******************************************************************************/ static acpi_status acpi_ns_check_package_elements(struct acpi_predefined_data *data, union acpi_operand_object **elements, u8 type1, u32 count1, u8 type2, u32 count2, u32 start_index) { union acpi_operand_object **this_element = elements; acpi_status status; u32 i; /* * Up to two groups of package elements are supported by the data * structure. All elements in each group must be of the same type. * The second group can have a count of zero. 
*/ for (i = 0; i < count1; i++) { status = acpi_ns_check_object_type(data, this_element, type1, i + start_index); if (ACPI_FAILURE(status)) { return (status); } this_element++; } for (i = 0; i < count2; i++) { status = acpi_ns_check_object_type(data, this_element, type2, (i + count1 + start_index)); if (ACPI_FAILURE(status)) { return (status); } this_element++; } return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_check_object_type * * PARAMETERS: Data - Pointer to validation data structure * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * expected_btypes - Bitmap of expected return type(s) * package_index - Index of object within parent package (if * applicable - ACPI_NOT_PACKAGE_ELEMENT * otherwise) * * RETURN: Status * * DESCRIPTION: Check the type of the return object against the expected object * type(s). Use of Btype allows multiple expected object types. * ******************************************************************************/ static acpi_status acpi_ns_check_object_type(struct acpi_predefined_data *data, union acpi_operand_object **return_object_ptr, u32 expected_btypes, u32 package_index) { union acpi_operand_object *return_object = *return_object_ptr; acpi_status status = AE_OK; u32 return_btype; char type_buffer[48]; /* Room for 5 types */ /* * If we get a NULL return_object here, it is a NULL package element. * Since all extraneous NULL package elements were removed earlier by a * call to acpi_ns_remove_null_elements, this is an unexpected NULL element. * We will attempt to repair it. 
*/ if (!return_object) { status = acpi_ns_repair_null_element(data, expected_btypes, package_index, return_object_ptr); if (ACPI_SUCCESS(status)) { return (AE_OK); /* Repair was successful */ } goto type_error_exit; } /* A Namespace node should not get here, but make sure */ if (ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) { ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, "Invalid return type - Found a Namespace node [%4.4s] type %s", return_object->node.name.ascii, acpi_ut_get_type_name(return_object->node. type))); return (AE_AML_OPERAND_TYPE); } /* * Convert the object type (ACPI_TYPE_xxx) to a bitmapped object type. * The bitmapped type allows multiple possible return types. * * Note, the cases below must handle all of the possible types returned * from all of the predefined names (including elements of returned * packages) */ switch (return_object->common.type) { case ACPI_TYPE_INTEGER: return_btype = ACPI_RTYPE_INTEGER; break; case ACPI_TYPE_BUFFER: return_btype = ACPI_RTYPE_BUFFER; break; case ACPI_TYPE_STRING: return_btype = ACPI_RTYPE_STRING; break; case ACPI_TYPE_PACKAGE: return_btype = ACPI_RTYPE_PACKAGE; break; case ACPI_TYPE_LOCAL_REFERENCE: return_btype = ACPI_RTYPE_REFERENCE; break; default: /* Not one of the supported objects, must be incorrect */ goto type_error_exit; } /* Is the object one of the expected types? 
*/ if (return_btype & expected_btypes) { /* For reference objects, check that the reference type is correct */ if (return_object->common.type == ACPI_TYPE_LOCAL_REFERENCE) { status = acpi_ns_check_reference(data, return_object); } return (status); } /* Type mismatch -- attempt repair of the returned object */ status = acpi_ns_repair_object(data, expected_btypes, package_index, return_object_ptr); if (ACPI_SUCCESS(status)) { return (AE_OK); /* Repair was successful */ } type_error_exit: /* Create a string with all expected types for this predefined object */ acpi_ns_get_expected_types(type_buffer, expected_btypes); if (package_index == ACPI_NOT_PACKAGE_ELEMENT) { ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, "Return type mismatch - found %s, expected %s", acpi_ut_get_object_type_name (return_object), type_buffer)); } else { ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, "Return Package type mismatch at index %u - " "found %s, expected %s", package_index, acpi_ut_get_object_type_name (return_object), type_buffer)); } return (AE_AML_OPERAND_TYPE); } /******************************************************************************* * * FUNCTION: acpi_ns_check_reference * * PARAMETERS: Data - Pointer to validation data structure * return_object - Object returned from the evaluation of a * method or object * * RETURN: Status * * DESCRIPTION: Check a returned reference object for the correct reference * type. The only reference type that can be returned from a * predefined method is a named reference. All others are invalid. * ******************************************************************************/ static acpi_status acpi_ns_check_reference(struct acpi_predefined_data *data, union acpi_operand_object *return_object) { /* * Check the reference object for the correct reference type (opcode). 
* The only type of reference that can be converted to an union acpi_object is * a reference to a named object (reference class: NAME) */ if (return_object->reference.class == ACPI_REFCLASS_NAME) { return (AE_OK); } ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, "Return type mismatch - unexpected reference object type [%s] %2.2X", acpi_ut_get_reference_name(return_object), return_object->reference.class)); return (AE_AML_OPERAND_TYPE); } /******************************************************************************* * * FUNCTION: acpi_ns_get_expected_types * * PARAMETERS: Buffer - Pointer to where the string is returned * expected_btypes - Bitmap of expected return type(s) * * RETURN: Buffer is populated with type names. * * DESCRIPTION: Translate the expected types bitmap into a string of ascii * names of expected types, for use in warning messages. * ******************************************************************************/ static void acpi_ns_get_expected_types(char *buffer, u32 expected_btypes) { u32 this_rtype; u32 i; u32 j; j = 1; buffer[0] = 0; this_rtype = ACPI_RTYPE_INTEGER; for (i = 0; i < ACPI_NUM_RTYPES; i++) { /* If one of the expected types, concatenate the name of this type */ if (expected_btypes & this_rtype) { ACPI_STRCAT(buffer, &acpi_rtype_names[i][j]); j = 0; /* Use name separator from now on */ } this_rtype <<= 1; /* Next Rtype */ } }
gpl-2.0
jfdsmabalot/kernel_hammerhead
drivers/usb/host/ohci-tmio.c
4897
9812
/* * OHCI HCD(Host Controller Driver) for USB. * *(C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> *(C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net> *(C) Copyright 2002 Hewlett-Packard Company * * Bus glue for Toshiba Mobile IO(TMIO) Controller's OHCI core * (C) Copyright 2005 Chris Humbert <mahadri-usb@drigon.com> * (C) Copyright 2007, 2008 Dmitry Baryshkov <dbaryshkov@gmail.com> * * This is known to work with the following variants: * TC6393XB revision 3 (32kB SRAM) * * The TMIO's OHCI core DMAs through a small internal buffer that * is directly addressable by the CPU. * * Written from sparse documentation from Toshiba and Sharp's driver * for the 2.4 kernel, * usb-ohci-tc6393.c(C) Copyright 2004 Lineo Solutions, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /*#include <linux/fs.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/namei.h> #include <linux/sched.h>*/ #include <linux/platform_device.h> #include <linux/mfd/core.h> #include <linux/mfd/tmio.h> #include <linux/dma-mapping.h> /*-------------------------------------------------------------------------*/ /* * USB Host Controller Configuration Register */ #define CCR_REVID 0x08 /* b Revision ID */ #define CCR_BASE 0x10 /* l USB Control Register Base Address Low */ #define CCR_ILME 0x40 /* b Internal Local Memory Enable */ #define CCR_PM 0x4c /* w Power Management */ #define CCR_INTC 0x50 /* b INT Control */ #define CCR_LMW1L 0x54 /* w Local Memory Window 1 LMADRS Low */ #define CCR_LMW1H 0x56 /* w Local Memory Window 1 LMADRS High */ #define CCR_LMW1BL 0x58 /* w Local Memory Window 1 Base Address Low */ #define CCR_LMW1BH 0x5A /* w Local Memory Window 1 Base Address High */ #define CCR_LMW2L 0x5C /* w Local Memory Window 2 LMADRS Low */ #define CCR_LMW2H 0x5E /* w Local Memory Window 2 
LMADRS High */ #define CCR_LMW2BL 0x60 /* w Local Memory Window 2 Base Address Low */ #define CCR_LMW2BH 0x62 /* w Local Memory Window 2 Base Address High */ #define CCR_MISC 0xFC /* b MISC */ #define CCR_PM_GKEN 0x0001 #define CCR_PM_CKRNEN 0x0002 #define CCR_PM_USBPW1 0x0004 #define CCR_PM_USBPW2 0x0008 #define CCR_PM_USBPW3 0x0008 #define CCR_PM_PMEE 0x0100 #define CCR_PM_PMES 0x8000 /*-------------------------------------------------------------------------*/ struct tmio_hcd { void __iomem *ccr; spinlock_t lock; /* protects RMW cycles */ }; #define hcd_to_tmio(hcd) ((struct tmio_hcd *)(hcd_to_ohci(hcd) + 1)) /*-------------------------------------------------------------------------*/ static void tmio_write_pm(struct platform_device *dev) { struct usb_hcd *hcd = platform_get_drvdata(dev); struct tmio_hcd *tmio = hcd_to_tmio(hcd); u16 pm; unsigned long flags; spin_lock_irqsave(&tmio->lock, flags); pm = CCR_PM_GKEN | CCR_PM_CKRNEN | CCR_PM_PMEE | CCR_PM_PMES; tmio_iowrite16(pm, tmio->ccr + CCR_PM); spin_unlock_irqrestore(&tmio->lock, flags); } static void tmio_stop_hc(struct platform_device *dev) { struct usb_hcd *hcd = platform_get_drvdata(dev); struct ohci_hcd *ohci = hcd_to_ohci(hcd); struct tmio_hcd *tmio = hcd_to_tmio(hcd); u16 pm; pm = CCR_PM_GKEN | CCR_PM_CKRNEN; switch (ohci->num_ports) { default: dev_err(&dev->dev, "Unsupported amount of ports: %d\n", ohci->num_ports); case 3: pm |= CCR_PM_USBPW3; case 2: pm |= CCR_PM_USBPW2; case 1: pm |= CCR_PM_USBPW1; } tmio_iowrite8(0, tmio->ccr + CCR_INTC); tmio_iowrite8(0, tmio->ccr + CCR_ILME); tmio_iowrite16(0, tmio->ccr + CCR_BASE); tmio_iowrite16(0, tmio->ccr + CCR_BASE + 2); tmio_iowrite16(pm, tmio->ccr + CCR_PM); } static void tmio_start_hc(struct platform_device *dev) { struct usb_hcd *hcd = platform_get_drvdata(dev); struct tmio_hcd *tmio = hcd_to_tmio(hcd); unsigned long base = hcd->rsrc_start; tmio_write_pm(dev); tmio_iowrite16(base, tmio->ccr + CCR_BASE); tmio_iowrite16(base >> 16, tmio->ccr + CCR_BASE + 
2); tmio_iowrite8(1, tmio->ccr + CCR_ILME); tmio_iowrite8(2, tmio->ccr + CCR_INTC); dev_info(&dev->dev, "revision %d @ 0x%08llx, irq %d\n", tmio_ioread8(tmio->ccr + CCR_REVID), hcd->rsrc_start, hcd->irq); } static int ohci_tmio_start(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); int ret; if ((ret = ohci_init(ohci)) < 0) return ret; if ((ret = ohci_run(ohci)) < 0) { err("can't start %s", hcd->self.bus_name); ohci_stop(hcd); return ret; } return 0; } static const struct hc_driver ohci_tmio_hc_driver = { .description = hcd_name, .product_desc = "TMIO OHCI USB Host Controller", .hcd_priv_size = sizeof(struct ohci_hcd) + sizeof (struct tmio_hcd), /* generic hardware linkage */ .irq = ohci_irq, .flags = HCD_USB11 | HCD_MEMORY | HCD_LOCAL_MEM, /* basic lifecycle operations */ .start = ohci_tmio_start, .stop = ohci_stop, .shutdown = ohci_shutdown, /* managing i/o requests and associated device resources */ .urb_enqueue = ohci_urb_enqueue, .urb_dequeue = ohci_urb_dequeue, .endpoint_disable = ohci_endpoint_disable, /* scheduling support */ .get_frame_number = ohci_get_frame, /* root hub support */ .hub_status_data = ohci_hub_status_data, .hub_control = ohci_hub_control, #ifdef CONFIG_PM .bus_suspend = ohci_bus_suspend, .bus_resume = ohci_bus_resume, #endif .start_port_reset = ohci_start_port_reset, }; /*-------------------------------------------------------------------------*/ static struct platform_driver ohci_hcd_tmio_driver; static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev) { const struct mfd_cell *cell = mfd_get_cell(dev); struct resource *regs = platform_get_resource(dev, IORESOURCE_MEM, 0); struct resource *config = platform_get_resource(dev, IORESOURCE_MEM, 1); struct resource *sram = platform_get_resource(dev, IORESOURCE_MEM, 2); int irq = platform_get_irq(dev, 0); struct tmio_hcd *tmio; struct ohci_hcd *ohci; struct usb_hcd *hcd; int ret; if (usb_disabled()) return -ENODEV; if (!cell) return -EINVAL; hcd = 
usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev_name(&dev->dev)); if (!hcd) { ret = -ENOMEM; goto err_usb_create_hcd; } hcd->rsrc_start = regs->start; hcd->rsrc_len = resource_size(regs); tmio = hcd_to_tmio(hcd); spin_lock_init(&tmio->lock); tmio->ccr = ioremap(config->start, resource_size(config)); if (!tmio->ccr) { ret = -ENOMEM; goto err_ioremap_ccr; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { ret = -ENOMEM; goto err_ioremap_regs; } if (!dma_declare_coherent_memory(&dev->dev, sram->start, sram->start, resource_size(sram), DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)) { ret = -EBUSY; goto err_dma_declare; } if (cell->enable) { ret = cell->enable(dev); if (ret) goto err_enable; } tmio_start_hc(dev); ohci = hcd_to_ohci(hcd); ohci_hcd_init(ohci); ret = usb_add_hcd(hcd, irq, 0); if (ret) goto err_add_hcd; if (ret == 0) return ret; usb_remove_hcd(hcd); err_add_hcd: tmio_stop_hc(dev); if (cell->disable) cell->disable(dev); err_enable: dma_release_declared_memory(&dev->dev); err_dma_declare: iounmap(hcd->regs); err_ioremap_regs: iounmap(tmio->ccr); err_ioremap_ccr: usb_put_hcd(hcd); err_usb_create_hcd: return ret; } static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev) { struct usb_hcd *hcd = platform_get_drvdata(dev); struct tmio_hcd *tmio = hcd_to_tmio(hcd); const struct mfd_cell *cell = mfd_get_cell(dev); usb_remove_hcd(hcd); tmio_stop_hc(dev); if (cell->disable) cell->disable(dev); dma_release_declared_memory(&dev->dev); iounmap(hcd->regs); iounmap(tmio->ccr); usb_put_hcd(hcd); platform_set_drvdata(dev, NULL); return 0; } #ifdef CONFIG_PM static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t state) { const struct mfd_cell *cell = mfd_get_cell(dev); struct usb_hcd *hcd = platform_get_drvdata(dev); struct ohci_hcd *ohci = hcd_to_ohci(hcd); struct tmio_hcd *tmio = hcd_to_tmio(hcd); unsigned long flags; u8 misc; int ret; if (time_before(jiffies, ohci->next_statechange)) msleep(5); 
ohci->next_statechange = jiffies; spin_lock_irqsave(&tmio->lock, flags); misc = tmio_ioread8(tmio->ccr + CCR_MISC); misc |= 1 << 3; /* USSUSP */ tmio_iowrite8(misc, tmio->ccr + CCR_MISC); spin_unlock_irqrestore(&tmio->lock, flags); if (cell->suspend) { ret = cell->suspend(dev); if (ret) return ret; } return 0; } static int ohci_hcd_tmio_drv_resume(struct platform_device *dev) { const struct mfd_cell *cell = mfd_get_cell(dev); struct usb_hcd *hcd = platform_get_drvdata(dev); struct ohci_hcd *ohci = hcd_to_ohci(hcd); struct tmio_hcd *tmio = hcd_to_tmio(hcd); unsigned long flags; u8 misc; int ret; if (time_before(jiffies, ohci->next_statechange)) msleep(5); ohci->next_statechange = jiffies; if (cell->resume) { ret = cell->resume(dev); if (ret) return ret; } tmio_start_hc(dev); spin_lock_irqsave(&tmio->lock, flags); misc = tmio_ioread8(tmio->ccr + CCR_MISC); misc &= ~(1 << 3); /* USSUSP */ tmio_iowrite8(misc, tmio->ccr + CCR_MISC); spin_unlock_irqrestore(&tmio->lock, flags); ohci_finish_controller_resume(hcd); return 0; } #else #define ohci_hcd_tmio_drv_suspend NULL #define ohci_hcd_tmio_drv_resume NULL #endif static struct platform_driver ohci_hcd_tmio_driver = { .probe = ohci_hcd_tmio_drv_probe, .remove = __devexit_p(ohci_hcd_tmio_drv_remove), .shutdown = usb_hcd_platform_shutdown, .suspend = ohci_hcd_tmio_drv_suspend, .resume = ohci_hcd_tmio_drv_resume, .driver = { .name = "tmio-ohci", .owner = THIS_MODULE, }, };
gpl-2.0
SOKP/kernel_cyanogen_msm8916
arch/sh/mm/cache-sh4.c
6945
9918
/* * arch/sh/mm/cache-sh4.c * * Copyright (C) 1999, 2000, 2002 Niibe Yutaka * Copyright (C) 2001 - 2009 Paul Mundt * Copyright (C) 2003 Richard Curnow * Copyright (c) 2007 STMicroelectronics (R&D) Ltd. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/mutex.h> #include <linux/fs.h> #include <linux/highmem.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> #include <asm/cache_insns.h> #include <asm/cacheflush.h> /* * The maximum number of pages we support up to when doing ranged dcache * flushing. Anything exceeding this will simply flush the dcache in its * entirety. */ #define MAX_ICACHE_PAGES 32 static void __flush_cache_one(unsigned long addr, unsigned long phys, unsigned long exec_offset); /* * Write back the range of D-cache, and purge the I-cache. * * Called from kernel/module.c:sys_init_module and routine for a.out format, * signal handler code and kprobes code */ static void sh4_flush_icache_range(void *args) { struct flusher_data *data = args; unsigned long start, end; unsigned long flags, v; int i; start = data->addr1; end = data->addr2; /* If there are too many pages then just blow away the caches */ if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { local_flush_cache_all(NULL); return; } /* * Selectively flush d-cache then invalidate the i-cache. * This is inefficient, so only use this for small ranges. 
*/ start &= ~(L1_CACHE_BYTES-1); end += L1_CACHE_BYTES-1; end &= ~(L1_CACHE_BYTES-1); local_irq_save(flags); jump_to_uncached(); for (v = start; v < end; v += L1_CACHE_BYTES) { unsigned long icacheaddr; int j, n; __ocbwb(v); icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask); /* Clear i-cache line valid-bit */ n = boot_cpu_data.icache.n_aliases; for (i = 0; i < cpu_data->icache.ways; i++) { for (j = 0; j < n; j++) __raw_writel(0, icacheaddr + (j * PAGE_SIZE)); icacheaddr += cpu_data->icache.way_incr; } } back_to_cached(); local_irq_restore(flags); } static inline void flush_cache_one(unsigned long start, unsigned long phys) { unsigned long flags, exec_offset = 0; /* * All types of SH-4 require PC to be uncached to operate on the I-cache. * Some types of SH-4 require PC to be uncached to operate on the D-cache. */ if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) || (start < CACHE_OC_ADDRESS_ARRAY)) exec_offset = cached_to_uncached; local_irq_save(flags); __flush_cache_one(start, phys, exec_offset); local_irq_restore(flags); } /* * Write back & invalidate the D-cache of the page. * (To avoid "alias" issues) */ static void sh4_flush_dcache_page(void *arg) { struct page *page = arg; unsigned long addr = (unsigned long)page_address(page); #ifndef CONFIG_SMP struct address_space *mapping = page_mapping(page); if (mapping && !mapping_mapped(mapping)) clear_bit(PG_dcache_clean, &page->flags); else #endif flush_cache_one(CACHE_OC_ADDRESS_ARRAY | (addr & shm_align_mask), page_to_phys(page)); wmb(); } /* TODO: Selective icache invalidation through IC address array.. */ static void flush_icache_all(void) { unsigned long flags, ccr; local_irq_save(flags); jump_to_uncached(); /* Flush I-cache */ ccr = __raw_readl(CCR); ccr |= CCR_CACHE_ICI; __raw_writel(ccr, CCR); /* * back_to_cached() will take care of the barrier for us, don't add * another one! 
*/ back_to_cached(); local_irq_restore(flags); } static void flush_dcache_all(void) { unsigned long addr, end_addr, entry_offset; end_addr = CACHE_OC_ADDRESS_ARRAY + (current_cpu_data.dcache.sets << current_cpu_data.dcache.entry_shift) * current_cpu_data.dcache.ways; entry_offset = 1 << current_cpu_data.dcache.entry_shift; for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) { __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; } } static void sh4_flush_cache_all(void *unused) { flush_dcache_all(); flush_icache_all(); } /* * Note : (RPC) since the caches are physically tagged, the only point * of flush_cache_mm for SH-4 is to get rid of aliases from the * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that * lines can stay resident so long as the virtual address they were * accessed with (hence cache set) is in accord with the physical * address (i.e. tag). It's no different here. * * Caller takes mm->mmap_sem. */ static void sh4_flush_cache_mm(void *arg) { struct mm_struct *mm = arg; if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) return; flush_dcache_all(); } /* * Write back and invalidate I/D-caches for the page. 
* * ADDR: Virtual Address (U0 address) * PFN: Physical page number */ static void sh4_flush_cache_page(void *args) { struct flusher_data *data = args; struct vm_area_struct *vma; struct page *page; unsigned long address, pfn, phys; int map_coherent = 0; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; void *vaddr; vma = data->vma; address = data->addr1 & PAGE_MASK; pfn = data->addr2; phys = pfn << PAGE_SHIFT; page = pfn_to_page(pfn); if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) return; pgd = pgd_offset(vma->vm_mm, address); pud = pud_offset(pgd, address); pmd = pmd_offset(pud, address); pte = pte_offset_kernel(pmd, address); /* If the page isn't present, there is nothing to do here. */ if (!(pte_val(*pte) & _PAGE_PRESENT)) return; if ((vma->vm_mm == current->active_mm)) vaddr = NULL; else { /* * Use kmap_coherent or kmap_atomic to do flushes for * another ASID than the current one. */ map_coherent = (current_cpu_data.dcache.n_aliases && test_bit(PG_dcache_clean, &page->flags) && page_mapped(page)); if (map_coherent) vaddr = kmap_coherent(page, address); else vaddr = kmap_atomic(page); address = (unsigned long)vaddr; } flush_cache_one(CACHE_OC_ADDRESS_ARRAY | (address & shm_align_mask), phys); if (vma->vm_flags & VM_EXEC) flush_icache_all(); if (vaddr) { if (map_coherent) kunmap_coherent(vaddr); else kunmap_atomic(vaddr); } } /* * Write back and invalidate D-caches. * * START, END: Virtual Address (U0 address) * * NOTE: We need to flush the _physical_ page entry. * Flushing the cache lines for U0 only isn't enough. * We need to flush for P1 too, which may contain aliases. */ static void sh4_flush_cache_range(void *args) { struct flusher_data *data = args; struct vm_area_struct *vma; unsigned long start, end; vma = data->vma; start = data->addr1; end = data->addr2; if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) return; /* * If cache is only 4k-per-way, there are never any 'aliases'. 
Since * the cache is physically tagged, the data can just be left in there. */ if (boot_cpu_data.dcache.n_aliases == 0) return; flush_dcache_all(); if (vma->vm_flags & VM_EXEC) flush_icache_all(); } /** * __flush_cache_one * * @addr: address in memory mapped cache array * @phys: P1 address to flush (has to match tags if addr has 'A' bit * set i.e. associative write) * @exec_offset: set to 0x20000000 if flush has to be executed from P2 * region else 0x0 * * The offset into the cache array implied by 'addr' selects the * 'colour' of the virtual address range that will be flushed. The * operation (purge/write-back) is selected by the lower 2 bits of * 'phys'. */ static void __flush_cache_one(unsigned long addr, unsigned long phys, unsigned long exec_offset) { int way_count; unsigned long base_addr = addr; struct cache_info *dcache; unsigned long way_incr; unsigned long a, ea, p; unsigned long temp_pc; dcache = &boot_cpu_data.dcache; /* Write this way for better assembly. */ way_count = dcache->ways; way_incr = dcache->way_incr; /* * Apply exec_offset (i.e. branch to P2 if required.). * * FIXME: * * If I write "=r" for the (temp_pc), it puts this in r6 hence * trashing exec_offset before it's been added on - why? Hence * "=&r" as a 'workaround' */ asm volatile("mov.l 1f, %0\n\t" "add %1, %0\n\t" "jmp @%0\n\t" "nop\n\t" ".balign 4\n\t" "1: .long 2f\n\t" "2:\n" : "=&r" (temp_pc) : "r" (exec_offset)); /* * We know there will be >=1 iteration, so write as do-while to avoid * pointless nead-of-loop check for 0 iterations. */ do { ea = base_addr + PAGE_SIZE; a = base_addr; p = phys; do { *(volatile unsigned long *)a = p; /* * Next line: intentionally not p+32, saves an add, p * will do since only the cache tag bits need to * match. */ *(volatile unsigned long *)(a+32) = p; a += 64; p += 64; } while (a < ea); base_addr += way_incr; } while (--way_count != 0); } extern void __weak sh4__flush_region_init(void); /* * SH-4 has virtually indexed and physically tagged cache. 
*/ void __init sh4_cache_init(void) { printk("PVR=%08x CVR=%08x PRR=%08x\n", __raw_readl(CCN_PVR), __raw_readl(CCN_CVR), __raw_readl(CCN_PRR)); local_flush_icache_range = sh4_flush_icache_range; local_flush_dcache_page = sh4_flush_dcache_page; local_flush_cache_all = sh4_flush_cache_all; local_flush_cache_mm = sh4_flush_cache_mm; local_flush_cache_dup_mm = sh4_flush_cache_mm; local_flush_cache_page = sh4_flush_cache_page; local_flush_cache_range = sh4_flush_cache_range; sh4__flush_region_init(); }
gpl-2.0
StarkDroid/android_kernel_motorola_msm8610
arch/sh/mm/cache-sh4.c
6945
9918
/* * arch/sh/mm/cache-sh4.c * * Copyright (C) 1999, 2000, 2002 Niibe Yutaka * Copyright (C) 2001 - 2009 Paul Mundt * Copyright (C) 2003 Richard Curnow * Copyright (c) 2007 STMicroelectronics (R&D) Ltd. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/mutex.h> #include <linux/fs.h> #include <linux/highmem.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> #include <asm/cache_insns.h> #include <asm/cacheflush.h> /* * The maximum number of pages we support up to when doing ranged dcache * flushing. Anything exceeding this will simply flush the dcache in its * entirety. */ #define MAX_ICACHE_PAGES 32 static void __flush_cache_one(unsigned long addr, unsigned long phys, unsigned long exec_offset); /* * Write back the range of D-cache, and purge the I-cache. * * Called from kernel/module.c:sys_init_module and routine for a.out format, * signal handler code and kprobes code */ static void sh4_flush_icache_range(void *args) { struct flusher_data *data = args; unsigned long start, end; unsigned long flags, v; int i; start = data->addr1; end = data->addr2; /* If there are too many pages then just blow away the caches */ if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { local_flush_cache_all(NULL); return; } /* * Selectively flush d-cache then invalidate the i-cache. * This is inefficient, so only use this for small ranges. 
*/ start &= ~(L1_CACHE_BYTES-1); end += L1_CACHE_BYTES-1; end &= ~(L1_CACHE_BYTES-1); local_irq_save(flags); jump_to_uncached(); for (v = start; v < end; v += L1_CACHE_BYTES) { unsigned long icacheaddr; int j, n; __ocbwb(v); icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask); /* Clear i-cache line valid-bit */ n = boot_cpu_data.icache.n_aliases; for (i = 0; i < cpu_data->icache.ways; i++) { for (j = 0; j < n; j++) __raw_writel(0, icacheaddr + (j * PAGE_SIZE)); icacheaddr += cpu_data->icache.way_incr; } } back_to_cached(); local_irq_restore(flags); } static inline void flush_cache_one(unsigned long start, unsigned long phys) { unsigned long flags, exec_offset = 0; /* * All types of SH-4 require PC to be uncached to operate on the I-cache. * Some types of SH-4 require PC to be uncached to operate on the D-cache. */ if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) || (start < CACHE_OC_ADDRESS_ARRAY)) exec_offset = cached_to_uncached; local_irq_save(flags); __flush_cache_one(start, phys, exec_offset); local_irq_restore(flags); } /* * Write back & invalidate the D-cache of the page. * (To avoid "alias" issues) */ static void sh4_flush_dcache_page(void *arg) { struct page *page = arg; unsigned long addr = (unsigned long)page_address(page); #ifndef CONFIG_SMP struct address_space *mapping = page_mapping(page); if (mapping && !mapping_mapped(mapping)) clear_bit(PG_dcache_clean, &page->flags); else #endif flush_cache_one(CACHE_OC_ADDRESS_ARRAY | (addr & shm_align_mask), page_to_phys(page)); wmb(); } /* TODO: Selective icache invalidation through IC address array.. */ static void flush_icache_all(void) { unsigned long flags, ccr; local_irq_save(flags); jump_to_uncached(); /* Flush I-cache */ ccr = __raw_readl(CCR); ccr |= CCR_CACHE_ICI; __raw_writel(ccr, CCR); /* * back_to_cached() will take care of the barrier for us, don't add * another one! 
*/ back_to_cached(); local_irq_restore(flags); } static void flush_dcache_all(void) { unsigned long addr, end_addr, entry_offset; end_addr = CACHE_OC_ADDRESS_ARRAY + (current_cpu_data.dcache.sets << current_cpu_data.dcache.entry_shift) * current_cpu_data.dcache.ways; entry_offset = 1 << current_cpu_data.dcache.entry_shift; for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) { __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; __raw_writel(0, addr); addr += entry_offset; } } static void sh4_flush_cache_all(void *unused) { flush_dcache_all(); flush_icache_all(); } /* * Note : (RPC) since the caches are physically tagged, the only point * of flush_cache_mm for SH-4 is to get rid of aliases from the * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that * lines can stay resident so long as the virtual address they were * accessed with (hence cache set) is in accord with the physical * address (i.e. tag). It's no different here. * * Caller takes mm->mmap_sem. */ static void sh4_flush_cache_mm(void *arg) { struct mm_struct *mm = arg; if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) return; flush_dcache_all(); } /* * Write back and invalidate I/D-caches for the page. 
* * ADDR: Virtual Address (U0 address) * PFN: Physical page number */ static void sh4_flush_cache_page(void *args) { struct flusher_data *data = args; struct vm_area_struct *vma; struct page *page; unsigned long address, pfn, phys; int map_coherent = 0; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; void *vaddr; vma = data->vma; address = data->addr1 & PAGE_MASK; pfn = data->addr2; phys = pfn << PAGE_SHIFT; page = pfn_to_page(pfn); if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) return; pgd = pgd_offset(vma->vm_mm, address); pud = pud_offset(pgd, address); pmd = pmd_offset(pud, address); pte = pte_offset_kernel(pmd, address); /* If the page isn't present, there is nothing to do here. */ if (!(pte_val(*pte) & _PAGE_PRESENT)) return; if ((vma->vm_mm == current->active_mm)) vaddr = NULL; else { /* * Use kmap_coherent or kmap_atomic to do flushes for * another ASID than the current one. */ map_coherent = (current_cpu_data.dcache.n_aliases && test_bit(PG_dcache_clean, &page->flags) && page_mapped(page)); if (map_coherent) vaddr = kmap_coherent(page, address); else vaddr = kmap_atomic(page); address = (unsigned long)vaddr; } flush_cache_one(CACHE_OC_ADDRESS_ARRAY | (address & shm_align_mask), phys); if (vma->vm_flags & VM_EXEC) flush_icache_all(); if (vaddr) { if (map_coherent) kunmap_coherent(vaddr); else kunmap_atomic(vaddr); } } /* * Write back and invalidate D-caches. * * START, END: Virtual Address (U0 address) * * NOTE: We need to flush the _physical_ page entry. * Flushing the cache lines for U0 only isn't enough. * We need to flush for P1 too, which may contain aliases. */ static void sh4_flush_cache_range(void *args) { struct flusher_data *data = args; struct vm_area_struct *vma; unsigned long start, end; vma = data->vma; start = data->addr1; end = data->addr2; if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) return; /* * If cache is only 4k-per-way, there are never any 'aliases'. 
Since * the cache is physically tagged, the data can just be left in there. */ if (boot_cpu_data.dcache.n_aliases == 0) return; flush_dcache_all(); if (vma->vm_flags & VM_EXEC) flush_icache_all(); } /** * __flush_cache_one * * @addr: address in memory mapped cache array * @phys: P1 address to flush (has to match tags if addr has 'A' bit * set i.e. associative write) * @exec_offset: set to 0x20000000 if flush has to be executed from P2 * region else 0x0 * * The offset into the cache array implied by 'addr' selects the * 'colour' of the virtual address range that will be flushed. The * operation (purge/write-back) is selected by the lower 2 bits of * 'phys'. */ static void __flush_cache_one(unsigned long addr, unsigned long phys, unsigned long exec_offset) { int way_count; unsigned long base_addr = addr; struct cache_info *dcache; unsigned long way_incr; unsigned long a, ea, p; unsigned long temp_pc; dcache = &boot_cpu_data.dcache; /* Write this way for better assembly. */ way_count = dcache->ways; way_incr = dcache->way_incr; /* * Apply exec_offset (i.e. branch to P2 if required.). * * FIXME: * * If I write "=r" for the (temp_pc), it puts this in r6 hence * trashing exec_offset before it's been added on - why? Hence * "=&r" as a 'workaround' */ asm volatile("mov.l 1f, %0\n\t" "add %1, %0\n\t" "jmp @%0\n\t" "nop\n\t" ".balign 4\n\t" "1: .long 2f\n\t" "2:\n" : "=&r" (temp_pc) : "r" (exec_offset)); /* * We know there will be >=1 iteration, so write as do-while to avoid * pointless nead-of-loop check for 0 iterations. */ do { ea = base_addr + PAGE_SIZE; a = base_addr; p = phys; do { *(volatile unsigned long *)a = p; /* * Next line: intentionally not p+32, saves an add, p * will do since only the cache tag bits need to * match. */ *(volatile unsigned long *)(a+32) = p; a += 64; p += 64; } while (a < ea); base_addr += way_incr; } while (--way_count != 0); } extern void __weak sh4__flush_region_init(void); /* * SH-4 has virtually indexed and physically tagged cache. 
*/ void __init sh4_cache_init(void) { printk("PVR=%08x CVR=%08x PRR=%08x\n", __raw_readl(CCN_PVR), __raw_readl(CCN_CVR), __raw_readl(CCN_PRR)); local_flush_icache_range = sh4_flush_icache_range; local_flush_dcache_page = sh4_flush_dcache_page; local_flush_cache_all = sh4_flush_cache_all; local_flush_cache_mm = sh4_flush_cache_mm; local_flush_cache_dup_mm = sh4_flush_cache_mm; local_flush_cache_page = sh4_flush_cache_page; local_flush_cache_range = sh4_flush_cache_range; sh4__flush_region_init(); }
gpl-2.0
yoctobsp/linux-yocto-3.14
sound/soc/pxa/magician.c
7713
14297
/* * SoC audio for HTC Magician * * Copyright (c) 2006 Philipp Zabel <philipp.zabel@gmail.com> * * based on spitz.c, * Authors: Liam Girdwood <lrg@slimlogic.co.uk> * Richard Purdie <richard@openedhand.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/uda1380.h> #include <mach/magician.h> #include <asm/mach-types.h> #include "../codecs/uda1380.h" #include "pxa2xx-i2s.h" #include "pxa-ssp.h" #define MAGICIAN_MIC 0 #define MAGICIAN_MIC_EXT 1 static int magician_hp_switch; static int magician_spk_switch = 1; static int magician_in_sel = MAGICIAN_MIC; static void magician_ext_control(struct snd_soc_codec *codec) { struct snd_soc_dapm_context *dapm = &codec->dapm; if (magician_spk_switch) snd_soc_dapm_enable_pin(dapm, "Speaker"); else snd_soc_dapm_disable_pin(dapm, "Speaker"); if (magician_hp_switch) snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); else snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); switch (magician_in_sel) { case MAGICIAN_MIC: snd_soc_dapm_disable_pin(dapm, "Headset Mic"); snd_soc_dapm_enable_pin(dapm, "Call Mic"); break; case MAGICIAN_MIC_EXT: snd_soc_dapm_disable_pin(dapm, "Call Mic"); snd_soc_dapm_enable_pin(dapm, "Headset Mic"); break; } snd_soc_dapm_sync(dapm); } static int magician_startup(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; mutex_lock(&codec->mutex); /* check the jack status at stream startup */ magician_ext_control(codec); 
mutex_unlock(&codec->mutex); return 0; } /* * Magician uses SSP port for playback. */ static int magician_playback_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; unsigned int acps, acds, width; unsigned int div4 = PXA_SSP_CLK_SCDB_4; int ret = 0; width = snd_pcm_format_physical_width(params_format(params)); /* * rate = SSPSCLK / (2 * width(16 or 32)) * SSPSCLK = (ACPS / ACDS) / SSPSCLKDIV(div4 or div1) */ switch (params_rate(params)) { case 8000: /* off by a factor of 2: bug in the PXA27x audio clock? */ acps = 32842000; switch (width) { case 16: /* 513156 Hz ~= _2_ * 8000 Hz * 32 (+0.23%) */ acds = PXA_SSP_CLK_AUDIO_DIV_16; break; default: /* 32 */ /* 1026312 Hz ~= _2_ * 8000 Hz * 64 (+0.23%) */ acds = PXA_SSP_CLK_AUDIO_DIV_8; } break; case 11025: acps = 5622000; switch (width) { case 16: /* 351375 Hz ~= 11025 Hz * 32 (-0.41%) */ acds = PXA_SSP_CLK_AUDIO_DIV_4; break; default: /* 32 */ /* 702750 Hz ~= 11025 Hz * 64 (-0.41%) */ acds = PXA_SSP_CLK_AUDIO_DIV_2; } break; case 22050: acps = 5622000; switch (width) { case 16: /* 702750 Hz ~= 22050 Hz * 32 (-0.41%) */ acds = PXA_SSP_CLK_AUDIO_DIV_2; break; default: /* 32 */ /* 1405500 Hz ~= 22050 Hz * 64 (-0.41%) */ acds = PXA_SSP_CLK_AUDIO_DIV_1; } break; case 44100: acps = 5622000; switch (width) { case 16: /* 1405500 Hz ~= 44100 Hz * 32 (-0.41%) */ acds = PXA_SSP_CLK_AUDIO_DIV_2; break; default: /* 32 */ /* 2811000 Hz ~= 44100 Hz * 64 (-0.41%) */ acds = PXA_SSP_CLK_AUDIO_DIV_1; } break; case 48000: acps = 12235000; switch (width) { case 16: /* 1529375 Hz ~= 48000 Hz * 32 (-0.44%) */ acds = PXA_SSP_CLK_AUDIO_DIV_2; break; default: /* 32 */ /* 3058750 Hz ~= 48000 Hz * 64 (-0.44%) */ acds = PXA_SSP_CLK_AUDIO_DIV_1; } break; case 96000: default: acps = 12235000; switch (width) { case 16: /* 3058750 Hz ~= 96000 Hz * 32 (-0.44%) */ acds = 
PXA_SSP_CLK_AUDIO_DIV_1; break; default: /* 32 */ /* 6117500 Hz ~= 96000 Hz * 64 (-0.44%) */ acds = PXA_SSP_CLK_AUDIO_DIV_2; div4 = PXA_SSP_CLK_SCDB_1; break; } break; } /* set codec DAI configuration */ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_MSB | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; /* set cpu DAI configuration */ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_IF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; ret = snd_soc_dai_set_tdm_slot(cpu_dai, 1, 0, 1, width); if (ret < 0) return ret; /* set audio clock as clock source */ ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_AUDIO, 0, SND_SOC_CLOCK_OUT); if (ret < 0) return ret; /* set the SSP audio system clock ACDS divider */ ret = snd_soc_dai_set_clkdiv(cpu_dai, PXA_SSP_AUDIO_DIV_ACDS, acds); if (ret < 0) return ret; /* set the SSP audio system clock SCDB divider4 */ ret = snd_soc_dai_set_clkdiv(cpu_dai, PXA_SSP_AUDIO_DIV_SCDB, div4); if (ret < 0) return ret; /* set SSP audio pll clock */ ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, acps); if (ret < 0) return ret; return 0; } /* * Magician uses I2S for capture. 
*/ static int magician_capture_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int ret = 0; /* set codec DAI configuration */ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_MSB | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; /* set cpu DAI configuration */ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_MSB | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; /* set the I2S system clock as output */ ret = snd_soc_dai_set_sysclk(cpu_dai, PXA2XX_I2S_SYSCLK, 0, SND_SOC_CLOCK_OUT); if (ret < 0) return ret; return 0; } static struct snd_soc_ops magician_capture_ops = { .startup = magician_startup, .hw_params = magician_capture_hw_params, }; static struct snd_soc_ops magician_playback_ops = { .startup = magician_startup, .hw_params = magician_playback_hw_params, }; static int magician_get_hp(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = magician_hp_switch; return 0; } static int magician_set_hp(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); if (magician_hp_switch == ucontrol->value.integer.value[0]) return 0; magician_hp_switch = ucontrol->value.integer.value[0]; magician_ext_control(codec); return 1; } static int magician_get_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = magician_spk_switch; return 0; } static int magician_set_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); if (magician_spk_switch == ucontrol->value.integer.value[0]) return 0; magician_spk_switch = ucontrol->value.integer.value[0]; magician_ext_control(codec); return 1; } static int 
magician_get_input(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = magician_in_sel; return 0; } static int magician_set_input(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { if (magician_in_sel == ucontrol->value.integer.value[0]) return 0; magician_in_sel = ucontrol->value.integer.value[0]; switch (magician_in_sel) { case MAGICIAN_MIC: gpio_set_value(EGPIO_MAGICIAN_IN_SEL1, 1); break; case MAGICIAN_MIC_EXT: gpio_set_value(EGPIO_MAGICIAN_IN_SEL1, 0); } return 1; } static int magician_spk_power(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, SND_SOC_DAPM_EVENT_ON(event)); return 0; } static int magician_hp_power(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { gpio_set_value(EGPIO_MAGICIAN_EP_POWER, SND_SOC_DAPM_EVENT_ON(event)); return 0; } static int magician_mic_bias(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, SND_SOC_DAPM_EVENT_ON(event)); return 0; } /* magician machine dapm widgets */ static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", magician_hp_power), SND_SOC_DAPM_SPK("Speaker", magician_spk_power), SND_SOC_DAPM_MIC("Call Mic", magician_mic_bias), SND_SOC_DAPM_MIC("Headset Mic", magician_mic_bias), }; /* magician machine audio_map */ static const struct snd_soc_dapm_route audio_map[] = { /* Headphone connected to VOUTL, VOUTR */ {"Headphone Jack", NULL, "VOUTL"}, {"Headphone Jack", NULL, "VOUTR"}, /* Speaker connected to VOUTL, VOUTR */ {"Speaker", NULL, "VOUTL"}, {"Speaker", NULL, "VOUTR"}, /* Mics are connected to VINM */ {"VINM", NULL, "Headset Mic"}, {"VINM", NULL, "Call Mic"}, }; static const char *input_select[] = {"Call Mic", "Headset Mic"}; static const struct soc_enum magician_in_sel_enum = SOC_ENUM_SINGLE_EXT(2, input_select); static const struct snd_kcontrol_new 
uda1380_magician_controls[] = {
	SOC_SINGLE_BOOL_EXT("Headphone Switch",
			(unsigned long)&magician_hp_switch,
			magician_get_hp, magician_set_hp),
	SOC_SINGLE_BOOL_EXT("Speaker Switch",
			(unsigned long)&magician_spk_switch,
			magician_get_spk, magician_set_spk),
	SOC_ENUM_EXT("Input Select", magician_in_sel_enum,
			magician_get_input, magician_set_input),
};

/*
 * Logic for a uda1380 as connected on a HTC Magician.
 *
 * One-time codec setup run when the DAI link is initialised: marks
 * unconnected codec pins, then registers the machine-level mixer
 * controls, DAPM widgets and audio routes defined above.
 */
static int magician_uda1380_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;
	int err;

	/* NC codec pins */
	snd_soc_dapm_nc_pin(dapm, "VOUTLHP");
	snd_soc_dapm_nc_pin(dapm, "VOUTRHP");

	/* FIXME: is anything connected here? */
	snd_soc_dapm_nc_pin(dapm, "VINL");
	snd_soc_dapm_nc_pin(dapm, "VINR");

	/* Add magician specific controls */
	err = snd_soc_add_codec_controls(codec, uda1380_magician_controls,
				ARRAY_SIZE(uda1380_magician_controls));
	if (err < 0)
		return err;

	/*
	 * Add magician specific widgets.
	 * NOTE(review): return values of the two calls below are ignored;
	 * common in machine drivers, but worth confirming intentional.
	 */
	snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
				  ARRAY_SIZE(uda1380_dapm_widgets));

	/* Set up magician specific audio path interconnects */
	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));

	return 0;
}

/*
 * magician digital audio interface glue - connects codec <--> CPU.
 * Playback goes out over SSP1, capture comes in over the PXA I2S unit;
 * both links talk to the same uda1380 codec at i2c address 0x18.
 */
static struct snd_soc_dai_link magician_dai[] = {
	{
		.name = "uda1380",
		.stream_name = "UDA1380 Playback",
		.cpu_dai_name = "pxa-ssp-dai.0",
		.codec_dai_name = "uda1380-hifi-playback",
		.platform_name = "pxa-pcm-audio",
		.codec_name = "uda1380-codec.0-0018",
		.init = magician_uda1380_init,
		.ops = &magician_playback_ops,
	},
	{
		.name = "uda1380",
		.stream_name = "UDA1380 Capture",
		.cpu_dai_name = "pxa2xx-i2s",
		.codec_dai_name = "uda1380-hifi-capture",
		.platform_name = "pxa-pcm-audio",
		.codec_name = "uda1380-codec.0-0018",
		.ops = &magician_capture_ops,
	}
};

/* magician audio machine driver */
static struct snd_soc_card snd_soc_card_magician = {
	.name = "Magician",
	.owner = THIS_MODULE,
	.dai_link = magician_dai,
	.num_links = ARRAY_SIZE(magician_dai),
};

/* "soc-audio" platform device carrying the card; created in magician_init() */
static struct platform_device *magician_snd_device;

/*
 * FIXME: move into magician board file once merged into the pxa tree
 */
static struct uda1380_platform_data uda1380_info = {
	.gpio_power = EGPIO_MAGICIAN_CODEC_POWER,
	.gpio_reset = EGPIO_MAGICIAN_CODEC_RESET,
	.dac_clk    = UDA1380_DAC_CLK_WSPLL,
};

/* i2c description used to instantiate the codec on bus 0 */
static struct i2c_board_info i2c_board_info[] = {
	{
		I2C_BOARD_INFO("uda1380", 0x18),
		.platform_data = &uda1380_info,
	},
};

/*
 * Module init: instantiate the codec i2c device, claim the audio power /
 * input-select GPIOs, then register the "soc-audio" platform device.
 * On failure the goto chain releases exactly the GPIOs acquired so far
 * (labels unwind in reverse acquisition order).
 */
static int __init magician_init(void)
{
	int ret;
	struct i2c_adapter *adapter;
	struct i2c_client *client;

	if (!machine_is_magician())
		return -ENODEV;

	adapter = i2c_get_adapter(0);
	if (!adapter)
		return -ENODEV;
	client = i2c_new_device(adapter, i2c_board_info);
	i2c_put_adapter(adapter);
	if (!client)
		return -ENODEV;

	ret = gpio_request(EGPIO_MAGICIAN_SPK_POWER, "SPK_POWER");
	if (ret)
		goto err_request_spk;
	ret = gpio_request(EGPIO_MAGICIAN_EP_POWER, "EP_POWER");
	if (ret)
		goto err_request_ep;
	ret = gpio_request(EGPIO_MAGICIAN_MIC_POWER, "MIC_POWER");
	if (ret)
		goto err_request_mic;
	ret = gpio_request(EGPIO_MAGICIAN_IN_SEL0, "IN_SEL0");
	if (ret)
		goto err_request_in_sel0;
	ret = gpio_request(EGPIO_MAGICIAN_IN_SEL1, "IN_SEL1");
	if (ret)
		goto err_request_in_sel1;

	gpio_set_value(EGPIO_MAGICIAN_IN_SEL0, 0);

	magician_snd_device = platform_device_alloc("soc-audio", -1);
	if (!magician_snd_device) {
		ret = -ENOMEM;
		goto err_pdev;
	}

	platform_set_drvdata(magician_snd_device, &snd_soc_card_magician);
	ret = platform_device_add(magician_snd_device);
	if (ret) {
		/* put drops the only reference and frees the device */
		platform_device_put(magician_snd_device);
		goto err_pdev;
	}

	return 0;

err_pdev:
	gpio_free(EGPIO_MAGICIAN_IN_SEL1);
err_request_in_sel1:
	gpio_free(EGPIO_MAGICIAN_IN_SEL0);
err_request_in_sel0:
	gpio_free(EGPIO_MAGICIAN_MIC_POWER);
err_request_mic:
	gpio_free(EGPIO_MAGICIAN_EP_POWER);
err_request_ep:
	gpio_free(EGPIO_MAGICIAN_SPK_POWER);
err_request_spk:
	return ret;
}

/*
 * Module exit: unregister the card device, power down the amplifier /
 * earpiece / mic rails, and release all GPIOs claimed in magician_init().
 *
 * NOTE(review): the i2c_client created in magician_init() is never
 * unregistered here, and the error paths of magician_init() leave it
 * registered too — verify whether that leak is intentional.
 */
static void __exit magician_exit(void)
{
	platform_device_unregister(magician_snd_device);

	gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, 0);
	gpio_set_value(EGPIO_MAGICIAN_EP_POWER, 0);
	gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, 0);

	gpio_free(EGPIO_MAGICIAN_IN_SEL1);
	gpio_free(EGPIO_MAGICIAN_IN_SEL0);
	gpio_free(EGPIO_MAGICIAN_MIC_POWER);
	gpio_free(EGPIO_MAGICIAN_EP_POWER);
	gpio_free(EGPIO_MAGICIAN_SPK_POWER);
}

module_init(magician_init);
module_exit(magician_exit);

MODULE_AUTHOR("Philipp Zabel");
MODULE_DESCRIPTION("ALSA SoC Magician");
MODULE_LICENSE("GPL");
gpl-2.0
srikanthk16/android_hammerhead_msm
drivers/parport/parport_amiga.c
8993
7636
/* Low-level parallel port routines for the Amiga built-in port
 *
 * Author: Joerg Dorchain <joerg@dorchain.net>
 *
 * This is a complete rewrite of the code, but based heavily upon the old
 * lp_intern code.
 *
 * The built-in Amiga parallel port provides one port at a fixed address
 * with 8 bidirectional data lines (D0 - D7) and 3 bidirectional status
 * lines (BUSY, POUT, SEL), 1 output control line /STROBE (raised automatically
 * in hardware when the data register is accessed), and 1 input control line
 * /ACK, able to cause an interrupt, but both not directly settable by
 * software.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/parport.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

#include <asm/setup.h>
#include <asm/amigahw.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/amigaints.h>

#undef DEBUG
#ifdef DEBUG
#define DPRINTK printk
#else
#define DPRINTK(x...)	do { } while (0)
#endif

/* Write a data byte to CIA-A port B (the parallel data register). */
static void amiga_write_data(struct parport *p, unsigned char data)
{
	DPRINTK(KERN_DEBUG "write_data %c\n",data);
	/* Triggers also /STROBE. This behavior cannot be changed */
	ciaa.prb = data;
	mb();
}

/* Read the parallel data register. */
static unsigned char amiga_read_data(struct parport *p)
{
	/* Triggers also /STROBE. This behavior cannot be changed */
	return ciaa.prb;
}

#if 0
/* Unused: the Amiga hardware has no software-settable control lines. */
static unsigned char control_pc_to_amiga(unsigned char control)
{
	unsigned char ret = 0;

	if (control & PARPORT_CONTROL_SELECT) /* XXX: What is SELECP? */
		;
	if (control & PARPORT_CONTROL_INIT) /* INITP */
		/* reset connected to cpu reset pin */;
	if (control & PARPORT_CONTROL_AUTOFD) /* AUTOLF */
		/* Not connected */;
	if (control & PARPORT_CONTROL_STROBE) /* Strobe */
		/* Handled only directly by hardware */;
	return ret;
}
#endif

/*
 * Synthesize a PC-style control byte; the argument is ignored because
 * none of these lines are readable on this hardware.
 */
static unsigned char control_amiga_to_pc(unsigned char control)
{
	return PARPORT_CONTROL_SELECT |
	       PARPORT_CONTROL_AUTOFD | PARPORT_CONTROL_STROBE;
	/* fake value: interrupt enable, select in, no reset,
	no autolf, no strobe - seems to be closest the wiring diagram */
}

/* Control lines are hardware-driven here, so writing is a no-op. */
static void amiga_write_control(struct parport *p, unsigned char control)
{
	DPRINTK(KERN_DEBUG "write_control %02x\n",control);
	/* No implementation possible */
}

/* Always returns the fixed fake control value (see above). */
static unsigned char amiga_read_control(struct parport *p)
{
	DPRINTK(KERN_DEBUG "read_control \n");
	return control_amiga_to_pc(0);
}

/*
 * Standard read-modify-write frob; effectively a no-op on this port
 * since amiga_write_control() discards the result.
 */
static unsigned char amiga_frob_control(struct parport *p, unsigned char mask, unsigned char val)
{
	unsigned char old;

	DPRINTK(KERN_DEBUG "frob_control mask %02x, value %02x\n",mask,val);
	old = amiga_read_control(p);
	amiga_write_control(p, (old & ~mask) ^ val);
	return old;
}

#if 0 /* currently unused */
/* Map PC status bits onto the 3 CIA-B status pins (BUSY=bit0 inverted,
 * POUT=bit1, SEL=bit2); ACK/ERROR are not software-settable. */
static unsigned char status_pc_to_amiga(unsigned char status)
{
	unsigned char ret = 1;

	if (status & PARPORT_STATUS_BUSY) /* Busy */
		ret &= ~1;
	if (status & PARPORT_STATUS_ACK) /* Ack */
		/* handled in hardware */;
	if (status & PARPORT_STATUS_PAPEROUT) /* PaperOut */
		ret |= 2;
	if (status & PARPORT_STATUS_SELECT) /* select */
		ret |= 4;
	if (status & PARPORT_STATUS_ERROR) /* error */
		/* not connected */;
	return ret;
}
#endif

/*
 * Translate the 3 CIA-B status pins into PC-style status bits.
 * BUSY is active-low on the PC side, hence the inversion.
 */
static unsigned char status_amiga_to_pc(unsigned char status)
{
	unsigned char ret = PARPORT_STATUS_BUSY | PARPORT_STATUS_ACK | PARPORT_STATUS_ERROR;

	if (status & 1) /* Busy */
		ret &= ~PARPORT_STATUS_BUSY;
	if (status & 2) /* PaperOut */
		ret |= PARPORT_STATUS_PAPEROUT;
	if (status & 4) /* Selected */
		ret |= PARPORT_STATUS_SELECT;
	/* the rest is not connected or handled autonomously in hardware */

	return ret;
}

/* Read the low 3 bits of CIA-B port A and convert to PC status format. */
static unsigned char amiga_read_status(struct parport *p)
{
	unsigned char status;

	status = status_amiga_to_pc(ciab.pra & 7);
	DPRINTK(KERN_DEBUG "read_status %02x\n", status);
	return status;
}

/* Enable the CIA-A FLG (/ACK) interrupt line. */
static void amiga_enable_irq(struct parport *p)
{
	enable_irq(IRQ_AMIGA_CIAA_FLG);
}

/* Disable the CIA-A FLG (/ACK) interrupt line. */
static void amiga_disable_irq(struct parport *p)
{
	disable_irq(IRQ_AMIGA_CIAA_FLG);
}

/* Switch the 8 data lines to output (host -> peripheral). */
static void amiga_data_forward(struct parport *p)
{
	DPRINTK(KERN_DEBUG "forward\n");
	ciaa.ddrb = 0xff; /* all pins output */
	mb();
}

/* Switch the 8 data lines to input (peripheral -> host). */
static void amiga_data_reverse(struct parport *p)
{
	DPRINTK(KERN_DEBUG "reverse\n");
	ciaa.ddrb = 0; /* all pins input */
	mb();
}

/* Initialise a saved-state record: data 0, data lines output, status clear. */
static void amiga_init_state(struct pardevice *dev, struct parport_state *s)
{
	s->u.amiga.data = 0;
	s->u.amiga.datadir = 255;
	s->u.amiga.status = 0;
	s->u.amiga.statusdir = 0;
}

/* Snapshot data/status registers and their direction bits. */
static void amiga_save_state(struct parport *p, struct parport_state *s)
{
	mb();
	s->u.amiga.data = ciaa.prb;
	s->u.amiga.datadir = ciaa.ddrb;
	s->u.amiga.status = ciab.pra & 7;
	s->u.amiga.statusdir = ciab.ddra & 7;
	mb();
}

/*
 * Restore a previously saved port state.
 *
 * NOTE(review): the |= on the two CIA-B registers below can only ever
 * set bits, never clear them (the masked old value is OR-ed back in),
 * so a saved "0" status bit is not actually restored. Looks like it was
 * meant to be a plain assignment — confirm against hardware intent
 * before changing.
 */
static void amiga_restore_state(struct parport *p, struct parport_state *s)
{
	mb();
	ciaa.prb = s->u.amiga.data;
	ciaa.ddrb = s->u.amiga.datadir;
	ciab.pra |= (ciab.pra & 0xf8) | s->u.amiga.status;
	ciab.ddra |= (ciab.ddra & 0xf8) | s->u.amiga.statusdir;
	mb();
}

/* Operation table: native routines plus generic IEEE 1284 software modes. */
static struct parport_operations pp_amiga_ops = {
	.write_data	= amiga_write_data,
	.read_data	= amiga_read_data,

	.write_control	= amiga_write_control,
	.read_control	= amiga_read_control,
	.frob_control	= amiga_frob_control,

	.read_status	= amiga_read_status,

	.enable_irq	= amiga_enable_irq,
	.disable_irq	= amiga_disable_irq,

	.data_forward	= amiga_data_forward,
	.data_reverse	= amiga_data_reverse,

	.init_state	= amiga_init_state,
	.save_state	= amiga_save_state,
	.restore_state	= amiga_restore_state,

	.epp_write_data	= parport_ieee1284_epp_write_data,
	.epp_read_data	= parport_ieee1284_epp_read_data,
	.epp_write_addr	= parport_ieee1284_epp_write_addr,
	.epp_read_addr	= parport_ieee1284_epp_read_addr,

	.ecp_write_data	= parport_ieee1284_ecp_write_data,
	.ecp_read_data	= parport_ieee1284_ecp_read_data,
	.ecp_write_addr	= parport_ieee1284_ecp_write_addr,

	.compat_write_data	= parport_ieee1284_write_compat,
	.nibble_read_data	= parport_ieee1284_read_nibble,
	.byte_read_data		= parport_ieee1284_read_byte,

	.owner		= THIS_MODULE,
};

/* ----------- Initialisation code --------------------------------- */

/*
 * Probe: set data lines to output and status lines to input, register
 * the port, claim the /ACK interrupt, then announce the port.
 */
static int __init amiga_parallel_probe(struct platform_device *pdev)
{
	struct parport *p;
	int err;

	ciaa.ddrb = 0xff;
	ciab.ddra &= 0xf8;
	mb();

	p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG,
				  PARPORT_DMA_NONE, &pp_amiga_ops);
	if (!p)
		return -EBUSY;

	err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name,
			  p);
	if (err)
		goto out_irq;

	printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name);
	/* XXX: set operating mode */
	parport_announce_port(p);

	platform_set_drvdata(pdev, p);

	return 0;

out_irq:
	parport_put_port(p);
	return err;
}

/* Remove: tear down in reverse order of probe. */
static int __exit amiga_parallel_remove(struct platform_device *pdev)
{
	struct parport *port = platform_get_drvdata(pdev);

	parport_remove_port(port);
	if (port->irq != PARPORT_IRQ_NONE)
		free_irq(IRQ_AMIGA_CIAA_FLG, port);
	parport_put_port(port);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static struct platform_driver amiga_parallel_driver = {
	.remove = __exit_p(amiga_parallel_remove),
	.driver   = {
		.name	= "amiga-parallel",
		.owner	= THIS_MODULE,
	},
};

/* Bind via platform_driver_probe since the device cannot hotplug. */
static int __init amiga_parallel_init(void)
{
	return platform_driver_probe(&amiga_parallel_driver,
				     amiga_parallel_probe);
}

module_init(amiga_parallel_init);

static void __exit amiga_parallel_exit(void)
{
	platform_driver_unregister(&amiga_parallel_driver);
}

module_exit(amiga_parallel_exit);

MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port");
MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:amiga-parallel");
gpl-2.0
CandyDevices/kernel_htc_msm8974
arch/sh/mm/cache-sh7705.c
12065
4964
/*
 * arch/sh/mm/cache-sh7705.c
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2004  Alex Song
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The 32KB cache on the SH7705 suffers from the same synonym problem
 * as SH4 CPUs
 */

/*
 * Walk every line of every D-cache way through the operand-cache
 * address array; any entry that is both valid and dirty has its
 * valid+updated bits cleared, which writes it back and invalidates it.
 */
static inline void cache_wback_all(void)
{
	unsigned long ways, waysize, addrstart;

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;
	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;
			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;

			data = __raw_readl(addr);

			/* only touch lines that are valid AND dirty */
			if ((data & v) == v)
				__raw_writel(data & ~v, addr);

		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format.
 */
static void sh7705_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;

	start = data->addr1;
	end = data->addr2;

	__flush_wback_region((void *)start, end - start);
}

/*
 * Writeback&Invalidate the D-cache of the page
 */
static void __flush_dcache_page(unsigned long phys)
{
	unsigned long ways, waysize, addrstart;
	unsigned long flags;

	phys |= SH_CACHE_VALID;

	/*
	 * Here, phys is the physical address of the page. We check all the
	 * tags in the cache for those with the same page number as this page
	 * (by masking off the lowest 2 bits of the 19-bit tag; these bits are
	 * derived from the offset within in the 4k page). Matching valid
	 * entries are invalidated.
	 *
	 * Since 2 bits of the cache index are derived from the virtual page
	 * number, knowing this would reduce the number of cache entries to be
	 * searched by a factor of 4. However this function exists to deal with
	 * potential cache aliasing, therefore the optimisation is probably not
	 * possible.
	 */
	local_irq_save(flags);
	jump_to_uncached();	/* must not run cached while poking the array */

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;
	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;

			/* mask down to tag (page-aligned) + valid bit */
			data = __raw_readl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
			if (data == phys) {
				data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
				__raw_writel(data, addr);
			}
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);

	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh7705_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	struct address_space *mapping = page_mapping(page);

	/* unmapped page-cache pages can be flushed lazily later */
	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else
		__flush_dcache_page(__pa(page_address(page)));
}

/* Write back and invalidate the entire D-cache, irqs off and uncached. */
static void sh7705_flush_cache_all(void *args)
{
	unsigned long flags;

	local_irq_save(flags);
	jump_to_uncached();

	cache_wback_all();
	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDRESS: Virtual Address (U0 address)
 */
static void sh7705_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	unsigned long pfn = data->addr2;	/* addr2 carries the pfn here */

	__flush_dcache_page(pfn << PAGE_SHIFT);
}

/*
 * This is called when a page-cache page is about to be mapped into a
 * user process' address space.  It offers an opportunity for a
 * port to ensure d-cache/i-cache coherency if necessary.
 *
 * Not entirely sure why this is necessary on SH3 with 32K cache but
 * without it we get occasional "Memory fault" when loading a program.
 */
static void sh7705_flush_icache_page(void *page)
{
	__flush_purge_region(page_address(page), PAGE_SIZE);
}

/* Hook the SH7705-specific flushers into the generic local_flush_* ops. */
void __init sh7705_cache_init(void)
{
	local_flush_icache_range	= sh7705_flush_icache_range;
	local_flush_dcache_page		= sh7705_flush_dcache_page;
	local_flush_cache_all		= sh7705_flush_cache_all;
	local_flush_cache_mm		= sh7705_flush_cache_all;
	local_flush_cache_dup_mm	= sh7705_flush_cache_all;
	local_flush_cache_range		= sh7705_flush_cache_all;
	local_flush_cache_page		= sh7705_flush_cache_page;
	local_flush_icache_page		= sh7705_flush_icache_page;
}
gpl-2.0
mptcp-nexus/android_external_iproute2
tc/q_drr.c
34
2586
/*
 * q_drr.c		DRR.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Patrick McHardy <kaber@trash.net>
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <syslog.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

#include "utils.h"
#include "tc_util.h"

/* Usage text for the qdisc itself (which takes no parameters). */
static void explain(void)
{
	fprintf(stderr, "Usage: ... drr\n");
}

/* Usage text for a DRR class ("quantum" is the only knob). */
static void explain2(void)
{
	fprintf(stderr, "Usage: ... drr quantum SIZE\n");
}

/*
 * Parse qdisc-level options.  DRR accepts none, so anything but "help"
 * is rejected; an empty argument list succeeds with no TCA_OPTIONS.
 */
static int drr_parse_opt(struct qdisc_util *qu, int argc, char **argv,
			 struct nlmsghdr *n)
{
	while (argc > 0) {
		if (strcmp(*argv, "help") == 0) {
			explain();
			return -1;
		} else {
			fprintf(stderr, "What is \"%s\"?\n", *argv);
			explain();
			return -1;
		}
		/* not reached: both branches above return */
		argc--; argv++;
	}

	return 0;
}

/*
 * Parse class-level options: an optional "quantum SIZE", appended as a
 * TCA_DRR_QUANTUM attribute nested inside TCA_OPTIONS.
 */
static int drr_parse_class_opt(struct qdisc_util *qu, int argc, char **argv,
			       struct nlmsghdr *n)
{
	struct rtattr *tail;
	__u32 tmp;

	tail = NLMSG_TAIL(n);
	addattr_l(n, 1024, TCA_OPTIONS, NULL, 0);

	while (argc > 0) {
		if (strcmp(*argv, "quantum") == 0) {
			NEXT_ARG();	/* advances argc/argv */
			if (get_size(&tmp, *argv)) {
				fprintf(stderr, "Illegal \"quantum\"\n");
				return -1;
			}
			addattr_l(n, 1024, TCA_DRR_QUANTUM, &tmp, sizeof(tmp));
		} else if (strcmp(*argv, "help") == 0) {
			explain2();
			return -1;
		} else {
			fprintf(stderr, "What is \"%s\"?\n", *argv);
			explain2();
			return -1;
		}
		argc--; argv++;
	}

	/* close the TCA_OPTIONS nest by fixing up its length */
	tail->rta_len = (void *) NLMSG_TAIL(n) - (void *)tail;
	return 0;
}

/* Print class options (the quantum, human-readable) back to the user. */
static int drr_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
{
	struct rtattr *tb[TCA_DRR_MAX + 1];
	SPRINT_BUF(b1);

	if (opt == NULL)
		return 0;

	parse_rtattr_nested(tb, TCA_DRR_MAX, opt);

	if (tb[TCA_DRR_QUANTUM])
		fprintf(f, "quantum %s ",
			sprint_size(*(__u32 *)RTA_DATA(tb[TCA_DRR_QUANTUM]),
			b1));
	return 0;
}

/* Print extended statistics (current deficit counter) for a class. */
static int drr_print_xstats(struct qdisc_util *qu, FILE *f,
			    struct rtattr *xstats)
{
	struct tc_drr_stats *x;
	SPRINT_BUF(b1);

	if (xstats == NULL)
		return 0;
	if (RTA_PAYLOAD(xstats) < sizeof(*x))
		return -1;

	x = RTA_DATA(xstats);
	fprintf(f, " deficit %s ", sprint_size(x->deficit, b1));
	return 0;
}

/* tc dispatch table entry for the "drr" qdisc. */
struct qdisc_util drr_qdisc_util = {
	.id		= "drr",
	.parse_qopt	= drr_parse_opt,
	.print_qopt	= drr_print_opt,
	.print_xstats	= drr_print_xstats,
	.parse_copt	= drr_parse_class_opt,
	.print_copt	= drr_print_opt,
};
gpl-2.0
Blackburn29/PsycoKernel
arch/mips/math-emu/dp_mul.c
34
4467
/* * MIPS floating point support
 * Copyright (C) 1994-2000 Algorithmics Ltd.
 *
 * ########################################################################
 *
 *  This program is free software; you can distribute it and/or modify it
 *  under the terms of the GNU General Public License (Version 2) as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 */

#include "ieee754dp.h"

/*
 * Software IEEE 754 double-precision multiply.
 *
 * The leading switch dispatches on the class pair (NaN / infinity /
 * zero / denormal / normal) and resolves every special case directly;
 * only (de)normal x (de)normal falls through to the arithmetic path,
 * which computes the full 53x53-bit product via four 32x32->64
 * partial products, collapses the low half into a sticky bit, and
 * hands the normalized result to DPNORMRET2 for rounding.
 */
ieee754dp ieee754dp_mul(ieee754dp x, ieee754dp y)
{
	COMPXDP;
	COMPYDP;

	EXPLODEXDP;
	EXPLODEYDP;

	CLEARCX;

	FLUSHXDP;
	FLUSHYDP;

	switch (CLPAIR(xc, yc)) {
	/* any signalling NaN operand: invalid operation */
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		SETCX(IEEE754_INVALID_OPERATION);
		return ieee754dp_nanxcpt(ieee754dp_indef(), "mul", x, y);

	/* quiet NaN operand: propagate it */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return y;

	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return x;

	/* 0 * inf is invalid */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
		SETCX(IEEE754_INVALID_OPERATION);
		return ieee754dp_xcpt(ieee754dp_indef(), "mul", x, y);

	/* inf * finite-nonzero or inf * inf: signed infinity */
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		return ieee754dp_inf(xs ^ ys);

	/* zero * finite: signed zero */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		return ieee754dp_zero(xs ^ ys);

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		DPDNORMX;
		/* fall through: still need to normalize y */

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		DPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		DPDNORMX;
		break;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
		break;
	}
	/* both mantissas are now normalized with the hidden bit set */
	assert(xm & DP_HIDDEN_BIT);
	assert(ym & DP_HIDDEN_BIT);

	{
		int re = xe + ye;
		int rs = xs ^ ys;
		u64 rm;

		/* shift mantissas up so the product's top bits land in hrm */
		xm <<= 64 - (DP_MBITS + 1);
		ym <<= 64 - (DP_MBITS + 1);

#define DPXMULT(x, y) ((u64)(x) * (u64)y)

		/* 64x64 -> 128 multiply built from four 32x32 partials */
		{
			unsigned lxm = xm;
			unsigned hxm = xm >> 32;
			unsigned lym = ym;
			unsigned hym = ym >> 32;
			u64 lrm;
			u64 hrm;

			lrm = DPXMULT(lxm, lym);
			hrm = DPXMULT(hxm, hym);

			{
				u64 t = DPXMULT(lxm, hym);
				{
					u64 at = lrm + (t << 32);
					hrm += at < lrm;	/* carry */
					lrm = at;
				}
				hrm = hrm + (t >> 32);
			}

			{
				u64 t = DPXMULT(hxm, lym);
				{
					u64 at = lrm + (t << 32);
					hrm += at < lrm;	/* carry */
					lrm = at;
				}
				hrm = hrm + (t >> 32);
			}
			/* low half folds into a sticky bit */
			rm = hrm | (lrm != 0);
		}

		/*
		 * Normalize to mantissa + 3 guard/round/sticky bits; shifted-
		 * out low bits are OR-ed back in as sticky for rounding.
		 */
		if ((s64) rm < 0) {
			rm = (rm >> (64 - (DP_MBITS + 1 + 3))) |
			     ((rm << (DP_MBITS + 1 + 3)) != 0);
			re++;
		} else {
			rm = (rm >> (64 - (DP_MBITS + 1 + 3 + 1))) |
			     ((rm << (DP_MBITS + 1 + 3 + 1)) != 0);
		}
		assert(rm & (DP_HIDDEN_BIT << 3));
		DPNORMRET2(rs, re, rm, "mul", x, y);
	}
}
gpl-2.0
jameshilliard/m8whl-3.4.0-g278eae8
drivers/net/wireless/b43/phy_n.c
34
138439
/* Broadcom B43 wireless driver IEEE 802.11n PHY support Copyright (c) 2008 Michael Buesch <m@bues.ch> Copyright (c) 2010-2011 Rafał Miłecki <zajec5@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/delay.h> #include <linux/slab.h> #include <linux/types.h> #include "b43.h" #include "phy_n.h" #include "tables_nphy.h" #include "radio_2055.h" #include "radio_2056.h" #include "main.h" struct nphy_txgains { u16 txgm[2]; u16 pga[2]; u16 pad[2]; u16 ipa[2]; }; struct nphy_iqcal_params { u16 txgm; u16 pga; u16 pad; u16 ipa; u16 cal_gain; u16 ncorr[5]; }; struct nphy_iq_est { s32 iq0_prod; u32 i0_pwr; u32 q0_pwr; s32 iq1_prod; u32 i1_pwr; u32 q1_pwr; }; enum b43_nphy_rf_sequence { B43_RFSEQ_RX2TX, B43_RFSEQ_TX2RX, B43_RFSEQ_RESET2RX, B43_RFSEQ_UPDATE_GAINH, B43_RFSEQ_UPDATE_GAINL, B43_RFSEQ_UPDATE_GAINU, }; enum b43_nphy_rssi_type { B43_NPHY_RSSI_X = 0, B43_NPHY_RSSI_Y, B43_NPHY_RSSI_Z, B43_NPHY_RSSI_PWRDET, B43_NPHY_RSSI_TSSI_I, B43_NPHY_RSSI_TSSI_Q, B43_NPHY_RSSI_TBD, }; static inline bool b43_nphy_ipa(struct b43_wldev *dev) { enum ieee80211_band band = b43_current_band(dev->wl); return ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) || (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)); } static u8 b43_nphy_get_rx_core_state(struct b43_wldev *dev) { return (b43_phy_read(dev, B43_NPHY_RFSEQCA) & 
B43_NPHY_RFSEQCA_RXEN) >> B43_NPHY_RFSEQCA_RXEN_SHIFT; } static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, enum b43_nphy_rf_sequence seq) { static const u16 trigger[] = { [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX, [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX, [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX, [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH, [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL, [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU, }; int i; u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE); B43_WARN_ON(seq >= ARRAY_SIZE(trigger)); b43_phy_set(dev, B43_NPHY_RFSEQMODE, B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER); b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]); for (i = 0; i < 200; i++) { if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq])) goto ok; msleep(1); } b43err(dev->wl, "RF sequence status timeout\n"); ok: b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); } static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, u16 value, u8 core, bool off) { int i; u8 index = fls(field); u8 addr, en_addr, val_addr; B43_WARN_ON(field & (~(1 << (index - 1)))); if (dev->phy.rev >= 3) { const struct nphy_rf_control_override_rev3 *rf_ctrl; for (i = 0; i < 2; i++) { if (index == 0 || index == 16) { b43err(dev->wl, "Unsupported RF Ctrl Override call\n"); return; } rf_ctrl = &tbl_rf_control_override_rev3[index - 1]; en_addr = B43_PHY_N((i == 0) ? rf_ctrl->en_addr0 : rf_ctrl->en_addr1); val_addr = B43_PHY_N((i == 0) ? 
rf_ctrl->val_addr0 : rf_ctrl->val_addr1); if (off) { b43_phy_mask(dev, en_addr, ~(field)); b43_phy_mask(dev, val_addr, ~(rf_ctrl->val_mask)); } else { if (core == 0 || ((1 << i) & core)) { b43_phy_set(dev, en_addr, field); b43_phy_maskset(dev, val_addr, ~(rf_ctrl->val_mask), (value << rf_ctrl->val_shift)); } } } } else { const struct nphy_rf_control_override_rev2 *rf_ctrl; if (off) { b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field)); value = 0; } else { b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field); } for (i = 0; i < 2; i++) { if (index <= 1 || index == 16) { b43err(dev->wl, "Unsupported RF Ctrl Override call\n"); return; } if (index == 2 || index == 10 || (index >= 13 && index <= 15)) { core = 1; } rf_ctrl = &tbl_rf_control_override_rev2[index - 2]; addr = B43_PHY_N((i == 0) ? rf_ctrl->addr0 : rf_ctrl->addr1); if ((1 << i) & core) b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask), (value << rf_ctrl->shift)); b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_START); udelay(1); b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE); } } } static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, u16 value, u8 core) { u8 i, j; u16 reg, tmp, val; B43_WARN_ON(dev->phy.rev < 3); B43_WARN_ON(field > 4); for (i = 0; i < 2; i++) { if ((core == 1 && i == 1) || (core == 2 && !i)) continue; reg = (i == 0) ? 
B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2; b43_phy_set(dev, reg, 0x400); switch (field) { case 0: b43_phy_write(dev, reg, 0); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); break; case 1: if (!i) { b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1, 0xFC3F, (value << 6)); b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1, 0xFFFE, 1); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_START); for (j = 0; j < 100; j++) { if (!(b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START)) { j = 0; break; } udelay(10); } if (j) b43err(dev->wl, "intc override timeout\n"); b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, 0xFFFE); } else { b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2, 0xFC3F, (value << 6)); b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 1); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX); for (j = 0; j < 100; j++) { if (!(b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX)) { j = 0; break; } udelay(10); } if (j) b43err(dev->wl, "intc override timeout\n"); b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE); } break; case 2: if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { tmp = 0x0020; val = value << 5; } else { tmp = 0x0010; val = value << 4; } b43_phy_maskset(dev, reg, ~tmp, val); break; case 3: if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { tmp = 0x0001; val = value; } else { tmp = 0x0004; val = value << 2; } b43_phy_maskset(dev, reg, ~tmp, val); break; case 4: if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { tmp = 0x0002; val = value << 1; } else { tmp = 0x0008; val = value << 3; } b43_phy_maskset(dev, reg, ~tmp, val); break; } } } static void b43_nphy_write_clip_detection(struct b43_wldev *dev, const u16 *clip_st) { b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]); b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]); } static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st) { clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES); clip_st[1] = b43_phy_read(dev, 
B43_NPHY_C2_CLIP1THRES); } static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) { u16 tmp; if (dev->dev->core_rev == 16) b43_mac_suspend(dev); tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL); tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN | B43_NPHY_CLASSCTL_WAITEDEN); tmp &= ~mask; tmp |= (val & mask); b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp); if (dev->dev->core_rev == 16) b43_mac_enable(dev); return tmp; } static void b43_nphy_reset_cca(struct b43_wldev *dev) { u16 bbcfg; b43_phy_force_clock(dev, 1); bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG); b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA); udelay(1); b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA); b43_phy_force_clock(dev, 0); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); } static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable) { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; if (enable) { static const u16 clip[] = { 0xFFFF, 0xFFFF }; if (nphy->deaf_count++ == 0) { nphy->classifier_state = b43_nphy_classifier(dev, 0, 0); b43_nphy_classifier(dev, 0x7, 0); b43_nphy_read_clip_detection(dev, nphy->clip_state); b43_nphy_write_clip_detection(dev, clip); } b43_nphy_reset_cca(dev); } else { if (--nphy->deaf_count == 0) { b43_nphy_classifier(dev, 0x7, nphy->classifier_state); b43_nphy_write_clip_detection(dev, nphy->clip_state); } } } static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; u8 i; s16 tmp; u16 data[4]; s16 gain[2]; u16 minmax[2]; static const u16 lna_gain[4] = { -2, 10, 19, 25 }; if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, 1); if (nphy->gain_boost) { if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { gain[0] = 6; gain[1] = 6; } else { tmp = 40370 - 315 * dev->phy.channel; gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1)); tmp = 23242 - 224 * dev->phy.channel; gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1)); } } else { 
gain[0] = 0; gain[1] = 0; } for (i = 0; i < 2; i++) { if (nphy->elna_gain_config) { data[0] = 19 + gain[i]; data[1] = 25 + gain[i]; data[2] = 25 + gain[i]; data[3] = 25 + gain[i]; } else { data[0] = lna_gain[0] + gain[i]; data[1] = lna_gain[1] + gain[i]; data[2] = lna_gain[2] + gain[i]; data[3] = lna_gain[3] + gain[i]; } b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data); minmax[i] = 23 + gain[i]; } b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN, minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT); b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN, minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT); if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, 0); } static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, u8 *events, u8 *delays, u8 length) { struct b43_phy_n *nphy = dev->phy.n; u8 i; u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F; u16 offset1 = cmd << 4; u16 offset2 = offset1 + 0x80; if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, true); b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events); b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays); for (i = length; i < 16; i++) { b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end); b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1); } if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, false); } static void b43_chantab_radio_2056_upload(struct b43_wldev *dev, const struct b43_nphy_channeltab_entry_rev3 *e) { b43_radio_write(dev, B2056_SYN_PLL_VCOCAL1, e->radio_syn_pll_vcocal1); b43_radio_write(dev, B2056_SYN_PLL_VCOCAL2, e->radio_syn_pll_vcocal2); b43_radio_write(dev, B2056_SYN_PLL_REFDIV, e->radio_syn_pll_refdiv); b43_radio_write(dev, B2056_SYN_PLL_MMD2, e->radio_syn_pll_mmd2); b43_radio_write(dev, B2056_SYN_PLL_MMD1, e->radio_syn_pll_mmd1); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, e->radio_syn_pll_loopfilter1); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, e->radio_syn_pll_loopfilter2); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER3, 
e->radio_syn_pll_loopfilter3); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, e->radio_syn_pll_loopfilter4); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER5, e->radio_syn_pll_loopfilter5); b43_radio_write(dev, B2056_SYN_RESERVED_ADDR27, e->radio_syn_reserved_addr27); b43_radio_write(dev, B2056_SYN_RESERVED_ADDR28, e->radio_syn_reserved_addr28); b43_radio_write(dev, B2056_SYN_RESERVED_ADDR29, e->radio_syn_reserved_addr29); b43_radio_write(dev, B2056_SYN_LOGEN_VCOBUF1, e->radio_syn_logen_vcobuf1); b43_radio_write(dev, B2056_SYN_LOGEN_MIXER2, e->radio_syn_logen_mixer2); b43_radio_write(dev, B2056_SYN_LOGEN_BUF3, e->radio_syn_logen_buf3); b43_radio_write(dev, B2056_SYN_LOGEN_BUF4, e->radio_syn_logen_buf4); b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA_TUNE, e->radio_rx0_lnaa_tune); b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG_TUNE, e->radio_rx0_lnag_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_INTPAA_BOOST_TUNE, e->radio_tx0_intpaa_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_INTPAG_BOOST_TUNE, e->radio_tx0_intpag_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_PADA_BOOST_TUNE, e->radio_tx0_pada_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_PADG_BOOST_TUNE, e->radio_tx0_padg_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_PGAA_BOOST_TUNE, e->radio_tx0_pgaa_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_PGAG_BOOST_TUNE, e->radio_tx0_pgag_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_MIXA_BOOST_TUNE, e->radio_tx0_mixa_boost_tune); b43_radio_write(dev, B2056_TX0 | B2056_TX_MIXG_BOOST_TUNE, e->radio_tx0_mixg_boost_tune); b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA_TUNE, e->radio_rx1_lnaa_tune); b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG_TUNE, e->radio_rx1_lnag_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_INTPAA_BOOST_TUNE, e->radio_tx1_intpaa_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_INTPAG_BOOST_TUNE, e->radio_tx1_intpag_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_PADA_BOOST_TUNE, 
e->radio_tx1_pada_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_PADG_BOOST_TUNE, e->radio_tx1_padg_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_PGAA_BOOST_TUNE, e->radio_tx1_pgaa_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_PGAG_BOOST_TUNE, e->radio_tx1_pgag_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_MIXA_BOOST_TUNE, e->radio_tx1_mixa_boost_tune); b43_radio_write(dev, B2056_TX1 | B2056_TX_MIXG_BOOST_TUNE, e->radio_tx1_mixg_boost_tune); } static void b43_radio_2056_setup(struct b43_wldev *dev, const struct b43_nphy_channeltab_entry_rev3 *e) { struct ssb_sprom *sprom = dev->dev->bus_sprom; enum ieee80211_band band = b43_current_band(dev->wl); u16 offset; u8 i; u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost; B43_WARN_ON(dev->phy.rev < 3); b43_chantab_radio_2056_upload(dev, e); b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ); if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR && b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); if (dev->dev->chip_id == 0x4716) { b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14); b43_radio_write(dev, B2056_SYN_PLL_CP2, 0); } else { b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0B); b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14); } } if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR && b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05); b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C); } if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) { for (i = 0; i < 2; i++) { offset = i ? 
B2056_TX1 : B2056_TX0; if (dev->phy.rev >= 5) { b43_radio_write(dev, offset | B2056_TX_PADG_IDAC, 0xcc); if (dev->dev->chip_id == 0x4716) { bias = 0x40; cbias = 0x45; pag_boost = 0x5; pgag_boost = 0x33; mixg_boost = 0x55; } else { bias = 0x25; cbias = 0x20; pag_boost = 0x4; pgag_boost = 0x03; mixg_boost = 0x65; } padg_boost = 0x77; b43_radio_write(dev, offset | B2056_TX_INTPAG_IMAIN_STAT, bias); b43_radio_write(dev, offset | B2056_TX_INTPAG_IAUX_STAT, bias); b43_radio_write(dev, offset | B2056_TX_INTPAG_CASCBIAS, cbias); b43_radio_write(dev, offset | B2056_TX_INTPAG_BOOST_TUNE, pag_boost); b43_radio_write(dev, offset | B2056_TX_PGAG_BOOST_TUNE, pgag_boost); b43_radio_write(dev, offset | B2056_TX_PADG_BOOST_TUNE, padg_boost); b43_radio_write(dev, offset | B2056_TX_MIXG_BOOST_TUNE, mixg_boost); } else { bias = dev->phy.is_40mhz ? 0x40 : 0x20; b43_radio_write(dev, offset | B2056_TX_INTPAG_IMAIN_STAT, bias); b43_radio_write(dev, offset | B2056_TX_INTPAG_IAUX_STAT, bias); b43_radio_write(dev, offset | B2056_TX_INTPAG_CASCBIAS, 0x30); } b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee); } } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) { } udelay(50); b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00); b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x38); b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x18); b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x38); b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x39); udelay(300); } static void b43_radio_init2056_pre(struct b43_wldev *dev) { b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU); b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_OEPORFORCE); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_OEPORFORCE); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_CHIP0PU); } static void b43_radio_init2056_post(struct b43_wldev *dev) { b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB); b43_radio_set(dev, B2056_SYN_COM_PU, 0x2); b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2); 
msleep(1); b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2); b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC); b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1); } static void b43_radio_init2056(struct b43_wldev *dev) { b43_radio_init2056_pre(dev); b2056_upload_inittabs(dev, 0, 0); b43_radio_init2056_post(dev); } static void b43_chantab_radio_upload(struct b43_wldev *dev, const struct b43_nphy_channeltab_entry_rev2 *e) { b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref); b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0); b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1); b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail); b43_read32(dev, B43_MMIO_MACCTL); b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1); b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2); b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1); b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1); b43_read32(dev, B43_MMIO_MACCTL); b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2); b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf); b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1); b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2); b43_read32(dev, B43_MMIO_MACCTL); b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune); b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune); b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1); b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn); b43_read32(dev, B43_MMIO_MACCTL); b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim); b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune); b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune); b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1); b43_read32(dev, B43_MMIO_MACCTL); b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn); b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim); } static void 
b43_radio_2055_setup(struct b43_wldev *dev, const struct b43_nphy_channeltab_entry_rev2 *e) { B43_WARN_ON(dev->phy.rev >= 3); b43_chantab_radio_upload(dev, e); udelay(50); b43_radio_write(dev, B2055_VCO_CAL10, 0x05); b43_radio_write(dev, B2055_VCO_CAL10, 0x45); b43_read32(dev, B43_MMIO_MACCTL); b43_radio_write(dev, B2055_VCO_CAL10, 0x65); udelay(300); } static void b43_radio_init2055_pre(struct b43_wldev *dev) { b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_PORFORCE); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_CHIP0PU | B43_NPHY_RFCTL_CMD_OEPORFORCE); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_PORFORCE); } static void b43_radio_init2055_post(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; struct ssb_sprom *sprom = dev->dev->bus_sprom; int i; u16 val; bool workaround = false; if (sprom->revision < 4) workaround = (dev->dev->board_vendor != PCI_VENDOR_ID_BROADCOM && dev->dev->board_type == 0x46D && dev->dev->board_rev >= 0x41); else workaround = !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS); b43_radio_mask(dev, B2055_MASTER1, 0xFFF3); if (workaround) { b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F); b43_radio_mask(dev, B2055_C2_RX_BB_REG, 0x7F); } b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0xFFC0, 0x2C); b43_radio_write(dev, B2055_CAL_MISC, 0x3C); b43_radio_mask(dev, B2055_CAL_MISC, 0xFFBE); b43_radio_set(dev, B2055_CAL_LPOCTL, 0x80); b43_radio_set(dev, B2055_CAL_MISC, 0x1); msleep(1); b43_radio_set(dev, B2055_CAL_MISC, 0x40); for (i = 0; i < 200; i++) { val = b43_radio_read(dev, B2055_CAL_COUT2); if (val & 0x80) { i = 0; break; } udelay(10); } if (i) b43err(dev->wl, "radio post init timeout\n"); b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F); b43_switch_channel(dev, dev->phy.channel); b43_radio_write(dev, B2055_C1_RX_BB_LPF, 0x9); b43_radio_write(dev, B2055_C2_RX_BB_LPF, 0x9); b43_radio_write(dev, B2055_C1_RX_BB_MIDACHP, 0x83); b43_radio_write(dev, B2055_C2_RX_BB_MIDACHP, 0x83); b43_radio_maskset(dev, 
B2055_C1_LNA_GAINBST, 0xFFF8, 0x6); b43_radio_maskset(dev, B2055_C2_LNA_GAINBST, 0xFFF8, 0x6); if (!nphy->gain_boost) { b43_radio_set(dev, B2055_C1_RX_RFSPC1, 0x2); b43_radio_set(dev, B2055_C2_RX_RFSPC1, 0x2); } else { b43_radio_mask(dev, B2055_C1_RX_RFSPC1, 0xFFFD); b43_radio_mask(dev, B2055_C2_RX_RFSPC1, 0xFFFD); } udelay(2); } static void b43_radio_init2055(struct b43_wldev *dev) { b43_radio_init2055_pre(dev); if (b43_status(dev) < B43_STAT_INITIALIZED) { b2055_upload_inittab(dev, 0, 0); } else { bool ghz5 = b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ; b2055_upload_inittab(dev, ghz5, 0); } b43_radio_init2055_post(dev); } static int b43_nphy_load_samples(struct b43_wldev *dev, struct b43_c32 *samples, u16 len) { struct b43_phy_n *nphy = dev->phy.n; u16 i; u32 *data; data = kzalloc(len * sizeof(u32), GFP_KERNEL); if (!data) { b43err(dev->wl, "allocation for samples loading failed\n"); return -ENOMEM; } if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, 1); for (i = 0; i < len; i++) { data[i] = (samples[i].i & 0x3FF << 10); data[i] |= samples[i].q & 0x3FF; } b43_ntab_write_bulk(dev, B43_NTAB32(17, 0), len, data); kfree(data); if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, 0); return 0; } static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max, bool test) { int i; u16 bw, len, rot, angle; struct b43_c32 *samples; bw = (dev->phy.is_40mhz) ? 
40 : 20; len = bw << 3; if (test) { if (b43_phy_read(dev, B43_NPHY_BBCFG) & B43_NPHY_BBCFG_RSTRX) bw = 82; else bw = 80; if (dev->phy.is_40mhz) bw <<= 1; len = bw << 1; } samples = kcalloc(len, sizeof(struct b43_c32), GFP_KERNEL); if (!samples) { b43err(dev->wl, "allocation for samples generation failed\n"); return 0; } rot = (((freq * 36) / bw) << 16) / 100; angle = 0; for (i = 0; i < len; i++) { samples[i] = b43_cordic(angle); angle += rot; samples[i].q = CORDIC_CONVERT(samples[i].q * max); samples[i].i = CORDIC_CONVERT(samples[i].i * max); } i = b43_nphy_load_samples(dev, samples, len); kfree(samples); return (i < 0) ? 0 : len; } static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops, u16 wait, bool iqmode, bool dac_test) { struct b43_phy_n *nphy = dev->phy.n; int i; u16 seq_mode; u32 tmp; if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, true); if ((nphy->bb_mult_save & 0x80000000) == 0) { tmp = b43_ntab_read(dev, B43_NTAB16(15, 87)); nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000; } if (!dev->phy.is_40mhz) tmp = 0x6464; else tmp = 0x4747; b43_ntab_write(dev, B43_NTAB16(15, 87), tmp); if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, false); b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1)); if (loops != 0xFFFF) b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, (loops - 1)); else b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, loops); b43_phy_write(dev, B43_NPHY_SAMP_WAITCNT, wait); seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE); b43_phy_set(dev, B43_NPHY_RFSEQMODE, B43_NPHY_RFSEQMODE_CAOVER); if (iqmode) { b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF); b43_phy_set(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8000); } else { if (dac_test) b43_phy_write(dev, B43_NPHY_SAMP_CMD, 5); else b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1); } for (i = 0; i < 100; i++) { if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & 1)) { i = 0; break; } udelay(10); } if (i) b43err(dev->wl, "run samples timeout\n"); b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); } 
static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale, s8 offset, u8 core, u8 rail, enum b43_nphy_rssi_type type) { u16 tmp; bool core1or5 = (core == 1) || (core == 5); bool core2or5 = (core == 2) || (core == 5); offset = clamp_val(offset, -32, 31); tmp = ((scale & 0x3F) << 8) | (offset & 0x3F); if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z)) b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp); if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z)) b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp); if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z)) b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp); if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z)) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp); if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_X)) b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp); if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_X)) b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp); if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_X)) b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp); if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_X)) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp); if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y)) b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp); if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y)) b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp); if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y)) b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp); if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y)) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp); if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD)) b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp); if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_TBD)) b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp); if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD)) b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp); if (core2or5 && (rail == 1) && (type 
== B43_NPHY_RSSI_TBD)) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp); if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET)) b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp); if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET)) b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp); if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET)) b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp); if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET)) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp); if (core1or5 && (type == B43_NPHY_RSSI_TSSI_I)) b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp); if (core2or5 && (type == B43_NPHY_RSSI_TSSI_I)) b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp); if (core1or5 && (type == B43_NPHY_RSSI_TSSI_Q)) b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp); if (core2or5 && (type == B43_NPHY_RSSI_TSSI_Q)) b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp); } static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type) { u8 i; u16 reg, val; if (code == 0) { b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, 0xFDFF); b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, 0xFDFF); b43_phy_mask(dev, B43_NPHY_AFECTL_C1, 0xFCFF); b43_phy_mask(dev, B43_NPHY_AFECTL_C2, 0xFCFF); b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S0, 0xFFDF); b43_phy_mask(dev, B43_NPHY_TXF_40CO_B32S1, 0xFFDF); b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0xFFC3); b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0xFFC3); } else { for (i = 0; i < 2; i++) { if ((code == 1 && i == 1) || (code == 2 && !i)) continue; reg = (i == 0) ? B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER; b43_phy_maskset(dev, reg, 0xFDFF, 0x0200); if (type < 3) { reg = (i == 0) ? B43_NPHY_AFECTL_C1 : B43_NPHY_AFECTL_C2; b43_phy_maskset(dev, reg, 0xFCFF, 0); reg = (i == 0) ? B43_NPHY_RFCTL_LUT_TRSW_UP1 : B43_NPHY_RFCTL_LUT_TRSW_UP2; b43_phy_maskset(dev, reg, 0xFFC3, 0); if (type == 0) val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 
4 : 8; else if (type == 1) val = 16; else val = 32; b43_phy_set(dev, reg, val); reg = (i == 0) ? B43_NPHY_TXF_40CO_B1S0 : B43_NPHY_TXF_40CO_B32S1; b43_phy_set(dev, reg, 0x0020); } else { if (type == 6) val = 0x0100; else if (type == 3) val = 0x0200; else val = 0x0300; reg = (i == 0) ? B43_NPHY_AFECTL_C1 : B43_NPHY_AFECTL_C2; b43_phy_maskset(dev, reg, 0xFCFF, val); b43_phy_maskset(dev, reg, 0xF3FF, val << 2); if (type != 3 && type != 6) { enum ieee80211_band band = b43_current_band(dev->wl); if (b43_nphy_ipa(dev)) val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE; else val = 0x11; reg = (i == 0) ? 0x2000 : 0x3000; reg |= B2055_PADDRV; b43_radio_write16(dev, reg, val); reg = (i == 0) ? B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER; b43_phy_set(dev, reg, 0x0200); } } } } } static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type) { u16 val; if (type < 3) val = 0; else if (type == 6) val = 1; else if (type == 3) val = 2; else val = 3; val = (val << 12) | (val << 14); b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val); b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val); if (type < 3) { b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF, (type + 1) << 4); b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF, (type + 1) << 4); } if (code == 0) { b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000); if (type < 3) { b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~(B43_NPHY_RFCTL_CMD_RXEN | B43_NPHY_RFCTL_CMD_CORESEL)); b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(0x1 << 12 | 0x1 << 5 | 0x1 << 1 | 0x1)); b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_START); udelay(20); b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1); } } else { b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000); if (type < 3) { b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, ~(B43_NPHY_RFCTL_CMD_RXEN | B43_NPHY_RFCTL_CMD_CORESEL), (B43_NPHY_RFCTL_CMD_RXEN | code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT)); b43_phy_set(dev, B43_NPHY_RFCTL_OVER, (0x1 << 12 | 0x1 << 5 | 0x1 << 1 | 0x1)); b43_phy_set(dev, 
B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_START); udelay(20); b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1); } } } static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type) { if (dev->phy.rev >= 3) b43_nphy_rev3_rssi_select(dev, code, type); else b43_nphy_rev2_rssi_select(dev, code, type); } static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf) { int i; for (i = 0; i < 2; i++) { if (type == 2) { if (i == 0) { b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM, 0xFC, buf[0]); b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5, 0xFC, buf[1]); } else { b43_radio_maskset(dev, B2055_C2_B0NB_RSSIVCM, 0xFC, buf[2 * i]); b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5, 0xFC, buf[2 * i + 1]); } } else { if (i == 0) b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5, 0xF3, buf[0] << 2); else b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5, 0xF3, buf[2 * i + 1] << 2); } } } static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf, u8 nsamp) { int i; int out; u16 save_regs_phy[9]; u16 s[2]; if (dev->phy.rev >= 3) { save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1); save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2); save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1); save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0); save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1); save_regs_phy[8] = 0; } else { save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_RFCTL_CMD); save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER); save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1); save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2); 
save_regs_phy[7] = 0; save_regs_phy[8] = 0; } b43_nphy_rssi_select(dev, 5, type); if (dev->phy.rev < 2) { save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL); b43_phy_write(dev, B43_NPHY_GPIO_SEL, 5); } for (i = 0; i < 4; i++) buf[i] = 0; for (i = 0; i < nsamp; i++) { if (dev->phy.rev < 2) { s[0] = b43_phy_read(dev, B43_NPHY_GPIO_LOOUT); s[1] = b43_phy_read(dev, B43_NPHY_GPIO_HIOUT); } else { s[0] = b43_phy_read(dev, B43_NPHY_RSSI1); s[1] = b43_phy_read(dev, B43_NPHY_RSSI2); } buf[0] += ((s8)((s[0] & 0x3F) << 2)) >> 2; buf[1] += ((s8)(((s[0] >> 8) & 0x3F) << 2)) >> 2; buf[2] += ((s8)((s[1] & 0x3F) << 2)) >> 2; buf[3] += ((s8)(((s[1] >> 8) & 0x3F) << 2)) >> 2; } out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 | (buf[2] & 0xFF) << 8 | (buf[3] & 0xFF); if (dev->phy.rev < 2) b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]); if (dev->phy.rev >= 3) { b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]); b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]); b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, save_regs_phy[2]); b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, save_regs_phy[3]); b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]); b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]); b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]); b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]); } else { b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]); b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]); b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]); b43_phy_write(dev, B43_NPHY_RFCTL_CMD, save_regs_phy[3]); b43_phy_write(dev, B43_NPHY_RFCTL_OVER, save_regs_phy[4]); b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, save_regs_phy[5]); b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, save_regs_phy[6]); } return out; } static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; u16 saved_regs_phy_rfctl[2]; u16 saved_regs_phy[13]; u16 regs_to_store[] = { 
B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER, B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2, B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER, B43_NPHY_TXF_40CO_B1S0, B43_NPHY_TXF_40CO_B32S1, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2, B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2 }; u16 class; u16 clip_state[2]; u16 clip_off[2] = { 0xFFFF, 0xFFFF }; u8 vcm_final = 0; s8 offset[4]; s32 results[8][4] = { }; s32 results_min[4] = { }; s32 poll_results[4] = { }; u16 *rssical_radio_regs = NULL; u16 *rssical_phy_regs = NULL; u16 r; u8 rx_core_state; u8 core, i, j; class = b43_nphy_classifier(dev, 0, 0); b43_nphy_classifier(dev, 7, 4); b43_nphy_read_clip_detection(dev, clip_state); b43_nphy_write_clip_detection(dev, clip_off); saved_regs_phy_rfctl[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); saved_regs_phy_rfctl[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); for (i = 0; i < ARRAY_SIZE(regs_to_store); i++) saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]); b43_nphy_rf_control_intc_override(dev, 0, 0, 7); b43_nphy_rf_control_intc_override(dev, 1, 1, 7); b43_nphy_rf_control_override(dev, 0x1, 0, 0, false); b43_nphy_rf_control_override(dev, 0x2, 1, 0, false); b43_nphy_rf_control_override(dev, 0x80, 1, 0, false); b43_nphy_rf_control_override(dev, 0x40, 1, 0, false); if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { b43_nphy_rf_control_override(dev, 0x20, 0, 0, false); b43_nphy_rf_control_override(dev, 0x10, 1, 0, false); } else { b43_nphy_rf_control_override(dev, 0x10, 0, 0, false); b43_nphy_rf_control_override(dev, 0x20, 1, 0, false); } rx_core_state = b43_nphy_get_rx_core_state(dev); for (core = 0; core < 2; core++) { if (!(rx_core_state & (1 << core))) continue; r = core ? 
B2056_RX1 : B2056_RX0; b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 0, 2); b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, 2); for (i = 0; i < 8; i++) { b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3, i << 2); b43_nphy_poll_rssi(dev, 2, results[i], 8); } for (i = 0; i < 4; i++) { s32 curr; s32 mind = 40; s32 minpoll = 249; u8 minvcm = 0; if (2 * core != i) continue; for (j = 0; j < 8; j++) { curr = results[j][i] * results[j][i] + results[j][i + 1] * results[j][i]; if (curr < mind) { mind = curr; minvcm = j; } if (results[j][i] < minpoll) minpoll = results[j][i]; } vcm_final = minvcm; results_min[i] = minpoll; } b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3, vcm_final << 2); for (i = 0; i < 4; i++) { if (core != i / 2) continue; offset[i] = -results[vcm_final][i]; if (offset[i] < 0) offset[i] = -((abs(offset[i]) + 4) / 8); else offset[i] = (offset[i] + 4) / 8; if (results_min[i] == 248) offset[i] = -32; b43_nphy_scale_offset_rssi(dev, 0, offset[i], (i / 2 == 0) ? 1 : 2, (i % 2 == 0) ? 
0 : 1, 2); } } for (core = 0; core < 2; core++) { if (!(rx_core_state & (1 << core))) continue; for (i = 0; i < 2; i++) { b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 0, i); b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, i); b43_nphy_poll_rssi(dev, i, poll_results, 8); for (j = 0; j < 4; j++) { if (j / 2 == core) offset[j] = 232 - poll_results[j]; if (offset[j] < 0) offset[j] = -(abs(offset[j] + 4) / 8); else offset[j] = (offset[j] + 4) / 8; b43_nphy_scale_offset_rssi(dev, 0, offset[2 * core], core + 1, j % 2, i); } } } b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, saved_regs_phy_rfctl[0]); b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, saved_regs_phy_rfctl[1]); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); b43_phy_set(dev, B43_NPHY_TXF_40CO_B1S1, 0x1); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_START); b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1); b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1); b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX); b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1); for (i = 0; i < ARRAY_SIZE(regs_to_store); i++) b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]); if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G; rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G; } else { rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G; rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G; } rssical_radio_regs[0] = b43_radio_read(dev, 0x602B); rssical_radio_regs[0] = b43_radio_read(dev, 0x702B); rssical_phy_regs[0] = b43_phy_read(dev, B43_NPHY_RSSIMC_0I_RSSI_Z); rssical_phy_regs[1] = b43_phy_read(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z); rssical_phy_regs[2] = b43_phy_read(dev, B43_NPHY_RSSIMC_1I_RSSI_Z); rssical_phy_regs[3] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z); rssical_phy_regs[4] = b43_phy_read(dev, B43_NPHY_RSSIMC_0I_RSSI_X); rssical_phy_regs[5] = b43_phy_read(dev, B43_NPHY_RSSIMC_0Q_RSSI_X); rssical_phy_regs[6] = 
b43_phy_read(dev, B43_NPHY_RSSIMC_1I_RSSI_X); rssical_phy_regs[7] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_X); rssical_phy_regs[8] = b43_phy_read(dev, B43_NPHY_RSSIMC_0I_RSSI_Y); rssical_phy_regs[9] = b43_phy_read(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y); rssical_phy_regs[10] = b43_phy_read(dev, B43_NPHY_RSSIMC_1I_RSSI_Y); rssical_phy_regs[11] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y); if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) nphy->rssical_chanspec_2G.center_freq = dev->phy.channel_freq; else nphy->rssical_chanspec_5G.center_freq = dev->phy.channel_freq; b43_nphy_classifier(dev, 7, class); b43_nphy_write_clip_detection(dev, clip_state); } static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) { int i, j; u8 state[4]; u8 code, val; u16 class, override; u8 regs_save_radio[2]; u16 regs_save_phy[2]; s8 offset[4]; u8 core; u8 rail; u16 clip_state[2]; u16 clip_off[2] = { 0xFFFF, 0xFFFF }; s32 results_min[4] = { }; u8 vcm_final[4] = { }; s32 results[4][4] = { }; s32 miniq[4][2] = { }; if (type == 2) { code = 0; val = 6; } else if (type < 2) { code = 25; val = 4; } else { B43_WARN_ON(1); return; } class = b43_nphy_classifier(dev, 0, 0); b43_nphy_classifier(dev, 7, 4); b43_nphy_read_clip_detection(dev, clip_state); b43_nphy_write_clip_detection(dev, clip_off); if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) override = 0x140; else override = 0x110; regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX); b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override); b43_radio_write16(dev, B2055_C1_PD_RXTX, val); regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX); b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override); b43_radio_write16(dev, B2055_C2_PD_RXTX, val); state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07; state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07; b43_radio_mask(dev, 
B2055_C1_PD_RSSIMISC, 0xF8); b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8); state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07; state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07; b43_nphy_rssi_select(dev, 5, type); b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type); b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 1, type); for (i = 0; i < 4; i++) { u8 tmp[4]; for (j = 0; j < 4; j++) tmp[j] = i; if (type != 1) b43_nphy_set_rssi_2055_vcm(dev, type, tmp); b43_nphy_poll_rssi(dev, type, results[i], 8); if (type < 2) for (j = 0; j < 2; j++) miniq[i][j] = min(results[i][2 * j], results[i][2 * j + 1]); } for (i = 0; i < 4; i++) { s32 mind = 40; u8 minvcm = 0; s32 minpoll = 249; s32 curr; for (j = 0; j < 4; j++) { if (type == 2) curr = abs(results[j][i]); else curr = abs(miniq[j][i / 2] - code * 8); if (curr < mind) { mind = curr; minvcm = j; } if (results[j][i] < minpoll) minpoll = results[j][i]; } results_min[i] = minpoll; vcm_final[i] = minvcm; } if (type != 1) b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final); for (i = 0; i < 4; i++) { offset[i] = (code * 8) - results[vcm_final[i]][i]; if (offset[i] < 0) offset[i] = -((abs(offset[i]) + 4) / 8); else offset[i] = (offset[i] + 4) / 8; if (results_min[i] == 248) offset[i] = code - 32; core = (i / 2) ? 2 : 1; rail = (i % 2) ? 
1 : 0; b43_nphy_scale_offset_rssi(dev, 0, offset[i], core, rail, type); } b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]); b43_radio_maskset(dev, B2055_C2_PD_RSSIMISC, 0xF8, state[1]); switch (state[2]) { case 1: b43_nphy_rssi_select(dev, 1, 2); break; case 4: b43_nphy_rssi_select(dev, 1, 0); break; case 2: b43_nphy_rssi_select(dev, 1, 1); break; default: b43_nphy_rssi_select(dev, 1, 1); break; } switch (state[3]) { case 1: b43_nphy_rssi_select(dev, 2, 2); break; case 4: b43_nphy_rssi_select(dev, 2, 0); break; default: b43_nphy_rssi_select(dev, 2, 1); break; } b43_nphy_rssi_select(dev, 0, type); b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]); b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]); b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]); b43_radio_write16(dev, B2055_C2_PD_RXTX, regs_save_radio[1]); b43_nphy_classifier(dev, 7, class); b43_nphy_write_clip_detection(dev, clip_state); b43_nphy_reset_cca(dev); } static void b43_nphy_rssi_cal(struct b43_wldev *dev) { if (dev->phy.rev >= 3) { b43_nphy_rev3_rssi_cal(dev); } else { b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Z); b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_X); b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Y); } } static void b43_nphy_gain_ctl_workarounds_rev3plus(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; bool ghz5; bool ext_lna; u16 rssi_gain; struct nphy_gain_ctl_workaround_entry *e; u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 }; u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 }; ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ; ext_lna = ghz5 ? 
sprom->boardflags_hi & B43_BFH_EXTLNA_5GHZ : sprom->boardflags_lo & B43_BFL_EXTLNA; e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna); if (ghz5 && dev->phy.rev >= 5) rssi_gain = 0x90; else rssi_gain = 0x50; b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040); b43_phy_set(dev, B43_NPHY_C1_CGAINI, B43_NPHY_C1_CGAINI_CL2DETECT); b43_phy_set(dev, B43_NPHY_C2_CGAINI, B43_NPHY_C2_CGAINI_CL2DETECT); b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC, 0x17); b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC, 0x17); b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0); b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0); b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00); b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00); b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN, rssi_gain); b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN, rssi_gain); b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC, 0x17); b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC, 0x17); b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF); b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF); b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain); b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain); b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain); b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain); b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db); b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db); b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits); b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits); b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain); b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain); b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits); b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits); b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain); b43_phy_write(dev, 0x2A7, e->init_gain); 
b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2, e->rfseq_init); b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain); b43_phy_write(dev, 0x2A9, e->cliphi_gain); b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain); b43_phy_write(dev, 0x2AB, e->clipmd_gain); b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain); b43_phy_write(dev, 0x2AD, e->cliplo_gain); b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin); b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl); b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu); b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip); b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip); b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip); b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip); b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); } static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; u8 i, j; u8 code; u16 tmp; u8 rfseq_events[3] = { 6, 8, 7 }; u8 rfseq_delays[3] = { 10, 30, 1 }; b43_phy_set(dev, B43_NPHY_C1_CGAINI, B43_NPHY_C1_CGAINI_CL2DETECT); b43_phy_set(dev, B43_NPHY_C2_CGAINI, B43_NPHY_C2_CGAINI_CL2DETECT); b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84); b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84); if (!dev->phy.is_40mhz) { b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B); b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B); b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009); b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009); } b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, 21); b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, 21); if (!dev->phy.is_40mhz) { b43_phy_maskset(dev, B43_NPHY_C1_CGAINI, ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1); b43_phy_maskset(dev, B43_NPHY_C2_CGAINI, ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1); b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI, ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 
			/* NOTE(review): continuation of the maskset call split
			 * across the hunk boundary above. */
			0x1);
		b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
			~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
	}

	b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);

	/* Choose the HPVGA2 initial-gain code; with gain boost enabled,
	 * 2.4 GHz @ 40 MHz uses 4, otherwise 5; without boost, 6/7. */
	if (nphy->gain_boost) {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
		    dev->phy.is_40mhz)
			code = 4;
		else
			code = 5;
	} else {
		code = dev->phy.is_40mhz ? 6 : 7;
	}

	/* Set initial gain for both cores. */
	b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN, ~B43_NPHY_C1_INITGAIN_HPVGA2,
			code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
	b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN, ~B43_NPHY_C2_INITGAIN_HPVGA2,
			code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);

	/* Raw PHY table write: set address once, stream 4 data words. */
	b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
	for (i = 0; i < 4; i++)
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, (code << 8 | 0x7C));

	b43_nphy_adjust_lna_gain_table(dev);

	/* Extra table entries for boards with external-LNA gain config. */
	if (nphy->elna_gain_config) {
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
		for (i = 0; i < 4; i++)
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
					(code << 8 | 0x74));
	}

	/* rev2 only: fill four table regions with a small ramp. */
	if (dev->phy.rev == 2) {
		for (i = 0; i < 4; i++) {
			b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
					(0x0400 * i) + 0x0020);
			for (j = 0; j < 21; j++) {
				tmp = j * (i < 2 ? 3 : 1);
				b43_phy_write(dev, B43_NPHY_TABLE_DATALO, tmp);
			}
		}
	}

	b43_nphy_set_rf_sequence(dev, 5, rfseq_events, rfseq_delays, 3);
	b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
		~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
		0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
		b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
}

/* Dispatch to the revision-specific gain-control workarounds. */
static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
{
	if (dev->phy.rev >= 3)
		b43_nphy_gain_ctl_workarounds_rev3plus(dev);
	else
		b43_nphy_gain_ctl_workarounds_rev1_2(dev);
}

/*
 * General PHY workarounds for rev3+: RF sequencing tables, phase
 * tracking coefficients, mixer biases and board-specific fixups.
 */
static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	struct ssb_sprom *sprom = dev->dev->bus_sprom;

	/* TX-to-RX and RX-to-TX RF sequencer event/delay programs; the
	 * "_ipa" variants are used with the internal power amplifier. */
	u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
	u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
	u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
					0x1F };
	u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
	u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
	u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
	u16 tmp16;
	u32 tmp32;

	b43_phy_write(dev, 0x23f, 0x1f8);
	b43_phy_write(dev, 0x240, 0x1f8);

	/* Clear the top byte of table (30, 0). */
	tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
	tmp32 &= 0xffffff;
	b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);

	/* Phase-tracking filter coefficients. */
	b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
	b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
	b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
	b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
	b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
	b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);

	b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
	b43_phy_write(dev, 0x2AE, 0x000C);

	/* Program the TX-to-RX sequence; RX-to-TX for internal-PA boards. */
	b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays,
				 ARRAY_SIZE(tx2rx_events));
	if (b43_nphy_ipa(dev))
		b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
				rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));

	/* Asymmetric RX/TX chain configurations need an adjusted
	 * RX-to-TX sequence. */
	if (nphy->hw_phyrxchain != 3 &&
	    nphy->hw_phyrxchain != nphy->hw_phytxchain) {
		if (b43_nphy_ipa(dev)) {
			rx2tx_delays[5] = 59;
			/* Internal-PA boards: stretch/truncate the tail of
			 * the RX-to-TX sequence (continues the if () above). */
			rx2tx_delays[6] = 1;
			rx2tx_events[7] = 0x1F;
		}
		b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
					 ARRAY_SIZE(rx2tx_events));
	}

	/* Energy-drop timeout: short on 2.4 GHz, long on 5 GHz. */
	tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
		0x2 : 0x9C40;
	b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);

	b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);

	b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
	b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);

	b43_nphy_gain_ctl_workarounds(dev);

	b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
	b43_ntab_write(dev, B43_NTAB16(8, 16), 2);

	/* Mixer bias programming on both RX radio paths. */
	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);

	/* Boards with the A-band/G-band PLL workaround flag (matching the
	 * current band) get a reduced pattern in tables (30, 1..3). */
	if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
	     b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
	    (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
	     b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
		tmp32 = 0x00088888;
	else
		tmp32 = 0x88888888;
	b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
	b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
	b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);

	if (dev->phy.rev == 4 &&
	    b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
		b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
				0x70);
		b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
				0x70);
	}

	/* Unnamed PHY registers 0x224..0x22f: fixed filter-like values. */
	b43_phy_write(dev, 0x224, 0x03eb);
	b43_phy_write(dev, 0x225, 0x03eb);
	b43_phy_write(dev, 0x226, 0x0341);
	b43_phy_write(dev, 0x227, 0x0341);
	b43_phy_write(dev, 0x228, 0x042b);
	b43_phy_write(dev, 0x229, 0x042b);
	b43_phy_write(dev, 0x22a, 0x0381);
	b43_phy_write(dev, 0x22b, 0x0381);
	b43_phy_write(dev, 0x22c, 0x042b);
	b43_phy_write(dev, 0x22d, 0x042b);
	b43_phy_write(dev, 0x22e, 0x0381);
	b43_phy_write(dev, 0x22f, 0x0381);
}

/*
 * General PHY workarounds for rev1/rev2: TX spare-register tweaks,
 * noise-variance table entries, TR-switch lookup values and the two
 * main RF sequencer programs.
 */
static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
{
	struct ssb_sprom *sprom = dev->dev->bus_sprom;
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_n *nphy = phy->n;

	u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
	u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
	u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
	u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };

	/* 5 GHz power-gain boards clear bit 3 of the TX spare register;
	 * everyone else sets it. */
	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
	    nphy->band5g_pwrgain) {
		b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
		b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
	} else {
		b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
		b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
	}

	b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
	b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
	b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
	b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);

	if (dev->phy.rev < 2) {
		b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
		b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
		b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
		b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
		b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
		b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
	}

	/* TR-switch lookup values. */
	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);

	/* Board-specific delay fixups (skewed FEM, board type 0x8B). */
	if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
	    dev->dev->board_type == 0x8B) {
		delays1[0] = 0x1;
		delays1[5] = 0x14;
	}
	b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
	b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
	/* (continuation of b43_nphy_workarounds_rev1_2) */
	b43_nphy_gain_ctl_workarounds(dev);

	if (dev->phy.rev < 2) {
		/* Mirror a set RXCTL bit into the MAC host flags. */
		if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
			b43_hf_write(dev, b43_hf_read(dev) |
					B43_HF_MLADVW);
	} else if (dev->phy.rev == 2) {
		b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
		b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
	}

	if (dev->phy.rev < 2)
		b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
				~B43_NPHY_SCRAM_SIGCTL_SCM);

	/* Set phase track alpha and beta. */
	b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
	b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
	b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
	b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
	b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
	b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);

	b43_phy_mask(dev, B43_NPHY_PIL_DW1,
			~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);

	if (dev->phy.rev == 2)
		b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
				B43_NPHY_FINERX2_CGC_DECGC);
}

/*
 * Top-level PHY workarounds: set the classifier for the current band,
 * flip the ADC I/Q sense, then run the revision-specific workarounds.
 * Wrapped in stay-in-carrier-search when hang avoidance is enabled.
 */
static void b43_nphy_workarounds(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_n *nphy = phy->n;

	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
		b43_nphy_classifier(dev, 1, 0);
	else
		b43_nphy_classifier(dev, 1, 1);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 1);

	b43_phy_set(dev, B43_NPHY_IQFLIP,
		    B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);

	if (dev->phy.rev >= 3)
		b43_nphy_workarounds_rev3plus(dev);
	else
		b43_nphy_workarounds_rev1_2(dev);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 0);
}

/*
 * Load and start a single-tone sample playback at the given frequency
 * and amplitude. Returns 0 on success, -1 if sample loading failed.
 */
static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
			    bool iqmode, bool dac_test)
{
	u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
	if (samp == 0)
		return -1;
	b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
	return 0;
}

/*
 * Apply the configured TX/RX chain selection: 0 -> core 1 only,
 * 1 -> core 2 only, anything else -> both cores (no override).
 */
static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	bool override = false;
	u16 chain = 0x33;

	if (nphy->txrx_chain == 0) {
		chain = 0x11;
		override = true;
	} else if (nphy->txrx_chain == 1) {
		chain = 0x22;
		override = true;
	}

	b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
			~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
			chain);

	if (override)
		b43_phy_set(dev, B43_NPHY_RFSEQMODE,
				B43_NPHY_RFSEQMODE_CAOVER);
	else
		b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
				~B43_NPHY_RFSEQMODE_CAOVER);
}

/*
 * Stop any running sample/tone playback and restore the saved baseband
 * multiplier (flagged by bit 31 of bb_mult_save).
 */
static void b43_nphy_stop_playback(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	u16 tmp;

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 1);

	tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
	if (tmp & 0x1)
		b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
	else if (tmp & 0x2)
		b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);

	b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);

	if (nphy->bb_mult_save & 0x80000000) {
		tmp = nphy->bb_mult_save & 0xFFFF;
		b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
		nphy->bb_mult_save = 0;
	}

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 0);
}

/*
 * Derive the IQ-calibration gain parameters for one core from the
 * target gains: directly on rev3+, or via a lookup table on rev1/2.
 */
static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
					struct nphy_txgains target,
					struct nphy_iqcal_params *params)
{
	int i, j, indx;
	u16 gain;

	if (dev->phy.rev >= 3) {
		params->txgm = target.txgm[core];
		params->pga = target.pga[core];
		params->pad = target.pad[core];
		params->ipa = target.ipa[core];
		/* Pack the four gain fields into one cal-gain word. */
		params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
					(params->pad << 4) | (params->ipa);
		for (j = 0; j < 5; j++)
			params->ncorr[j] = 0x79;
	} else {
		gain = (target.pad[core]) | (target.pga[core] << 4) |
			(target.txgm[core] << 8);

		indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
			/* (continuation: band index for the gain-param table) */
			1 : 0;
		/* Find the matching packed gain; clamp to the last entry
		 * if no exact match is found. */
		for (i = 0; i < 9; i++)
			if (tbl_iqcal_gainparams[indx][i][0] == gain)
				break;
		i = min(i, 8);

		params->txgm = tbl_iqcal_gainparams[indx][i][1];
		params->pga = tbl_iqcal_gainparams[indx][i][2];
		params->pad = tbl_iqcal_gainparams[indx][i][3];
		params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
					(params->pad << 2);
		for (j = 0; j < 4; j++)
			params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
	}
}

/* N-PHY has no manual RX antenna selection: intentionally a no-op. */
void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
{
}

/* TX power adjustment hook: intentionally a no-op on N-PHY. */
static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
{
}

/* TX power recalculation hook: nothing to recompute here. */
static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
							bool ignore_tssi)
{
	return B43_TXPWR_RES_DONE;
}

/*
 * Enable or disable hardware TX power control. Disabling saves the
 * current power indices (rev3+), clears the adjustment tables and takes
 * manual control of the AFE; enabling restores the reverse.
 */
static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
{
	struct b43_phy_n *nphy = dev->phy.n;
	u8 i;
	u16 bmask, val, tmp;
	enum ieee80211_band band = b43_current_band(dev->wl);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 1);

	nphy->txpwrctrl = enable;
	if (!enable) {
		/* Snapshot the running power indices before turning HW
		 * control off (rev3+ only, and only if it was active). */
		if (dev->phy.rev >= 3 &&
		    (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) &
		     (B43_NPHY_TXPCTL_CMD_COEFF |
		      B43_NPHY_TXPCTL_CMD_HWPCTLEN |
		      B43_NPHY_TXPCTL_CMD_PCTLEN))) {
			nphy->tx_pwr_idx[0] = b43_phy_read(dev,
						B43_NPHY_C1_TXPCTL_STAT) & 0x7f;
			nphy->tx_pwr_idx[1] = b43_phy_read(dev,
						B43_NPHY_C2_TXPCTL_STAT) & 0x7f;
		}

		/* Zero the 84-entry power adjustment tables. */
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
		for (i = 0; i < 84; i++)
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);

		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40);
		for (i = 0; i < 84; i++)
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);

		tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
		if (dev->phy.rev >= 3)
			tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN;
		b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp);

		/* Take manual control of the AFE gain. */
		if (dev->phy.rev >= 3) {
			b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
			b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
		} else {
			b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
		}

		if (dev->phy.rev == 2)
			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
				~B43_NPHY_BPHY_CTL3_SCALE, 0x53);
		else if (dev->phy.rev < 2)
			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
				~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);

		if (dev->phy.rev < 2 && dev->phy.is_40mhz)
			b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW);
	} else {
		/* Re-enable: upload the adjusted power tables and turn
		 * the hardware control bits back on. */
		b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84,
				    nphy->adj_pwr_tbl);
		b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84,
				    nphy->adj_pwr_tbl);

		bmask = B43_NPHY_TXPCTL_CMD_COEFF |
			B43_NPHY_TXPCTL_CMD_HWPCTLEN;
		/* wl does useless check for "enable" param here */
		val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
		if (dev->phy.rev >= 3) {
			bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN;
			/* NOTE(review): "val" is always non-zero at this
			 * point, so this condition is trivially true. */
			if (val)
				val |= B43_NPHY_TXPCTL_CMD_PCTLEN;
		}
		b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);

		if (band == IEEE80211_BAND_5GHZ) {
			b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
					~B43_NPHY_TXPCTL_CMD_INIT, 0x64);
			if (dev->phy.rev > 1)
				b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
						~B43_NPHY_TXPCTL_INIT_PIDXI1,
						0x64);
		}

		/* Restore the saved power indices (128 == "not saved"). */
		if (dev->phy.rev >= 3) {
			if (nphy->tx_pwr_idx[0] != 128 &&
			    nphy->tx_pwr_idx[1] != 128) {
				b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
						~B43_NPHY_TXPCTL_CMD_INIT,
						nphy->tx_pwr_idx[0]);
				if (dev->phy.rev > 1)
					b43_phy_maskset(dev,
						B43_NPHY_TXPCTL_INIT,
						~0xff, nphy->tx_pwr_idx[1]);
			}
		}

		/* Give the AFE gain back to hardware control. */
		if (dev->phy.rev >= 3) {
			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100);
			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100);
		} else {
			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000);
		}

		if (dev->phy.rev == 2)
			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b);
		else if (dev->phy.rev < 2)
			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40);

		if (dev->phy.rev < 2 && dev->phy.is_40mhz)
			b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW);

		if (b43_nphy_ipa(dev)) {
			b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4);
			b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4);
		}
	}

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 0);
}

/*
 * Fix the TX power by programming a fixed gain-table index per core
 * (the index depends on PHY revision, SPROM revision and band/freq).
 */
static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	struct ssb_sprom *sprom = dev->dev->bus_sprom;

	u8 txpi[2], bbmult, i;
	u16 tmp, radio_gain, dac_gain;
	u16 freq = dev->phy.channel_freq;
	u32 txgain;
	/* u32 gain_save = 0; */

	/* (body continues on the following hunk) */
	if
	/* (continuation of b43_nphy_tx_power_fix) */
	   (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 1);

	/* Select the base TX power index per core: fixed values on newer
	 * PHYs / old SPROMs, otherwise from the per-band SPROM txpid. */
	if (dev->phy.rev >= 7) {
		txpi[0] = txpi[1] = 30;
	} else if (dev->phy.rev >= 3) {
		txpi[0] = 40;
		txpi[1] = 40;
	} else if (sprom->revision < 4) {
		txpi[0] = 72;
		txpi[1] = 72;
	} else {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			txpi[0] = sprom->txpid2g[0];
			txpi[1] = sprom->txpid2g[1];
		} else if (freq >= 4900 && freq < 5100) {
			txpi[0] = sprom->txpid5gl[0];
			txpi[1] = sprom->txpid5gl[1];
		} else if (freq >= 5100 && freq < 5500) {
			txpi[0] = sprom->txpid5g[0];
			txpi[1] = sprom->txpid5g[1];
		} else if (freq >= 5500) {
			txpi[0] = sprom->txpid5gh[0];
			txpi[1] = sprom->txpid5gh[1];
		} else {
			txpi[0] = 91;
			txpi[1] = 91;
		}
	}
	/* Sanity-clamp out-of-range SPROM indices. */
	if (dev->phy.rev < 7 &&
	    (txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 100))
		txpi[0] = txpi[1] = 91;

	for (i = 0; i < 2; i++) {
		/* Unpack the gain-table entry: radio gain, DAC gain and
		 * baseband multiplier (field widths vary by PHY rev). */
		txgain = *(b43_nphy_get_tx_gain_table(dev) + txpi[i]);

		if (dev->phy.rev >= 3)
			radio_gain = (txgain >> 16) & 0x1FFFF;
		else
			radio_gain = (txgain >> 16) & 0x1FFF;

		if (dev->phy.rev >= 7)
			dac_gain = (txgain >> 8) & 0x7;
		else
			dac_gain = (txgain >> 8) & 0x3F;
		bbmult = txgain & 0xFF;

		/* Take manual AFE control for this core. */
		if (dev->phy.rev >= 3) {
			if (i == 0)
				b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
			else
				b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
		} else {
			b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
		}

		if (i == 0)
			b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain);
		else
			b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);

		b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain);

		/* Splice this core's bbmult into its byte of the shared
		 * table word at (0xF, 0x57). */
		tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57));
		if (i == 0)
			tmp = (tmp & 0x00FF) | (bbmult << 8);
		else
			tmp = (tmp & 0xFF00) | bbmult;
		b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp);

		/* Internal PA: program the PAPD power offset for this
		 * gain index and enable PAPD. */
		if (b43_nphy_ipa(dev)) {
			u32 tmp32;
			u16 reg = (i == 0) ?
				B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
			tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i,
							      576 + txpi[i]));
			b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
			b43_phy_set(dev, reg, 0x4);
		}
	}

	b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 0);
}

/*
 * Configure the radio's internal TSSI path for boards with an internal
 * power amplifier; register layout differs between rev7+ (per-core
 * register banks at 0x170/0x190) and the 2056 radio on rev3..6.
 */
static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;

	u8 core;
	u16 r; /* routing */

	if (phy->rev >= 7) {
		for (core = 0; core < 2; core++) {
			r = core ? 0x190 : 0x170;
			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
				b43_radio_write(dev, r + 0x5, 0x5);
				b43_radio_write(dev, r + 0x9, 0xE);
				if (phy->rev != 5)
					b43_radio_write(dev, r + 0xA, 0);
				if (phy->rev != 7)
					b43_radio_write(dev, r + 0xB, 1);
				else
					b43_radio_write(dev, r + 0xB, 0x31);
			} else {
				b43_radio_write(dev, r + 0x5, 0x9);
				b43_radio_write(dev, r + 0x9, 0xC);
				b43_radio_write(dev, r + 0xB, 0x0);
				if (phy->rev != 5)
					b43_radio_write(dev, r + 0xA, 1);
				else
					b43_radio_write(dev, r + 0xA, 0x31);
			}
			b43_radio_write(dev, r + 0x6, 0);
			b43_radio_write(dev, r + 0x7, 0);
			b43_radio_write(dev, r + 0x8, 3);
			b43_radio_write(dev, r + 0xC, 0);
		}
	} else {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
			b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128);
		else
			b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80);
		b43_radio_write(dev, B2056_SYN_RESERVED_ADDR30, 0);
		b43_radio_write(dev, B2056_SYN_GPIO_MASTER1, 0x29);

		for (core = 0; core < 2; core++) {
			r = core ? B2056_TX1 : B2056_TX0;

			b43_radio_write(dev, r | B2056_TX_IQCAL_VCM_HG, 0);
			b43_radio_write(dev, r | B2056_TX_IQCAL_IDAC, 0);
			b43_radio_write(dev, r | B2056_TX_TSSI_VCM, 3);
			b43_radio_write(dev, r | B2056_TX_TX_AMP_DET, 0);
			b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8);
			b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0);
			b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0);

			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
				b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
						0x5);
				if (phy->rev != 5)
					b43_radio_write(dev, r | B2056_TX_TSSIA,
							0x00);
				if (phy->rev >= 5)
					b43_radio_write(dev, r | B2056_TX_TSSIG,
							0x31);
				else
					b43_radio_write(dev, r | B2056_TX_TSSIG,
							0x11);
				b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX,
						0xE);
			} else {
				b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
						0x9);
				b43_radio_write(dev, r | B2056_TX_TSSIA, 0x31);
				b43_radio_write(dev, r | B2056_TX_TSSIG, 0x0);
				b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX,
						0xC);
			}
		}
	}
}

/*
 * Measure the idle TSSI (no transmission in progress): play a tone,
 * poll RSSI, then record the per-core idle values for both bands.
 */
static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_n *nphy = dev->phy.n;

	u32 tmp;
	s32 rssi[4] = { };

	if (b43_nphy_ipa(dev))
		b43_nphy_ipa_internal_tssi_setup(dev);

	/* rev7+: nothing to override here (placeholder branch). */
	if (phy->rev >= 7)
		;
	else if (phy->rev >= 3)
		b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);

	b43_nphy_stop_playback(dev);
	b43_nphy_tx_tone(dev, 0xFA0, 0, false, false);
	udelay(20);
	tmp = b43_nphy_poll_rssi(dev, 4, rssi, 1);
	b43_nphy_stop_playback(dev);
	b43_nphy_rssi_select(dev, 0, 0);

	if (phy->rev >= 7)
		;
	else if (phy->rev >= 3)
		b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);

	/* Byte layout of the packed RSSI word differs by PHY rev. */
	if (phy->rev >= 3) {
		nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 24) & 0xFF;
		nphy->pwr_ctl_info[1].idle_tssi_5g = (tmp >> 8) & 0xFF;
	} else {
		nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 16) & 0xFF;
		nphy->pwr_ctl_info[1].idle_tssi_5g = tmp & 0xFF;
	}
	nphy->pwr_ctl_info[0].idle_tssi_2g = (tmp >> 24) & 0xFF;
	nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF;
}

/*
 * Build the 84-entry adjusted power table from the raw TX power
 * offsets, one stripe per STF mode.
 * (signature continues on the following hunk)
 */
static void b43_nphy_tx_prepare_adjusted_power_table(struct
						     b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u8 idx, delta;
	u8 i, stf_mode;

	for (i = 0; i < 4; i++)
		nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i];

	for (stf_mode = 0; stf_mode < 4; stf_mode++) {
		delta = 0;
		/* Pick the base offset index for this STF mode; the
		 * values depend on channel width and PHY revision. */
		switch (stf_mode) {
		case 0:
			if (dev->phy.is_40mhz && dev->phy.rev >= 5) {
				idx = 68;
			} else {
				delta = 1;
				idx = dev->phy.is_40mhz ? 52 : 4;
			}
			break;
		case 1:
			idx = dev->phy.is_40mhz ? 76 : 28;
			break;
		case 2:
			idx = dev->phy.is_40mhz ? 84 : 36;
			break;
		case 3:
			idx = dev->phy.is_40mhz ? 92 : 44;
			break;
		}

		/* Walk the offsets with a rate-dependent stride. */
		for (i = 0; i < 20; i++) {
			nphy->adj_pwr_tbl[4 + 4 * i + stf_mode] =
				nphy->tx_power_offset[idx];
			if (i == 0)
				idx += delta;
			if (i == 14)
				idx += 1 - delta;
			if (i == 3 || i == 4 || i == 7 ||
			    i == 8 || i == 11 || i == 13)
				idx += 1;
		}
	}
}

/*
 * Full TX power-control setup: gather idle TSSI, target power and PA
 * coefficients (from SPROM, per band/sub-band), program the TSSI path
 * and compute the 64-entry estimated-power tables for both cores.
 */
static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	struct ssb_sprom *sprom = dev->dev->bus_sprom;

	s16 a1[2], b0[2], b1[2];
	u8 idle[2];
	s8 target[2];
	s32 num, den, pwr;
	u32 regval[64];

	u16 freq = dev->phy.channel_freq;
	u16 tmp;
	u16 r; /* routing */
	u8 i, c;

	/* Core revs 11/12 need a MAC-control workaround around PHY
	 * register accesses (set bit 21, read back, settle). */
	if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12) {
		b43_maskset32(dev, B43_MMIO_MACCTL, ~0, 0x200000);
		b43_read32(dev, B43_MMIO_MACCTL);
		udelay(1);
	}

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, true);

	b43_phy_set(dev, B43_NPHY_TSSIMODE, B43_NPHY_TSSIMODE_EN);
	if (dev->phy.rev >= 3)
		b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD,
			     ~B43_NPHY_TXPCTL_CMD_PCTLEN & 0xFFFF);
	else
		b43_phy_set(dev, B43_NPHY_TXPCTL_CMD,
			    B43_NPHY_TXPCTL_CMD_PCTLEN);

	if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12)
		b43_maskset32(dev, B43_MMIO_MACCTL, ~0x200000, 0);

	if (sprom->revision < 4) {
		/* Old SPROM: hardcoded defaults. */
		idle[0] = nphy->pwr_ctl_info[0].idle_tssi_2g;
		idle[1] = nphy->pwr_ctl_info[1].idle_tssi_2g;
		target[0] = target[1] = 52;
		a1[0] = a1[1] = -424;
		b0[0] = b0[1] = 5612;
		b1[0] = b1[1] = -1393;
	} else {
		/* Per-band / per-sub-band PA parameters from SPROM. */
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			for (c = 0; c < 2; c++) {
				idle[c] = nphy->pwr_ctl_info[c].idle_tssi_2g;
				target[c] = sprom->core_pwr_info[c].maxpwr_2g;
				a1[c] = sprom->core_pwr_info[c].pa_2g[0];
				b0[c] = sprom->core_pwr_info[c].pa_2g[1];
				b1[c] = sprom->core_pwr_info[c].pa_2g[2];
			}
		} else if (freq >= 4900 && freq < 5100) {
			for (c = 0; c < 2; c++) {
				idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g;
				target[c] = sprom->core_pwr_info[c].maxpwr_5gl;
				a1[c] = sprom->core_pwr_info[c].pa_5gl[0];
				b0[c] = sprom->core_pwr_info[c].pa_5gl[1];
				b1[c] = sprom->core_pwr_info[c].pa_5gl[2];
			}
		} else if (freq >= 5100 && freq < 5500) {
			for (c = 0; c < 2; c++) {
				idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g;
				target[c] = sprom->core_pwr_info[c].maxpwr_5g;
				a1[c] = sprom->core_pwr_info[c].pa_5g[0];
				b0[c] = sprom->core_pwr_info[c].pa_5g[1];
				b1[c] = sprom->core_pwr_info[c].pa_5g[2];
			}
		} else if (freq >= 5500) {
			for (c = 0; c < 2; c++) {
				idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g;
				target[c] = sprom->core_pwr_info[c].maxpwr_5gh;
				a1[c] = sprom->core_pwr_info[c].pa_5gh[0];
				b0[c] = sprom->core_pwr_info[c].pa_5gh[1];
				b1[c] = sprom->core_pwr_info[c].pa_5gh[2];
			}
		} else {
			idle[0] = nphy->pwr_ctl_info[0].idle_tssi_5g;
			idle[1] = nphy->pwr_ctl_info[1].idle_tssi_5g;
			target[0] = target[1] = 52;
			a1[0] = a1[1] = -424;
			b0[0] = b0[1] = 5612;
			b1[0] = b1[1] = -1393;
		}
	}

	if (dev->phy.rev >= 3) {
		if (sprom->fem.ghz2.tssipos)
			b43_phy_set(dev, B43_NPHY_TXPCTL_ITSSI, 0x4000);
		if (dev->phy.rev >= 7) {
			for (c = 0; c < 2; c++) {
				r = c ? 0x190 : 0x170;
				if (b43_nphy_ipa(dev))
					b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 0xE : 0xC);
			}
		} else {
			/* Route the TSSI mux for internal vs external PA. */
			if (b43_nphy_ipa(dev)) {
				tmp = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
				b43_radio_write(dev,
					B2056_TX0 | B2056_TX_TX_SSI_MUX, tmp);
				b43_radio_write(dev,
					B2056_TX1 | B2056_TX_TX_SSI_MUX, tmp);
			} else {
				b43_radio_write(dev,
					B2056_TX0 | B2056_TX_TX_SSI_MUX, 0x11);
				b43_radio_write(dev,
					B2056_TX1 | B2056_TX_TX_SSI_MUX, 0x11);
			}
		}
	}

	if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12) {
		b43_maskset32(dev, B43_MMIO_MACCTL, ~0, 0x200000);
		b43_read32(dev, B43_MMIO_MACCTL);
		udelay(1);
	}

	/* Initial power index: 0x19 on rev7+, 0x40 below. */
	if (dev->phy.rev >= 7) {
		b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
				~B43_NPHY_TXPCTL_CMD_INIT, 0x19);
		b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
				~B43_NPHY_TXPCTL_INIT_PIDXI1, 0x19);
	} else {
		b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
				~B43_NPHY_TXPCTL_CMD_INIT, 0x40);
		if (dev->phy.rev > 1)
			b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
				~B43_NPHY_TXPCTL_INIT_PIDXI1, 0x40);
	}

	if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12)
		b43_maskset32(dev, B43_MMIO_MACCTL, ~0x200000, 0);

	b43_phy_write(dev, B43_NPHY_TXPCTL_N,
		      0xF0 << B43_NPHY_TXPCTL_N_TSSID_SHIFT |
		      3 << B43_NPHY_TXPCTL_N_NPTIL2_SHIFT);
	b43_phy_write(dev, B43_NPHY_TXPCTL_ITSSI,
		      idle[0] << B43_NPHY_TXPCTL_ITSSI_0_SHIFT |
		      idle[1] << B43_NPHY_TXPCTL_ITSSI_1_SHIFT |
		      B43_NPHY_TXPCTL_ITSSI_BINF);
	b43_phy_write(dev, B43_NPHY_TXPCTL_TPWR,
		      target[0] << B43_NPHY_TXPCTL_TPWR_0_SHIFT |
		      target[1] << B43_NPHY_TXPCTL_TPWR_1_SHIFT);

	/* Evaluate the estimated-power polynomial for each of 64 TSSI
	 * values per core and upload the results. */
	for (c = 0; c < 2; c++) {
		for (i = 0; i < 64; i++) {
			num = 8 * (16 * b0[c] + b1[c] * i);
			den = 32768 + a1[c] * i;
			pwr = max((4 * num + den / 2) / den, -8);
			if (dev->phy.rev < 3 && (i <= (31 - idle[c] + 1)))
				pwr = max(pwr, target[c] + 1);
			regval[i] = pwr;
		}
		b43_ntab_write_bulk(dev, B43_NTAB32(26 + c, 0), 64, regval);
	}

	b43_nphy_tx_prepare_adjusted_power_table(dev);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, false);
}

/*
 * Upload the TX gain table to both cores; on rev3+ also derive the
 * per-index PAPD RF power offsets (2.4 GHz internal-PA only).
 */
static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;

	const u32 *table = NULL;
	u32 rfpwr_offset;
	u8 pga_gain;
	int i;

	table = b43_nphy_get_tx_gain_table(dev);
	b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128,
			    table);
	b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128,
			    table);

	if (phy->rev >= 3) {
#if 0
		nphy->gmval = (table[0] >> 16) & 0x7000;
#endif

		for (i = 0; i < 128; i++) {
			/* PAPD offset is looked up by the PGA gain field
			 * of the entry (2.4 GHz only; 0 elsewhere). */
			pga_gain = (table[i] >> 24) & 0xF;
			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
				rfpwr_offset =
				 b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
			else
				rfpwr_offset = 0; /* FIXME */
			b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
				       rfpwr_offset);
			b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
				       rfpwr_offset);
		}
	}
}

/*
 * Override (or restore) the PA control lines: saves the RFCTL_INTC
 * registers and forces band/revision-specific values while disabled.
 */
static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
{
	struct b43_phy_n *nphy = dev->phy.n;
	enum ieee80211_band band;
	u16 tmp;

	if (!enable) {
		nphy->rfctrl_intc1_save = b43_phy_read(dev,
						       B43_NPHY_RFCTL_INTC1);
		nphy->rfctrl_intc2_save = b43_phy_read(dev,
						       B43_NPHY_RFCTL_INTC2);
		band = b43_current_band(dev->wl);
		if (dev->phy.rev >= 3) {
			if (band == IEEE80211_BAND_5GHZ)
				tmp = 0x600;
			else
				tmp = 0x480;
		} else {
			if (band == IEEE80211_BAND_5GHZ)
				tmp = 0x180;
			else
				tmp = 0x120;
		}
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
	} else {
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
				nphy->rfctrl_intc1_save);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
				nphy->rfctrl_intc2_save);
	}
}

/*
 * Program the TX low-pass filter bandwidth words (rev3+ only); the
 * value packs the same 3-bit code into four fields.
 */
static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
{
	u16 tmp;

	if (dev->phy.rev >= 3) {
		if (b43_nphy_ipa(dev)) {
			tmp = 4;
			b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
			      (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
		}

		tmp = 1;
		b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
			      (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
	}
}

/*
 * Run an RX I/Q estimate over "samps" samples and fill "est" with the
 * accumulated I/Q power and cross products for both cores. On timeout
 * (command bit never clears within ~10 ms) "est" is zeroed.
 */
static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
				u16 samps, u8 time, bool wait)
{
	int i;
	u16 tmp;

	b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
	b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
	if (wait)
		b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
	else
		b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);

	b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);

	for (i = 1000; i; i--) {
		tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
		if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
			/* Done: collect the 32-bit accumulators. */
			est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
			est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
			est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);

			est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
			est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
			est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
					b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
			return;
		}
		udelay(10);
	}
	memset(est, 0, sizeof(*est));
}

/*
 * Read (write == false) or write (write == true) the RX I/Q
 * compensation coefficients for both cores.
 */
static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
					struct b43_phy_n_iq_comp *pcomp)
{
	if (write) {
		b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
		b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
		b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
		b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
	} else {
		pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
		pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
		pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
		pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
	}
}

#if 0
/*
 * (Compiled out.) Restore the PHY registers saved by
 * b43_nphy_rx_cal_phy_setup() after an RX calibration run.
 */
static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
{
	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;

	b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
	if (core == 0) {
		b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
	} else {
		b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
	}
	b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
	b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
	b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
	b43_phy_write(dev,
B43_NPHY_RFCTL_RSSIO2, regs[6]); b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]); b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]); b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]); b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]); } static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core) { u8 rxval, txval; u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA); if (core == 0) { regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1); } else { regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); } regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1); regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2); regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1); regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER); regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0); regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1); b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001); b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001); b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF, ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT)); b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN, ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT)); b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN, (core << B43_NPHY_RFSEQCA_RXEN_SHIFT)); b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS, (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT)); if (core == 0) { b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007); b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007); } else { b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007); b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007); } b43_nphy_rf_control_intc_override(dev, 2, 0, 3); b43_nphy_rf_control_override(dev, 8, 0, 3, false); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); if (core == 0) { rxval = 1; txval = 8; } 
else {
		rxval = 4;
		txval = 2;
	}
	/* Route the RX path to @core and the TX path to the other core. */
	b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
	b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
}
#endif

/*
 * Measure the RX IQ imbalance of the cores selected in @mask (bit 0 =
 * core 0, bit 1 = core 1), derive compensation coefficients a/b from the
 * measured I/Q powers and the IQ cross product, and program them.  On any
 * numeric problem (too little power, zero divisor) the previous
 * coefficients are restored unchanged.
 */
static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
{
	int i;
	s32 iq;
	u32 ii;
	u32 qq;
	int iq_nbits, qq_nbits;
	int arsh, brsh;
	u16 tmp, a, b;

	struct nphy_iq_est est;
	struct b43_phy_n_iq_comp old;
	struct b43_phy_n_iq_comp new = { };
	bool error = false;

	if (mask == 0)
		return;

	/* Save the current coefficients, then write the zeroed set so the
	 * estimate below runs without compensation applied. */
	b43_nphy_rx_iq_coeffs(dev, false, &old);
	b43_nphy_rx_iq_coeffs(dev, true, &new);
	b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
	new = old;

	for (i = 0; i < 2; i++) {
		if (i == 0 && (mask & 1)) {
			iq = est.iq0_prod;
			ii = est.i0_pwr;
			qq = est.q0_pwr;
		} else if (i == 1 && (mask & 2)) {
			iq = est.iq1_prod;
			ii = est.i1_pwr;
			qq = est.q1_pwr;
		} else {
			continue;
		}

		/* Not enough signal power to calibrate against. */
		if (ii + qq < 2) {
			error = true;
			break;
		}

		/* Fixed-point math: normalize iq/qq to known bit positions
		 * before dividing by the I power. */
		iq_nbits = fls(abs(iq));
		qq_nbits = fls(qq);

		arsh = iq_nbits - 20;
		if (arsh >= 0) {
			a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
			tmp = ii >> arsh;
		} else {
			a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
			tmp = ii << -arsh;
		}
		if (tmp == 0) {
			error = true;
			break;
		}
		a /= tmp;

		brsh = qq_nbits - 11;
		if (brsh >= 0) {
			b = (qq << (31 - qq_nbits));
			tmp = ii >> brsh;
		} else {
			b = (qq << (31 - qq_nbits));
			tmp = ii << -brsh;
		}
		if (tmp == 0) {
			error = true;
			break;
		}
		b = int_sqrt(b / tmp - a * a) - (1 << 10);

		if (i == 0 && (mask & 0x1)) {
			/* a/b register meaning is swapped on rev < 3. */
			if (dev->phy.rev >= 3) {
				new.a0 = a & 0x3FF;
				new.b0 = b & 0x3FF;
			} else {
				new.a0 = b & 0x3FF;
				new.b0 = a & 0x3FF;
			}
		} else if (i == 1 && (mask & 0x2)) {
			if (dev->phy.rev >= 3) {
				new.a1 = a & 0x3FF;
				new.b1 = b & 0x3FF;
			} else {
				new.a1 = b & 0x3FF;
				new.b1 = a & 0x3FF;
			}
		}
	}

	if (error)
		new = old;

	b43_nphy_rx_iq_coeffs(dev, true, &new);
}

/* Mirror four entries of N-PHY table 15 (offset 0x50) into shared memory
 * (TXIQW0..TXIQW3). */
static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
{
	u16 array[4];

	b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array);

	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
	b43_shm_write16(dev, B43_SHM_SHARED,
B43_SHM_SH_NPHY_TXIQW1, array[1]);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
}

/*
 * Spur avoidance workaround (rev3+ only).  Selects a tone/noise pair for
 * the current channel; the table writes that would actually apply them are
 * not implemented here (empty statements below).
 */
static void b43_nphy_spur_workaround(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u8 channel = dev->phy.channel;
	int tone[2] = { 57, 58 };
	u32 noise[2] = { 0x3FF, 0x3FF };

	B43_WARN_ON(dev->phy.rev < 3);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 1);

	if (nphy->gband_spurwar_en) {
		if (channel == 11 && dev->phy.is_40mhz)
			; /* TODO: not implemented */
		else
			; /* TODO: not implemented */
	}

	if (nphy->aband_spurwar_en) {
		if (channel == 54) {
			tone[0] = 0x20;
			noise[0] = 0x25F;
		} else if (channel == 38 || channel == 102 || channel == 118) {
			/* NOTE(review): condition permanently stubbed to 0 in
			 * this version; the else branch always runs. */
			if (0) {
				tone[0] = 0x20;
				noise[0] = 0x21F;
			} else {
				tone[0] = 0;
				noise[0] = 0;
			}
		} else if (channel == 134) {
			tone[0] = 0x20;
			noise[0] = 0x21F;
		} else if (channel == 151) {
			tone[0] = 0x10;
			noise[0] = 0x23F;
		} else if (channel == 153 || channel == 161) {
			tone[0] = 0x30;
			noise[0] = 0x23F;
		} else {
			tone[0] = 0;
			noise[0] = 0;
		}

		if (!tone[0] && !noise[0])
			; /* TODO: not implemented */
		else
			; /* TODO: not implemented */
	}

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 0);
}

/*
 * Write the TX power control coefficient tables (N-PHY tables 26/27,
 * offsets 320 and 448) from the calibration results stored in table 15.
 */
static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	int i, j;
	u32 tmp;
	u32 cur_real, cur_imag, real_part, imag_part;

	u16 buffer[7];

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, true);

	b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);

	for (i = 0; i < 2; i++) {
		/* Pack two 10-bit coefficients into one table word. */
		tmp = ((buffer[i * 2] & 0x3FF) << 10) |
			(buffer[i * 2 + 1] & 0x3FF);
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
				(((i + 26) << 10) | 320));
		for (j = 0; j < 128; j++) {
			b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
					((tmp >> 16) & 0xFFFF));
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
					(tmp & 0xFFFF));
		}
	}

	for (i = 0; i < 2; i++) {
		tmp = buffer[5 + i];
		real_part = (tmp >> 8) & 0xFF;
		imag_part = (tmp & 0xFF);
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
				(((i + 26) << 10) | 448));

		if (dev->phy.rev >= 3) {
			cur_real = real_part;
			cur_imag = imag_part;
			tmp =
((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
		}

		for (j = 0; j < 128; j++) {
			/* rev < 3 scales the LO leakage entry per index. */
			if (dev->phy.rev < 3) {
				cur_real = (real_part * loscale[j] + 128) >> 8;
				cur_imag = (imag_part * loscale[j] + 128) >> 8;
				tmp = ((cur_real & 0xFF) << 8) |
					(cur_imag & 0xFF);
			}
			b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
					((tmp >> 16) & 0xFFFF));
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
					(tmp & 0xFFFF));
		}
	}

	if (dev->phy.rev >= 3) {
		b43_shm_write16(dev, B43_SHM_SHARED,
				B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
		b43_shm_write16(dev, B43_SHM_SHARED,
				B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
	}

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, false);
}

/* Re-program the RSSI calibration results cached for the current band.
 * No-op if that band has not been RSSI-calibrated yet. */
static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u16 *rssical_radio_regs = NULL;
	u16 *rssical_phy_regs = NULL;

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		if (!nphy->rssical_chanspec_2G.center_freq)
			return;
		rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
		rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
	} else {
		if (!nphy->rssical_chanspec_5G.center_freq)
			return;
		rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
		rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
	}

	b43_radio_maskset(dev, 0x602B, 0xE3, rssical_radio_regs[0]);
	b43_radio_maskset(dev, 0x702B, 0xE3, rssical_radio_regs[1]);

	/* Restore the twelve RSSI multiplier/offset PHY registers. */
	b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, rssical_phy_regs[2]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, rssical_phy_regs[3]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, rssical_phy_regs[4]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, rssical_phy_regs[5]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, rssical_phy_regs[6]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, rssical_phy_regs[7]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, rssical_phy_regs[8]);
	b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y,
rssical_phy_regs[9]); b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, rssical_phy_regs[10]); b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]); } static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; u16 *save = nphy->tx_rx_cal_radio_saveregs; u16 tmp; u8 offset, i; if (dev->phy.rev >= 3) { for (i = 0; i < 2; i++) { tmp = (i == 0) ? 0x2000 : 0x3000; offset = i * 11; save[offset + 0] = b43_radio_read16(dev, B2055_CAL_RVARCTL); save[offset + 1] = b43_radio_read16(dev, B2055_CAL_LPOCTL); save[offset + 2] = b43_radio_read16(dev, B2055_CAL_TS); save[offset + 3] = b43_radio_read16(dev, B2055_CAL_RCCALRTS); save[offset + 4] = b43_radio_read16(dev, B2055_CAL_RCALRTS); save[offset + 5] = b43_radio_read16(dev, B2055_PADDRV); save[offset + 6] = b43_radio_read16(dev, B2055_XOCTL1); save[offset + 7] = b43_radio_read16(dev, B2055_XOCTL2); save[offset + 8] = b43_radio_read16(dev, B2055_XOREGUL); save[offset + 9] = b43_radio_read16(dev, B2055_XOMISC); save[offset + 10] = b43_radio_read16(dev, B2055_PLL_LFC1); if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x0A); b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40); b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55); b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0); b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0); if (nphy->ipa5g_on) { b43_radio_write16(dev, tmp | B2055_PADDRV, 4); b43_radio_write16(dev, tmp | B2055_XOCTL1, 1); } else { b43_radio_write16(dev, tmp | B2055_PADDRV, 0); b43_radio_write16(dev, tmp | B2055_XOCTL1, 0x2F); } b43_radio_write16(dev, tmp | B2055_XOCTL2, 0); } else { b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x06); b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40); b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55); b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0); b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0); b43_radio_write16(dev, tmp | B2055_XOCTL1, 0); if 
(nphy->ipa2g_on) { b43_radio_write16(dev, tmp | B2055_PADDRV, 6); b43_radio_write16(dev, tmp | B2055_XOCTL2, (dev->phy.rev < 5) ? 0x11 : 0x01); } else { b43_radio_write16(dev, tmp | B2055_PADDRV, 0); b43_radio_write16(dev, tmp | B2055_XOCTL2, 0); } } b43_radio_write16(dev, tmp | B2055_XOREGUL, 0); b43_radio_write16(dev, tmp | B2055_XOMISC, 0); b43_radio_write16(dev, tmp | B2055_PLL_LFC1, 0); } } else { save[0] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL1); b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL1, 0x29); save[1] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL2); b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL2, 0x54); save[2] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL1); b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL1, 0x29); save[3] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL2); b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL2, 0x54); save[3] = b43_radio_read16(dev, B2055_C1_PWRDET_RXTX); save[4] = b43_radio_read16(dev, B2055_C2_PWRDET_RXTX); if (!(b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ)) { b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x04); b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x04); } else { b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x20); b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x20); } if (dev->phy.rev < 2) { b43_radio_set(dev, B2055_C1_TX_BB_MXGM, 0x20); b43_radio_set(dev, B2055_C2_TX_BB_MXGM, 0x20); } else { b43_radio_mask(dev, B2055_C1_TX_BB_MXGM, ~0x20); b43_radio_mask(dev, B2055_C2_TX_BB_MXGM, ~0x20); } } } static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core) { struct b43_phy_n *nphy = dev->phy.n; int i; u16 scale, entry; u16 tmp = nphy->txcal_bbmult; if (core == 0) tmp >>= 8; tmp &= 0xff; for (i = 0; i < 18; i++) { scale = (ladder_lo[i].percent * tmp) / 100; entry = ((scale & 0xFF) << 8) | ladder_lo[i].g_env; b43_ntab_write(dev, B43_NTAB16(15, i), entry); scale = (ladder_iq[i].percent * tmp) / 100; entry = ((scale & 0xFF) << 8) | ladder_iq[i].g_env; b43_ntab_write(dev, B43_NTAB16(15, i + 32), entry); } 
}

/* Program the 15-tap TX digital filter used with an external PA (bank 2
 * of tbl_tx_filter_coef_rev4). */
static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev)
{
	int i;
	for (i = 0; i < 15; i++)
		b43_phy_write(dev, B43_PHY_N(0x2C5 + i),
				tbl_tx_filter_coef_rev4[2][i]);
}

/* Program all three TX digital filter banks for the internal PA, then
 * override the first bank for 40 MHz, 5 GHz, or channel 14 as needed. */
static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
{
	int i, j;
	/* PHY offsets of the three 15-tap filter banks. */
	static const u16 offset[] = { 0x186, 0x195, 0x2C5 };

	for (i = 0; i < 3; i++)
		for (j = 0; j < 15; j++)
			b43_phy_write(dev, B43_PHY_N(offset[i] + j),
					tbl_tx_filter_coef_rev4[i][j]);

	if (dev->phy.is_40mhz) {
		for (j = 0; j < 15; j++)
			b43_phy_write(dev, B43_PHY_N(offset[0] + j),
					tbl_tx_filter_coef_rev4[3][j]);
	} else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
		for (j = 0; j < 15; j++)
			b43_phy_write(dev, B43_PHY_N(offset[0] + j),
					tbl_tx_filter_coef_rev4[5][j]);
	}

	if (dev->phy.channel == 14)
		for (j = 0; j < 15; j++)
			b43_phy_write(dev, B43_PHY_N(offset[0] + j),
					tbl_tx_filter_coef_rev4[6][j]);
}

/*
 * Return the current TX gain settings (ipa/pad/pga/txgm per core): read
 * directly from N-PHY table 7 when TX power control is off, otherwise
 * decoded from the power-control status index into the active gain table.
 */
static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u16 curr_gain[2];
	struct nphy_txgains target;
	const u32 *table = NULL;

	if (!nphy->txpwrctrl) {
		int i;

		if (nphy->hang_avoid)
			b43_nphy_stay_in_carrier_search(dev, true);
		b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, curr_gain);
		if (nphy->hang_avoid)
			b43_nphy_stay_in_carrier_search(dev, false);

		for (i = 0; i < 2; ++i) {
			/* Bit-field layout differs between rev >= 3 and
			 * older revisions. */
			if (dev->phy.rev >= 3) {
				target.ipa[i] = curr_gain[i] & 0x000F;
				target.pad[i] = (curr_gain[i] & 0x00F0) >> 4;
				target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
				target.txgm[i] = (curr_gain[i] & 0x7000) >> 12;
			} else {
				target.ipa[i] = curr_gain[i] & 0x0003;
				target.pad[i] = (curr_gain[i] & 0x000C) >> 2;
				target.pga[i] = (curr_gain[i] & 0x0070) >> 4;
				target.txgm[i] = (curr_gain[i] & 0x0380) >> 7;
			}
		}
	} else {
		int i;
		u16 index[2];
		index[0] = (b43_phy_read(dev, B43_NPHY_C1_TXPCTL_STAT) &
			B43_NPHY_TXPCTL_STAT_BIDX) >>
			B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
		index[1] = (b43_phy_read(dev, B43_NPHY_C2_TXPCTL_STAT) &
			B43_NPHY_TXPCTL_STAT_BIDX) >>
			B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;

		for (i = 0; i < 2; ++i) {
			table =
b43_nphy_get_tx_gain_table(dev);
			if (dev->phy.rev >= 3) {
				target.ipa[i] = (table[index[i]] >> 16) & 0xF;
				target.pad[i] = (table[index[i]] >> 20) & 0xF;
				target.pga[i] = (table[index[i]] >> 24) & 0xF;
				target.txgm[i] = (table[index[i]] >> 28) & 0xF;
			} else {
				target.ipa[i] = (table[index[i]] >> 16) & 0x3;
				target.pad[i] = (table[index[i]] >> 18) & 0x3;
				target.pga[i] = (table[index[i]] >> 20) & 0x7;
				target.txgm[i] = (table[index[i]] >> 23) & 0x7;
			}
		}
	}

	return target;
}

/* Undo b43_nphy_tx_cal_phy_setup(): restore the saved AFE overrides,
 * tables and RF control registers after TX IQ/LO calibration. */
static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev)
{
	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;

	if (dev->phy.rev >= 3) {
		b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[0]);
		b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[3]);
		b43_phy_write(dev, B43_NPHY_BBCFG, regs[4]);
		b43_ntab_write(dev, B43_NTAB16(8, 3), regs[5]);
		b43_ntab_write(dev, B43_NTAB16(8, 19), regs[6]);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[7]);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[8]);
		b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
		b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
		b43_nphy_reset_cca(dev);
	} else {
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, regs[0]);
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, regs[1]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
		b43_ntab_write(dev, B43_NTAB16(8, 2), regs[3]);
		b43_ntab_write(dev, B43_NTAB16(8, 18), regs[4]);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[5]);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[6]);
	}
}

/* Save and reconfigure the PHY (AFE overrides, tables 8/2, 8/3, 8/18,
 * 8/19, RF control) for TX IQ/LO calibration; the saved state is restored
 * by b43_nphy_tx_cal_phy_cleanup(). */
static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
{
	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
	u16 tmp;

	regs[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
	regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
	if (dev->phy.rev >= 3) {
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0xF0FF, 0x0A00);
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0xF0FF, 0x0A00);

		tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
		regs[2] = tmp;
		b43_phy_write(dev,
B43_NPHY_AFECTL_OVER1, tmp | 0x0600);

		tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
		regs[3] = tmp;
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x0600);

		regs[4] = b43_phy_read(dev, B43_NPHY_BBCFG);
		b43_phy_mask(dev, B43_NPHY_BBCFG,
				~B43_NPHY_BBCFG_RSTRX & 0xFFFF);

		tmp = b43_ntab_read(dev, B43_NTAB16(8, 3));
		regs[5] = tmp;
		b43_ntab_write(dev, B43_NTAB16(8, 3), 0);

		tmp = b43_ntab_read(dev, B43_NTAB16(8, 19));
		regs[6] = tmp;
		b43_ntab_write(dev, B43_NTAB16(8, 19), 0);

		regs[7] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
		regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);

		b43_nphy_rf_control_intc_override(dev, 2, 1, 3);
		b43_nphy_rf_control_intc_override(dev, 1, 2, 1);
		b43_nphy_rf_control_intc_override(dev, 1, 8, 2);

		regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
		regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
		/* PAPD off on both cores during calibration. */
		b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
		b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
	} else {
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, 0xA000);
		b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, 0xA000);

		tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
		regs[2] = tmp;
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x3000);

		tmp = b43_ntab_read(dev, B43_NTAB16(8, 2));
		regs[3] = tmp;
		tmp |= 0x2000;
		b43_ntab_write(dev, B43_NTAB16(8, 2), tmp);

		tmp = b43_ntab_read(dev, B43_NTAB16(8, 18));
		regs[4] = tmp;
		tmp |= 0x2000;
		b43_ntab_write(dev, B43_NTAB16(8, 18), tmp);

		regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
		regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
			tmp = 0x0180;
		else
			tmp = 0x0120;
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
	}
}

/* Cache the current calibration results for the active band (RX IQ
 * coefficients, TX cal radio registers and TX cal coefficient table) so
 * that b43_nphy_restore_cal() can reapply them later. */
static void b43_nphy_save_cal(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
	u16 *txcal_radio_regs = NULL;
	struct b43_chanspec *iqcal_chanspec;
	u16 *table = NULL;

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 1);

	if (b43_current_band(dev->wl)
== IEEE80211_BAND_2GHZ) {
		rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
		txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
		iqcal_chanspec = &nphy->iqcal_chanspec_2G;
		table = nphy->cal_cache.txcal_coeffs_2G;
	} else {
		rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
		txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
		iqcal_chanspec = &nphy->iqcal_chanspec_5G;
		table = nphy->cal_cache.txcal_coeffs_5G;
	}

	b43_nphy_rx_iq_coeffs(dev, false, rxcal_coeffs);
	/* Save the TX cal radio registers; addresses differ per revision. */
	if (dev->phy.rev >= 3) {
		txcal_radio_regs[0] = b43_radio_read(dev, 0x2021);
		txcal_radio_regs[1] = b43_radio_read(dev, 0x2022);
		txcal_radio_regs[2] = b43_radio_read(dev, 0x3021);
		txcal_radio_regs[3] = b43_radio_read(dev, 0x3022);
		txcal_radio_regs[4] = b43_radio_read(dev, 0x2023);
		txcal_radio_regs[5] = b43_radio_read(dev, 0x2024);
		txcal_radio_regs[6] = b43_radio_read(dev, 0x3023);
		txcal_radio_regs[7] = b43_radio_read(dev, 0x3024);
	} else {
		txcal_radio_regs[0] = b43_radio_read(dev, 0x8B);
		txcal_radio_regs[1] = b43_radio_read(dev, 0xBA);
		txcal_radio_regs[2] = b43_radio_read(dev, 0x8D);
		txcal_radio_regs[3] = b43_radio_read(dev, 0xBC);
	}
	/* Remember the channel these results belong to. */
	iqcal_chanspec->center_freq = dev->phy.channel_freq;
	iqcal_chanspec->channel_type = dev->phy.channel_type;
	b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 8, table);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, 0);
}

/* Reapply the cached TX/RX calibration results for the current band;
 * no-op if no calibration has been cached for that band.  Counterpart of
 * b43_nphy_save_cal(). */
static void b43_nphy_restore_cal(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	u16 coef[4];
	u16 *loft = NULL;
	u16 *table = NULL;

	int i;
	u16 *txcal_radio_regs = NULL;
	struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		if (!nphy->iqcal_chanspec_2G.center_freq)
			return;
		table = nphy->cal_cache.txcal_coeffs_2G;
		loft = &nphy->cal_cache.txcal_coeffs_2G[5];
	} else {
		if (!nphy->iqcal_chanspec_5G.center_freq)
			return;
		table = nphy->cal_cache.txcal_coeffs_5G;
		loft = &nphy->cal_cache.txcal_coeffs_5G[5];
	}

	b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4, table);

	for (i = 0; i < 4; i++) {
		if (dev->phy.rev >=
3)
			/* NOTE(review): coef[] is read here before it has
			 * ever been written — looks like a defect; confirm
			 * intended rev3+ behavior against vendor specs. */
			table[i] = coef[i];
		else
			coef[i] = 0;
	}

	b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, coef);
	b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2, loft);
	b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2, loft);

	if (dev->phy.rev < 2)
		b43_nphy_tx_iq_workaround(dev);

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
		rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
	} else {
		txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
		rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
	}

	/* Restore the TX cal radio registers saved by b43_nphy_save_cal(). */
	if (dev->phy.rev >= 3) {
		b43_radio_write(dev, 0x2021, txcal_radio_regs[0]);
		b43_radio_write(dev, 0x2022, txcal_radio_regs[1]);
		b43_radio_write(dev, 0x3021, txcal_radio_regs[2]);
		b43_radio_write(dev, 0x3022, txcal_radio_regs[3]);
		b43_radio_write(dev, 0x2023, txcal_radio_regs[4]);
		b43_radio_write(dev, 0x2024, txcal_radio_regs[5]);
		b43_radio_write(dev, 0x3023, txcal_radio_regs[6]);
		b43_radio_write(dev, 0x3024, txcal_radio_regs[7]);
	} else {
		b43_radio_write(dev, 0x8B, txcal_radio_regs[0]);
		b43_radio_write(dev, 0xBA, txcal_radio_regs[1]);
		b43_radio_write(dev, 0x8D, txcal_radio_regs[2]);
		b43_radio_write(dev, 0xBC, txcal_radio_regs[3]);
	}
	b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs);
}

/*
 * Run TX IQ/LO calibration for the gains in @target.  @full requests a
 * full calibration starting from reference coefficients rather than a
 * refinement of the previous result; @mphase runs just one phase of a
 * multi-phase calibration per call.  Returns 0 on success.
 */
static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
				struct nphy_txgains target,
				bool full, bool mphase)
{
	struct b43_phy_n *nphy = dev->phy.n;
	int i;
	int error = 0;
	int freq;
	bool avoid = false;
	u8 length;
	u16 tmp, core, type, count, max, numb, last = 0, cmd;
	const u16 *table;
	bool phy6or5x;

	u16 buffer[11];
	u16 diq_start = 0;
	u16 save[2];
	u16 gain[2];
	struct nphy_iqcal_params params[2];
	bool updated[2] = { };

	b43_nphy_stay_in_carrier_search(dev, true);

	/* Temporarily disable hang avoidance on rev4+; restored on exit. */
	if (dev->phy.rev >= 4) {
		avoid = nphy->hang_avoid;
		nphy->hang_avoid = false;
	}

	b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);

	for (i = 0; i < 2; i++) {
		b43_nphy_iq_cal_gain_params(dev, i, target, &params[i]);
		gain[i] = params[i].cal_gain;
	}
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain);
b43_nphy_tx_cal_radio_setup(dev);
	b43_nphy_tx_cal_phy_setup(dev);

	phy6or5x = dev->phy.rev >= 6 ||
		(dev->phy.rev == 5 && nphy->ipa2g_on &&
		b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
	if (phy6or5x) {
		/* Load bandwidth-specific LOFT/IQ-imbalance gain ladders. */
		if (dev->phy.is_40mhz) {
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
					tbl_tx_iqlo_cal_loft_ladder_40);
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
					tbl_tx_iqlo_cal_iqimb_ladder_40);
		} else {
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
					tbl_tx_iqlo_cal_loft_ladder_20);
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
					tbl_tx_iqlo_cal_iqimb_ladder_20);
		}
	}

	b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);

	if (!dev->phy.is_40mhz)
		freq = 2500;
	else
		freq = 5000;

	/* Later phases of a multi-phase cal reuse the sample playback
	 * instead of starting a new tone. */
	if (nphy->mphase_cal_phase_id > 2)
		b43_nphy_run_samples(dev, (dev->phy.is_40mhz ? 40 : 20) * 8,
					0xFFFF, 0, true, false);
	else
		error = b43_nphy_tx_tone(dev, freq, 250, true, false);

	if (error == 0) {
		/* Choose the starting coefficient table. */
		if (nphy->mphase_cal_phase_id > 2) {
			table = nphy->mphase_txcal_bestcoeffs;
			length = 11;
			if (dev->phy.rev < 3)
				length -= 2;
		} else {
			if (!full && nphy->txiqlocal_coeffsvalid) {
				table = nphy->txiqlocal_bestc;
				length = 11;
				if (dev->phy.rev < 3)
					length -= 2;
			} else {
				full = true;
				if (dev->phy.rev >= 3) {
					table = tbl_tx_iqlo_cal_startcoefs_nphyrev3;
					length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3;
				} else {
					table = tbl_tx_iqlo_cal_startcoefs;
					length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS;
				}
			}
		}

		b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length, table);

		if (full) {
			if (dev->phy.rev >= 3)
				max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3;
			else
				max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL;
		} else {
			if (dev->phy.rev >= 3)
				max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3;
			else
				max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL;
		}

		/* Multi-phase mode runs only a slice of the command list
		 * per invocation, resuming at mphase_txcal_cmdidx. */
		if (mphase) {
			count = nphy->mphase_txcal_cmdidx;
			numb = min(max,
				(u16)(count + nphy->mphase_txcal_numcmds));
		} else {
			count = 0;
			numb = max;
		}

		for (; count < numb; count++) {
			if (full) {
				if (dev->phy.rev >= 3)
					cmd = tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[count];
				else
					cmd = tbl_tx_iqlo_cal_cmds_fullcal[count];
			} else {
				if
(dev->phy.rev >= 3)
					cmd = tbl_tx_iqlo_cal_cmds_recal_nphyrev3[count];
				else
					cmd = tbl_tx_iqlo_cal_cmds_recal[count];
			}

			/* Decode the target core and calibration type out of
			 * the command word. */
			core = (cmd & 0x3000) >> 12;
			type = (cmd & 0x0F00) >> 8;

			/* Scale this core's cal ladder once per run. */
			if (phy6or5x && updated[core] == 0) {
				b43_nphy_update_tx_cal_ladder(dev, core);
				updated[core] = true;
			}

			tmp = (params[core].ncorr[type] << 8) | 0x66;
			b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDNNUM, tmp);

			if (type == 1 || type == 3 || type == 4) {
				/* Zero the DIQ start entry for this core,
				 * remembering the old value for later. */
				buffer[0] = b43_ntab_read(dev,
						B43_NTAB16(15, 69 + core));
				diq_start = buffer[0];
				buffer[0] = 0;
				b43_ntab_write(dev,
						B43_NTAB16(15, 69 + core), 0);
			}

			b43_phy_write(dev, B43_NPHY_IQLOCAL_CMD, cmd);
			/* Poll (up to ~20 ms) for command completion. */
			for (i = 0; i < 2000; i++) {
				tmp = b43_phy_read(dev, B43_NPHY_IQLOCAL_CMD);
				if (tmp & 0xC000)
					break;
				udelay(10);
			}

			/* Feed the intermediate result back as the starting
			 * point for the next command. */
			b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
						buffer);
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length,
						buffer);

			if (type == 1 || type == 3 || type == 4)
				buffer[0] = diq_start;
		}

		if (mphase)
			nphy->mphase_txcal_cmdidx = (numb >= max) ? 0 : numb;

		last = (dev->phy.rev < 3) ?
6 : 7;

		if (!mphase || nphy->mphase_cal_phase_id == last) {
			/* Final phase: commit the best coefficients and
			 * cache them for later recalibration/restore. */
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 96), 4,
						buffer);
			b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 4,
						buffer);
			if (dev->phy.rev < 3) {
				buffer[0] = 0;
				buffer[1] = 0;
				buffer[2] = 0;
				buffer[3] = 0;
			}
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
						buffer);
			b43_ntab_read_bulk(dev, B43_NTAB16(15, 101), 2,
						buffer);
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
						buffer);
			b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
						buffer);
			length = 11;
			if (dev->phy.rev < 3)
				length -= 2;
			b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
						nphy->txiqlocal_bestc);
			nphy->txiqlocal_coeffsvalid = true;
			nphy->txiqlocal_chanspec.center_freq =
						dev->phy.channel_freq;
			nphy->txiqlocal_chanspec.channel_type =
						dev->phy.channel_type;
		} else {
			/* Intermediate phase: stash partial results only. */
			length = 11;
			if (dev->phy.rev < 3)
				length -= 2;
			b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
						nphy->mphase_txcal_bestcoeffs);
		}

		b43_nphy_stop_playback(dev);
		b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0);
	}

	b43_nphy_tx_cal_phy_cleanup(dev);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, save);

	if (dev->phy.rev < 2 &&
	    (!mphase || nphy->mphase_cal_phase_id == last))
		b43_nphy_tx_iq_workaround(dev);

	if (dev->phy.rev >= 4)
		nphy->hang_avoid = avoid;

	b43_nphy_stay_in_carrier_search(dev, false);

	return error;
}

/* Re-program the cached TX IQ/LO coefficients if the hardware table has
 * drifted from them; only valid for the channel they were measured on. */
static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;
	u8 i;
	u16 buffer[7];
	bool equal = true;

	if (!nphy->txiqlocal_coeffsvalid ||
	    nphy->txiqlocal_chanspec.center_freq != dev->phy.channel_freq ||
	    nphy->txiqlocal_chanspec.channel_type != dev->phy.channel_type)
		return;

	b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
	for (i = 0; i < 4; i++) {
		if (buffer[i] != nphy->txiqlocal_bestc[i]) {
			equal = false;
			break;
		}
	}

	if (!equal) {
		b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4,
					nphy->txiqlocal_bestc);
		for (i = 0; i < 4; i++)
			buffer[i] = 0;
		b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
					buffer);
		b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
&nphy->txiqlocal_bestc[5]);
		b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
					&nphy->txiqlocal_bestc[5]);
	}
}

/*
 * RX IQ calibration for PHY revisions < 3.  For each core, a tone is
 * transmitted and received at up to four LNA/HPF gain settings (three
 * fixed probes plus one derived from the measured power), after which
 * b43_nphy_calc_rx_iq_comp() derives the compensation coefficients.
 * Returns 0 on success.
 */
static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
			struct nphy_txgains target, u8 type, bool debug)
{
	struct b43_phy_n *nphy = dev->phy.n;
	int i, j, index;
	u8 rfctl[2];
	u8 afectl_core;
	u16 tmp[6];
	u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna;
	u32 real, imag;
	enum ieee80211_band band;

	u8 use;
	u16 cur_hpf;
	u16 lna[3] = { 3, 3, 1 };
	u16 hpf1[3] = { 7, 2, 0 };
	u16 hpf2[3] = { 2, 0, 0 };
	u32 power[3] = { };
	u16 gain_save[2];
	u16 cal_gain[2];
	struct nphy_iqcal_params cal_params[2];
	struct nphy_iq_est est;
	int ret = 0;
	bool playtone = true;
	int desired = 13;

	b43_nphy_stay_in_carrier_search(dev, 1);

	if (dev->phy.rev < 2)
		b43_nphy_reapply_tx_cal_coeffs(dev);
	b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
	for (i = 0; i < 2; i++) {
		b43_nphy_iq_cal_gain_params(dev, i, target, &cal_params[i]);
		cal_gain[i] = cal_params[i].cal_gain;
	}
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, cal_gain);

	for (i = 0; i < 2; i++) {
		/* Per-core register set; the other core is the mirror. */
		if (i == 0) {
			rfctl[0] = B43_NPHY_RFCTL_INTC1;
			rfctl[1] = B43_NPHY_RFCTL_INTC2;
			afectl_core = B43_NPHY_AFECTL_C1;
		} else {
			rfctl[0] = B43_NPHY_RFCTL_INTC2;
			rfctl[1] = B43_NPHY_RFCTL_INTC1;
			afectl_core = B43_NPHY_AFECTL_C2;
		}

		/* Save the registers modified below (restored at the end of
		 * each core iteration). */
		tmp[1] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
		tmp[2] = b43_phy_read(dev, afectl_core);
		tmp[3] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
		tmp[4] = b43_phy_read(dev, rfctl[0]);
		tmp[5] = b43_phy_read(dev, rfctl[1]);

		/* RX only on this core, TX on the other one. */
		b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
				~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF,
				((1 - i) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
		b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
				(1 - i));
		b43_phy_set(dev, afectl_core, 0x0006);
		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0006);

		band = b43_current_band(dev->wl);

		if (nphy->rxcalparams & 0xFF000000) {
			if (band == IEEE80211_BAND_5GHZ)
				b43_phy_write(dev, rfctl[0], 0x140);
			else
				b43_phy_write(dev, rfctl[0], 0x110);
		} else {
			if (band ==
IEEE80211_BAND_5GHZ)
				b43_phy_write(dev, rfctl[0], 0x180);
			else
				b43_phy_write(dev, rfctl[0], 0x120);
		}

		if (band == IEEE80211_BAND_5GHZ)
			b43_phy_write(dev, rfctl[1], 0x148);
		else
			b43_phy_write(dev, rfctl[1], 0x114);

		if (nphy->rxcalparams & 0x10000) {
			b43_radio_maskset(dev, B2055_C1_GENSPARE2, 0xFC,
					(i + 1));
			b43_radio_maskset(dev, B2055_C2_GENSPARE2, 0xFC,
					(2 - i));
		}

		/* Three probe passes at fixed gains, then a final pass with
		 * a gain derived from the measured powers. */
		for (j = 0; j < 4; j++) {
			if (j < 3) {
				cur_lna = lna[j];
				cur_hpf1 = hpf1[j];
				cur_hpf2 = hpf2[j];
			} else {
				/* Pick the probe whose power was usable and
				 * fine-tune the HPF gain around it. */
				if (power[1] > 10000) {
					use = 1;
					cur_hpf = cur_hpf1;
					index = 2;
				} else {
					if (power[0] > 10000) {
						use = 1;
						cur_hpf = cur_hpf1;
						index = 1;
					} else {
						index = 0;
						use = 2;
						cur_hpf = cur_hpf2;
					}
				}
				cur_lna = lna[index];
				cur_hpf1 = hpf1[index];
				cur_hpf2 = hpf2[index];
				cur_hpf += desired - hweight32(power[index]);
				cur_hpf = clamp_val(cur_hpf, 0, 10);
				if (use == 1)
					cur_hpf1 = cur_hpf;
				else
					cur_hpf2 = cur_hpf;
			}

			tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) |
					(cur_lna << 2));
			b43_nphy_rf_control_override(dev, 0x400, tmp[0], 3,
								false);
			b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
			b43_nphy_stop_playback(dev);

			if (playtone) {
				ret = b43_nphy_tx_tone(dev, 4000,
						(nphy->rxcalparams & 0xFFFF),
						false, false);
				playtone = false;
			} else {
				b43_nphy_run_samples(dev, 160, 0xFFFF, 0,
							false, false);
			}

			if (ret == 0) {
				if (j < 3) {
					b43_nphy_rx_iq_est(dev, &est, 1024, 32,
								false);
					if (i == 0) {
						real = est.i0_pwr;
						imag = est.q0_pwr;
					} else {
						real = est.i1_pwr;
						imag = est.q1_pwr;
					}
					/* NOTE(review): power[] is written at
					 * index i (core) but read above with
					 * the probe index j — verify against
					 * vendor specs. */
					power[i] = ((real + imag) / 1024) + 1;
				} else {
					b43_nphy_calc_rx_iq_comp(dev, 1 << i);
				}
				b43_nphy_stop_playback(dev);
			}

			if (ret != 0)
				break;
		}

		/* Restore everything saved for this core. */
		b43_radio_mask(dev, B2055_C1_GENSPARE2, 0xFC);
		b43_radio_mask(dev, B2055_C2_GENSPARE2, 0xFC);
		b43_phy_write(dev, rfctl[1], tmp[5]);
		b43_phy_write(dev, rfctl[0], tmp[4]);
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp[3]);
		b43_phy_write(dev, afectl_core, tmp[2]);
		b43_phy_write(dev, B43_NPHY_RFSEQCA, tmp[1]);

		if (ret != 0)
			break;
	}

	b43_nphy_rf_control_override(dev, 0x400, 0, 3, true);
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);

	b43_nphy_stay_in_carrier_search(dev, 0);

	return ret;
}

/* RX IQ calibration for PHY rev 3+: not implemented, always fails. */
static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev,
			struct nphy_txgains target, u8 type, bool debug)
{
	return -1;
}

/* Dispatch RX IQ calibration to the revision-specific implementation. */
static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
			struct nphy_txgains target, u8 type, bool debug)
{
	if (dev->phy.rev >= 3)
		return b43_nphy_rev3_cal_rx_iq(dev, target, type, debug);
	else
		return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
}

/* Enable the RX chains given in @mask (one bit per core) through the RF
 * sequencer and force a RESET2RX so the new set takes effect. */
static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_n *nphy = phy->n;

	nphy->phyrxchain = mask;

	/* NOTE(review): early-return condition stubbed to 0 here. */
	if (0)
		return;

	b43_mac_suspend(dev);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, true);

	b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
			(mask & 0x3) << B43_NPHY_RFSEQCA_RXEN_SHIFT);

	if ((mask & 0x3) != 0x3) {
		b43_phy_write(dev, B43_NPHY_HPANT_SWTHRES, 1);
		if (dev->phy.rev >= 3) {
			/* TODO: not implemented */
		}
	} else {
		b43_phy_write(dev, B43_NPHY_HPANT_SWTHRES, 0x1E);
		if (dev->phy.rev >= 3) {
			/* TODO: not implemented */
		}
	}

	b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);

	if (nphy->hang_avoid)
		b43_nphy_stay_in_carrier_search(dev, false);

	b43_mac_enable(dev);
}

/* Upload the static PHY tables appropriate for this PHY revision. */
static void b43_nphy_tables_init(struct b43_wldev *dev)
{
	if (dev->phy.rev < 3)
		b43_nphy_rev0_1_2_tables_init(dev);
	else
		b43_nphy_rev3plus_tables_init(dev);
}

/* Select greenfield (preamble == 1) or mixed-mode preamble handling while
 * keeping auto detection enabled. */
static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
{
	u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);

	mimocfg |= B43_NPHY_MIMOCFG_AUTO;
	if (preamble == 1)
		mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
	else
		mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;

	b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
}

/* Initialize the B-PHY compatibility registers: two descending 16-entry
 * ramps (step 0x202) plus one fixed register. */
static void b43_nphy_bphy_init(struct b43_wldev *dev)
{
	unsigned int i;
	u16 val;

	val = 0x1E1F;
	for (i = 0; i < 16; i++) {
		b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
		val -= 0x202;
	}
	val = 0x3E3F;
	for (i = 0; i < 16; i++) {
		b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i),
val);
		val -= 0x202;
	}
	b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
}

/*
 * Antenna "superswitch" setup.  On rev3+ only some table writes would be
 * made (currently stubbed out); on older revisions the GPIO lines are
 * taken over from the chip-common core and, when @init is set, the TR
 * switch lookup registers are programmed.
 */
static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
{
	if (dev->phy.rev >= 3) {
		if (!init)
			return;
		/* NOTE(review): condition stubbed to 0 in this version. */
		if (0) {
			b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
			b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
			b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
			b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
		}
	} else {
		b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
		b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);

		/* Take over GPIO lines 0xFC00 from chip-common; the call
		 * depends on the underlying bus type. */
		switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
		case B43_BUS_BCMA:
			bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc,
						 0xFC00, 0xFC00);
			break;
#endif
#ifdef CONFIG_B43_SSB
		case B43_BUS_SSB:
			ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco,
						0xFC00, 0xFC00);
			break;
#endif
		}

		b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_GPOUTSMSK, 0);
		b43_maskset16(dev, B43_MMIO_GPIO_MASK, ~0, 0xFC00);
		b43_maskset16(dev, B43_MMIO_GPIO_CONTROL, (~0xFC00 & 0xFFFF),
			      0);

		if (init) {
			b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
			b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
			b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
			b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
		}
	}
}

/*
 * Main N-PHY initialization.
 * (The definition continues beyond this excerpt of the file.)
 */
int b43_phy_initn(struct b43_wldev *dev)
{
	struct ssb_sprom *sprom = dev->dev->bus_sprom;
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_n *nphy = phy->n;
	u8 tx_pwr_state;
	struct nphy_txgains target;
	u16 tmp;
	enum ieee80211_band tmp2;
	bool do_rssi_cal;

	u16 clip[2];
	bool do_cal = false;

	/* Chip-common control bit 0x40 for 2.4 GHz boards that advertise
	 * B43_BFL_EXTLNA (presumably enables the external LNA — confirm). */
	if ((dev->phy.rev >= 3) &&
	   (sprom->boardflags_lo & B43_BFL_EXTLNA) &&
	   (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
		switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
		case B43_BUS_BCMA:
			bcma_cc_set32(&dev->dev->bdev->bus->drv_cc,
				      BCMA_CC_CHIPCTL, 0x40);
			break;
#endif
#ifdef CONFIG_B43_SSB
		case B43_BUS_SSB:
			chipco_set32(&dev->dev->sdev->bus->chipco,
				     SSB_CHIPCO_CHIPCTL, 0x40);
			break;
#endif
		}
	}
	nphy->deaf_count = 0;
	b43_nphy_tables_init(dev);
	nphy->crsminpwr_adjusted = false;
	nphy->noisevars_adjusted =
false; if (dev->phy.rev >= 3) { b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, 0); b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0); b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, 0); b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, 0); } else { b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0); } b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0); b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0); if (dev->phy.rev < 6) { b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0); b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0); } b43_phy_mask(dev, B43_NPHY_RFSEQMODE, ~(B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER)); if (dev->phy.rev >= 3) b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, 0); b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0); if (dev->phy.rev <= 2) { tmp = (dev->phy.rev == 2) ? 0x3B : 0x40; b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~B43_NPHY_BPHY_CTL3_SCALE, tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT); } b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20); b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20); if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD || (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE && dev->dev->board_type == 0x8B)) b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0); else b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8); b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 0xC8); b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x50); b43_phy_write(dev, B43_NPHY_TXRIFS_FRDEL, 0x30); b43_nphy_update_mimo_config(dev, nphy->preamble_override); b43_nphy_update_txrx_chain(dev); if (phy->rev < 2) { b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8); b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4); } tmp2 = b43_current_band(dev->wl); if (b43_nphy_ipa(dev)) { b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1); b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F, nphy->papd_epsilon_offset[0] << 7); b43_phy_set(dev, B43_NPHY_PAPD_EN1, 0x1); b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ1, 0x007F, nphy->papd_epsilon_offset[1] << 7); b43_nphy_int_pa_set_tx_dig_filters(dev); } else if (phy->rev >= 5) { 
b43_nphy_ext_pa_set_tx_dig_filters(dev); } b43_nphy_workarounds(dev); b43_phy_force_clock(dev, 1); tmp = b43_phy_read(dev, B43_NPHY_BBCFG); b43_phy_write(dev, B43_NPHY_BBCFG, tmp | B43_NPHY_BBCFG_RSTCCA); b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA); b43_phy_force_clock(dev, 0); b43_mac_phy_clock_set(dev, true); b43_nphy_pa_override(dev, false); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); b43_nphy_pa_override(dev, true); b43_nphy_classifier(dev, 0, 0); b43_nphy_read_clip_detection(dev, clip); if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) b43_nphy_bphy_init(dev); tx_pwr_state = nphy->txpwrctrl; b43_nphy_tx_power_ctrl(dev, false); b43_nphy_tx_power_fix(dev); b43_nphy_tx_power_ctl_idle_tssi(dev); b43_nphy_tx_power_ctl_setup(dev); b43_nphy_tx_gain_table_upload(dev); if (nphy->phyrxchain != 3) b43_nphy_set_rx_core_state(dev, nphy->phyrxchain); if (nphy->mphase_cal_phase_id > 0) ; do_rssi_cal = false; if (phy->rev >= 3) { if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) do_rssi_cal = !nphy->rssical_chanspec_2G.center_freq; else do_rssi_cal = !nphy->rssical_chanspec_5G.center_freq; if (do_rssi_cal) b43_nphy_rssi_cal(dev); else b43_nphy_restore_rssi_cal(dev); } else { b43_nphy_rssi_cal(dev); } if (!((nphy->measure_hold & 0x6) != 0)) { if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) do_cal = !nphy->iqcal_chanspec_2G.center_freq; else do_cal = !nphy->iqcal_chanspec_5G.center_freq; if (nphy->mute) do_cal = false; if (do_cal) { target = b43_nphy_get_tx_gains(dev); if (nphy->antsel_type == 2) b43_nphy_superswitch_init(dev, true); if (nphy->perical != 2) { b43_nphy_rssi_cal(dev); if (phy->rev >= 3) { nphy->cal_orig_pwr_idx[0] = nphy->txpwrindex[0].index_internal; nphy->cal_orig_pwr_idx[1] = nphy->txpwrindex[1].index_internal; target = b43_nphy_get_tx_gains(dev); } if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0) 
b43_nphy_save_cal(dev); } else if (nphy->mphase_cal_phase_id == 0) ; } else { b43_nphy_restore_cal(dev); } } b43_nphy_tx_pwr_ctrl_coef_setup(dev); b43_nphy_tx_power_ctrl(dev, tx_pwr_state); b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015); b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320); if (phy->rev >= 3 && phy->rev <= 6) b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014); b43_nphy_tx_lp_fbw(dev); if (phy->rev >= 3) b43_nphy_spur_workaround(dev); return 0; } static void b43_chantab_phy_upload(struct b43_wldev *dev, const struct b43_phy_n_sfo_cfg *e) { b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a); b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2); b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3); b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4); b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5); b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6); } static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) { struct bcma_drv_cc __maybe_unused *cc; u32 __maybe_unused pmu_ctl; switch (dev->dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: cc = &dev->dev->bdev->bus->drv_cc; if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) { if (avoid) { bcma_chipco_pll_write(cc, 0x0, 0x11500010); bcma_chipco_pll_write(cc, 0x1, 0x000C0C06); bcma_chipco_pll_write(cc, 0x2, 0x0F600a08); bcma_chipco_pll_write(cc, 0x3, 0x00000000); bcma_chipco_pll_write(cc, 0x4, 0x2001E920); bcma_chipco_pll_write(cc, 0x5, 0x88888815); } else { bcma_chipco_pll_write(cc, 0x0, 0x11100010); bcma_chipco_pll_write(cc, 0x1, 0x000c0c06); bcma_chipco_pll_write(cc, 0x2, 0x03000a08); bcma_chipco_pll_write(cc, 0x3, 0x00000000); bcma_chipco_pll_write(cc, 0x4, 0x200005c0); bcma_chipco_pll_write(cc, 0x5, 0x88888815); } pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD; } else if (dev->dev->chip_id == 0x4716) { if (avoid) { bcma_chipco_pll_write(cc, 0x0, 0x11500060); bcma_chipco_pll_write(cc, 0x1, 0x080C0C06); bcma_chipco_pll_write(cc, 0x2, 0x0F600000); bcma_chipco_pll_write(cc, 0x3, 0x00000000); 
bcma_chipco_pll_write(cc, 0x4, 0x2001E924); bcma_chipco_pll_write(cc, 0x5, 0x88888815); } else { bcma_chipco_pll_write(cc, 0x0, 0x11100060); bcma_chipco_pll_write(cc, 0x1, 0x080c0c06); bcma_chipco_pll_write(cc, 0x2, 0x03000000); bcma_chipco_pll_write(cc, 0x3, 0x00000000); bcma_chipco_pll_write(cc, 0x4, 0x200005c0); bcma_chipco_pll_write(cc, 0x5, 0x88888815); } pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD | BCMA_CC_PMU_CTL_NOILPONW; } else if (dev->dev->chip_id == 0x4322 || dev->dev->chip_id == 0x4340 || dev->dev->chip_id == 0x4341) { bcma_chipco_pll_write(cc, 0x0, 0x11100070); bcma_chipco_pll_write(cc, 0x1, 0x1014140a); bcma_chipco_pll_write(cc, 0x5, 0x88888854); if (avoid) bcma_chipco_pll_write(cc, 0x2, 0x05201828); else bcma_chipco_pll_write(cc, 0x2, 0x05001828); pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD; } else { return; } bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl); break; #endif #ifdef CONFIG_B43_SSB case B43_BUS_SSB: break; #endif } } static void b43_nphy_channel_setup(struct b43_wldev *dev, const struct b43_phy_n_sfo_cfg *e, struct ieee80211_channel *new_channel) { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = dev->phy.n; int ch = new_channel->hw_value; u16 old_band_5ghz; u32 tmp32; old_band_5ghz = b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ; if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) { tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000); b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ); } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) { b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF); b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); } b43_chantab_phy_upload(dev, e); if (new_channel->hw_value == 14) { 
b43_nphy_classifier(dev, 2, 0); b43_phy_set(dev, B43_PHY_B_TEST, 0x0800); } else { b43_nphy_classifier(dev, 2, 2); if (new_channel->band == IEEE80211_BAND_2GHZ) b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840); } if (!nphy->txpwrctrl) b43_nphy_tx_power_fix(dev); if (dev->phy.rev < 3) b43_nphy_adjust_lna_gain_table(dev); b43_nphy_tx_lp_fbw(dev); if (dev->phy.rev >= 3 && dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) { bool avoid = false; if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) { avoid = true; } else if (!b43_channel_type_is_40mhz(phy->channel_type)) { if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14) avoid = true; } else { if (nphy->aband_spurwar_en && (ch == 38 || ch == 102 || ch == 118)) avoid = dev->dev->chip_id == 0x4716; } b43_nphy_pmu_spur_avoid(dev, avoid); if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) { b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, avoid ? 0x5341 : 0x8889); b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8); } if (dev->phy.rev == 3 || dev->phy.rev == 4) ; if (avoid) b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX); else b43_phy_mask(dev, B43_NPHY_BBCFG, ~B43_NPHY_BBCFG_RSTRX & 0xFFFF); b43_nphy_reset_cca(dev); } b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830); if (phy->rev >= 3) b43_nphy_spur_workaround(dev); } static int b43_nphy_set_channel(struct b43_wldev *dev, struct ieee80211_channel *channel, enum nl80211_channel_type channel_type) { struct b43_phy *phy = &dev->phy; const struct b43_nphy_channeltab_entry_rev2 *tabent_r2 = NULL; const struct b43_nphy_channeltab_entry_rev3 *tabent_r3 = NULL; u8 tmp; if (dev->phy.rev >= 3) { tabent_r3 = b43_nphy_get_chantabent_rev3(dev, channel->center_freq); if (!tabent_r3) return -ESRCH; } else { tabent_r2 = b43_nphy_get_chantabent_rev2(dev, channel->hw_value); if (!tabent_r2) return -ESRCH; } phy->channel = channel->hw_value; phy->channel_freq = channel->center_freq; if (b43_channel_type_is_40mhz(phy->channel_type) != 
b43_channel_type_is_40mhz(channel_type)) ; if (channel_type == NL80211_CHAN_HT40PLUS) b43_phy_set(dev, B43_NPHY_RXCTL, B43_NPHY_RXCTL_BSELU20); else if (channel_type == NL80211_CHAN_HT40MINUS) b43_phy_mask(dev, B43_NPHY_RXCTL, ~B43_NPHY_RXCTL_BSELU20); if (dev->phy.rev >= 3) { tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 4 : 0; b43_radio_maskset(dev, 0x08, 0xFFFB, tmp); b43_radio_2056_setup(dev, tabent_r3); b43_nphy_channel_setup(dev, &(tabent_r3->phy_regs), channel); } else { tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 0x0020 : 0x0050; b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp); b43_radio_2055_setup(dev, tabent_r2); b43_nphy_channel_setup(dev, &(tabent_r2->phy_regs), channel); } return 0; } static int b43_nphy_op_allocate(struct b43_wldev *dev) { struct b43_phy_n *nphy; nphy = kzalloc(sizeof(*nphy), GFP_KERNEL); if (!nphy) return -ENOMEM; dev->phy.n = nphy; return 0; } static void b43_nphy_op_prepare_structs(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; struct ssb_sprom *sprom = dev->dev->bus_sprom; memset(nphy, 0, sizeof(*nphy)); nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4); nphy->spur_avoid = (phy->rev >= 3) ? 
B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE; nphy->gain_boost = true; nphy->txrx_chain = 2; nphy->phyrxchain = 3; nphy->perical = 2; nphy->tx_pwr_idx[0] = 128; nphy->tx_pwr_idx[1] = 128; nphy->txpwrctrl = false; nphy->pwg_gain_5ghz = false; if (dev->phy.rev >= 3 || (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE && (dev->dev->core_rev == 11 || dev->dev->core_rev == 12))) { nphy->txpwrctrl = true; nphy->pwg_gain_5ghz = true; } else if (sprom->revision >= 4) { if (dev->phy.rev >= 2 && (sprom->boardflags2_lo & B43_BFL2_TXPWRCTRL_EN)) { nphy->txpwrctrl = true; #ifdef CONFIG_B43_SSB if (dev->dev->bus_type == B43_BUS_SSB && dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) { struct pci_dev *pdev = dev->dev->sdev->bus->host_pci; if (pdev->device == 0x4328 || pdev->device == 0x432a) nphy->pwg_gain_5ghz = true; } #endif } else if (sprom->boardflags2_lo & B43_BFL2_5G_PWRGAIN) { nphy->pwg_gain_5ghz = true; } } if (dev->phy.rev >= 3) { nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2; nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2; } } static void b43_nphy_op_free(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; kfree(nphy); phy->n = NULL; } static int b43_nphy_op_init(struct b43_wldev *dev) { return b43_phy_initn(dev); } static inline void check_phyreg(struct b43_wldev *dev, u16 offset) { #if B43_DEBUG if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) { b43err(dev->wl, "Invalid OFDM PHY access at " "0x%04X on N-PHY\n", offset); dump_stack(); } if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) { b43err(dev->wl, "Invalid EXT-G PHY access at " "0x%04X on N-PHY\n", offset); dump_stack(); } #endif } static u16 b43_nphy_op_read(struct b43_wldev *dev, u16 reg) { check_phyreg(dev, reg); b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); return b43_read16(dev, B43_MMIO_PHY_DATA); } static void b43_nphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) { check_phyreg(dev, reg); b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, 
B43_MMIO_PHY_DATA, value); } static void b43_nphy_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask, u16 set) { check_phyreg(dev, reg); b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); b43_maskset16(dev, B43_MMIO_PHY_DATA, mask, set); } static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg) { B43_WARN_ON(reg == 1); reg |= 0x100; b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); } static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) { B43_WARN_ON(reg == 1); b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); } static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, bool blocked) { if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED) b43err(dev->wl, "MAC not suspended\n"); if (blocked) { b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU); if (dev->phy.rev >= 3) { b43_radio_mask(dev, 0x09, ~0x2); b43_radio_write(dev, 0x204D, 0); b43_radio_write(dev, 0x2053, 0); b43_radio_write(dev, 0x2058, 0); b43_radio_write(dev, 0x205E, 0); b43_radio_mask(dev, 0x2062, ~0xF0); b43_radio_write(dev, 0x2064, 0); b43_radio_write(dev, 0x304D, 0); b43_radio_write(dev, 0x3053, 0); b43_radio_write(dev, 0x3058, 0); b43_radio_write(dev, 0x305E, 0); b43_radio_mask(dev, 0x3062, ~0xF0); b43_radio_write(dev, 0x3064, 0); } } else { if (dev->phy.rev >= 3) { b43_radio_init2056(dev); b43_switch_channel(dev, dev->phy.channel); } else { b43_radio_init2055(dev); } } } static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on) { u16 override = on ? 0x0 : 0x7FFF; u16 core = on ? 
0xD : 0x00FD;
	/* (continuation of b43_nphy_op_switch_analog: `core` is the value
	 * written to the AFECTL core registers, `override` to the AFECTL
	 * override registers; both were computed from `on` just above) */
	if (dev->phy.rev >= 3) {
		if (on) {
			/* On: core registers are written before their
			 * matching override registers. */
			b43_phy_write(dev, B43_NPHY_AFECTL_C1, core);
			b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, override);
			b43_phy_write(dev, B43_NPHY_AFECTL_C2, core);
			b43_phy_write(dev, B43_NPHY_AFECTL_OVER, override);
		} else {
			/* Off: the same register pairs, but override first.
			 * NOTE(review): the ordering looks intentional
			 * (hardware sequencing) — do not reorder. */
			b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, override);
			b43_phy_write(dev, B43_NPHY_AFECTL_C1, core);
			b43_phy_write(dev, B43_NPHY_AFECTL_OVER, override);
			b43_phy_write(dev, B43_NPHY_AFECTL_C2, core);
		}
	} else {
		/* Pre-rev3 PHYs only have the single override register. */
		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, override);
	}
}

/*
 * Validate the requested channel for the current band and tune to it.
 * Returns -EINVAL for out-of-range channels (2.4 GHz: 1-14, 5 GHz: <= 200),
 * otherwise the result of b43_nphy_set_channel().
 */
static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
				      unsigned int new_channel)
{
	struct ieee80211_channel *channel = dev->wl->hw->conf.channel;
	enum nl80211_channel_type channel_type =
		dev->wl->hw->conf.channel_type;

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		if ((new_channel < 1) || (new_channel > 14))
			return -EINVAL;
	} else {
		if (new_channel > 200)
			return -EINVAL;
	}

	return b43_nphy_set_channel(dev, channel, channel_type);
}

/* Default channel: 1 on 2.4 GHz, 36 on 5 GHz. */
static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev)
{
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
		return 1;
	return 36;
}

/* PHY operations vtable for N-PHY devices. */
const struct b43_phy_operations b43_phyops_n = {
	.allocate		= b43_nphy_op_allocate,
	.free			= b43_nphy_op_free,
	.prepare_structs	= b43_nphy_op_prepare_structs,
	.init			= b43_nphy_op_init,
	.phy_read		= b43_nphy_op_read,
	.phy_write		= b43_nphy_op_write,
	.phy_maskset		= b43_nphy_op_maskset,
	.radio_read		= b43_nphy_op_radio_read,
	.radio_write		= b43_nphy_op_radio_write,
	.software_rfkill	= b43_nphy_op_software_rfkill,
	.switch_analog		= b43_nphy_op_switch_analog,
	.switch_channel		= b43_nphy_op_switch_channel,
	.get_default_chan	= b43_nphy_op_get_default_chan,
	.recalc_txpower		= b43_nphy_op_recalc_txpower,
	.adjust_txpower		= b43_nphy_op_adjust_txpower,
};
gpl-2.0
janbobo/ppsspp
GPU/Common/TransformCommon.cpp
34
5655
// Copyright (c) 2013- PPSSPP Project. // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, version 2.0 or later versions. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License 2.0 for more details. // A copy of the GPL 2.0 should have been included with the program. // If not, see http://www.gnu.org/licenses/ // Official git repository and contact information can be found at // https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/. #include <stdio.h> #include "GPU/GPUState.h" #include "GPU/Common/TransformCommon.h" // Check for max first as clamping to max is more common than min when lighting. inline float clamp(float in, float min, float max) { return in > max ? max : (in < min ? min : in); } Lighter::Lighter(int vertType) { if (!gstate.isLightingEnabled()) return; doShadeMapping_ = gstate.getUVGenMode() == GE_TEXMAP_ENVIRONMENT_MAP; materialEmissive.GetFromRGB(gstate.materialemissive); materialEmissive.a = 0.0f; globalAmbient.GetFromRGB(gstate.ambientcolor); globalAmbient.GetFromA(gstate.ambientalpha); materialAmbient.GetFromRGB(gstate.materialambient); materialAmbient.GetFromA(gstate.materialalpha); materialDiffuse.GetFromRGB(gstate.materialdiffuse); materialDiffuse.a = 1.0f; materialSpecular.GetFromRGB(gstate.materialspecular); materialSpecular.a = 1.0f; specCoef_ = getFloat24(gstate.materialspecularcoef); // viewer_ = Vec3f(-gstate.viewMatrix[9], -gstate.viewMatrix[10], -gstate.viewMatrix[11]); bool hasColor = (vertType & GE_VTYPE_COL_MASK) != 0; materialUpdate_ = hasColor ? 
(gstate.materialupdate & 7) : 0; for (int l = 0; l < 4; l++) { lcutoff[l] = getFloat24(gstate.lcutoff[l]); lconv[l] = getFloat24(gstate.lconv[l]); int i = l * 3; if (gstate.isLightChanEnabled(l)) { lpos[i] = getFloat24(gstate.lpos[i]); lpos[i + 1] = getFloat24(gstate.lpos[i + 1]); lpos[i + 2] = getFloat24(gstate.lpos[i + 2]); ldir[i] = getFloat24(gstate.ldir[i]); ldir[i + 1] = getFloat24(gstate.ldir[i + 1]); ldir[i + 2] = getFloat24(gstate.ldir[i + 2]); latt[i] = getFloat24(gstate.latt[i]); latt[i + 1] = getFloat24(gstate.latt[i + 1]); latt[i + 2] = getFloat24(gstate.latt[i + 2]); for (int t = 0; t < 3; t++) { u32 data = gstate.lcolor[l * 3 + t] & 0xFFFFFF; float r = (float)(data & 0xff) * (1.0f / 255.0f); float g = (float)((data >> 8) & 0xff) * (1.0f / 255.0f); float b = (float)(data >> 16) * (1.0f / 255.0f); lcolor[t][l][0] = r; lcolor[t][l][1] = g; lcolor[t][l][2] = b; } } } } void Lighter::Light(float colorOut0[4], float colorOut1[4], const float colorIn[4], const Vec3f &pos, const Vec3f &norm) { Color4 in(colorIn); const Color4 *ambient; if (materialUpdate_ & 1) ambient = &in; else ambient = &materialAmbient; const Color4 *diffuse; if (materialUpdate_ & 2) diffuse = &in; else diffuse = &materialDiffuse; const Color4 *specular; if (materialUpdate_ & 4) specular = &in; else specular = &materialSpecular; Color4 lightSum0 = globalAmbient * *ambient + materialEmissive; Color4 lightSum1(0, 0, 0, 0); for (int l = 0; l < 4; l++) { // can we skip this light? 
if (!gstate.isLightChanEnabled(l)) continue; GELightType type = gstate.getLightType(l); Vec3f toLight(0, 0, 0); Vec3f lightDir(0, 0, 0); if (type == GE_LIGHTTYPE_DIRECTIONAL) toLight = Vec3f(&lpos[l * 3]); // lightdir is for spotlights else toLight = Vec3f(&lpos[l * 3]) - pos; bool doSpecular = gstate.isUsingSpecularLight(l); bool poweredDiffuse = gstate.isUsingPoweredDiffuseLight(l); float distanceToLight = toLight.Length(); float dot = 0.0f; float angle = 0.0f; float lightScale = 0.0f; if (distanceToLight > 0.0f) { toLight /= distanceToLight; dot = Dot(toLight, norm); } // Clamp dot to zero. if (dot < 0.0f) dot = 0.0f; if (poweredDiffuse) dot = powf(dot, specCoef_); // Attenuation switch (type) { case GE_LIGHTTYPE_DIRECTIONAL: lightScale = 1.0f; break; case GE_LIGHTTYPE_POINT: lightScale = clamp(1.0f / (latt[l * 3] + latt[l * 3 + 1] * distanceToLight + latt[l * 3 + 2] * distanceToLight*distanceToLight), 0.0f, 1.0f); break; case GE_LIGHTTYPE_SPOT: case GE_LIGHTTYPE_UNKNOWN: lightDir = Vec3f(&ldir[l * 3]); angle = Dot(toLight.Normalized(), lightDir.Normalized()); if (angle >= lcutoff[l]) lightScale = clamp(1.0f / (latt[l * 3] + latt[l * 3 + 1] * distanceToLight + latt[l * 3 + 2] * distanceToLight*distanceToLight), 0.0f, 1.0f) * powf(angle, lconv[l]); break; default: // ILLEGAL break; } Color4 lightDiff(lcolor[1][l], 0.0f); Color4 diff = (lightDiff * *diffuse) * dot; // Real PSP specular Vec3f toViewer(0, 0, 1); // Better specular // Vec3f toViewer = (viewer - pos).Normalized(); if (doSpecular) { Vec3f halfVec = (toLight + toViewer); halfVec.Normalize(); dot = Dot(halfVec, norm); if (dot > 0.0f) { Color4 lightSpec(lcolor[2][l], 0.0f); lightSum1 += (lightSpec * *specular * (powf(dot, specCoef_) * lightScale)); } } if (gstate.isLightChanEnabled(l)) { Color4 lightAmbient(lcolor[0][l], 0.0f); lightSum0 += (lightAmbient * *ambient + diff) * lightScale; } } // The colors must eventually be clamped, but we expect the caller to do that. 
for (int i = 0; i < 4; i++) { colorOut0[i] = lightSum0[i]; colorOut1[i] = lightSum1[i]; } }
gpl-2.0
GuneetAtwal/kernel_m8
fs/hpfs/dnode.c
34
29764
#include "hpfs_fn.h"

/*
 * Return the readdir position cookie ((dnode sector << 4) | index) of the
 * dirent @fde within dnode @d.  Indices start at 1; if @fde is not found,
 * log it and fall back to index 1 of this dnode.
 */
static loff_t get_pos(struct dnode *d, struct hpfs_dirent *fde)
{
	struct hpfs_dirent *de;
	struct hpfs_dirent *de_end = dnode_end_de(d);
	int i = 1;
	for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) {
		if (de == fde)
			return ((loff_t) le32_to_cpu(d->self) << 4) | (loff_t)i;
		i++;
	}
	printk("HPFS: get_pos: not_found\n");
	return ((loff_t)le32_to_cpu(d->self) << 4) | (loff_t)1;
}

/*
 * Register a readdir position pointer with the inode so that directory
 * modifications can fix it up.  The NULL-terminated pointer list grows in
 * chunks of 16 entries.  Duplicate registrations are ignored.
 */
void hpfs_add_pos(struct inode *inode, loff_t *pos)
{
	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
	int i = 0;
	loff_t **ppos;

	if (hpfs_inode->i_rddir_off)
		for (; hpfs_inode->i_rddir_off[i]; i++)
			if (hpfs_inode->i_rddir_off[i] == pos)
				return;
	if (!(i&0x0f)) {
		if (!(ppos = kmalloc((i+0x11) * sizeof(loff_t*), GFP_NOFS))) {
			printk("HPFS: out of memory for position list\n");
			return;
		}
		if (hpfs_inode->i_rddir_off) {
			/*
			 * Copy the i existing pointer entries.  The element
			 * size must be sizeof(loff_t *): the old code used
			 * sizeof(loff_t), which over-copies on 32-bit where
			 * sizeof(loff_t) (8) > sizeof(loff_t *) (4), reading
			 * past the old buffer and potentially overflowing
			 * the new one.
			 */
			memcpy(ppos, hpfs_inode->i_rddir_off, i * sizeof(loff_t *));
			kfree(hpfs_inode->i_rddir_off);
		}
		hpfs_inode->i_rddir_off = ppos;
	}
	hpfs_inode->i_rddir_off[i] = pos;
	hpfs_inode->i_rddir_off[i + 1] = NULL;
}

/*
 * Remove a previously registered readdir position pointer.  The last entry
 * is moved into the vacated slot; when the list becomes empty it is freed.
 */
void hpfs_del_pos(struct inode *inode, loff_t *pos)
{
	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
	loff_t **i, **j;

	if (!hpfs_inode->i_rddir_off) goto not_f;
	for (i = hpfs_inode->i_rddir_off; *i; i++)
		if (*i == pos)
			goto fnd;
	goto not_f;
	fnd:
	for (j = i + 1; *j; j++)
		;
	*i = *(j - 1);
	*(j - 1) = NULL;
	if (j - 1 == hpfs_inode->i_rddir_off) {
		kfree(hpfs_inode->i_rddir_off);
		hpfs_inode->i_rddir_off = NULL;
	}
	return;
	not_f:
	return;
}

/* Apply fixup @f(pos, p1, p2) to every registered readdir position. */
static void for_all_poss(struct inode *inode, void (*f)(loff_t *, loff_t, loff_t),
			 loff_t p1, loff_t p2)
{
	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
	loff_t **i;

	if (!hpfs_inode->i_rddir_off) return;
	for (i = hpfs_inode->i_rddir_off; *i; i++)
		(*f)(*i, p1, p2);
	return;
}

/* Position fixup: substitute position @f with @t. */
static void hpfs_pos_subst(loff_t *p, loff_t f, loff_t t)
{
	if (*p == f) *p = t;
}

/*
 * Position fixup: shift the in-dnode index (low 6 bits) up by @c for
 * positions at or after @d within the same dnode.
 */
static void hpfs_pos_ins(loff_t *p, loff_t d, loff_t c)
{
	if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) {
		int n = (*p & 0x3f) + c;
		if (n > 0x3f)
printk("HPFS: hpfs_pos_ins: %08x + %d\n", (int)*p, (int)c >> 8); else *p = (*p & ~0x3f) | n; } } static void hpfs_pos_del(loff_t *p, loff_t d, loff_t c) { if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) { int n = (*p & 0x3f) - c; if (n < 1) printk("HPFS: hpfs_pos_ins: %08x - %d\n", (int)*p, (int)c >> 8); else *p = (*p & ~0x3f) | n; } } static struct hpfs_dirent *dnode_pre_last_de(struct dnode *d) { struct hpfs_dirent *de, *de_end, *dee = NULL, *deee = NULL; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { deee = dee; dee = de; } return deee; } static struct hpfs_dirent *dnode_last_de(struct dnode *d) { struct hpfs_dirent *de, *de_end, *dee = NULL; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { dee = de; } return dee; } static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno ptr) { struct hpfs_dirent *de; if (!(de = dnode_last_de(d))) { hpfs_error(s, "set_last_pointer: empty dnode %08x", le32_to_cpu(d->self)); return; } if (hpfs_sb(s)->sb_chk) { if (de->down) { hpfs_error(s, "set_last_pointer: dnode %08x has already last pointer %08x", le32_to_cpu(d->self), de_down_pointer(de)); return; } if (le16_to_cpu(de->length) != 32) { hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", le32_to_cpu(d->self)); return; } } if (ptr) { d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + 4); if (le32_to_cpu(d->first_free) > 2048) { hpfs_error(s, "set_last_pointer: too long dnode %08x", le32_to_cpu(d->self)); d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - 4); return; } de->length = cpu_to_le16(36); de->down = 1; *(dnode_secno *)((char *)de + 32) = cpu_to_le32(ptr); } } struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, const unsigned char *name, unsigned namelen, secno down_ptr) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); unsigned d_size = de_size(namelen, down_ptr); for (de = 
dnode_first_de(d); de < de_end; de = de_next_de(de)) { int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last); if (!c) { hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, le32_to_cpu(d->self)); return NULL; } if (c < 0) break; } memmove((char *)de + d_size, de, (char *)de_end - (char *)de); memset(de, 0, d_size); if (down_ptr) { *(dnode_secno *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr); de->down = 1; } de->length = cpu_to_le16(d_size); de->not_8x3 = hpfs_is_name_long(name, namelen); de->namelen = namelen; memcpy(de->name, name, namelen); d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + d_size); return de; } static void hpfs_delete_de(struct super_block *s, struct dnode *d, struct hpfs_dirent *de) { if (de->last) { hpfs_error(s, "attempt to delete last dirent in dnode %08x", le32_to_cpu(d->self)); return; } d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - le16_to_cpu(de->length)); memmove(de, de_next_de(de), le32_to_cpu(d->first_free) + (char *)d - (char *)de); } static void fix_up_ptrs(struct super_block *s, struct dnode *d) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); dnode_secno dno = le32_to_cpu(d->self); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) if (de->down) { struct quad_buffer_head qbh; struct dnode *dd; if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) { if (le32_to_cpu(dd->up) != dno || dd->root_dnode) { dd->up = cpu_to_le32(dno); dd->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); } hpfs_brelse4(&qbh); } } } static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, const unsigned char *name, unsigned namelen, struct hpfs_dirent *new_de, dnode_secno down_ptr) { struct quad_buffer_head qbh, qbh1, qbh2; struct dnode *d, *ad, *rd, *nd = NULL; dnode_secno adno, rdno; struct hpfs_dirent *de; struct hpfs_dirent nde; unsigned char *nname; int h; int pos; struct buffer_head *bh; struct fnode *fnode; int c1, c2 = 0; if (!(nname = 
kmalloc(256, GFP_NOFS))) { printk("HPFS: out of memory, can't add to dnode\n"); return 1; } go_up: if (namelen >= 256) { hpfs_error(i->i_sb, "hpfs_add_to_dnode: namelen == %d", namelen); kfree(nd); kfree(nname); return 1; } if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) { kfree(nd); kfree(nname); return 1; } go_up_a: if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_to_dnode")) { hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 1; } if (le32_to_cpu(d->first_free) + de_size(namelen, down_ptr) <= 2048) { loff_t t; copy_de(de=hpfs_add_de(i->i_sb, d, name, namelen, down_ptr), new_de); t = get_pos(d, de); for_all_poss(i, hpfs_pos_ins, t, 1); for_all_poss(i, hpfs_pos_subst, 4, t); for_all_poss(i, hpfs_pos_subst, 5, t + 1); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 0; } if (!nd) if (!(nd = kmalloc(0x924, GFP_NOFS))) { printk("HPFS: out of memory for dnode splitting\n"); hpfs_brelse4(&qbh); kfree(nname); return 1; } memcpy(nd, d, le32_to_cpu(d->first_free)); copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de); for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1); h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10; if (!(ad = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &adno, &qbh1))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 1; } i->i_size += 2048; i->i_blocks += 4; pos = 1; for (de = dnode_first_de(nd); (char *)de_next_de(de) - (char *)nd < h; de = de_next_de(de)) { copy_de(hpfs_add_de(i->i_sb, ad, de->name, de->namelen, de->down ? de_down_pointer(de) : 0), de); for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, ((loff_t)adno << 4) | pos); pos++; } copy_de(new_de = &nde, de); memcpy(nname, de->name, de->namelen); name = nname; namelen = de->namelen; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4); down_ptr = adno; set_last_pointer(i->i_sb, ad, de->down ? 
de_down_pointer(de) : 0); de = de_next_de(de); memmove((char *)nd + 20, de, le32_to_cpu(nd->first_free) + (char *)nd - (char *)de); nd->first_free = cpu_to_le32(le32_to_cpu(nd->first_free) - ((char *)de - (char *)nd - 20)); memcpy(d, nd, le32_to_cpu(nd->first_free)); for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos); fix_up_ptrs(i->i_sb, ad); if (!d->root_dnode) { ad->up = d->up; dno = le32_to_cpu(ad->up); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); goto go_up; } if (!(rd = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &rdno, &qbh2))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); kfree(nd); kfree(nname); return 1; } i->i_size += 2048; i->i_blocks += 4; rd->root_dnode = 1; rd->up = d->up; if (!(fnode = hpfs_map_fnode(i->i_sb, le32_to_cpu(d->up), &bh))) { hpfs_free_dnode(i->i_sb, rdno); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); hpfs_brelse4(&qbh2); kfree(nd); kfree(nname); return 1; } fnode->u.external[0].disk_secno = cpu_to_le32(rdno); mark_buffer_dirty(bh); brelse(bh); hpfs_i(i)->i_dno = rdno; d->up = ad->up = cpu_to_le32(rdno); d->root_dnode = ad->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); qbh = qbh2; set_last_pointer(i->i_sb, rd, dno); dno = rdno; d = rd; goto go_up_a; } int hpfs_add_dirent(struct inode *i, const unsigned char *name, unsigned namelen, struct hpfs_dirent *new_de) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct dnode *d; struct hpfs_dirent *de, *de_end; struct quad_buffer_head qbh; dnode_secno dno; int c; int c1, c2 = 0; dno = hpfs_inode->i_dno; down: if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_dirent")) return 1; if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 1; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { if (!(c = 
hpfs_compare_names(i->i_sb, name, namelen, de->name, de->namelen, de->last))) { hpfs_brelse4(&qbh); return -1; } if (c < 0) { if (de->down) { dno = de_down_pointer(de); hpfs_brelse4(&qbh); goto down; } break; } } hpfs_brelse4(&qbh); if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_ADD)) { c = 1; goto ret; } i->i_version++; c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0); ret: return c; } static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) { dnode_secno dno, ddno; dnode_secno chk_up = to; struct dnode *dnode; struct quad_buffer_head qbh; struct hpfs_dirent *de, *nde; int a; loff_t t; int c1, c2 = 0; dno = from; while (1) { if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "move_to_top")) return 0; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 0; if (hpfs_sb(i->i_sb)->sb_chk) { if (le32_to_cpu(dnode->up) != chk_up) { hpfs_error(i->i_sb, "move_to_top: up pointer from %08x should be %08x, is %08x", dno, chk_up, le32_to_cpu(dnode->up)); hpfs_brelse4(&qbh); return 0; } chk_up = dno; } if (!(de = dnode_last_de(dnode))) { hpfs_error(i->i_sb, "move_to_top: dnode %08x has no last de", dno); hpfs_brelse4(&qbh); return 0; } if (!de->down) break; dno = de_down_pointer(de); hpfs_brelse4(&qbh); } while (!(de = dnode_pre_last_de(dnode))) { dnode_secno up = le32_to_cpu(dnode->up); hpfs_brelse4(&qbh); hpfs_free_dnode(i->i_sb, dno); i->i_size -= 2048; i->i_blocks -= 4; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, 5); if (up == to) return to; if (!(dnode = hpfs_map_dnode(i->i_sb, up, &qbh))) return 0; if (dnode->root_dnode) { hpfs_error(i->i_sb, "move_to_top: got to root_dnode while moving from %08x to %08x", from, to); hpfs_brelse4(&qbh); return 0; } de = dnode_last_de(dnode); if (!de || !de->down) { hpfs_error(i->i_sb, "move_to_top: dnode %08x doesn't point down to %08x", up, dno); hpfs_brelse4(&qbh); return 0; } dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4); de->length = 
cpu_to_le16(le16_to_cpu(de->length) - 4); de->down = 0; hpfs_mark_4buffers_dirty(&qbh); dno = up; } t = get_pos(dnode, de); for_all_poss(i, hpfs_pos_subst, t, 4); for_all_poss(i, hpfs_pos_subst, t + 1, 5); if (!(nde = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { hpfs_error(i->i_sb, "out of memory for dirent - directory will be corrupted"); hpfs_brelse4(&qbh); return 0; } memcpy(nde, de, le16_to_cpu(de->length)); ddno = de->down ? de_down_pointer(de) : 0; hpfs_delete_de(i->i_sb, dnode, de); set_last_pointer(i->i_sb, dnode, ddno); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); a = hpfs_add_to_dnode(i, to, nde->name, nde->namelen, nde, from); kfree(nde); if (a) return 0; return dno; } static void delete_empty_dnode(struct inode *i, dnode_secno dno) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct quad_buffer_head qbh; struct dnode *dnode; dnode_secno down, up, ndown; int p; struct hpfs_dirent *de; int c1, c2 = 0; try_it_again: if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "delete_empty_dnode")) return; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return; if (le32_to_cpu(dnode->first_free) > 56) goto end; if (le32_to_cpu(dnode->first_free) == 52 || le32_to_cpu(dnode->first_free) == 56) { struct hpfs_dirent *de_end; int root = dnode->root_dnode; up = le32_to_cpu(dnode->up); de = dnode_first_de(dnode); down = de->down ? 
de_down_pointer(de) : 0; if (hpfs_sb(i->i_sb)->sb_chk) if (root && !down) { hpfs_error(i->i_sb, "delete_empty_dnode: root dnode %08x is empty", dno); goto end; } hpfs_brelse4(&qbh); hpfs_free_dnode(i->i_sb, dno); i->i_size -= 2048; i->i_blocks -= 4; if (root) { struct fnode *fnode; struct buffer_head *bh; struct dnode *d1; struct quad_buffer_head qbh1; if (hpfs_sb(i->i_sb)->sb_chk) if (up != i->i_ino) { hpfs_error(i->i_sb, "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08lx", dno, up, (unsigned long)i->i_ino); return; } if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { d1->up = cpu_to_le32(up); d1->root_dnode = 1; hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } if ((fnode = hpfs_map_fnode(i->i_sb, up, &bh))) { fnode->u.external[0].disk_secno = cpu_to_le32(down); mark_buffer_dirty(bh); brelse(bh); } hpfs_inode->i_dno = down; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, (loff_t) 12); return; } if (!(dnode = hpfs_map_dnode(i->i_sb, up, &qbh))) return; p = 1; de_end = dnode_end_de(dnode); for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de), p++) if (de->down) if (de_down_pointer(de) == dno) goto fnd; hpfs_error(i->i_sb, "delete_empty_dnode: pointer to dnode %08x not found in dnode %08x", dno, up); goto end; fnd: for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p); if (!down) { de->down = 0; de->length = cpu_to_le16(le16_to_cpu(de->length) - 4); dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4); memmove(de_next_de(de), (char *)de_next_de(de) + 4, (char *)dnode + le32_to_cpu(dnode->first_free) - (char *)de_next_de(de)); } else { struct dnode *d1; struct quad_buffer_head qbh1; *(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4) = down; if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { d1->up = cpu_to_le32(up); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } } } else { hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, 
le32_to_cpu(dnode->first_free)); goto end; } if (!de->last) { struct hpfs_dirent *de_next = de_next_de(de); struct hpfs_dirent *de_cp; struct dnode *d1; struct quad_buffer_head qbh1; if (!de_next->down) goto endm; ndown = de_down_pointer(de_next); if (!(de_cp = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); goto endm; } memcpy(de_cp, de, le16_to_cpu(de->length)); hpfs_delete_de(i->i_sb, dnode, de); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4); for_all_poss(i, hpfs_pos_del, ((loff_t)up << 4) | p, 1); if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) { d1->up = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } hpfs_add_to_dnode(i, ndown, de_cp->name, de_cp->namelen, de_cp, de_cp->down ? de_down_pointer(de_cp) : 0); dno = up; kfree(de_cp); goto try_it_again; } else { struct hpfs_dirent *de_prev = dnode_pre_last_de(dnode); struct hpfs_dirent *de_cp; struct dnode *d1; struct quad_buffer_head qbh1; dnode_secno dlp; if (!de_prev) { hpfs_error(i->i_sb, "delete_empty_dnode: empty dnode %08x", up); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); dno = up; goto try_it_again; } if (!de_prev->down) goto endm; ndown = de_down_pointer(de_prev); if ((d1 = hpfs_map_dnode(i->i_sb, ndown, &qbh1))) { struct hpfs_dirent *del = dnode_last_de(d1); dlp = del->down ? 
de_down_pointer(del) : 0; if (!dlp && down) { if (le32_to_cpu(d1->first_free) > 2044) { if (hpfs_sb(i->i_sb)->sb_chk >= 2) { printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); printk("HPFS: warning: terminating balancing operation\n"); } hpfs_brelse4(&qbh1); goto endm; } if (hpfs_sb(i->i_sb)->sb_chk >= 2) { printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); printk("HPFS: warning: goin'on\n"); } del->length = cpu_to_le16(le16_to_cpu(del->length) + 4); del->down = 1; d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) + 4); } if (dlp && !down) { del->length = cpu_to_le16(le16_to_cpu(del->length) - 4); del->down = 0; d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4); } else if (down) *(dnode_secno *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down); } else goto endm; if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); hpfs_brelse4(&qbh1); goto endm; } hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); memcpy(de_cp, de_prev, le16_to_cpu(de_prev->length)); hpfs_delete_de(i->i_sb, dnode, de_prev); if (!de_prev->down) { de_prev->length = cpu_to_le16(le16_to_cpu(de_prev->length) + 4); de_prev->down = 1; dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4); } *(dnode_secno *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1)); if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) { d1->up = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } hpfs_add_to_dnode(i, ndown, de_cp->name, de_cp->namelen, de_cp, dlp); dno = up; kfree(de_cp); goto try_it_again; } endm: hpfs_mark_4buffers_dirty(&qbh); end: hpfs_brelse4(&qbh); } int 
hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de, struct quad_buffer_head *qbh, int depth) { struct dnode *dnode = qbh->data; dnode_secno down = 0; loff_t t; if (de->first || de->last) { hpfs_error(i->i_sb, "hpfs_remove_dirent: attempt to delete first or last dirent in dnode %08x", dno); hpfs_brelse4(qbh); return 1; } if (de->down) down = de_down_pointer(de); if (depth && (de->down || (de == dnode_first_de(dnode) && de_next_de(de)->last))) { if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_DEL)) { hpfs_brelse4(qbh); return 2; } } i->i_version++; for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1); hpfs_delete_de(i->i_sb, dnode, de); hpfs_mark_4buffers_dirty(qbh); hpfs_brelse4(qbh); if (down) { dnode_secno a = move_to_top(i, down, dno); for_all_poss(i, hpfs_pos_subst, 5, t); if (a) delete_empty_dnode(i, a); return !a; } delete_empty_dnode(i, dno); return 0; } void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes, int *n_subdirs, int *n_items) { struct dnode *dnode; struct quad_buffer_head qbh; struct hpfs_dirent *de; dnode_secno ptr, odno = 0; int c1, c2 = 0; int d1, d2 = 0; go_down: if (n_dnodes) (*n_dnodes)++; if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, dno, &c1, &c2, "hpfs_count_dnodes #1")) return; ptr = 0; go_up: if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && le32_to_cpu(dnode->up) != odno) hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, le32_to_cpu(dnode->up)); de = dnode_first_de(dnode); if (ptr) while(1) { if (de->down) if (de_down_pointer(de) == ptr) goto process_de; if (de->last) { hpfs_brelse4(&qbh); hpfs_error(s, "hpfs_count_dnodes: pointer to dnode %08x not found in dnode %08x, got here from %08x", ptr, dno, odno); return; } de = de_next_de(de); } next_de: if (de->down) { odno = dno; dno = de_down_pointer(de); hpfs_brelse4(&qbh); goto go_down; } process_de: if (!de->first && !de->last 
&& de->directory && n_subdirs) (*n_subdirs)++; if (!de->first && !de->last && n_items) (*n_items)++; if ((de = de_next_de(de)) < dnode_end_de(dnode)) goto next_de; ptr = dno; dno = le32_to_cpu(dnode->up); if (dnode->root_dnode) { hpfs_brelse4(&qbh); return; } hpfs_brelse4(&qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ptr, &d1, &d2, "hpfs_count_dnodes #2")) return; odno = -1; goto go_up; } static struct hpfs_dirent *map_nth_dirent(struct super_block *s, dnode_secno dno, int n, struct quad_buffer_head *qbh, struct dnode **dn) { int i; struct hpfs_dirent *de, *de_end; struct dnode *dnode; dnode = hpfs_map_dnode(s, dno, qbh); if (!dnode) return NULL; if (dn) *dn=dnode; de = dnode_first_de(dnode); de_end = dnode_end_de(dnode); for (i = 1; de < de_end; i++, de = de_next_de(de)) { if (i == n) { return de; } if (de->last) break; } hpfs_brelse4(qbh); hpfs_error(s, "map_nth_dirent: n too high; dnode = %08x, requested %08x", dno, n); return NULL; } dnode_secno hpfs_de_as_down_as_possible(struct super_block *s, dnode_secno dno) { struct quad_buffer_head qbh; dnode_secno d = dno; dnode_secno up = 0; struct hpfs_dirent *de; int c1, c2 = 0; again: if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, d, &c1, &c2, "hpfs_de_as_down_as_possible")) return d; if (!(de = map_nth_dirent(s, d, 1, &qbh, NULL))) return dno; if (hpfs_sb(s)->sb_chk) if (up && le32_to_cpu(((struct dnode *)qbh.data)->up) != up) hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, le32_to_cpu(((struct dnode *)qbh.data)->up)); if (!de->down) { hpfs_brelse4(&qbh); return d; } up = d; d = de_down_pointer(de); hpfs_brelse4(&qbh); goto again; } struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp, struct quad_buffer_head *qbh) { loff_t pos; unsigned c; dnode_secno dno; struct hpfs_dirent *de, *d; struct hpfs_dirent *up_de; struct hpfs_dirent *end_up_de; struct dnode *dnode; struct dnode *up_dnode; struct quad_buffer_head qbh0; pos = *posp; dno = 
pos >> 6 << 2; pos &= 077; if (!(de = map_nth_dirent(inode->i_sb, dno, pos, qbh, &dnode))) goto bail; if ((d = de_next_de(de)) < dnode_end_de(dnode)) { if (!(++*posp & 077)) { hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; pos = %08llx", (unsigned long long)*posp); goto bail; } if (d->down) { *posp = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, de_down_pointer(d)) << 4) + 1; } return de; } if (dnode->root_dnode) goto bail; if (!(up_dnode = hpfs_map_dnode(inode->i_sb, le32_to_cpu(dnode->up), &qbh0))) goto bail; end_up_de = dnode_end_de(up_dnode); c = 0; for (up_de = dnode_first_de(up_dnode); up_de < end_up_de; up_de = de_next_de(up_de)) { if (!(++c & 077)) hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", le32_to_cpu(dnode->up)); if (up_de->down && de_down_pointer(up_de) == dno) { *posp = ((loff_t) le32_to_cpu(dnode->up) << 4) + c; hpfs_brelse4(&qbh0); return de; } } hpfs_error(inode->i_sb, "map_pos_dirent: pointer to dnode %08x not found in parent dnode %08x", dno, le32_to_cpu(dnode->up)); hpfs_brelse4(&qbh0); bail: *posp = 12; return de; } struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno, const unsigned char *name, unsigned len, dnode_secno *dd, struct quad_buffer_head *qbh) { struct dnode *dnode; struct hpfs_dirent *de; struct hpfs_dirent *de_end; int c1, c2 = 0; if (!S_ISDIR(inode->i_mode)) hpfs_error(inode->i_sb, "map_dirent: not a directory\n"); again: if (hpfs_sb(inode->i_sb)->sb_chk) if (hpfs_stop_cycles(inode->i_sb, dno, &c1, &c2, "map_dirent")) return NULL; if (!(dnode = hpfs_map_dnode(inode->i_sb, dno, qbh))) return NULL; de_end = dnode_end_de(dnode); for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de)) { int t = hpfs_compare_names(inode->i_sb, name, len, de->name, de->namelen, de->last); if (!t) { if (dd) *dd = dno; return de; } if (t < 0) { if (de->down) { dno = de_down_pointer(de); hpfs_brelse4(qbh); goto again; } break; } } hpfs_brelse4(qbh); return NULL; } 
void hpfs_remove_dtree(struct super_block *s, dnode_secno dno) { struct quad_buffer_head qbh; struct dnode *dnode; struct hpfs_dirent *de; dnode_secno d1, d2, rdno = dno; while (1) { if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; de = dnode_first_de(dnode); if (de->last) { if (de->down) d1 = de_down_pointer(de); else goto error; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); dno = d1; } else break; } if (!de->first) goto error; d1 = de->down ? de_down_pointer(de) : 0; de = de_next_de(de); if (!de->last) goto error; d2 = de->down ? de_down_pointer(de) : 0; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); do { while (d1) { if (!(dnode = hpfs_map_dnode(s, dno = d1, &qbh))) return; de = dnode_first_de(dnode); if (!de->last) goto error; d1 = de->down ? de_down_pointer(de) : 0; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); } d1 = d2; d2 = 0; } while (d1); return; error: hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); hpfs_error(s, "directory %08x is corrupted or not empty", rdno); } struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, struct fnode *f, struct quad_buffer_head *qbh) { unsigned char *name1; unsigned char *name2; int name1len, name2len; struct dnode *d; dnode_secno dno, downd; struct fnode *upf; struct buffer_head *bh; struct hpfs_dirent *de, *de_end; int c; int c1, c2 = 0; int d1, d2 = 0; name1 = f->name; if (!(name2 = kmalloc(256, GFP_NOFS))) { printk("HPFS: out of memory, can't map dirent\n"); return NULL; } if (f->len <= 15) memcpy(name2, name1, name1len = name2len = f->len); else { memcpy(name2, name1, 15); memset(name2 + 15, 0xff, 256 - 15); name1len = 15; name2len = 256; } if (!(upf = hpfs_map_fnode(s, le32_to_cpu(f->up), &bh))) { kfree(name2); return NULL; } if (!upf->dirflag) { brelse(bh); hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up)); kfree(name2); return NULL; } dno = le32_to_cpu(upf->u.external[0].disk_secno); brelse(bh); go_down: downd = 0; go_up: if (!(d = hpfs_map_dnode(s, dno, qbh))) { 
kfree(name2); return NULL; } de_end = dnode_end_de(d); de = dnode_first_de(d); if (downd) { while (de < de_end) { if (de->down) if (de_down_pointer(de) == downd) goto f; de = de_next_de(de); } hpfs_error(s, "pointer to dnode %08x not found in dnode %08x", downd, dno); hpfs_brelse4(qbh); kfree(name2); return NULL; } next_de: if (le32_to_cpu(de->fnode) == fno) { kfree(name2); return de; } c = hpfs_compare_names(s, name1, name1len, de->name, de->namelen, de->last); if (c < 0 && de->down) { dno = de_down_pointer(de); hpfs_brelse4(qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, dno, &c1, &c2, "map_fnode_dirent #1")) { kfree(name2); return NULL; } goto go_down; } f: if (le32_to_cpu(de->fnode) == fno) { kfree(name2); return de; } c = hpfs_compare_names(s, name2, name2len, de->name, de->namelen, de->last); if (c < 0 && !de->last) goto not_found; if ((de = de_next_de(de)) < de_end) goto next_de; if (d->root_dnode) goto not_found; downd = dno; dno = le32_to_cpu(d->up); hpfs_brelse4(qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) { kfree(name2); return NULL; } goto go_up; not_found: hpfs_brelse4(qbh); hpfs_error(s, "dirent for fnode %08x not found", fno); kfree(name2); return NULL; }
gpl-2.0
jfdsmabalot/kernel_sense_m8
drivers/misc/ibmasm/ibmasmfs.c
34
13547
/* * IBM ASM Service Processor Device Driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2004 * * Author: Max Asböck <amax@us.ibm.com> * */ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/io.h> #include "ibmasm.h" #include "remote.h" #include "dot_command.h" #define IBMASMFS_MAGIC 0x66726f67 static LIST_HEAD(service_processors); static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode); static void ibmasmfs_create_files (struct super_block *sb); static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent); static struct dentry *ibmasmfs_mount(struct file_system_type *fst, int flags, const char *name, void *data) { return mount_single(fst, flags, data, ibmasmfs_fill_super); } static const struct super_operations ibmasmfs_s_ops = { .statfs = simple_statfs, .drop_inode = generic_delete_inode, }; static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations; static struct file_system_type ibmasmfs_type = { .owner = THIS_MODULE, .name = "ibmasmfs", .mount = ibmasmfs_mount, .kill_sb = kill_litter_super, }; static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent) { struct inode *root; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = 
PAGE_CACHE_SHIFT; sb->s_magic = IBMASMFS_MAGIC; sb->s_op = &ibmasmfs_s_ops; sb->s_time_gran = 1; root = ibmasmfs_make_inode (sb, S_IFDIR | 0500); if (!root) return -ENOMEM; root->i_op = &simple_dir_inode_operations; root->i_fop = ibmasmfs_dir_ops; sb->s_root = d_make_root(root); if (!sb->s_root) return -ENOMEM; ibmasmfs_create_files(sb); return 0; } static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode) { struct inode *ret = new_inode(sb); if (ret) { ret->i_ino = get_next_ino(); ret->i_mode = mode; ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; } return ret; } static struct dentry *ibmasmfs_create_file (struct super_block *sb, struct dentry *parent, const char *name, const struct file_operations *fops, void *data, int mode) { struct dentry *dentry; struct inode *inode; dentry = d_alloc_name(parent, name); if (!dentry) return NULL; inode = ibmasmfs_make_inode(sb, S_IFREG | mode); if (!inode) { dput(dentry); return NULL; } inode->i_fop = fops; inode->i_private = data; d_add(dentry, inode); return dentry; } static struct dentry *ibmasmfs_create_dir (struct super_block *sb, struct dentry *parent, const char *name) { struct dentry *dentry; struct inode *inode; dentry = d_alloc_name(parent, name); if (!dentry) return NULL; inode = ibmasmfs_make_inode(sb, S_IFDIR | 0500); if (!inode) { dput(dentry); return NULL; } inode->i_op = &simple_dir_inode_operations; inode->i_fop = ibmasmfs_dir_ops; d_add(dentry, inode); return dentry; } int ibmasmfs_register(void) { return register_filesystem(&ibmasmfs_type); } void ibmasmfs_unregister(void) { unregister_filesystem(&ibmasmfs_type); } void ibmasmfs_add_sp(struct service_processor *sp) { list_add(&sp->node, &service_processors); } struct ibmasmfs_command_data { struct service_processor *sp; struct command *command; }; struct ibmasmfs_event_data { struct service_processor *sp; struct event_reader reader; int active; }; struct ibmasmfs_heartbeat_data { struct service_processor *sp; struct 
reverse_heartbeat heartbeat; int active; }; static int command_file_open(struct inode *inode, struct file *file) { struct ibmasmfs_command_data *command_data; if (!inode->i_private) return -ENODEV; command_data = kmalloc(sizeof(struct ibmasmfs_command_data), GFP_KERNEL); if (!command_data) return -ENOMEM; command_data->command = NULL; command_data->sp = inode->i_private; file->private_data = command_data; return 0; } static int command_file_close(struct inode *inode, struct file *file) { struct ibmasmfs_command_data *command_data = file->private_data; if (command_data->command) command_put(command_data->command); kfree(command_data); return 0; } static ssize_t command_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { struct ibmasmfs_command_data *command_data = file->private_data; struct command *cmd; int len; unsigned long flags; if (*offset < 0) return -EINVAL; if (count == 0 || count > IBMASM_CMD_MAX_BUFFER_SIZE) return 0; if (*offset != 0) return 0; spin_lock_irqsave(&command_data->sp->lock, flags); cmd = command_data->command; if (cmd == NULL) { spin_unlock_irqrestore(&command_data->sp->lock, flags); return 0; } command_data->command = NULL; spin_unlock_irqrestore(&command_data->sp->lock, flags); if (cmd->status != IBMASM_CMD_COMPLETE) { command_put(cmd); return -EIO; } len = min(count, cmd->buffer_size); if (copy_to_user(buf, cmd->buffer, len)) { command_put(cmd); return -EFAULT; } command_put(cmd); return len; } static ssize_t command_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) { struct ibmasmfs_command_data *command_data = file->private_data; struct command *cmd; unsigned long flags; if (*offset < 0) return -EINVAL; if (count == 0 || count > IBMASM_CMD_MAX_BUFFER_SIZE) return 0; if (*offset != 0) return 0; if (command_data->command) return -EAGAIN; cmd = ibmasm_new_command(command_data->sp, count); if (!cmd) return -ENOMEM; if (copy_from_user(cmd->buffer, ubuff, count)) { 
command_put(cmd); return -EFAULT; } spin_lock_irqsave(&command_data->sp->lock, flags); if (command_data->command) { spin_unlock_irqrestore(&command_data->sp->lock, flags); command_put(cmd); return -EAGAIN; } command_data->command = cmd; spin_unlock_irqrestore(&command_data->sp->lock, flags); ibmasm_exec_command(command_data->sp, cmd); ibmasm_wait_for_response(cmd, get_dot_command_timeout(cmd->buffer)); return count; } static int event_file_open(struct inode *inode, struct file *file) { struct ibmasmfs_event_data *event_data; struct service_processor *sp; if (!inode->i_private) return -ENODEV; sp = inode->i_private; event_data = kmalloc(sizeof(struct ibmasmfs_event_data), GFP_KERNEL); if (!event_data) return -ENOMEM; ibmasm_event_reader_register(sp, &event_data->reader); event_data->sp = sp; event_data->active = 0; file->private_data = event_data; return 0; } static int event_file_close(struct inode *inode, struct file *file) { struct ibmasmfs_event_data *event_data = file->private_data; ibmasm_event_reader_unregister(event_data->sp, &event_data->reader); kfree(event_data); return 0; } static ssize_t event_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { struct ibmasmfs_event_data *event_data = file->private_data; struct event_reader *reader = &event_data->reader; struct service_processor *sp = event_data->sp; int ret; unsigned long flags; if (*offset < 0) return -EINVAL; if (count == 0 || count > IBMASM_EVENT_MAX_SIZE) return 0; if (*offset != 0) return 0; spin_lock_irqsave(&sp->lock, flags); if (event_data->active) { spin_unlock_irqrestore(&sp->lock, flags); return -EBUSY; } event_data->active = 1; spin_unlock_irqrestore(&sp->lock, flags); ret = ibmasm_get_next_event(sp, reader); if (ret <= 0) goto out; if (count < reader->data_size) { ret = -EINVAL; goto out; } if (copy_to_user(buf, reader->data, reader->data_size)) { ret = -EFAULT; goto out; } ret = reader->data_size; out: event_data->active = 0; return ret; } static ssize_t 
event_file_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { struct ibmasmfs_event_data *event_data = file->private_data; if (*offset < 0) return -EINVAL; if (count != 1) return 0; if (*offset != 0) return 0; ibmasm_cancel_next_event(&event_data->reader); return 0; } static int r_heartbeat_file_open(struct inode *inode, struct file *file) { struct ibmasmfs_heartbeat_data *rhbeat; if (!inode->i_private) return -ENODEV; rhbeat = kmalloc(sizeof(struct ibmasmfs_heartbeat_data), GFP_KERNEL); if (!rhbeat) return -ENOMEM; rhbeat->sp = inode->i_private; rhbeat->active = 0; ibmasm_init_reverse_heartbeat(rhbeat->sp, &rhbeat->heartbeat); file->private_data = rhbeat; return 0; } static int r_heartbeat_file_close(struct inode *inode, struct file *file) { struct ibmasmfs_heartbeat_data *rhbeat = file->private_data; kfree(rhbeat); return 0; } static ssize_t r_heartbeat_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { struct ibmasmfs_heartbeat_data *rhbeat = file->private_data; unsigned long flags; int result; if (*offset < 0) return -EINVAL; if (count == 0 || count > 1024) return 0; if (*offset != 0) return 0; spin_lock_irqsave(&rhbeat->sp->lock, flags); if (rhbeat->active) { spin_unlock_irqrestore(&rhbeat->sp->lock, flags); return -EBUSY; } rhbeat->active = 1; spin_unlock_irqrestore(&rhbeat->sp->lock, flags); result = ibmasm_start_reverse_heartbeat(rhbeat->sp, &rhbeat->heartbeat); rhbeat->active = 0; return result; } static ssize_t r_heartbeat_file_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { struct ibmasmfs_heartbeat_data *rhbeat = file->private_data; if (*offset < 0) return -EINVAL; if (count != 1) return 0; if (*offset != 0) return 0; if (rhbeat->active) ibmasm_stop_reverse_heartbeat(&rhbeat->heartbeat); return 1; } static int remote_settings_file_close(struct inode *inode, struct file *file) { return 0; } static ssize_t remote_settings_file_read(struct file *file, char __user 
*buf, size_t count, loff_t *offset) { void __iomem *address = (void __iomem *)file->private_data; unsigned char *page; int retval; int len = 0; unsigned int value; if (*offset < 0) return -EINVAL; if (count == 0 || count > 1024) return 0; if (*offset != 0) return 0; page = (unsigned char *)__get_free_page(GFP_KERNEL); if (!page) return -ENOMEM; value = readl(address); len = sprintf(page, "%d\n", value); if (copy_to_user(buf, page, len)) { retval = -EFAULT; goto exit; } *offset += len; retval = len; exit: free_page((unsigned long)page); return retval; } static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) { void __iomem *address = (void __iomem *)file->private_data; char *buff; unsigned int value; if (*offset < 0) return -EINVAL; if (count == 0 || count > 1024) return 0; if (*offset != 0) return 0; buff = kzalloc (count + 1, GFP_KERNEL); if (!buff) return -ENOMEM; if (copy_from_user(buff, ubuff, count)) { kfree(buff); return -EFAULT; } value = simple_strtoul(buff, NULL, 10); writel(value, address); kfree(buff); return count; } static const struct file_operations command_fops = { .open = command_file_open, .release = command_file_close, .read = command_file_read, .write = command_file_write, .llseek = generic_file_llseek, }; static const struct file_operations event_fops = { .open = event_file_open, .release = event_file_close, .read = event_file_read, .write = event_file_write, .llseek = generic_file_llseek, }; static const struct file_operations r_heartbeat_fops = { .open = r_heartbeat_file_open, .release = r_heartbeat_file_close, .read = r_heartbeat_file_read, .write = r_heartbeat_file_write, .llseek = generic_file_llseek, }; static const struct file_operations remote_settings_fops = { .open = simple_open, .release = remote_settings_file_close, .read = remote_settings_file_read, .write = remote_settings_file_write, .llseek = generic_file_llseek, }; static void ibmasmfs_create_files (struct super_block 
*sb) { struct list_head *entry; struct service_processor *sp; list_for_each(entry, &service_processors) { struct dentry *dir; struct dentry *remote_dir; sp = list_entry(entry, struct service_processor, node); dir = ibmasmfs_create_dir(sb, sb->s_root, sp->dirname); if (!dir) continue; ibmasmfs_create_file(sb, dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR); ibmasmfs_create_file(sb, dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR); ibmasmfs_create_file(sb, dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR); remote_dir = ibmasmfs_create_dir(sb, dir, "remote_video"); if (!remote_dir) continue; ibmasmfs_create_file(sb, remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR); ibmasmfs_create_file(sb, remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR); ibmasmfs_create_file(sb, remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR); } }
gpl-2.0
Evervolv/android_kernel_htc_msm8974
drivers/net/ethernet/8390/ne3210.c
34
8584
/* ne3210.c Linux driver for Novell NE3210 EISA Network Adapter Copyright (C) 1998, Paul Gortmaker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Information and Code Sources: 1) Based upon my other EISA 8390 drivers (lne390, es3210, smc-ultra32) 2) The existing myriad of other Linux 8390 drivers by Donald Becker. 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg file The NE3210 is an EISA shared memory NS8390 implementation. Shared memory address > 1MB should work with this driver. Note that the .cfg file (3/11/93, v1.0) has AUI and BNC switched around (or perhaps there are some defective/backwards cards ???) This driver WILL NOT WORK FOR THE NE3200 - it is completely different and does not use an 8390 at all. Updated to EISA probing API 5/2003 by Marc Zyngier. */ #include <linux/module.h> #include <linux/eisa.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mm.h> #include <asm/io.h> #include "8390.h" #define DRV_NAME "ne3210" static void ne3210_reset_8390(struct net_device *dev); static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page); #define NE3210_START_PG 0x00 #define NE3210_STOP_PG 0x80 #define NE3210_IO_EXTENT 0x20 #define NE3210_SA_PROM 0x16 #define NE3210_RESET_PORT 0xc84 #define NE3210_NIC_OFFSET 0x00 #define NE3210_ADDR0 0x00 #define NE3210_ADDR1 0x00 #define NE3210_ADDR2 0x1b #define NE3210_CFG1 0xc84 #define NE3210_CFG2 0xc90 #define NE3210_CFG_EXTENT (NE3210_CFG2 - NE3210_CFG1 + 1) #define NE3210_D_PROBE 0x01 
#define NE3210_D_RX_PKT 0x02 #define NE3210_D_TX_PKT 0x04 #define NE3210_D_IRQ 0x08 #define NE3210_DEBUG 0x0 static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0}; static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"}; static int ifmap_val[] __initdata = { IF_PORT_10BASET, IF_PORT_UNKNOWN, IF_PORT_10BASE2, IF_PORT_AUI, }; static int __init ne3210_eisa_probe (struct device *device) { unsigned long ioaddr, phys_mem; int i, retval, port_index; struct eisa_device *edev = to_eisa_device (device); struct net_device *dev; if (!(dev = alloc_ei_netdev ())) { printk ("ne3210.c: unable to allocate memory for dev!\n"); return -ENOMEM; } SET_NETDEV_DEV(dev, device); dev_set_drvdata(device, dev); ioaddr = edev->base_addr; if (!request_region(ioaddr, NE3210_IO_EXTENT, DRV_NAME)) { retval = -EBUSY; goto out; } if (!request_region(ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT, DRV_NAME)) { retval = -EBUSY; goto out1; } #if NE3210_DEBUG & NE3210_D_PROBE printk("ne3210-debug: probe at %#x, ID %s\n", ioaddr, edev->id.sig); printk("ne3210-debug: config regs: %#x %#x\n", inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2)); #endif port_index = inb(ioaddr + NE3210_CFG2) >> 6; for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i); printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n", edev->slot, ifmap[port_index], dev->dev_addr); dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07]; printk("ne3210.c: using IRQ %d, ", dev->irq); retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); if (retval) { printk (" unable to get IRQ %d.\n", dev->irq); goto out2; } phys_mem = shmem_map[inb(ioaddr + NE3210_CFG2) & 0x07] * 0x1000; if (phys_mem > 1024*1024) { if (phys_mem < virt_to_phys(high_memory)) { printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n"); printk(KERN_CRIT "ne3210.c: Use EISA SCU to 
set card memory below 1MB,\n"); printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n", (u64)virt_to_phys(high_memory)); printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n"); retval = -EINVAL; goto out3; } } if (!request_mem_region (phys_mem, NE3210_STOP_PG*0x100, DRV_NAME)) { printk ("ne3210.c: Unable to request shared memory at physical address %#lx\n", phys_mem); goto out3; } printk("%dkB memory at physical address %#lx\n", NE3210_STOP_PG/4, phys_mem); ei_status.mem = ioremap(phys_mem, NE3210_STOP_PG*0x100); if (!ei_status.mem) { printk(KERN_ERR "ne3210.c: Unable to remap card memory !!\n"); printk(KERN_ERR "ne3210.c: Driver NOT installed.\n"); retval = -EAGAIN; goto out4; } printk("ne3210.c: remapped %dkB card memory to virtual address %p\n", NE3210_STOP_PG/4, ei_status.mem); dev->mem_start = (unsigned long)ei_status.mem; dev->mem_end = dev->mem_start + (NE3210_STOP_PG - NE3210_START_PG)*256; dev->base_addr = ioaddr; ei_status.name = "NE3210"; ei_status.tx_start_page = NE3210_START_PG; ei_status.rx_start_page = NE3210_START_PG + TX_PAGES; ei_status.stop_page = NE3210_STOP_PG; ei_status.word16 = 1; ei_status.priv = phys_mem; if (ei_debug > 0) printk("ne3210 loaded.\n"); ei_status.reset_8390 = &ne3210_reset_8390; ei_status.block_input = &ne3210_block_input; ei_status.block_output = &ne3210_block_output; ei_status.get_8390_hdr = &ne3210_get_8390_hdr; dev->netdev_ops = &ei_netdev_ops; dev->if_port = ifmap_val[port_index]; if ((retval = register_netdev (dev))) goto out5; NS8390_init(dev, 0); return 0; out5: iounmap(ei_status.mem); out4: release_mem_region (phys_mem, NE3210_STOP_PG*0x100); out3: free_irq (dev->irq, dev); out2: release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT); out1: release_region (ioaddr, NE3210_IO_EXTENT); out: free_netdev (dev); return retval; } static int __devexit ne3210_eisa_remove (struct device *device) { struct net_device *dev = dev_get_drvdata(device); unsigned long ioaddr = to_eisa_device (device)->base_addr; 
unregister_netdev (dev); iounmap(ei_status.mem); release_mem_region (ei_status.priv, NE3210_STOP_PG*0x100); free_irq (dev->irq, dev); release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT); release_region (ioaddr, NE3210_IO_EXTENT); free_netdev (dev); return 0; } static void ne3210_reset_8390(struct net_device *dev) { unsigned short ioaddr = dev->base_addr; outb(0x04, ioaddr + NE3210_RESET_PORT); if (ei_debug > 1) printk("%s: resetting the NE3210...", dev->name); mdelay(2); ei_status.txing = 0; outb(0x01, ioaddr + NE3210_RESET_PORT); if (ei_debug > 1) printk("reset done\n"); } static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { void __iomem *hdr_start = ei_status.mem + ((ring_page - NE3210_START_PG)<<8); memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); hdr->count = (hdr->count + 3) & ~3; } static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { void __iomem *start = ei_status.mem + ring_offset - NE3210_START_PG*256; if (ring_offset + count > NE3210_STOP_PG*256) { int semi_count = NE3210_STOP_PG*256 - ring_offset; memcpy_fromio(skb->data, start, semi_count); count -= semi_count; memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES*256, count); } else { memcpy_fromio(skb->data, start, count); } } static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page) { void __iomem *shmem = ei_status.mem + ((start_page - NE3210_START_PG)<<8); count = (count + 3) & ~3; memcpy_toio(shmem, buf, count); } static struct eisa_device_id ne3210_ids[] = { { "EGL0101" }, { "NVL1801" }, { "" }, }; MODULE_DEVICE_TABLE(eisa, ne3210_ids); static struct eisa_driver ne3210_eisa_driver = { .id_table = ne3210_ids, .driver = { .name = "ne3210", .probe = ne3210_eisa_probe, .remove = __devexit_p (ne3210_eisa_remove), }, }; MODULE_DESCRIPTION("NE3210 EISA Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(eisa, 
ne3210_ids); static int ne3210_init(void) { return eisa_driver_register (&ne3210_eisa_driver); } static void ne3210_cleanup(void) { eisa_driver_unregister (&ne3210_eisa_driver); } module_init (ne3210_init); module_exit (ne3210_cleanup);
gpl-2.0
invisiblek/android_kernel_htc_m8
arch/arm/plat-mxc/devices/platform-imx-i2c.c
34
3580
/* * Copyright (C) 2009-2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <mach/hardware.h> #include <mach/devices-common.h> #define imx_imx_i2c_data_entry_single(soc, _id, _hwid, _size) \ { \ .id = _id, \ .iobase = soc ## _I2C ## _hwid ## _BASE_ADDR, \ .iosize = _size, \ .irq = soc ## _INT_I2C ## _hwid, \ } #define imx_imx_i2c_data_entry(soc, _id, _hwid, _size) \ [_id] = imx_imx_i2c_data_entry_single(soc, _id, _hwid, _size) #ifdef CONFIG_SOC_IMX1 const struct imx_imx_i2c_data imx1_imx_i2c_data __initconst = imx_imx_i2c_data_entry_single(MX1, 0, , SZ_4K); #endif #ifdef CONFIG_SOC_IMX21 const struct imx_imx_i2c_data imx21_imx_i2c_data __initconst = imx_imx_i2c_data_entry_single(MX21, 0, , SZ_4K); #endif #ifdef CONFIG_SOC_IMX25 const struct imx_imx_i2c_data imx25_imx_i2c_data[] __initconst = { #define imx25_imx_i2c_data_entry(_id, _hwid) \ imx_imx_i2c_data_entry(MX25, _id, _hwid, SZ_16K) imx25_imx_i2c_data_entry(0, 1), imx25_imx_i2c_data_entry(1, 2), imx25_imx_i2c_data_entry(2, 3), }; #endif #ifdef CONFIG_SOC_IMX27 const struct imx_imx_i2c_data imx27_imx_i2c_data[] __initconst = { #define imx27_imx_i2c_data_entry(_id, _hwid) \ imx_imx_i2c_data_entry(MX27, _id, _hwid, SZ_4K) imx27_imx_i2c_data_entry(0, 1), imx27_imx_i2c_data_entry(1, 2), }; #endif #ifdef CONFIG_SOC_IMX31 const struct imx_imx_i2c_data imx31_imx_i2c_data[] __initconst = { #define imx31_imx_i2c_data_entry(_id, _hwid) \ imx_imx_i2c_data_entry(MX31, _id, _hwid, SZ_4K) imx31_imx_i2c_data_entry(0, 1), imx31_imx_i2c_data_entry(1, 2), imx31_imx_i2c_data_entry(2, 3), }; #endif #ifdef CONFIG_SOC_IMX35 const struct imx_imx_i2c_data imx35_imx_i2c_data[] __initconst = { #define imx35_imx_i2c_data_entry(_id, _hwid) \ imx_imx_i2c_data_entry(MX35, _id, _hwid, SZ_4K) imx35_imx_i2c_data_entry(0, 1), 
imx35_imx_i2c_data_entry(1, 2), imx35_imx_i2c_data_entry(2, 3), }; #endif #ifdef CONFIG_SOC_IMX50 const struct imx_imx_i2c_data imx50_imx_i2c_data[] __initconst = { #define imx50_imx_i2c_data_entry(_id, _hwid) \ imx_imx_i2c_data_entry(MX50, _id, _hwid, SZ_4K) imx50_imx_i2c_data_entry(0, 1), imx50_imx_i2c_data_entry(1, 2), imx50_imx_i2c_data_entry(2, 3), }; #endif #ifdef CONFIG_SOC_IMX51 const struct imx_imx_i2c_data imx51_imx_i2c_data[] __initconst = { #define imx51_imx_i2c_data_entry(_id, _hwid) \ imx_imx_i2c_data_entry(MX51, _id, _hwid, SZ_4K) imx51_imx_i2c_data_entry(0, 1), imx51_imx_i2c_data_entry(1, 2), { .id = 2, .iobase = MX51_HSI2C_DMA_BASE_ADDR, .iosize = SZ_16K, .irq = MX51_INT_HS_I2C, }, }; #endif #ifdef CONFIG_SOC_IMX53 const struct imx_imx_i2c_data imx53_imx_i2c_data[] __initconst = { #define imx53_imx_i2c_data_entry(_id, _hwid) \ imx_imx_i2c_data_entry(MX53, _id, _hwid, SZ_4K) imx53_imx_i2c_data_entry(0, 1), imx53_imx_i2c_data_entry(1, 2), imx53_imx_i2c_data_entry(2, 3), }; #endif struct platform_device *__init imx_add_imx_i2c( const struct imx_imx_i2c_data *data, const struct imxi2c_platform_data *pdata) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + data->iosize - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device("imx-i2c", data->id, res, ARRAY_SIZE(res), pdata, sizeof(*pdata)); }
gpl-2.0
k5t4j5/kernel_htc_m8
arch/mips/pmc-sierra/yosemite/setup.c
34
5230
/* * Copyright (C) 2003 PMC-Sierra Inc. * Author: Manish Lachwani (lachwani@pmc-sierra.com) * * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/bcd.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/bootmem.h> #include <linux/swap.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/timex.h> #include <linux/termios.h> #include <linux/tty.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/serial_8250.h> #include <asm/time.h> #include <asm/bootinfo.h> #include <asm/page.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/processor.h> #include <asm/reboot.h> #include <asm/serial.h> #include <asm/titan_dep.h> #include <asm/m48t37.h> #include "setup.h" unsigned char titan_ge_mac_addr_base[6] = { 0x00, 0xe0, 0x04, 0x00, 0x00, 0x21 }; unsigned long cpu_clock_freq; unsigned long yosemite_base; static struct m48t37_rtc *m48t37_base; void __init bus_error_init(void) { } void read_persistent_clock(struct timespec *ts) { unsigned int year, month, day, hour, min, sec; unsigned long flags; spin_lock_irqsave(&rtc_lock, flags); m48t37_base->control = 0x40; year = bcd2bin(m48t37_base->year); year += bcd2bin(m48t37_base->century) * 100; month = bcd2bin(m48t37_base->month); day = bcd2bin(m48t37_base->date); hour = bcd2bin(m48t37_base->hour); min = bcd2bin(m48t37_base->min); sec = bcd2bin(m48t37_base->sec); m48t37_base->control = 0x00; spin_unlock_irqrestore(&rtc_lock, flags); ts->tv_sec = mktime(year, month, day, hour, min, sec); ts->tv_nsec = 0; } int rtc_mips_set_time(unsigned long tim) { struct rtc_time tm; unsigned long flags; rtc_time_to_tm(tim, &tm); tm.tm_year += 1900; tm.tm_mon += 1; spin_lock_irqsave(&rtc_lock, flags); m48t37_base->control = 0x80; m48t37_base->year = bin2bcd(tm.tm_year % 100); m48t37_base->century = bin2bcd(tm.tm_year / 100); m48t37_base->month = bin2bcd(tm.tm_mon); m48t37_base->date = bin2bcd(tm.tm_mday); m48t37_base->hour = bin2bcd(tm.tm_hour); m48t37_base->min = bin2bcd(tm.tm_min); m48t37_base->sec = bin2bcd(tm.tm_sec); m48t37_base->day = 
bin2bcd(tm.tm_wday + 1); m48t37_base->control = 0x00; spin_unlock_irqrestore(&rtc_lock, flags); return 0; } void __init plat_time_init(void) { mips_hpt_frequency = cpu_clock_freq / 2; mips_hpt_frequency = 33000000 * 3 * 5; } unsigned long ocd_base; EXPORT_SYMBOL(ocd_base); #define TITAN_UART_CLK 3686400 #define TITAN_SERIAL_BASE_BAUD (TITAN_UART_CLK / 16) #define TITAN_SERIAL_IRQ 4 #define TITAN_SERIAL_BASE 0xfd000008UL static void __init py_map_ocd(void) { ocd_base = (unsigned long) ioremap(OCD_BASE, OCD_SIZE); if (!ocd_base) panic("Mapping OCD failed - game over. Your score is 0."); OCD_WRITE(0x0710, 0x0ffff029); } static void __init py_uart_setup(void) { #ifdef CONFIG_SERIAL_8250 struct uart_port up; memset(&up, 0, sizeof(up)); up.membase = (unsigned char *) ioremap(TITAN_SERIAL_BASE, 8); up.irq = TITAN_SERIAL_IRQ; up.uartclk = TITAN_UART_CLK; up.regshift = 0; up.iotype = UPIO_MEM; up.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST; up.line = 0; if (early_serial_setup(&up)) printk(KERN_ERR "Early serial init of port 0 failed\n"); #endif } static void __init py_rtc_setup(void) { m48t37_base = ioremap(YOSEMITE_RTC_BASE, YOSEMITE_RTC_SIZE); if (!m48t37_base) printk(KERN_ERR "Mapping the RTC failed\n"); } static void __init py_late_time_init(void) { py_map_ocd(); py_uart_setup(); py_rtc_setup(); } void __init plat_mem_setup(void) { late_time_init = py_late_time_init; add_memory_region(0x00000000, 0x10000000, BOOT_MEM_RAM); #if 0 OCD_WRITE(RM9000x2_OCD_HTSC, OCD_READ(RM9000x2_OCD_HTSC) | HYPERTRANSPORT_ENABLE); OCD_WRITE(RM9000x2_OCD_HTBAR0, HYPERTRANSPORT_BAR0_ADDR); OCD_WRITE(RM9000x2_OCD_HTMASK0, HYPERTRANSPORT_SIZE0); #endif }
gpl-2.0
k5t4j5/kernel_htc_m8
arch/x86/math-emu/reg_compare.c
34
7362
/*---------------------------------------------------------------------------+ | reg_compare.c | | | | Compare two floating point registers | | | | Copyright (C) 1992,1993,1994,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@suburbia.net | | | | | +---------------------------------------------------------------------------*/ #include "fpu_system.h" #include "exception.h" #include "fpu_emu.h" #include "control_w.h" #include "status_w.h" static int compare(FPU_REG const *b, int tagb) { int diff, exp0, expb; u_char st0_tag; FPU_REG *st0_ptr; FPU_REG x, y; u_char st0_sign, signb = getsign(b); st0_ptr = &st(0); st0_tag = FPU_gettag0(); st0_sign = getsign(st0_ptr); if (tagb == TAG_Special) tagb = FPU_Special(b); if (st0_tag == TAG_Special) st0_tag = FPU_Special(st0_ptr); if (((st0_tag != TAG_Valid) && (st0_tag != TW_Denormal)) || ((tagb != TAG_Valid) && (tagb != TW_Denormal))) { if (st0_tag == TAG_Zero) { if (tagb == TAG_Zero) return COMP_A_eq_B; if (tagb == TAG_Valid) return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B); if (tagb == TW_Denormal) return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B) | COMP_Denormal; } else if (tagb == TAG_Zero) { if (st0_tag == TAG_Valid) return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B); if (st0_tag == TW_Denormal) return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B) | COMP_Denormal; } if (st0_tag == TW_Infinity) { if ((tagb == TAG_Valid) || (tagb == TAG_Zero)) return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B); else if (tagb == TW_Denormal) return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B) | COMP_Denormal; else if (tagb == TW_Infinity) { return (st0_sign == signb) ? COMP_A_eq_B : ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B); } } else if (tagb == TW_Infinity) { if ((st0_tag == TAG_Valid) || (st0_tag == TAG_Zero)) return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B); if (st0_tag == TW_Denormal) return ((signb == SIGN_POS) ? 
COMP_A_lt_B : COMP_A_gt_B) | COMP_Denormal; } if ((st0_tag == TW_NaN) || (tagb == TW_NaN)) { int signalling = 0, unsupported = 0; if (st0_tag == TW_NaN) { signalling = (st0_ptr->sigh & 0xc0000000) == 0x80000000; unsupported = !((exponent(st0_ptr) == EXP_OVER) && (st0_ptr-> sigh & 0x80000000)); } if (tagb == TW_NaN) { signalling |= (b->sigh & 0xc0000000) == 0x80000000; unsupported |= !((exponent(b) == EXP_OVER) && (b->sigh & 0x80000000)); } if (signalling || unsupported) return COMP_No_Comp | COMP_SNaN | COMP_NaN; else return COMP_No_Comp | COMP_NaN; } EXCEPTION(EX_Invalid); } if (st0_sign != signb) { return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B) | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ? COMP_Denormal : 0); } if ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) { FPU_to_exp16(st0_ptr, &x); FPU_to_exp16(b, &y); st0_ptr = &x; b = &y; exp0 = exponent16(st0_ptr); expb = exponent16(b); } else { exp0 = exponent(st0_ptr); expb = exponent(b); } #ifdef PARANOID if (!(st0_ptr->sigh & 0x80000000)) EXCEPTION(EX_Invalid); if (!(b->sigh & 0x80000000)) EXCEPTION(EX_Invalid); #endif diff = exp0 - expb; if (diff == 0) { diff = st0_ptr->sigh - b->sigh; if (diff == 0) { diff = st0_ptr->sigl > b->sigl; if (diff == 0) diff = -(st0_ptr->sigl < b->sigl); } } if (diff > 0) { return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B) | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ? COMP_Denormal : 0); } if (diff < 0) { return ((st0_sign == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B) | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ? COMP_Denormal : 0); } return COMP_A_eq_B | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ? 
COMP_Denormal : 0); } int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag) { int f = 0, c; c = compare(loaded_data, loaded_tag); if (c & COMP_NaN) { EXCEPTION(EX_Invalid); f = SW_C3 | SW_C2 | SW_C0; } else switch (c & 7) { case COMP_A_lt_B: f = SW_C0; break; case COMP_A_eq_B: f = SW_C3; break; case COMP_A_gt_B: f = 0; break; case COMP_No_Comp: f = SW_C3 | SW_C2 | SW_C0; break; #ifdef PARANOID default: EXCEPTION(EX_INTERNAL | 0x121); f = SW_C3 | SW_C2 | SW_C0; break; #endif } setcc(f); if (c & COMP_Denormal) { return denormal_operand() < 0; } return 0; } static int compare_st_st(int nr) { int f = 0, c; FPU_REG *st_ptr; if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) { setcc(SW_C3 | SW_C2 | SW_C0); EXCEPTION(EX_StackUnder); return !(control_word & CW_Invalid); } st_ptr = &st(nr); c = compare(st_ptr, FPU_gettagi(nr)); if (c & COMP_NaN) { setcc(SW_C3 | SW_C2 | SW_C0); EXCEPTION(EX_Invalid); return !(control_word & CW_Invalid); } else switch (c & 7) { case COMP_A_lt_B: f = SW_C0; break; case COMP_A_eq_B: f = SW_C3; break; case COMP_A_gt_B: f = 0; break; case COMP_No_Comp: f = SW_C3 | SW_C2 | SW_C0; break; #ifdef PARANOID default: EXCEPTION(EX_INTERNAL | 0x122); f = SW_C3 | SW_C2 | SW_C0; break; #endif } setcc(f); if (c & COMP_Denormal) { return denormal_operand() < 0; } return 0; } static int compare_u_st_st(int nr) { int f = 0, c; FPU_REG *st_ptr; if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) { setcc(SW_C3 | SW_C2 | SW_C0); EXCEPTION(EX_StackUnder); return !(control_word & CW_Invalid); } st_ptr = &st(nr); c = compare(st_ptr, FPU_gettagi(nr)); if (c & COMP_NaN) { setcc(SW_C3 | SW_C2 | SW_C0); if (c & COMP_SNaN) { EXCEPTION(EX_Invalid); return !(control_word & CW_Invalid); } return 0; } else switch (c & 7) { case COMP_A_lt_B: f = SW_C0; break; case COMP_A_eq_B: f = SW_C3; break; case COMP_A_gt_B: f = 0; break; case COMP_No_Comp: f = SW_C3 | SW_C2 | SW_C0; break; #ifdef PARANOID default: EXCEPTION(EX_INTERNAL | 0x123); f = SW_C3 | SW_C2 | SW_C0; break; #endif } setcc(f); if 
(c & COMP_Denormal) { return denormal_operand() < 0; } return 0; } void fcom_st(void) { compare_st_st(FPU_rm); } void fcompst(void) { if (!compare_st_st(FPU_rm)) FPU_pop(); } void fcompp(void) { if (FPU_rm != 1) { FPU_illegal(); return; } if (!compare_st_st(1)) poppop(); } void fucom_(void) { compare_u_st_st(FPU_rm); } void fucomp(void) { if (!compare_u_st_st(FPU_rm)) FPU_pop(); } void fucompp(void) { if (FPU_rm == 1) { if (!compare_u_st_st(1)) poppop(); } else FPU_illegal(); }
gpl-2.0
jabez1314/linux
drivers/tty/isicom.c
290
42252
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Original driver code supplied by Multi-Tech * * Changes * 1/9/98 alan@lxorguk.ukuu.org.uk * Merge to 2.0.x kernel tree * Obtain and use official major/minors * Loader switched to a misc device * (fixed range check bug as a side effect) * Printk clean up * 9/12/98 alan@lxorguk.ukuu.org.uk * Rough port to 2.1.x * * 10/6/99 sameer Merged the ISA and PCI drivers to * a new unified driver. * * 3/9/99 sameer Added support for ISI4616 cards. * * 16/9/99 sameer We do not force RTS low anymore. * This is to prevent the firmware * from getting confused. * * 26/10/99 sameer Cosmetic changes:The driver now * dumps the Port Count information * along with I/O address and IRQ. * * 13/12/99 sameer Fixed the problem with IRQ sharing. * * 10/5/00 sameer Fixed isicom_shutdown_board() * to not lower DTR on all the ports * when the last port on the card is * closed. * * 10/5/00 sameer Signal mask setup command added * to isicom_setup_port and * isicom_shutdown_port. * * 24/5/00 sameer The driver is now SMP aware. * * * 27/11/00 Vinayak P Risbud Fixed the Driver Crash Problem * * * 03/01/01 anil .s Added support for resetting the * internal modems on ISI cards. * * 08/02/01 anil .s Upgraded the driver for kernel * 2.4.x * * 11/04/01 Kevin Fixed firmware load problem with * ISIHP-4X card * * 30/04/01 anil .s Fixed the remote login through * ISI port problem. Now the link * does not go down before password * prompt. * * 03/05/01 anil .s Fixed the problem with IRQ sharing * among ISI-PCI cards. * * 03/05/01 anil .s Added support to display the version * info during insmod as well as module * listing by lsmod. 
* * 10/05/01 anil .s Done the modifications to the source * file and Install script so that the * same installation can be used for * 2.2.x and 2.4.x kernel. * * 06/06/01 anil .s Now we drop both dtr and rts during * shutdown_port as well as raise them * during isicom_config_port. * * 09/06/01 acme@conectiva.com.br use capable, not suser, do * restore_flags on failure in * isicom_send_break, verify put_user * result * * 11/02/03 ranjeeth Added support for 230 Kbps and 460 Kbps * Baud index extended to 21 * * 20/03/03 ranjeeth Made to work for Linux Advanced server. * Taken care of license warning. * * 10/12/03 Ravindra Made to work for Fedora Core 1 of * Red Hat Distribution * * 06/01/05 Alan Cox Merged the ISI and base kernel strands * into a single 2.6 driver * * *********************************************************** * * To use this driver you also need the support package. You * can find this in RPM format on * ftp://ftp.linux.org.uk/pub/linux/alan * * You can find the original tools for this direct from Multitech * ftp://ftp.multitech.com/ISI-Cards/ * * Having installed the cards the module options (/etc/modprobe.d/) * * options isicom io=card1,card2,card3,card4 irq=card1,card2,card3,card4 * * Omit those entries for boards you don't have installed. 
* * TODO * Merge testing * 64-bit verification */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/firmware.h> #include <linux/kernel.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/termios.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/serial.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/pci.h> #include <linux/isicom.h> #define InterruptTheCard(base) outw(0, (base) + 0xc) #define ClearInterrupt(base) inw((base) + 0x0a) #ifdef DEBUG #define isicom_paranoia_check(a, b, c) __isicom_paranoia_check((a), (b), (c)) #else #define isicom_paranoia_check(a, b, c) 0 #endif static int isicom_probe(struct pci_dev *, const struct pci_device_id *); static void isicom_remove(struct pci_dev *); static struct pci_device_id isicom_pci_tbl[] = { { PCI_DEVICE(VENDOR_ID, 0x2028) }, { PCI_DEVICE(VENDOR_ID, 0x2051) }, { PCI_DEVICE(VENDOR_ID, 0x2052) }, { PCI_DEVICE(VENDOR_ID, 0x2053) }, { PCI_DEVICE(VENDOR_ID, 0x2054) }, { PCI_DEVICE(VENDOR_ID, 0x2055) }, { PCI_DEVICE(VENDOR_ID, 0x2056) }, { PCI_DEVICE(VENDOR_ID, 0x2057) }, { PCI_DEVICE(VENDOR_ID, 0x2058) }, { 0 } }; MODULE_DEVICE_TABLE(pci, isicom_pci_tbl); static struct pci_driver isicom_driver = { .name = "isicom", .id_table = isicom_pci_tbl, .probe = isicom_probe, .remove = isicom_remove }; static int prev_card = 3; /* start servicing isi_card[0] */ static struct tty_driver *isicom_normal; static void isicom_tx(unsigned long _data); static void isicom_start(struct tty_struct *tty); static DEFINE_TIMER(tx, isicom_tx, 0, 0); /* baud index mappings from linux defns to isi */ static signed char linuxb_to_isib[] = { -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 13, 15, 16, 17, 18, 19, 20, 21 }; struct isi_board { unsigned long base; int irq; unsigned char port_count; unsigned short status; unsigned 
short port_status; /* each bit for each port */ unsigned short shift_count; struct isi_port *ports; signed char count; spinlock_t card_lock; /* Card wide lock 11/5/00 -sameer */ unsigned long flags; unsigned int index; }; struct isi_port { unsigned short magic; struct tty_port port; u16 channel; u16 status; struct isi_board *card; unsigned char *xmit_buf; int xmit_head; int xmit_tail; int xmit_cnt; }; static struct isi_board isi_card[BOARD_COUNT]; static struct isi_port isi_ports[PORT_COUNT]; /* * Locking functions for card level locking. We need to own both * the kernel lock for the card and have the card in a position that * it wants to talk. */ static int WaitTillCardIsFree(unsigned long base) { unsigned int count = 0; unsigned int a = in_atomic(); /* do we run under spinlock? */ while (!(inw(base + 0xe) & 0x1) && count++ < 100) if (a) mdelay(1); else msleep(1); return !(inw(base + 0xe) & 0x1); } static int lock_card(struct isi_board *card) { unsigned long base = card->base; unsigned int retries, a; for (retries = 0; retries < 10; retries++) { spin_lock_irqsave(&card->card_lock, card->flags); for (a = 0; a < 10; a++) { if (inw(base + 0xe) & 0x1) return 1; udelay(10); } spin_unlock_irqrestore(&card->card_lock, card->flags); msleep(10); } pr_warn("Failed to lock Card (0x%lx)\n", card->base); return 0; /* Failed to acquire the card! */ } static void unlock_card(struct isi_board *card) { spin_unlock_irqrestore(&card->card_lock, card->flags); } /* * ISI Card specific ops ... 
*/ /* card->lock HAS to be held */ static void raise_dtr(struct isi_port *port) { struct isi_board *card = port->card; unsigned long base = card->base; u16 channel = port->channel; if (WaitTillCardIsFree(base)) return; outw(0x8000 | (channel << card->shift_count) | 0x02, base); outw(0x0504, base); InterruptTheCard(base); port->status |= ISI_DTR; } /* card->lock HAS to be held */ static void drop_dtr(struct isi_port *port) { struct isi_board *card = port->card; unsigned long base = card->base; u16 channel = port->channel; if (WaitTillCardIsFree(base)) return; outw(0x8000 | (channel << card->shift_count) | 0x02, base); outw(0x0404, base); InterruptTheCard(base); port->status &= ~ISI_DTR; } /* card->lock HAS to be held */ static inline void raise_rts(struct isi_port *port) { struct isi_board *card = port->card; unsigned long base = card->base; u16 channel = port->channel; if (WaitTillCardIsFree(base)) return; outw(0x8000 | (channel << card->shift_count) | 0x02, base); outw(0x0a04, base); InterruptTheCard(base); port->status |= ISI_RTS; } /* card->lock HAS to be held */ static inline void drop_rts(struct isi_port *port) { struct isi_board *card = port->card; unsigned long base = card->base; u16 channel = port->channel; if (WaitTillCardIsFree(base)) return; outw(0x8000 | (channel << card->shift_count) | 0x02, base); outw(0x0804, base); InterruptTheCard(base); port->status &= ~ISI_RTS; } /* card->lock MUST NOT be held */ static void isicom_dtr_rts(struct tty_port *port, int on) { struct isi_port *ip = container_of(port, struct isi_port, port); struct isi_board *card = ip->card; unsigned long base = card->base; u16 channel = ip->channel; if (!lock_card(card)) return; if (on) { outw(0x8000 | (channel << card->shift_count) | 0x02, base); outw(0x0f04, base); InterruptTheCard(base); ip->status |= (ISI_DTR | ISI_RTS); } else { outw(0x8000 | (channel << card->shift_count) | 0x02, base); outw(0x0C04, base); InterruptTheCard(base); ip->status &= ~(ISI_DTR | ISI_RTS); } 
unlock_card(card); } /* card->lock HAS to be held */ static void drop_dtr_rts(struct isi_port *port) { struct isi_board *card = port->card; unsigned long base = card->base; u16 channel = port->channel; if (WaitTillCardIsFree(base)) return; outw(0x8000 | (channel << card->shift_count) | 0x02, base); outw(0x0c04, base); InterruptTheCard(base); port->status &= ~(ISI_RTS | ISI_DTR); } /* * ISICOM Driver specific routines ... * */ static inline int __isicom_paranoia_check(struct isi_port const *port, char *name, const char *routine) { if (!port) { pr_warn("Warning: bad isicom magic for dev %s in %s\n", name, routine); return 1; } if (port->magic != ISICOM_MAGIC) { pr_warn("Warning: NULL isicom port for dev %s in %s\n", name, routine); return 1; } return 0; } /* * Transmitter. * * We shovel data into the card buffers on a regular basis. The card * will do the rest of the work for us. */ static void isicom_tx(unsigned long _data) { unsigned long flags, base; unsigned int retries; short count = (BOARD_COUNT-1), card; short txcount, wrd, residue, word_count, cnt; struct isi_port *port; struct tty_struct *tty; /* find next active board */ card = (prev_card + 1) & 0x0003; while (count-- > 0) { if (isi_card[card].status & BOARD_ACTIVE) break; card = (card + 1) & 0x0003; } if (!(isi_card[card].status & BOARD_ACTIVE)) goto sched_again; prev_card = card; count = isi_card[card].port_count; port = isi_card[card].ports; base = isi_card[card].base; spin_lock_irqsave(&isi_card[card].card_lock, flags); for (retries = 0; retries < 100; retries++) { if (inw(base + 0xe) & 0x1) break; udelay(2); } if (retries >= 100) goto unlock; tty = tty_port_tty_get(&port->port); if (tty == NULL) goto put_unlock; for (; count > 0; count--, port++) { /* port not active or tx disabled to force flow control */ if (!tty_port_initialized(&port->port) || !(port->status & ISI_TXOK)) continue; txcount = min_t(short, TX_SIZE, port->xmit_cnt); if (txcount <= 0 || tty->stopped || tty->hw_stopped) continue; if 
(!(inw(base + 0x02) & (1 << port->channel))) continue; pr_debug("txing %d bytes, port%d.\n", txcount, port->channel + 1); outw((port->channel << isi_card[card].shift_count) | txcount, base); residue = NO; wrd = 0; while (1) { cnt = min_t(int, txcount, (SERIAL_XMIT_SIZE - port->xmit_tail)); if (residue == YES) { residue = NO; if (cnt > 0) { wrd |= (port->port.xmit_buf[port->xmit_tail] << 8); port->xmit_tail = (port->xmit_tail + 1) & (SERIAL_XMIT_SIZE - 1); port->xmit_cnt--; txcount--; cnt--; outw(wrd, base); } else { outw(wrd, base); break; } } if (cnt <= 0) break; word_count = cnt >> 1; outsw(base, port->port.xmit_buf+port->xmit_tail, word_count); port->xmit_tail = (port->xmit_tail + (word_count << 1)) & (SERIAL_XMIT_SIZE - 1); txcount -= (word_count << 1); port->xmit_cnt -= (word_count << 1); if (cnt & 0x0001) { residue = YES; wrd = port->port.xmit_buf[port->xmit_tail]; port->xmit_tail = (port->xmit_tail + 1) & (SERIAL_XMIT_SIZE - 1); port->xmit_cnt--; txcount--; } } InterruptTheCard(base); if (port->xmit_cnt <= 0) port->status &= ~ISI_TXOK; if (port->xmit_cnt <= WAKEUP_CHARS) tty_wakeup(tty); } put_unlock: tty_kref_put(tty); unlock: spin_unlock_irqrestore(&isi_card[card].card_lock, flags); /* schedule another tx for hopefully in about 10ms */ sched_again: mod_timer(&tx, jiffies + msecs_to_jiffies(10)); } /* * Main interrupt handler routine */ static irqreturn_t isicom_interrupt(int irq, void *dev_id) { struct isi_board *card = dev_id; struct isi_port *port; struct tty_struct *tty; unsigned long base; u16 header, word_count, count, channel; short byte_count; unsigned char *rp; if (!card || !(card->status & FIRMWARE_LOADED)) return IRQ_NONE; base = card->base; /* did the card interrupt us? 
*/ if (!(inw(base + 0x0e) & 0x02)) return IRQ_NONE; spin_lock(&card->card_lock); /* * disable any interrupts from the PCI card and lower the * interrupt line */ outw(0x8000, base+0x04); ClearInterrupt(base); inw(base); /* get the dummy word out */ header = inw(base); channel = (header & 0x7800) >> card->shift_count; byte_count = header & 0xff; if (channel + 1 > card->port_count) { pr_warn("%s(0x%lx): %d(channel) > port_count\n", __func__, base, channel + 1); outw(0x0000, base+0x04); /* enable interrupts */ spin_unlock(&card->card_lock); return IRQ_HANDLED; } port = card->ports + channel; if (!tty_port_initialized(&port->port)) { outw(0x0000, base+0x04); /* enable interrupts */ spin_unlock(&card->card_lock); return IRQ_HANDLED; } tty = tty_port_tty_get(&port->port); if (tty == NULL) { word_count = byte_count >> 1; while (byte_count > 1) { inw(base); byte_count -= 2; } if (byte_count & 0x01) inw(base); outw(0x0000, base+0x04); /* enable interrupts */ spin_unlock(&card->card_lock); return IRQ_HANDLED; } if (header & 0x8000) { /* Status Packet */ header = inw(base); switch (header & 0xff) { case 0: /* Change in EIA signals */ if (tty_port_check_carrier(&port->port)) { if (port->status & ISI_DCD) { if (!(header & ISI_DCD)) { /* Carrier has been lost */ pr_debug("%s: DCD->low.\n", __func__); port->status &= ~ISI_DCD; tty_hangup(tty); } } else if (header & ISI_DCD) { /* Carrier has been detected */ pr_debug("%s: DCD->high.\n", __func__); port->status |= ISI_DCD; wake_up_interruptible(&port->port.open_wait); } } else { if (header & ISI_DCD) port->status |= ISI_DCD; else port->status &= ~ISI_DCD; } if (tty_port_cts_enabled(&port->port)) { if (tty->hw_stopped) { if (header & ISI_CTS) { tty->hw_stopped = 0; /* start tx ing */ port->status |= (ISI_TXOK | ISI_CTS); tty_wakeup(tty); } } else if (!(header & ISI_CTS)) { tty->hw_stopped = 1; /* stop tx ing */ port->status &= ~(ISI_TXOK | ISI_CTS); } } else { if (header & ISI_CTS) port->status |= ISI_CTS; else port->status &= 
~ISI_CTS; } if (header & ISI_DSR) port->status |= ISI_DSR; else port->status &= ~ISI_DSR; if (header & ISI_RI) port->status |= ISI_RI; else port->status &= ~ISI_RI; break; case 1: /* Received Break !!! */ tty_insert_flip_char(&port->port, 0, TTY_BREAK); if (port->port.flags & ASYNC_SAK) do_SAK(tty); tty_flip_buffer_push(&port->port); break; case 2: /* Statistics */ pr_debug("%s: stats!!!\n", __func__); break; default: pr_debug("%s: Unknown code in status packet.\n", __func__); break; } } else { /* Data Packet */ count = tty_prepare_flip_string(&port->port, &rp, byte_count & ~1); pr_debug("%s: Can rx %d of %d bytes.\n", __func__, count, byte_count); word_count = count >> 1; insw(base, rp, word_count); byte_count -= (word_count << 1); if (count & 0x0001) { tty_insert_flip_char(&port->port, inw(base) & 0xff, TTY_NORMAL); byte_count -= 2; } if (byte_count > 0) { pr_debug("%s(0x%lx:%d): Flip buffer overflow! dropping bytes...\n", __func__, base, channel + 1); /* drain out unread xtra data */ while (byte_count > 0) { inw(base); byte_count -= 2; } } tty_flip_buffer_push(&port->port); } outw(0x0000, base+0x04); /* enable interrupts */ spin_unlock(&card->card_lock); tty_kref_put(tty); return IRQ_HANDLED; } static void isicom_config_port(struct tty_struct *tty) { struct isi_port *port = tty->driver_data; struct isi_board *card = port->card; unsigned long baud; unsigned long base = card->base; u16 channel_setup, channel = port->channel, shift_count = card->shift_count; unsigned char flow_ctrl; /* FIXME: Switch to new tty baud API */ baud = C_BAUD(tty); if (baud & CBAUDEX) { baud &= ~CBAUDEX; /* if CBAUDEX bit is on and the baud is set to either 50 or 75 * then the card is programmed for 57.6Kbps or 115Kbps * respectively. */ /* 1,2,3,4 => 57.6, 115.2, 230, 460 kbps resp. */ if (baud < 1 || baud > 4) tty->termios.c_cflag &= ~CBAUDEX; else baud += 15; } if (baud == 15) { /* the ASYNC_SPD_HI and ASYNC_SPD_VHI options are set * by the set_serial_info ioctl ... 
this is done by * the 'setserial' utility. */ if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) baud++; /* 57.6 Kbps */ if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) baud += 2; /* 115 Kbps */ if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) baud += 3; /* 230 kbps*/ if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) baud += 4; /* 460 kbps*/ } if (linuxb_to_isib[baud] == -1) { /* hang up */ drop_dtr(port); return; } else raise_dtr(port); if (WaitTillCardIsFree(base) == 0) { outw(0x8000 | (channel << shift_count) | 0x03, base); outw(linuxb_to_isib[baud] << 8 | 0x03, base); channel_setup = 0; switch (C_CSIZE(tty)) { case CS5: channel_setup |= ISICOM_CS5; break; case CS6: channel_setup |= ISICOM_CS6; break; case CS7: channel_setup |= ISICOM_CS7; break; case CS8: channel_setup |= ISICOM_CS8; break; } if (C_CSTOPB(tty)) channel_setup |= ISICOM_2SB; if (C_PARENB(tty)) { channel_setup |= ISICOM_EVPAR; if (C_PARODD(tty)) channel_setup |= ISICOM_ODPAR; } outw(channel_setup, base); InterruptTheCard(base); } tty_port_set_check_carrier(&port->port, !C_CLOCAL(tty)); /* flow control settings ...*/ flow_ctrl = 0; tty_port_set_cts_flow(&port->port, C_CRTSCTS(tty)); if (C_CRTSCTS(tty)) flow_ctrl |= ISICOM_CTSRTS; if (I_IXON(tty)) flow_ctrl |= ISICOM_RESPOND_XONXOFF; if (I_IXOFF(tty)) flow_ctrl |= ISICOM_INITIATE_XONXOFF; if (WaitTillCardIsFree(base) == 0) { outw(0x8000 | (channel << shift_count) | 0x04, base); outw(flow_ctrl << 8 | 0x05, base); outw((STOP_CHAR(tty)) << 8 | (START_CHAR(tty)), base); InterruptTheCard(base); } /* rx enabled -> enable port for rx on the card */ if (C_CREAD(tty)) { card->port_status |= (1 << channel); outw(card->port_status, base + 0x02); } } /* open et all */ static inline void isicom_setup_board(struct isi_board *bp) { int channel; struct isi_port *port; bp->count++; if (!(bp->status & BOARD_INIT)) { port = bp->ports; for (channel = 0; channel < bp->port_count; channel++, port++) drop_dtr_rts(port); } bp->status |= 
BOARD_ACTIVE | BOARD_INIT; } /* Activate and thus setup board are protected from races against shutdown by the tty_port mutex */ static int isicom_activate(struct tty_port *tport, struct tty_struct *tty) { struct isi_port *port = container_of(tport, struct isi_port, port); struct isi_board *card = port->card; unsigned long flags; if (tty_port_alloc_xmit_buf(tport) < 0) return -ENOMEM; spin_lock_irqsave(&card->card_lock, flags); isicom_setup_board(card); port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; /* discard any residual data */ if (WaitTillCardIsFree(card->base) == 0) { outw(0x8000 | (port->channel << card->shift_count) | 0x02, card->base); outw(((ISICOM_KILLTX | ISICOM_KILLRX) << 8) | 0x06, card->base); InterruptTheCard(card->base); } isicom_config_port(tty); spin_unlock_irqrestore(&card->card_lock, flags); return 0; } static int isicom_carrier_raised(struct tty_port *port) { struct isi_port *ip = container_of(port, struct isi_port, port); return (ip->status & ISI_DCD)?1 : 0; } static struct tty_port *isicom_find_port(struct tty_struct *tty) { struct isi_port *port; struct isi_board *card; unsigned int board; int line = tty->index; board = BOARD(line); card = &isi_card[board]; if (!(card->status & FIRMWARE_LOADED)) return NULL; /* open on a port greater than the port count for the card !!! 
*/ if (line > ((board * 16) + card->port_count - 1)) return NULL; port = &isi_ports[line]; if (isicom_paranoia_check(port, tty->name, "isicom_open")) return NULL; return &port->port; } static int isicom_open(struct tty_struct *tty, struct file *filp) { struct isi_port *port; struct tty_port *tport; tport = isicom_find_port(tty); if (tport == NULL) return -ENODEV; port = container_of(tport, struct isi_port, port); tty->driver_data = port; return tty_port_open(tport, tty, filp); } /* close et all */ /* card->lock HAS to be held */ static void isicom_shutdown_port(struct isi_port *port) { struct isi_board *card = port->card; if (--card->count < 0) { pr_debug("%s: bad board(0x%lx) count %d.\n", __func__, card->base, card->count); card->count = 0; } /* last port was closed, shutdown that board too */ if (!card->count) card->status &= BOARD_ACTIVE; } static void isicom_flush_buffer(struct tty_struct *tty) { struct isi_port *port = tty->driver_data; struct isi_board *card = port->card; unsigned long flags; if (isicom_paranoia_check(port, tty->name, "isicom_flush_buffer")) return; spin_lock_irqsave(&card->card_lock, flags); port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; spin_unlock_irqrestore(&card->card_lock, flags); tty_wakeup(tty); } static void isicom_shutdown(struct tty_port *port) { struct isi_port *ip = container_of(port, struct isi_port, port); struct isi_board *card = ip->card; unsigned long flags; /* indicate to the card that no more data can be received on this port */ spin_lock_irqsave(&card->card_lock, flags); card->port_status &= ~(1 << ip->channel); outw(card->port_status, card->base + 0x02); isicom_shutdown_port(ip); spin_unlock_irqrestore(&card->card_lock, flags); tty_port_free_xmit_buf(port); } static void isicom_close(struct tty_struct *tty, struct file *filp) { struct isi_port *ip = tty->driver_data; struct tty_port *port; if (ip == NULL) return; port = &ip->port; if (isicom_paranoia_check(ip, tty->name, "isicom_close")) return; 
tty_port_close(port, tty, filp); } /* write et all */ static int isicom_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct isi_port *port = tty->driver_data; struct isi_board *card = port->card; unsigned long flags; int cnt, total = 0; if (isicom_paranoia_check(port, tty->name, "isicom_write")) return 0; spin_lock_irqsave(&card->card_lock, flags); while (1) { cnt = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt - 1, SERIAL_XMIT_SIZE - port->xmit_head)); if (cnt <= 0) break; memcpy(port->port.xmit_buf + port->xmit_head, buf, cnt); port->xmit_head = (port->xmit_head + cnt) & (SERIAL_XMIT_SIZE - 1); port->xmit_cnt += cnt; buf += cnt; count -= cnt; total += cnt; } if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped) port->status |= ISI_TXOK; spin_unlock_irqrestore(&card->card_lock, flags); return total; } /* put_char et all */ static int isicom_put_char(struct tty_struct *tty, unsigned char ch) { struct isi_port *port = tty->driver_data; struct isi_board *card = port->card; unsigned long flags; if (isicom_paranoia_check(port, tty->name, "isicom_put_char")) return 0; spin_lock_irqsave(&card->card_lock, flags); if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) { spin_unlock_irqrestore(&card->card_lock, flags); return 0; } port->port.xmit_buf[port->xmit_head++] = ch; port->xmit_head &= (SERIAL_XMIT_SIZE - 1); port->xmit_cnt++; spin_unlock_irqrestore(&card->card_lock, flags); return 1; } /* flush_chars et all */ static void isicom_flush_chars(struct tty_struct *tty) { struct isi_port *port = tty->driver_data; if (isicom_paranoia_check(port, tty->name, "isicom_flush_chars")) return; if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || !port->port.xmit_buf) return; /* this tells the transmitter to consider this port for data output to the card ... that's the best we can do. 
*/ port->status |= ISI_TXOK; } /* write_room et all */ static int isicom_write_room(struct tty_struct *tty) { struct isi_port *port = tty->driver_data; int free; if (isicom_paranoia_check(port, tty->name, "isicom_write_room")) return 0; free = SERIAL_XMIT_SIZE - port->xmit_cnt - 1; if (free < 0) free = 0; return free; } /* chars_in_buffer et all */ static int isicom_chars_in_buffer(struct tty_struct *tty) { struct isi_port *port = tty->driver_data; if (isicom_paranoia_check(port, tty->name, "isicom_chars_in_buffer")) return 0; return port->xmit_cnt; } /* ioctl et all */ static int isicom_send_break(struct tty_struct *tty, int length) { struct isi_port *port = tty->driver_data; struct isi_board *card = port->card; unsigned long base = card->base; if (length == -1) return -EOPNOTSUPP; if (!lock_card(card)) return -EINVAL; outw(0x8000 | ((port->channel) << (card->shift_count)) | 0x3, base); outw((length & 0xff) << 8 | 0x00, base); outw((length & 0xff00u), base); InterruptTheCard(base); unlock_card(card); return 0; } static int isicom_tiocmget(struct tty_struct *tty) { struct isi_port *port = tty->driver_data; /* just send the port status */ u16 status = port->status; if (isicom_paranoia_check(port, tty->name, "isicom_ioctl")) return -ENODEV; return ((status & ISI_RTS) ? TIOCM_RTS : 0) | ((status & ISI_DTR) ? TIOCM_DTR : 0) | ((status & ISI_DCD) ? TIOCM_CAR : 0) | ((status & ISI_DSR) ? TIOCM_DSR : 0) | ((status & ISI_CTS) ? TIOCM_CTS : 0) | ((status & ISI_RI ) ? 
TIOCM_RI : 0); } static int isicom_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct isi_port *port = tty->driver_data; unsigned long flags; if (isicom_paranoia_check(port, tty->name, "isicom_ioctl")) return -ENODEV; spin_lock_irqsave(&port->card->card_lock, flags); if (set & TIOCM_RTS) raise_rts(port); if (set & TIOCM_DTR) raise_dtr(port); if (clear & TIOCM_RTS) drop_rts(port); if (clear & TIOCM_DTR) drop_dtr(port); spin_unlock_irqrestore(&port->card->card_lock, flags); return 0; } static int isicom_set_serial_info(struct tty_struct *tty, struct serial_struct __user *info) { struct isi_port *port = tty->driver_data; struct serial_struct newinfo; int reconfig_port; if (copy_from_user(&newinfo, info, sizeof(newinfo))) return -EFAULT; mutex_lock(&port->port.mutex); reconfig_port = ((port->port.flags & ASYNC_SPD_MASK) != (newinfo.flags & ASYNC_SPD_MASK)); if (!capable(CAP_SYS_ADMIN)) { if ((newinfo.close_delay != port->port.close_delay) || (newinfo.closing_wait != port->port.closing_wait) || ((newinfo.flags & ~ASYNC_USR_MASK) != (port->port.flags & ~ASYNC_USR_MASK))) { mutex_unlock(&port->port.mutex); return -EPERM; } port->port.flags = ((port->port.flags & ~ASYNC_USR_MASK) | (newinfo.flags & ASYNC_USR_MASK)); } else { port->port.close_delay = newinfo.close_delay; port->port.closing_wait = newinfo.closing_wait; port->port.flags = ((port->port.flags & ~ASYNC_FLAGS) | (newinfo.flags & ASYNC_FLAGS)); } if (reconfig_port) { unsigned long flags; spin_lock_irqsave(&port->card->card_lock, flags); isicom_config_port(tty); spin_unlock_irqrestore(&port->card->card_lock, flags); } mutex_unlock(&port->port.mutex); return 0; } static int isicom_get_serial_info(struct isi_port *port, struct serial_struct __user *info) { struct serial_struct out_info; mutex_lock(&port->port.mutex); memset(&out_info, 0, sizeof(out_info)); /* out_info.type = ? 
*/ out_info.line = port - isi_ports; out_info.port = port->card->base; out_info.irq = port->card->irq; out_info.flags = port->port.flags; /* out_info.baud_base = ? */ out_info.close_delay = port->port.close_delay; out_info.closing_wait = port->port.closing_wait; mutex_unlock(&port->port.mutex); if (copy_to_user(info, &out_info, sizeof(out_info))) return -EFAULT; return 0; } static int isicom_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct isi_port *port = tty->driver_data; void __user *argp = (void __user *)arg; if (isicom_paranoia_check(port, tty->name, "isicom_ioctl")) return -ENODEV; switch (cmd) { case TIOCGSERIAL: return isicom_get_serial_info(port, argp); case TIOCSSERIAL: return isicom_set_serial_info(tty, argp); default: return -ENOIOCTLCMD; } return 0; } /* set_termios et all */ static void isicom_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { struct isi_port *port = tty->driver_data; unsigned long flags; if (isicom_paranoia_check(port, tty->name, "isicom_set_termios")) return; if (tty->termios.c_cflag == old_termios->c_cflag && tty->termios.c_iflag == old_termios->c_iflag) return; spin_lock_irqsave(&port->card->card_lock, flags); isicom_config_port(tty); spin_unlock_irqrestore(&port->card->card_lock, flags); if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) { tty->hw_stopped = 0; isicom_start(tty); } } /* throttle et all */ static void isicom_throttle(struct tty_struct *tty) { struct isi_port *port = tty->driver_data; struct isi_board *card = port->card; if (isicom_paranoia_check(port, tty->name, "isicom_throttle")) return; /* tell the card that this port cannot handle any more data for now */ card->port_status &= ~(1 << port->channel); outw(card->port_status, card->base + 0x02); } /* unthrottle et all */ static void isicom_unthrottle(struct tty_struct *tty) { struct isi_port *port = tty->driver_data; struct isi_board *card = port->card; if (isicom_paranoia_check(port, tty->name, 
"isicom_unthrottle")) return; /* tell the card that this port is ready to accept more data */ card->port_status |= (1 << port->channel); outw(card->port_status, card->base + 0x02); } /* stop et all */ static void isicom_stop(struct tty_struct *tty) { struct isi_port *port = tty->driver_data; if (isicom_paranoia_check(port, tty->name, "isicom_stop")) return; /* this tells the transmitter not to consider this port for data output to the card. */ port->status &= ~ISI_TXOK; } /* start et all */ static void isicom_start(struct tty_struct *tty) { struct isi_port *port = tty->driver_data; if (isicom_paranoia_check(port, tty->name, "isicom_start")) return; /* this tells the transmitter to consider this port for data output to the card. */ port->status |= ISI_TXOK; } static void isicom_hangup(struct tty_struct *tty) { struct isi_port *port = tty->driver_data; if (isicom_paranoia_check(port, tty->name, "isicom_hangup")) return; tty_port_hangup(&port->port); } /* * Driver init and deinit functions */ static const struct tty_operations isicom_ops = { .open = isicom_open, .close = isicom_close, .write = isicom_write, .put_char = isicom_put_char, .flush_chars = isicom_flush_chars, .write_room = isicom_write_room, .chars_in_buffer = isicom_chars_in_buffer, .ioctl = isicom_ioctl, .set_termios = isicom_set_termios, .throttle = isicom_throttle, .unthrottle = isicom_unthrottle, .stop = isicom_stop, .start = isicom_start, .hangup = isicom_hangup, .flush_buffer = isicom_flush_buffer, .tiocmget = isicom_tiocmget, .tiocmset = isicom_tiocmset, .break_ctl = isicom_send_break, }; static const struct tty_port_operations isicom_port_ops = { .carrier_raised = isicom_carrier_raised, .dtr_rts = isicom_dtr_rts, .activate = isicom_activate, .shutdown = isicom_shutdown, }; static int reset_card(struct pci_dev *pdev, const unsigned int card, unsigned int *signature) { struct isi_board *board = pci_get_drvdata(pdev); unsigned long base = board->base; unsigned int sig, portcount = 0; int retval = 0; 
dev_dbg(&pdev->dev, "ISILoad:Resetting Card%d at 0x%lx\n", card + 1, base); inw(base + 0x8); msleep(10); outw(0, base + 0x8); /* Reset */ msleep(1000); sig = inw(base + 0x4) & 0xff; if (sig != 0xa5 && sig != 0xbb && sig != 0xcc && sig != 0xdd && sig != 0xee) { dev_warn(&pdev->dev, "ISILoad:Card%u reset failure (Possible " "bad I/O Port Address 0x%lx).\n", card + 1, base); dev_dbg(&pdev->dev, "Sig=0x%x\n", sig); retval = -EIO; goto end; } msleep(10); portcount = inw(base + 0x2); if (!(inw(base + 0xe) & 0x1) || (portcount != 0 && portcount != 4 && portcount != 8 && portcount != 16)) { dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n", card + 1); retval = -EIO; goto end; } switch (sig) { case 0xa5: case 0xbb: case 0xdd: board->port_count = (portcount == 4) ? 4 : 8; board->shift_count = 12; break; case 0xcc: case 0xee: board->port_count = 16; board->shift_count = 11; break; } dev_info(&pdev->dev, "-Done\n"); *signature = sig; end: return retval; } static int load_firmware(struct pci_dev *pdev, const unsigned int index, const unsigned int signature) { struct isi_board *board = pci_get_drvdata(pdev); const struct firmware *fw; unsigned long base = board->base; unsigned int a; u16 word_count, status; int retval = -EIO; char *name; u8 *data; struct stframe { u16 addr; u16 count; u8 data[0]; } *frame; switch (signature) { case 0xa5: name = "isi608.bin"; break; case 0xbb: name = "isi608em.bin"; break; case 0xcc: name = "isi616em.bin"; break; case 0xdd: name = "isi4608.bin"; break; case 0xee: name = "isi4616.bin"; break; default: dev_err(&pdev->dev, "Unknown signature.\n"); goto end; } retval = request_firmware(&fw, name, &pdev->dev); if (retval) goto end; retval = -EIO; for (frame = (struct stframe *)fw->data; frame < (struct stframe *)(fw->data + fw->size); frame = (struct stframe *)((u8 *)(frame + 1) + frame->count)) { if (WaitTillCardIsFree(base)) goto errrelfw; outw(0xf0, base); /* start upload sequence */ outw(0x00, base); outw(frame->addr, base); /* lsb of 
address */ word_count = frame->count / 2 + frame->count % 2; outw(word_count, base); InterruptTheCard(base); udelay(100); /* 0x2f */ if (WaitTillCardIsFree(base)) goto errrelfw; status = inw(base + 0x4); if (status != 0) { dev_warn(&pdev->dev, "Card%d rejected load header:\n" "Address:0x%x\n" "Count:0x%x\n" "Status:0x%x\n", index + 1, frame->addr, frame->count, status); goto errrelfw; } outsw(base, frame->data, word_count); InterruptTheCard(base); udelay(50); /* 0x0f */ if (WaitTillCardIsFree(base)) goto errrelfw; status = inw(base + 0x4); if (status != 0) { dev_err(&pdev->dev, "Card%d got out of sync.Card " "Status:0x%x\n", index + 1, status); goto errrelfw; } } /* XXX: should we test it by reading it back and comparing with original like * in load firmware package? */ for (frame = (struct stframe *)fw->data; frame < (struct stframe *)(fw->data + fw->size); frame = (struct stframe *)((u8 *)(frame + 1) + frame->count)) { if (WaitTillCardIsFree(base)) goto errrelfw; outw(0xf1, base); /* start download sequence */ outw(0x00, base); outw(frame->addr, base); /* lsb of address */ word_count = (frame->count >> 1) + frame->count % 2; outw(word_count + 1, base); InterruptTheCard(base); udelay(50); /* 0xf */ if (WaitTillCardIsFree(base)) goto errrelfw; status = inw(base + 0x4); if (status != 0) { dev_warn(&pdev->dev, "Card%d rejected verify header:\n" "Address:0x%x\n" "Count:0x%x\n" "Status: 0x%x\n", index + 1, frame->addr, frame->count, status); goto errrelfw; } data = kmalloc(word_count * 2, GFP_KERNEL); if (data == NULL) { dev_err(&pdev->dev, "Card%d, firmware upload " "failed, not enough memory\n", index + 1); goto errrelfw; } inw(base); insw(base, data, word_count); InterruptTheCard(base); for (a = 0; a < frame->count; a++) if (data[a] != frame->data[a]) { kfree(data); dev_err(&pdev->dev, "Card%d, firmware upload " "failed\n", index + 1); goto errrelfw; } kfree(data); udelay(50); /* 0xf */ if (WaitTillCardIsFree(base)) goto errrelfw; status = inw(base + 0x4); if 
(status != 0) { dev_err(&pdev->dev, "Card%d verify got out of sync. " "Card Status:0x%x\n", index + 1, status); goto errrelfw; } } /* xfer ctrl */ if (WaitTillCardIsFree(base)) goto errrelfw; outw(0xf2, base); outw(0x800, base); outw(0x0, base); outw(0x0, base); InterruptTheCard(base); outw(0x0, base + 0x4); /* for ISI4608 cards */ board->status |= FIRMWARE_LOADED; retval = 0; errrelfw: release_firmware(fw); end: return retval; } /* * Insmod can set static symbols so keep these static */ static unsigned int card_count; static int isicom_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned int uninitialized_var(signature), index; int retval = -EPERM; struct isi_board *board = NULL; if (card_count >= BOARD_COUNT) goto err; retval = pci_enable_device(pdev); if (retval) { dev_err(&pdev->dev, "failed to enable\n"); goto err; } dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device); /* allot the first empty slot in the array */ for (index = 0; index < BOARD_COUNT; index++) { if (isi_card[index].base == 0) { board = &isi_card[index]; break; } } if (index == BOARD_COUNT) { retval = -ENODEV; goto err_disable; } board->index = index; board->base = pci_resource_start(pdev, 3); board->irq = pdev->irq; card_count++; pci_set_drvdata(pdev, board); retval = pci_request_region(pdev, 3, ISICOM_NAME); if (retval) { dev_err(&pdev->dev, "I/O Region 0x%lx-0x%lx is busy. Card%d " "will be disabled.\n", board->base, board->base + 15, index + 1); retval = -EBUSY; goto errdec; } retval = request_irq(board->irq, isicom_interrupt, IRQF_SHARED, ISICOM_NAME, board); if (retval < 0) { dev_err(&pdev->dev, "Could not install handler at Irq %d. 
" "Card%d will be disabled.\n", board->irq, index + 1); goto errunrr; } retval = reset_card(pdev, index, &signature); if (retval < 0) goto errunri; retval = load_firmware(pdev, index, signature); if (retval < 0) goto errunri; for (index = 0; index < board->port_count; index++) { struct tty_port *tport = &board->ports[index].port; tty_port_init(tport); tport->ops = &isicom_port_ops; tport->close_delay = 50 * HZ/100; tport->closing_wait = 3000 * HZ/100; tty_port_register_device(tport, isicom_normal, board->index * 16 + index, &pdev->dev); } return 0; errunri: free_irq(board->irq, board); errunrr: pci_release_region(pdev, 3); errdec: board->base = 0; card_count--; err_disable: pci_disable_device(pdev); err: return retval; } static void isicom_remove(struct pci_dev *pdev) { struct isi_board *board = pci_get_drvdata(pdev); unsigned int i; for (i = 0; i < board->port_count; i++) { tty_unregister_device(isicom_normal, board->index * 16 + i); tty_port_destroy(&board->ports[i].port); } free_irq(board->irq, board); pci_release_region(pdev, 3); board->base = 0; card_count--; pci_disable_device(pdev); } static int __init isicom_init(void) { int retval, idx, channel; struct isi_port *port; for (idx = 0; idx < BOARD_COUNT; idx++) { port = &isi_ports[idx * 16]; isi_card[idx].ports = port; spin_lock_init(&isi_card[idx].card_lock); for (channel = 0; channel < 16; channel++, port++) { port->magic = ISICOM_MAGIC; port->card = &isi_card[idx]; port->channel = channel; port->status = 0; /* . . . 
*/ } isi_card[idx].base = 0; isi_card[idx].irq = 0; } /* tty driver structure initialization */ isicom_normal = alloc_tty_driver(PORT_COUNT); if (!isicom_normal) { retval = -ENOMEM; goto error; } isicom_normal->name = "ttyM"; isicom_normal->major = ISICOM_NMAJOR; isicom_normal->minor_start = 0; isicom_normal->type = TTY_DRIVER_TYPE_SERIAL; isicom_normal->subtype = SERIAL_TYPE_NORMAL; isicom_normal->init_termios = tty_std_termios; isicom_normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; isicom_normal->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK; tty_set_operations(isicom_normal, &isicom_ops); retval = tty_register_driver(isicom_normal); if (retval) { pr_debug("Couldn't register the dialin driver\n"); goto err_puttty; } retval = pci_register_driver(&isicom_driver); if (retval < 0) { pr_err("Unable to register pci driver.\n"); goto err_unrtty; } mod_timer(&tx, jiffies + 1); return 0; err_unrtty: tty_unregister_driver(isicom_normal); err_puttty: put_tty_driver(isicom_normal); error: return retval; } static void __exit isicom_exit(void) { del_timer_sync(&tx); pci_unregister_driver(&isicom_driver); tty_unregister_driver(isicom_normal); put_tty_driver(isicom_normal); } module_init(isicom_init); module_exit(isicom_exit); MODULE_AUTHOR("MultiTech"); MODULE_DESCRIPTION("Driver for the ISI series of cards by MultiTech"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("isi608.bin"); MODULE_FIRMWARE("isi608em.bin"); MODULE_FIRMWARE("isi616em.bin"); MODULE_FIRMWARE("isi4608.bin"); MODULE_FIRMWARE("isi4616.bin");
gpl-2.0
Tkkg1994/SuperKernel
drivers/pci/hotplug/pciehp_hpc.c
290
23594
/* * PCI Express PCI Hot Plug Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/signal.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/slab.h> #include "../pci.h" #include "pciehp.h" static inline struct pci_dev *ctrl_dev(struct controller *ctrl) { return ctrl->pcie->port; } static irqreturn_t pcie_isr(int irq, void *dev_id); static void start_int_poll_timer(struct controller *ctrl, int sec); /* This is the interrupt polling timeout function. */ static void int_poll_timeout(unsigned long data) { struct controller *ctrl = (struct controller *)data; /* Poll for interrupt events. 
regs == NULL => polling */ pcie_isr(0, ctrl); init_timer(&ctrl->poll_timer); if (!pciehp_poll_time) pciehp_poll_time = 2; /* default polling interval is 2 sec */ start_int_poll_timer(ctrl, pciehp_poll_time); } /* This function starts the interrupt polling timer. */ static void start_int_poll_timer(struct controller *ctrl, int sec) { /* Clamp to sane value */ if ((sec <= 0) || (sec > 60)) sec = 2; ctrl->poll_timer.function = &int_poll_timeout; ctrl->poll_timer.data = (unsigned long)ctrl; ctrl->poll_timer.expires = jiffies + sec * HZ; add_timer(&ctrl->poll_timer); } static inline int pciehp_request_irq(struct controller *ctrl) { int retval, irq = ctrl->pcie->irq; /* Install interrupt polling timer. Start with 10 sec delay */ if (pciehp_poll_mode) { init_timer(&ctrl->poll_timer); start_int_poll_timer(ctrl, 10); return 0; } /* Installs the interrupt handler */ retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl); if (retval) ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n", irq); return retval; } static inline void pciehp_free_irq(struct controller *ctrl) { if (pciehp_poll_mode) del_timer_sync(&ctrl->poll_timer); else free_irq(ctrl->pcie->irq, ctrl); } static int pcie_poll_cmd(struct controller *ctrl, int timeout) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_status; pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); if (slot_status & PCI_EXP_SLTSTA_CC) { pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); return 1; } while (timeout > 0) { msleep(10); timeout -= 10; pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); if (slot_status & PCI_EXP_SLTSTA_CC) { pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); return 1; } } return 0; /* timeout */ } static void pcie_wait_cmd(struct controller *ctrl) { unsigned int msecs = pciehp_poll_mode ? 
2500 : 1000;
	unsigned long duration = msecs_to_jiffies(msecs);
	unsigned long cmd_timeout = ctrl->cmd_started + duration;
	unsigned long now, timeout;
	int rc;

	/*
	 * If the controller does not generate notifications for command
	 * completions, we never need to wait between writes.
	 */
	if (NO_CMD_CMPL(ctrl))
		return;

	if (!ctrl->cmd_busy)
		return;

	/*
	 * Even if the command has already timed out, we want to call
	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
	 */
	now = jiffies;
	if (time_before_eq(cmd_timeout, now))
		timeout = 1;
	else
		timeout = cmd_timeout - now;

	/* Sleep on the waitqueue only if completion interrupts are enabled */
	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
	else
		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));

	/*
	 * Controllers with errata like Intel CF118 don't generate
	 * completion notifications unless the power/indicator/interlock
	 * control bits are changed. On such controllers, we'll emit this
	 * timeout message when we wait for completion of commands that
	 * don't change those bits, e.g., commands that merely enable
	 * interrupts.
	 */
	if (!rc)
		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
			  ctrl->slot_ctrl,
			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
}

/*
 * Read-modify-write the Slot Control register under ctrl_lock: only the
 * bits in @mask are replaced by @cmd. cmd_busy is raised (with a barrier
 * so the ISR sees it) before the write, and the issue time is recorded
 * for pcie_wait_cmd()'s timeout accounting.
 */
static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
			      u16 mask, bool wait)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl;

	mutex_lock(&ctrl->ctrl_lock);

	/*
	 * Always wait for any previous command that might still be in progress
	 */
	pcie_wait_cmd(ctrl);

	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	slot_ctrl &= ~mask;
	slot_ctrl |= (cmd & mask);
	ctrl->cmd_busy = 1;
	smp_mb();
	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
	ctrl->cmd_started = jiffies;
	ctrl->slot_ctrl = slot_ctrl;

	/*
	 * Optionally wait for the hardware to be ready for a new command,
	 * indicating completion of the above issued command.
	 */
	if (wait)
		pcie_wait_cmd(ctrl);

	mutex_unlock(&ctrl->ctrl_lock);
}

/**
 * pcie_write_cmd - Issue controller command
 * @ctrl: controller to which the command is issued
 * @cmd: command value written to slot control register
 * @mask: bitmask of slot control register to be modified
 */
static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, true);
}

/* Same as above without waiting for the hardware to latch */
static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, false);
}

/* Return true if the Data Link Layer Link Active bit is set in Link Status. */
bool pciehp_check_link_active(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 lnk_status;
	bool ret;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
	ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);

	if (ret)
		ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);

	return ret;
}

/* Poll (10 ms steps, up to 1 s) until link-active matches @active. */
static void __pcie_wait_link_active(struct controller *ctrl, bool active)
{
	int timeout = 1000;

	if (pciehp_check_link_active(ctrl) == active)
		return;
	while (timeout > 0) {
		msleep(10);
		timeout -= 10;
		if (pciehp_check_link_active(ctrl) == active)
			return;
	}
	ctrl_dbg(ctrl, "Data Link Layer Link Active not %s in 1000 msec\n",
			active ?
"set" : "cleared"); } static void pcie_wait_link_active(struct controller *ctrl) { __pcie_wait_link_active(ctrl, true); } static bool pci_bus_check_dev(struct pci_bus *bus, int devfn) { u32 l; int count = 0; int delay = 1000, step = 20; bool found = false; do { found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0); count++; if (found) break; msleep(step); delay -= step; } while (delay > 0); if (count > 1 && pciehp_debug) printk(KERN_DEBUG "pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), count, step, l); return found; } int pciehp_check_link_status(struct controller *ctrl) { struct pci_dev *pdev = ctrl_dev(ctrl); bool found; u16 lnk_status; /* * Data Link Layer Link Active Reporting must be capable for * hot-plug capable downstream port. But old controller might * not implement it. In this case, we wait for 1000 ms. */ if (ctrl->link_active_reporting) pcie_wait_link_active(ctrl); else msleep(1000); /* wait 100ms before read pci conf, and try in 1s */ msleep(100); found = pci_bus_check_dev(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0)); pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); if ((lnk_status & PCI_EXP_LNKSTA_LT) || !(lnk_status & PCI_EXP_LNKSTA_NLW)) { ctrl_err(ctrl, "Link Training Error occurs\n"); return -1; } pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); if (!found) return -1; return 0; } static int __pciehp_link_set(struct controller *ctrl, bool enable) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 lnk_ctrl; pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl); if (enable) lnk_ctrl &= ~PCI_EXP_LNKCTL_LD; else lnk_ctrl |= PCI_EXP_LNKCTL_LD; pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl); ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl); return 0; } static int pciehp_link_enable(struct controller *ctrl) { return __pciehp_link_set(ctrl, 
true);
}

/* Decode the Attention Indicator Control field: 1=on, 2=blink, 0=off. */
void pciehp_get_attention_status(struct slot *slot, u8 *status)
{
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl;

	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);

	switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
	case PCI_EXP_SLTCTL_ATTN_IND_ON:
		*status = 1;	/* On */
		break;
	case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
		*status = 2;	/* Blink */
		break;
	case PCI_EXP_SLTCTL_ATTN_IND_OFF:
		*status = 0;	/* Off */
		break;
	default:
		*status = 0xFF;	/* reserved encoding */
		break;
	}
}

/* Decode the Power Controller Control bit: 1=on, 0=off. */
void pciehp_get_power_status(struct slot *slot, u8 *status)
{
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl;

	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);

	switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
	case PCI_EXP_SLTCTL_PWR_ON:
		*status = 1;	/* On */
		break;
	case PCI_EXP_SLTCTL_PWR_OFF:
		*status = 0;	/* Off */
		break;
	default:
		*status = 0xFF;
		break;
	}
}

/* Report the MRL Sensor State bit from Slot Status. */
void pciehp_get_latch_status(struct slot *slot, u8 *status)
{
	struct pci_dev *pdev = ctrl_dev(slot->ctrl);
	u16 slot_status;

	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
	*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
}

/* Report the Presence Detect State bit (card physically present). */
void pciehp_get_adapter_status(struct slot *slot, u8 *status)
{
	struct pci_dev *pdev = ctrl_dev(slot->ctrl);
	u16 slot_status;

	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
	*status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
}

/* Return non-zero if the Power Fault Detected bit is set in Slot Status. */
int pciehp_query_power_fault(struct slot *slot)
{
	struct pci_dev *pdev = ctrl_dev(slot->ctrl);
	u16 slot_status;

	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
	return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}

/*
 * Program the Attention Indicator: @value 0=off, 1=on, 2=blink.
 * No-op when the slot has no attention LED.
 */
void pciehp_set_attention_status(struct slot *slot, u8 value)
{
	struct controller *ctrl = slot->ctrl;
	u16 slot_cmd;

	if (!ATTN_LED(ctrl))
		return;
	switch (value) {
	case 0:		/* turn off */
		slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF;
		break;
	case 1:		/* turn on */
		slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON;
		break;
	case 2:		/* turn blink */
		slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK;
		break;
	default:
		return;	/* undefined value: ignore the request */
	}
	pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
}

/* Turn the power indicator on (no-op when the slot has no power LED). */
void pciehp_green_led_on(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;

	if (!PWR_LED(ctrl))
		return;

	pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
			      PCI_EXP_SLTCTL_PIC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
		 PCI_EXP_SLTCTL_PWR_IND_ON);
}

/* Turn the power indicator off. */
void pciehp_green_led_off(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;

	if (!PWR_LED(ctrl))
		return;

	pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
			      PCI_EXP_SLTCTL_PIC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
		 PCI_EXP_SLTCTL_PWR_IND_OFF);
}

/* Set the power indicator blinking (e.g. operation in progress). */
void pciehp_green_led_blink(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;

	if (!PWR_LED(ctrl))
		return;

	pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
			      PCI_EXP_SLTCTL_PIC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
		 PCI_EXP_SLTCTL_PWR_IND_BLINK);
}

/*
 * Power the slot on. Clears any stale power-fault status first so a
 * previous failure is not mistaken for a new one, then waits for the
 * command to latch and re-enables the link.
 */
int pciehp_power_on_slot(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_status;
	int retval;

	/* Clear sticky power-fault bit from previous power failures */
	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
	if (slot_status & PCI_EXP_SLTSTA_PFD)
		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
					   PCI_EXP_SLTSTA_PFD);
	ctrl->power_fault_detected = 0;

	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) +
		 PCI_EXP_SLTCTL, PCI_EXP_SLTCTL_PWR_ON);

	retval = pciehp_link_enable(ctrl);
	if (retval)
		ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);

	return retval;
}

/* Power the slot off, waiting for the command to latch. */
void pciehp_power_off_slot(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;

	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
		 PCI_EXP_SLTCTL_PWR_OFF);
}

/*
 * Shared hotplug interrupt handler: collects all pending slot events
 * (clearing them as it goes), wakes command-completion waiters, and
 * dispatches each event to the corresponding pciehp_handle_* routine.
 */
static irqreturn_t pcie_isr(int irq, void *dev_id)
{
	struct controller *ctrl = (struct controller *)dev_id;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	struct pci_bus *subordinate = pdev->subordinate;
	struct pci_dev *dev;
	struct slot *slot = ctrl->slot;
	u16 detected, intr_loc;

	/*
	 * In order to guarantee that all interrupt events are
	 * serviced, we need to re-inspect Slot Status register after
	 * clearing what is presumed to be the last pending interrupt.
	 */
	intr_loc = 0;
	do {
		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &detected);
		detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
			     PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
			     PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
		detected &= ~intr_loc;
		intr_loc |= detected;
		if (!intr_loc)
			return IRQ_NONE;
		if (detected)
			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
						   intr_loc);
	} while (detected);

	ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);

	/* Check Command Complete Interrupt Pending */
	if (intr_loc & PCI_EXP_SLTSTA_CC) {
		ctrl->cmd_busy = 0;
		smp_mb();
		wake_up(&ctrl->queue);
	}

	/* Drop the event if any child device asked to be left alone */
	if (subordinate) {
		list_for_each_entry(dev, &subordinate->devices, bus_list) {
			if (dev->ignore_hotplug) {
				ctrl_dbg(ctrl, "ignoring hotplug event %#06x (%s requested no hotplug)\n",
					 intr_loc, pci_name(dev));
				return IRQ_HANDLED;
			}
		}
	}

	if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
		return IRQ_HANDLED;

	/* Check MRL Sensor Changed */
	if (intr_loc & PCI_EXP_SLTSTA_MRLSC)
		pciehp_handle_switch_change(slot);

	/* Check Attention Button Pressed */
	if (intr_loc & PCI_EXP_SLTSTA_ABP)
		pciehp_handle_attention_button(slot);
	/* Check Presence Detect Changed */
	if (intr_loc & PCI_EXP_SLTSTA_PDC)
		pciehp_handle_presence_change(slot);

	/* Check Power Fault Detected */
	if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
		ctrl->power_fault_detected = 1;
		pciehp_handle_power_fault(slot);
	}

	/* Check Data Link Layer State Changed */
	if (intr_loc & PCI_EXP_SLTSTA_DLLSC)
		pciehp_handle_linkstate_change(slot);

	return IRQ_HANDLED;
}

/*
 * Enable event notifications in Slot Control appropriate to the slot's
 * capabilities; interrupt enables are skipped in polling mode.
 */
void pcie_enable_notification(struct controller *ctrl)
{
	u16 cmd, mask;

	/*
	 * TBD: Power fault detected software notification support.
	 *
	 * Power fault detected software notification is not enabled
	 * now, because it caused power fault detected interrupt storm
	 * on some machines. On those machines, power fault detected
	 * bit in the slot status register was set again immediately
	 * when it is cleared in the interrupt service routine, and
	 * next power fault detected interrupt was notified again.
	 */

	/*
	 * Always enable link events: thus link-up and link-down shall
	 * always be treated as hotplug and unplug respectively. Enable
	 * presence detect only if Attention Button is not present.
	 */
	cmd = PCI_EXP_SLTCTL_DLLSCE;
	if (ATTN_BUTTN(ctrl))
		cmd |= PCI_EXP_SLTCTL_ABPE;
	else
		cmd |= PCI_EXP_SLTCTL_PDCE;
	if (MRL_SENS(ctrl))
		cmd |= PCI_EXP_SLTCTL_MRLSCE;

	if (!pciehp_poll_mode)
		cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;

	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
		PCI_EXP_SLTCTL_DLLSCE);

	pcie_write_cmd_nowait(ctrl, cmd, mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}

/* Clear every notification-enable bit in Slot Control. */
static void pcie_disable_notification(struct controller *ctrl)
{
	u16 mask;

	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
		PCI_EXP_SLTCTL_DLLSCE);
	pcie_write_cmd(ctrl, 0, mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
}

/*
 * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
 * bus reset of the bridge, but at the same time we want to ensure that it is
 * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
 * disable link state notification and presence detection change notification
 * momentarily, if we see that they could interfere. Also, clear any spurious
 * events after.
 */
int pciehp_reset_slot(struct slot *slot, int probe)
{
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 stat_mask = 0, ctrl_mask = 0;

	if (probe)
		return 0;

	/* Presence-detect events only interfere when there is no button */
	if (!ATTN_BUTTN(ctrl)) {
		ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
		stat_mask |= PCI_EXP_SLTSTA_PDC;
	}
	ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
	stat_mask |= PCI_EXP_SLTSTA_DLLSC;

	/* Mask the interfering notifications while the bus is reset */
	pcie_write_cmd(ctrl, 0, ctrl_mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
	if (pciehp_poll_mode)
		del_timer_sync(&ctrl->poll_timer);

	pci_reset_bridge_secondary_bus(ctrl->pcie->port);

	/* Discard events raised by the reset itself, then re-enable */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
	if (pciehp_poll_mode)
		int_poll_timeout(ctrl->poll_timer.data);

	return 0;
}

/* Request IRQ (or polling) and turn on event notification. */
int pcie_init_notification(struct controller *ctrl)
{
	if (pciehp_request_irq(ctrl))
		return -1;
	pcie_enable_notification(ctrl);
	ctrl->notification_enabled = 1;
	return 0;
}

/* Reverse of pcie_init_notification(); safe to call when not enabled. */
static void pcie_shutdown_notification(struct controller *ctrl)
{
	if (ctrl->notification_enabled) {
		pcie_disable_notification(ctrl);
		pciehp_free_irq(ctrl);
		ctrl->notification_enabled = 0;
	}
}

/*
 * Allocate the controller's single slot together with its dedicated
 * workqueue and pushbutton work item. Returns 0 or -ENOMEM.
 */
static int pcie_init_slot(struct controller *ctrl)
{
	struct slot *slot;

	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return -ENOMEM;

	slot->wq = alloc_workqueue("pciehp-%u", 0, 0, PSN(ctrl));
	if (!slot->wq)
		goto abort;

	slot->ctrl = ctrl;
	mutex_init(&slot->lock);
	mutex_init(&slot->hotplug_lock);
	INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
	ctrl->slot = slot;
	return 0;
abort:
	kfree(slot);
	return -ENOMEM;
}

/* Tear down what pcie_init_slot() created. */
static void pcie_cleanup_slot(struct controller *ctrl)
{
	struct slot *slot = ctrl->slot;
	cancel_delayed_work(&slot->work);
	destroy_workqueue(slot->wq);
	kfree(slot);
}

/* Dump controller/slot capabilities and registers when pciehp_debug is set. */
static inline void dbg_ctrl(struct controller *ctrl)
{
	int i;
	u16 reg16;
	struct pci_dev *pdev = ctrl->pcie->port;

	if
(!pciehp_debug)
		return;

	ctrl_info(ctrl, "Hotplug Controller:\n");
	ctrl_info(ctrl, " Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n",
		  pci_name(pdev), pdev->irq);
	ctrl_info(ctrl, " Vendor ID : 0x%04x\n", pdev->vendor);
	ctrl_info(ctrl, " Device ID : 0x%04x\n", pdev->device);
	ctrl_info(ctrl, " Subsystem ID : 0x%04x\n", pdev->subsystem_device);
	ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n",
		  pdev->subsystem_vendor);
	ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n", pci_pcie_cap(pdev));
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (!pci_resource_len(pdev, i))
			continue;
		ctrl_info(ctrl, " PCI resource [%d] : %pR\n",
			  i, &pdev->resource[i]);
	}
	ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
	ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl));
	ctrl_info(ctrl, " Attention Button : %3s\n",
		  ATTN_BUTTN(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " Power Controller : %3s\n",
		  POWER_CTRL(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " MRL Sensor : %3s\n",
		  MRL_SENS(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " Attention Indicator : %3s\n",
		  ATTN_LED(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " Power Indicator : %3s\n",
		  PWR_LED(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " Hot-Plug Surprise : %3s\n",
		  HP_SUPR_RM(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " EMI Present : %3s\n",
		  EMI(ctrl) ? "yes" : "no");
	ctrl_info(ctrl, " Command Completed : %3s\n",
		  NO_CMD_CMPL(ctrl) ? "no" : "yes");
	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
	ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
	ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
}

/* '+' if capability bit y is set in x, '-' otherwise (for the banner). */
#define FLAG(x, y)	(((x) & (y)) ? '+' : '-')

/*
 * Allocate and initialize a controller for the given PCIe port service
 * device: read slot/link capabilities, clear stale events, and create
 * the slot. Returns the controller or NULL on failure.
 */
struct controller *pcie_init(struct pcie_device *dev)
{
	struct controller *ctrl;
	u32 slot_cap, link_cap;
	struct pci_dev *pdev = dev->port;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		dev_err(&dev->device, "%s: Out of memory\n", __func__);
		goto abort;
	}
	ctrl->pcie = dev;
	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
	ctrl->slot_cap = slot_cap;
	mutex_init(&ctrl->ctrl_lock);
	init_waitqueue_head(&ctrl->queue);
	dbg_ctrl(ctrl);

	/* Check if Data Link Layer Link Active Reporting is implemented */
	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
	if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
		ctrl_dbg(ctrl, "Link Active Reporting supported\n");
		ctrl->link_active_reporting = 1;
	}

	/* Clear all remaining event bits in Slot Status register */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
		PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);

	ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n",
		(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
		FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC));

	if (pcie_init_slot(ctrl))
		goto abort_ctrl;

	return ctrl;

abort_ctrl:
	kfree(ctrl);
abort:
	return NULL;
}

/* Reverse of pcie_init(): stop notifications, free the slot, free ctrl. */
void pciehp_release_ctrl(struct controller *ctrl)
{
	pcie_shutdown_notification(ctrl);
	pcie_cleanup_slot(ctrl);
	kfree(ctrl);
}
gpl-2.0
Blagus/STB8000-kernel
arch/mips/alchemy/common/time.c
546
5063
/*
 * Copyright (C) 2008 Manuel Lauss <mano@roarinelk.homelinux.net>
 *
 * Previous incarnations were:
 * Copyright (C) 2001, 2006, 2008 MontaVista Software, <source@mvista.com>
 * Copied and modified Carsten Langgaard's time.c
 *
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
 *
 * ########################################################################
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 *
 * Clocksource/event using the 32.768kHz-clocked Counter1 ('RTC' in the
 * databooks). Firmware/Board init code must enable the counters in the
 * counter control register, otherwise the CP0 counter clocksource/event
 * will be installed instead (and use of 'wait' instruction is prohibited).
 */

#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm/time.h>
#include <asm/mach-au1x00/au1000.h>

/* 32kHz clock enabled and detected */
#define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)

/* Read the free-running counter1 (RTC) value for the clocksource. */
static cycle_t au1x_counter1_read(struct clocksource *cs)
{
	return au_readl(SYS_RTCREAD);
}

static struct clocksource au1x_counter1_clocksource = {
	.name		= "alchemy-counter1",
	.read		= au1x_counter1_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.rating		= 100,
};

/*
 * Program RTCMATCH2 to fire @delta counter ticks from now. Spins until
 * a pending RTCMATCH2 register write (M21) has drained before writing.
 */
static int au1x_rtcmatch2_set_next_event(unsigned long delta,
					 struct clock_event_device *cd)
{
	delta += au_readl(SYS_RTCREAD);
	/* wait for register access */
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M21)
		;
	au_writel(delta, SYS_RTCMATCH2);
	au_sync();

	return 0;
}

/* One-shot-only device (CLOCK_EVT_FEAT_ONESHOT): nothing to do here. */
static void au1x_rtcmatch2_set_mode(enum clock_event_mode mode,
				    struct clock_event_device *cd)
{
}

/* RTCMATCH2 interrupt: forward to the registered event handler. */
static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
{
	struct clock_event_device *cd = dev_id;
	cd->event_handler(cd);
	return IRQ_HANDLED;
}

static struct clock_event_device au1x_rtcmatch2_clockdev = {
	.name		= "rtcmatch2",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 100,
	.irq		= AU1000_RTC_MATCH2_INT,
	.set_next_event	= au1x_rtcmatch2_set_next_event,
	.set_mode	= au1x_rtcmatch2_set_mode,
	.cpumask	= cpu_all_mask,
};

static struct irqaction au1x_rtcmatch2_irqaction = {
	.handler	= au1x_rtcmatch2_irq,
	.flags		= IRQF_DISABLED | IRQF_TIMER,
	.name		= "timer",
	.dev_id		= &au1x_rtcmatch2_clockdev,
};

/*
 * Platform timer init: probe the 32.768 kHz counter1 hardware and, if it
 * is usable, register it as clocksource plus the RTCMATCH2 clock-event
 * device; otherwise fall back to the CP0 (r4k) timer with 'wait' disabled.
 */
void __init plat_time_init(void)
{
	struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
	unsigned long t;

	/* Check if firmware (YAMON, ...) has enabled 32kHz and clock
	 * has been detected. If so install the rtcmatch2 clocksource,
	 * otherwise don't bother. Note that both bits being set is by
	 * no means a definite guarantee that the counters actually work
	 * (the 32S bit seems to be stuck set to 1 once a single clock-
	 * edge is detected, hence the timeouts).
	 */
	if (CNTR_OK != (au_readl(SYS_COUNTER_CNTRL) & CNTR_OK))
		goto cntr_err;

	/*
	 * setup counter 1 (RTC) to tick at full speed
	 */
	t = 0xffffff;
	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && --t)
		asm volatile ("nop");
	if (!t)
		goto cntr_err;

	au_writel(0, SYS_RTCTRIM);	/* 32.768 kHz */
	au_sync();

	t = 0xffffff;
	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
		asm volatile ("nop");
	if (!t)
		goto cntr_err;
	au_writel(0, SYS_RTCWRITE);
	au_sync();

	t = 0xffffff;
	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
		asm volatile ("nop");
	if (!t)
		goto cntr_err;

	/* register counter1 clocksource and event device */
	clocksource_set_clock(&au1x_counter1_clocksource, 32768);
	clocksource_register(&au1x_counter1_clocksource);

	cd->shift = 32;
	cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
	cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
	cd->min_delta_ns = clockevent_delta2ns(8, cd);	/* ~0.25ms */
	clockevents_register_device(cd);
	setup_irq(AU1000_RTC_MATCH2_INT, &au1x_rtcmatch2_irqaction);

	printk(KERN_INFO "Alchemy clocksource installed\n");

	return;

cntr_err:
	/*
	 * MIPS kernel assigns 'au1k_wait' to 'cpu_wait' before this
	 * function is called. Because the Alchemy counters are unusable
	 * the C0 timekeeping code is installed and use of the 'wait'
	 * instruction must be prohibited, which is done most easily by
	 * assigning NULL to cpu_wait.
	 */
	cpu_wait = NULL;
	r4k_clockevent_init();
	init_r4k_clocksource();
}
gpl-2.0
samno1607/NO-IDEA
net/sctp/output.c
802
22690
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * * This file is part of the SCTP kernel implementation * * These functions handle output processing. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@austin.ibm.com> * Sridhar Samudrala <sri@us.ibm.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. 
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>

/* Forward declarations for private helpers. */
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
					       struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
					struct sctp_chunk *chunk,
					u16 chunk_len);

/* Reset a packet to the empty state (size = header overhead only). */
static void sctp_packet_reset(struct sctp_packet *packet)
{
	packet->size = packet->overhead;
	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
	packet->auth = NULL;
}

/* Config a packet.
 * This appears to be a followup set of initializations: records the
 * verification tag and, for an ECN-capable empty packet, prepends an
 * ECNE chunk before anything else is appended.
 */
struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
				       __u32 vtag, int ecn_capable)
{
	struct sctp_chunk *chunk = NULL;

	SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
			  packet, vtag);

	packet->vtag = vtag;

	if (ecn_capable && sctp_packet_empty(packet)) {
		chunk = sctp_get_ecne_prepend(packet->transport->asoc);

		/* If there is a prepend chunk, stick it on the list before
		 * any other chunks get appended.
		 */
		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	return packet;
}

/* Initialize the packet structure.
 */
struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
				     struct sctp_transport *transport,
				     __u16 sport, __u16 dport)
{
	struct sctp_association *asoc = transport->asoc;
	size_t overhead;

	SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __func__,
			  packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	/* Without an association the address family is unknown, so assume
	 * the larger IPv6 header for the overhead estimate.
	 */
	if (asoc) {
		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
		overhead = sp->pf->af->net_header_len;
	} else {
		overhead = sizeof(struct ipv6hdr);
	}
	overhead += sizeof(struct sctphdr);
	packet->overhead = overhead;
	sctp_packet_reset(packet);
	packet->vtag = 0;
	packet->malloced = 0;
	return packet;
}

/* Free a packet: release all queued chunks, then the packet itself if it
 * was heap-allocated (malloced flag).
 */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}

	if (packet->malloced)
		kfree(packet);
}

/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
				       struct sctp_chunk *chunk,
				       int one_packet)
{
	sctp_xmit_t retval;
	int error = 0;

	SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__,
			  packet, chunk);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			/* Flush the full packet, then retry the append on
			 * the now-empty packet (unless caller limited us
			 * to one packet).
			 */
			error = sctp_packet_transmit(packet);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_NAGLE_DELAY:
		break;
	}

	return retval;
}

/* Try to bundle an auth chunk into the packet. */
static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
					   struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	struct sctp_chunk *auth;
	sctp_xmit_t retval = SCTP_XMIT_OK;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc);
	if (!auth)
		return retval;

	retval = sctp_packet_append_chunk(pkt, auth);

	return retval;
}

/* Try to bundle a SACK with the packet. */
static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
					   struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one in to the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;
		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;
			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = sctp_packet_append_chunk(pkt, sack);
				asoc->peer.sack_needed = 0;
				/* Drop the timer's reference on the asoc */
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
	return retval;
}

/* Append a chunk to the offered packet, reporting back any inability to do
 * so.
 */
sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
				     struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;
	__u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));

	SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
			  chunk);

	/* Data chunks are special. Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		break;
	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;
finish:
	return retval;
}

/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is a normal kernel error return value.
*/
int sctp_packet_transmit(struct sctp_packet *packet)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctphdr *sh;
	struct sk_buff *nskb;
	struct sctp_chunk *chunk, *tmp;
	struct sock *sk;
	int err = 0;
	int padding;		/* How much padding do we need? */
	__u8 has_data = 0;
	struct dst_entry *dst = tp->dst;
	unsigned char *auth = NULL;	/* pointer to auth in skb data */
	__u32 cksum_buf_len = sizeof(struct sctphdr);

	SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet);

	/* Do NOT generate a chunkless packet. */
	if (list_empty(&packet->chunk_list))
		return err;

	/* Set up convenience variables... */
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* Allocate the new skb.  Sized for all bundled chunks plus the
	 * worst-case link-layer header.
	 */
	nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		goto nomem;

	/* Make sure the outbound skb has enough header room reserved. */
	skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);

	/* Set the owning socket so that we know where to get the
	 * destination IP address.
	 */
	skb_set_owner_w(nskb, sk);

	/* The 'obsolete' field of dst is set to 2 when a dst is freed.
	 * Re-route and re-sync the PMTU if our cached route went stale.
	 */
	if (!dst || (dst->obsolete > 1)) {
		dst_release(dst);
		sctp_transport_route(tp, NULL, sctp_sk(sk));
		if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
			sctp_assoc_sync_pmtu(asoc);
		}
	}
	dst = dst_clone(tp->dst);
	skb_dst_set(nskb, dst);
	if (!dst)
		goto no_route;

	/* Build the SCTP header.  */
	sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
	skb_reset_transport_header(nskb);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);

	/* From 6.8 Adler-32 Checksum Calculation:
	 *  After the packet is constructed (containing the SCTP common
	 *  header and one or more control or DATA chunks), the
	 *  transmitter shall:
	 *
	 *  1) Fill in the proper Verification Tag in the SCTP common
	 *     header and initialize the checksum field to 0's.
	 */
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/**
	 * 6.10 Bundling
	 *
	 *    An endpoint bundles chunks by simply including multiple
	 *    chunks in one outbound SCTP packet.  ...
	 */

	/**
	 * 3.2  Chunk Field Descriptions
	 *
	 * The total length of a chunk (including Type, Length and
	 * Value fields) MUST be a multiple of 4 bytes.  If the length
	 * of the chunk is not a multiple of 4 bytes, the sender MUST
	 * pad the chunk with all zero bytes and this padding is not
	 * included in the chunk length field.  The sender should
	 * never pad with more than 3 bytes.
	 *
	 * [This whole comment explains WORD_ROUND() below.]
	 */
	SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");

	/* Copy each bundled chunk into the single outbound skb,
	 * padding each to a 4-byte boundary.
	 */
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (sctp_chunk_is_data(chunk)) {
			/* 6.3.1 C4) When data is in flight and when allowed
			 * by rule C5, a new RTT measurement MUST be made each
			 * round trip.  Furthermore, new RTT measurements
			 * SHOULD be made no more than once per round-trip
			 * for a given destination transport address.
			 */
			if (!tp->rto_pending) {
				chunk->rtt_in_progress = 1;
				tp->rto_pending = 1;
			}
			has_data = 1;
		}

		padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
		if (padding)
			memset(skb_put(chunk->skb, padding), 0, padding);

		/* if this is the auth chunk that we are adding,
		 * store pointer where it will be added and put
		 * the auth into the packet.
		 */
		if (chunk == packet->auth)
			auth = skb_tail_pointer(nskb);

		cksum_buf_len += chunk->skb->len;
		memcpy(skb_put(nskb, chunk->skb->len),
		       chunk->skb->data, chunk->skb->len);

		SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n",
				  "*** Chunk", chunk,
				  sctp_cname(SCTP_ST_CHUNK(
					  chunk->chunk_hdr->type)),
				  chunk->has_tsn ? "TSN" : "No TSN",
				  chunk->has_tsn ?
				  ntohl(chunk->subh.data_hdr->tsn) : 0,
				  "length", ntohs(chunk->chunk_hdr->length),
				  "chunk->skb->len", chunk->skb->len,
				  "rtt_in_progress", chunk->rtt_in_progress);

		/*
		 * If this is a control chunk, this is our last
		 * reference. Free data chunks after they've been
		 * acknowledged or have failed.
		 */
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}

	/* SCTP-AUTH, Section 6.2
	 *    The sender MUST calculate the MAC as described in RFC2104 [2]
	 *    using the hash function H as described by the MAC Identifier and
	 *    the shared association key K based on the endpoint pair shared key
	 *    described by the shared key identifier.  The 'data' used for the
	 *    computation of the AUTH-chunk is given by the AUTH chunk with its
	 *    HMAC field set to zero (as shown in Figure 6) followed by all
	 *    chunks that are placed after the AUTH chunk in the SCTP packet.
	 */
	if (auth)
		sctp_auth_calculate_hmac(asoc, nskb,
					 (struct sctp_auth_chunk *)auth,
					 GFP_ATOMIC);

	/* 2) Calculate the Adler-32 checksum of the whole packet,
	 *    including the SCTP common header and all the
	 *    chunks.
	 *
	 * Note: Adler-32 is no longer applicable, as has been replaced
	 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
	 */
	if (!sctp_checksum_disable &&
	    !(dst->dev->features & (NETIF_F_NO_CSUM | NETIF_F_SCTP_CSUM))) {
		__u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);

		/* 3) Put the resultant value into the checksum field in the
		 *    common header, and leave the rest of the bits unchanged.
		 */
		sh->checksum = sctp_end_cksum(crc32);
	} else {
		if (dst->dev->features & NETIF_F_SCTP_CSUM) {
			/* no need to seed pseudo checksum for SCTP;
			 * the device will compute CRC32-C for us.
			 */
			nskb->ip_summed = CHECKSUM_PARTIAL;
			nskb->csum_start = (skb_transport_header(nskb) -
			                    nskb->head);
			nskb->csum_offset = offsetof(struct sctphdr, checksum);
		} else {
			nskb->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}

	/* IP layer ECN support
	 * From RFC 2481
	 *  "The ECN-Capable Transport (ECT) bit would be set by the
	 *   data sender to indicate that the end-points of the
	 *   transport protocol are ECN-capable."
	 *
	 * Now setting the ECT bit all the time, as it should not cause
	 * any problems protocol-wise even if our peer ignores it.
	 *
	 * Note: The IPv6 layer checks this bit too, later in
	 * transmission.  See IP6_ECN_flow_xmit().
	 */
	(*tp->af_specific->ecn_capable)(nskb->sk);

	/* Set up the IP options.  */
	/* BUG: not implemented
	 * For v4 this all lives somewhere in sk->sk_opt...
	 */

	/* Dump that on IP!  */
	if (asoc && asoc->peer.last_sent_to != tp) {
		/* Considering the multiple CPU scenario, this is a
		 * "correcter" place for last_sent_to.  --xguo
		 */
		asoc->peer.last_sent_to = tp;
	}

	if (has_data) {
		struct timer_list *timer;
		unsigned long timeout;

		/* Restart the AUTOCLOSE timer when sending data. */
		if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) {
			timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
			timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

			/* mod_timer() returns 0 if the timer was inactive;
			 * hold a ref on the association for the new timer.
			 */
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
		}
	}

	SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n",
			  nskb->len);

	nskb->local_df = packet->ipfragok;
	(*tp->af_specific->sctp_xmit)(nskb, tp);

out:
	/* Common exit: the packet structure is emptied for reuse
	 * whether transmission succeeded or failed.
	 */
	sctp_packet_reset(packet);
	return err;
no_route:
	kfree_skb(nskb);
	IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);

	/* FIXME: Returning the 'err' will effect all the associations
	 * associated with a socket, although only one of the paths of the
	 * association is unreachable.
	 * The real failure of a transport or association can be passed on
	 * to the user via notifications. So setting this error may not be
	 * required.
	 */
	 /* err = -EHOSTUNREACH; */
err:
	/* Control chunks are unreliable so just drop them.  DATA chunks
	 * will get resent or dropped later.
	 */
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	goto out;
nomem:
	err = -ENOMEM;
	goto err;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function checks to see if a chunk can be added */
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
					       struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd) {
		if (inflight > 0) {
			/* We have (at least) one data chunk in flight,
			 * so we can't fall back to rule 6.1 B).
			 */
			retval = SCTP_XMIT_RWND_FULL;
			goto finish;
		}
	}

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 *    When a Fast Retransmit is being performed the sender SHOULD
	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX)
		if (flight_size >= transport->cwnd) {
			retval = SCTP_XMIT_RWND_FULL;
			goto finish;
		}

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */
	if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
	    inflight && sctp_state(asoc, ESTABLISHED)) {
		unsigned max = transport->pathmtu - packet->overhead;
		unsigned len = chunk->skb->len + q->out_qlen;

		/* Check whether this chunk and all the rest of pending
		 * data will fit or delay in hopes of bundling a full
		 * sized packet.
		 * Don't delay large message writes that may have been
		 * fragmented into small pieces.
		 */
		if ((len < max) && chunk->msg->can_delay) {
			retval = SCTP_XMIT_NAGLE_DELAY;
			goto finish;
		}
	}

finish:
	return retval;
}

/* This private function does management things when adding DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd.  Include sk_buff overhead
	 * while updating peer.rwnd so that it reduces the chances of a
	 * receiver running out of receive buffer space even when receive
	 * window is still open.  This can happen when a sender is sending
	 * small messages.
	 */
	datasize += sizeof(struct sk_buff);
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	/* Has been accepted for transmission. */
	if (!asoc->peer.prsctp_capable)
		chunk->msg->can_abandon = 0;
	sctp_chunk_assign_tsn(chunk);
	sctp_chunk_assign_ssn(chunk);
}

/* Decide whether a chunk of chunk_len rounded bytes fits in the packet,
 * or whether we must IP-fragment / defer it.
 */
static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
					struct sctp_chunk *chunk,
					u16 chunk_len)
{
	size_t psize;
	size_t pmtu;
	int too_big;
	sctp_xmit_t retval = SCTP_XMIT_OK;

	psize = packet->size;
	/* Prefer the association-wide PMTU when an association exists. */
	pmtu = ((packet->transport->asoc) ?
		(packet->transport->asoc->pathmtu) :
		(packet->transport->pathmtu));

	too_big = (psize + chunk_len > pmtu);

	/* Decide if we need to fragment or resubmit later. */
	if (too_big) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 * 1. The packet is empty (meaning this chunk is greater
		 *    than the MTU)
		 * 2. The chunk we are adding is a control chunk
		 * 3. The packet doesn't have any data in it yet and data
		 *    requires authentication.
		 */
		if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
		} else {
			retval = SCTP_XMIT_PMTU_FULL;
		}
	}

	return retval;
}
gpl-2.0
lucaspcamargo/litmus-rt
drivers/media/usb/gspca/xirlink_cit.c
1314
99347
/* * USB IBM C-It Video Camera driver * * Supports Xirlink C-It Video Camera, IBM PC Camera, * IBM NetCamera and Veo Stingray. * * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com> * * This driver is based on earlier work of: * * (C) Copyright 1999 Johannes Erdfelt * (C) Copyright 1999 Randy Dunlap * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "xirlink-cit" #include <linux/input.h> #include "gspca.h" MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("Xirlink C-IT"); MODULE_LICENSE("GPL"); /* FIXME we should autodetect this */ static int ibm_netcam_pro; module_param(ibm_netcam_pro, int, 0); MODULE_PARM_DESC(ibm_netcam_pro, "Use IBM Netcamera Pro init sequences for Model 3 cams"); /* FIXME this should be handled through the V4L2 input selection API */ static int rca_input; module_param(rca_input, int, 0644); MODULE_PARM_DESC(rca_input, "Use rca input instead of ccd sensor on Model 3 cams"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ struct v4l2_ctrl *lighting; u8 model; #define CIT_MODEL0 0 /* bcd version 0.01 cams ie the xvp-500 */ #define CIT_MODEL1 1 /* The model 1 - 4 nomenclature comes from the old */ #define CIT_MODEL2 2 /* ibmcam driver */ #define CIT_MODEL3 3 #define CIT_MODEL4 4 #define CIT_IBM_NETCAM_PRO 5 u8 input_index; u8 button_state; u8 stop_on_control_change; u8 sof_read; u8 sof_len; }; static void sd_stop0(struct gspca_dev *gspca_dev); static const struct v4l2_pix_format cif_yuv_mode[] = { {176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {352, 288, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, }; static const struct v4l2_pix_format vga_yuv_mode[] = { {160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {320, 240, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {640, 480, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, }; static const struct v4l2_pix_format model0_mode[] = { {160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {320, 240, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, }; static const struct v4l2_pix_format model2_mode[] = { {160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 3 / 2 + 4, .colorspace = 
V4L2_COLORSPACE_SRGB}, {176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {320, 240, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {352, 288, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, }; /* * 01.01.08 - Added for RCA video in support -LO * This struct is used to init the Model3 cam to use the RCA video in port * instead of the CCD sensor. */ static const u16 rca_initdata[][3] = { {0, 0x0000, 0x010c}, {0, 0x0006, 0x012c}, {0, 0x0078, 0x012d}, {0, 0x0046, 0x012f}, {0, 0xd141, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfea8, 0x0124}, {1, 0x0000, 0x0116}, {0, 0x0064, 0x0116}, {1, 0x0000, 0x0115}, {0, 0x0003, 0x0115}, {0, 0x0008, 0x0123}, {0, 0x0000, 0x0117}, {0, 0x0000, 0x0112}, {0, 0x0080, 0x0100}, {0, 0x0000, 0x0100}, {1, 0x0000, 0x0116}, {0, 0x0060, 0x0116}, {0, 0x0002, 0x0112}, {0, 0x0000, 0x0123}, {0, 0x0001, 0x0117}, {0, 0x0040, 0x0108}, {0, 0x0019, 0x012c}, {0, 0x0040, 0x0116}, {0, 0x000a, 0x0115}, {0, 0x000b, 0x0115}, {0, 0x0078, 0x012d}, {0, 0x0046, 0x012f}, {0, 0xd141, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfea8, 0x0124}, {0, 0x0064, 0x0116}, {0, 0x0000, 0x0115}, {0, 0x0001, 0x0115}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00aa, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00f2, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x000f, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00f8, 0x0127}, {0, 0xfff8, 
0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00fc, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00f9, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x003c, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0027, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0019, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0021, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0006, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0045, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002a, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x000e, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 
0x0124}, {0, 0x002b, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00f4, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002c, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0004, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002d, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0014, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002e, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 
0x0124}, {0, 0x0003, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002f, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0003, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0014, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0053, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0x0000, 0x0101}, {0, 0x00a0, 0x0103}, {0, 0x0078, 0x0105}, {0, 0x0000, 
0x010a}, {0, 0x0024, 0x010b}, {0, 0x0028, 0x0119}, {0, 0x0088, 0x011b}, {0, 0x0002, 0x011d}, {0, 0x0003, 0x011e}, {0, 0x0000, 0x0129}, {0, 0x00fc, 0x012b}, {0, 0x0008, 0x0102}, {0, 0x0000, 0x0104}, {0, 0x0008, 0x011a}, {0, 0x0028, 0x011c}, {0, 0x0021, 0x012a}, {0, 0x0000, 0x0118}, {0, 0x0000, 0x0132}, {0, 0x0000, 0x0109}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0031, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00dc, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 
0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0032, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0020, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0030, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0008, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0x0003, 0x0111}, }; /* TESTME the old ibmcam driver repeats certain commands to Model1 cameras, we do the same for now (testing needed to 
see if this is really necessary) */
static const int cit_model1_ntries = 5;
static const int cit_model1_ntries2 = 2;

/* Write a 16-bit value to a camera register via a vendor control
 * transfer.  Failures are logged but the function still returns 0:
 * register writes are treated as best-effort (NOTE(review): presumably
 * because init sequences contain hundreds of writes — confirm callers
 * never need the real error).
 */
static int cit_write_reg(struct gspca_dev *gspca_dev, u16 value, u16 index)
{
	struct usb_device *udev = gspca_dev->dev;
	int err;

	err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
			value, index, NULL, 0, 1000);
	if (err < 0)
		pr_err("Failed to write a register (index 0x%04X, value 0x%02X, error %d)\n",
		       index, value, err);

	return 0;
}

/* Read a camera register into gspca_dev->usb_buf (8 bytes requested).
 * Returns 0 on success or the negative usb_control_msg() error.
 * When 'verbose' is set, the first returned byte is logged.
 */
static int cit_read_reg(struct gspca_dev *gspca_dev, u16 index, int verbose)
{
	struct usb_device *udev = gspca_dev->dev;
	__u8 *buf = gspca_dev->usb_buf;
	int res;

	res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01,
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
			0x00, index, buf, 8, 1000);
	if (res < 0) {
		pr_err("Failed to read a register (index 0x%04X, error %d)\n",
		       index, res);
		return res;
	}

	if (verbose)
		PDEBUG(D_PROBE, "Register %04x value: %02x", index, buf[0]);

	return 0;
}

/*
 * cit_send_FF_04_02()
 *
 * This procedure sends magic 3-command prefix to the camera.
 * The purpose of this prefix is not known.
 *
 * History:
 * 1/2/00   Created.
*/
static void cit_send_FF_04_02(struct gspca_dev *gspca_dev)
{
	cit_write_reg(gspca_dev, 0x00FF, 0x0127);
	cit_write_reg(gspca_dev, 0x0004, 0x0124);
	cit_write_reg(gspca_dev, 0x0002, 0x0124);
}

/* Send the 00-04-06 command sequence (meaning unknown, inherited from
 * the old ibmcam driver).
 */
static void cit_send_00_04_06(struct gspca_dev *gspca_dev)
{
	cit_write_reg(gspca_dev, 0x0000, 0x0127);
	cit_write_reg(gspca_dev, 0x0004, 0x0124);
	cit_write_reg(gspca_dev, 0x0006, 0x0124);
}

/* The cit_send_x_* helpers below write 'x' to register 0x0127 followed
 * by the command bytes named in the function (written to 0x0124).
 * The exact protocol semantics are undocumented.
 */
static void cit_send_x_00(struct gspca_dev *gspca_dev, unsigned short x)
{
	cit_write_reg(gspca_dev, x, 0x0127);
	cit_write_reg(gspca_dev, 0x0000, 0x0124);
}

static void cit_send_x_00_05(struct gspca_dev *gspca_dev, unsigned short x)
{
	cit_send_x_00(gspca_dev, x);
	cit_write_reg(gspca_dev, 0x0005, 0x0124);
}

static void cit_send_x_00_05_02(struct gspca_dev *gspca_dev, unsigned short x)
{
	cit_write_reg(gspca_dev, x, 0x0127);
	cit_write_reg(gspca_dev, 0x0000, 0x0124);
	cit_write_reg(gspca_dev, 0x0005, 0x0124);
	cit_write_reg(gspca_dev, 0x0002, 0x0124);
}

static void cit_send_x_01_00_05(struct gspca_dev *gspca_dev, u16 x)
{
	cit_write_reg(gspca_dev, x, 0x0127);
	cit_write_reg(gspca_dev, 0x0001, 0x0124);
	cit_write_reg(gspca_dev, 0x0000, 0x0124);
	cit_write_reg(gspca_dev, 0x0005, 0x0124);
}

static void cit_send_x_00_05_02_01(struct gspca_dev *gspca_dev, u16 x)
{
	cit_write_reg(gspca_dev, x, 0x0127);
	cit_write_reg(gspca_dev, 0x0000, 0x0124);
	cit_write_reg(gspca_dev, 0x0005, 0x0124);
	cit_write_reg(gspca_dev, 0x0002, 0x0124);
	cit_write_reg(gspca_dev, 0x0001, 0x0124);
}

static void cit_send_x_00_05_02_08_01(struct gspca_dev *gspca_dev, u16 x)
{
	cit_write_reg(gspca_dev, x, 0x0127);
	cit_write_reg(gspca_dev, 0x0000, 0x0124);
	cit_write_reg(gspca_dev, 0x0005, 0x0124);
	cit_write_reg(gspca_dev, 0x0002, 0x0124);
	cit_write_reg(gspca_dev, 0x0008, 0x0124);
	cit_write_reg(gspca_dev, 0x0001, 0x0124);
}

/* Write value 'val' to camera function key 'fkey' using the model 1
 * "Packet Format 1" command framing, ending with a readback of 0x0126
 * and the magic FF-04-02 prefix.
 */
static void cit_Packet_Format1(struct gspca_dev *gspca_dev, u16 fkey, u16 val)
{
	cit_send_x_01_00_05(gspca_dev, 0x0088);
	cit_send_x_00_05(gspca_dev, fkey);
	cit_send_x_00_05_02_08_01(gspca_dev, val);
	cit_send_x_00_05(gspca_dev, 0x0088);
	cit_send_x_00_05_02_01(gspca_dev, fkey);
	cit_send_x_00_05(gspca_dev, 0x0089);
	cit_send_x_00(gspca_dev, fkey);
	cit_send_00_04_06(gspca_dev);
	cit_read_reg(gspca_dev, 0x0126, 0);
	cit_send_FF_04_02(gspca_dev);
}

/* Shorter variant of the above framing (no readback, no trailer). */
static void cit_PacketFormat2(struct gspca_dev *gspca_dev, u16 fkey, u16 val)
{
	cit_send_x_01_00_05(gspca_dev, 0x0088);
	cit_send_x_00_05(gspca_dev, fkey);
	cit_send_x_00_05_02(gspca_dev, val);
}

static void cit_model2_Packet2(struct gspca_dev *gspca_dev)
{
	cit_write_reg(gspca_dev, 0x00ff, 0x012d);
	cit_write_reg(gspca_dev, 0xfea3, 0x0124);
}

/* Model 2 register write: value 'v2' to internal register 'v1'. */
static void cit_model2_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2)
{
	cit_write_reg(gspca_dev, 0x00aa, 0x012d);
	cit_write_reg(gspca_dev, 0x00ff, 0x012e);
	cit_write_reg(gspca_dev, v1, 0x012f);
	cit_write_reg(gspca_dev, 0x00ff, 0x0130);
	cit_write_reg(gspca_dev, 0xc719, 0x0124);
	cit_write_reg(gspca_dev, v2, 0x0127);
	cit_model2_Packet2(gspca_dev);
}

/*
 * cit_model3_Packet1()
 *
 * 00_0078_012d
 * 00_0097_012f
 * 00_d141_0124
 * 00_0096_0127
 * 00_fea8_0124
 */
static void cit_model3_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2)
{
	cit_write_reg(gspca_dev, 0x0078, 0x012d);
	cit_write_reg(gspca_dev, v1, 0x012f);
	cit_write_reg(gspca_dev, 0xd141, 0x0124);
	cit_write_reg(gspca_dev, v2, 0x0127);
	cit_write_reg(gspca_dev, 0xfea8, 0x0124);
}

/* Model 4 variant of the register-write packet (0x00aa preamble). */
static void cit_model4_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2)
{
	cit_write_reg(gspca_dev, 0x00aa, 0x012d);
	cit_write_reg(gspca_dev, v1, 0x012f);
	cit_write_reg(gspca_dev, 0xd141, 0x0124);
	cit_write_reg(gspca_dev, v2, 0x0127);
	cit_write_reg(gspca_dev, 0xfea8, 0x0124);
}

/* Model 4 brightness write: 'val' goes to register 0x0026 via the
 * extended packet framing.
 */
static void cit_model4_BrightnessPacket(struct gspca_dev *gspca_dev, u16 val)
{
	cit_write_reg(gspca_dev, 0x00aa, 0x012d);
	cit_write_reg(gspca_dev, 0x0026, 0x012f);
	cit_write_reg(gspca_dev, 0xd141, 0x0124);
	cit_write_reg(gspca_dev, val, 0x0127);
	cit_write_reg(gspca_dev, 0x00aa, 0x0130);
	cit_write_reg(gspca_dev, 0x82a8, 0x0124);
	cit_write_reg(gspca_dev, 0x0038, 0x012d);
	cit_write_reg(gspca_dev, 0x0004, 0x012f);
	cit_write_reg(gspca_dev, 0xd145, 0x0124);
	cit_write_reg(gspca_dev, 0xfffa, 0x0124);
}

/* this function is called at probe time; selects the mode table,
 * SOF length and control-change behaviour per camera model.
 */
static int sd_config(struct gspca_dev *gspca_dev,
		     const struct usb_device_id *id)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam;

	sd->model = id->driver_info;
	if (sd->model == CIT_MODEL3 && ibm_netcam_pro)
		sd->model = CIT_IBM_NETCAM_PRO;

	cam = &gspca_dev->cam;
	switch (sd->model) {
	case CIT_MODEL0:
		cam->cam_mode = model0_mode;
		cam->nmodes = ARRAY_SIZE(model0_mode);
		sd->sof_len = 4;
		break;
	case CIT_MODEL1:
		cam->cam_mode = cif_yuv_mode;
		cam->nmodes = ARRAY_SIZE(cif_yuv_mode);
		sd->sof_len = 4;
		break;
	case CIT_MODEL2:
		cam->cam_mode = model2_mode + 1; /* no 160x120 */
		cam->nmodes = 3;
		break;
	case CIT_MODEL3:
		cam->cam_mode = vga_yuv_mode;
		cam->nmodes = ARRAY_SIZE(vga_yuv_mode);
		sd->stop_on_control_change = 1;
		sd->sof_len = 4;
		break;
	case CIT_MODEL4:
		cam->cam_mode = model2_mode;
		cam->nmodes = ARRAY_SIZE(model2_mode);
		break;
	case CIT_IBM_NETCAM_PRO:
		cam->cam_mode = vga_yuv_mode;
		cam->nmodes = 2; /* no 640 x 480 */
		cam->input_flags = V4L2_IN_ST_VFLIP;
		sd->stop_on_control_change = 1;
		sd->sof_len = 4;
		break;
	}

	return 0;
}

/* Model 0 init: register meanings largely unknown; the inline comments
 * are inherited guesses from the original author.
 */
static int cit_init_model0(struct gspca_dev *gspca_dev)
{
	cit_write_reg(gspca_dev, 0x0000, 0x0100); /* turn on led */
	cit_write_reg(gspca_dev, 0x0001, 0x0112); /* turn on autogain ? */
	cit_write_reg(gspca_dev, 0x0000, 0x0400);
	cit_write_reg(gspca_dev, 0x0001, 0x0400);
	cit_write_reg(gspca_dev, 0x0000, 0x0420);
	cit_write_reg(gspca_dev, 0x0001, 0x0420);
	cit_write_reg(gspca_dev, 0x000d, 0x0409);
	cit_write_reg(gspca_dev, 0x0002, 0x040a);
	cit_write_reg(gspca_dev, 0x0018, 0x0405);
	cit_write_reg(gspca_dev, 0x0008, 0x0435);
	cit_write_reg(gspca_dev, 0x0026, 0x040b);
	cit_write_reg(gspca_dev, 0x0007, 0x0437);
	cit_write_reg(gspca_dev, 0x0015, 0x042f);
	cit_write_reg(gspca_dev, 0x002b, 0x0439);
	cit_write_reg(gspca_dev, 0x0026, 0x043a);
	cit_write_reg(gspca_dev, 0x0008, 0x0438);
	cit_write_reg(gspca_dev, 0x001e, 0x042b);
	cit_write_reg(gspca_dev, 0x0041, 0x042c);

	return 0;
}

static int cit_init_ibm_netcam_pro(struct gspca_dev *gspca_dev)
{
	cit_read_reg(gspca_dev, 0x128, 1);
	cit_write_reg(gspca_dev, 0x0003, 0x0133);
	cit_write_reg(gspca_dev, 0x0000, 0x0117);
	cit_write_reg(gspca_dev, 0x0008, 0x0123);
	cit_write_reg(gspca_dev, 0x0000, 0x0100);
	cit_read_reg(gspca_dev, 0x0116, 0);
	cit_write_reg(gspca_dev, 0x0060, 0x0116);
	cit_write_reg(gspca_dev, 0x0002, 0x0112);
	cit_write_reg(gspca_dev, 0x0000, 0x0133);
	cit_write_reg(gspca_dev, 0x0000, 0x0123);
	cit_write_reg(gspca_dev, 0x0001, 0x0117);
	cit_write_reg(gspca_dev, 0x0040, 0x0108);
	cit_write_reg(gspca_dev, 0x0019, 0x012c);
	cit_write_reg(gspca_dev, 0x0060, 0x0116);
	cit_write_reg(gspca_dev, 0x0002, 0x0115);
	cit_write_reg(gspca_dev, 0x000b, 0x0115);
	cit_write_reg(gspca_dev, 0x0078, 0x012d);
	cit_write_reg(gspca_dev, 0x0001, 0x012f);
	cit_write_reg(gspca_dev, 0xd141, 0x0124);
	cit_write_reg(gspca_dev, 0x0079, 0x012d);
	cit_write_reg(gspca_dev, 0x00ff, 0x0130);
	cit_write_reg(gspca_dev, 0xcd41, 0x0124);
	cit_write_reg(gspca_dev, 0xfffa, 0x0124);
	cit_read_reg(gspca_dev, 0x0126, 1);

	cit_model3_Packet1(gspca_dev, 0x0000, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0000, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x000b, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x000c, 0x0008);
	cit_model3_Packet1(gspca_dev, 0x000d, 0x003a);
	/* Continuation of the Netcamera Pro sensor register table.
	   Values presumably recorded from USB traces — semantics unknown. */
	cit_model3_Packet1(gspca_dev, 0x000e, 0x0060);
	cit_model3_Packet1(gspca_dev, 0x000f, 0x0060);
	cit_model3_Packet1(gspca_dev, 0x0010, 0x0008);
	cit_model3_Packet1(gspca_dev, 0x0011, 0x0004);
	cit_model3_Packet1(gspca_dev, 0x0012, 0x0028);
	cit_model3_Packet1(gspca_dev, 0x0013, 0x0002);
	cit_model3_Packet1(gspca_dev, 0x0014, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0015, 0x00fb);
	cit_model3_Packet1(gspca_dev, 0x0016, 0x0002);
	cit_model3_Packet1(gspca_dev, 0x0017, 0x0037);
	cit_model3_Packet1(gspca_dev, 0x0018, 0x0036);
	cit_model3_Packet1(gspca_dev, 0x001e, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x001f, 0x0008);
	cit_model3_Packet1(gspca_dev, 0x0020, 0x00c1);
	cit_model3_Packet1(gspca_dev, 0x0021, 0x0034);
	cit_model3_Packet1(gspca_dev, 0x0022, 0x0034);
	cit_model3_Packet1(gspca_dev, 0x0025, 0x0002);
	cit_model3_Packet1(gspca_dev, 0x0028, 0x0022);
	cit_model3_Packet1(gspca_dev, 0x0029, 0x000a);
	cit_model3_Packet1(gspca_dev, 0x002b, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x002c, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x002d, 0x00ff);
	cit_model3_Packet1(gspca_dev, 0x002e, 0x00ff);
	cit_model3_Packet1(gspca_dev, 0x002f, 0x00ff);
	cit_model3_Packet1(gspca_dev, 0x0030, 0x00ff);
	cit_model3_Packet1(gspca_dev, 0x0031, 0x00ff);
	cit_model3_Packet1(gspca_dev, 0x0032, 0x0007);
	cit_model3_Packet1(gspca_dev, 0x0033, 0x0005);
	cit_model3_Packet1(gspca_dev, 0x0037, 0x0040);
	cit_model3_Packet1(gspca_dev, 0x0039, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x003a, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x003b, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x003c, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0040, 0x000c);
	cit_model3_Packet1(gspca_dev, 0x0041, 0x00fb);
	cit_model3_Packet1(gspca_dev, 0x0042, 0x0002);
	cit_model3_Packet1(gspca_dev, 0x0043, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0045, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0046, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0047, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0048, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0049, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x004a, 0x00ff);
	cit_model3_Packet1(gspca_dev, 0x004b, 0x00ff);
	cit_model3_Packet1(gspca_dev, 0x004c, 0x00ff);
	cit_model3_Packet1(gspca_dev, 0x004f, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0050, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0051, 0x0002);
	cit_model3_Packet1(gspca_dev, 0x0055, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0056, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0057, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0058, 0x0002);
	cit_model3_Packet1(gspca_dev, 0x0059, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x005c, 0x0016);
	cit_model3_Packet1(gspca_dev, 0x005d, 0x0022);
	cit_model3_Packet1(gspca_dev, 0x005e, 0x003c);
	cit_model3_Packet1(gspca_dev, 0x005f, 0x0050);
	cit_model3_Packet1(gspca_dev, 0x0060, 0x0044);
	cit_model3_Packet1(gspca_dev, 0x0061, 0x0005);
	cit_model3_Packet1(gspca_dev, 0x006a, 0x007e);
	cit_model3_Packet1(gspca_dev, 0x006f, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0072, 0x001b);
	cit_model3_Packet1(gspca_dev, 0x0073, 0x0005);
	cit_model3_Packet1(gspca_dev, 0x0074, 0x000a);
	cit_model3_Packet1(gspca_dev, 0x0075, 0x001b);
	cit_model3_Packet1(gspca_dev, 0x0076, 0x002a);
	cit_model3_Packet1(gspca_dev, 0x0077, 0x003c);
	cit_model3_Packet1(gspca_dev, 0x0078, 0x0050);
	cit_model3_Packet1(gspca_dev, 0x007b, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x007c, 0x0011);
	cit_model3_Packet1(gspca_dev, 0x007d, 0x0024);
	cit_model3_Packet1(gspca_dev, 0x007e, 0x0043);
	cit_model3_Packet1(gspca_dev, 0x007f, 0x005a);
	cit_model3_Packet1(gspca_dev, 0x0084, 0x0020);
	cit_model3_Packet1(gspca_dev, 0x0085, 0x0033);
	cit_model3_Packet1(gspca_dev, 0x0086, 0x000a);
	cit_model3_Packet1(gspca_dev, 0x0087, 0x0030);
	cit_model3_Packet1(gspca_dev, 0x0088, 0x0070);
	cit_model3_Packet1(gspca_dev, 0x008b, 0x0008);
	cit_model3_Packet1(gspca_dev, 0x008f, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0090, 0x0006);
	cit_model3_Packet1(gspca_dev, 0x0091, 0x0028);
	cit_model3_Packet1(gspca_dev, 0x0092, 0x005a);
	cit_model3_Packet1(gspca_dev, 0x0093, 0x0082);
	cit_model3_Packet1(gspca_dev, 0x0096, 0x0014);
	cit_model3_Packet1(gspca_dev, 0x0097, 0x0020);
	cit_model3_Packet1(gspca_dev, 0x0098, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00b0, 0x0046);
	cit_model3_Packet1(gspca_dev, 0x00b1, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00b2, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00b3, 0x0004);
	cit_model3_Packet1(gspca_dev, 0x00b4, 0x0007);
	cit_model3_Packet1(gspca_dev, 0x00b6, 0x0002);
	cit_model3_Packet1(gspca_dev, 0x00b7, 0x0004);
	cit_model3_Packet1(gspca_dev, 0x00bb, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00bc, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x00bd, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00bf, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00c0, 0x00c8);
	cit_model3_Packet1(gspca_dev, 0x00c1, 0x0014);
	cit_model3_Packet1(gspca_dev, 0x00c2, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x00c3, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00c4, 0x0004);
	cit_model3_Packet1(gspca_dev, 0x00cb, 0x00bf);
	cit_model3_Packet1(gspca_dev, 0x00cc, 0x00bf);
	cit_model3_Packet1(gspca_dev, 0x00cd, 0x00bf);
	cit_model3_Packet1(gspca_dev, 0x00ce, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00cf, 0x0020);
	cit_model3_Packet1(gspca_dev, 0x00d0, 0x0040);
	/* NOTE(review): 0x00d1 is written twice in the original sequence;
	   kept verbatim since the init dump's semantics are unknown. */
	cit_model3_Packet1(gspca_dev, 0x00d1, 0x00bf);
	cit_model3_Packet1(gspca_dev, 0x00d1, 0x00bf);
	cit_model3_Packet1(gspca_dev, 0x00d2, 0x00bf);
	cit_model3_Packet1(gspca_dev, 0x00d3, 0x00bf);
	cit_model3_Packet1(gspca_dev, 0x00ea, 0x0008);
	cit_model3_Packet1(gspca_dev, 0x00eb, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00ec, 0x00e8);
	cit_model3_Packet1(gspca_dev, 0x00ed, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x00ef, 0x0022);
	cit_model3_Packet1(gspca_dev, 0x00f0, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00f2, 0x0028);
	cit_model3_Packet1(gspca_dev, 0x00f4, 0x0002);
	cit_model3_Packet1(gspca_dev, 0x00f5, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00fa, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00fb, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x00fc, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00fd, 0x0000);
cit_model3_Packet1(gspca_dev, 0x00fe, 0x0000); cit_model3_Packet1(gspca_dev, 0x00ff, 0x0000); cit_model3_Packet1(gspca_dev, 0x00be, 0x0003); cit_model3_Packet1(gspca_dev, 0x00c8, 0x0000); cit_model3_Packet1(gspca_dev, 0x00c9, 0x0020); cit_model3_Packet1(gspca_dev, 0x00ca, 0x0040); cit_model3_Packet1(gspca_dev, 0x0053, 0x0001); cit_model3_Packet1(gspca_dev, 0x0082, 0x000e); cit_model3_Packet1(gspca_dev, 0x0083, 0x0020); cit_model3_Packet1(gspca_dev, 0x0034, 0x003c); cit_model3_Packet1(gspca_dev, 0x006e, 0x0055); cit_model3_Packet1(gspca_dev, 0x0062, 0x0005); cit_model3_Packet1(gspca_dev, 0x0063, 0x0008); cit_model3_Packet1(gspca_dev, 0x0066, 0x000a); cit_model3_Packet1(gspca_dev, 0x0067, 0x0006); cit_model3_Packet1(gspca_dev, 0x006b, 0x0010); cit_model3_Packet1(gspca_dev, 0x005a, 0x0001); cit_model3_Packet1(gspca_dev, 0x005b, 0x000a); cit_model3_Packet1(gspca_dev, 0x0023, 0x0006); cit_model3_Packet1(gspca_dev, 0x0026, 0x0004); cit_model3_Packet1(gspca_dev, 0x0036, 0x0069); cit_model3_Packet1(gspca_dev, 0x0038, 0x0064); cit_model3_Packet1(gspca_dev, 0x003d, 0x0003); cit_model3_Packet1(gspca_dev, 0x003e, 0x0001); cit_model3_Packet1(gspca_dev, 0x00b8, 0x0014); cit_model3_Packet1(gspca_dev, 0x00b9, 0x0014); cit_model3_Packet1(gspca_dev, 0x00e6, 0x0004); cit_model3_Packet1(gspca_dev, 0x00e8, 0x0001); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->model) { case CIT_MODEL0: cit_init_model0(gspca_dev); sd_stop0(gspca_dev); break; case CIT_MODEL1: case CIT_MODEL2: case CIT_MODEL3: case CIT_MODEL4: break; /* All is done in sd_start */ case CIT_IBM_NETCAM_PRO: cit_init_ibm_netcam_pro(gspca_dev); sd_stop0(gspca_dev); break; } return 0; } static int cit_set_brightness(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; int i; switch (sd->model) { case CIT_MODEL0: case CIT_IBM_NETCAM_PRO: /* No (known) brightness control for these 
		 */
		break;
	case CIT_MODEL1:
		/* Model 1: Brightness range 0 - 63 */
		cit_Packet_Format1(gspca_dev, 0x0031, val);
		cit_Packet_Format1(gspca_dev, 0x0032, val);
		cit_Packet_Format1(gspca_dev, 0x0033, val);
		break;
	case CIT_MODEL2:
		/* Model 2: Brightness range 0x60 - 0xee */
		/* Scale 0 - 63 to 0x60 - 0xee */
		i = 0x60 + val * 2254 / 1000;
		cit_model2_Packet1(gspca_dev, 0x001a, i);
		break;
	case CIT_MODEL3:
		/* Model 3: Brightness range 'i' in [0x0C..0x3F] */
		i = val;
		if (i < 0x0c)
			i = 0x0c;
		cit_model3_Packet1(gspca_dev, 0x0036, i);
		break;
	case CIT_MODEL4:
		/* Model 4: Brightness range 'i' in [0x04..0xb4] */
		/* Scale 0 - 63 to 0x04 - 0xb4 */
		i = 0x04 + val * 2794 / 1000;
		cit_model4_BrightnessPacket(gspca_dev, i);
		break;
	}

	return 0;
}

/* Apply the V4L2 contrast control; per-model register writes. */
static int cit_set_contrast(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	switch (sd->model) {
	case CIT_MODEL0: {
		int i;
		/* gain 0-15, 0-20 -> 0-15 */
		i = val * 1000 / 1333;
		cit_write_reg(gspca_dev, i, 0x0422);
		/* gain 0-31, may not be lower than 0x0422, 0-20 -> 0-31 */
		i = val * 2000 / 1333;
		cit_write_reg(gspca_dev, i, 0x0423);
		/* gain 0-127, may not be lower than 0x0423, 0-20 -> 0-63 */
		i = val * 4000 / 1333;
		cit_write_reg(gspca_dev, i, 0x0424);
		/* gain 0-127, may not be lower than 0x0424, 0-20 -> 0-127 */
		i = val * 8000 / 1333;
		cit_write_reg(gspca_dev, i, 0x0425);
		break;
	}
	case CIT_MODEL2:
	case CIT_MODEL4:
		/* These models do not have this control. */
		break;
	case CIT_MODEL1: {
		/* Scale 0 - 20 to 15 - 0 */
		int i, new_contrast = (20 - val) * 1000 / 1333;

		for (i = 0; i < cit_model1_ntries; i++) {
			cit_Packet_Format1(gspca_dev, 0x0014, new_contrast);
			cit_send_FF_04_02(gspca_dev);
		}
		break;
	}
	case CIT_MODEL3: {
		/* Preset hardware values */
		static const struct {
			unsigned short cv1;
			unsigned short cv2;
			unsigned short cv3;
		} cv[7] = {
			{ 0x05, 0x05, 0x0f },	/* Minimum */
			{ 0x04, 0x04, 0x16 },
			{ 0x02, 0x03, 0x16 },
			{ 0x02, 0x08, 0x16 },
			{ 0x01, 0x0c, 0x16 },
			{ 0x01, 0x0e, 0x16 },
			{ 0x01, 0x10, 0x16 }	/* Maximum */
		};
		/* val is scaled down to an index into cv[] —
		   assumes val <= 20 so i stays in [0..6]; set by ctrl range */
		int i = val / 3;
		cit_model3_Packet1(gspca_dev, 0x0067, cv[i].cv1);
		cit_model3_Packet1(gspca_dev, 0x005b, cv[i].cv2);
		cit_model3_Packet1(gspca_dev, 0x005c, cv[i].cv3);
		break;
	}
	case CIT_IBM_NETCAM_PRO:
		cit_model3_Packet1(gspca_dev, 0x005b, val + 1);
		break;
	}
	return 0;
}

/* Apply the V4L2 hue control; only models 2 and 4 act on it. */
static int cit_set_hue(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	switch (sd->model) {
	case CIT_MODEL0:
	case CIT_MODEL1:
	case CIT_IBM_NETCAM_PRO:
		/* No hue control for these models */
		break;
	case CIT_MODEL2:
		cit_model2_Packet1(gspca_dev, 0x0024, val);
		/* cit_model2_Packet1(gspca_dev, 0x0020, sat); */
		break;
	case CIT_MODEL3: {
		/* Model 3: Brightness range 'i' in [0x05..0x37] */
		/* TESTME according to the ibmcam driver this does not work */
		if (0) {
			/* Scale 0 - 127 to 0x05 - 0x37 */
			int i = 0x05 + val * 1000 / 2540;

			cit_model3_Packet1(gspca_dev, 0x007e, i);
		}
		break;
	}
	case CIT_MODEL4:
		/* HDG: taken from ibmcam, setting the color gains does not
		 * really belong here.
		 *
		 * I am not sure r/g/b_gain variables exactly control gain
		 * of those channels. Most likely they subtly change some
		 * very internal image processing settings in the camera.
		 * In any case, here is what they do, and feel free to tweak:
		 *
		 * r_gain: seriously affects red gain
		 * g_gain: seriously affects green gain
		 * b_gain: seriously affects blue gain
		 * hue: changes average color from violet (0) to red (0xFF)
		 */
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x001e, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 160, 0x0127);	/* Green gain */
		cit_write_reg(gspca_dev, 160, 0x012e);	/* Red gain */
		cit_write_reg(gspca_dev, 160, 0x0130);	/* Blue gain */
		cit_write_reg(gspca_dev, 0x8a28, 0x0124);
		cit_write_reg(gspca_dev, val, 0x012d);	/* Hue */
		cit_write_reg(gspca_dev, 0xf545, 0x0124);
		break;
	}
	return 0;
}

/* Apply the V4L2 sharpness control; only models 1 and 3 act on it. */
static int cit_set_sharpness(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	switch (sd->model) {
	case CIT_MODEL0:
	case CIT_MODEL2:
	case CIT_MODEL4:
	case CIT_IBM_NETCAM_PRO:
		/* These models do not have this control */
		break;
	case CIT_MODEL1: {
		int i;
		/* Magic lookup table indexed by the control value. */
		const unsigned short sa[] = {
			0x11, 0x13, 0x16, 0x18, 0x1a, 0x8, 0x0a };

		for (i = 0; i < cit_model1_ntries; i++)
			cit_PacketFormat2(gspca_dev, 0x0013, sa[val]);
		break;
	}
	case CIT_MODEL3: {
		/*
		 * "Use a table of magic numbers.
		 *  This setting doesn't really change much.
		 *  But that's how Windows does it."
		 */
		static const struct {
			unsigned short sv1;
			unsigned short sv2;
			unsigned short sv3;
			unsigned short sv4;
		} sv[7] = {
			{ 0x00, 0x00, 0x05, 0x14 },	/* Smoothest */
			{ 0x01, 0x04, 0x05, 0x14 },
			{ 0x02, 0x04, 0x05, 0x14 },
			{ 0x03, 0x04, 0x05, 0x14 },
			{ 0x03, 0x05, 0x05, 0x14 },
			{ 0x03, 0x06, 0x05, 0x14 },
			{ 0x03, 0x07, 0x05, 0x14 }	/* Sharpest */
		};

		cit_model3_Packet1(gspca_dev, 0x0060, sv[val].sv1);
		cit_model3_Packet1(gspca_dev, 0x0061, sv[val].sv2);
		cit_model3_Packet1(gspca_dev, 0x0062, sv[val].sv3);
		cit_model3_Packet1(gspca_dev, 0x0063, sv[val].sv4);
		break;
	}
	}
	return 0;
}

/*
 * cit_set_lighting()
 *
 * Camera model 1:
 * We have 3 levels of lighting conditions: 0=Bright, 1=Medium, 2=Low.
 *
 * Camera model 2:
 * We have 16 levels of lighting, 0 for bright light and up to 15 for
 * low light. But values above 5 or so are useless because camera is
 * not really capable to produce anything worth viewing at such light.
 * This setting may be altered only in certain camera state.
 *
 * Low lighting forces slower FPS.
 *
 * History:
 * 1/5/00   Created.
 * 2/20/00  Added support for Model 2 cameras.
 */
static void cit_set_lighting(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	switch (sd->model) {
	case CIT_MODEL0:
	case CIT_MODEL2:
	case CIT_MODEL3:
	case CIT_MODEL4:
	case CIT_IBM_NETCAM_PRO:
		break;
	case CIT_MODEL1: {
		int i;

		for (i = 0; i < cit_model1_ntries; i++)
			cit_Packet_Format1(gspca_dev, 0x0027, val);
		break;
	}
	}
}

/* Apply the V4L2 horizontal-flip control; only model 0 supports it. */
static void cit_set_hflip(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	switch (sd->model) {
	case CIT_MODEL0:
		if (val)
			cit_write_reg(gspca_dev, 0x0020, 0x0115);
		else
			cit_write_reg(gspca_dev, 0x0040, 0x0115);
		break;
	case CIT_MODEL1:
	case CIT_MODEL2:
	case CIT_MODEL3:
	case CIT_MODEL4:
	case CIT_IBM_NETCAM_PRO:
		break;
	}
}

/* Re-arm the isochronous stream after a stop or control change. */
static int cit_restart_stream(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	switch (sd->model) {
	case CIT_MODEL0:
	case CIT_MODEL1:
		cit_write_reg(gspca_dev, 0x0001, 0x0114);
		/* Fall through */
	case CIT_MODEL2:
	case CIT_MODEL4:
		cit_write_reg(gspca_dev, 0x00c0, 0x010c); /* Go! */
		usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe);
		break;
	case CIT_MODEL3:
	case CIT_IBM_NETCAM_PRO:
		cit_write_reg(gspca_dev, 0x0001, 0x0114);
		cit_write_reg(gspca_dev, 0x00c0, 0x010c); /* Go! */
		usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe);
		/* Clear button events from while we were not streaming */
		cit_write_reg(gspca_dev, 0x0001, 0x0113);
		break;
	}

	/* Restart start-of-frame detection from a clean state. */
	sd->sof_read = 0;

	return 0;
}

/* Return the max isoc packet size of the current altsetting, or -EIO. */
static int cit_get_packet_size(struct gspca_dev *gspca_dev)
{
	struct usb_host_interface *alt;
	struct usb_interface *intf;

	intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
	alt = usb_altnum_to_altsetting(intf, gspca_dev->alt);
	if (!alt) {
		pr_err("Couldn't get altsetting\n");
		return -EIO;
	}

	return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
}

/* Calculate the clockdiv giving us max fps given the available bandwidth */
static int cit_get_clock_div(struct gspca_dev *gspca_dev)
{
	int clock_div = 7; /* 0=30 1=25 2=20 3=15 4=12 5=7.5 6=6 7=3fps ?? */
	int fps[8] = { 30, 25, 20, 15, 12, 8, 6, 3 };
	int packet_size;

	packet_size = cit_get_packet_size(gspca_dev);
	if (packet_size < 0)
		return packet_size;	/* propagate -EIO */

	/* Pick the fastest divider the isoc bandwidth can sustain
	   (width * height * fps * 3/2 bytes/s vs packet_size * 1000/s). */
	while (clock_div > 3 &&
			1000 * packet_size >
			gspca_dev->pixfmt.width * gspca_dev->pixfmt.height *
			fps[clock_div - 1] * 3 / 2)
		clock_div--;

	PDEBUG(D_PROBE,
	       "PacketSize: %d, res: %dx%d -> using clockdiv: %d (%d fps)",
	       packet_size, gspca_dev->pixfmt.width, gspca_dev->pixfmt.height,
	       clock_div, fps[clock_div]);

	return clock_div;
}

/* Start streaming on model-0 cameras: fixed init plus per-mode registers. */
static int cit_start_model0(struct gspca_dev *gspca_dev)
{
	const unsigned short compression = 0; /* 0=none, 7=best frame rate */
	int clock_div;

	clock_div = cit_get_clock_div(gspca_dev);
	if (clock_div < 0)
		return clock_div;

	cit_write_reg(gspca_dev, 0x0000, 0x0100); /* turn on led */
	cit_write_reg(gspca_dev, 0x0003, 0x0438);
	cit_write_reg(gspca_dev, 0x001e, 0x042b);
	cit_write_reg(gspca_dev, 0x0041, 0x042c);
	cit_write_reg(gspca_dev, 0x0008, 0x0436);
	cit_write_reg(gspca_dev, 0x0024, 0x0403);
	cit_write_reg(gspca_dev, 0x002c, 0x0404);
	cit_write_reg(gspca_dev, 0x0002, 0x0426);
	cit_write_reg(gspca_dev, 0x0014, 0x0427);

	switch (gspca_dev->pixfmt.width) {
	case 160: /* 160x120 */
		cit_write_reg(gspca_dev, 0x0004, 0x010b);
		cit_write_reg(gspca_dev, 0x0001,
0x010a);
		cit_write_reg(gspca_dev, 0x0010, 0x0102);
		cit_write_reg(gspca_dev, 0x00a0, 0x0103);
		cit_write_reg(gspca_dev, 0x0000, 0x0104);
		cit_write_reg(gspca_dev, 0x0078, 0x0105);
		break;
	case 176: /* 176x144 */
		cit_write_reg(gspca_dev, 0x0006, 0x010b);
		cit_write_reg(gspca_dev, 0x0000, 0x010a);
		cit_write_reg(gspca_dev, 0x0005, 0x0102);
		cit_write_reg(gspca_dev, 0x00b0, 0x0103);
		cit_write_reg(gspca_dev, 0x0000, 0x0104);
		cit_write_reg(gspca_dev, 0x0090, 0x0105);
		break;
	case 320: /* 320x240 */
		cit_write_reg(gspca_dev, 0x0008, 0x010b);
		cit_write_reg(gspca_dev, 0x0004, 0x010a);
		cit_write_reg(gspca_dev, 0x0005, 0x0102);
		cit_write_reg(gspca_dev, 0x00a0, 0x0103);
		cit_write_reg(gspca_dev, 0x0010, 0x0104);
		cit_write_reg(gspca_dev, 0x0078, 0x0105);
		break;
	}

	cit_write_reg(gspca_dev, compression, 0x0109);
	cit_write_reg(gspca_dev, clock_div, 0x0111);

	return 0;
}

/* Start streaming on model-1 cameras: long init sequence from USB traces,
   then per-resolution window registers, LED on, and clock divider. */
static int cit_start_model1(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int i, clock_div;

	clock_div = cit_get_clock_div(gspca_dev);
	if (clock_div < 0)
		return clock_div;

	cit_read_reg(gspca_dev, 0x0128, 1);
	cit_read_reg(gspca_dev, 0x0100, 0);
	cit_write_reg(gspca_dev, 0x01, 0x0100);	/* LED On */
	cit_read_reg(gspca_dev, 0x0100, 0);
	cit_write_reg(gspca_dev, 0x81, 0x0100);	/* LED Off */
	cit_read_reg(gspca_dev, 0x0100, 0);
	cit_write_reg(gspca_dev, 0x01, 0x0100);	/* LED On */
	cit_write_reg(gspca_dev, 0x01, 0x0108);

	cit_write_reg(gspca_dev, 0x03, 0x0112);
	cit_read_reg(gspca_dev, 0x0115, 0);
	cit_write_reg(gspca_dev, 0x06, 0x0115);
	cit_read_reg(gspca_dev, 0x0116, 0);
	cit_write_reg(gspca_dev, 0x44, 0x0116);
	cit_read_reg(gspca_dev, 0x0116, 0);
	cit_write_reg(gspca_dev, 0x40, 0x0116);
	cit_read_reg(gspca_dev, 0x0115, 0);
	cit_write_reg(gspca_dev, 0x0e, 0x0115);
	cit_write_reg(gspca_dev, 0x19, 0x012c);

	cit_Packet_Format1(gspca_dev, 0x00, 0x1e);
	cit_Packet_Format1(gspca_dev, 0x39, 0x0d);
	cit_Packet_Format1(gspca_dev, 0x39, 0x09);
	cit_Packet_Format1(gspca_dev, 0x3b, 0x00);
	cit_Packet_Format1(gspca_dev, 0x28, 0x22);
	cit_Packet_Format1(gspca_dev, 0x27, 0x00);
	cit_Packet_Format1(gspca_dev, 0x2b, 0x1f);
	cit_Packet_Format1(gspca_dev, 0x39, 0x08);

	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x2c, 0x00);
	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x30, 0x14);

	cit_PacketFormat2(gspca_dev, 0x39, 0x02);
	cit_PacketFormat2(gspca_dev, 0x01, 0xe1);
	cit_PacketFormat2(gspca_dev, 0x02, 0xcd);
	cit_PacketFormat2(gspca_dev, 0x03, 0xcd);
	cit_PacketFormat2(gspca_dev, 0x04, 0xfa);
	cit_PacketFormat2(gspca_dev, 0x3f, 0xff);
	cit_PacketFormat2(gspca_dev, 0x39, 0x00);

	cit_PacketFormat2(gspca_dev, 0x39, 0x02);
	cit_PacketFormat2(gspca_dev, 0x0a, 0x37);
	cit_PacketFormat2(gspca_dev, 0x0b, 0xb8);
	cit_PacketFormat2(gspca_dev, 0x0c, 0xf3);
	cit_PacketFormat2(gspca_dev, 0x0d, 0xe3);
	cit_PacketFormat2(gspca_dev, 0x0e, 0x0d);
	cit_PacketFormat2(gspca_dev, 0x0f, 0xf2);
	cit_PacketFormat2(gspca_dev, 0x10, 0xd5);
	cit_PacketFormat2(gspca_dev, 0x11, 0xba);
	cit_PacketFormat2(gspca_dev, 0x12, 0x53);
	cit_PacketFormat2(gspca_dev, 0x3f, 0xff);
	cit_PacketFormat2(gspca_dev, 0x39, 0x00);

	cit_PacketFormat2(gspca_dev, 0x39, 0x02);
	cit_PacketFormat2(gspca_dev, 0x16, 0x00);
	cit_PacketFormat2(gspca_dev, 0x17, 0x28);
	cit_PacketFormat2(gspca_dev, 0x18, 0x7d);
	cit_PacketFormat2(gspca_dev, 0x19, 0xbe);
	cit_PacketFormat2(gspca_dev, 0x3f, 0xff);
	cit_PacketFormat2(gspca_dev, 0x39, 0x00);

	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x00, 0x18);
	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x13, 0x18);
	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x14, 0x06);

	/* TESTME These are handled through controls
	   KEEP until someone can test leaving this out is ok */
	if (0) {
		/* This is default brightness */
		for (i = 0; i < cit_model1_ntries; i++)
			cit_Packet_Format1(gspca_dev, 0x31, 0x37);
		for (i = 0; i < cit_model1_ntries; i++)
			cit_Packet_Format1(gspca_dev, 0x32, 0x46);
		for (i = 0; i < cit_model1_ntries; i++)
			cit_Packet_Format1(gspca_dev, 0x33, 0x55);
	}

	cit_Packet_Format1(gspca_dev, 0x2e, 0x04);
	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x2d, 0x04);
	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x29, 0x80);
	cit_Packet_Format1(gspca_dev, 0x2c, 0x01);
	cit_Packet_Format1(gspca_dev, 0x30, 0x17);
	cit_Packet_Format1(gspca_dev, 0x39, 0x08);
	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x34, 0x00);

	cit_write_reg(gspca_dev, 0x00, 0x0101);
	cit_write_reg(gspca_dev, 0x00, 0x010a);

	/* Per-resolution capture window and timing registers. */
	switch (gspca_dev->pixfmt.width) {
	case 128: /* 128x96 */
		cit_write_reg(gspca_dev, 0x80, 0x0103);
		cit_write_reg(gspca_dev, 0x60, 0x0105);
		cit_write_reg(gspca_dev, 0x0c, 0x010b);
		cit_write_reg(gspca_dev, 0x04, 0x011b);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x0b, 0x011d);
		cit_write_reg(gspca_dev, 0x00, 0x011e);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x00, 0x0129);
		break;
	case 176: /* 176x144 */
		cit_write_reg(gspca_dev, 0xb0, 0x0103);
		cit_write_reg(gspca_dev, 0x8f, 0x0105);
		cit_write_reg(gspca_dev, 0x06, 0x010b);
		cit_write_reg(gspca_dev, 0x04, 0x011b);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x0d, 0x011d);
		cit_write_reg(gspca_dev, 0x00, 0x011e);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x03, 0x0129);
		break;
	case 352: /* 352x288 */
		cit_write_reg(gspca_dev, 0xb0, 0x0103);
		cit_write_reg(gspca_dev, 0x90, 0x0105);
		cit_write_reg(gspca_dev, 0x02, 0x010b);
		cit_write_reg(gspca_dev, 0x04, 0x011b);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x05, 0x011d);
		cit_write_reg(gspca_dev, 0x00, 0x011e);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x00, 0x0129);
		break;
	}

	cit_write_reg(gspca_dev, 0xff, 0x012b);

	/* TESTME These are handled through controls
	   KEEP until someone can test leaving this out is ok */
	if (0) {
		/* This is another brightness - don't know why */
		for (i = 0; i < cit_model1_ntries; i++)
			cit_Packet_Format1(gspca_dev, 0x31, 0xc3);
		for (i = 0; i < cit_model1_ntries; i++)
			cit_Packet_Format1(gspca_dev, 0x32, 0xd2);
		for (i = 0; i < cit_model1_ntries; i++)
			cit_Packet_Format1(gspca_dev, 0x33, 0xe1);

		/* Default contrast */
		for (i = 0; i < cit_model1_ntries; i++)
			cit_Packet_Format1(gspca_dev, 0x14, 0x0a);

		/* Default sharpness */
		for (i = 0; i < cit_model1_ntries2; i++)
			cit_PacketFormat2(gspca_dev, 0x13, 0x1a);

		/* Default lighting conditions */
		cit_Packet_Format1(gspca_dev, 0x0027,
				   v4l2_ctrl_g_ctrl(sd->lighting));
	}

	/* Assorted init */
	switch (gspca_dev->pixfmt.width) {
	case 128: /* 128x96 */
		cit_Packet_Format1(gspca_dev, 0x2b, 0x1e);
		cit_write_reg(gspca_dev, 0xc9, 0x0119);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x80, 0x0109);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x36, 0x0102);
		cit_write_reg(gspca_dev, 0x1a, 0x0104);
		cit_write_reg(gspca_dev, 0x04, 0x011a);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x2b, 0x011c);
		cit_write_reg(gspca_dev, 0x23, 0x012a);	/* Same everywhere */
		break;
	case 176: /* 176x144 */
		cit_Packet_Format1(gspca_dev, 0x2b, 0x1e);
		cit_write_reg(gspca_dev, 0xc9, 0x0119);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x80, 0x0109);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x04, 0x0102);
		cit_write_reg(gspca_dev, 0x02, 0x0104);
		cit_write_reg(gspca_dev, 0x04, 0x011a);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x2b, 0x011c);
		cit_write_reg(gspca_dev, 0x23, 0x012a);	/* Same everywhere */
		break;
	case 352: /* 352x288 */
		cit_Packet_Format1(gspca_dev, 0x2b, 0x1f);
		cit_write_reg(gspca_dev, 0xc9, 0x0119);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x80, 0x0109);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x08, 0x0102);
		cit_write_reg(gspca_dev, 0x01, 0x0104);
		cit_write_reg(gspca_dev, 0x04, 0x011a);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x2f, 0x011c);
		cit_write_reg(gspca_dev, 0x23, 0x012a);	/* Same everywhere */
		break;
	}

	cit_write_reg(gspca_dev, 0x01, 0x0100);	/* LED On */
	cit_write_reg(gspca_dev, clock_div, 0x0111);

	return 0;
}

/* Start streaming on model-2 cameras. */
static int cit_start_model2(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *)
gspca_dev;
	int clock_div = 0;

	cit_write_reg(gspca_dev, 0x0000, 0x0100);	/* LED on */
	cit_read_reg(gspca_dev, 0x0116, 0);
	cit_write_reg(gspca_dev, 0x0060, 0x0116);
	cit_write_reg(gspca_dev, 0x0002, 0x0112);
	cit_write_reg(gspca_dev, 0x00bc, 0x012c);
	cit_write_reg(gspca_dev, 0x0008, 0x012b);
	cit_write_reg(gspca_dev, 0x0000, 0x0108);
	cit_write_reg(gspca_dev, 0x0001, 0x0133);
	cit_write_reg(gspca_dev, 0x0001, 0x0102);

	/* Per-resolution window registers and SOF marker length. */
	switch (gspca_dev->pixfmt.width) {
	case 176: /* 176x144 */
		cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */
		cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
		cit_write_reg(gspca_dev, 0x0024, 0x0105); /* 176x144, 352x288 */
		cit_write_reg(gspca_dev, 0x00b9, 0x010a); /* Unique to this mode */
		cit_write_reg(gspca_dev, 0x0038, 0x0119); /* Unique to this mode */
		/* TESTME HDG: this does not seem right
		   (it is 2 for all other resolutions) */
		sd->sof_len = 10;
		break;
	case 320: /* 320x240 */
		cit_write_reg(gspca_dev, 0x0028, 0x0103); /* Unique to this mode */
		cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
		cit_write_reg(gspca_dev, 0x001e, 0x0105); /* 320x240, 352x240 */
		cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */
		cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */
		sd->sof_len = 2;
		break;
	/* case VIDEOSIZE_352x240: (disabled mode — code below is unreachable,
	   kept verbatim from the original driver) */
		cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */
		cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
		cit_write_reg(gspca_dev, 0x001e, 0x0105); /* 320x240, 352x240 */
		cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */
		cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */
		sd->sof_len = 2;
		break;
	case 352: /* 352x288 */
		cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */
		cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
		cit_write_reg(gspca_dev, 0x0024, 0x0105); /* 176x144, 352x288 */
		cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */
		cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */
		sd->sof_len = 2;
		break;
	}

	cit_write_reg(gspca_dev, 0x0000, 0x0100);	/* LED on */

	switch (gspca_dev->pixfmt.width) {
	case 176: /* 176x144 */
		cit_write_reg(gspca_dev, 0x0050, 0x0111);
		cit_write_reg(gspca_dev, 0x00d0, 0x0111);
		break;
	case 320: /* 320x240 */
	case 352: /* 352x288 */
		cit_write_reg(gspca_dev, 0x0040, 0x0111);
		cit_write_reg(gspca_dev, 0x00c0, 0x0111);
		break;
	}
	cit_write_reg(gspca_dev, 0x009b, 0x010f);
	cit_write_reg(gspca_dev, 0x00bb, 0x010f);

	/*
	 * Hardware settings, may affect CMOS sensor; not user controls!
	 * -------------------------------------------------------------
	 * 0x0004: no effect
	 * 0x0006: hardware effect
	 * 0x0008: no effect
	 * 0x000a: stops video stream, probably important h/w setting
	 * 0x000c: changes color in hardware manner (not user setting)
	 * 0x0012: changes number of colors (does not affect speed)
	 * 0x002a: no effect
	 * 0x002c: hardware setting (related to scan lines)
	 * 0x002e: stops video stream, probably important h/w setting
	 */
	cit_model2_Packet1(gspca_dev, 0x000a, 0x005c);
	cit_model2_Packet1(gspca_dev, 0x0004, 0x0000);
	cit_model2_Packet1(gspca_dev, 0x0006, 0x00fb);
	cit_model2_Packet1(gspca_dev, 0x0008, 0x0000);
	cit_model2_Packet1(gspca_dev, 0x000c, 0x0009);
	cit_model2_Packet1(gspca_dev, 0x0012, 0x000a);
	cit_model2_Packet1(gspca_dev, 0x002a, 0x0000);
	cit_model2_Packet1(gspca_dev, 0x002c, 0x0000);
	cit_model2_Packet1(gspca_dev, 0x002e, 0x0008);

	/*
	 * Function 0x0030 pops up all over the place. Apparently
	 * it is a hardware control register, with every bit assigned to
	 * do something.
	 */
	cit_model2_Packet1(gspca_dev, 0x0030, 0x0000);

	/*
	 * Magic control of CMOS sensor. Only lower values like
	 * 0-3 work, and picture shifts left or right. Don't change.
	 */
	switch (gspca_dev->pixfmt.width) {
	case 176: /* 176x144 */
		cit_model2_Packet1(gspca_dev, 0x0014, 0x0002);
		cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */
		cit_model2_Packet1(gspca_dev, 0x0018, 0x004a); /* Another hardware setting */
		clock_div = 6;
		break;
	case 320: /* 320x240 */
		cit_model2_Packet1(gspca_dev, 0x0014, 0x0009);
		cit_model2_Packet1(gspca_dev, 0x0016, 0x0005); /* Horizontal shift */
		cit_model2_Packet1(gspca_dev, 0x0018, 0x0044); /* Another hardware setting */
		clock_div = 8;
		break;
	/* case VIDEOSIZE_352x240: (disabled mode — unreachable, kept verbatim) */
		/* This mode doesn't work as Windows programs it;
		   changed to work */
		cit_model2_Packet1(gspca_dev, 0x0014, 0x0009); /* Windows sets this to 8 */
		cit_model2_Packet1(gspca_dev, 0x0016, 0x0003); /* Horizontal shift */
		cit_model2_Packet1(gspca_dev, 0x0018, 0x0044); /* Windows sets this to 0x0045 */
		clock_div = 10;
		break;
	case 352: /* 352x288 */
		cit_model2_Packet1(gspca_dev, 0x0014, 0x0003);
		cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */
		cit_model2_Packet1(gspca_dev, 0x0018, 0x004a); /* Another hardware setting */
		clock_div = 16;
		break;
	}

	/* TESTME These are handled through controls
	   KEEP until someone can test leaving this out is ok */
	if (0)
		cit_model2_Packet1(gspca_dev, 0x001a, 0x005a);

	/*
	 * We have our own frame rate setting varying from 0 (slowest) to 6
	 * (fastest). The camera model 2 allows frame rate in range [0..0x1F]
	 * where 0 is also the slowest setting. However for all practical
	 * reasons high settings make no sense because USB is not fast enough
	 * to support high FPS. Be aware that the picture datastream will be
	 * severely disrupted if you ask for frame rate faster than allowed
	 * for the video size - see below:
	 *
	 * Allowable ranges (obtained experimentally on OHCI, K6-3, 450 MHz):
	 * -----------------------------------------------------------------
	 * 176x144: [6..31]
	 * 320x240: [8..31]
	 * 352x240: [10..31]
	 * 352x288: [16..31] I have to raise lower threshold for stability...
	 *
	 * As usual, slower FPS provides better sensitivity.
	 */
	cit_model2_Packet1(gspca_dev, 0x001c, clock_div);

	/*
	 * This setting does not visibly affect pictures; left it here
	 * because it was present in Windows USB data stream. This function
	 * does not allow arbitrary values and apparently is a bit mask, to
	 * be activated only at appropriate time. Don't change it randomly!
	 */
	switch (gspca_dev->pixfmt.width) {
	case 176: /* 176x144 */
		cit_model2_Packet1(gspca_dev, 0x0026, 0x00c2);
		break;
	case 320: /* 320x240 */
		cit_model2_Packet1(gspca_dev, 0x0026, 0x0044);
		break;
	/* case VIDEOSIZE_352x240: (disabled mode — unreachable, kept verbatim) */
		cit_model2_Packet1(gspca_dev, 0x0026, 0x0046);
		break;
	case 352: /* 352x288 */
		cit_model2_Packet1(gspca_dev, 0x0026, 0x0048);
		break;
	}

	cit_model2_Packet1(gspca_dev, 0x0028, v4l2_ctrl_g_ctrl(sd->lighting));
	/* model2 cannot change the backlight compensation while streaming */
	v4l2_ctrl_grab(sd->lighting, true);

	/* color balance rg2 */
	cit_model2_Packet1(gspca_dev, 0x001e, 0x002f);
	/* saturation */
	cit_model2_Packet1(gspca_dev, 0x0020, 0x0034);
	/* color balance yb */
	cit_model2_Packet1(gspca_dev, 0x0022, 0x00a0);

	/* Hardware control command */
	cit_model2_Packet1(gspca_dev, 0x0030, 0x0004);

	return 0;
}

/* Start streaming on model-3 cameras (continues past this chunk). */
static int cit_start_model3(struct gspca_dev *gspca_dev)
{
	const unsigned short compression = 0; /* 0=none, 7=best frame rate */
	int i, clock_div = 0;

	/* HDG not in ibmcam driver, added to see if it helps with
	   auto-detecting between model3 and ibm netcamera pro */
	cit_read_reg(gspca_dev, 0x128, 1);

	cit_write_reg(gspca_dev, 0x0000, 0x0100);
	cit_read_reg(gspca_dev, 0x0116, 0);
	cit_write_reg(gspca_dev, 0x0060, 0x0116);
	cit_write_reg(gspca_dev, 0x0002, 0x0112);
	cit_write_reg(gspca_dev, 0x0000, 0x0123);
	cit_write_reg(gspca_dev, 0x0001, 0x0117);
	cit_write_reg(gspca_dev, 0x0040, 0x0108);
	cit_write_reg(gspca_dev, 0x0019, 0x012c);
	cit_write_reg(gspca_dev, 0x0060, 0x0116);
	cit_write_reg(gspca_dev, 0x0002, 0x0115);
	cit_write_reg(gspca_dev, 0x0003, 0x0115);
	cit_read_reg(gspca_dev, 0x0115, 0);
cit_write_reg(gspca_dev, 0x000b, 0x0115); /* TESTME HDG not in ibmcam driver, added to see if it helps with auto-detecting between model3 and ibm netcamera pro */ if (0) { cit_write_reg(gspca_dev, 0x0078, 0x012d); cit_write_reg(gspca_dev, 0x0001, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0079, 0x012d); cit_write_reg(gspca_dev, 0x00ff, 0x0130); cit_write_reg(gspca_dev, 0xcd41, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_read_reg(gspca_dev, 0x0126, 1); } cit_model3_Packet1(gspca_dev, 0x000a, 0x0040); cit_model3_Packet1(gspca_dev, 0x000b, 0x00f6); cit_model3_Packet1(gspca_dev, 0x000c, 0x0002); cit_model3_Packet1(gspca_dev, 0x000d, 0x0020); cit_model3_Packet1(gspca_dev, 0x000e, 0x0033); cit_model3_Packet1(gspca_dev, 0x000f, 0x0007); cit_model3_Packet1(gspca_dev, 0x0010, 0x0000); cit_model3_Packet1(gspca_dev, 0x0011, 0x0070); cit_model3_Packet1(gspca_dev, 0x0012, 0x0030); cit_model3_Packet1(gspca_dev, 0x0013, 0x0000); cit_model3_Packet1(gspca_dev, 0x0014, 0x0001); cit_model3_Packet1(gspca_dev, 0x0015, 0x0001); cit_model3_Packet1(gspca_dev, 0x0016, 0x0001); cit_model3_Packet1(gspca_dev, 0x0017, 0x0001); cit_model3_Packet1(gspca_dev, 0x0018, 0x0000); cit_model3_Packet1(gspca_dev, 0x001e, 0x00c3); cit_model3_Packet1(gspca_dev, 0x0020, 0x0000); cit_model3_Packet1(gspca_dev, 0x0028, 0x0010); cit_model3_Packet1(gspca_dev, 0x0029, 0x0054); cit_model3_Packet1(gspca_dev, 0x002a, 0x0013); cit_model3_Packet1(gspca_dev, 0x002b, 0x0007); cit_model3_Packet1(gspca_dev, 0x002d, 0x0028); cit_model3_Packet1(gspca_dev, 0x002e, 0x0000); cit_model3_Packet1(gspca_dev, 0x0031, 0x0000); cit_model3_Packet1(gspca_dev, 0x0032, 0x0000); cit_model3_Packet1(gspca_dev, 0x0033, 0x0000); cit_model3_Packet1(gspca_dev, 0x0034, 0x0000); cit_model3_Packet1(gspca_dev, 0x0035, 0x0038); cit_model3_Packet1(gspca_dev, 0x003a, 0x0001); cit_model3_Packet1(gspca_dev, 0x003c, 0x001e); cit_model3_Packet1(gspca_dev, 0x003f, 0x000a); cit_model3_Packet1(gspca_dev, 0x0041, 
0x0000); cit_model3_Packet1(gspca_dev, 0x0046, 0x003f); cit_model3_Packet1(gspca_dev, 0x0047, 0x0000); cit_model3_Packet1(gspca_dev, 0x0050, 0x0005); cit_model3_Packet1(gspca_dev, 0x0052, 0x001a); cit_model3_Packet1(gspca_dev, 0x0053, 0x0003); cit_model3_Packet1(gspca_dev, 0x005a, 0x006b); cit_model3_Packet1(gspca_dev, 0x005d, 0x001e); cit_model3_Packet1(gspca_dev, 0x005e, 0x0030); cit_model3_Packet1(gspca_dev, 0x005f, 0x0041); cit_model3_Packet1(gspca_dev, 0x0064, 0x0008); cit_model3_Packet1(gspca_dev, 0x0065, 0x0015); cit_model3_Packet1(gspca_dev, 0x0068, 0x000f); cit_model3_Packet1(gspca_dev, 0x0079, 0x0000); cit_model3_Packet1(gspca_dev, 0x007a, 0x0000); cit_model3_Packet1(gspca_dev, 0x007c, 0x003f); cit_model3_Packet1(gspca_dev, 0x0082, 0x000f); cit_model3_Packet1(gspca_dev, 0x0085, 0x0000); cit_model3_Packet1(gspca_dev, 0x0099, 0x0000); cit_model3_Packet1(gspca_dev, 0x009b, 0x0023); cit_model3_Packet1(gspca_dev, 0x009c, 0x0022); cit_model3_Packet1(gspca_dev, 0x009d, 0x0096); cit_model3_Packet1(gspca_dev, 0x009e, 0x0096); cit_model3_Packet1(gspca_dev, 0x009f, 0x000a); switch (gspca_dev->pixfmt.width) { case 160: cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */ cit_write_reg(gspca_dev, 0x0024, 0x010b); /* Differs everywhere */ cit_write_reg(gspca_dev, 0x00a9, 0x0119); cit_write_reg(gspca_dev, 0x0016, 0x011b); cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0003, 0x011e); /* Same on 160x120, 640x480 */ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ cit_write_reg(gspca_dev, 0x0018, 0x0102); cit_write_reg(gspca_dev, 0x0004, 0x0104); cit_write_reg(gspca_dev, 0x0004, 0x011a); cit_write_reg(gspca_dev, 0x0028, 0x011c); 
cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ cit_write_reg(gspca_dev, 0x0000, 0x0118); cit_write_reg(gspca_dev, 0x0000, 0x0132); cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */ cit_write_reg(gspca_dev, compression, 0x0109); clock_div = 3; break; case 320: cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */ cit_write_reg(gspca_dev, 0x0028, 0x010b); /* Differs everywhere */ cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same */ cit_write_reg(gspca_dev, 0x0000, 0x011e); cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ /* 4 commands from 160x120 skipped */ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */ cit_write_reg(gspca_dev, compression, 0x0109); cit_write_reg(gspca_dev, 0x00d9, 0x0119); cit_write_reg(gspca_dev, 0x0006, 0x011b); cit_write_reg(gspca_dev, 0x0021, 0x0102); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0010, 0x0104); cit_write_reg(gspca_dev, 0x0004, 0x011a); cit_write_reg(gspca_dev, 0x003f, 0x011c); cit_write_reg(gspca_dev, 0x001c, 0x0118); cit_write_reg(gspca_dev, 0x0000, 0x0132); clock_div = 5; break; case 640: cit_write_reg(gspca_dev, 0x00f0, 0x0105); cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */ cit_write_reg(gspca_dev, 0x0038, 0x010b); /* Differs everywhere */ cit_write_reg(gspca_dev, 0x00d9, 0x0119); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0006, 0x011b); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0004, 0x011d); /* NC */ cit_write_reg(gspca_dev, 0x0003, 0x011e); /* Same on 160x120, 640x480 */ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ cit_write_reg(gspca_dev, 0x0021, 0x0102); /* Same on 
320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0016, 0x0104); /* NC */ cit_write_reg(gspca_dev, 0x0004, 0x011a); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x003f, 0x011c); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ cit_write_reg(gspca_dev, 0x001c, 0x0118); /* Same on 320x240, 640x480 */ cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */ cit_write_reg(gspca_dev, compression, 0x0109); cit_write_reg(gspca_dev, 0x0040, 0x0101); cit_write_reg(gspca_dev, 0x0040, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0132); /* Same on 320x240, 640x480 */ clock_div = 7; break; } cit_model3_Packet1(gspca_dev, 0x007e, 0x000e); /* Hue */ cit_model3_Packet1(gspca_dev, 0x0036, 0x0011); /* Brightness */ cit_model3_Packet1(gspca_dev, 0x0060, 0x0002); /* Sharpness */ cit_model3_Packet1(gspca_dev, 0x0061, 0x0004); /* Sharpness */ cit_model3_Packet1(gspca_dev, 0x0062, 0x0005); /* Sharpness */ cit_model3_Packet1(gspca_dev, 0x0063, 0x0014); /* Sharpness */ cit_model3_Packet1(gspca_dev, 0x0096, 0x00a0); /* Red sharpness */ cit_model3_Packet1(gspca_dev, 0x0097, 0x0096); /* Blue sharpness */ cit_model3_Packet1(gspca_dev, 0x0067, 0x0001); /* Contrast */ cit_model3_Packet1(gspca_dev, 0x005b, 0x000c); /* Contrast */ cit_model3_Packet1(gspca_dev, 0x005c, 0x0016); /* Contrast */ cit_model3_Packet1(gspca_dev, 0x0098, 0x000b); cit_model3_Packet1(gspca_dev, 0x002c, 0x0003); /* Was 1, broke 640x480 */ cit_model3_Packet1(gspca_dev, 0x002f, 0x002a); cit_model3_Packet1(gspca_dev, 0x0030, 0x0029); cit_model3_Packet1(gspca_dev, 0x0037, 0x0002); cit_model3_Packet1(gspca_dev, 0x0038, 0x0059); cit_model3_Packet1(gspca_dev, 0x003d, 0x002e); cit_model3_Packet1(gspca_dev, 0x003e, 0x0028); cit_model3_Packet1(gspca_dev, 0x0078, 0x0005); cit_model3_Packet1(gspca_dev, 0x007b, 0x0011); cit_model3_Packet1(gspca_dev, 0x007d, 0x004b); cit_model3_Packet1(gspca_dev, 0x007f, 0x0022); cit_model3_Packet1(gspca_dev, 0x0080, 0x000c); cit_model3_Packet1(gspca_dev, 0x0081, 
0x000b); cit_model3_Packet1(gspca_dev, 0x0083, 0x00fd); cit_model3_Packet1(gspca_dev, 0x0086, 0x000b); cit_model3_Packet1(gspca_dev, 0x0087, 0x000b); cit_model3_Packet1(gspca_dev, 0x007e, 0x000e); cit_model3_Packet1(gspca_dev, 0x0096, 0x00a0); /* Red sharpness */ cit_model3_Packet1(gspca_dev, 0x0097, 0x0096); /* Blue sharpness */ cit_model3_Packet1(gspca_dev, 0x0098, 0x000b); /* FIXME we should probably use cit_get_clock_div() here (in combination with isoc negotiation using the programmable isoc size) like with the IBM netcam pro). */ cit_write_reg(gspca_dev, clock_div, 0x0111); /* Clock Divider */ switch (gspca_dev->pixfmt.width) { case 160: cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */ cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */ cit_model3_Packet1(gspca_dev, 0x003b, 0x003c); /* Same */ cit_model3_Packet1(gspca_dev, 0x0040, 0x000a); cit_model3_Packet1(gspca_dev, 0x0051, 0x000a); break; case 320: cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */ cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */ cit_model3_Packet1(gspca_dev, 0x003b, 0x003c); /* Same */ cit_model3_Packet1(gspca_dev, 0x0040, 0x0008); cit_model3_Packet1(gspca_dev, 0x0051, 0x000b); break; case 640: cit_model3_Packet1(gspca_dev, 0x001f, 0x0002); /* !Same */ cit_model3_Packet1(gspca_dev, 0x0039, 0x003e); /* !Same */ cit_model3_Packet1(gspca_dev, 0x0040, 0x0008); cit_model3_Packet1(gspca_dev, 0x0051, 0x000a); break; } /* if (sd->input_index) { */ if (rca_input) { for (i = 0; i < ARRAY_SIZE(rca_initdata); i++) { if (rca_initdata[i][0]) cit_read_reg(gspca_dev, rca_initdata[i][2], 0); else cit_write_reg(gspca_dev, rca_initdata[i][1], rca_initdata[i][2]); } } return 0; } static int cit_start_model4(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; cit_write_reg(gspca_dev, 0x0000, 0x0100); cit_write_reg(gspca_dev, 0x00c0, 0x0111); cit_write_reg(gspca_dev, 0x00bc, 0x012c); cit_write_reg(gspca_dev, 0x0080, 0x012b); cit_write_reg(gspca_dev, 0x0000, 
		      0x0108);
	cit_write_reg(gspca_dev, 0x0001, 0x0133);
	cit_write_reg(gspca_dev, 0x009b, 0x010f);
	cit_write_reg(gspca_dev, 0x00bb, 0x010f);
	cit_model4_Packet1(gspca_dev, 0x0038, 0x0000);
	cit_model4_Packet1(gspca_dev, 0x000a, 0x005c);

	/* Opaque 0x0124-command sequences from the Windows USB trace;
	   registers 0x012d-0x0130 are arguments latched by the write to
	   0x0124. Order matters - do not reorder. */
	cit_write_reg(gspca_dev, 0x00aa, 0x012d);
	cit_write_reg(gspca_dev, 0x0004, 0x012f);
	cit_write_reg(gspca_dev, 0xd141, 0x0124);
	cit_write_reg(gspca_dev, 0x0000, 0x0127);
	cit_write_reg(gspca_dev, 0x00fb, 0x012e);
	cit_write_reg(gspca_dev, 0x0000, 0x0130);
	cit_write_reg(gspca_dev, 0x8a28, 0x0124);
	cit_write_reg(gspca_dev, 0x00aa, 0x012f);
	cit_write_reg(gspca_dev, 0xd055, 0x0124);
	cit_write_reg(gspca_dev, 0x000c, 0x0127);
	cit_write_reg(gspca_dev, 0x0009, 0x012e);
	cit_write_reg(gspca_dev, 0xaa28, 0x0124);
	cit_write_reg(gspca_dev, 0x00aa, 0x012d);
	cit_write_reg(gspca_dev, 0x0012, 0x012f);
	cit_write_reg(gspca_dev, 0xd141, 0x0124);
	cit_write_reg(gspca_dev, 0x0008, 0x0127);
	cit_write_reg(gspca_dev, 0x00aa, 0x0130);
	cit_write_reg(gspca_dev, 0x82a8, 0x0124);
	cit_write_reg(gspca_dev, 0x002a, 0x012d);
	cit_write_reg(gspca_dev, 0x0000, 0x012f);
	cit_write_reg(gspca_dev, 0xd145, 0x0124);
	cit_write_reg(gspca_dev, 0xfffa, 0x0124);
	cit_model4_Packet1(gspca_dev, 0x0034, 0x0000);

	switch (gspca_dev->pixfmt.width) {
	case 128: /* 128x96 */
		cit_write_reg(gspca_dev, 0x0070, 0x0119);
		cit_write_reg(gspca_dev, 0x00d0, 0x0111);
		cit_write_reg(gspca_dev, 0x0039, 0x010a);
		cit_write_reg(gspca_dev, 0x0001, 0x0102);
		cit_write_reg(gspca_dev, 0x0028, 0x0103);
		cit_write_reg(gspca_dev, 0x0000, 0x0104);
		cit_write_reg(gspca_dev, 0x001e, 0x0105);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x0016, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x000a, 0x0127);
		cit_write_reg(gspca_dev, 0x00aa, 0x0130);
		cit_write_reg(gspca_dev, 0x82a8, 0x0124);
		cit_write_reg(gspca_dev, 0x0014, 0x012d);
		cit_write_reg(gspca_dev, 0x0008, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012e);
		cit_write_reg(gspca_dev, 0x001a, 0x0130);
		cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
		cit_write_reg(gspca_dev, 0x005a, 0x012d);
		cit_write_reg(gspca_dev, 0x9545, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x0127);
		cit_write_reg(gspca_dev, 0x0018, 0x012e);
		cit_write_reg(gspca_dev, 0x0043, 0x0130);
		cit_write_reg(gspca_dev, 0x8a28, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012f);
		cit_write_reg(gspca_dev, 0xd055, 0x0124);
		cit_write_reg(gspca_dev, 0x001c, 0x0127);
		cit_write_reg(gspca_dev, 0x00eb, 0x012e);
		cit_write_reg(gspca_dev, 0xaa28, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x0032, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x0000, 0x0127);
		cit_write_reg(gspca_dev, 0x00aa, 0x0130);
		cit_write_reg(gspca_dev, 0x82a8, 0x0124);
		cit_write_reg(gspca_dev, 0x0036, 0x012d);
		cit_write_reg(gspca_dev, 0x0008, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0xfffa, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x001e, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x0017, 0x0127);
		cit_write_reg(gspca_dev, 0x0013, 0x012e);
		cit_write_reg(gspca_dev, 0x0031, 0x0130);
		cit_write_reg(gspca_dev, 0x8a28, 0x0124);
		cit_write_reg(gspca_dev, 0x0017, 0x012d);
		cit_write_reg(gspca_dev, 0x0078, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0x0000, 0x0127);
		cit_write_reg(gspca_dev, 0xfea8, 0x0124);
		sd->sof_len = 2;
		break;
	case 160: /* 160x120 */
		cit_write_reg(gspca_dev, 0x0038, 0x0119);
		cit_write_reg(gspca_dev, 0x00d0, 0x0111);
		cit_write_reg(gspca_dev, 0x00b9, 0x010a);
		cit_write_reg(gspca_dev, 0x0001, 0x0102);
		cit_write_reg(gspca_dev, 0x0028, 0x0103);
		cit_write_reg(gspca_dev, 0x0000, 0x0104);
		cit_write_reg(gspca_dev, 0x001e, 0x0105);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x0016, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x000b, 0x0127);
		cit_write_reg(gspca_dev, 0x00aa, 0x0130);
		cit_write_reg(gspca_dev, 0x82a8, 0x0124);
		cit_write_reg(gspca_dev, 0x0014, 0x012d);
		cit_write_reg(gspca_dev, 0x0008, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012e);
		cit_write_reg(gspca_dev, 0x001a, 0x0130);
		cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
		cit_write_reg(gspca_dev, 0x005a, 0x012d);
		cit_write_reg(gspca_dev, 0x9545, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x0127);
		cit_write_reg(gspca_dev, 0x0018, 0x012e);
		cit_write_reg(gspca_dev, 0x0043, 0x0130);
		cit_write_reg(gspca_dev, 0x8a28, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012f);
		cit_write_reg(gspca_dev, 0xd055, 0x0124);
		cit_write_reg(gspca_dev, 0x001c, 0x0127);
		cit_write_reg(gspca_dev, 0x00c7, 0x012e);
		cit_write_reg(gspca_dev, 0xaa28, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x0032, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x0025, 0x0127);
		cit_write_reg(gspca_dev, 0x00aa, 0x0130);
		cit_write_reg(gspca_dev, 0x82a8, 0x0124);
		cit_write_reg(gspca_dev, 0x0036, 0x012d);
		cit_write_reg(gspca_dev, 0x0008, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0xfffa, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x001e, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x0048, 0x0127);
		cit_write_reg(gspca_dev, 0x0035, 0x012e);
		cit_write_reg(gspca_dev, 0x00d0, 0x0130);
		cit_write_reg(gspca_dev, 0x8a28, 0x0124);
		cit_write_reg(gspca_dev, 0x0048, 0x012d);
		cit_write_reg(gspca_dev, 0x0090, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0x0001, 0x0127);
		cit_write_reg(gspca_dev, 0xfea8, 0x0124);
		sd->sof_len = 2;
		break;
	case 176: /* 176x144 */
		cit_write_reg(gspca_dev, 0x0038, 0x0119);
		cit_write_reg(gspca_dev, 0x00d0, 0x0111);
		cit_write_reg(gspca_dev, 0x00b9, 0x010a);
		cit_write_reg(gspca_dev, 0x0001, 0x0102);
		cit_write_reg(gspca_dev, 0x002c, 0x0103);
		cit_write_reg(gspca_dev, 0x0000, 0x0104);
		cit_write_reg(gspca_dev, 0x0024, 0x0105);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x0016, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x0007, 0x0127);
		cit_write_reg(gspca_dev, 0x00aa, 0x0130);
		cit_write_reg(gspca_dev, 0x82a8, 0x0124);
		cit_write_reg(gspca_dev, 0x0014, 0x012d);
		cit_write_reg(gspca_dev, 0x0001, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012e);
		cit_write_reg(gspca_dev, 0x001a, 0x0130);
		cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
		cit_write_reg(gspca_dev, 0x005e, 0x012d);
		cit_write_reg(gspca_dev, 0x9545, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x0127);
		cit_write_reg(gspca_dev, 0x0018, 0x012e);
		cit_write_reg(gspca_dev, 0x0049, 0x0130);
		cit_write_reg(gspca_dev, 0x8a28, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012f);
		cit_write_reg(gspca_dev, 0xd055, 0x0124);
		cit_write_reg(gspca_dev, 0x001c, 0x0127);
		cit_write_reg(gspca_dev, 0x00c7, 0x012e);
		cit_write_reg(gspca_dev, 0xaa28, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x0032, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x0028, 0x0127);
		cit_write_reg(gspca_dev, 0x00aa, 0x0130);
		cit_write_reg(gspca_dev, 0x82a8, 0x0124);
		cit_write_reg(gspca_dev, 0x0036, 0x012d);
		cit_write_reg(gspca_dev, 0x0008, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0xfffa, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x001e, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x0010, 0x0127);
		cit_write_reg(gspca_dev, 0x0013, 0x012e);
		cit_write_reg(gspca_dev, 0x002a, 0x0130);
		cit_write_reg(gspca_dev, 0x8a28, 0x0124);
		cit_write_reg(gspca_dev, 0x0010, 0x012d);
		cit_write_reg(gspca_dev, 0x006d, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0x0001, 0x0127);
		cit_write_reg(gspca_dev, 0xfea8, 0x0124);
		/* TESTME HDG: this does not seem right
		   (it is 2 for all other resolutions) */
		sd->sof_len = 10;
		break;
	case 320: /* 320x240 */
		cit_write_reg(gspca_dev, 0x0070, 0x0119);
		cit_write_reg(gspca_dev, 0x00d0, 0x0111);
		cit_write_reg(gspca_dev, 0x0039, 0x010a);
		cit_write_reg(gspca_dev, 0x0001, 0x0102);
		cit_write_reg(gspca_dev, 0x0028, 0x0103);
		cit_write_reg(gspca_dev, 0x0000, 0x0104);
		cit_write_reg(gspca_dev, 0x001e, 0x0105);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x0016, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x000a, 0x0127);
		cit_write_reg(gspca_dev, 0x00aa, 0x0130);
		cit_write_reg(gspca_dev, 0x82a8, 0x0124);
		cit_write_reg(gspca_dev, 0x0014, 0x012d);
		cit_write_reg(gspca_dev, 0x0008, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012e);
		cit_write_reg(gspca_dev, 0x001a, 0x0130);
		cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
		cit_write_reg(gspca_dev, 0x005a, 0x012d);
		cit_write_reg(gspca_dev, 0x9545, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x0127);
		cit_write_reg(gspca_dev, 0x0018, 0x012e);
		cit_write_reg(gspca_dev, 0x0043, 0x0130);
		cit_write_reg(gspca_dev, 0x8a28, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012f);
		cit_write_reg(gspca_dev, 0xd055, 0x0124);
		cit_write_reg(gspca_dev, 0x001c, 0x0127);
		cit_write_reg(gspca_dev, 0x00eb, 0x012e);
		cit_write_reg(gspca_dev, 0xaa28, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x0032, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x0000, 0x0127);
		cit_write_reg(gspca_dev, 0x00aa, 0x0130);
		cit_write_reg(gspca_dev, 0x82a8, 0x0124);
		cit_write_reg(gspca_dev, 0x0036, 0x012d);
		cit_write_reg(gspca_dev, 0x0008, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0xfffa, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x001e, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x0017, 0x0127);
		cit_write_reg(gspca_dev, 0x0013, 0x012e);
		cit_write_reg(gspca_dev, 0x0031, 0x0130);
		cit_write_reg(gspca_dev, 0x8a28, 0x0124);
		cit_write_reg(gspca_dev, 0x0017, 0x012d);
		cit_write_reg(gspca_dev, 0x0078, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0x0000, 0x0127);
		cit_write_reg(gspca_dev, 0xfea8, 0x0124);
		sd->sof_len = 2;
		break;
	case 352: /* 352x288 */
		cit_write_reg(gspca_dev, 0x0070, 0x0119);
		cit_write_reg(gspca_dev, 0x00c0, 0x0111);
		cit_write_reg(gspca_dev, 0x0039, 0x010a);
		cit_write_reg(gspca_dev, 0x0001, 0x0102);
		cit_write_reg(gspca_dev, 0x002c, 0x0103);
		cit_write_reg(gspca_dev, 0x0000, 0x0104);
		cit_write_reg(gspca_dev, 0x0024, 0x0105);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x0016, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x0006, 0x0127);
		cit_write_reg(gspca_dev, 0x00aa, 0x0130);
		cit_write_reg(gspca_dev, 0x82a8, 0x0124);
		cit_write_reg(gspca_dev, 0x0014, 0x012d);
		cit_write_reg(gspca_dev, 0x0002, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012e);
		cit_write_reg(gspca_dev, 0x001a, 0x0130);
		cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
		cit_write_reg(gspca_dev, 0x005e, 0x012d);
		cit_write_reg(gspca_dev, 0x9545, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x0127);
		cit_write_reg(gspca_dev, 0x0018, 0x012e);
		cit_write_reg(gspca_dev, 0x0049, 0x0130);
		cit_write_reg(gspca_dev, 0x8a28, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012f);
		cit_write_reg(gspca_dev, 0xd055, 0x0124);
		cit_write_reg(gspca_dev, 0x001c, 0x0127);
		cit_write_reg(gspca_dev, 0x00cf, 0x012e);
		cit_write_reg(gspca_dev, 0xaa28, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x0032, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x0000, 0x0127);
		cit_write_reg(gspca_dev, 0x00aa, 0x0130);
		cit_write_reg(gspca_dev, 0x82a8, 0x0124);
		cit_write_reg(gspca_dev, 0x0036, 0x012d);
		cit_write_reg(gspca_dev, 0x0008, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0xfffa, 0x0124);
		cit_write_reg(gspca_dev, 0x00aa, 0x012d);
		cit_write_reg(gspca_dev, 0x001e, 0x012f);
		cit_write_reg(gspca_dev, 0xd141, 0x0124);
		cit_write_reg(gspca_dev, 0x0010, 0x0127);
		cit_write_reg(gspca_dev, 0x0013, 0x012e);
		cit_write_reg(gspca_dev, 0x0025, 0x0130);
		cit_write_reg(gspca_dev, 0x8a28, 0x0124);
		cit_write_reg(gspca_dev, 0x0010, 0x012d);
		cit_write_reg(gspca_dev, 0x0048, 0x012f);
		cit_write_reg(gspca_dev, 0xd145, 0x0124);
		cit_write_reg(gspca_dev, 0x0000, 0x0127);
		cit_write_reg(gspca_dev, 0xfea8, 0x0124);
		sd->sof_len = 2;
		break;
	}

	cit_model4_Packet1(gspca_dev, 0x0038, 0x0004);

	return 0;
}

/*
 * Program the IBM Netcam Pro for streaming.
 * Unlike the other models this one derives the sensor clock divider
 * from the negotiated isoc packet size via cit_get_clock_div().
 * Returns 0 on success or the negative value from cit_get_clock_div().
 */
static int cit_start_ibm_netcam_pro(struct gspca_dev *gspca_dev)
{
	const unsigned short compression = 0; /* 0=none, 7=best frame rate */
	int i, clock_div;

	clock_div = cit_get_clock_div(gspca_dev);
	if (clock_div < 0)
		return clock_div;

	cit_write_reg(gspca_dev, 0x0003, 0x0133);
	cit_write_reg(gspca_dev, 0x0000, 0x0117);
	cit_write_reg(gspca_dev, 0x0008, 0x0123);
	cit_write_reg(gspca_dev, 0x0000, 0x0100);
	cit_write_reg(gspca_dev, 0x0060, 0x0116);
	/* cit_write_reg(gspca_dev, 0x0002, 0x0112); see sd_stop0 */
	cit_write_reg(gspca_dev, 0x0000, 0x0133);
	cit_write_reg(gspca_dev, 0x0000, 0x0123);
	cit_write_reg(gspca_dev, 0x0001, 0x0117);
	cit_write_reg(gspca_dev, 0x0040, 0x0108);
	cit_write_reg(gspca_dev, 0x0019, 0x012c);
	cit_write_reg(gspca_dev, 0x0060, 0x0116);
	/* cit_write_reg(gspca_dev, 0x000b, 0x0115); see sd_stop0 */

	cit_model3_Packet1(gspca_dev, 0x0049, 0x0000);

	cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */
	cit_write_reg(gspca_dev, 0x003a, 0x0102); /* Hstart */
	cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */
	cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */
	cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */
	cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same on 160x120, 320x240 */
	cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */
	cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
	cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */

	switch (gspca_dev->pixfmt.width) {
	case 160: /* 160x120 */
		cit_write_reg(gspca_dev, 0x0024, 0x010b);
		cit_write_reg(gspca_dev, 0x0089, 0x0119);
		cit_write_reg(gspca_dev, 0x000a, 0x011b);
		cit_write_reg(gspca_dev, 0x0003, 0x011e);
		cit_write_reg(gspca_dev, 0x0007, 0x0104);
		cit_write_reg(gspca_dev, 0x0009, 0x011a);
		cit_write_reg(gspca_dev, 0x008b, 0x011c);
		cit_write_reg(gspca_dev, 0x0008, 0x0118);
		cit_write_reg(gspca_dev, 0x0000, 0x0132);
		break;
	case 320: /* 320x240 */
		cit_write_reg(gspca_dev, 0x0028, 0x010b);
		cit_write_reg(gspca_dev, 0x00d9, 0x0119);
		cit_write_reg(gspca_dev, 0x0006, 0x011b);
		cit_write_reg(gspca_dev, 0x0000, 0x011e);
		cit_write_reg(gspca_dev, 0x000e, 0x0104);
		cit_write_reg(gspca_dev, 0x0004, 0x011a);
		cit_write_reg(gspca_dev, 0x003f, 0x011c);
		cit_write_reg(gspca_dev, 0x000c, 0x0118);
		cit_write_reg(gspca_dev, 0x0000, 0x0132);
		break;
	}

	/* Sensor setup block - opaque values from the Windows trace */
	cit_model3_Packet1(gspca_dev, 0x0019, 0x0031);
	cit_model3_Packet1(gspca_dev, 0x001a, 0x0003);
	cit_model3_Packet1(gspca_dev, 0x001b, 0x0038);
	cit_model3_Packet1(gspca_dev, 0x001c, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0024, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x0027, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x002a, 0x0004);
	cit_model3_Packet1(gspca_dev, 0x0035, 0x000b);
	cit_model3_Packet1(gspca_dev, 0x003f, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x0044, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x0054, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00c4, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00e7, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x00e9, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x00ee, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00f3, 0x00c0);

	cit_write_reg(gspca_dev, compression, 0x0109);
	cit_write_reg(gspca_dev, clock_div, 0x0111);

/*	if (sd->input_index) { */
	if (rca_input) {
		/* Replay the RCA (composite video) input init table */
		for (i = 0; i < ARRAY_SIZE(rca_initdata); i++) {
			if (rca_initdata[i][0])
				cit_read_reg(gspca_dev, rca_initdata[i][2], 0);
			else
				cit_write_reg(gspca_dev, rca_initdata[i][1],
					      rca_initdata[i][2]);
		}
	}

	return 0;
}

/* -- start the camera --
 */
static int sd_start(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int packet_size;

	packet_size = cit_get_packet_size(gspca_dev);
	if (packet_size < 0)
		return packet_size;

	/* Dispatch to the model-specific init/start sequence */
	switch (sd->model) {
	case CIT_MODEL0:
		cit_start_model0(gspca_dev);
		break;
	case CIT_MODEL1:
		cit_start_model1(gspca_dev);
		break;
	case CIT_MODEL2:
		cit_start_model2(gspca_dev);
		break;
	case CIT_MODEL3:
		cit_start_model3(gspca_dev);
		break;
	case CIT_MODEL4:
		cit_start_model4(gspca_dev);
		break;
	case CIT_IBM_NETCAM_PRO:
		cit_start_ibm_netcam_pro(gspca_dev);
		break;
	}

	/* Program max isoc packet size */
	cit_write_reg(gspca_dev, packet_size >> 8, 0x0106);
	cit_write_reg(gspca_dev, packet_size & 0xff, 0x0107);

	cit_restart_stream(gspca_dev);

	return 0;
}

/*
 * Pre-streaming hook: seed the isoc bandwidth "negotiation" by
 * patching wMaxPacketSize of alt setting 1 to the per-mode maximum.
 */
static int sd_isoc_init(struct gspca_dev *gspca_dev)
{
	struct usb_host_interface *alt;
	int max_packet_size;

	switch (gspca_dev->pixfmt.width) {
	case 160:
		max_packet_size = 450;
		break;
	case 176:
		max_packet_size = 600;
		break;
	default:
		max_packet_size = 1022;
		break;
	}

	/* Start isoc bandwidth "negotiation" at max isoc bandwidth */
	alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
	alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(max_packet_size);

	return 0;
}

/*
 * Bandwidth renegotiation hook: called when the isoc stream could not
 * be started with the current packet size. Steps the endpoint's
 * wMaxPacketSize down by 100 bytes (clamped to a per-mode minimum) and
 * re-selects alt setting 1. Returns -EIO when already at the minimum,
 * otherwise the usb_set_interface() result.
 */
static int sd_isoc_nego(struct gspca_dev *gspca_dev)
{
	int ret, packet_size, min_packet_size;
	struct usb_host_interface *alt;

	switch (gspca_dev->pixfmt.width) {
	case 160:
		min_packet_size = 200;
		break;
	case 176:
		min_packet_size = 266;
		break;
	default:
		min_packet_size = 400;
		break;
	}

	alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
	packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
	if (packet_size <= min_packet_size)
		return -EIO;

	packet_size -= 100;
	if (packet_size < min_packet_size)
		packet_size = min_packet_size;
	alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(packet_size);

	ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1);
	if (ret < 0)
		pr_err("set alt 1 err %d\n", ret);

	return ret;
}

/* Stop capturing: halt the video data stream (register 0x010c). */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	cit_write_reg(gspca_dev, 0x0000, 0x010c);
}

/*
 * Post-stream cleanup, called with USB still available unless the
 * device was unplugged (gspca_dev->present check). Runs the
 * model-specific shutdown/LED-off sequence and releases the button.
 */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	/* We cannot use gspca_dev->present here as that is not set when
	   sd_init gets called and we get called from sd_init */
	if (!gspca_dev->present)
		return;

	switch (sd->model) {
	case CIT_MODEL0:
		/* HDG windows does this, but it causes the cams autogain to
		   restart from a gain of 0, which does not look good when
		   changing resolutions. */
		/* cit_write_reg(gspca_dev, 0x0000, 0x0112); */
		cit_write_reg(gspca_dev, 0x00c0, 0x0100); /* LED Off */
		break;
	case CIT_MODEL1:
		cit_send_FF_04_02(gspca_dev);
		cit_read_reg(gspca_dev, 0x0100, 0);
		cit_write_reg(gspca_dev, 0x81, 0x0100); /* LED Off */
		break;
	case CIT_MODEL2:
		/* Re-enable the lighting control grabbed in cit_start_model2 */
		v4l2_ctrl_grab(sd->lighting, false);
		/* Fall through! */
	case CIT_MODEL4:
		cit_model2_Packet1(gspca_dev, 0x0030, 0x0004);
		cit_write_reg(gspca_dev, 0x0080, 0x0100);	/* LED Off */
		cit_write_reg(gspca_dev, 0x0020, 0x0111);
		cit_write_reg(gspca_dev, 0x00a0, 0x0111);
		cit_model2_Packet1(gspca_dev, 0x0030, 0x0002);
		cit_write_reg(gspca_dev, 0x0020, 0x0111);
		cit_write_reg(gspca_dev, 0x0000, 0x0112);
		break;
	case CIT_MODEL3:
		cit_write_reg(gspca_dev, 0x0006, 0x012c);
		cit_model3_Packet1(gspca_dev, 0x0046, 0x0000);
		cit_read_reg(gspca_dev, 0x0116, 0);
		cit_write_reg(gspca_dev, 0x0064, 0x0116);
		cit_read_reg(gspca_dev, 0x0115, 0);
		cit_write_reg(gspca_dev, 0x0003, 0x0115);
		cit_write_reg(gspca_dev, 0x0008, 0x0123);
		cit_write_reg(gspca_dev, 0x0000, 0x0117);
		cit_write_reg(gspca_dev, 0x0000, 0x0112);
		cit_write_reg(gspca_dev, 0x0080, 0x0100);
		break;
	case CIT_IBM_NETCAM_PRO:
		cit_model3_Packet1(gspca_dev, 0x0049, 0x00ff);
		cit_write_reg(gspca_dev, 0x0006, 0x012c);
		cit_write_reg(gspca_dev, 0x0000, 0x0116);
		/* HDG windows does this, but I cannot get the camera
		   to restart with this without redoing the entire init
		   sequence which makes switching modes really slow */
		/* cit_write_reg(gspca_dev, 0x0006, 0x0115); */
		cit_write_reg(gspca_dev, 0x0008, 0x0123);
		cit_write_reg(gspca_dev, 0x0000, 0x0117);
		cit_write_reg(gspca_dev, 0x0003, 0x0133);
		cit_write_reg(gspca_dev, 0x0000, 0x0111);
		/* HDG
		   windows does this, but I get a green picture when
		   restarting the stream after this */
		/* cit_write_reg(gspca_dev, 0x0000, 0x0112); */
		cit_write_reg(gspca_dev, 0x00c0, 0x0100);
		break;
	}

#if IS_ENABLED(CONFIG_INPUT)
	/* If the last button state is pressed, release it now! */
	if (sd->button_state) {
		input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
		input_sync(gspca_dev->input_dev);
		sd->button_state = 0;
	}
#endif
}

/*
 * Scan 'data' (len bytes) for a start-of-frame marker, carrying the
 * match state in sd->sof_read across calls so markers split over
 * packet boundaries are still found.
 *
 * Models 0/1/3/netcam-pro use a 4-byte signature
 * 00 ff <byte3> <byte4> whose last two bytes depend on model and
 * resolution; models 2/4 only have a short 00 ff signature.
 *
 * Returns a pointer just past the SOF header (sd->sof_len bytes after
 * the matched signature position), or NULL if no marker was found.
 */
static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 byte3 = 0, byte4 = 0;
	int i;

	switch (sd->model) {
	case CIT_MODEL0:
	case CIT_MODEL1:
	case CIT_MODEL3:
	case CIT_IBM_NETCAM_PRO:
		switch (gspca_dev->pixfmt.width) {
		case 160: /* 160x120 */
			byte3 = 0x02;
			byte4 = 0x0a;
			break;
		case 176: /* 176x144 */
			byte3 = 0x02;
			byte4 = 0x0e;
			break;
		case 320: /* 320x240 */
			byte3 = 0x02;
			byte4 = 0x08;
			break;
		case 352: /* 352x288 */
			byte3 = 0x02;
			byte4 = 0x00;
			break;
		case 640:
			byte3 = 0x03;
			byte4 = 0x08;
			break;
		}

		/* These have a different byte3 */
		if (sd->model <= CIT_MODEL1)
			byte3 = 0x00;

		for (i = 0; i < len; i++) {
			/* For this model the SOF always starts at offset 0
			   so no need to search the entire frame */
			if (sd->model == CIT_MODEL0 && sd->sof_read != i)
				break;

			/* Byte-at-a-time state machine; sd->sof_read is the
			   number of signature bytes matched so far */
			switch (sd->sof_read) {
			case 0:
				if (data[i] == 0x00)
					sd->sof_read++;
				break;
			case 1:
				if (data[i] == 0xff)
					sd->sof_read++;
				else if (data[i] == 0x00)
					sd->sof_read = 1;
				else
					sd->sof_read = 0;
				break;
			case 2:
				if (data[i] == byte3)
					sd->sof_read++;
				else if (data[i] == 0x00)
					sd->sof_read = 1;
				else
					sd->sof_read = 0;
				break;
			case 3:
				if (data[i] == byte4) {
					sd->sof_read = 0;
					return data + i + (sd->sof_len - 3);
				}
				if (byte3 == 0x00 && data[i] == 0xff)
					sd->sof_read = 2;
				else if (data[i] == 0x00)
					sd->sof_read = 1;
				else
					sd->sof_read = 0;
				break;
			}
		}
		break;
	case CIT_MODEL2:
	case CIT_MODEL4:
		/* TESTME we need to find a longer sof signature to avoid
		   false positives */
		for (i = 0; i < len; i++) {
			switch (sd->sof_read) {
			case 0:
				if (data[i] == 0x00)
					sd->sof_read++;
				break;
			case 1:
				sd->sof_read = 0;
				if (data[i] == 0xff) {
					if (i >= 4)
						PDEBUG(D_FRAM,
						       "header found at offset: %d: %02x %02x 00 %3ph\n",
						       i - 1,
						       data[i - 4],
						       data[i - 3],
						       &data[i]);
					else
						PDEBUG(D_FRAM,
						       "header found at offset: %d: 00 %3ph\n",
						       i - 1,
						       &data[i]);
					return data + i + (sd->sof_len - 1);
				}
				break;
			}
		}
		break;
	}
	return NULL;
}

/*
 * gspca packet-scan callback: splits the incoming isoc data on SOF
 * markers, closing the current frame and opening a new one whenever
 * cit_find_sof() finds a header inside the packet.
 */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data, int len)
{
	struct sd *sd = (struct sd *) gspca_dev;
	unsigned char *sof;

	sof = cit_find_sof(gspca_dev, data, len);
	if (sof) {
		int n;

		/* finish decoding current frame */
		n = sof - data;
		if (n > sd->sof_len)
			n -= sd->sof_len;
		else
			n = 0;
		gspca_frame_add(gspca_dev, LAST_PACKET,
				data, n);
		gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
		len -= sof - data;
		data = sof;
	}
	gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}

#if IS_ENABLED(CONFIG_INPUT)
/*
 * Poll the snapshot button (register 0x0113) and forward state changes
 * to the input subsystem as KEY_CAMERA events. Only known to work on
 * model 3 and the IBM Netcam Pro.
 */
static void cit_check_button(struct gspca_dev *gspca_dev)
{
	int new_button_state;
	struct sd *sd = (struct sd *)gspca_dev;

	switch (sd->model) {
	case CIT_MODEL3:
	case CIT_IBM_NETCAM_PRO:
		break;
	default: /* TEST ME unknown if this works on other models too */
		return;
	}

	/* Read the button state */
	cit_read_reg(gspca_dev, 0x0113, 0);
	new_button_state = !gspca_dev->usb_buf[0];

	/* Tell the cam we've seen the button press, notice that this
	   is a nop (iow the cam keeps reporting pressed) until the
	   button is actually released.
*/ if (new_button_state) cit_write_reg(gspca_dev, 0x01, 0x0113); if (sd->button_state != new_button_state) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, new_button_state); input_sync(gspca_dev->input_dev); sd->button_state = new_button_state; } } #endif static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; if (sd->stop_on_control_change) sd_stopN(gspca_dev); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: cit_set_brightness(gspca_dev, ctrl->val); break; case V4L2_CID_CONTRAST: cit_set_contrast(gspca_dev, ctrl->val); break; case V4L2_CID_HUE: cit_set_hue(gspca_dev, ctrl->val); break; case V4L2_CID_HFLIP: cit_set_hflip(gspca_dev, ctrl->val); break; case V4L2_CID_SHARPNESS: cit_set_sharpness(gspca_dev, ctrl->val); break; case V4L2_CID_BACKLIGHT_COMPENSATION: cit_set_lighting(gspca_dev, ctrl->val); break; } if (sd->stop_on_control_change) cit_restart_stream(gspca_dev); return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; bool has_brightness; bool has_contrast; bool has_hue; bool has_sharpness; bool has_lighting; bool has_hflip; has_brightness = has_contrast = has_hue = has_sharpness = has_hflip = has_lighting = false; switch (sd->model) { case CIT_MODEL0: has_contrast = has_hflip = true; break; case CIT_MODEL1: has_brightness = has_contrast = has_sharpness = has_lighting = true; break; case CIT_MODEL2: has_brightness = has_hue = has_lighting = true; break; case CIT_MODEL3: has_brightness = has_contrast = has_sharpness = true; break; case CIT_MODEL4: has_brightness = has_hue = true; break; case CIT_IBM_NETCAM_PRO: has_brightness = has_hue = has_sharpness = has_hflip = has_lighting 
= true; break; } gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 5); if (has_brightness) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 63, 1, 32); if (has_contrast) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 20, 1, 10); if (has_hue) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HUE, 0, 127, 1, 63); if (has_sharpness) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SHARPNESS, 0, 6, 1, 3); if (has_lighting) sd->lighting = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BACKLIGHT_COMPENSATION, 0, 2, 1, 1); if (has_hflip) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, #if IS_ENABLED(CONFIG_INPUT) .dq_callback = cit_check_button, .other_input = 1, #endif }; static const struct sd_desc sd_desc_isoc_nego = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .isoc_init = sd_isoc_init, .isoc_nego = sd_isoc_nego, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, #if IS_ENABLED(CONFIG_INPUT) .dq_callback = cit_check_button, .other_input = 1, #endif }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { { USB_DEVICE_VER(0x0545, 0x8080, 0x0001, 0x0001), .driver_info = CIT_MODEL0 }, { USB_DEVICE_VER(0x0545, 0x8080, 0x0002, 0x0002), .driver_info = CIT_MODEL1 }, { USB_DEVICE_VER(0x0545, 0x8080, 0x030a, 0x030a), .driver_info = CIT_MODEL2 }, { USB_DEVICE_VER(0x0545, 0x8080, 0x0301, 0x0301), .driver_info = CIT_MODEL3 }, { USB_DEVICE_VER(0x0545, 0x8002, 0x030a, 0x030a), .driver_info = CIT_MODEL4 }, { USB_DEVICE_VER(0x0545, 0x800c, 0x030a, 0x030a), .driver_info = CIT_MODEL2 }, { 
USB_DEVICE_VER(0x0545, 0x800d, 0x030a, 0x030a), .driver_info = CIT_MODEL4 }, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct sd_desc *desc = &sd_desc; switch (id->driver_info) { case CIT_MODEL0: case CIT_MODEL1: if (intf->cur_altsetting->desc.bInterfaceNumber != 2) return -ENODEV; break; case CIT_MODEL2: case CIT_MODEL4: if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; break; case CIT_MODEL3: if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; /* FIXME this likely applies to all model3 cams and probably to other models too. */ if (ibm_netcam_pro) desc = &sd_desc_isoc_nego; break; } return gspca_dev_probe2(intf, id, desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
BORETS24/Kernel-for-Asus-Zenfone-2
drivers/target/target_core_stat.c
2338
49270
/******************************************************************************* * Filename: target_core_stat.c * * Modern ConfigFS group context specific statistics based on original * target_core_mib.c code * * (c) Copyright 2006-2012 RisingTide Systems LLC. * * Nicholas A. Bellinger <nab@linux-iscsi.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
* ******************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/utsname.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/blkdev.h> #include <linux/configfs.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> #include <target/target_core_fabric.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> #include "target_core_internal.h" #ifndef INITIAL_JIFFIES #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) #endif #define NONE "None" #define ISPRINT(a) ((a >= ' ') && (a <= '~')) #define SCSI_LU_INDEX 1 #define LU_COUNT 1 /* * SCSI Device Table */ CONFIGFS_EATTR_STRUCT(target_stat_scsi_dev, se_dev_stat_grps); #define DEV_STAT_SCSI_DEV_ATTR(_name, _mode) \ static struct target_stat_scsi_dev_attribute \ target_stat_scsi_dev_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_stat_scsi_dev_show_attr_##_name, \ target_stat_scsi_dev_store_attr_##_name); #define DEV_STAT_SCSI_DEV_ATTR_RO(_name) \ static struct target_stat_scsi_dev_attribute \ target_stat_scsi_dev_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_stat_scsi_dev_show_attr_##_name); static ssize_t target_stat_scsi_dev_show_attr_inst( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); struct se_hba *hba = dev->se_hba; return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); } DEV_STAT_SCSI_DEV_ATTR_RO(inst); static ssize_t target_stat_scsi_dev_show_attr_indx( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); } DEV_STAT_SCSI_DEV_ATTR_RO(indx); static ssize_t 
target_stat_scsi_dev_show_attr_role( struct se_dev_stat_grps *sgrps, char *page) { return snprintf(page, PAGE_SIZE, "Target\n"); } DEV_STAT_SCSI_DEV_ATTR_RO(role); static ssize_t target_stat_scsi_dev_show_attr_ports( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count); } DEV_STAT_SCSI_DEV_ATTR_RO(ports); CONFIGFS_EATTR_OPS(target_stat_scsi_dev, se_dev_stat_grps, scsi_dev_group); static struct configfs_attribute *target_stat_scsi_dev_attrs[] = { &target_stat_scsi_dev_inst.attr, &target_stat_scsi_dev_indx.attr, &target_stat_scsi_dev_role.attr, &target_stat_scsi_dev_ports.attr, NULL, }; static struct configfs_item_operations target_stat_scsi_dev_attrib_ops = { .show_attribute = target_stat_scsi_dev_attr_show, .store_attribute = target_stat_scsi_dev_attr_store, }; static struct config_item_type target_stat_scsi_dev_cit = { .ct_item_ops = &target_stat_scsi_dev_attrib_ops, .ct_attrs = target_stat_scsi_dev_attrs, .ct_owner = THIS_MODULE, }; /* * SCSI Target Device Table */ CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_dev, se_dev_stat_grps); #define DEV_STAT_SCSI_TGT_DEV_ATTR(_name, _mode) \ static struct target_stat_scsi_tgt_dev_attribute \ target_stat_scsi_tgt_dev_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_stat_scsi_tgt_dev_show_attr_##_name, \ target_stat_scsi_tgt_dev_store_attr_##_name); #define DEV_STAT_SCSI_TGT_DEV_ATTR_RO(_name) \ static struct target_stat_scsi_tgt_dev_attribute \ target_stat_scsi_tgt_dev_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_stat_scsi_tgt_dev_show_attr_##_name); static ssize_t target_stat_scsi_tgt_dev_show_attr_inst( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); struct se_hba *hba = dev->se_hba; return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); } DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst); static ssize_t 
target_stat_scsi_tgt_dev_show_attr_indx( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); } DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx); static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus( struct se_dev_stat_grps *sgrps, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT); } DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus); static ssize_t target_stat_scsi_tgt_dev_show_attr_status( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); if (dev->export_count) return snprintf(page, PAGE_SIZE, "activated"); else return snprintf(page, PAGE_SIZE, "deactivated"); } DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status); static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); int non_accessible_lus; if (dev->export_count) non_accessible_lus = 0; else non_accessible_lus = 1; return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus); } DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus); static ssize_t target_stat_scsi_tgt_dev_show_attr_resets( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); } DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets); CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_dev, se_dev_stat_grps, scsi_tgt_dev_group); static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = { &target_stat_scsi_tgt_dev_inst.attr, &target_stat_scsi_tgt_dev_indx.attr, &target_stat_scsi_tgt_dev_num_lus.attr, &target_stat_scsi_tgt_dev_status.attr, &target_stat_scsi_tgt_dev_non_access_lus.attr, &target_stat_scsi_tgt_dev_resets.attr, NULL, }; static struct configfs_item_operations target_stat_scsi_tgt_dev_attrib_ops = { .show_attribute = 
target_stat_scsi_tgt_dev_attr_show, .store_attribute = target_stat_scsi_tgt_dev_attr_store, }; static struct config_item_type target_stat_scsi_tgt_dev_cit = { .ct_item_ops = &target_stat_scsi_tgt_dev_attrib_ops, .ct_attrs = target_stat_scsi_tgt_dev_attrs, .ct_owner = THIS_MODULE, }; /* * SCSI Logical Unit Table */ CONFIGFS_EATTR_STRUCT(target_stat_scsi_lu, se_dev_stat_grps); #define DEV_STAT_SCSI_LU_ATTR(_name, _mode) \ static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_stat_scsi_lu_show_attr_##_name, \ target_stat_scsi_lu_store_attr_##_name); #define DEV_STAT_SCSI_LU_ATTR_RO(_name) \ static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_stat_scsi_lu_show_attr_##_name); static ssize_t target_stat_scsi_lu_show_attr_inst( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); struct se_hba *hba = dev->se_hba; return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); } DEV_STAT_SCSI_LU_ATTR_RO(inst); static ssize_t target_stat_scsi_lu_show_attr_dev( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); } DEV_STAT_SCSI_LU_ATTR_RO(dev); static ssize_t target_stat_scsi_lu_show_attr_indx( struct se_dev_stat_grps *sgrps, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX); } DEV_STAT_SCSI_LU_ATTR_RO(indx); static ssize_t target_stat_scsi_lu_show_attr_lun( struct se_dev_stat_grps *sgrps, char *page) { /* FIXME: scsiLuDefaultLun */ return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0); } DEV_STAT_SCSI_LU_ATTR_RO(lun); static ssize_t target_stat_scsi_lu_show_attr_lu_name( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuWwnName */ return 
snprintf(page, PAGE_SIZE, "%s\n", (strlen(dev->t10_wwn.unit_serial)) ? dev->t10_wwn.unit_serial : "None"); } DEV_STAT_SCSI_LU_ATTR_RO(lu_name); static ssize_t target_stat_scsi_lu_show_attr_vend( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); int i; char str[sizeof(dev->t10_wwn.vendor)+1]; /* scsiLuVendorId */ for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++) str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ? dev->t10_wwn.vendor[i] : ' '; str[i] = '\0'; return snprintf(page, PAGE_SIZE, "%s\n", str); } DEV_STAT_SCSI_LU_ATTR_RO(vend); static ssize_t target_stat_scsi_lu_show_attr_prod( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); int i; char str[sizeof(dev->t10_wwn.model)+1]; /* scsiLuProductId */ for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++) str[i] = ISPRINT(dev->t10_wwn.model[i]) ? dev->t10_wwn.model[i] : ' '; str[i] = '\0'; return snprintf(page, PAGE_SIZE, "%s\n", str); } DEV_STAT_SCSI_LU_ATTR_RO(prod); static ssize_t target_stat_scsi_lu_show_attr_rev( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); int i; char str[sizeof(dev->t10_wwn.revision)+1]; /* scsiLuRevisionId */ for (i = 0; i < sizeof(dev->t10_wwn.revision); i++) str[i] = ISPRINT(dev->t10_wwn.revision[i]) ? 
dev->t10_wwn.revision[i] : ' '; str[i] = '\0'; return snprintf(page, PAGE_SIZE, "%s\n", str); } DEV_STAT_SCSI_LU_ATTR_RO(rev); static ssize_t target_stat_scsi_lu_show_attr_dev_type( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuPeripheralType */ return snprintf(page, PAGE_SIZE, "%u\n", dev->transport->get_device_type(dev)); } DEV_STAT_SCSI_LU_ATTR_RO(dev_type); static ssize_t target_stat_scsi_lu_show_attr_status( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuStatus */ return snprintf(page, PAGE_SIZE, "%s\n", (dev->export_count) ? "available" : "notavailable"); } DEV_STAT_SCSI_LU_ATTR_RO(status); static ssize_t target_stat_scsi_lu_show_attr_state_bit( struct se_dev_stat_grps *sgrps, char *page) { /* scsiLuState */ return snprintf(page, PAGE_SIZE, "exposed\n"); } DEV_STAT_SCSI_LU_ATTR_RO(state_bit); static ssize_t target_stat_scsi_lu_show_attr_num_cmds( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuNumCommands */ return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)dev->num_cmds); } DEV_STAT_SCSI_LU_ATTR_RO(num_cmds); static ssize_t target_stat_scsi_lu_show_attr_read_mbytes( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuReadMegaBytes */ return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); } DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes); static ssize_t target_stat_scsi_lu_show_attr_write_mbytes( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuWrittenMegaBytes */ return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); } DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes); static ssize_t 
target_stat_scsi_lu_show_attr_resets( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuInResets */ return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); } DEV_STAT_SCSI_LU_ATTR_RO(resets); static ssize_t target_stat_scsi_lu_show_attr_full_stat( struct se_dev_stat_grps *sgrps, char *page) { /* FIXME: scsiLuOutTaskSetFullStatus */ return snprintf(page, PAGE_SIZE, "%u\n", 0); } DEV_STAT_SCSI_LU_ATTR_RO(full_stat); static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds( struct se_dev_stat_grps *sgrps, char *page) { /* FIXME: scsiLuHSInCommands */ return snprintf(page, PAGE_SIZE, "%u\n", 0); } DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds); static ssize_t target_stat_scsi_lu_show_attr_creation_time( struct se_dev_stat_grps *sgrps, char *page) { struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuCreationTime */ return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) * 100 / HZ)); } DEV_STAT_SCSI_LU_ATTR_RO(creation_time); CONFIGFS_EATTR_OPS(target_stat_scsi_lu, se_dev_stat_grps, scsi_lu_group); static struct configfs_attribute *target_stat_scsi_lu_attrs[] = { &target_stat_scsi_lu_inst.attr, &target_stat_scsi_lu_dev.attr, &target_stat_scsi_lu_indx.attr, &target_stat_scsi_lu_lun.attr, &target_stat_scsi_lu_lu_name.attr, &target_stat_scsi_lu_vend.attr, &target_stat_scsi_lu_prod.attr, &target_stat_scsi_lu_rev.attr, &target_stat_scsi_lu_dev_type.attr, &target_stat_scsi_lu_status.attr, &target_stat_scsi_lu_state_bit.attr, &target_stat_scsi_lu_num_cmds.attr, &target_stat_scsi_lu_read_mbytes.attr, &target_stat_scsi_lu_write_mbytes.attr, &target_stat_scsi_lu_resets.attr, &target_stat_scsi_lu_full_stat.attr, &target_stat_scsi_lu_hs_num_cmds.attr, &target_stat_scsi_lu_creation_time.attr, NULL, }; static struct configfs_item_operations target_stat_scsi_lu_attrib_ops = { .show_attribute = target_stat_scsi_lu_attr_show, 
.store_attribute = target_stat_scsi_lu_attr_store, }; static struct config_item_type target_stat_scsi_lu_cit = { .ct_item_ops = &target_stat_scsi_lu_attrib_ops, .ct_attrs = target_stat_scsi_lu_attrs, .ct_owner = THIS_MODULE, }; /* * Called from target_core_configfs.c:target_core_make_subdev() to setup * the target statistics groups + configfs CITs located in target_core_stat.c */ void target_stat_setup_dev_default_groups(struct se_device *dev) { struct config_group *dev_stat_grp = &dev->dev_stat_grps.stat_group; config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group, "scsi_dev", &target_stat_scsi_dev_cit); config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group, "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group, "scsi_lu", &target_stat_scsi_lu_cit); dev_stat_grp->default_groups[0] = &dev->dev_stat_grps.scsi_dev_group; dev_stat_grp->default_groups[1] = &dev->dev_stat_grps.scsi_tgt_dev_group; dev_stat_grp->default_groups[2] = &dev->dev_stat_grps.scsi_lu_group; dev_stat_grp->default_groups[3] = NULL; } /* * SCSI Port Table */ CONFIGFS_EATTR_STRUCT(target_stat_scsi_port, se_port_stat_grps); #define DEV_STAT_SCSI_PORT_ATTR(_name, _mode) \ static struct target_stat_scsi_port_attribute \ target_stat_scsi_port_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_stat_scsi_port_show_attr_##_name, \ target_stat_scsi_port_store_attr_##_name); #define DEV_STAT_SCSI_PORT_ATTR_RO(_name) \ static struct target_stat_scsi_port_attribute \ target_stat_scsi_port_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_stat_scsi_port_show_attr_##_name); static ssize_t target_stat_scsi_port_show_attr_inst( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; struct se_device *dev = lun->lun_se_dev; struct se_hba *hba; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return 
-ENODEV; } hba = dev->se_hba; ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_PORT_ATTR_RO(inst); static ssize_t target_stat_scsi_port_show_attr_dev( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; struct se_device *dev = lun->lun_se_dev; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_PORT_ATTR_RO(dev); static ssize_t target_stat_scsi_port_show_attr_indx( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_PORT_ATTR_RO(indx); static ssize_t target_stat_scsi_port_show_attr_role( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_device *dev = lun->lun_se_dev; struct se_port *sep; ssize_t ret; if (!dev) return -ENODEV; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_PORT_ATTR_RO(role); static ssize_t target_stat_scsi_port_show_attr_busy_count( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } /* FIXME: 
scsiPortBusyStatuses */ ret = snprintf(page, PAGE_SIZE, "%u\n", 0); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_PORT_ATTR_RO(busy_count); CONFIGFS_EATTR_OPS(target_stat_scsi_port, se_port_stat_grps, scsi_port_group); static struct configfs_attribute *target_stat_scsi_port_attrs[] = { &target_stat_scsi_port_inst.attr, &target_stat_scsi_port_dev.attr, &target_stat_scsi_port_indx.attr, &target_stat_scsi_port_role.attr, &target_stat_scsi_port_busy_count.attr, NULL, }; static struct configfs_item_operations target_stat_scsi_port_attrib_ops = { .show_attribute = target_stat_scsi_port_attr_show, .store_attribute = target_stat_scsi_port_attr_store, }; static struct config_item_type target_stat_scsi_port_cit = { .ct_item_ops = &target_stat_scsi_port_attrib_ops, .ct_attrs = target_stat_scsi_port_attrs, .ct_owner = THIS_MODULE, }; /* * SCSI Target Port Table */ CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_port, se_port_stat_grps); #define DEV_STAT_SCSI_TGT_PORT_ATTR(_name, _mode) \ static struct target_stat_scsi_tgt_port_attribute \ target_stat_scsi_tgt_port_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_stat_scsi_tgt_port_show_attr_##_name, \ target_stat_scsi_tgt_port_store_attr_##_name); #define DEV_STAT_SCSI_TGT_PORT_ATTR_RO(_name) \ static struct target_stat_scsi_tgt_port_attribute \ target_stat_scsi_tgt_port_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_stat_scsi_tgt_port_show_attr_##_name); static ssize_t target_stat_scsi_tgt_port_show_attr_inst( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_device *dev = lun->lun_se_dev; struct se_port *sep; struct se_hba *hba; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } hba = dev->se_hba; ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst); static ssize_t 
target_stat_scsi_tgt_port_show_attr_dev( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_device *dev = lun->lun_se_dev; struct se_port *sep; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev); static ssize_t target_stat_scsi_tgt_port_show_attr_indx( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx); static ssize_t target_stat_scsi_tgt_port_show_attr_name( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; struct se_portal_group *tpg; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } tpg = sep->sep_tpg; ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name); static ssize_t target_stat_scsi_tgt_port_show_attr_port_index( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; struct se_portal_group *tpg; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } tpg = sep->sep_tpg; ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+", 
tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index); static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds); static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(sep->sep_stats.rx_data_octets >> 20)); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes); static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(sep->sep_stats.tx_data_octets >> 20)); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes); static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } /* FIXME: scsiTgtPortHsInCommands */ 
ret = snprintf(page, PAGE_SIZE, "%u\n", 0); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds); CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_port, se_port_stat_grps, scsi_tgt_port_group); static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = { &target_stat_scsi_tgt_port_inst.attr, &target_stat_scsi_tgt_port_dev.attr, &target_stat_scsi_tgt_port_indx.attr, &target_stat_scsi_tgt_port_name.attr, &target_stat_scsi_tgt_port_port_index.attr, &target_stat_scsi_tgt_port_in_cmds.attr, &target_stat_scsi_tgt_port_write_mbytes.attr, &target_stat_scsi_tgt_port_read_mbytes.attr, &target_stat_scsi_tgt_port_hs_in_cmds.attr, NULL, }; static struct configfs_item_operations target_stat_scsi_tgt_port_attrib_ops = { .show_attribute = target_stat_scsi_tgt_port_attr_show, .store_attribute = target_stat_scsi_tgt_port_attr_store, }; static struct config_item_type target_stat_scsi_tgt_port_cit = { .ct_item_ops = &target_stat_scsi_tgt_port_attrib_ops, .ct_attrs = target_stat_scsi_tgt_port_attrs, .ct_owner = THIS_MODULE, }; /* * SCSI Transport Table o */ CONFIGFS_EATTR_STRUCT(target_stat_scsi_transport, se_port_stat_grps); #define DEV_STAT_SCSI_TRANSPORT_ATTR(_name, _mode) \ static struct target_stat_scsi_transport_attribute \ target_stat_scsi_transport_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_stat_scsi_transport_show_attr_##_name, \ target_stat_scsi_transport_store_attr_##_name); #define DEV_STAT_SCSI_TRANSPORT_ATTR_RO(_name) \ static struct target_stat_scsi_transport_attribute \ target_stat_scsi_transport_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_stat_scsi_transport_show_attr_##_name); static ssize_t target_stat_scsi_transport_show_attr_inst( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_device *dev = lun->lun_se_dev; struct se_port *sep; struct se_hba *hba; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { 
spin_unlock(&lun->lun_sep_lock); return -ENODEV; } hba = dev->se_hba; ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst); static ssize_t target_stat_scsi_transport_show_attr_device( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; struct se_portal_group *tpg; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } tpg = sep->sep_tpg; /* scsiTransportType */ ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", tpg->se_tpg_tfo->get_fabric_name()); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device); static ssize_t target_stat_scsi_transport_show_attr_indx( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_port *sep; struct se_portal_group *tpg; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } tpg = sep->sep_tpg; ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx); static ssize_t target_stat_scsi_transport_show_attr_dev_name( struct se_port_stat_grps *pgrps, char *page) { struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); struct se_device *dev = lun->lun_se_dev; struct se_port *sep; struct se_portal_group *tpg; struct t10_wwn *wwn; ssize_t ret; spin_lock(&lun->lun_sep_lock); sep = lun->lun_sep; if (!sep) { spin_unlock(&lun->lun_sep_lock); return -ENODEV; } tpg = sep->sep_tpg; wwn = &dev->t10_wwn; /* scsiTransportDevName */ ret = snprintf(page, PAGE_SIZE, "%s+%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg), (strlen(wwn->unit_serial)) ? 
wwn->unit_serial : wwn->vendor); spin_unlock(&lun->lun_sep_lock); return ret; } DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name); CONFIGFS_EATTR_OPS(target_stat_scsi_transport, se_port_stat_grps, scsi_transport_group); static struct configfs_attribute *target_stat_scsi_transport_attrs[] = { &target_stat_scsi_transport_inst.attr, &target_stat_scsi_transport_device.attr, &target_stat_scsi_transport_indx.attr, &target_stat_scsi_transport_dev_name.attr, NULL, }; static struct configfs_item_operations target_stat_scsi_transport_attrib_ops = { .show_attribute = target_stat_scsi_transport_attr_show, .store_attribute = target_stat_scsi_transport_attr_store, }; static struct config_item_type target_stat_scsi_transport_cit = { .ct_item_ops = &target_stat_scsi_transport_attrib_ops, .ct_attrs = target_stat_scsi_transport_attrs, .ct_owner = THIS_MODULE, }; /* * Called from target_core_fabric_configfs.c:target_fabric_make_lun() to setup * the target port statistics groups + configfs CITs located in target_core_stat.c */ void target_stat_setup_port_default_groups(struct se_lun *lun) { struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group; config_group_init_type_name(&lun->port_stat_grps.scsi_port_group, "scsi_port", &target_stat_scsi_port_cit); config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group, "scsi_tgt_port", &target_stat_scsi_tgt_port_cit); config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group, "scsi_transport", &target_stat_scsi_transport_cit); port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group; port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group; port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group; port_stat_grp->default_groups[3] = NULL; } /* * SCSI Authorized Initiator Table */ CONFIGFS_EATTR_STRUCT(target_stat_scsi_auth_intr, se_ml_stat_grps); #define DEV_STAT_SCSI_AUTH_INTR_ATTR(_name, _mode) \ static struct target_stat_scsi_auth_intr_attribute \ 
target_stat_scsi_auth_intr_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_stat_scsi_auth_intr_show_attr_##_name, \ target_stat_scsi_auth_intr_store_attr_##_name); #define DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(_name) \ static struct target_stat_scsi_auth_intr_attribute \ target_stat_scsi_auth_intr_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_stat_scsi_auth_intr_show_attr_##_name); static ssize_t target_stat_scsi_auth_intr_show_attr_inst( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; struct se_portal_group *tpg; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } tpg = nacl->se_tpg; /* scsiInstIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst); static ssize_t target_stat_scsi_auth_intr_show_attr_dev( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; struct se_lun *lun; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } lun = deve->se_lun; /* scsiDeviceIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev); static ssize_t target_stat_scsi_auth_intr_show_attr_port( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; 
struct se_dev_entry *deve; struct se_portal_group *tpg; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } tpg = nacl->se_tpg; /* scsiAuthIntrTgtPortIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port); static ssize_t target_stat_scsi_auth_intr_show_attr_indx( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } /* scsiAuthIntrIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx); static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } /* scsiAuthIntrDevOrPort */ ret = snprintf(page, PAGE_SIZE, "%u\n", 1); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port); static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry 
*deve; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } /* scsiAuthIntrName */ ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name); static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } /* FIXME: scsiAuthIntrLunMapIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", 0); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx); static ssize_t target_stat_scsi_auth_intr_show_attr_att_count( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } /* scsiAuthIntrAttachedTimes */ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count); static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); 
deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } /* scsiAuthIntrOutCommands */ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds); static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } /* scsiAuthIntrReadMegaBytes */ ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20)); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes); static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } /* scsiAuthIntrWrittenMegaBytes */ ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20)); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes); static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = 
nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } /* FIXME: scsiAuthIntrHSOutCommands */ ret = snprintf(page, PAGE_SIZE, "%u\n", 0); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds); static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } /* scsiAuthIntrLastCreation */ ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time - INITIAL_JIFFIES) * 100 / HZ)); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time); static ssize_t target_stat_scsi_auth_intr_show_attr_row_status( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } /* FIXME: scsiAuthIntrRowStatus */ ret = snprintf(page, PAGE_SIZE, "Ready\n"); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status); CONFIGFS_EATTR_OPS(target_stat_scsi_auth_intr, se_ml_stat_grps, scsi_auth_intr_group); static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = { &target_stat_scsi_auth_intr_inst.attr, &target_stat_scsi_auth_intr_dev.attr, &target_stat_scsi_auth_intr_port.attr, &target_stat_scsi_auth_intr_indx.attr, 
&target_stat_scsi_auth_intr_dev_or_port.attr, &target_stat_scsi_auth_intr_intr_name.attr, &target_stat_scsi_auth_intr_map_indx.attr, &target_stat_scsi_auth_intr_att_count.attr, &target_stat_scsi_auth_intr_num_cmds.attr, &target_stat_scsi_auth_intr_read_mbytes.attr, &target_stat_scsi_auth_intr_write_mbytes.attr, &target_stat_scsi_auth_intr_hs_num_cmds.attr, &target_stat_scsi_auth_intr_creation_time.attr, &target_stat_scsi_auth_intr_row_status.attr, NULL, }; static struct configfs_item_operations target_stat_scsi_auth_intr_attrib_ops = { .show_attribute = target_stat_scsi_auth_intr_attr_show, .store_attribute = target_stat_scsi_auth_intr_attr_store, }; static struct config_item_type target_stat_scsi_auth_intr_cit = { .ct_item_ops = &target_stat_scsi_auth_intr_attrib_ops, .ct_attrs = target_stat_scsi_auth_intr_attrs, .ct_owner = THIS_MODULE, }; /* * SCSI Attached Initiator Port Table */ CONFIGFS_EATTR_STRUCT(target_stat_scsi_att_intr_port, se_ml_stat_grps); #define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR(_name, _mode) \ static struct target_stat_scsi_att_intr_port_attribute \ target_stat_scsi_att_intr_port_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_stat_scsi_att_intr_port_show_attr_##_name, \ target_stat_scsi_att_intr_port_store_attr_##_name); #define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(_name) \ static struct target_stat_scsi_att_intr_port_attribute \ target_stat_scsi_att_intr_port_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_stat_scsi_att_intr_port_show_attr_##_name); static ssize_t target_stat_scsi_att_intr_port_show_attr_inst( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; struct se_portal_group *tpg; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } tpg = 
nacl->se_tpg; /* scsiInstIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst); static ssize_t target_stat_scsi_att_intr_port_show_attr_dev( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; struct se_lun *lun; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } lun = deve->se_lun; /* scsiDeviceIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev); static ssize_t target_stat_scsi_att_intr_port_show_attr_port( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; struct se_portal_group *tpg; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } tpg = nacl->se_tpg; /* scsiPortIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port); static ssize_t target_stat_scsi_att_intr_port_show_attr_indx( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_session *se_sess; struct se_portal_group *tpg; ssize_t ret; spin_lock_irq(&nacl->nacl_sess_lock); se_sess = nacl->nacl_sess; if (!se_sess) { 
spin_unlock_irq(&nacl->nacl_sess_lock); return -ENODEV; } tpg = nacl->se_tpg; /* scsiAttIntrPortIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->sess_get_index(se_sess)); spin_unlock_irq(&nacl->nacl_sess_lock); return ret; } DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(indx); static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; ssize_t ret; spin_lock_irq(&nacl->device_list_lock); deve = nacl->device_list[lacl->mapped_lun]; if (!deve->se_lun || !deve->se_lun_acl) { spin_unlock_irq(&nacl->device_list_lock); return -ENODEV; } /* scsiAttIntrPortAuthIntrIdx */ ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); spin_unlock_irq(&nacl->device_list_lock); return ret; } DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx); static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident( struct se_ml_stat_grps *lgrps, char *page) { struct se_lun_acl *lacl = container_of(lgrps, struct se_lun_acl, ml_stat_grps); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_session *se_sess; struct se_portal_group *tpg; ssize_t ret; unsigned char buf[64]; spin_lock_irq(&nacl->nacl_sess_lock); se_sess = nacl->nacl_sess; if (!se_sess) { spin_unlock_irq(&nacl->nacl_sess_lock); return -ENODEV; } tpg = nacl->se_tpg; /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ memset(buf, 0, 64); if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64); ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); spin_unlock_irq(&nacl->nacl_sess_lock); return ret; } DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_ident); CONFIGFS_EATTR_OPS(target_stat_scsi_att_intr_port, se_ml_stat_grps, scsi_att_intr_port_group); static struct configfs_attribute *target_stat_scsi_ath_intr_port_attrs[] = { 
&target_stat_scsi_att_intr_port_inst.attr, &target_stat_scsi_att_intr_port_dev.attr, &target_stat_scsi_att_intr_port_port.attr, &target_stat_scsi_att_intr_port_indx.attr, &target_stat_scsi_att_intr_port_port_auth_indx.attr, &target_stat_scsi_att_intr_port_port_ident.attr, NULL, }; static struct configfs_item_operations target_stat_scsi_att_intr_port_attrib_ops = { .show_attribute = target_stat_scsi_att_intr_port_attr_show, .store_attribute = target_stat_scsi_att_intr_port_attr_store, }; static struct config_item_type target_stat_scsi_att_intr_port_cit = { .ct_item_ops = &target_stat_scsi_att_intr_port_attrib_ops, .ct_attrs = target_stat_scsi_ath_intr_port_attrs, .ct_owner = THIS_MODULE, }; /* * Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to setup * the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c */ void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl) { struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group; config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group, "scsi_auth_intr", &target_stat_scsi_auth_intr_cit); config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group, "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit); ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group; ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group; ml_stat_grp->default_groups[2] = NULL; }
gpl-2.0
Zenfone2-Dev/android_kernel_asus_moorefield
arch/arm/mach-netx/time.c
2594
4026
/*
 * arch/arm/mach-netx/time.c
 *
 * Timer/clockevent/clocksource support for the Hilscher netX SoC,
 * built on two of the chip's GPIO counters.
 *
 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <asm/mach/time.h>
#include <mach/netx-regs.h>

/* GPIO counter 0 drives the clock event device, counter 1 the clocksource. */
#define TIMER_CLOCKEVENT 0
#define TIMER_CLOCKSOURCE 1

/*
 * netx_set_mode - reprogram the clockevent counter for a new mode.
 *
 * The counter is always stopped first; for PERIODIC it is set to reload
 * LATCH and interrupt on every period, for ONESHOT it is left free-running
 * and the expiry is programmed later via netx_set_next_event().  All other
 * modes leave the counter stopped.
 */
static void netx_set_mode(enum clock_event_mode mode,
		struct clock_event_device *clk)
{
	u32 tmode;

	/* disable timer */
	writel(0, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT));

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* auto-reload at LATCH, interrupt each wrap */
		writel(LATCH, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT));
		tmode = NETX_GPIO_COUNTER_CTRL_RST_EN |
			NETX_GPIO_COUNTER_CTRL_IRQ_EN |
			NETX_GPIO_COUNTER_CTRL_RUN;
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		/* no auto-reload; expiry set by netx_set_next_event() */
		writel(0, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT));
		tmode = NETX_GPIO_COUNTER_CTRL_IRQ_EN |
			NETX_GPIO_COUNTER_CTRL_RUN;
		break;

	default:
		WARN(1, "%s: unhandled mode %d\n", __func__, mode);
		/* fall through */
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_RESUME:
		/* leave the counter stopped */
		tmode = 0;
		break;
	}

	writel(tmode, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT));
}

/*
 * netx_set_next_event - arm a one-shot expiry 'evt' timer ticks from now.
 *
 * Writing (0 - evt) to the CURRENT register makes the counter expire after
 * evt ticks (presumably it counts up and interrupts on wrap — the hardware
 * manual should confirm).  Always returns 0 (success).
 */
static int netx_set_next_event(unsigned long evt,
		struct clock_event_device *clk)
{
	writel(0 - evt, NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKEVENT));
	return 0;
}

static struct clock_event_device netx_clockevent = {
	.name = "netx-timer" __stringify(TIMER_CLOCKEVENT),
	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event = netx_set_next_event,
	.set_mode = netx_set_mode,
};

/*
 * IRQ handler for the timer: acknowledge the counter-0 interrupt and
 * forward the tick to the clockevent core.
 */
static irqreturn_t netx_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &netx_clockevent;

	/* acknowledge interrupt */
	writel(COUNTER_BIT(0), NETX_GPIO_IRQ);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction netx_timer_irq = {
	.name = "NetX Timer Tick",
	.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.handler = netx_timer_interrupt,
};

/*
 * Set up timer interrupt: start GPIO counter 0 as the tick/clockevent
 * source and GPIO counter 1 as a free-running 32-bit clocksource, then
 * register both with the timekeeping core.
 */
void __init netx_timer_init(void)
{
	/* disable timer initially */
	writel(0, NETX_GPIO_COUNTER_CTRL(0));

	/* Reset the timer value to zero */
	writel(0, NETX_GPIO_COUNTER_CURRENT(0));
	writel(LATCH, NETX_GPIO_COUNTER_MAX(0));

	/* acknowledge interrupt */
	writel(COUNTER_BIT(0), NETX_GPIO_IRQ);

	/* Enable the interrupt in the specific timer
	 * register and start timer */
	writel(COUNTER_BIT(0), NETX_GPIO_IRQ_ENABLE);
	writel(NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN,
		NETX_GPIO_COUNTER_CTRL(0));

	setup_irq(NETX_IRQ_TIMER0, &netx_timer_irq);

	/* Setup timer one for clocksource */
	writel(0, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKSOURCE));
	writel(0, NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE));
	writel(0xffffffff, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKSOURCE));
	writel(NETX_GPIO_COUNTER_CTRL_RUN,
		NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKSOURCE));

	clocksource_mmio_init(NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE),
		"netx_timer", CLOCK_TICK_RATE, 200, 32,
		clocksource_mmio_readl_up);

	/* with max_delta_ns >= delta2ns(0x800) the system currently runs fine.
	 * Adding some safety ... */
	netx_clockevent.cpumask = cpumask_of(0);
	clockevents_config_and_register(&netx_clockevent, CLOCK_TICK_RATE,
					0xa00, 0xfffffffe);
}
gpl-2.0
jcadduono/nethunter_kernel_noblelte
arch/arm/mach-lpc32xx/timer.c
2594
4780
/*
 * arch/arm/mach-lpc32xx/timer.c
 *
 * Tick/clockevent (timer 0) and clocksource (timer 1) support for the
 * NXP LPC32xx SoC family.
 *
 * Author: Kevin Wells <kevin.wells@nxp.com>
 *
 * Copyright (C) 2009 - 2010 NXP Semiconductors
 * Copyright (C) 2009 Fontys University of Applied Sciences, Eindhoven
 *  Ed Schouten <e.schouten@fontys.nl>
 *  Laurens Timmermans <l.timmermans@fontys.nl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/err.h>
#include <linux/clockchips.h>

#include <asm/mach/time.h>

#include <mach/hardware.h>
#include <mach/platform.h>
#include "common.h"

/*
 * lpc32xx_clkevt_next_event - arm a one-shot expiry 'delta' ticks away.
 *
 * Timer 0 is reset, its prescaler loaded with the delta, and then
 * re-enabled.  The match logic programmed in lpc32xx_timer_init() (match
 * register 0 == 1, with stop+reset-on-match) fires the tick interrupt
 * once the requested interval has elapsed.  Always returns 0 (success).
 */
static int lpc32xx_clkevt_next_event(unsigned long delta,
    struct clock_event_device *dev)
{
	__raw_writel(LPC32XX_TIMER_CNTR_TCR_RESET,
		LPC32XX_TIMER_TCR(LPC32XX_TIMER0_BASE));
	__raw_writel(delta, LPC32XX_TIMER_PR(LPC32XX_TIMER0_BASE));
	__raw_writel(LPC32XX_TIMER_CNTR_TCR_EN,
		LPC32XX_TIMER_TCR(LPC32XX_TIMER0_BASE));

	return 0;
}

/*
 * lpc32xx_clkevt_mode - handle clockevent mode transitions.
 *
 * Only one-shot operation is supported (see .features below), so a
 * request for periodic mode is a bug and just warns.
 */
static void lpc32xx_clkevt_mode(enum clock_event_mode mode,
    struct clock_event_device *dev)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/*
		 * Disable the timer. When using oneshot, we must also
		 * disable the timer to wait for the first call to
		 * set_next_event().
		 */
		__raw_writel(0, LPC32XX_TIMER_TCR(LPC32XX_TIMER0_BASE));
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device lpc32xx_clkevt = {
	.name = "lpc32xx_clkevt",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.rating = 300,
	.set_next_event = lpc32xx_clkevt_next_event,
	.set_mode = lpc32xx_clkevt_mode,
};

/*
 * Tick interrupt handler: acknowledge the timer-0 match interrupt and
 * forward the event to the clockevent core.
 */
static irqreturn_t lpc32xx_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &lpc32xx_clkevt;

	/* Clear match */
	__raw_writel(LPC32XX_TIMER_CNTR_MTCH_BIT(0),
		LPC32XX_TIMER_IR(LPC32XX_TIMER0_BASE));

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction lpc32xx_timer_irq = {
	.name = "LPC32XX Timer Tick",
	.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.handler = lpc32xx_timer_interrupt,
};

/*
 * The clock management driver isn't initialized at this point, so the
 * clocks need to be enabled here manually and then tagged as used in
 * the clock driver initialization
 */
void __init lpc32xx_timer_init(void)
{
	u32 clkrate, pllreg;

	/* Enable timer clock */
	__raw_writel(LPC32XX_CLKPWR_TMRPWMCLK_TIMER0_EN |
		LPC32XX_CLKPWR_TMRPWMCLK_TIMER1_EN,
		LPC32XX_CLKPWR_TIMERS_PWMS_CLK_CTRL_1);

	/*
	 * The clock driver isn't initialized at this point. So determine if
	 * the SYSCLK is driven from the PLL397 or main oscillator and then use
	 * it to compute the PLL frequency and the PCLK divider to get the base
	 * timer rates. This rate is needed to compute the tick rate.
	 */
	if (clk_is_sysclk_mainosc() != 0)
		clkrate = LPC32XX_MAIN_OSC_FREQ;
	else
		clkrate = 397 * LPC32XX_CLOCK_OSC_FREQ;

	/* Get ARM HCLKPLL register and convert it into a frequency */
	pllreg = __raw_readl(LPC32XX_CLKPWR_HCLKPLL_CTRL) & 0x1FFFF;
	clkrate = clk_get_pllrate_from_reg(clkrate, pllreg);

	/* Get PCLK divider and divide ARM PLL clock by it to get timer rate */
	clkrate = clkrate / clk_get_pclk_div();

	/* Initial timer setup */
	__raw_writel(0, LPC32XX_TIMER_TCR(LPC32XX_TIMER0_BASE));
	__raw_writel(LPC32XX_TIMER_CNTR_MTCH_BIT(0),
		LPC32XX_TIMER_IR(LPC32XX_TIMER0_BASE));
	/* match at count 1: interval length is carried by the prescaler */
	__raw_writel(1, LPC32XX_TIMER_MR0(LPC32XX_TIMER0_BASE));
	__raw_writel(LPC32XX_TIMER_CNTR_MCR_MTCH(0) |
		LPC32XX_TIMER_CNTR_MCR_STOP(0) |
		LPC32XX_TIMER_CNTR_MCR_RESET(0),
		LPC32XX_TIMER_MCR(LPC32XX_TIMER0_BASE));

	/* Setup tick interrupt */
	setup_irq(IRQ_LPC32XX_TIMER0, &lpc32xx_timer_irq);

	/* Setup the clockevent structure. */
	lpc32xx_clkevt.cpumask = cpumask_of(0);
	clockevents_config_and_register(&lpc32xx_clkevt, clkrate, 1, -1);

	/* Use timer1 as clock source. */
	__raw_writel(LPC32XX_TIMER_CNTR_TCR_RESET,
		LPC32XX_TIMER_TCR(LPC32XX_TIMER1_BASE));
	__raw_writel(0, LPC32XX_TIMER_PR(LPC32XX_TIMER1_BASE));
	__raw_writel(0, LPC32XX_TIMER_MCR(LPC32XX_TIMER1_BASE));
	__raw_writel(LPC32XX_TIMER_CNTR_TCR_EN,
		LPC32XX_TIMER_TCR(LPC32XX_TIMER1_BASE));

	clocksource_mmio_init(LPC32XX_TIMER_TC(LPC32XX_TIMER1_BASE),
		"lpc32xx_clksrc", clkrate, 300, 32, clocksource_mmio_readl_up);
}
gpl-2.0
sainath24/android_kernel_samsung_smdk4412
arch/m68k/mac/psc.c
2850
4656
/* * Apple Peripheral System Controller (PSC) * * The PSC is used on the AV Macs to control IO functions not handled * by the VIAs (Ethernet, DSP, SCC). * * TO DO: * * Try to figure out what's going on in pIFR5 and pIFR6. There seem to be * persisant interrupt conditions in those registers and I have no idea what * they are. Granted it doesn't affect since we're not enabling any interrupts * on those levels at the moment, but it would be nice to know. I have a feeling * they aren't actually interrupt lines but data lines (to the DSP?) */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/init.h> #include <asm/traps.h> #include <asm/bootinfo.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_psc.h> #define DEBUG_PSC int psc_present; volatile __u8 *psc; irqreturn_t psc_irq(int, void *); /* * Debugging dump, used in various places to see what's going on. */ static void psc_debug_dump(void) { int i; if (!psc_present) return; for (i = 0x30 ; i < 0x70 ; i += 0x10) { printk("PSC #%d: IFR = 0x%02X IER = 0x%02X\n", i >> 4, (int) psc_read_byte(pIFRbase + i), (int) psc_read_byte(pIERbase + i)); } } /* * Try to kill all DMA channels on the PSC. Not sure how this his * supposed to work; this is code lifted from macmace.c and then * expanded to cover what I think are the other 7 channels. */ static void psc_dma_die_die_die(void) { int i; printk("Killing all PSC DMA channels..."); for (i = 0 ; i < 9 ; i++) { psc_write_word(PSC_CTL_BASE + (i << 4), 0x8800); psc_write_word(PSC_CTL_BASE + (i << 4), 0x1000); psc_write_word(PSC_CMD_BASE + (i << 5), 0x1100); psc_write_word(PSC_CMD_BASE + (i << 5) + 0x10, 0x1100); } printk("done!\n"); } /* * Initialize the PSC. For now this just involves shutting down all * interrupt sources using the IERs. 
*/ void __init psc_init(void) { int i; if (macintosh_config->ident != MAC_MODEL_C660 && macintosh_config->ident != MAC_MODEL_Q840) { psc = NULL; psc_present = 0; return; } /* * The PSC is always at the same spot, but using psc * keeps things consistent with the psc_xxxx functions. */ psc = (void *) PSC_BASE; psc_present = 1; printk("PSC detected at %p\n", psc); psc_dma_die_die_die(); #ifdef DEBUG_PSC psc_debug_dump(); #endif /* * Mask and clear all possible interrupts */ for (i = 0x30 ; i < 0x70 ; i += 0x10) { psc_write_byte(pIERbase + i, 0x0F); psc_write_byte(pIFRbase + i, 0x0F); } } /* * Register the PSC interrupt dispatchers for autovector interrupts 3-6. */ void __init psc_register_interrupts(void) { if (request_irq(IRQ_AUTO_3, psc_irq, 0, "psc3", (void *) 0x30)) pr_err("Couldn't register psc%d interrupt\n", 3); if (request_irq(IRQ_AUTO_4, psc_irq, 0, "psc4", (void *) 0x40)) pr_err("Couldn't register psc%d interrupt\n", 4); if (request_irq(IRQ_AUTO_5, psc_irq, 0, "psc5", (void *) 0x50)) pr_err("Couldn't register psc%d interrupt\n", 5); if (request_irq(IRQ_AUTO_6, psc_irq, 0, "psc6", (void *) 0x60)) pr_err("Couldn't register psc%d interrupt\n", 6); } /* * PSC interrupt handler. It's a lot like the VIA interrupt handler. 
*/ irqreturn_t psc_irq(int irq, void *dev_id) { int pIFR = pIFRbase + ((int) dev_id); int pIER = pIERbase + ((int) dev_id); int irq_num; unsigned char irq_bit, events; #ifdef DEBUG_IRQS printk("psc_irq: irq %d pIFR = 0x%02X pIER = 0x%02X\n", irq, (int) psc_read_byte(pIFR), (int) psc_read_byte(pIER)); #endif events = psc_read_byte(pIFR) & psc_read_byte(pIER) & 0xF; if (!events) return IRQ_NONE; irq_num = irq << 3; irq_bit = 1; do { if (events & irq_bit) { psc_write_byte(pIFR, irq_bit); m68k_handle_int(irq_num); } irq_num++; irq_bit <<= 1; } while (events >= irq_bit); return IRQ_HANDLED; } void psc_irq_enable(int irq) { int irq_src = IRQ_SRC(irq); int irq_idx = IRQ_IDX(irq); int pIER = pIERbase + (irq_src << 4); #ifdef DEBUG_IRQUSE printk("psc_irq_enable(%d)\n", irq); #endif psc_write_byte(pIER, (1 << irq_idx) | 0x80); } void psc_irq_disable(int irq) { int irq_src = IRQ_SRC(irq); int irq_idx = IRQ_IDX(irq); int pIER = pIERbase + (irq_src << 4); #ifdef DEBUG_IRQUSE printk("psc_irq_disable(%d)\n", irq); #endif psc_write_byte(pIER, 1 << irq_idx); } void psc_irq_clear(int irq) { int irq_src = IRQ_SRC(irq); int irq_idx = IRQ_IDX(irq); int pIFR = pIERbase + (irq_src << 4); psc_write_byte(pIFR, 1 << irq_idx); } int psc_irq_pending(int irq) { int irq_src = IRQ_SRC(irq); int irq_idx = IRQ_IDX(irq); int pIFR = pIERbase + (irq_src << 4); return psc_read_byte(pIFR) & (1 << irq_idx); }
gpl-2.0
SomethingExplosive/android_kernel_asus_grouper
lib/rwsem-spinlock.c
3874
7103
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for * generic spinlock implementation * * Copyright (c) 2001 David Howells (dhowells@redhat.com). * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de> * - Derived also from comments by Linus */ #include <linux/rwsem.h> #include <linux/sched.h> #include <linux/module.h> struct rwsem_waiter { struct list_head list; struct task_struct *task; unsigned int flags; #define RWSEM_WAITING_FOR_READ 0x00000001 #define RWSEM_WAITING_FOR_WRITE 0x00000002 }; int rwsem_is_locked(struct rw_semaphore *sem) { int ret = 1; unsigned long flags; if (spin_trylock_irqsave(&sem->wait_lock, flags)) { ret = (sem->activity != 0); spin_unlock_irqrestore(&sem->wait_lock, flags); } return ret; } EXPORT_SYMBOL(rwsem_is_locked); /* * initialise the semaphore */ void __init_rwsem(struct rw_semaphore *sem, const char *name, struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* * Make sure we are not reinitializing a held semaphore: */ debug_check_no_locks_freed((void *)sem, sizeof(*sem)); lockdep_init_map(&sem->dep_map, name, key, 0); #endif sem->activity = 0; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); } EXPORT_SYMBOL(__init_rwsem); /* * handle the lock release when processes blocked on it that can now run * - if we come here, then: * - the 'active count' _reached_ zero * - the 'waiting count' is non-zero * - the spinlock must be held by the caller * - woken process blocks are discarded from the list after having task zeroed * - writers are only woken if wakewrite is non-zero */ static inline struct rw_semaphore * __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) { struct rwsem_waiter *waiter; struct task_struct *tsk; int woken; waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); if (!wakewrite) { if (waiter->flags & RWSEM_WAITING_FOR_WRITE) goto out; goto dont_wake_writers; } /* if we are allowed to wake writers try to grant a single write lock * if there's a 
writer at the front of the queue * - we leave the 'waiting count' incremented to signify potential * contention */ if (waiter->flags & RWSEM_WAITING_FOR_WRITE) { sem->activity = -1; list_del(&waiter->list); tsk = waiter->task; /* Don't touch waiter after ->task has been NULLed */ smp_mb(); waiter->task = NULL; wake_up_process(tsk); put_task_struct(tsk); goto out; } /* grant an infinite number of read locks to the front of the queue */ dont_wake_writers: woken = 0; while (waiter->flags & RWSEM_WAITING_FOR_READ) { struct list_head *next = waiter->list.next; list_del(&waiter->list); tsk = waiter->task; smp_mb(); waiter->task = NULL; wake_up_process(tsk); put_task_struct(tsk); woken++; if (list_empty(&sem->wait_list)) break; waiter = list_entry(next, struct rwsem_waiter, list); } sem->activity += woken; out: return sem; } /* * wake a single writer */ static inline struct rw_semaphore * __rwsem_wake_one_writer(struct rw_semaphore *sem) { struct rwsem_waiter *waiter; struct task_struct *tsk; sem->activity = -1; waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); list_del(&waiter->list); tsk = waiter->task; smp_mb(); waiter->task = NULL; wake_up_process(tsk); put_task_struct(tsk); return sem; } /* * get a read lock on the semaphore */ void __sched __down_read(struct rw_semaphore *sem) { struct rwsem_waiter waiter; struct task_struct *tsk; unsigned long flags; spin_lock_irqsave(&sem->wait_lock, flags); if (sem->activity >= 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity++; spin_unlock_irqrestore(&sem->wait_lock, flags); goto out; } tsk = current; set_task_state(tsk, TASK_UNINTERRUPTIBLE); /* set up my own style of waitqueue */ waiter.task = tsk; waiter.flags = RWSEM_WAITING_FOR_READ; get_task_struct(tsk); list_add_tail(&waiter.list, &sem->wait_list); /* we don't need to touch the semaphore struct anymore */ spin_unlock_irqrestore(&sem->wait_lock, flags); /* wait to be given the lock */ for (;;) { if (!waiter.task) break; schedule(); 
set_task_state(tsk, TASK_UNINTERRUPTIBLE); } tsk->state = TASK_RUNNING; out: ; } /* * trylock for reading -- returns 1 if successful, 0 if contention */ int __down_read_trylock(struct rw_semaphore *sem) { unsigned long flags; int ret = 0; spin_lock_irqsave(&sem->wait_lock, flags); if (sem->activity >= 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity++; ret = 1; } spin_unlock_irqrestore(&sem->wait_lock, flags); return ret; } /* * get a write lock on the semaphore * - we increment the waiting count anyway to indicate an exclusive lock */ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) { struct rwsem_waiter waiter; struct task_struct *tsk; unsigned long flags; spin_lock_irqsave(&sem->wait_lock, flags); if (sem->activity == 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity = -1; spin_unlock_irqrestore(&sem->wait_lock, flags); goto out; } tsk = current; set_task_state(tsk, TASK_UNINTERRUPTIBLE); /* set up my own style of waitqueue */ waiter.task = tsk; waiter.flags = RWSEM_WAITING_FOR_WRITE; get_task_struct(tsk); list_add_tail(&waiter.list, &sem->wait_list); /* we don't need to touch the semaphore struct anymore */ spin_unlock_irqrestore(&sem->wait_lock, flags); /* wait to be given the lock */ for (;;) { if (!waiter.task) break; schedule(); set_task_state(tsk, TASK_UNINTERRUPTIBLE); } tsk->state = TASK_RUNNING; out: ; } void __sched __down_write(struct rw_semaphore *sem) { __down_write_nested(sem, 0); } /* * trylock for writing -- returns 1 if successful, 0 if contention */ int __down_write_trylock(struct rw_semaphore *sem) { unsigned long flags; int ret = 0; spin_lock_irqsave(&sem->wait_lock, flags); if (sem->activity == 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity = -1; ret = 1; } spin_unlock_irqrestore(&sem->wait_lock, flags); return ret; } /* * release a read lock on the semaphore */ void __up_read(struct rw_semaphore *sem) { unsigned long flags; spin_lock_irqsave(&sem->wait_lock, flags); if 
(--sem->activity == 0 && !list_empty(&sem->wait_list)) sem = __rwsem_wake_one_writer(sem); spin_unlock_irqrestore(&sem->wait_lock, flags); } /* * release a write lock on the semaphore */ void __up_write(struct rw_semaphore *sem) { unsigned long flags; spin_lock_irqsave(&sem->wait_lock, flags); sem->activity = 0; if (!list_empty(&sem->wait_list)) sem = __rwsem_do_wake(sem, 1); spin_unlock_irqrestore(&sem->wait_lock, flags); } /* * downgrade a write lock into a read lock * - just wake up any readers at the front of the queue */ void __downgrade_write(struct rw_semaphore *sem) { unsigned long flags; spin_lock_irqsave(&sem->wait_lock, flags); sem->activity = 1; if (!list_empty(&sem->wait_list)) sem = __rwsem_do_wake(sem, 0); spin_unlock_irqrestore(&sem->wait_lock, flags); }
gpl-2.0
MH2033/VIPER_KERNEL_KK_D802
drivers/hwmon/wpce775x.c
4642
3898
/* Quanta EC driver for the Winbond Embedded Controller * * Copyright (C) 2009 Quanta Computer Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/slab.h> #define EC_ID_NAME "qci-i2cec" #define EC_BUFFER_LEN 16 #define EC_CMD_POWER_OFF 0xAC #define EC_CMD_RESTART 0xAB static struct i2c_client *g_i2cec_client; /* General structure to hold the driver data */ struct i2cec_drv_data { struct i2c_client *i2cec_client; struct work_struct work; char ec_data[EC_BUFFER_LEN+1]; }; static int __devinit wpce_probe(struct i2c_client *client, const struct i2c_device_id *id); static int __devexit wpce_remove(struct i2c_client *kbd); #ifdef CONFIG_PM static int wpce_suspend(struct device *dev) { return 0; } static int wpce_resume(struct device *dev) { return 0; } #endif #ifdef CONFIG_PM static struct dev_pm_ops wpce_pm_ops = { .suspend = wpce_suspend, .resume = wpce_resume, }; #endif static const struct i2c_device_id wpce_idtable[] = { { EC_ID_NAME, 0 }, { } }; static struct i2c_driver wpce_driver = { .driver = { .owner = THIS_MODULE, .name = EC_ID_NAME, #ifdef CONFIG_PM .pm = &wpce_pm_ops, #endif }, .probe = wpce_probe, .remove = __devexit_p(wpce_remove), .id_table = wpce_idtable, }; static int __devinit wpce_probe(struct i2c_client *client, const struct i2c_device_id *id) { int err = -ENOMEM; struct i2cec_drv_data *context = 0; /* there is no need to call i2c_check_functionality() since it is the client's job to use the interface (I2C vs SMBUS) appropriate for it. 
*/ client->driver = &wpce_driver; context = kzalloc(sizeof(struct i2cec_drv_data), GFP_KERNEL); if (!context) return err; context->i2cec_client = client; g_i2cec_client = client; i2c_set_clientdata(context->i2cec_client, context); return 0; } static int __devexit wpce_remove(struct i2c_client *dev) { struct i2cec_drv_data *context = i2c_get_clientdata(dev); g_i2cec_client = NULL; kfree(context); return 0; } static int __init wpce_init(void) { return i2c_add_driver(&wpce_driver); } static void __exit wpce_exit(void) { i2c_del_driver(&wpce_driver); } struct i2c_client *wpce_get_i2c_client(void) { return g_i2cec_client; } EXPORT_SYMBOL_GPL(wpce_get_i2c_client); void wpce_poweroff(void) { if (g_i2cec_client == NULL) return; i2c_smbus_write_byte(g_i2cec_client, EC_CMD_POWER_OFF); } EXPORT_SYMBOL_GPL(wpce_poweroff); void wpce_restart(void) { if (g_i2cec_client == NULL) return; i2c_smbus_write_byte(g_i2cec_client, EC_CMD_RESTART); } EXPORT_SYMBOL_GPL(wpce_restart); int wpce_i2c_transfer(struct i2c_msg *msg) { if (g_i2cec_client == NULL) return -1; msg->addr = g_i2cec_client->addr; return i2c_transfer(g_i2cec_client->adapter, msg, 1); } EXPORT_SYMBOL_GPL(wpce_i2c_transfer); int wpce_smbus_write_word_data(u8 command, u16 value) { if (g_i2cec_client == NULL) return -1; return i2c_smbus_write_word_data(g_i2cec_client, command, value); } EXPORT_SYMBOL_GPL(wpce_smbus_write_word_data); int wpce_smbus_write_byte_data(u8 command, u8 value) { if (g_i2cec_client == NULL) return -1; return i2c_smbus_write_byte_data(g_i2cec_client, command, value); } EXPORT_SYMBOL_GPL(wpce_smbus_write_byte_data); module_init(wpce_init); module_exit(wpce_exit); MODULE_AUTHOR("Quanta Computer Inc."); MODULE_DESCRIPTION("Quanta Embedded Controller I2C Bridge Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
racer123/android_kernel_samsung_msm8916-caf
drivers/pci/hotplug/acpi_pcihp.c
7458
13447
/* * Common ACPI functions for hot plug platforms * * Copyright (C) 2006 Intel Corporation * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <kristen.c.accardi@intel.com> * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/acpi.h> #include <linux/pci-acpi.h> #include <linux/slab.h> #define MY_NAME "acpi_pcihp" #define dbg(fmt, arg...) do { if (debug_acpi) printk(KERN_DEBUG "%s: %s: " fmt , MY_NAME , __func__ , ## arg); } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg) #define warn(format, arg...) 
printk(KERN_WARNING "%s: " format , MY_NAME , ## arg) #define METHOD_NAME__SUN "_SUN" #define METHOD_NAME_OSHP "OSHP" static bool debug_acpi; static acpi_status decode_type0_hpx_record(union acpi_object *record, struct hotplug_params *hpx) { int i; union acpi_object *fields = record->package.elements; u32 revision = fields[1].integer.value; switch (revision) { case 1: if (record->package.count != 6) return AE_ERROR; for (i = 2; i < 6; i++) if (fields[i].type != ACPI_TYPE_INTEGER) return AE_ERROR; hpx->t0 = &hpx->type0_data; hpx->t0->revision = revision; hpx->t0->cache_line_size = fields[2].integer.value; hpx->t0->latency_timer = fields[3].integer.value; hpx->t0->enable_serr = fields[4].integer.value; hpx->t0->enable_perr = fields[5].integer.value; break; default: printk(KERN_WARNING "%s: Type 0 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } static acpi_status decode_type1_hpx_record(union acpi_object *record, struct hotplug_params *hpx) { int i; union acpi_object *fields = record->package.elements; u32 revision = fields[1].integer.value; switch (revision) { case 1: if (record->package.count != 5) return AE_ERROR; for (i = 2; i < 5; i++) if (fields[i].type != ACPI_TYPE_INTEGER) return AE_ERROR; hpx->t1 = &hpx->type1_data; hpx->t1->revision = revision; hpx->t1->max_mem_read = fields[2].integer.value; hpx->t1->avg_max_split = fields[3].integer.value; hpx->t1->tot_max_split = fields[4].integer.value; break; default: printk(KERN_WARNING "%s: Type 1 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } static acpi_status decode_type2_hpx_record(union acpi_object *record, struct hotplug_params *hpx) { int i; union acpi_object *fields = record->package.elements; u32 revision = fields[1].integer.value; switch (revision) { case 1: if (record->package.count != 18) return AE_ERROR; for (i = 2; i < 18; i++) if (fields[i].type != ACPI_TYPE_INTEGER) return AE_ERROR; hpx->t2 = &hpx->type2_data; 
hpx->t2->revision = revision; hpx->t2->unc_err_mask_and = fields[2].integer.value; hpx->t2->unc_err_mask_or = fields[3].integer.value; hpx->t2->unc_err_sever_and = fields[4].integer.value; hpx->t2->unc_err_sever_or = fields[5].integer.value; hpx->t2->cor_err_mask_and = fields[6].integer.value; hpx->t2->cor_err_mask_or = fields[7].integer.value; hpx->t2->adv_err_cap_and = fields[8].integer.value; hpx->t2->adv_err_cap_or = fields[9].integer.value; hpx->t2->pci_exp_devctl_and = fields[10].integer.value; hpx->t2->pci_exp_devctl_or = fields[11].integer.value; hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value; hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value; hpx->t2->sec_unc_err_sever_and = fields[14].integer.value; hpx->t2->sec_unc_err_sever_or = fields[15].integer.value; hpx->t2->sec_unc_err_mask_and = fields[16].integer.value; hpx->t2->sec_unc_err_mask_or = fields[17].integer.value; break; default: printk(KERN_WARNING "%s: Type 2 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) { acpi_status status; struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *package, *record, *fields; u32 type; int i; /* Clear the return buffer with zeros */ memset(hpx, 0, sizeof(struct hotplug_params)); status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer); if (ACPI_FAILURE(status)) return status; package = (union acpi_object *)buffer.pointer; if (package->type != ACPI_TYPE_PACKAGE) { status = AE_ERROR; goto exit; } for (i = 0; i < package->package.count; i++) { record = &package->package.elements[i]; if (record->type != ACPI_TYPE_PACKAGE) { status = AE_ERROR; goto exit; } fields = record->package.elements; if (fields[0].type != ACPI_TYPE_INTEGER || fields[1].type != ACPI_TYPE_INTEGER) { status = AE_ERROR; goto exit; } type = fields[0].integer.value; switch (type) { case 0: status = decode_type0_hpx_record(record, hpx); if 
(ACPI_FAILURE(status)) goto exit; break; case 1: status = decode_type1_hpx_record(record, hpx); if (ACPI_FAILURE(status)) goto exit; break; case 2: status = decode_type2_hpx_record(record, hpx); if (ACPI_FAILURE(status)) goto exit; break; default: printk(KERN_ERR "%s: Type %d record not supported\n", __func__, type); status = AE_ERROR; goto exit; } } exit: kfree(buffer.pointer); return status; } static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) { acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *package, *fields; int i; memset(hpp, 0, sizeof(struct hotplug_params)); status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); if (ACPI_FAILURE(status)) return status; package = (union acpi_object *) buffer.pointer; if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 4) { status = AE_ERROR; goto exit; } fields = package->package.elements; for (i = 0; i < 4; i++) { if (fields[i].type != ACPI_TYPE_INTEGER) { status = AE_ERROR; goto exit; } } hpp->t0 = &hpp->type0_data; hpp->t0->revision = 1; hpp->t0->cache_line_size = fields[0].integer.value; hpp->t0->latency_timer = fields[1].integer.value; hpp->t0->enable_serr = fields[2].integer.value; hpp->t0->enable_perr = fields[3].integer.value; exit: kfree(buffer.pointer); return status; } /* acpi_run_oshp - get control of hotplug from the firmware * * @handle - the handle of the hotplug controller. 
*/ static acpi_status acpi_run_oshp(acpi_handle handle) { acpi_status status; struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); /* run OSHP */ status = acpi_evaluate_object(handle, METHOD_NAME_OSHP, NULL, NULL); if (ACPI_FAILURE(status)) if (status != AE_NOT_FOUND) printk(KERN_ERR "%s:%s OSHP fails=0x%x\n", __func__, (char *)string.pointer, status); else dbg("%s:%s OSHP not found\n", __func__, (char *)string.pointer); else pr_debug("%s:%s OSHP passes\n", __func__, (char *)string.pointer); kfree(string.pointer); return status; } /* pci_get_hp_params * * @dev - the pci_dev for which we want parameters * @hpp - allocated by the caller */ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) { acpi_status status; acpi_handle handle, phandle; struct pci_bus *pbus; handle = NULL; for (pbus = dev->bus; pbus; pbus = pbus->parent) { handle = acpi_pci_get_bridge_handle(pbus); if (handle) break; } /* * _HPP settings apply to all child buses, until another _HPP is * encountered. If we don't find an _HPP for the input pci dev, * look for it in the parent device scope since that would apply to * this pci dev. */ while (handle) { status = acpi_run_hpx(handle, hpp); if (ACPI_SUCCESS(status)) return 0; status = acpi_run_hpp(handle, hpp); if (ACPI_SUCCESS(status)) return 0; if (acpi_is_root_bridge(handle)) break; status = acpi_get_parent(handle, &phandle); if (ACPI_FAILURE(status)) break; handle = phandle; } return -ENODEV; } EXPORT_SYMBOL_GPL(pci_get_hp_params); /** * acpi_get_hp_hw_control_from_firmware * @dev: the pci_dev of the bridge that has a hotplug controller * @flags: requested control bits for _OSC * * Attempt to take hotplug control from firmware. 
*/ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) { acpi_status status; acpi_handle chandle, handle; struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; flags &= OSC_SHPC_NATIVE_HP_CONTROL; if (!flags) { err("Invalid flags %u specified!\n", flags); return -EINVAL; } /* * Per PCI firmware specification, we should run the ACPI _OSC * method to get control of hotplug hardware before using it. If * an _OSC is missing, we look for an OSHP to do the same thing. * To handle different BIOS behavior, we look for _OSC on a root * bridge preferentially (according to PCI fw spec). Later for * OSHP within the scope of the hotplug controller and its parents, * up to the host bridge under which this controller exists. */ handle = acpi_find_root_bridge_handle(pdev); if (handle) { acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); dbg("Trying to get hotplug control for %s\n", (char *)string.pointer); status = acpi_pci_osc_control_set(handle, &flags, flags); if (ACPI_SUCCESS(status)) goto got_one; if (status == AE_SUPPORT) goto no_control; kfree(string.pointer); string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; } handle = DEVICE_ACPI_HANDLE(&pdev->dev); if (!handle) { /* * This hotplug controller was not listed in the ACPI name * space at all. Try to get acpi handle of parent pci bus. 
*/ struct pci_bus *pbus; for (pbus = pdev->bus; pbus; pbus = pbus->parent) { handle = acpi_pci_get_bridge_handle(pbus); if (handle) break; } } while (handle) { acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); dbg("Trying to get hotplug control for %s \n", (char *)string.pointer); status = acpi_run_oshp(handle); if (ACPI_SUCCESS(status)) goto got_one; if (acpi_is_root_bridge(handle)) break; chandle = handle; status = acpi_get_parent(chandle, &handle); if (ACPI_FAILURE(status)) break; } no_control: dbg("Cannot get control of hotplug hardware for pci %s\n", pci_name(pdev)); kfree(string.pointer); return -ENODEV; got_one: dbg("Gained control for hotplug HW for pci %s (%s)\n", pci_name(pdev), (char *)string.pointer); kfree(string.pointer); return 0; } EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); static int pcihp_is_ejectable(acpi_handle handle) { acpi_status status; acpi_handle tmp; unsigned long long removable; status = acpi_get_handle(handle, "_ADR", &tmp); if (ACPI_FAILURE(status)) return 0; status = acpi_get_handle(handle, "_EJ0", &tmp); if (ACPI_SUCCESS(status)) return 1; status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable); if (ACPI_SUCCESS(status) && removable) return 1; return 0; } /** * acpi_pcihp_check_ejectable - check if handle is ejectable ACPI PCI slot * @pbus: the PCI bus of the PCI slot corresponding to 'handle' * @handle: ACPI handle to check * * Return 1 if handle is ejectable PCI slot, 0 otherwise. 
*/ int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle) { acpi_handle bridge_handle, parent_handle; if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus))) return 0; if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle)))) return 0; if (bridge_handle != parent_handle) return 0; return pcihp_is_ejectable(handle); } EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable); static acpi_status check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv) { int *found = (int *)context; if (pcihp_is_ejectable(handle)) { *found = 1; return AE_CTRL_TERMINATE; } return AE_OK; } /** * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots * @handle - handle of the PCI bus to scan * * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise. */ int acpi_pci_detect_ejectable(acpi_handle handle) { int found = 0; if (!handle) return found; acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, check_hotplug, NULL, (void *)&found, NULL); return found; } EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable); module_param(debug_acpi, bool, 0644); MODULE_PARM_DESC(debug_acpi, "Debugging mode for ACPI enabled or not");
gpl-2.0
bedalus/moggy
drivers/macintosh/therm_pm72.c
8226
63960
/* * Device driver for the thermostats & fan controller of the * Apple G5 "PowerMac7,2" desktop machines. * * (c) Copyright IBM Corp. 2003-2004 * * Maintained by: Benjamin Herrenschmidt * <benh@kernel.crashing.org> * * * The algorithm used is the PID control algorithm, used the same * way the published Darwin code does, using the same values that * are present in the Darwin 7.0 snapshot property lists. * * As far as the CPUs control loops are concerned, I use the * calibration & PID constants provided by the EEPROM, * I do _not_ embed any value from the property lists, as the ones * provided by Darwin 7.0 seem to always have an older version that * what I've seen on the actual computers. * It would be interesting to verify that though. Darwin has a * version code of 1.0.0d11 for all control loops it seems, while * so far, the machines EEPROMs contain a dataset versioned 1.0.0f * * Darwin doesn't provide source to all parts, some missing * bits like the AppleFCU driver or the actual scale of some * of the values returned by sensors had to be "guessed" some * way... or based on what Open Firmware does. * * I didn't yet figure out how to get the slots power consumption * out of the FCU, so that part has not been implemented yet and * the slots fan is set to a fixed 50% PWM, hoping this value is * safe enough ... * * Note: I have observed strange oscillations of the CPU control * loop on a dual G5 here. When idle, the CPU exhaust fan tend to * oscillates slowly (over several minutes) between the minimum * of 300RPMs and approx. 1000 RPMs. I don't know what is causing * this, it could be some incorrect constant or an error in the * way I ported the algorithm, or it could be just normal. I * don't have full understanding on the way Apple tweaked the PID * algorithm for the CPU control, it is definitely not a standard * implementation... 
* * TODO: - Check MPU structure version/signature * - Add things like /sbin/overtemp for non-critical * overtemp conditions so userland can take some policy * decisions, like slowing down CPUs * - Deal with fan and i2c failures in a better way * - Maybe do a generic PID based on params used for * U3 and Drives ? Definitely need to factor code a bit * better... also make sensor detection more robust using * the device-tree to probe for them * - Figure out how to get the slots consumption and set the * slots fan accordingly * * History: * * Nov. 13, 2003 : 0.5 * - First release * * Nov. 14, 2003 : 0.6 * - Read fan speed from FCU, low level fan routines now deal * with errors & check fan status, though higher level don't * do much. * - Move a bunch of definitions to .h file * * Nov. 18, 2003 : 0.7 * - Fix build on ppc64 kernel * - Move back statics definitions to .c file * - Avoid calling schedule_timeout with a negative number * * Dec. 18, 2003 : 0.8 * - Fix typo when reading back fan speed on 2 CPU machines * * Mar. 11, 2004 : 0.9 * - Rework code accessing the ADC chips, make it more robust and * closer to the chip spec. Also make sure it is configured properly, * I've seen yet unexplained cases where on startup, I would have stale * values in the configuration register * - Switch back to use of target fan speed for PID, thus lowering * pressure on i2c * * Oct. 20, 2004 : 1.1 * - Add device-tree lookup for fan IDs, should detect liquid cooling * pumps when present * - Enable driver for PowerMac7,3 machines * - Split the U3/Backside cooling on U3 & U3H versions as Darwin does * - Add new CPU cooling algorithm for machines with liquid cooling * - Workaround for some PowerMac7,3 with empty "fan" node in the devtree * - Fix a signed/unsigned compare issue in some PID loops * * Mar. 
10, 2005 : 1.2 * - Add basic support for Xserve G5 * - Retrieve pumps min/max from EEPROM image in device-tree (broken) * - Use min/max macros here or there * - Latest darwin updated U3H min fan speed to 20% PWM * * July. 06, 2006 : 1.3 * - Fix setting of RPM fans on Xserve G5 (they were going too fast) * - Add missing slots fan control loop for Xserve G5 * - Lower fixed slots fan speed from 50% to 40% on desktop G5s. We * still can't properly implement the control loop for these, so let's * reduce the noise a little bit, it appears that 40% still gives us * a pretty good air flow * - Add code to "tickle" the FCU regulary so it doesn't think that * we are gone while in fact, the machine just didn't need any fan * speed change lately * */ #include <linux/types.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/reboot.h> #include <linux/kmod.h> #include <linux/i2c.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> #include <asm/macio.h> #include "therm_pm72.h" #define VERSION "1.3" #undef DEBUG #ifdef DEBUG #define DBG(args...) printk(args) #else #define DBG(args...) 
do { } while(0) #endif /* * Driver statics */ static struct platform_device * of_dev; static struct i2c_adapter * u3_0; static struct i2c_adapter * u3_1; static struct i2c_adapter * k2; static struct i2c_client * fcu; static struct cpu_pid_state processor_state[2]; static struct basckside_pid_params backside_params; static struct backside_pid_state backside_state; static struct drives_pid_state drives_state; static struct dimm_pid_state dimms_state; static struct slots_pid_state slots_state; static int state; static int cpu_count; static int cpu_pid_type; static struct task_struct *ctrl_task; static struct completion ctrl_complete; static int critical_state; static int rackmac; static s32 dimm_output_clamp; static int fcu_rpm_shift; static int fcu_tickle_ticks; static DEFINE_MUTEX(driver_lock); /* * We have 3 types of CPU PID control. One is "split" old style control * for intake & exhaust fans, the other is "combined" control for both * CPUs that also deals with the pumps when present. To be "compatible" * with OS X at this point, we only use "COMBINED" on the machines that * are identified as having the pumps (though that identification is at * least dodgy). Ultimately, we could probably switch completely to this * algorithm provided we hack it to deal with the UP case */ #define CPU_PID_TYPE_SPLIT 0 #define CPU_PID_TYPE_COMBINED 1 #define CPU_PID_TYPE_RACKMAC 2 /* * This table describes all fans in the FCU. The "id" and "type" values * are defaults valid for all earlier machines. 
Newer machines will
 * eventually override the table content based on the device-tree */
struct fcu_fan_table
{
	char*	loc;	/* location code */
	int	type;	/* 0 = rpm, 1 = pwm, 2 = pump */
	int	id;	/* id or -1 */
};

#define FCU_FAN_RPM		0
#define FCU_FAN_PWM		1
#define FCU_FAN_ABSENT_ID	-1
#define FCU_FAN_COUNT		ARRAY_SIZE(fcu_fans)

/* Static fan table: maps the *_INDEX constants to an FCU location string,
 * a control type (RPM vs PWM) and the FCU channel ID.  Entries created
 * with FCU_FAN_ABSENT_ID (pumps, Xserve fans) are only activated when
 * found in the device-tree.
 */
struct fcu_fan_table fcu_fans[] = {
	[BACKSIDE_FAN_PWM_INDEX] = {
		.loc	= "BACKSIDE,SYS CTRLR FAN",
		.type	= FCU_FAN_PWM,
		.id	= BACKSIDE_FAN_PWM_DEFAULT_ID,
	},
	[DRIVES_FAN_RPM_INDEX] = {
		.loc	= "DRIVE BAY",
		.type	= FCU_FAN_RPM,
		.id	= DRIVES_FAN_RPM_DEFAULT_ID,
	},
	[SLOTS_FAN_PWM_INDEX] = {
		.loc	= "SLOT,PCI FAN",
		.type	= FCU_FAN_PWM,
		.id	= SLOTS_FAN_PWM_DEFAULT_ID,
	},
	[CPUA_INTAKE_FAN_RPM_INDEX] = {
		.loc	= "CPU A INTAKE",
		.type	= FCU_FAN_RPM,
		.id	= CPUA_INTAKE_FAN_RPM_DEFAULT_ID,
	},
	[CPUA_EXHAUST_FAN_RPM_INDEX] = {
		.loc	= "CPU A EXHAUST",
		.type	= FCU_FAN_RPM,
		.id	= CPUA_EXHAUST_FAN_RPM_DEFAULT_ID,
	},
	[CPUB_INTAKE_FAN_RPM_INDEX] = {
		.loc	= "CPU B INTAKE",
		.type	= FCU_FAN_RPM,
		.id	= CPUB_INTAKE_FAN_RPM_DEFAULT_ID,
	},
	[CPUB_EXHAUST_FAN_RPM_INDEX] = {
		.loc	= "CPU B EXHAUST",
		.type	= FCU_FAN_RPM,
		.id	= CPUB_EXHAUST_FAN_RPM_DEFAULT_ID,
	},
	/* pumps aren't present by default, have to be looked up in the
	 * device-tree
	 */
	[CPUA_PUMP_RPM_INDEX] = {
		.loc	= "CPU A PUMP",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPUB_PUMP_RPM_INDEX] = {
		.loc	= "CPU B PUMP",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	/* Xserve fans */
	[CPU_A1_FAN_RPM_INDEX] = {
		.loc	= "CPU A 1",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_A2_FAN_RPM_INDEX] = {
		.loc	= "CPU A 2",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_A3_FAN_RPM_INDEX] = {
		.loc	= "CPU A 3",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_B1_FAN_RPM_INDEX] = {
		.loc	= "CPU B 1",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_B2_FAN_RPM_INDEX] = {
		.loc	= "CPU B 2",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_B3_FAN_RPM_INDEX] = {
		.loc	= "CPU B 3",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
};

static struct i2c_driver therm_pm72_driver;

/*
 * Utility function to create an i2c_client structure and
 * attach it to one of u3 adapters
 */
static struct i2c_client *attach_i2c_chip(int id, const char *name)
{
	struct i2c_client *clt;
	struct i2c_adapter *adap;
	struct i2c_board_info info;

	/* Bits 8/9 of the chip id select the hosting i2c bus;
	 * the remaining bits carry the 8-bit i2c address (addr << 1).
	 */
	if (id & 0x200)
		adap = k2;
	else if (id & 0x100)
		adap = u3_1;
	else
		adap = u3_0;
	if (adap == NULL)
		return NULL;

	memset(&info, 0, sizeof(struct i2c_board_info));
	info.addr = (id >> 1) & 0x7f;
	strlcpy(info.type, "therm_pm72", I2C_NAME_SIZE);
	clt = i2c_new_device(adap, &info);
	if (!clt) {
		printk(KERN_ERR "therm_pm72: Failed to attach to i2c ID 0x%x\n", id);
		return NULL;
	}

	/*
	 * Let i2c-core delete that device on driver removal.
	 * This is safe because i2c-core holds the core_lock mutex for us.
	 */
	list_add_tail(&clt->detected, &therm_pm72_driver.clients);
	return clt;
}

/*
 * Here are the i2c chip access wrappers
 */

static void initialize_adc(struct cpu_pid_state *state)
{
	int rc;
	u8 buf[2];

	/* Read ADC the configuration register and cache it. We
	 * also make sure Config2 contains proper values, I've seen
	 * cases where we got stale garbage in there, thus preventing
	 * proper reading of conv. values
	 */

	/* Clear Config2 */
	buf[0] = 5;
	buf[1] = 0;
	i2c_master_send(state->monitor, buf, 2);

	/* Read & cache Config1 */
	buf[0] = 1;
	rc = i2c_master_send(state->monitor, buf, 1);
	if (rc > 0) {
		rc = i2c_master_recv(state->monitor, buf, 1);
		if (rc > 0) {
			state->adc_config = buf[0];
			DBG("ADC config reg: %02x\n", state->adc_config);
			/* Disable shutdown mode */
			state->adc_config &= 0xfe;
			buf[0] = 1;
			buf[1] = state->adc_config;
			rc = i2c_master_send(state->monitor, buf, 2);
		}
	}
	if (rc <= 0)
		printk(KERN_ERR "therm_pm72: Error reading ADC config"
		       " register !\n");
}

/* Read one channel of the SMU ADC attached to a CPU supply monitor.
 * Selects the channel via the cached config register, waits for the
 * conversion and returns the 10-bit result, or -1 after 10 failed
 * retries.
 */
static int read_smon_adc(struct cpu_pid_state *state, int chan)
{
	int rc, data, tries = 0;
	u8 buf[2];

	for (;;) {
		/* Set channel */
		buf[0] = 1;
		buf[1] = (state->adc_config & 0x1f) | (chan << 5);
		rc = i2c_master_send(state->monitor, buf, 2);
		if (rc <= 0)
			goto error;
		/* Wait for convertion */
		msleep(1);
		/* Switch to data register */
		buf[0] = 4;
		rc = i2c_master_send(state->monitor, buf, 1);
		if (rc <= 0)
			goto error;
		/* Read result */
		rc = i2c_master_recv(state->monitor, buf, 2);
		if (rc < 0)
			goto error;
		/* 16-bit big-endian result, data left-justified: keep the
		 * top 10 bits
		 */
		data = ((u16)buf[0]) << 8 | (u16)buf[1];
		return data >> 6;
	error:
		DBG("Error reading ADC, retrying...\n");
		if (++tries > 10) {
			printk(KERN_ERR "therm_pm72: Error reading ADC !\n");
			return -1;
		}
		msleep(10);
	}
}

/* Read one byte register of the Xserve DIMM LM87 monitor, retrying up
 * to 10 times on i2c failure.  Returns the register value or -1.
 */
static int read_lm87_reg(struct i2c_client * chip, int reg)
{
	int rc, tries = 0;
	u8 buf;

	for (;;) {
		/* Set address */
		buf = (u8)reg;
		rc = i2c_master_send(chip, &buf, 1);
		if (rc <= 0)
			goto error;
		rc = i2c_master_recv(chip, &buf, 1);
		if (rc <= 0)
			goto error;
		return (int)buf;
	error:
		DBG("Error reading LM87, retrying...\n");
		if (++tries > 10) {
			printk(KERN_ERR "therm_pm72: Error reading LM87 !\n");
			return -1;
		}
		msleep(10);
	}
}

/* Read "nb" bytes from FCU register "reg".  The address write and the
 * data read are each retried up to 100 times (the FCU can NAK while
 * busy).  Returns the number of bytes read or a negative errno.
 */
static int fan_read_reg(int reg, unsigned char *buf, int nb)
{
	int tries, nr, nw;

	buf[0] = reg;
	tries = 0;
	for (;;) {
		nw = i2c_master_send(fcu, buf, 1);
		if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
			break;
		msleep(10);
		++tries;
	}
	if (nw <= 0) {
		printk(KERN_ERR "Failure writing address 
to FCU: %d", nw); return -EIO; } tries = 0; for (;;) { nr = i2c_master_recv(fcu, buf, nb); if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100) break; msleep(10); ++tries; } if (nr <= 0) printk(KERN_ERR "Failure reading data from FCU: %d", nw); return nr; } static int fan_write_reg(int reg, const unsigned char *ptr, int nb) { int tries, nw; unsigned char buf[16]; buf[0] = reg; memcpy(buf+1, ptr, nb); ++nb; tries = 0; for (;;) { nw = i2c_master_send(fcu, buf, nb); if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100) break; msleep(10); ++tries; } if (nw < 0) printk(KERN_ERR "Failure writing to FCU: %d", nw); return nw; } static int start_fcu(void) { unsigned char buf = 0xff; int rc; rc = fan_write_reg(0xe, &buf, 1); if (rc < 0) return -EIO; rc = fan_write_reg(0x2e, &buf, 1); if (rc < 0) return -EIO; rc = fan_read_reg(0, &buf, 1); if (rc < 0) return -EIO; fcu_rpm_shift = (buf == 1) ? 2 : 3; printk(KERN_DEBUG "FCU Initialized, RPM fan shift is %d\n", fcu_rpm_shift); return 0; } static int set_rpm_fan(int fan_index, int rpm) { unsigned char buf[2]; int rc, id, min, max; if (fcu_fans[fan_index].type != FCU_FAN_RPM) return -EINVAL; id = fcu_fans[fan_index].id; if (id == FCU_FAN_ABSENT_ID) return -EINVAL; min = 2400 >> fcu_rpm_shift; max = 56000 >> fcu_rpm_shift; if (rpm < min) rpm = min; else if (rpm > max) rpm = max; buf[0] = rpm >> (8 - fcu_rpm_shift); buf[1] = rpm << fcu_rpm_shift; rc = fan_write_reg(0x10 + (id * 2), buf, 2); if (rc < 0) return -EIO; return 0; } static int get_rpm_fan(int fan_index, int programmed) { unsigned char failure; unsigned char active; unsigned char buf[2]; int rc, id, reg_base; if (fcu_fans[fan_index].type != FCU_FAN_RPM) return -EINVAL; id = fcu_fans[fan_index].id; if (id == FCU_FAN_ABSENT_ID) return -EINVAL; rc = fan_read_reg(0xb, &failure, 1); if (rc != 1) return -EIO; if ((failure & (1 << id)) != 0) return -EFAULT; rc = fan_read_reg(0xd, &active, 1); if (rc != 1) return -EIO; if ((active & (1 << id)) == 0) return -ENXIO; /* 
Programmed value or real current speed */
	reg_base = programmed ? 0x10 : 0x11;
	rc = fan_read_reg(reg_base + (id * 2), buf, 2);
	if (rc != 2)
		return -EIO;

	/* Undo the big-endian packing done by set_rpm_fan() */
	return (buf[0] << (8 - fcu_rpm_shift)) | buf[1] >> fcu_rpm_shift;
}

/* Program the duty cycle (10..100 %) of a PWM-controlled fan; the
 * percentage is rescaled to the FCU's 0..255 register range.
 */
static int set_pwm_fan(int fan_index, int pwm)
{
	unsigned char buf[2];
	int rc, id;

	if (fcu_fans[fan_index].type != FCU_FAN_PWM)
		return -EINVAL;
	id = fcu_fans[fan_index].id;
	if (id == FCU_FAN_ABSENT_ID)
		return -EINVAL;

	if (pwm < 10)
		pwm = 10;
	else if (pwm > 100)
		pwm = 100;
	pwm = (pwm * 2559) / 1000;
	buf[0] = pwm;
	rc = fan_write_reg(0x30 + (id * 2), buf, 1);
	if (rc < 0)
		return rc;
	return 0;
}

/* Read back a PWM fan's programmed duty cycle as a percentage.
 * Returns -EFAULT on FCU-reported failure, -ENXIO if inactive.
 */
static int get_pwm_fan(int fan_index)
{
	unsigned char failure;
	unsigned char active;
	unsigned char buf[2];
	int rc, id;

	if (fcu_fans[fan_index].type != FCU_FAN_PWM)
		return -EINVAL;
	id = fcu_fans[fan_index].id;
	if (id == FCU_FAN_ABSENT_ID)
		return -EINVAL;

	/* Check failure and active status bits for this fan id */
	rc = fan_read_reg(0x2b, &failure, 1);
	if (rc != 1)
		return -EIO;
	if ((failure & (1 << id)) != 0)
		return -EFAULT;
	rc = fan_read_reg(0x2d, &active, 1);
	if (rc != 1)
		return -EIO;
	if ((active & (1 << id)) == 0)
		return -ENXIO;

	/* Programmed value or real current speed */
	rc = fan_read_reg(0x30 + (id * 2), buf, 1);
	if (rc != 1)
		return -EIO;

	return (buf[0] * 1000) / 2559;
}

/* Periodically rewrite the slots fan setting so the FCU sees activity
 * from us (keep-alive).
 */
static void tickle_fcu(void)
{
	int pwm;

	pwm = get_pwm_fan(SLOTS_FAN_PWM_INDEX);

	DBG("FCU Tickle, slots fan is: %d\n", pwm);
	if (pwm < 0)
		pwm = 100;

	if (!rackmac) {
		pwm = SLOTS_FAN_DEFAULT_PWM;
	} else if (pwm < SLOTS_PID_OUTPUT_MIN)
		pwm = SLOTS_PID_OUTPUT_MIN;

	/* That is hopefully enough to make the FCU happy */
	set_pwm_fan(SLOTS_FAN_PWM_INDEX, pwm);
}

/*
 * Utility routine to read the CPU calibration EEPROM data
 * from the device-tree
 */
static int read_eeprom(int cpu, struct mpu_data *out)
{
	struct device_node *np;
	char nodename[64];
	const u8 *data;
	int len;

	/* prom.c routine for finding a node by path is a bit brain dead
	 * and requires exact @xxx unit numbers. This is a bit ugly but
	 * will work for these machines
	 */
	sprintf(nodename, "/u3@0,f8000000/i2c@f8001000/cpuid@a%d", cpu ? 2 : 0);
	np = of_find_node_by_path(nodename);
	if (np == NULL) {
		printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid node from device-tree\n");
		return -ENODEV;
	}
	data = of_get_property(np, "cpuid", &len);
	if (data == NULL) {
		printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid property from device-tree\n");
		of_node_put(np);
		return -ENODEV;
	}
	memcpy(out, data, sizeof(struct mpu_data));
	of_node_put(np);

	return 0;
}

/* Derive the liquid-cooling pump min/max speeds from the CPU 0 EEPROM
 * data (4 u16s overlaying processor_part_num), falling back to the
 * compile-time defaults when the EEPROM content looks bogus.
 */
static void fetch_cpu_pumps_minmax(void)
{
	struct cpu_pid_state *state0 = &processor_state[0];
	struct cpu_pid_state *state1 = &processor_state[1];
	u16 pump_min = 0, pump_max = 0xffff;
	u16 tmp[4];

	/* Try to fetch pumps min/max infos from eeprom */
	memcpy(&tmp, &state0->mpu.processor_part_num, 8);
	if (tmp[0] != 0xffff && tmp[1] != 0xffff) {
		pump_min = max(pump_min, tmp[0]);
		pump_max = min(pump_max, tmp[1]);
	}
	if (tmp[2] != 0xffff && tmp[3] != 0xffff) {
		pump_min = max(pump_min, tmp[2]);
		pump_max = min(pump_max, tmp[3]);
	}

	/* Double check the values, this _IS_ needed as the EEPROM on
	 * some dual 2.5Ghz G5s seem, at least, to have both min & max
	 * set to the same value ... (grrrr)
	 */
	if (pump_min == pump_max || pump_min == 0 || pump_max == 0xffff) {
		pump_min = CPU_PUMP_OUTPUT_MIN;
		pump_max = CPU_PUMP_OUTPUT_MAX;
	}

	state0->pump_min = state1->pump_min = pump_min;
	state0->pump_max = state1->pump_max = pump_max;
}

/*
 * Now, unfortunately, sysfs doesn't give us a nice void * we could
 * pass around to the attribute functions, so we don't really have
 * choice but implement a bunch of them...
 *
 * That sucks a bit, we take the lock because FIX32TOPRINT evaluates
 * the input twice... I accept patches :)
 */
#define BUILD_SHOW_FUNC_FIX(name, data)				\
static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf)	\
{								\
	ssize_t r;						\
	mutex_lock(&driver_lock);				\
	r = sprintf(buf, "%d.%03d", FIX32TOPRINT(data));	\
	mutex_unlock(&driver_lock);				\
	return r;						\
}

#define BUILD_SHOW_FUNC_INT(name, data)				\
static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf)	\
{								\
	return sprintf(buf, "%d", data);			\
}

BUILD_SHOW_FUNC_FIX(cpu0_temperature, processor_state[0].last_temp)
BUILD_SHOW_FUNC_FIX(cpu0_voltage, processor_state[0].voltage)
BUILD_SHOW_FUNC_FIX(cpu0_current, processor_state[0].current_a)
BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, processor_state[0].rpm)
BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, processor_state[0].intake_rpm)

BUILD_SHOW_FUNC_FIX(cpu1_temperature, processor_state[1].last_temp)
BUILD_SHOW_FUNC_FIX(cpu1_voltage, processor_state[1].voltage)
BUILD_SHOW_FUNC_FIX(cpu1_current, processor_state[1].current_a)
BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, processor_state[1].rpm)
BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, processor_state[1].intake_rpm)

BUILD_SHOW_FUNC_FIX(backside_temperature, backside_state.last_temp)
BUILD_SHOW_FUNC_INT(backside_fan_pwm, backside_state.pwm)

BUILD_SHOW_FUNC_FIX(drives_temperature, drives_state.last_temp)
BUILD_SHOW_FUNC_INT(drives_fan_rpm, drives_state.rpm)

BUILD_SHOW_FUNC_FIX(slots_temperature, slots_state.last_temp)
BUILD_SHOW_FUNC_INT(slots_fan_pwm, slots_state.pwm)

BUILD_SHOW_FUNC_FIX(dimms_temperature, dimms_state.last_temp)

static DEVICE_ATTR(cpu0_temperature,S_IRUGO,show_cpu0_temperature,NULL);
static DEVICE_ATTR(cpu0_voltage,S_IRUGO,show_cpu0_voltage,NULL);
static DEVICE_ATTR(cpu0_current,S_IRUGO,show_cpu0_current,NULL);
static DEVICE_ATTR(cpu0_exhaust_fan_rpm,S_IRUGO,show_cpu0_exhaust_fan_rpm,NULL);
static DEVICE_ATTR(cpu0_intake_fan_rpm,S_IRUGO,show_cpu0_intake_fan_rpm,NULL);

static DEVICE_ATTR(cpu1_temperature,S_IRUGO,show_cpu1_temperature,NULL);
static
DEVICE_ATTR(cpu1_voltage,S_IRUGO,show_cpu1_voltage,NULL);
static DEVICE_ATTR(cpu1_current,S_IRUGO,show_cpu1_current,NULL);
static DEVICE_ATTR(cpu1_exhaust_fan_rpm,S_IRUGO,show_cpu1_exhaust_fan_rpm,NULL);
static DEVICE_ATTR(cpu1_intake_fan_rpm,S_IRUGO,show_cpu1_intake_fan_rpm,NULL);

static DEVICE_ATTR(backside_temperature,S_IRUGO,show_backside_temperature,NULL);
static DEVICE_ATTR(backside_fan_pwm,S_IRUGO,show_backside_fan_pwm,NULL);

static DEVICE_ATTR(drives_temperature,S_IRUGO,show_drives_temperature,NULL);
static DEVICE_ATTR(drives_fan_rpm,S_IRUGO,show_drives_fan_rpm,NULL);

static DEVICE_ATTR(slots_temperature,S_IRUGO,show_slots_temperature,NULL);
static DEVICE_ATTR(slots_fan_pwm,S_IRUGO,show_slots_fan_pwm,NULL);

static DEVICE_ATTR(dimms_temperature,S_IRUGO,show_dimms_temperature,NULL);

/*
 * CPUs fans control loop
 */

/* Sample one CPU: fan RPM, diode temperature, voltage & current, all
 * stored into the state and returned as 16.16 fixed-point via *temp
 * and *power.  Returns 0, or a negative errno on sensor read failure
 * (previous values are then left in *temp / *power).
 */
static int do_read_one_cpu_values(struct cpu_pid_state *state, s32 *temp, s32 *power)
{
	s32 ltemp, volts, amps;
	int index, rc = 0;

	/* Default (in case of error) */
	*temp = state->cur_temp;
	*power = state->cur_power;

	/* Pick the exhaust fan to sample depending on machine type */
	if (cpu_pid_type == CPU_PID_TYPE_RACKMAC)
		index = (state->index == 0) ?
			CPU_A1_FAN_RPM_INDEX : CPU_B1_FAN_RPM_INDEX;
	else
		index = (state->index == 0) ?
			CPUA_EXHAUST_FAN_RPM_INDEX : CPUB_EXHAUST_FAN_RPM_INDEX;

	/* Read current fan status */
	rc = get_rpm_fan(index, !RPM_PID_USE_ACTUAL_SPEED);
	if (rc < 0) {
		/* XXX What do we do now ? Nothing for now, keep old value, but
		 * return error upstream
		 */
		DBG(" cpu %d, fan reading error !\n", state->index);
	} else {
		state->rpm = rc;
		DBG(" cpu %d, exhaust RPM: %d\n", state->index, state->rpm);
	}

	/* Get some sensor readings and scale it */
	ltemp = read_smon_adc(state, 1);
	if (ltemp == -1) {
		/* XXX What do we do now ? */
		state->overtemp++;
		if (rc == 0)
			rc = -EIO;
		DBG(" cpu %d, temp reading error !\n", state->index);
	} else {
		/* Fixup temperature according to diode calibration */
		DBG(" cpu %d, temp raw: %04x, m_diode: %04x, b_diode: %04x\n",
		    state->index,
		    ltemp, state->mpu.mdiode, state->mpu.bdiode);
		*temp = ((s32)ltemp * (s32)state->mpu.mdiode + ((s32)state->mpu.bdiode << 12)) >> 2;
		state->last_temp = *temp;
		DBG(" temp: %d.%03d\n", FIX32TOPRINT((*temp)));
	}

	/*
	 * Read voltage & current and calculate power
	 */
	volts = read_smon_adc(state, 3);
	amps = read_smon_adc(state, 4);

	/* Scale voltage and current raw sensor values according to fixed scales
	 * obtained in Darwin and calculate power from I and V
	 */
	volts *= ADC_CPU_VOLTAGE_SCALE;
	amps *= ADC_CPU_CURRENT_SCALE;
	*power = (((u64)volts) * ((u64)amps)) >> 16;
	state->voltage = volts;
	state->current_a = amps;
	state->last_power = *power;

	DBG(" cpu %d, current: %d.%03d, voltage: %d.%03d, power: %d.%03d W\n",
	    state->index, FIX32TOPRINT(state->current_a),
	    FIX32TOPRINT(state->voltage), FIX32TOPRINT(*power));

	return 0;
}

/* One PID iteration for a CPU: integral term over the power error
 * history, derivative over the temperature history, proportional on
 * temperature vs the adjusted target; adds the scaled sum to
 * state->rpm.  All math is fixed-point (gains are 12.20, values are
 * 16.16, products scaled back by >> 36).
 */
static void do_cpu_pid(struct cpu_pid_state *state, s32 temp, s32 power)
{
	s32 power_target, integral, derivative, proportional, adj_in_target, sval;
	s64 integ_p, deriv_p, prop_p, sum;
	int i;

	/* Calculate power target value (could be done once for all)
	 * and convert to a 16.16 fp number
	 */
	power_target = ((u32)(state->mpu.pmaxh - state->mpu.padjmax)) << 16;

	DBG(" power target: %d.%03d, error: %d.%03d\n",
	    FIX32TOPRINT(power_target), FIX32TOPRINT(power_target - power));

	/* Store temperature and power in history array */
	state->cur_temp = (state->cur_temp + 1) % CPU_TEMP_HISTORY_SIZE;
	state->temp_history[state->cur_temp] = temp;
	state->cur_power = (state->cur_power + 1) % state->count_power;
	state->power_history[state->cur_power] = power;
	state->error_history[state->cur_power] = power_target - power;

	/* If first loop, fill the history table */
	if (state->first) {
		for (i = 0; i < (state->count_power - 1); i++) {
			state->cur_power = (state->cur_power + 1) % state->count_power;
			state->power_history[state->cur_power] = power;
			state->error_history[state->cur_power] = power_target - power;
		}
		for (i = 0; i < (CPU_TEMP_HISTORY_SIZE - 1); i++) {
			state->cur_temp = (state->cur_temp + 1) % CPU_TEMP_HISTORY_SIZE;
			state->temp_history[state->cur_temp] = temp;
		}
		state->first = 0;
	}

	/* Calculate the integral term normally based on the "power" values */
	sum = 0;
	integral = 0;
	for (i = 0; i < state->count_power; i++)
		integral += state->error_history[i];
	integral *= CPU_PID_INTERVAL;
	DBG(" integral: %08x\n", integral);

	/* Calculate the adjusted input (sense value).
	 * G_r is 12.20
	 * integ is 16.16
	 * so the result is 28.36
	 *
	 * input target is mpu.ttarget, input max is mpu.tmax
	 */
	integ_p = ((s64)state->mpu.pid_gr) * (s64)integral;
	DBG(" integ_p: %d\n", (int)(integ_p >> 36));
	sval = (state->mpu.tmax << 16) - ((integ_p >> 20) & 0xffffffff);
	adj_in_target = (state->mpu.ttarget << 16);
	if (adj_in_target > sval)
		adj_in_target = sval;
	DBG(" adj_in_target: %d.%03d, ttarget: %d\n", FIX32TOPRINT(adj_in_target),
	    state->mpu.ttarget);

	/* Calculate the derivative term */
	derivative = state->temp_history[state->cur_temp] -
		state->temp_history[(state->cur_temp + CPU_TEMP_HISTORY_SIZE - 1)
				    % CPU_TEMP_HISTORY_SIZE];
	derivative /= CPU_PID_INTERVAL;
	deriv_p = ((s64)state->mpu.pid_gd) * (s64)derivative;
	DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
	sum += deriv_p;

	/* Calculate the proportional term */
	proportional = temp - adj_in_target;
	prop_p = ((s64)state->mpu.pid_gp) * (s64)proportional;
	DBG(" prop_p: %d\n", (int)(prop_p >> 36));
	sum += prop_p;

	/* Scale sum */
	sum >>= 36;

	DBG(" sum: %d\n", (int)sum);
	state->rpm += (s32)sum;
}

/* Control loop for liquid-cooled machines: both CPUs are driven from
 * the combined (max) temperature and power, using CPU 0's PID state.
 */
static void do_monitor_cpu_combined(void)
{
	struct cpu_pid_state *state0 = &processor_state[0];
	struct cpu_pid_state *state1 = &processor_state[1];
	s32 temp0, power0, temp1, power1;
	s32 temp_combi, power_combi;
	int rc, intake, pump;

	rc = do_read_one_cpu_values(state0, &temp0, &power0);
	if (rc < 0) {
		/* XXX
What do we do now ? */
	}
	/* Only CPU 0's overtemp counter is authoritative in combined mode;
	 * fold CPU 1's sensor failures into it.
	 */
	state1->overtemp = 0;
	rc = do_read_one_cpu_values(state1, &temp1, &power1);
	if (rc < 0) {
		/* XXX What do we do now ? */
	}
	if (state1->overtemp)
		state0->overtemp++;

	temp_combi = max(temp0, temp1);
	power_combi = max(power0, power1);

	/* Check tmax, increment overtemp if we are there. At tmax+8, we go
	 * full blown immediately and try to trigger a shutdown
	 */
	if (temp_combi >= ((state0->mpu.tmax + 8) << 16)) {
		printk(KERN_WARNING "Warning ! Temperature way above maximum (%d) !\n",
		       temp_combi >> 16);
		state0->overtemp += CPU_MAX_OVERTEMP / 4;
	} else if (temp_combi > (state0->mpu.tmax << 16)) {
		state0->overtemp++;
		printk(KERN_WARNING "Temperature %d above max %d. overtemp %d\n",
		       temp_combi >> 16, state0->mpu.tmax, state0->overtemp);
	} else {
		if (state0->overtemp)
			printk(KERN_WARNING "Temperature back down to %d\n",
			       temp_combi >> 16);
		state0->overtemp = 0;
	}

	if (state0->overtemp >= CPU_MAX_OVERTEMP)
		critical_state = 1;
	if (state0->overtemp > 0) {
		/* Overtemp: pin everything to maximum and skip the PID */
		state0->rpm = state0->mpu.rmaxn_exhaust_fan;
		state0->intake_rpm = intake = state0->mpu.rmaxn_intake_fan;
		pump = state0->pump_max;
		goto do_set_fans;
	}

	/* Do the PID */
	do_cpu_pid(state0, temp_combi, power_combi);

	/* Range check */
	state0->rpm = max(state0->rpm, (int)state0->mpu.rminn_exhaust_fan);
	state0->rpm = min(state0->rpm, (int)state0->mpu.rmaxn_exhaust_fan);

	/* Calculate intake fan speed */
	intake = (state0->rpm * CPU_INTAKE_SCALE) >> 16;
	intake = max(intake, (int)state0->mpu.rminn_intake_fan);
	intake = min(intake, (int)state0->mpu.rmaxn_intake_fan);
	state0->intake_rpm = intake;

	/* Calculate pump speed */
	pump = (state0->rpm * state0->pump_max) /
		state0->mpu.rmaxn_exhaust_fan;
	pump = min(pump, state0->pump_max);
	pump = max(pump, state0->pump_min);

 do_set_fans:
	/* We copy values from state 0 to state 1 for /sysfs */
	state1->rpm = state0->rpm;
	state1->intake_rpm = state0->intake_rpm;

	DBG("** CPU %d RPM: %d Ex, %d, Pump: %d, In, overtemp: %d\n",
	    state1->index, (int)state1->rpm, intake, pump, state1->overtemp);

	/* We should check for errors, shouldn't we ? But then, what
	 * do we do once the error occurs ? For FCU notified fan
	 * failures (-EFAULT) we probably want to notify userland
	 * some way...
	 */
	set_rpm_fan(CPUA_INTAKE_FAN_RPM_INDEX, intake);
	set_rpm_fan(CPUA_EXHAUST_FAN_RPM_INDEX, state0->rpm);
	set_rpm_fan(CPUB_INTAKE_FAN_RPM_INDEX, intake);
	set_rpm_fan(CPUB_EXHAUST_FAN_RPM_INDEX, state0->rpm);
	if (fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID)
		set_rpm_fan(CPUA_PUMP_RPM_INDEX, pump);
	if (fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID)
		set_rpm_fan(CPUB_PUMP_RPM_INDEX, pump);
}

/* Control loop for air-cooled desktops: each CPU runs its own PID and
 * drives its own intake/exhaust fan pair.
 */
static void do_monitor_cpu_split(struct cpu_pid_state *state)
{
	s32 temp, power;
	int rc, intake;

	/* Read current fan status */
	rc = do_read_one_cpu_values(state, &temp, &power);
	if (rc < 0) {
		/* XXX What do we do now ? */
	}

	/* Check tmax, increment overtemp if we are there. At tmax+8, we go
	 * full blown immediately and try to trigger a shutdown
	 */
	if (temp >= ((state->mpu.tmax + 8) << 16)) {
		printk(KERN_WARNING "Warning ! CPU %d temperature way above maximum"
		       " (%d) !\n",
		       state->index, temp >> 16);
		state->overtemp += CPU_MAX_OVERTEMP / 4;
	} else if (temp > (state->mpu.tmax << 16)) {
		state->overtemp++;
		printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
		       state->index, temp >> 16, state->mpu.tmax, state->overtemp);
	} else {
		if (state->overtemp)
			printk(KERN_WARNING "CPU %d temperature back down to %d\n",
			       state->index, temp >> 16);
		state->overtemp = 0;
	}

	if (state->overtemp >= CPU_MAX_OVERTEMP)
		critical_state = 1;
	if (state->overtemp > 0) {
		/* Overtemp: pin fans to maximum and skip the PID */
		state->rpm = state->mpu.rmaxn_exhaust_fan;
		state->intake_rpm = intake = state->mpu.rmaxn_intake_fan;
		goto do_set_fans;
	}

	/* Do the PID */
	do_cpu_pid(state, temp, power);

	/* Range check */
	state->rpm = max(state->rpm, (int)state->mpu.rminn_exhaust_fan);
	state->rpm = min(state->rpm, (int)state->mpu.rmaxn_exhaust_fan);

	/* Calculate intake fan */
	intake = (state->rpm * CPU_INTAKE_SCALE) >> 16;
	intake = max(intake, (int)state->mpu.rminn_intake_fan);
	intake = min(intake, (int)state->mpu.rmaxn_intake_fan);
	state->intake_rpm = intake;

 do_set_fans:
	DBG("** CPU %d RPM: %d Ex, %d In, overtemp: %d\n",
	    state->index, (int)state->rpm, intake, state->overtemp);

	/* We should check for errors, shouldn't we ? But then, what
	 * do we do once the error occurs ? For FCU notified fan
	 * failures (-EFAULT) we probably want to notify userland
	 * some way...
	 */
	if (state->index == 0) {
		set_rpm_fan(CPUA_INTAKE_FAN_RPM_INDEX, intake);
		set_rpm_fan(CPUA_EXHAUST_FAN_RPM_INDEX, state->rpm);
	} else {
		set_rpm_fan(CPUB_INTAKE_FAN_RPM_INDEX, intake);
		set_rpm_fan(CPUB_EXHAUST_FAN_RPM_INDEX, state->rpm);
	}
}

/* Control loop for Xserve (rackmac): one PID per CPU driving the three
 * fans of that CPU's bank, with a minimum clamp coming from the DIMM
 * loop (dimm_output_clamp).
 */
static void do_monitor_cpu_rack(struct cpu_pid_state *state)
{
	s32 temp, power, fan_min;
	int rc;

	/* Read current fan status */
	rc = do_read_one_cpu_values(state, &temp, &power);
	if (rc < 0) {
		/* XXX What do we do now ? */
	}

	/* Check tmax, increment overtemp if we are there. At tmax+8, we go
	 * full blown immediately and try to trigger a shutdown
	 */
	if (temp >= ((state->mpu.tmax + 8) << 16)) {
		printk(KERN_WARNING "Warning ! CPU %d temperature way above maximum"
		       " (%d) !\n",
		       state->index, temp >> 16);
		/* NOTE(review): this uses "=" where the split/combined loops
		 * use "+=" -- looks intentional but worth confirming.
		 */
		state->overtemp = CPU_MAX_OVERTEMP / 4;
	} else if (temp > (state->mpu.tmax << 16)) {
		state->overtemp++;
		printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
		       state->index, temp >> 16, state->mpu.tmax, state->overtemp);
	} else {
		if (state->overtemp)
			printk(KERN_WARNING "CPU %d temperature back down to %d\n",
			       state->index, temp >> 16);
		state->overtemp = 0;
	}

	if (state->overtemp >= CPU_MAX_OVERTEMP)
		critical_state = 1;
	if (state->overtemp > 0) {
		state->rpm = state->intake_rpm = state->mpu.rmaxn_intake_fan;
		goto do_set_fans;
	}

	/* Do the PID */
	do_cpu_pid(state, temp, power);

	/* Check clamp from dimms */
	fan_min = dimm_output_clamp;
	fan_min = max(fan_min, (int)state->mpu.rminn_intake_fan);

	DBG(" CPU min mpu = %d, min dimm = %d\n",
	    state->mpu.rminn_intake_fan, dimm_output_clamp);

	state->rpm = max(state->rpm, (int)fan_min);
	state->rpm = min(state->rpm, (int)state->mpu.rmaxn_intake_fan);
	state->intake_rpm = state->rpm;

 do_set_fans:
	DBG("** CPU %d RPM: %d overtemp: %d\n",
	    state->index, (int)state->rpm, state->overtemp);

	/* We should check for errors, shouldn't we ? But then, what
	 * do we do once the error occurs ? For FCU notified fan
	 * failures (-EFAULT) we probably want to notify userland
	 * some way...
	 */
	if (state->index == 0) {
		set_rpm_fan(CPU_A1_FAN_RPM_INDEX, state->rpm);
		set_rpm_fan(CPU_A2_FAN_RPM_INDEX, state->rpm);
		set_rpm_fan(CPU_A3_FAN_RPM_INDEX, state->rpm);
	} else {
		set_rpm_fan(CPU_B1_FAN_RPM_INDEX, state->rpm);
		set_rpm_fan(CPU_B2_FAN_RPM_INDEX, state->rpm);
		set_rpm_fan(CPU_B3_FAN_RPM_INDEX, state->rpm);
	}
}

/*
 * Initialize the state structure for one CPU control loop
 */
static int init_processor_state(struct cpu_pid_state *state, int index)
{
	int err;

	state->index = index;
	state->first = 1;
	state->rpm = (cpu_pid_type == CPU_PID_TYPE_RACKMAC) ?
4000 : 1000; state->overtemp = 0; state->adc_config = 0x00; if (index == 0) state->monitor = attach_i2c_chip(SUPPLY_MONITOR_ID, "CPU0_monitor"); else if (index == 1) state->monitor = attach_i2c_chip(SUPPLY_MONITORB_ID, "CPU1_monitor"); if (state->monitor == NULL) goto fail; if (read_eeprom(index, &state->mpu)) goto fail; state->count_power = state->mpu.tguardband; if (state->count_power > CPU_POWER_HISTORY_SIZE) { printk(KERN_WARNING "Warning ! too many power history slots\n"); state->count_power = CPU_POWER_HISTORY_SIZE; } DBG("CPU %d Using %d power history entries\n", index, state->count_power); if (index == 0) { err = device_create_file(&of_dev->dev, &dev_attr_cpu0_temperature); err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_voltage); err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_current); err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm); err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm); } else { err = device_create_file(&of_dev->dev, &dev_attr_cpu1_temperature); err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_voltage); err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_current); err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm); err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm); } if (err) printk(KERN_WARNING "Failed to create some of the attribute" "files for CPU %d\n", index); return 0; fail: state->monitor = NULL; return -ENODEV; } /* * Dispose of the state data for one CPU control loop */ static void dispose_processor_state(struct cpu_pid_state *state) { if (state->monitor == NULL) return; if (state->index == 0) { device_remove_file(&of_dev->dev, &dev_attr_cpu0_temperature); device_remove_file(&of_dev->dev, &dev_attr_cpu0_voltage); device_remove_file(&of_dev->dev, &dev_attr_cpu0_current); device_remove_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm); device_remove_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm); } else { 
device_remove_file(&of_dev->dev, &dev_attr_cpu1_temperature); device_remove_file(&of_dev->dev, &dev_attr_cpu1_voltage); device_remove_file(&of_dev->dev, &dev_attr_cpu1_current); device_remove_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm); device_remove_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm); } state->monitor = NULL; } /* * Motherboard backside & U3 heatsink fan control loop */ static void do_monitor_backside(struct backside_pid_state *state) { s32 temp, integral, derivative, fan_min; s64 integ_p, deriv_p, prop_p, sum; int i, rc; if (--state->ticks != 0) return; state->ticks = backside_params.interval; DBG("backside:\n"); /* Check fan status */ rc = get_pwm_fan(BACKSIDE_FAN_PWM_INDEX); if (rc < 0) { printk(KERN_WARNING "Error %d reading backside fan !\n", rc); /* XXX What do we do now ? */ } else state->pwm = rc; DBG(" current pwm: %d\n", state->pwm); /* Get some sensor readings */ temp = i2c_smbus_read_byte_data(state->monitor, MAX6690_EXT_TEMP) << 16; state->last_temp = temp; DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp), FIX32TOPRINT(backside_params.input_target)); /* Store temperature and error in history array */ state->cur_sample = (state->cur_sample + 1) % BACKSIDE_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - backside_params.input_target; /* If first loop, fill the history table */ if (state->first) { for (i = 0; i < (BACKSIDE_PID_HISTORY_SIZE - 1); i++) { state->cur_sample = (state->cur_sample + 1) % BACKSIDE_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - backside_params.input_target; } state->first = 0; } /* Calculate the integral term */ sum = 0; integral = 0; for (i = 0; i < BACKSIDE_PID_HISTORY_SIZE; i++) integral += state->error_history[i]; integral *= backside_params.interval; DBG(" integral: %08x\n", integral); integ_p = ((s64)backside_params.G_r) * (s64)integral; DBG(" integ_p: 
%d\n", (int)(integ_p >> 36)); sum += integ_p; /* Calculate the derivative term */ derivative = state->error_history[state->cur_sample] - state->error_history[(state->cur_sample + BACKSIDE_PID_HISTORY_SIZE - 1) % BACKSIDE_PID_HISTORY_SIZE]; derivative /= backside_params.interval; deriv_p = ((s64)backside_params.G_d) * (s64)derivative; DBG(" deriv_p: %d\n", (int)(deriv_p >> 36)); sum += deriv_p; /* Calculate the proportional term */ prop_p = ((s64)backside_params.G_p) * (s64)(state->error_history[state->cur_sample]); DBG(" prop_p: %d\n", (int)(prop_p >> 36)); sum += prop_p; /* Scale sum */ sum >>= 36; DBG(" sum: %d\n", (int)sum); if (backside_params.additive) state->pwm += (s32)sum; else state->pwm = sum; /* Check for clamp */ fan_min = (dimm_output_clamp * 100) / 14000; fan_min = max(fan_min, backside_params.output_min); state->pwm = max(state->pwm, fan_min); state->pwm = min(state->pwm, backside_params.output_max); DBG("** BACKSIDE PWM: %d\n", (int)state->pwm); set_pwm_fan(BACKSIDE_FAN_PWM_INDEX, state->pwm); } /* * Initialize the state structure for the backside fan control loop */ static int init_backside_state(struct backside_pid_state *state) { struct device_node *u3; int u3h = 1; /* conservative by default */ int err; /* * There are different PID params for machines with U3 and machines * with U3H, pick the right ones now */ u3 = of_find_node_by_path("/u3@0,f8000000"); if (u3 != NULL) { const u32 *vers = of_get_property(u3, "device-rev", NULL); if (vers) if (((*vers) & 0x3f) < 0x34) u3h = 0; of_node_put(u3); } if (rackmac) { backside_params.G_d = BACKSIDE_PID_RACK_G_d; backside_params.input_target = BACKSIDE_PID_RACK_INPUT_TARGET; backside_params.output_min = BACKSIDE_PID_U3H_OUTPUT_MIN; backside_params.interval = BACKSIDE_PID_RACK_INTERVAL; backside_params.G_p = BACKSIDE_PID_RACK_G_p; backside_params.G_r = BACKSIDE_PID_G_r; backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX; backside_params.additive = 0; } else if (u3h) { backside_params.G_d = 
BACKSIDE_PID_U3H_G_d; backside_params.input_target = BACKSIDE_PID_U3H_INPUT_TARGET; backside_params.output_min = BACKSIDE_PID_U3H_OUTPUT_MIN; backside_params.interval = BACKSIDE_PID_INTERVAL; backside_params.G_p = BACKSIDE_PID_G_p; backside_params.G_r = BACKSIDE_PID_G_r; backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX; backside_params.additive = 1; } else { backside_params.G_d = BACKSIDE_PID_U3_G_d; backside_params.input_target = BACKSIDE_PID_U3_INPUT_TARGET; backside_params.output_min = BACKSIDE_PID_U3_OUTPUT_MIN; backside_params.interval = BACKSIDE_PID_INTERVAL; backside_params.G_p = BACKSIDE_PID_G_p; backside_params.G_r = BACKSIDE_PID_G_r; backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX; backside_params.additive = 1; } state->ticks = 1; state->first = 1; state->pwm = 50; state->monitor = attach_i2c_chip(BACKSIDE_MAX_ID, "backside_temp"); if (state->monitor == NULL) return -ENODEV; err = device_create_file(&of_dev->dev, &dev_attr_backside_temperature); err |= device_create_file(&of_dev->dev, &dev_attr_backside_fan_pwm); if (err) printk(KERN_WARNING "Failed to create attribute file(s)" " for backside fan\n"); return 0; } /* * Dispose of the state data for the backside control loop */ static void dispose_backside_state(struct backside_pid_state *state) { if (state->monitor == NULL) return; device_remove_file(&of_dev->dev, &dev_attr_backside_temperature); device_remove_file(&of_dev->dev, &dev_attr_backside_fan_pwm); state->monitor = NULL; } /* * Drives bay fan control loop */ static void do_monitor_drives(struct drives_pid_state *state) { s32 temp, integral, derivative; s64 integ_p, deriv_p, prop_p, sum; int i, rc; if (--state->ticks != 0) return; state->ticks = DRIVES_PID_INTERVAL; DBG("drives:\n"); /* Check fan status */ rc = get_rpm_fan(DRIVES_FAN_RPM_INDEX, !RPM_PID_USE_ACTUAL_SPEED); if (rc < 0) { printk(KERN_WARNING "Error %d reading drives fan !\n", rc); /* XXX What do we do now ? 
*/ } else state->rpm = rc; DBG(" current rpm: %d\n", state->rpm); /* Get some sensor readings */ temp = le16_to_cpu(i2c_smbus_read_word_data(state->monitor, DS1775_TEMP)) << 8; state->last_temp = temp; DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp), FIX32TOPRINT(DRIVES_PID_INPUT_TARGET)); /* Store temperature and error in history array */ state->cur_sample = (state->cur_sample + 1) % DRIVES_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - DRIVES_PID_INPUT_TARGET; /* If first loop, fill the history table */ if (state->first) { for (i = 0; i < (DRIVES_PID_HISTORY_SIZE - 1); i++) { state->cur_sample = (state->cur_sample + 1) % DRIVES_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - DRIVES_PID_INPUT_TARGET; } state->first = 0; } /* Calculate the integral term */ sum = 0; integral = 0; for (i = 0; i < DRIVES_PID_HISTORY_SIZE; i++) integral += state->error_history[i]; integral *= DRIVES_PID_INTERVAL; DBG(" integral: %08x\n", integral); integ_p = ((s64)DRIVES_PID_G_r) * (s64)integral; DBG(" integ_p: %d\n", (int)(integ_p >> 36)); sum += integ_p; /* Calculate the derivative term */ derivative = state->error_history[state->cur_sample] - state->error_history[(state->cur_sample + DRIVES_PID_HISTORY_SIZE - 1) % DRIVES_PID_HISTORY_SIZE]; derivative /= DRIVES_PID_INTERVAL; deriv_p = ((s64)DRIVES_PID_G_d) * (s64)derivative; DBG(" deriv_p: %d\n", (int)(deriv_p >> 36)); sum += deriv_p; /* Calculate the proportional term */ prop_p = ((s64)DRIVES_PID_G_p) * (s64)(state->error_history[state->cur_sample]); DBG(" prop_p: %d\n", (int)(prop_p >> 36)); sum += prop_p; /* Scale sum */ sum >>= 36; DBG(" sum: %d\n", (int)sum); state->rpm += (s32)sum; state->rpm = max(state->rpm, DRIVES_PID_OUTPUT_MIN); state->rpm = min(state->rpm, DRIVES_PID_OUTPUT_MAX); DBG("** DRIVES RPM: %d\n", (int)state->rpm); set_rpm_fan(DRIVES_FAN_RPM_INDEX, state->rpm); } /* 
* Initialize the state structure for the drives bay fan control loop */ static int init_drives_state(struct drives_pid_state *state) { int err; state->ticks = 1; state->first = 1; state->rpm = 1000; state->monitor = attach_i2c_chip(DRIVES_DALLAS_ID, "drives_temp"); if (state->monitor == NULL) return -ENODEV; err = device_create_file(&of_dev->dev, &dev_attr_drives_temperature); err |= device_create_file(&of_dev->dev, &dev_attr_drives_fan_rpm); if (err) printk(KERN_WARNING "Failed to create attribute file(s)" " for drives bay fan\n"); return 0; } /* * Dispose of the state data for the drives control loop */ static void dispose_drives_state(struct drives_pid_state *state) { if (state->monitor == NULL) return; device_remove_file(&of_dev->dev, &dev_attr_drives_temperature); device_remove_file(&of_dev->dev, &dev_attr_drives_fan_rpm); state->monitor = NULL; } /* * DIMMs temp control loop */ static void do_monitor_dimms(struct dimm_pid_state *state) { s32 temp, integral, derivative, fan_min; s64 integ_p, deriv_p, prop_p, sum; int i; if (--state->ticks != 0) return; state->ticks = DIMM_PID_INTERVAL; DBG("DIMM:\n"); DBG(" current value: %d\n", state->output); temp = read_lm87_reg(state->monitor, LM87_INT_TEMP); if (temp < 0) return; temp <<= 16; state->last_temp = temp; DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp), FIX32TOPRINT(DIMM_PID_INPUT_TARGET)); /* Store temperature and error in history array */ state->cur_sample = (state->cur_sample + 1) % DIMM_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - DIMM_PID_INPUT_TARGET; /* If first loop, fill the history table */ if (state->first) { for (i = 0; i < (DIMM_PID_HISTORY_SIZE - 1); i++) { state->cur_sample = (state->cur_sample + 1) % DIMM_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - DIMM_PID_INPUT_TARGET; } state->first = 0; } /* Calculate the integral term */ sum = 0; 
integral = 0; for (i = 0; i < DIMM_PID_HISTORY_SIZE; i++) integral += state->error_history[i]; integral *= DIMM_PID_INTERVAL; DBG(" integral: %08x\n", integral); integ_p = ((s64)DIMM_PID_G_r) * (s64)integral; DBG(" integ_p: %d\n", (int)(integ_p >> 36)); sum += integ_p; /* Calculate the derivative term */ derivative = state->error_history[state->cur_sample] - state->error_history[(state->cur_sample + DIMM_PID_HISTORY_SIZE - 1) % DIMM_PID_HISTORY_SIZE]; derivative /= DIMM_PID_INTERVAL; deriv_p = ((s64)DIMM_PID_G_d) * (s64)derivative; DBG(" deriv_p: %d\n", (int)(deriv_p >> 36)); sum += deriv_p; /* Calculate the proportional term */ prop_p = ((s64)DIMM_PID_G_p) * (s64)(state->error_history[state->cur_sample]); DBG(" prop_p: %d\n", (int)(prop_p >> 36)); sum += prop_p; /* Scale sum */ sum >>= 36; DBG(" sum: %d\n", (int)sum); state->output = (s32)sum; state->output = max(state->output, DIMM_PID_OUTPUT_MIN); state->output = min(state->output, DIMM_PID_OUTPUT_MAX); dimm_output_clamp = state->output; DBG("** DIMM clamp value: %d\n", (int)state->output); /* Backside PID is only every 5 seconds, force backside fan clamping now */ fan_min = (dimm_output_clamp * 100) / 14000; fan_min = max(fan_min, backside_params.output_min); if (backside_state.pwm < fan_min) { backside_state.pwm = fan_min; DBG(" -> applying clamp to backside fan now: %d !\n", fan_min); set_pwm_fan(BACKSIDE_FAN_PWM_INDEX, fan_min); } } /* * Initialize the state structure for the DIMM temp control loop */ static int init_dimms_state(struct dimm_pid_state *state) { state->ticks = 1; state->first = 1; state->output = 4000; state->monitor = attach_i2c_chip(XSERVE_DIMMS_LM87, "dimms_temp"); if (state->monitor == NULL) return -ENODEV; if (device_create_file(&of_dev->dev, &dev_attr_dimms_temperature)) printk(KERN_WARNING "Failed to create attribute file" " for DIMM temperature\n"); return 0; } /* * Dispose of the state data for the DIMM control loop */ static void dispose_dimms_state(struct dimm_pid_state *state) { if 
(state->monitor == NULL) return; device_remove_file(&of_dev->dev, &dev_attr_dimms_temperature); state->monitor = NULL; } /* * Slots fan control loop */ static void do_monitor_slots(struct slots_pid_state *state) { s32 temp, integral, derivative; s64 integ_p, deriv_p, prop_p, sum; int i, rc; if (--state->ticks != 0) return; state->ticks = SLOTS_PID_INTERVAL; DBG("slots:\n"); /* Check fan status */ rc = get_pwm_fan(SLOTS_FAN_PWM_INDEX); if (rc < 0) { printk(KERN_WARNING "Error %d reading slots fan !\n", rc); /* XXX What do we do now ? */ } else state->pwm = rc; DBG(" current pwm: %d\n", state->pwm); /* Get some sensor readings */ temp = le16_to_cpu(i2c_smbus_read_word_data(state->monitor, DS1775_TEMP)) << 8; state->last_temp = temp; DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp), FIX32TOPRINT(SLOTS_PID_INPUT_TARGET)); /* Store temperature and error in history array */ state->cur_sample = (state->cur_sample + 1) % SLOTS_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - SLOTS_PID_INPUT_TARGET; /* If first loop, fill the history table */ if (state->first) { for (i = 0; i < (SLOTS_PID_HISTORY_SIZE - 1); i++) { state->cur_sample = (state->cur_sample + 1) % SLOTS_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - SLOTS_PID_INPUT_TARGET; } state->first = 0; } /* Calculate the integral term */ sum = 0; integral = 0; for (i = 0; i < SLOTS_PID_HISTORY_SIZE; i++) integral += state->error_history[i]; integral *= SLOTS_PID_INTERVAL; DBG(" integral: %08x\n", integral); integ_p = ((s64)SLOTS_PID_G_r) * (s64)integral; DBG(" integ_p: %d\n", (int)(integ_p >> 36)); sum += integ_p; /* Calculate the derivative term */ derivative = state->error_history[state->cur_sample] - state->error_history[(state->cur_sample + SLOTS_PID_HISTORY_SIZE - 1) % SLOTS_PID_HISTORY_SIZE]; derivative /= SLOTS_PID_INTERVAL; deriv_p = ((s64)SLOTS_PID_G_d) * 
(s64)derivative; DBG(" deriv_p: %d\n", (int)(deriv_p >> 36)); sum += deriv_p; /* Calculate the proportional term */ prop_p = ((s64)SLOTS_PID_G_p) * (s64)(state->error_history[state->cur_sample]); DBG(" prop_p: %d\n", (int)(prop_p >> 36)); sum += prop_p; /* Scale sum */ sum >>= 36; DBG(" sum: %d\n", (int)sum); state->pwm = (s32)sum; state->pwm = max(state->pwm, SLOTS_PID_OUTPUT_MIN); state->pwm = min(state->pwm, SLOTS_PID_OUTPUT_MAX); DBG("** DRIVES PWM: %d\n", (int)state->pwm); set_pwm_fan(SLOTS_FAN_PWM_INDEX, state->pwm); } /* * Initialize the state structure for the slots bay fan control loop */ static int init_slots_state(struct slots_pid_state *state) { int err; state->ticks = 1; state->first = 1; state->pwm = 50; state->monitor = attach_i2c_chip(XSERVE_SLOTS_LM75, "slots_temp"); if (state->monitor == NULL) return -ENODEV; err = device_create_file(&of_dev->dev, &dev_attr_slots_temperature); err |= device_create_file(&of_dev->dev, &dev_attr_slots_fan_pwm); if (err) printk(KERN_WARNING "Failed to create attribute file(s)" " for slots bay fan\n"); return 0; } /* * Dispose of the state data for the slots control loop */ static void dispose_slots_state(struct slots_pid_state *state) { if (state->monitor == NULL) return; device_remove_file(&of_dev->dev, &dev_attr_slots_temperature); device_remove_file(&of_dev->dev, &dev_attr_slots_fan_pwm); state->monitor = NULL; } static int call_critical_overtemp(void) { char *argv[] = { critical_overtemp_path, NULL }; static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; return call_usermodehelper(critical_overtemp_path, argv, envp, UMH_WAIT_EXEC); } /* * Here's the kernel thread that calls the various control loops */ static int main_control_loop(void *x) { DBG("main_control_loop started\n"); mutex_lock(&driver_lock); if (start_fcu() < 0) { printk(KERN_ERR "kfand: failed to start FCU\n"); mutex_unlock(&driver_lock); goto out; } /* Set the PCI fan once for now on non-RackMac */ if (!rackmac) 
set_pwm_fan(SLOTS_FAN_PWM_INDEX, SLOTS_FAN_DEFAULT_PWM); /* Initialize ADCs */ initialize_adc(&processor_state[0]); if (processor_state[1].monitor != NULL) initialize_adc(&processor_state[1]); fcu_tickle_ticks = FCU_TICKLE_TICKS; mutex_unlock(&driver_lock); while (state == state_attached) { unsigned long elapsed, start; start = jiffies; mutex_lock(&driver_lock); /* Tickle the FCU just in case */ if (--fcu_tickle_ticks < 0) { fcu_tickle_ticks = FCU_TICKLE_TICKS; tickle_fcu(); } /* First, we always calculate the new DIMMs state on an Xserve */ if (rackmac) do_monitor_dimms(&dimms_state); /* Then, the CPUs */ if (cpu_pid_type == CPU_PID_TYPE_COMBINED) do_monitor_cpu_combined(); else if (cpu_pid_type == CPU_PID_TYPE_RACKMAC) { do_monitor_cpu_rack(&processor_state[0]); if (processor_state[1].monitor != NULL) do_monitor_cpu_rack(&processor_state[1]); // better deal with UP } else { do_monitor_cpu_split(&processor_state[0]); if (processor_state[1].monitor != NULL) do_monitor_cpu_split(&processor_state[1]); // better deal with UP } /* Then, the rest */ do_monitor_backside(&backside_state); if (rackmac) do_monitor_slots(&slots_state); else do_monitor_drives(&drives_state); mutex_unlock(&driver_lock); if (critical_state == 1) { printk(KERN_WARNING "Temperature control detected a critical condition\n"); printk(KERN_WARNING "Attempting to shut down...\n"); if (call_critical_overtemp()) { printk(KERN_WARNING "Can't call %s, power off now!\n", critical_overtemp_path); machine_power_off(); } } if (critical_state > 0) critical_state++; if (critical_state > MAX_CRITICAL_STATE) { printk(KERN_WARNING "Shutdown timed out, power off now !\n"); machine_power_off(); } // FIXME: Deal with signals elapsed = jiffies - start; if (elapsed < HZ) schedule_timeout_interruptible(HZ - elapsed); } out: DBG("main_control_loop ended\n"); ctrl_task = 0; complete_and_exit(&ctrl_complete, 0); } /* * Dispose the control loops when tearing down */ static void dispose_control_loops(void) { 
dispose_processor_state(&processor_state[0]); dispose_processor_state(&processor_state[1]); dispose_backside_state(&backside_state); dispose_drives_state(&drives_state); dispose_slots_state(&slots_state); dispose_dimms_state(&dimms_state); } /* * Create the control loops. U3-0 i2c bus is up, so we can now * get to the various sensors */ static int create_control_loops(void) { struct device_node *np; /* Count CPUs from the device-tree, we don't care how many are * actually used by Linux */ cpu_count = 0; for (np = NULL; NULL != (np = of_find_node_by_type(np, "cpu"));) cpu_count++; DBG("counted %d CPUs in the device-tree\n", cpu_count); /* Decide the type of PID algorithm to use based on the presence of * the pumps, though that may not be the best way, that is good enough * for now */ if (rackmac) cpu_pid_type = CPU_PID_TYPE_RACKMAC; else if (of_machine_is_compatible("PowerMac7,3") && (cpu_count > 1) && fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID && fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID) { printk(KERN_INFO "Liquid cooling pumps detected, using new algorithm !\n"); cpu_pid_type = CPU_PID_TYPE_COMBINED; } else cpu_pid_type = CPU_PID_TYPE_SPLIT; /* Create control loops for everything. 
If any fail, everything * fails */ if (init_processor_state(&processor_state[0], 0)) goto fail; if (cpu_pid_type == CPU_PID_TYPE_COMBINED) fetch_cpu_pumps_minmax(); if (cpu_count > 1 && init_processor_state(&processor_state[1], 1)) goto fail; if (init_backside_state(&backside_state)) goto fail; if (rackmac && init_dimms_state(&dimms_state)) goto fail; if (rackmac && init_slots_state(&slots_state)) goto fail; if (!rackmac && init_drives_state(&drives_state)) goto fail; DBG("all control loops up !\n"); return 0; fail: DBG("failure creating control loops, disposing\n"); dispose_control_loops(); return -ENODEV; } /* * Start the control loops after everything is up, that is create * the thread that will make them run */ static void start_control_loops(void) { init_completion(&ctrl_complete); ctrl_task = kthread_run(main_control_loop, NULL, "kfand"); } /* * Stop the control loops when tearing down */ static void stop_control_loops(void) { if (ctrl_task) wait_for_completion(&ctrl_complete); } /* * Attach to the i2c FCU after detecting U3-1 bus */ static int attach_fcu(void) { fcu = attach_i2c_chip(FAN_CTRLER_ID, "fcu"); if (fcu == NULL) return -ENODEV; DBG("FCU attached\n"); return 0; } /* * Detach from the i2c FCU when tearing down */ static void detach_fcu(void) { fcu = NULL; } /* * Attach to the i2c controller. 
We probe the various chips based * on the device-tree nodes and build everything for the driver to * run, we then kick the driver monitoring thread */ static int therm_pm72_attach(struct i2c_adapter *adapter) { mutex_lock(&driver_lock); /* Check state */ if (state == state_detached) state = state_attaching; if (state != state_attaching) { mutex_unlock(&driver_lock); return 0; } /* Check if we are looking for one of these */ if (u3_0 == NULL && !strcmp(adapter->name, "u3 0")) { u3_0 = adapter; DBG("found U3-0\n"); if (k2 || !rackmac) if (create_control_loops()) u3_0 = NULL; } else if (u3_1 == NULL && !strcmp(adapter->name, "u3 1")) { u3_1 = adapter; DBG("found U3-1, attaching FCU\n"); if (attach_fcu()) u3_1 = NULL; } else if (k2 == NULL && !strcmp(adapter->name, "mac-io 0")) { k2 = adapter; DBG("Found K2\n"); if (u3_0 && rackmac) if (create_control_loops()) k2 = NULL; } /* We got all we need, start control loops */ if (u3_0 != NULL && u3_1 != NULL && (k2 || !rackmac)) { DBG("everything up, starting control loops\n"); state = state_attached; start_control_loops(); } mutex_unlock(&driver_lock); return 0; } static int therm_pm72_probe(struct i2c_client *client, const struct i2c_device_id *id) { /* Always succeed, the real work was done in therm_pm72_attach() */ return 0; } /* * Called when any of the devices which participates into thermal management * is going away. 
*/ static int therm_pm72_remove(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; mutex_lock(&driver_lock); if (state != state_detached) state = state_detaching; /* Stop control loops if any */ DBG("stopping control loops\n"); mutex_unlock(&driver_lock); stop_control_loops(); mutex_lock(&driver_lock); if (u3_0 != NULL && !strcmp(adapter->name, "u3 0")) { DBG("lost U3-0, disposing control loops\n"); dispose_control_loops(); u3_0 = NULL; } if (u3_1 != NULL && !strcmp(adapter->name, "u3 1")) { DBG("lost U3-1, detaching FCU\n"); detach_fcu(); u3_1 = NULL; } if (u3_0 == NULL && u3_1 == NULL) state = state_detached; mutex_unlock(&driver_lock); return 0; } /* * i2c_driver structure to attach to the host i2c controller */ static const struct i2c_device_id therm_pm72_id[] = { /* * Fake device name, thermal management is done by several * chips but we don't need to differentiate between them at * this point. */ { "therm_pm72", 0 }, { } }; static struct i2c_driver therm_pm72_driver = { .driver = { .name = "therm_pm72", }, .attach_adapter = therm_pm72_attach, .probe = therm_pm72_probe, .remove = therm_pm72_remove, .id_table = therm_pm72_id, }; static int fan_check_loc_match(const char *loc, int fan) { char tmp[64]; char *c, *e; strlcpy(tmp, fcu_fans[fan].loc, 64); c = tmp; for (;;) { e = strchr(c, ','); if (e) *e = 0; if (strcmp(loc, c) == 0) return 1; if (e == NULL) break; c = e + 1; } return 0; } static void fcu_lookup_fans(struct device_node *fcu_node) { struct device_node *np = NULL; int i; /* The table is filled by default with values that are suitable * for the old machines without device-tree informations. 
We scan * the device-tree and override those values with whatever is * there */ DBG("Looking up FCU controls in device-tree...\n"); while ((np = of_get_next_child(fcu_node, np)) != NULL) { int type = -1; const char *loc; const u32 *reg; DBG(" control: %s, type: %s\n", np->name, np->type); /* Detect control type */ if (!strcmp(np->type, "fan-rpm-control") || !strcmp(np->type, "fan-rpm")) type = FCU_FAN_RPM; if (!strcmp(np->type, "fan-pwm-control") || !strcmp(np->type, "fan-pwm")) type = FCU_FAN_PWM; /* Only care about fans for now */ if (type == -1) continue; /* Lookup for a matching location */ loc = of_get_property(np, "location", NULL); reg = of_get_property(np, "reg", NULL); if (loc == NULL || reg == NULL) continue; DBG(" matching location: %s, reg: 0x%08x\n", loc, *reg); for (i = 0; i < FCU_FAN_COUNT; i++) { int fan_id; if (!fan_check_loc_match(loc, i)) continue; DBG(" location match, index: %d\n", i); fcu_fans[i].id = FCU_FAN_ABSENT_ID; if (type != fcu_fans[i].type) { printk(KERN_WARNING "therm_pm72: Fan type mismatch " "in device-tree for %s\n", np->full_name); break; } if (type == FCU_FAN_RPM) fan_id = ((*reg) - 0x10) / 2; else fan_id = ((*reg) - 0x30) / 2; if (fan_id > 7) { printk(KERN_WARNING "therm_pm72: Can't parse " "fan ID in device-tree for %s\n", np->full_name); break; } DBG(" fan id -> %d, type -> %d\n", fan_id, type); fcu_fans[i].id = fan_id; } } /* Now dump the array */ printk(KERN_INFO "Detected fan controls:\n"); for (i = 0; i < FCU_FAN_COUNT; i++) { if (fcu_fans[i].id == FCU_FAN_ABSENT_ID) continue; printk(KERN_INFO " %d: %s fan, id %d, location: %s\n", i, fcu_fans[i].type == FCU_FAN_RPM ? 
"RPM" : "PWM", fcu_fans[i].id, fcu_fans[i].loc); } } static int fcu_of_probe(struct platform_device* dev) { state = state_detached; of_dev = dev; dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION); /* Lookup the fans in the device tree */ fcu_lookup_fans(dev->dev.of_node); /* Add the driver */ return i2c_add_driver(&therm_pm72_driver); } static int fcu_of_remove(struct platform_device* dev) { i2c_del_driver(&therm_pm72_driver); return 0; } static const struct of_device_id fcu_match[] = { { .type = "fcu", }, {}, }; MODULE_DEVICE_TABLE(of, fcu_match); static struct platform_driver fcu_of_platform_driver = { .driver = { .name = "temperature", .owner = THIS_MODULE, .of_match_table = fcu_match, }, .probe = fcu_of_probe, .remove = fcu_of_remove }; /* * Check machine type, attach to i2c controller */ static int __init therm_pm72_init(void) { rackmac = of_machine_is_compatible("RackMac3,1"); if (!of_machine_is_compatible("PowerMac7,2") && !of_machine_is_compatible("PowerMac7,3") && !rackmac) return -ENODEV; return platform_driver_register(&fcu_of_platform_driver); } static void __exit therm_pm72_exit(void) { platform_driver_unregister(&fcu_of_platform_driver); } module_init(therm_pm72_init); module_exit(therm_pm72_exit); MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); MODULE_DESCRIPTION("Driver for Apple's PowerMac G5 thermal control"); MODULE_LICENSE("GPL");
gpl-2.0
shabinp555/https-github.com-torvalds-linux
arch/c6x/kernel/dma.c
8738
3650
/* * Copyright (C) 2011 Texas Instruments Incorporated * Author: Mark Salter <msalter@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/mm_types.h> #include <linux/scatterlist.h> #include <asm/cacheflush.h> static void c6x_dma_sync(dma_addr_t handle, size_t size, enum dma_data_direction dir) { unsigned long paddr = handle; BUG_ON(!valid_dma_direction(dir)); switch (dir) { case DMA_FROM_DEVICE: L2_cache_block_invalidate(paddr, paddr + size); break; case DMA_TO_DEVICE: L2_cache_block_writeback(paddr, paddr + size); break; case DMA_BIDIRECTIONAL: L2_cache_block_writeback_invalidate(paddr, paddr + size); break; default: break; } } dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir) { dma_addr_t addr = virt_to_phys(ptr); c6x_dma_sync(addr, size, dir); debug_dma_map_page(dev, virt_to_page(ptr), (unsigned long)ptr & ~PAGE_MASK, size, dir, addr, true); return addr; } EXPORT_SYMBOL(dma_map_single); void dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { c6x_dma_sync(handle, size, dir); debug_dma_unmap_page(dev, handle, size, dir, true); } EXPORT_SYMBOL(dma_unmap_single); int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir) { struct scatterlist *sg; int i; for_each_sg(sglist, sg, nents, i) sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length, dir); debug_dma_map_sg(dev, sglist, nents, nents, dir); return nents; } EXPORT_SYMBOL(dma_map_sg); void dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir) { struct scatterlist *sg; int i; for_each_sg(sglist, sg, nents, i) dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir); 
debug_dma_unmap_sg(dev, sglist, nents, dir); } EXPORT_SYMBOL(dma_unmap_sg); void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { c6x_dma_sync(handle, size, dir); debug_dma_sync_single_for_cpu(dev, handle, size, dir); } EXPORT_SYMBOL(dma_sync_single_for_cpu); void dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { c6x_dma_sync(handle, size, dir); debug_dma_sync_single_for_device(dev, handle, size, dir); } EXPORT_SYMBOL(dma_sync_single_for_device); void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir) { struct scatterlist *sg; int i; for_each_sg(sglist, sg, nents, i) dma_sync_single_for_cpu(dev, sg_dma_address(sg), sg->length, dir); debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir); } EXPORT_SYMBOL(dma_sync_sg_for_cpu); void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir) { struct scatterlist *sg; int i; for_each_sg(sglist, sg, nents, i) dma_sync_single_for_device(dev, sg_dma_address(sg), sg->length, dir); debug_dma_sync_sg_for_device(dev, sglist, nents, dir); } EXPORT_SYMBOL(dma_sync_sg_for_device); /* Number of entries preallocated for DMA-API debugging */ #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) static int __init dma_init(void) { dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } fs_initcall(dma_init);
gpl-2.0
v-superuser/android_kernel_sony_msm8x27
arch/score/kernel/init_task.c
8994
1555
/* * arch/score/kernel/init_task.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/init_task.h> #include <linux/mqueue.h> static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); /* * Initial thread structure. * * We need to make sure that this is THREAD_SIZE aligned due to the * way process stacks are handled. This is done by having a special * "init_task" linker map entry.. */ union thread_union init_thread_union __init_task_data = { INIT_THREAD_INFO(init_task) }; /* * Initial task structure. * * All other task structs will be allocated on slabs in fork.c */ struct task_struct init_task = INIT_TASK(init_task); EXPORT_SYMBOL(init_task);
gpl-2.0
draekko/android_kernel_samsung_kylessopen
net/dccp/qpolicy.c
13090
3431
/* * net/dccp/qpolicy.c * * Policy-based packet dequeueing interface for DCCP. * * Copyright (c) 2008 Tomasz Grobelny <tomasz@grobelny.oswiecenia.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License v2 * as published by the Free Software Foundation. */ #include "dccp.h" /* * Simple Dequeueing Policy: * If tx_qlen is different from 0, enqueue up to tx_qlen elements. */ static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb) { skb_queue_tail(&sk->sk_write_queue, skb); } static bool qpolicy_simple_full(struct sock *sk) { return dccp_sk(sk)->dccps_tx_qlen && sk->sk_write_queue.qlen >= dccp_sk(sk)->dccps_tx_qlen; } static struct sk_buff *qpolicy_simple_top(struct sock *sk) { return skb_peek(&sk->sk_write_queue); } /* * Priority-based Dequeueing Policy: * If tx_qlen is different from 0 and the queue has reached its upper bound * of tx_qlen elements, replace older packets lowest-priority-first. */ static struct sk_buff *qpolicy_prio_best_skb(struct sock *sk) { struct sk_buff *skb, *best = NULL; skb_queue_walk(&sk->sk_write_queue, skb) if (best == NULL || skb->priority > best->priority) best = skb; return best; } static struct sk_buff *qpolicy_prio_worst_skb(struct sock *sk) { struct sk_buff *skb, *worst = NULL; skb_queue_walk(&sk->sk_write_queue, skb) if (worst == NULL || skb->priority < worst->priority) worst = skb; return worst; } static bool qpolicy_prio_full(struct sock *sk) { if (qpolicy_simple_full(sk)) dccp_qpolicy_drop(sk, qpolicy_prio_worst_skb(sk)); return false; } /** * struct dccp_qpolicy_operations - TX Packet Dequeueing Interface * @push: add a new @skb to the write queue * @full: indicates that no more packets will be admitted * @top: peeks at whatever the queueing policy defines as its `top' */ static struct dccp_qpolicy_operations { void (*push) (struct sock *sk, struct sk_buff *skb); bool (*full) (struct sock *sk); struct sk_buff* (*top) (struct sock *sk); 
__be32 params; } qpol_table[DCCPQ_POLICY_MAX] = { [DCCPQ_POLICY_SIMPLE] = { .push = qpolicy_simple_push, .full = qpolicy_simple_full, .top = qpolicy_simple_top, .params = 0, }, [DCCPQ_POLICY_PRIO] = { .push = qpolicy_simple_push, .full = qpolicy_prio_full, .top = qpolicy_prio_best_skb, .params = DCCP_SCM_PRIORITY, }, }; /* * Externally visible interface */ void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb) { qpol_table[dccp_sk(sk)->dccps_qpolicy].push(sk, skb); } bool dccp_qpolicy_full(struct sock *sk) { return qpol_table[dccp_sk(sk)->dccps_qpolicy].full(sk); } void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb) { if (skb != NULL) { skb_unlink(skb, &sk->sk_write_queue); kfree_skb(skb); } } struct sk_buff *dccp_qpolicy_top(struct sock *sk) { return qpol_table[dccp_sk(sk)->dccps_qpolicy].top(sk); } struct sk_buff *dccp_qpolicy_pop(struct sock *sk) { struct sk_buff *skb = dccp_qpolicy_top(sk); if (skb != NULL) { /* Clear any skb fields that we used internally */ skb->priority = 0; skb_unlink(skb, &sk->sk_write_queue); } return skb; } bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param) { /* check if exactly one bit is set */ if (!param || (param & (param - 1))) return false; return (qpol_table[dccp_sk(sk)->dccps_qpolicy].params & param) == param; }
gpl-2.0
Quarx2k/jordan-kernel
drivers/dsp/bridge/dynload/tramp.c
35
34802
/* * Copyright 2009 by Texas Instruments Incorporated. * All rights reserved. Property of Texas Instruments Incorporated. * Restricted rights to use, duplicate or disclose this code are * granted through contract. * * @(#) DSP/BIOS Bridge */ #include "header.h" #if TMS32060 #include "tramp_table_c6000.c" #endif #define MAX_RELOS_PER_PASS 4 /* * Function: priv_tramp_sect_tgt_alloc * Description: Allocate target memory for the trampoline section. The * target mem size is easily obtained as the next available address. */ static int priv_tramp_sect_tgt_alloc(struct dload_state *dlthis) { int ret_val = 0; struct LDR_SECTION_INFO *sect_info; /* Populate the trampoline loader section and allocate it on the * target. The section name is ALWAYS the first string in the final * string table for trampolines. The trampoline section is always * 1 beyond the total number of allocated sections. */ sect_info = &dlthis->ldr_sections[dlthis->allocated_secn_count]; sect_info->name = dlthis->tramp.final_string_table; sect_info->size = dlthis->tramp.tramp_sect_next_addr; sect_info->context = 0; sect_info->type = (4 << 8) | DLOAD_TEXT | DS_ALLOCATE_MASK | DS_DOWNLOAD_MASK; sect_info->page = 0; sect_info->run_addr = 0; sect_info->load_addr = 0; ret_val = dlthis->myalloc->Allocate(dlthis->myalloc, sect_info, DS_ALIGNMENT(sect_info->type)); if (ret_val == 0) dload_error(dlthis, "Failed to allocate target memory for" " trampoline"); return ret_val; } /* * Function: priv_h2a * Description: Helper function to convert a hex value to its ASCII * representation. Used for trampoline symbol name generation. */ static u8 priv_h2a(u8 value) { if (value > 0xF) return 0xFF; if (value <= 9) value += 0x30; else value += 0x37; return value; } /* * Function: priv_tramp_sym_gen_name * Description: Generate a trampoline symbol name (ASCII) using the value * of the symbol. This places the new name into the user buffer. 
* The name is fixed in length and of the form: __$dbTR__xxxxxxxx
* (where "xxxxxxxx" is the hex value of the referenced symbol's address).
*/
static void priv_tramp_sym_gen_name(u32 value, char *dst)
{
	u32 i;
	/* volatile pointers — presumably to keep the optimizer from
	 * collapsing the byte-wise copy loops; TODO confirm intent */
	volatile char *prefix = TRAMP_SYM_PREFIX;
	volatile char *dst_local = dst;
	u8 tmp;

	/* Clear out the destination, including the ending NULL */
	for (i = 0; i < (TRAMP_SYM_PREFIX_LEN + TRAMP_SYM_HEX_ASCII_LEN); i++)
		*(dst_local + i) = 0;

	/* Copy the prefix to start */
	for (i = 0; i < strlen(TRAMP_SYM_PREFIX); i++) {
		*dst_local = *(prefix + i);
		dst_local++;
	}

	/* Now convert the value passed in to a string equiv of the hex */
	for (i = 0; i < sizeof(value); i++) {
#ifndef _BIG_ENDIAN
		/* Little endian host: walk bytes most-significant first so
		 * the ASCII reads in natural hex order */
		tmp = *(((u8 *)&value) + (sizeof(value) - 1) - i);
		*dst_local = priv_h2a((tmp & 0xF0) >> 4);
		dst_local++;
		*dst_local = priv_h2a(tmp & 0x0F);
		dst_local++;
#else
		/* Big endian host: bytes are already most-significant first */
		tmp = *(((u8 *)&value) + i);
		*dst_local = priv_h2a((tmp & 0xF0) >> 4);
		dst_local++;
		*dst_local = priv_h2a(tmp & 0x0F);
		dst_local++;
#endif
	}

	/* NULL terminate */
	*dst_local = 0;
}

/*
 * Function: priv_tramp_string_create
 * Description: Create a new string specific to the trampoline loading and add
 *	it to the trampoline string list. This list contains the
 *	trampoline section name and trampoline point symbols.
 *	Returns the new node, or NULL on allocation failure.
 */
static struct tramp_string *priv_tramp_string_create(struct dload_state *dlthis,
						     u32 str_len, char *str)
{
	struct tramp_string *new_string = NULL;
	u32 i;

	/* Create a new string object with the specified size. */
	new_string = (struct tramp_string *)dlthis->mysym->Allocate(
		dlthis->mysym, (sizeof(struct tramp_string) + str_len + 1));
	if (new_string != NULL) {
		/* Clear the string first.  This ensures the ending NULL is
		 * present and the optimizer won't touch it. */
		for (i = 0; i < (sizeof(struct tramp_string) + str_len + 1);
		     i++)
			*((u8 *)new_string + i) = 0;

		/* Add this string to our virtual table by assigning it the
		 * next index and pushing it to the tail of the list. */
		new_string->index = dlthis->tramp.tramp_string_next_index;
		dlthis->tramp.tramp_string_next_index++;
		/* +1 accounts for the NULL terminator in the final table */
		dlthis->tramp.tramp_string_size += str_len + 1;

		new_string->next = NULL;
		if (dlthis->tramp.string_head == NULL)
			dlthis->tramp.string_head = new_string;
		else
			dlthis->tramp.string_tail->next = new_string;

		dlthis->tramp.string_tail = new_string;

		/* Copy the string over to the new object */
		for (i = 0; i < str_len; i++)
			new_string->str[i] = str[i];
	}

	return new_string;
}

/*
 * Function: priv_tramp_string_find
 * Description: Walk the trampoline string list and find a match for the
 *	provided string.  If no match is found, NULL is returned.
 */
static struct tramp_string *priv_tramp_string_find(struct dload_state *dlthis,
						   char *str)
{
	struct tramp_string *cur_str = NULL;
	struct tramp_string *ret_val = NULL;
	u32 i;
	u32 str_len = strlen(str);

	for (cur_str = dlthis->tramp.string_head;
	     (ret_val == NULL) && (cur_str != NULL); cur_str = cur_str->next) {
		/* If the string lengths aren't equal, don't bother
		 * comparing */
		if (str_len != strlen(cur_str->str))
			continue;

		/* Walk the strings until one of them ends */
		for (i = 0; i < str_len; i++) {
			/* If they don't match in the current position then
			 * break out now, no sense in continuing to look at
			 * this string. */
			if (str[i] != cur_str->str[i])
				break;
		}

		if (i == str_len)
			ret_val = cur_str;
	}

	return ret_val;
}

/*
 * Function: priv_string_tbl_finalize
 * Description: Flatten the trampoline string list into a table of NULL
 *	terminated strings.  This is the same format of string table
 *	as used by the COFF/DOFF file.  Consumes (frees) the list nodes.
 *	Returns 1 on success, 0 on allocation failure.
 */
static int priv_string_tbl_finalize(struct dload_state *dlthis)
{
	int ret_val = 0;
	struct tramp_string *cur_string;
	char *cur_loc;
	char *tmp;

	/* Allocate enough space for all strings that have been created.  The
	 * table is simply all strings concatenated together with NULL
	 * endings. */
	dlthis->tramp.final_string_table =
	    (char *)dlthis->mysym->Allocate(dlthis->mysym,
					    dlthis->tramp.tramp_string_size);
	if (dlthis->tramp.final_string_table != NULL) {
		/* We got our buffer, walk the list and release the nodes as
		 * we go */
		cur_loc = dlthis->tramp.final_string_table;
		cur_string = dlthis->tramp.string_head;
		while (cur_string != NULL) {
			/* Move the head/tail pointers */
			dlthis->tramp.string_head = cur_string->next;
			if (dlthis->tramp.string_tail == cur_string)
				dlthis->tramp.string_tail = NULL;

			/* Copy the string contents */
			for (tmp = cur_string->str;
			     *tmp != '\0'; tmp++, cur_loc++)
				*cur_loc = *tmp;

			/* Pick up the NULL termination since it was missed by
			 * breaking using it to end the above loop. */
			*cur_loc = '\0';
			cur_loc++;

			/* Free the string node, we don't need it any more. */
			dlthis->mysym->Deallocate(dlthis->mysym, cur_string);

			/* Move our pointer to the next one */
			cur_string = dlthis->tramp.string_head;
		}

		/* Update our return value to success */
		ret_val = 1;
	} else
		dload_error(dlthis, "Failed to allocate trampoline "
			    "string table");

	return ret_val;
}

/*
 * Function: priv_tramp_sect_alloc
 * Description: Virtually allocate space from the trampoline section.  This
 *	function returns the next offset within the trampoline section
 *	that is available and moves the next available offset by the
 *	requested size.  NO TARGET ALLOCATION IS DONE AT THIS TIME.
 */
static u32 priv_tramp_sect_alloc(struct dload_state *dlthis, u32 tramp_size)
{
	u32 ret_val;

	/* If the next available address is 0, this is our first allocation.
	 * Warn so the dynamically created section is visible in the log. */
	if (dlthis->tramp.tramp_sect_next_addr == 0) {
		dload_syms_error(dlthis->mysym, "*** WARNING *** created "
				 "dynamic TRAMPOLINE section for module %s",
				 dlthis->str_head);
	}

	/* Reserve space for the new trampoline */
	ret_val = dlthis->tramp.tramp_sect_next_addr;
	dlthis->tramp.tramp_sect_next_addr += tramp_size;

	return ret_val;
}

/*
 * Function: priv_tramp_sym_create
 * Description: Allocate and create a new trampoline specific symbol and add
 *	it to the trampoline symbol list.  These symbols will include
 *	trampoline points as well as the external symbols they
 *	reference.  Returns the new node, or NULL on allocation failure.
 */
static struct tramp_sym *priv_tramp_sym_create(struct dload_state *dlthis,
					       u32 str_index,
					       struct Local_Symbol *tmp_sym)
{
	struct tramp_sym *new_sym = NULL;
	u32 i;

	/* Allocate new space for the symbol in the symbol table. */
	new_sym =
	    (struct tramp_sym *)dlthis->mysym->Allocate(dlthis->mysym,
						sizeof(struct tramp_sym));
	if (new_sym != NULL) {
		/* Zero the new node */
		for (i = 0; i != sizeof(struct tramp_sym); i++)
			*((char *)new_sym + i) = 0;

		/* Assign this symbol the next symbol index for easier
		 * reference later during relocation. */
		new_sym->index = dlthis->tramp.tramp_sym_next_index;
		dlthis->tramp.tramp_sym_next_index++;

		/* Populate the symbol information.  At this point any
		 * trampoline symbols will be the offset location, not the
		 * final.  Copy over the symbol info to start, then be sure to
		 * get the string index from the trampoline string table. */
		new_sym->sym_info = *tmp_sym;
		new_sym->str_index = str_index;

		/* Push the new symbol to the tail of the symbol table list */
		new_sym->next = NULL;
		if (dlthis->tramp.symbol_head == NULL)
			dlthis->tramp.symbol_head = new_sym;
		else
			dlthis->tramp.symbol_tail->next = new_sym;

		dlthis->tramp.symbol_tail = new_sym;
	}

	return new_sym;
}

/*
 * Function: priv_tramp_sym_get
 * Description: Search for the symbol with the matching string index (from
 *	the trampoline string table) and return the trampoline
 *	symbol object, if found.  Otherwise return NULL.
*/
static struct tramp_sym *priv_tramp_sym_get(struct dload_state *dlthis,
					    u32 string_index)
{
	struct tramp_sym *sym_found = NULL;

	/* Walk the symbol table list and search vs. the string index */
	for (sym_found = dlthis->tramp.symbol_head;
	     sym_found != NULL; sym_found = sym_found->next) {
		if (sym_found->str_index == string_index)
			break;
	}

	return sym_found;
}

/*
 * Function: priv_tramp_sym_find
 * Description: Search for a trampoline symbol based on the string name of
 *	the symbol.  Return the symbol object, if found, otherwise
 *	return NULL.
 */
static struct tramp_sym *priv_tramp_sym_find(struct dload_state *dlthis,
					     char *string)
{
	struct tramp_sym *sym_found = NULL;
	struct tramp_string *str_found = NULL;

	/* First, search for the string, then search for the sym based on the
	   string index. */
	str_found = priv_tramp_string_find(dlthis, string);
	if (str_found != NULL)
		sym_found = priv_tramp_sym_get(dlthis, str_found->index);

	return sym_found;
}

/*
 * Function: priv_tramp_sym_finalize
 * Description: Allocate a flat symbol table for the trampoline section,
 *	put each trampoline symbol into the table, adjust the
 *	symbol value based on the section address on the target and
 *	free the trampoline symbol list nodes.
 *	Returns 1 on success, 0 on allocation failure.
 */
static int priv_tramp_sym_finalize(struct dload_state *dlthis)
{
	int ret_val = 0;
	struct tramp_sym *cur_sym;
	/* Trampoline section lives one past the last allocated section */
	struct LDR_SECTION_INFO *tramp_sect =
	    &dlthis->ldr_sections[dlthis->allocated_secn_count];
	struct Local_Symbol *new_sym;

	/* Allocate a table to hold a flattened version of all symbols
	 * created. */
	dlthis->tramp.final_sym_table =
	    (struct Local_Symbol *)dlthis->mysym->Allocate(
		dlthis->mysym, (sizeof(struct Local_Symbol) *
				dlthis->tramp.tramp_sym_next_index));
	if (dlthis->tramp.final_sym_table != NULL) {
		/* Walk the list of all symbols, copy it over to the flattened
		 * table.  After it has been copied, the node can be freed as
		 * it is no longer needed. */
		new_sym = dlthis->tramp.final_sym_table;
		cur_sym = dlthis->tramp.symbol_head;
		while (cur_sym != NULL) {
			/* Pop it off the list */
			dlthis->tramp.symbol_head = cur_sym->next;
			if (cur_sym == dlthis->tramp.symbol_tail)
				dlthis->tramp.symbol_tail = NULL;

			/* Copy the symbol contents into the flat table */
			*new_sym = cur_sym->sym_info;

			/* Now finalize the symbol.  If it is in the tramp
			 * section, we need to adjust for the section start.
			 * If it is external then we don't need to adjust at
			 * all.
			 * NOTE: THIS CODE ASSUMES THAT THE TRAMPOLINE IS
			 * REFERENCED LIKE A CALL TO AN EXTERNAL SO VALUE AND
			 * DELTA ARE THE SAME.  SEE THE FUNCTION dload_symbols
			 * WHERE DN_UNDEF IS HANDLED FOR MORE REFERENCE. */
			if (new_sym->secnn < 0) {
				new_sym->value += tramp_sect->load_addr;
				new_sym->delta = new_sym->value;
			}

			/* Let go of the symbol node */
			dlthis->mysym->Deallocate(dlthis->mysym, cur_sym);

			/* Move to the next node */
			cur_sym = dlthis->tramp.symbol_head;
			new_sym++;
		}

		ret_val = 1;
	} else
		dload_error(dlthis, "Failed to alloc trampoline sym table");

	return ret_val;
}

/*
 * Function: priv_tgt_img_gen
 * Description: Allocate storage for and copy the target specific image data
 *	and fix up its relocations for the new external symbol.  If
 *	a trampoline image packet was successfully created it is added
 *	to the trampoline list.  Returns 1 on success, 0 on failure.
 */
static int priv_tgt_img_gen(struct dload_state *dlthis, u32 base,
			    u32 gen_index, struct tramp_sym *new_ext_sym)
{
	struct tramp_img_pkt *new_img_pkt = NULL;
	u32 i;
	u32 pkt_size = tramp_img_pkt_size_get();
	u8 *gen_tbl_entry;
	u8 *pkt_data;
	struct reloc_record_t *cur_relo;
	int ret_val = 0;

	/* Allocate a new image packet and set it up. */
	new_img_pkt =
	    (struct tramp_img_pkt *)dlthis->mysym->Allocate(dlthis->mysym,
							    pkt_size);
	if (new_img_pkt != NULL) {
		/* Save the base, this is where it goes in the section */
		new_img_pkt->base = base;

		/* Copy over the image data and relos from the target table */
		pkt_data = (u8 *)&new_img_pkt->hdr;
		gen_tbl_entry = (u8 *)&tramp_gen_info[gen_index];
		for (i = 0; i < pkt_size; i++) {
			*pkt_data = *gen_tbl_entry;
			pkt_data++;
			gen_tbl_entry++;
		}

		/* Update the relocations to point to the external symbol */
		cur_relo =
		    (struct reloc_record_t *)((u8 *)&new_img_pkt->hdr +
					      new_img_pkt->hdr.relo_offset);
		for (i = 0; i < new_img_pkt->hdr.num_relos; i++)
			cur_relo[i].r_symndx = new_ext_sym->index;

		/* Add it to the trampoline list. */
		new_img_pkt->next = dlthis->tramp.tramp_pkts;
		dlthis->tramp.tramp_pkts = new_img_pkt;

		ret_val = 1;
	}

	return ret_val;
}

/*
 * Function: priv_pkt_relo
 * Description: Take the provided image data and the collection of relocations
 *	for it and perform the relocations.  Note that all relocations
 *	at this stage are considered SECOND PASS since the original
 *	image has already been processed in the first pass.  This means
 *	TRAMPOLINES ARE TREATED AS 2ND PASS even though this is really
 *	the first (and only) relocation that will be performed on them.
 */
static int priv_pkt_relo(struct dload_state *dlthis, TgtAU_t *data,
			 struct reloc_record_t *rp[], u32 relo_count)
{
	int ret_val = 1;
	u32 i;
	bool tmp;

	/* Walk through all of the relos and process them.  This function is
	 * the equivalent of relocate_packet() from cload.c, but specialized
	 * for trampolines and 2nd phase relocations. */
	for (i = 0; i < relo_count; i++)
		dload_relocate(dlthis, data, rp[i], &tmp, true);

	return ret_val;
}

/*
 * Function: priv_tramp_pkt_finalize
 * Description: Walk the list of all trampoline packets and finalize them.
 *	Each trampoline image packet will be relocated now that the
 *	trampoline section has been allocated on the target.
Once
 * all of the relocations are done the trampoline image data
 * is written into target memory and the trampoline packet
 * is freed: it is no longer needed after this point.
 */
static int priv_tramp_pkt_finalize(struct dload_state *dlthis)
{
	int ret_val = 1;
	struct tramp_img_pkt *cur_pkt = NULL;
	struct reloc_record_t *relos[MAX_RELOS_PER_PASS];
	u32 relos_done;
	u32 i;
	struct reloc_record_t *cur_relo;
	struct LDR_SECTION_INFO *sect_info =
	    &dlthis->ldr_sections[dlthis->allocated_secn_count];

	/* Walk the list of trampoline packets and relocate each packet.  This
	 * function is the trampoline equivalent of dload_data() from
	 * cload.c. */
	cur_pkt = dlthis->tramp.tramp_pkts;
	while ((ret_val != 0) && (cur_pkt != NULL)) {
		/* Remove the pkt from the list */
		dlthis->tramp.tramp_pkts = cur_pkt->next;

		/* Setup section and image offset information for the relo */
		dlthis->image_secn = sect_info;
		dlthis->image_offset = cur_pkt->base;
		dlthis->delta_runaddr = sect_info->run_addr;

		/* Walk through all relos for the packet, a chunk of up to
		 * MAX_RELOS_PER_PASS at a time */
		relos_done = 0;
		cur_relo = (struct reloc_record_t *)((u8 *)&cur_pkt->hdr +
						cur_pkt->hdr.relo_offset);
		while (relos_done < cur_pkt->hdr.num_relos) {
#ifdef ENABLE_TRAMP_DEBUG
			dload_syms_error(dlthis->mysym,
					 "===> Trampoline %x branches to %x",
					 sect_info->run_addr +
					 dlthis->image_offset,
					 dlthis->tramp.
					 final_sym_table[cur_relo->
							 r_symndx].value);
#endif

			for (i = 0;
			     ((i < MAX_RELOS_PER_PASS) &&
			      ((i + relos_done) < cur_pkt->hdr.num_relos));
			     i++)
				relos[i] = cur_relo + i;

			/* Do the actual relo */
			ret_val = priv_pkt_relo(dlthis,
						(TgtAU_t *)&cur_pkt->payload,
						relos, i);
			if (ret_val == 0) {
				dload_error(dlthis,
					"Relocation of trampoline pkt at %x failed",
					cur_pkt->base + sect_info->run_addr);
				break;
			}

			relos_done += i;
			cur_relo += i;
		}

		/* Make sure we didn't hit a problem */
		if (ret_val != 0) {
			/* Relos are done for the packet, write it to the
			 * target */
			ret_val = dlthis->myio->writemem(dlthis->myio,
						&cur_pkt->payload,
						sect_info->load_addr +
						cur_pkt->base, sect_info,
						BYTE_TO_HOST(
						cur_pkt->hdr.tramp_code_size));
			if (ret_val == 0) {
				dload_error(dlthis,
					    "Write to " FMT_UI32 " failed",
					    sect_info->load_addr +
					    cur_pkt->base);
			}

			/* Done with the pkt, let it go */
			dlthis->mysym->Deallocate(dlthis->mysym, cur_pkt);

			/* Get the next packet to process */
			cur_pkt = dlthis->tramp.tramp_pkts;
		}
	}

	return ret_val;
}

/*
 * Function: priv_dup_pkt_finalize
 * Description: Walk the list of duplicate image packets and finalize them.
 *	Each duplicate packet will be relocated again for the
 *	relocations that previously failed and have been adjusted
 *	to point at a trampoline.  Once all relocations for a packet
 *	have been done, write the packet into target memory.  The
 *	duplicate packet and its relocation chain are all freed
 *	after use here as they are no longer needed after this.
 */
static int priv_dup_pkt_finalize(struct dload_state *dlthis)
{
	int ret_val = 1;
	struct tramp_img_dup_pkt *cur_pkt;
	struct tramp_img_dup_relo *cur_relo;
	struct reloc_record_t *relos[MAX_RELOS_PER_PASS];
	struct doff_scnhdr_t *sect_hdr = NULL;
	s32 i;

	/* Similar to the trampoline pkt finalize, this function walks each dup
	 * pkt that was generated and performs all relocations that were
	 * deferred to a 2nd pass.  This is the equivalent of dload_data() from
	 * cload.c, but does not need the additional reorder and checksum
	 * processing as it has already been done. */
	cur_pkt = dlthis->tramp.dup_pkts;
	while ((ret_val != 0) && (cur_pkt != NULL)) {
		/* Remove the node from the list, we'll be freeing it
		 * shortly */
		dlthis->tramp.dup_pkts = cur_pkt->next;

		/* Setup the section and image offset for relocation */
		dlthis->image_secn = &dlthis->ldr_sections[cur_pkt->secnn];
		dlthis->image_offset = cur_pkt->offset;

		/* In order to get the delta run address, we need to reference
		 * the original section header.  It's a bit ugly, but needed
		 * for relo. */
		i = (s32)(dlthis->image_secn - dlthis->ldr_sections);
		sect_hdr = dlthis->sect_hdrs + i;
		dlthis->delta_runaddr = sect_hdr->ds_paddr;

		/* Walk all relos in the chain and process each. */
		cur_relo = cur_pkt->relo_chain;
		while (cur_relo != NULL) {
			/* Process them a chunk at a time to be efficient */
			for (i = 0; (i < MAX_RELOS_PER_PASS) &&
			     (cur_relo != NULL);
			     i++, cur_relo = cur_relo->next) {
				relos[i] = &cur_relo->relo;
				cur_pkt->relo_chain = cur_relo->next;
			}

			/* Do the actual relo */
			ret_val = priv_pkt_relo(dlthis,
						cur_pkt->img_pkt.i_bits,
						relos, i);
			if (ret_val == 0) {
				dload_error(dlthis,
					"Relocation of dup pkt at %x failed",
					cur_pkt->offset +
					dlthis->image_secn->run_addr);
				break;
			}

			/* Release all of these relos, we're done with them */
			while (i > 0) {
				dlthis->mysym->Deallocate(dlthis->mysym,
						GET_CONTAINER(relos[i - 1],
						struct tramp_img_dup_relo,
						relo));
				i--;
			}

			/* DO NOT ADVANCE cur_relo, IT IS ALREADY READY TO
			 * GO! */
		}

		/* Done with all relos.  Make sure we didn't have a problem and
		 * write it out to the target */
		if (ret_val != 0) {
			ret_val = dlthis->myio->writemem(dlthis->myio,
						cur_pkt->img_pkt.i_bits,
						dlthis->image_secn->load_addr +
						cur_pkt->offset,
						dlthis->image_secn,
						BYTE_TO_HOST(cur_pkt->
						img_pkt.i_packet_size));
			if (ret_val == 0) {
				dload_error(dlthis,
					    "Write to " FMT_UI32 " failed",
					    dlthis->image_secn->load_addr +
					    cur_pkt->offset);
			}

			dlthis->mysym->Deallocate(dlthis->mysym, cur_pkt);

			/* Advance to the next packet */
			cur_pkt = dlthis->tramp.dup_pkts;
		}
	}

	return ret_val;
}

/*
 * Function: priv_dup_find
 * Description: Walk the list of existing duplicate packets and find a
 *	match based on the section number and image offset.  Return
 *	the duplicate packet if found, otherwise NULL.
 */
static struct tramp_img_dup_pkt *priv_dup_find(struct dload_state *dlthis,
					       s16 secnn, u32 image_offset)
{
	struct tramp_img_dup_pkt *cur_pkt = NULL;

	for (cur_pkt = dlthis->tramp.dup_pkts;
	     cur_pkt != NULL; cur_pkt = cur_pkt->next) {
		if ((cur_pkt->secnn == secnn) &&
		    (cur_pkt->offset == image_offset)) {
			/* Found a match, break out */
			break;
		}
	}

	return cur_pkt;
}

/*
 * Function: priv_img_pkt_dup
 * Description: Duplicate the original image packet.  If this is the first
 *	time this image packet has been seen (based on section number
 *	and image offset), create a new duplicate packet and add it
 *	to the dup packet list.  If not, just get the existing one and
 *	update it with the current packet contents (since relocation
 *	on the packet is still ongoing in first pass.)  Create a
 *	duplicate of the provided relocation, but update it to point
 *	to the new trampoline symbol.  Add the new relocation dup to
 *	the dup packet's relo chain for 2nd pass relocation later.
*/
static int priv_img_pkt_dup(struct dload_state *dlthis,
			    s16 secnn, u32 image_offset,
			    struct image_packet_t *ipacket,
			    struct reloc_record_t *rp,
			    struct tramp_sym *new_tramp_sym)
{
	struct tramp_img_dup_pkt *dup_pkt = NULL;
	u32 new_dup_size;
	s32 i;
	int ret_val = 0;
	struct tramp_img_dup_relo *dup_relo = NULL;

	/* Determine if this image packet is already being tracked in the dup
	   list for other trampolines. */
	dup_pkt = priv_dup_find(dlthis, secnn, image_offset);

	if (dup_pkt == NULL) {
		/* This image packet does not exist in our tracking, so create
		 * a new one and add it to the head of the list.  The payload
		 * bytes are carved out of the same allocation, directly after
		 * the struct itself. */
		new_dup_size = sizeof(struct tramp_img_dup_pkt) +
		    ipacket->i_packet_size;
		dup_pkt = (struct tramp_img_dup_pkt *)
		    dlthis->mysym->Allocate(dlthis->mysym, new_dup_size);
		if (dup_pkt != NULL) {
			/* Save off the section and offset information */
			dup_pkt->secnn = secnn;
			dup_pkt->offset = image_offset;
			dup_pkt->relo_chain = NULL;

			/* Copy the original packet content */
			dup_pkt->img_pkt = *ipacket;
			dup_pkt->img_pkt.i_bits = (u8 *)(dup_pkt + 1);
			for (i = 0; i < ipacket->i_packet_size; i++)
				*(dup_pkt->img_pkt.i_bits + i) =
				    *(ipacket->i_bits + i);

			/* Add the packet to the dup list */
			dup_pkt->next = dlthis->tramp.dup_pkts;
			dlthis->tramp.dup_pkts = dup_pkt;
		} else
			dload_error(dlthis, "Failed to create dup packet!");
	} else {
		/* The image packet contents could have changed since
		 * trampoline detection happens during relocation of the image
		 * packets.  So, we need to update the image packet contents
		 * before adding relo information. */
		for (i = 0; i < dup_pkt->img_pkt.i_packet_size; i++)
			*(dup_pkt->img_pkt.i_bits + i) =
			    *(ipacket->i_bits + i);
	}

	/* Since the previous code may have allocated a new dup packet for us,
	   double check that we actually have one. */
	if (dup_pkt != NULL) {
		/* Allocate a new node for the relo chain.  Each image packet
		 * can potentially have multiple relocations that cause a
		 * trampoline to be generated.  So, we keep them in a chain,
		 * order is not important. */
		dup_relo = dlthis->mysym->Allocate(dlthis->mysym,
					sizeof(struct tramp_img_dup_relo));
		if (dup_relo != NULL) {
			/* Copy the relo contents, adjust for the new
			 * trampoline and add it to the list. */
			dup_relo->relo = *rp;
			dup_relo->relo.r_symndx = new_tramp_sym->index;

			dup_relo->next = dup_pkt->relo_chain;
			dup_pkt->relo_chain = dup_relo;

			/* That's it, we're done.  Make sure we update our
			 * return value to be success since everything finished
			 * ok */
			ret_val = 1;
		} else
			dload_error(dlthis, "Unable to alloc dup relo");
	}

	return ret_val;
}

/*
 * Function: dload_tramp_avail
 * Description: Check to see if the target supports a trampoline for this type
 *	of relocation.  Return true if it does, otherwise false.
 */
bool dload_tramp_avail(struct dload_state *dlthis, struct reloc_record_t *rp)
{
	bool ret_val = false;
	u16 map_index;
	u16 gen_index;

	/* Check type hash vs. target tramp table */
	map_index = HASH_FUNC(rp->r_type);
	gen_index = tramp_map[map_index];
	if (gen_index != TRAMP_NO_GEN_AVAIL)
		ret_val = true;

	return ret_val;
}

/*
 * Function: dload_tramp_generate
 * Description: Create a new trampoline for the provided image packet and
 *	relocation causing problems.  This will create the trampoline
 *	as well as duplicate/update the image packet and relocation
 *	causing the problem, which will be relo'd again during
 *	finalization.
*/
int dload_tramp_generate(struct dload_state *dlthis, s16 secnn,
			 u32 image_offset, struct image_packet_t *ipacket,
			 struct reloc_record_t *rp)
{
	u16 map_index;
	u16 gen_index;
	int ret_val = 1;
	char tramp_sym_str[TRAMP_SYM_PREFIX_LEN + TRAMP_SYM_HEX_ASCII_LEN];
	struct Local_Symbol *ref_sym;
	struct tramp_sym *new_tramp_sym;
	struct tramp_sym *new_ext_sym;
	struct tramp_string *new_tramp_str;
	u32 new_tramp_base;
	struct Local_Symbol tmp_sym;
	struct Local_Symbol ext_tmp_sym;

	/* Hash the relo type to get our generator information */
	map_index = HASH_FUNC(rp->r_type);
	gen_index = tramp_map[map_index];
	if (gen_index != TRAMP_NO_GEN_AVAIL) {
		/* If this is the first trampoline, create the section name in
		 * our string table for debug help later. */
		if (dlthis->tramp.string_head == NULL) {
			priv_tramp_string_create(dlthis,
						 strlen(TRAMP_SECT_NAME),
						 TRAMP_SECT_NAME);
		}
#ifdef ENABLE_TRAMP_DEBUG
		dload_syms_error(dlthis->mysym,
				 "Trampoline at img loc %x, references %x",
				 dlthis->ldr_sections[secnn].run_addr +
				 image_offset + rp->r_vaddr,
				 dlthis->local_symtab[rp->r_symndx].value);
#endif

		/* Generate the trampoline string, check if already defined.
		 * If the relo symbol index is -1, it means we need the section
		 * info for relo later.  To do this we'll dummy up a symbol
		 * with the section delta and run addresses. */
		if (rp->r_symndx == -1) {
			ext_tmp_sym.value =
			    dlthis->ldr_sections[secnn].run_addr;
			ext_tmp_sym.delta = dlthis->sect_hdrs[secnn].ds_paddr;
			ref_sym = &ext_tmp_sym;
		} else
			ref_sym = &(dlthis->local_symtab[rp->r_symndx]);

		priv_tramp_sym_gen_name(ref_sym->value, tramp_sym_str);
		new_tramp_sym = priv_tramp_sym_find(dlthis, tramp_sym_str);
		if (new_tramp_sym == NULL) {
			/* If tramp string not defined, create it and a new
			 * string, and symbol for it as well as the original
			 * symbol which caused the trampoline. */
			new_tramp_str = priv_tramp_string_create(dlthis,
						strlen(tramp_sym_str),
						tramp_sym_str);
			if (new_tramp_str == NULL) {
				dload_error(dlthis, "Failed to create new "
					    "trampoline string\n");
				ret_val = 0;
			} else {
				/* Allocate tramp section space for the new
				 * tramp from the target */
				new_tramp_base =
				    priv_tramp_sect_alloc(dlthis,
							  tramp_size_get());

				/* We have a string, create the new symbol and
				 * duplicate the external. */
				tmp_sym.value = new_tramp_base;
				tmp_sym.delta = 0;
				tmp_sym.secnn = -1;
				tmp_sym.sclass = 0;
				new_tramp_sym =
				    priv_tramp_sym_create(dlthis,
						new_tramp_str->index,
						&tmp_sym);

				new_ext_sym =
				    priv_tramp_sym_create(dlthis, -1, ref_sym);

				if ((new_tramp_sym != NULL) &&
				    (new_ext_sym != NULL)) {
					/* Call the image generator to get the
					 * new image data and fix up its
					 * relocations for the external
					 * symbol. */
					ret_val = priv_tgt_img_gen(dlthis,
							new_tramp_base,
							gen_index,
							new_ext_sym);

					/* Add generated image data to tramp
					 * image list */
					if (ret_val != 1) {
						dload_error(dlthis, "Failed to"
							" create image packet "
							"for trampoline\n");
					}
				} else {
					dload_error(dlthis, "Failed to create "
						"new tramp syms "
						"(%8.8X, %8.8X)\n",
						new_tramp_sym, new_ext_sym);
					ret_val = 0;
				}
			}
		}

		/* Duplicate the image data and relo record that caused the
		 * tramp, including update the relo data to point to the tramp
		 * symbol. */
		if (ret_val == 1) {
			ret_val = priv_img_pkt_dup(dlthis, secnn, image_offset,
						   ipacket, rp, new_tramp_sym);
			if (ret_val != 1) {
				dload_error(dlthis, "Failed to create dup of "
					    "original img pkt\n");
			}
		}
	}

	return ret_val;
}

/*
 * Function: dload_tramp_pkt_update
 * Description: Update the duplicate copy of this image packet, which the
 *	trampoline layer is already tracking.  This call is critical
 *	to make if trampolines were generated anywhere within the
 *	packet and first pass relo continued on the remainder.
The
 * trampoline layer needs the updated image data so when 2nd
 * pass relo is done during finalize the image packet can be
 * written to the target since all relo is done.
 */
/* NOTE(review): exported name carries an original "udpate" typo; it must be
 * kept as-is because external callers link against this symbol. */
int dload_tramp_pkt_udpate(struct dload_state *dlthis, s16 secnn,
			   u32 image_offset, struct image_packet_t *ipacket)
{
	struct tramp_img_dup_pkt *dup_pkt = NULL;
	s32 i;
	int ret_val = 0;

	/* Find the image packet in question, the caller needs us to update it
	   since a trampoline was previously generated. */
	dup_pkt = priv_dup_find(dlthis, secnn, image_offset);
	if (dup_pkt != NULL) {
		for (i = 0; i < dup_pkt->img_pkt.i_packet_size; i++)
			*(dup_pkt->img_pkt.i_bits + i) =
			    *(ipacket->i_bits + i);

		ret_val = 1;
	} else {
		dload_error(dlthis,
			"Unable to find existing DUP pkt for %x, offset %x",
			secnn, image_offset);
	}

	return ret_val;
}

/*
 * Function: dload_tramp_finalize
 * Description: If any trampolines were created, finalize everything on the
 *	target by allocating the trampoline section on the target,
 *	finalizing the trampoline symbols, finalizing the trampoline
 *	packets (write the new section to target memory) and finalize
 *	the duplicate packets by doing 2nd pass relo over them.
 */
int dload_tramp_finalize(struct dload_state *dlthis)
{
	int ret_val = 1;

	/* A non-zero next addr means at least one tramp was allocated */
	if (dlthis->tramp.tramp_sect_next_addr != 0) {
		/* Finalize strings into a flat table.  This is needed so it
		 * can be added to the debug string table later. */
		ret_val = priv_string_tbl_finalize(dlthis);

		/* Do target allocation for section BEFORE finalizing
		 * symbols. */
		if (ret_val != 0)
			ret_val = priv_tramp_sect_tgt_alloc(dlthis);

		/* Finalize symbols with their correct target information and
		 * flatten */
		if (ret_val != 0)
			ret_val = priv_tramp_sym_finalize(dlthis);

		/* Finalize all trampoline packets.  This performs the
		 * relocation on the packets as well as writing them to target
		 * memory. */
		if (ret_val != 0)
			ret_val = priv_tramp_pkt_finalize(dlthis);

		/* Perform a 2nd pass relocation on the dup list. */
		if (ret_val != 0)
			ret_val = priv_dup_pkt_finalize(dlthis);
	}

	return ret_val;
}

/*
 * Function: dload_tramp_cleanup
 * Description: Release all temporary resources used in the trampoline layer.
 *	Note that the target memory which may have been allocated and
 *	written to store the trampolines is NOT RELEASED HERE since it
 *	is potentially still in use.  It is automatically released
 *	when the module is unloaded.
 */
void dload_tramp_cleanup(struct dload_state *dlthis)
{
	struct tramp_info *tramp = &dlthis->tramp;
	struct tramp_sym *cur_sym;
	struct tramp_string *cur_string;
	struct tramp_img_pkt *cur_tramp_pkt;
	struct tramp_img_dup_pkt *cur_dup_pkt;
	struct tramp_img_dup_relo *cur_dup_relo;

	/* If there were no tramps generated, just return */
	if (tramp->tramp_sect_next_addr == 0)
		return;

	/* Destroy all tramp information */
	for (cur_sym = tramp->symbol_head;
	     cur_sym != NULL; cur_sym = tramp->symbol_head) {
		tramp->symbol_head = cur_sym->next;
		if (tramp->symbol_tail == cur_sym)
			tramp->symbol_tail = NULL;

		dlthis->mysym->Deallocate(dlthis->mysym, cur_sym);
	}

	if (tramp->final_sym_table != NULL)
		dlthis->mysym->Deallocate(dlthis->mysym,
					  tramp->final_sym_table);

	for (cur_string = tramp->string_head;
	     cur_string != NULL; cur_string = tramp->string_head) {
		tramp->string_head = cur_string->next;
		if (tramp->string_tail == cur_string)
			tramp->string_tail = NULL;

		dlthis->mysym->Deallocate(dlthis->mysym, cur_string);
	}

	if (tramp->final_string_table != NULL)
		dlthis->mysym->Deallocate(dlthis->mysym,
					  tramp->final_string_table);

	for (cur_tramp_pkt = tramp->tramp_pkts;
	     cur_tramp_pkt != NULL; cur_tramp_pkt = tramp->tramp_pkts) {
		tramp->tramp_pkts = cur_tramp_pkt->next;
		dlthis->mysym->Deallocate(dlthis->mysym, cur_tramp_pkt);
	}

	for (cur_dup_pkt = tramp->dup_pkts;
	     cur_dup_pkt != NULL; cur_dup_pkt = tramp->dup_pkts) {
		tramp->dup_pkts = cur_dup_pkt->next;
		/* Each dup pkt owns a chain of relo nodes; free them first */
		for (cur_dup_relo = cur_dup_pkt->relo_chain;
		     cur_dup_relo != NULL;
		     cur_dup_relo = cur_dup_pkt->relo_chain) {
			cur_dup_pkt->relo_chain = cur_dup_relo->next;
			dlthis->mysym->Deallocate(dlthis->mysym,
						  cur_dup_relo);
		}
		dlthis->mysym->Deallocate(dlthis->mysym, cur_dup_pkt);
	}
}
gpl-2.0
thillux/coreboot
src/vendorcode/amd/agesa/f15tn/Proc/CPU/Family/0x15/TN/F15TnInitEarlyTable.c
35
11387
/* $NoKeywords:$ */ /** * @file * * Initialize the Family 15h Trinity specific way of running early initialization. * * Returns the table of initialization steps to perform at * AmdInitEarly. * * @xrefitem bom "File Content Label" "Release Content" * @e project: AGESA * @e sub-project: CPU/FAMILY/0x15/TN * @e \$Revision: 64491 $ @e \$Date: 2012-01-23 12:37:30 -0600 (Mon, 23 Jan 2012) $ * */ /* ****************************************************************************** * * Copyright (c) 2008 - 2012, Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. 
BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ****************************************************************************** */ /*---------------------------------------------------------------------------------------- * M O D U L E S U S E D *---------------------------------------------------------------------------------------- */ #include "AGESA.h" #include "amdlib.h" #include "cpuFamilyTranslation.h" #include "Filecode.h" #include "GeneralServices.h" #include "heapManager.h" #include "Fch.h" #include "Gnb.h" #include "GnbLib.h" #include "cpuEarlyInit.h" #include "cpuF15TnPowerMgmt.h" CODE_GROUP (G2_PEI) RDATA_GROUP (G2_PEI) #define FILECODE PROC_CPU_FAMILY_0X15_TN_F15TNINITEARLYTABLE_FILECODE /*---------------------------------------------------------------------------------------- * D E F I N I T I O N S A N D M A C R O S *---------------------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------------------- * T Y P E D E F S A N D S T R U C T U R E S *---------------------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------------------- * P R O T O T Y P E S O F L O C A L F U N C T I O N S *---------------------------------------------------------------------------------------- */ VOID F15TnLoadMicrocodePatchAtEarly ( IN CPU_SPECIFIC_SERVICES *FamilyServices, IN AMD_CPU_EARLY_PARAMS *EarlyParams, IN AMD_CONFIG_PARAMS *StdHeader ); VOID 
GetF15TnEarlyInitOnCoreTable ( IN CPU_SPECIFIC_SERVICES *FamilyServices, OUT CONST S_PERFORM_EARLY_INIT_ON_CORE **Table, IN AMD_CPU_EARLY_PARAMS *EarlyParams, IN AMD_CONFIG_PARAMS *StdHeader ); VOID ApplyWorkaroundForFchErratum39 ( IN AMD_CONFIG_PARAMS *StdHeader ); VOID F15TnNbPstateForceBeforeApLaunchAtEarly ( IN CPU_SPECIFIC_SERVICES *FamilyServices, IN AMD_CPU_EARLY_PARAMS *EarlyParams, IN AMD_CONFIG_PARAMS *StdHeader ); /*---------------------------------------------------------------------------------------- * E X P O R T E D F U N C T I O N S *---------------------------------------------------------------------------------------- */ extern F_PERFORM_EARLY_INIT_ON_CORE SetRegistersFromTablesAtEarly; extern F_PERFORM_EARLY_INIT_ON_CORE F15SetBrandIdRegistersAtEarly; extern F_PERFORM_EARLY_INIT_ON_CORE LocalApicInitializationAtEarly; CONST S_PERFORM_EARLY_INIT_ON_CORE ROMDATA F15TnEarlyInitOnCoreTable[] = { {SetRegistersFromTablesAtEarly, PERFORM_EARLY_ANY_CONDITION}, {F15SetBrandIdRegistersAtEarly, PERFORM_EARLY_ANY_CONDITION}, {LocalApicInitializationAtEarly, PERFORM_EARLY_ANY_CONDITION}, {F15TnLoadMicrocodePatchAtEarly, PERFORM_EARLY_ANY_CONDITION}, {F15TnNbPstateForceBeforeApLaunchAtEarly, PERFORM_EARLY_WARM_RESET}, {NULL, 0} }; /*------------------------------------------------------------------------------------*/ /** * Initializer routine that may be invoked at AmdCpuEarly to return the steps that a * processor that uses the standard initialization steps should take. * * @CpuServiceMethod{::F_GET_EARLY_INIT_TABLE}. * * @param[in] FamilyServices The current Family Specific Services. * @param[out] Table Table of appropriate init steps for the executing core. * @param[in] EarlyParams Service Interface structure to initialize. * @param[in] StdHeader Opaque handle to standard config header. 
 */
VOID
GetF15TnEarlyInitOnCoreTable (
  IN       CPU_SPECIFIC_SERVICES  *FamilyServices,
     OUT   CONST S_PERFORM_EARLY_INIT_ON_CORE  **Table,
  IN       AMD_CPU_EARLY_PARAMS   *EarlyParams,
  IN       AMD_CONFIG_PARAMS      *StdHeader
  )
{
  // Every core uses the same fixed step list declared above; FamilyServices,
  // EarlyParams and StdHeader are unused but required by the method signature.
  *Table = F15TnEarlyInitOnCoreTable;
}

/*---------------------------------------------------------------------------------------*/
/**
 * Update microcode patch in current processor for Family15h TN.
 *
 * This function acts as a wrapper for calling the LoadMicrocodePatch
 * routine at AmdInitEarly, then applies the FCH erratum 39 workaround.
 *
 * @param[in]  FamilyServices  The current Family Specific Services (unused here).
 * @param[in]  EarlyParams     Service parameters (unused here).
 * @param[in]  StdHeader       Config handle for library and services.
 *
 */
VOID
F15TnLoadMicrocodePatchAtEarly (
  IN       CPU_SPECIFIC_SERVICES  *FamilyServices,
  IN       AMD_CPU_EARLY_PARAMS   *EarlyParams,
  IN       AMD_CONFIG_PARAMS      *StdHeader
  )
{
  BOOLEAN IsPatchLoaded;

  AGESA_TESTPOINT (TpProcCpuLoadUcode, StdHeader);
  // Only the primary core of each core pair loads the patch.
  if (IsCorePairPrimary (FirstCoreIsComputeUnitPrimary, StdHeader)) {
    // NOTE(review): IsPatchLoaded is assigned but never read; a load failure
    // is silently ignored here — confirm this is intentional.
    IsPatchLoaded = LoadMicrocodePatch (StdHeader);
  }

  // After microcode patch has been loaded, apply the workaround for FCH erratum 39
  ApplyWorkaroundForFchErratum39 (StdHeader);
}

/*---------------------------------------------------------------------------------------*/
/**
 * Apply the workaround for FCH H2/H3 erratum #39.
 *
 * This function detects the FCH version and applies the appropriate workaround,
 * if required.  The FCH is probed at PCI bus 0, device 0x14, function 0.
 *
 * @param[in]  StdHeader  Config handle for library and services.
 *
 */
VOID
ApplyWorkaroundForFchErratum39 (
  IN       AMD_CONFIG_PARAMS  *StdHeader
  )
{
  UINT8           MiscReg51;
  UINT8           RevisionId;
  UINT16          AcpiPmTmrBlk;
  UINT32          VendorIdDeviceId;
  UINT64          MsrValue;
  PCI_ADDR        PciAddress;
  AGESA_STATUS    IgnoredSts;
  CPU_LOGICAL_ID  LogicalId;

  // Read Vendor ID / Device ID
  PciAddress.AddressValue = MAKE_SBDFO (0, 0, 0x14, 0, 0);
  LibAmdPciRead (AccessWidth32, PciAddress, &VendorIdDeviceId, StdHeader);

  // For Hudson based system, perform workaround (0x1022 = AMD vendor ID,
  // 0x780B = Hudson FCH device ID)
  if (VendorIdDeviceId == 0x780B1022) {
    PciAddress.Address.Register = 0x8;
    LibAmdPciRead (AccessWidth8, PciAddress, &RevisionId, StdHeader);
    if ((RevisionId == 0x14) && IsBsp (StdHeader, &IgnoredSts)) {
      // Enable hardware workaround by setting Misc_reg x51[0]; BSP-only,
      // since the MMIO register is chip-global.
      LibAmdMemRead (AccessWidth8, (UINT64) (ACPI_MMIO_BASE + MISC_BASE + 0x51), &MiscReg51, StdHeader);
      MiscReg51 |= BIT0;
      LibAmdMemWrite (AccessWidth8, (UINT64) (ACPI_MMIO_BASE + MISC_BASE + 0x51), &MiscReg51, StdHeader);
    } else if (RevisionId == 0x13) {
      GetLogicalIdOfCurrentCore (&LogicalId, StdHeader);
      if ((LogicalId.Revision & AMD_F15_TN_GT_A0) != 0) {
        // For revs A1+, set up the C0010055 MSR: read the 16-bit ACPI PM
        // timer block address via the PM index/data port pair (0xCD6/0xCD7,
        // index 0x64), merge it into the MSR and set BIT30 (presumably an
        // enable bit — TODO confirm against the BKDG).
        GnbLibIndirectIoBlockRead (0xCD6, 0xCD7, AccessWidth8, 0x64, 2, &AcpiPmTmrBlk, StdHeader);
        LibAmdMsrRead (0xC0010055, &MsrValue, StdHeader);
        MsrValue |= BIT30;
        MsrValue |= AcpiPmTmrBlk;
        LibAmdMsrWrite (0xC0010055, &MsrValue, StdHeader);
      }
    }
  }
}

/*---------------------------------------------------------------------------------------*/
/**
 * Prevent NB P-state transitions prior to AP launch on Family 15h TN.
 *
 * This function determines the current NB P-state and forces the NB to remain
 * in that P-state.  The original NB P-state control register value is saved to
 * the heap (AMD_CPU_NB_PSTATE_FIXUP_HANDLE) — presumably restored later by
 * matching fixup code outside this file.
 *
 * @param[in]  FamilyServices  The current Family Specific Services.
 * @param[in]  EarlyParams     Service parameters.
 * @param[in]  StdHeader       Config handle for library and services.
 *
 */
VOID
F15TnNbPstateForceBeforeApLaunchAtEarly (
  IN       CPU_SPECIFIC_SERVICES  *FamilyServices,
  IN       AMD_CPU_EARLY_PARAMS   *EarlyParams,
  IN       AMD_CONFIG_PARAMS      *StdHeader
  )
{
  UINT64                   MsrValue;
  UINT64                   PerfCtrlSave;
  UINT64                   PerfStsSave;
  PCI_ADDR                 PciAddress;
  AGESA_STATUS             IgnoredSts;
  ALLOCATE_HEAP_PARAMS     Alloc;
  NB_PSTATE_CTRL_REGISTER  NbPsCtrl;

  // BSP only, and only when NB P-states are enabled on this platform.
  if (IsBsp (StdHeader, &IgnoredSts) && FamilyServices->IsNbPstateEnabled (FamilyServices, &EarlyParams->PlatformConfig, StdHeader)) {
    // Save NB performance counter 3 control/count, then program it with
    // event select 0x...4E9 (NOTE(review): event encoding used to detect
    // NB P-state residency — confirm against the BKDG) and zero the count.
    LibAmdMsrRead (MSR_NB_PERF_CTL3, &PerfCtrlSave, StdHeader);
    MsrValue = 0x00000006004004E9;
    LibAmdMsrRead (MSR_NB_PERF_CTR3, &PerfStsSave, StdHeader);
    LibAmdMsrWrite (MSR_NB_PERF_CTL3, &MsrValue, StdHeader);
    MsrValue = 0;
    LibAmdMsrWrite (MSR_NB_PERF_CTR3, &MsrValue, StdHeader);

    // Snapshot the current NB P-state control register and stash it on the
    // heap so the original settings can be restored after AP launch.
    PciAddress.AddressValue = NB_PSTATE_CTRL_PCI_ADDR;
    LibAmdPciRead (AccessWidth32, PciAddress, &NbPsCtrl, StdHeader);
    Alloc.RequestedBufferSize = sizeof (NB_PSTATE_CTRL_REGISTER);
    Alloc.BufferHandle = AMD_CPU_NB_PSTATE_FIXUP_HANDLE;
    Alloc.Persist = 0;
    if (HeapAllocateBuffer (&Alloc, StdHeader) == AGESA_SUCCESS) {
      *((NB_PSTATE_CTRL_REGISTER *) Alloc.BufferPtr) = NbPsCtrl;
    } else {
      ASSERT (FALSE);
    }

    // If the counter stayed at zero the monitored event never fired; force
    // the NB to hold its current P-state accordingly.
    LibAmdMsrRead (MSR_NB_PERF_CTR3, &MsrValue, StdHeader);
    if (MsrValue == 0) {
      NbPsCtrl.SwNbPstateLoDis = 1;
    } else {
      NbPsCtrl.SwNbPstateLoDis = 0;
      NbPsCtrl.NbPstateDisOnP0 = 0;
      NbPsCtrl.NbPstateThreshold = 0;
    }
    LibAmdPciWrite (AccessWidth32, PciAddress, &NbPsCtrl, StdHeader);

    // Restore the performance counter state we clobbered above.
    LibAmdMsrWrite (MSR_NB_PERF_CTL3, &PerfCtrlSave, StdHeader);
    LibAmdMsrWrite (MSR_NB_PERF_CTR3, &PerfStsSave, StdHeader);
  }
}
gpl-2.0
artemh/asuswrt-merlin
release/src-rt/linux/linux-2.6/arch/i386/kernel/process.c
35
23693
/* * linux/arch/i386/kernel/process.c * * Copyright (C) 1995 Linus Torvalds * * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 */ /* * This file handles the architecture-dependent parts of process handling.. */ #include <stdarg.h> #include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/elfcore.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/user.h> #include <linux/a.out.h> #include <linux/interrupt.h> #include <linux/utsname.h> #include <linux/delay.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/mc146818rtc.h> #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/ptrace.h> #include <linux/random.h> #include <linux/personality.h> #include <linux/tick.h> #include <linux/percpu.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/io.h> #include <asm/ldt.h> #include <asm/processor.h> #include <asm/i387.h> #include <asm/desc.h> #include <asm/vm86.h> #ifdef CONFIG_MATH_EMULATION #include <asm/math_emu.h> #endif #include <linux/err.h> #include <asm/tlbflush.h> #include <asm/cpu.h> asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); static int hlt_counter; unsigned long boot_option_idle_override = 0; EXPORT_SYMBOL(boot_option_idle_override); DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; EXPORT_PER_CPU_SYMBOL(current_task); DEFINE_PER_CPU(int, cpu_number); EXPORT_PER_CPU_SYMBOL(cpu_number); /* * Return saved PC of a blocked thread. */ unsigned long thread_saved_pc(struct task_struct *tsk) { return ((unsigned long *)tsk->thread.esp)[3]; } /* * Powermanagement idle function, if any.. 
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

/* Per-CPU flag used by cpu_idle_wait() to detect a pass through the idle loop. */
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

/* Callers bump hlt_counter to stop default_idle() from executing HLT. */
void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		local_irq_disable();
		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	} else {
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle (void)
{
	cpu_relax();
}

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
			void (*idle)(void);

			/* Signal cpu_idle_wait() that this CPU reached idle. */
			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			check_pgt_cache();
			rmb();
			/* Re-read pm_idle each pass: it may change at runtime. */
			idle = pm_idle;

			if (!idle)
				idle = default_idle;

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * Block until every online CPU has passed through its idle loop at least
 * once (each pass clears that CPU's cpu_idle_state flag, see cpu_idle()).
 * Temporarily pins the caller to this CPU; affinity is restored on return.
 */
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map, tmp = current->cpus_allowed;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));

	set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
*/ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) { if (!need_resched()) { __monitor((void *)&current_thread_info()->flags, 0, 0); smp_mb(); if (!need_resched()) __mwait(eax, ecx); } } /* Default MONITOR/MWAIT with no hints, used for default C1 state */ static void mwait_idle(void) { local_irq_enable(); mwait_idle_with_hints(0, 0); } void __devinit select_idle_routine(const struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_MWAIT)) { printk("monitor/mwait feature present.\n"); /* * Skip, if setup has overridden idle. * One CPU supports mwait => All CPUs supports mwait */ if (!pm_idle) { printk("using mwait in idle threads.\n"); pm_idle = mwait_idle; } } } static int __init idle_setup(char *str) { if (!strcmp(str, "poll")) { printk("using polling idle threads.\n"); pm_idle = poll_idle; #ifdef CONFIG_X86_SMP if (smp_num_siblings > 1) printk("WARNING: polling idle and HT enabled, performance may degrade.\n"); #endif } else if (!strcmp(str, "mwait")) force_mwait = 1; else return -1; boot_option_idle_override = 1; return 0; } early_param("idle", idle_setup); void show_regs(struct pt_regs * regs) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; printk("\n"); printk("Pid: %d, comm: %20s\n", current->pid, current->comm); printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); print_symbol("EIP is at %s\n", regs->eip); if (user_mode_vm(regs)) printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); printk(" EFLAGS: %08lx %s (%s %.*s)\n", regs->eflags, print_tainted(), init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", regs->eax,regs->ebx,regs->ecx,regs->edx); printk("ESI: %08lx EDI: %08lx EBP: %08lx", regs->esi, regs->edi, regs->ebp); printk(" DS: %04x ES: %04x FS: %04x\n", 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs); cr0 = read_cr0(); cr2 = read_cr2(); cr3 = read_cr3(); cr4 = read_cr4_safe(); 
printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); show_trace(NULL, regs, &regs->esp); } /* * This gets run with %ebx containing the * function to call, and %edx containing * the "args". */ extern void kernel_thread_helper(void); /* * Create a kernel thread */ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) { struct pt_regs regs; memset(&regs, 0, sizeof(regs)); regs.ebx = (unsigned long) fn; regs.edx = (unsigned long) arg; regs.xds = __USER_DS; regs.xes = __USER_DS; regs.xfs = __KERNEL_PERCPU; regs.orig_eax = -1; regs.eip = (unsigned long) kernel_thread_helper; regs.xcs = __KERNEL_CS | get_kernel_rpl(); regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2; /* Ok, create the new process.. */ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); } EXPORT_SYMBOL(kernel_thread); /* * Free current thread data structures etc.. */ void exit_thread(void) { /* The process may have allocated an io port bitmap... nuke it. */ if (unlikely(test_thread_flag(TIF_IO_BITMAP))) { struct task_struct *tsk = current; struct thread_struct *t = &tsk->thread; int cpu = get_cpu(); struct tss_struct *tss = &per_cpu(init_tss, cpu); kfree(t->io_bitmap_ptr); t->io_bitmap_ptr = NULL; clear_thread_flag(TIF_IO_BITMAP); /* * Careful, clear this in the TSS too: */ memset(tss->io_bitmap, 0xff, tss->io_bitmap_max); t->io_bitmap_max = 0; tss->io_bitmap_owner = NULL; tss->io_bitmap_max = 0; tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; put_cpu(); } } void flush_thread(void) { struct task_struct *tsk = current; memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); clear_tsk_thread_flag(tsk, TIF_DEBUG); /* * Forget coprocessor state.. 
*/ clear_fpu(tsk); clear_used_math(); } void release_thread(struct task_struct *dead_task) { BUG_ON(dead_task->mm); release_vm86_irqs(dead_task); } /* * This gets called before we allocate a new thread and copy * the current task into it. */ void prepare_to_copy(struct task_struct *tsk) { unlazy_fpu(tsk); } int copy_thread(int nr, unsigned long clone_flags, unsigned long esp, unsigned long unused, struct task_struct * p, struct pt_regs * regs) { struct pt_regs * childregs; struct task_struct *tsk; int err; childregs = task_pt_regs(p); *childregs = *regs; childregs->eax = 0; childregs->esp = esp; p->thread.esp = (unsigned long) childregs; p->thread.esp0 = (unsigned long) (childregs+1); p->thread.eip = (unsigned long) ret_from_fork; savesegment(gs,p->thread.gs); tsk = current; if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, IO_BITMAP_BYTES, GFP_KERNEL); if (!p->thread.io_bitmap_ptr) { p->thread.io_bitmap_max = 0; return -ENOMEM; } set_tsk_thread_flag(p, TIF_IO_BITMAP); } /* * Set a new TLS for the child thread? */ if (clone_flags & CLONE_SETTLS) { struct desc_struct *desc; struct user_desc info; int idx; err = -EFAULT; if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info))) goto out; err = -EINVAL; if (LDT_empty(&info)) goto out; idx = info.entry_number; if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) goto out; desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; desc->a = LDT_entry_a(&info); desc->b = LDT_entry_b(&info); } err = 0; out: if (err && p->thread.io_bitmap_ptr) { kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } return err; } /* * fill in the user structure for a core dump.. */ void dump_thread(struct pt_regs * regs, struct user * dump) { int i; /* changed the size calculations - should hopefully work better. 
lbt */ dump->magic = CMAGIC; dump->start_code = 0; dump->start_stack = regs->esp & ~(PAGE_SIZE - 1); dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; dump->u_dsize -= dump->u_tsize; dump->u_ssize = 0; for (i = 0; i < 8; i++) dump->u_debugreg[i] = current->thread.debugreg[i]; if (dump->start_stack < TASK_SIZE) dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; dump->regs.ebx = regs->ebx; dump->regs.ecx = regs->ecx; dump->regs.edx = regs->edx; dump->regs.esi = regs->esi; dump->regs.edi = regs->edi; dump->regs.ebp = regs->ebp; dump->regs.eax = regs->eax; dump->regs.ds = regs->xds; dump->regs.es = regs->xes; dump->regs.fs = regs->xfs; savesegment(gs,dump->regs.gs); dump->regs.orig_eax = regs->orig_eax; dump->regs.eip = regs->eip; dump->regs.cs = regs->xcs; dump->regs.eflags = regs->eflags; dump->regs.esp = regs->esp; dump->regs.ss = regs->xss; dump->u_fpvalid = dump_fpu (regs, &dump->i387); } EXPORT_SYMBOL(dump_thread); /* * Capture the user space registers if the task is not running (in user space) */ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) { struct pt_regs ptregs = *task_pt_regs(tsk); ptregs.xcs &= 0xffff; ptregs.xds &= 0xffff; ptregs.xes &= 0xffff; ptregs.xss &= 0xffff; elf_core_copy_regs(regs, &ptregs); return 1; } static noinline void __switch_to_xtra(struct task_struct *next_p, struct tss_struct *tss) { struct thread_struct *next; next = &next_p->thread; if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { set_debugreg(next->debugreg[0], 0); set_debugreg(next->debugreg[1], 1); set_debugreg(next->debugreg[2], 2); set_debugreg(next->debugreg[3], 3); /* no 4 and 5 */ set_debugreg(next->debugreg[6], 6); set_debugreg(next->debugreg[7], 7); } if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { /* * Disable the bitmap via an invalid offset. 
We still cache * the previous bitmap owner and the IO bitmap contents: */ tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; return; } if (likely(next == tss->io_bitmap_owner)) { /* * Previous owner of the bitmap (hence the bitmap content) * matches the next task, we dont have to do anything but * to set a valid offset in the TSS: */ tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; return; } /* * Lazy TSS's I/O bitmap copy. We set an invalid offset here * and we let the task to get a GPF in case an I/O instruction * is performed. The handler of the GPF will verify that the * faulting task has a valid I/O bitmap and, it true, does the * real copy and restart the instruction. This will save us * redundant copies when the currently switched task does not * perform any I/O during its timeslice. */ tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY; } /* * This function selects if the context switch from prev to next * has to tweak the TSC disable bit in the cr4. */ static inline void disable_tsc(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_info *prev, *next; /* * gcc should eliminate the ->thread_info dereference if * has_secure_computing returns 0 at compile time (SECCOMP=n). */ prev = task_thread_info(prev_p); next = task_thread_info(next_p); if (has_secure_computing(prev) || has_secure_computing(next)) { /* slow path here */ if (has_secure_computing(prev) && !has_secure_computing(next)) { write_cr4(read_cr4() & ~X86_CR4_TSD); } else if (!has_secure_computing(prev) && has_secure_computing(next)) write_cr4(read_cr4() | X86_CR4_TSD); } } /* * switch_to(x,yn) should switch tasks from x to y. * * We fsave/fwait so that an exception goes off at the right time * (as a call from the fsave or fwait in effect) rather than to * the wrong process. Lazy FP saving no longer makes any sense * with modern CPU's, and this simplifies a lot of things (SMP * and UP become the same). * * NOTE! We used to use the x86 hardware context switching. 
The * reason for not using it any more becomes apparent when you * try to recover gracefully from saved state that is no longer * valid (stale segment register values in particular). With the * hardware task-switch, there is no way to fix up bad state in * a reasonable manner. * * The fact that Intel documents the hardware task-switching to * be slow is a fairly red herring - this code is not noticeably * faster. However, there _is_ some room for improvement here, * so the performance issues may eventually be a valid point. * More important, however, is the fact that this allows us much * more flexibility. * * The return value (in %eax) will be the "prev" task after * the task-switch, and shows up in ret_from_fork in entry.S, * for example. */ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; int cpu = smp_processor_id(); struct tss_struct *tss = &per_cpu(init_tss, cpu); /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ __unlazy_fpu(prev_p); /* we're going to use this soon, after a few expensive things */ if (next_p->fpu_counter > 5) prefetch(&next->i387.fxsave); /* * Reload esp0. */ load_esp0(tss, next); /* * Save away %gs. No need to save %fs, as it was saved on the * stack on entry. No need to save %es and %ds, as those are * always kernel segments while inside the kernel. Doing this * before setting the new TLS descriptors avoids the situation * where we temporarily have non-reloadable segments in %fs * and %gs. This could be an issue if the NMI handler ever * used %fs or %gs (it does not today), or if the kernel is * running inside of a hypervisor layer. */ savesegment(gs, prev->gs); /* * Load the per-thread Thread-Local Storage descriptor. */ load_TLS(next, cpu); /* * Restore IOPL if needed. In normal use, the flags restore * in the switch assembly will handle this. 
But if the kernel * is running virtualized at a non-zero CPL, the popf will * not restore flags, so it must be done in a separate step. */ if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) set_iopl_mask(next->iopl); /* * Now maybe handle debug registers and/or IO bitmaps */ if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW) || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))) __switch_to_xtra(next_p, tss); disable_tsc(prev_p, next_p); /* * Leave lazy mode, flushing any hypercalls made here. * This must be done before restoring TLS segments so * the GDT and LDT are properly updated, and must be * done before math_state_restore, so the TS bit is up * to date. */ arch_leave_lazy_cpu_mode(); /* If the task has used fpu the last 5 timeslices, just do a full * restore of the math state immediately to avoid the trap; the * chances of needing FPU soon are obviously high now */ if (next_p->fpu_counter > 5) math_state_restore(); /* * Restore %gs if needed (which is common) */ if (prev->gs | next->gs) loadsegment(gs, next->gs); x86_write_percpu(current_task, next_p); return prev_p; } asmlinkage int sys_fork(struct pt_regs regs) { return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL); } asmlinkage int sys_clone(struct pt_regs regs) { unsigned long clone_flags; unsigned long newsp; int __user *parent_tidptr, *child_tidptr; clone_flags = regs.ebx; newsp = regs.ecx; parent_tidptr = (int __user *)regs.edx; child_tidptr = (int __user *)regs.edi; if (!newsp) newsp = regs.esp; return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr); } /* * This is trivial, and on the face of it looks like it * could equally well be done in user mode. * * Not so, for quite unobvious reasons - register pressure. * In user mode vfork() cannot have a stack frame, and if * done by calling the "clone()" system call directly, you * do not have enough call-clobbered registers to hold all * the information you need. 
*/ asmlinkage int sys_vfork(struct pt_regs regs) { return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL); } /* * sys_execve() executes a new program. */ asmlinkage int sys_execve(struct pt_regs regs) { int error; char * filename; filename = getname((char __user *) regs.ebx); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; error = do_execve(filename, (char __user * __user *) regs.ecx, (char __user * __user *) regs.edx, &regs); if (error == 0) { task_lock(current); current->ptrace &= ~PT_DTRACE; task_unlock(current); /* Make sure we don't return using sysenter.. */ set_thread_flag(TIF_IRET); } putname(filename); out: return error; } #define top_esp (THREAD_SIZE - sizeof(unsigned long)) #define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) unsigned long get_wchan(struct task_struct *p) { unsigned long ebp, esp, eip; unsigned long stack_page; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; stack_page = (unsigned long)task_stack_page(p); esp = p->thread.esp; if (!stack_page || esp < stack_page || esp > top_esp+stack_page) return 0; /* include/asm-i386/system.h:switch_to() pushes ebp last. */ ebp = *(unsigned long *) esp; do { if (ebp < stack_page || ebp > top_ebp+stack_page) return 0; eip = *(unsigned long *) (ebp+4); if (!in_sched_functions(eip)) return eip; ebp = *(unsigned long *) ebp; } while (count++ < 16); return 0; } /* * sys_alloc_thread_area: get a yet unused TLS descriptor index. 
 */
/* Return the GDT index of the first empty TLS slot, or -ESRCH if all in use. */
static int get_free_idx(void)
{
	struct thread_struct *t = &current->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (desc_empty(t->tls_array + idx))
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
{
	struct thread_struct *t = &current->thread;
	struct user_desc info;
	struct desc_struct *desc;
	int cpu, idx;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;
	idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		/* Report the chosen index back to userspace. */
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	/* An "empty" user_desc clears the slot instead of installing it. */
	if (LDT_empty(&info)) {
		desc->a = 0;
		desc->b = 0;
	} else {
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}
	load_TLS(t, cpu);
	put_cpu();

	return 0;
}

/*
 * Get the current Thread-Local Storage area:
 * The GET_* macros below unpack the fields of a raw two-word GDT
 * descriptor (desc->a = low word, desc->b = high word).
 */

#define GET_BASE(desc) ( \
	(((desc)->a >> 16) & 0x0000ffff) | \
	(((desc)->b << 16) & 0x00ff0000) | \
	( (desc)->b & 0xff000000)   )

#define GET_LIMIT(desc) ( \
	((desc)->a & 0x0ffff) | \
	 ((desc)->b & 0xf0000) )

#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >> 9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)

asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
{
	struct user_desc info;
	struct desc_struct *desc;
	int idx;

	if (get_user(idx, &u_info->entry_number))
		return -EFAULT;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	memset(&info, 0, sizeof(info));

	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

	info.entry_number = idx;
	info.base_addr = GET_BASE(desc);
	info.limit = GET_LIMIT(desc);
	info.seg_32bit = GET_32BIT(desc);
	info.contents = GET_CONTENTS(desc);
	info.read_exec_only = !GET_WRITABLE(desc);
	info.limit_in_pages = GET_LIMIT_PAGES(desc);
	info.seg_not_present = !GET_PRESENT(desc);
	info.useable = GET_USEABLE(desc);

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/*
 * Randomize the initial stack pointer (unless the personality opts out),
 * then round down to 16-byte alignment.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
gpl-2.0
rprata/boost
libs/interprocess/test/deque_test.cpp
35
11171
////////////////////////////////////////////////////////////////////////////// // // (C) Copyright Ion Gaztanaga 2004-2012. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // See http://www.boost.org/libs/interprocess for documentation. // ////////////////////////////////////////////////////////////////////////////// #include <boost/interprocess/detail/config_begin.hpp> #include <memory> #include <deque> #include <iostream> #include <list> #include <boost/interprocess/managed_shared_memory.hpp> #include <boost/interprocess/containers/deque.hpp> #include <boost/interprocess/indexes/flat_map_index.hpp> #include "print_container.hpp" #include "check_equal_containers.hpp" #include "dummy_test_allocator.hpp" #include "movable_int.hpp" #include <boost/interprocess/allocators/allocator.hpp> #include "allocator_v1.hpp" #include <boost/interprocess/exceptions.hpp> #include <boost/move/utility_core.hpp> #include <boost/interprocess/detail/mpl.hpp> #include <boost/interprocess/detail/type_traits.hpp> #include <string> #include "get_process_id_name.hpp" #include "emplace_test.hpp" /////////////////////////////////////////////////////////////////// // // // This example repeats the same operations with std::deque and // // shmem_deque using the node allocator // // and compares the values of both containers // // // /////////////////////////////////////////////////////////////////// using namespace boost::interprocess; //Function to check if both sets are equal template<class V1, class V2> bool copyable_only(V1 *, V2 *, ipcdetail::false_type) { return true; } //Function to check if both sets are equal template<class V1, class V2> bool copyable_only(V1 *shmdeque, V2 *stddeque, ipcdetail::true_type) { typedef typename V1::value_type IntType; std::size_t size = shmdeque->size(); stddeque->insert(stddeque->end(), 50, 1); shmdeque->insert(shmdeque->end(), 50, IntType(1)); 
if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; { IntType move_me(1); stddeque->insert(stddeque->begin()+size/2, 50, 1); shmdeque->insert(shmdeque->begin()+size/2, 50, boost::move(move_me)); if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; } { IntType move_me(2); shmdeque->assign(shmdeque->size()/2, boost::move(move_me)); stddeque->assign(stddeque->size()/2, 2); if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; } { IntType move_me(1); stddeque->clear(); shmdeque->clear(); stddeque->insert(stddeque->begin(), 50, 1); shmdeque->insert(shmdeque->begin(), 50, boost::move(move_me)); if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; stddeque->insert(stddeque->begin()+20, 50, 1); shmdeque->insert(shmdeque->begin()+20, 50, boost::move(move_me)); if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; stddeque->insert(stddeque->begin()+20, 20, 1); shmdeque->insert(shmdeque->begin()+20, 20, boost::move(move_me)); if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; } { IntType move_me(1); stddeque->clear(); shmdeque->clear(); stddeque->insert(stddeque->end(), 50, 1); shmdeque->insert(shmdeque->end(), 50, boost::move(move_me)); if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; stddeque->insert(stddeque->end()-20, 50, 1); shmdeque->insert(shmdeque->end()-20, 50, boost::move(move_me)); if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; stddeque->insert(stddeque->end()-20, 20, 1); shmdeque->insert(shmdeque->end()-20, 20, boost::move(move_me)); if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; } return true; } template<class IntType, template<class T, class SegmentManager> class AllocatorType > bool do_test() { //Customize managed_shared_memory class typedef basic_managed_shared_memory <char, //simple_seq_fit<mutex_family>, rbtree_best_fit<mutex_family>, //flat_map_index iset_index > my_managed_shared_memory; //Alias AllocatorType type typedef 
AllocatorType<IntType, my_managed_shared_memory::segment_manager> shmem_allocator_t; //Alias deque types typedef deque<IntType, shmem_allocator_t> MyShmDeque; typedef std::deque<int> MyStdDeque; const int Memsize = 65536; const char *const shMemName = test::get_process_id_name(); const int max = 100; /*try*/{ shared_memory_object::remove(shMemName); //Create shared memory my_managed_shared_memory segment(create_only, shMemName, Memsize); segment.reserve_named_objects(100); //Shared memory allocator must be always be initialized //since it has no default constructor MyShmDeque *shmdeque = segment.template construct<MyShmDeque>("MyShmDeque") (segment.get_segment_manager()); MyStdDeque *stddeque = new MyStdDeque; /*try*/{ //Compare several shared memory deque operations with std::deque for(int i = 0; i < max*50; ++i){ IntType move_me(i); shmdeque->insert(shmdeque->end(), boost::move(move_me)); stddeque->insert(stddeque->end(), i); shmdeque->insert(shmdeque->end(), IntType(i)); stddeque->insert(stddeque->end(), int(i)); } if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; shmdeque->clear(); stddeque->clear(); for(int i = 0; i < max*50; ++i){ IntType move_me(i); shmdeque->push_back(boost::move(move_me)); stddeque->push_back(i); shmdeque->push_back(IntType(i)); stddeque->push_back(i); } if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; shmdeque->clear(); stddeque->clear(); for(int i = 0; i < max*50; ++i){ IntType move_me(i); shmdeque->push_front(boost::move(move_me)); stddeque->push_front(i); shmdeque->push_front(IntType(i)); stddeque->push_front(int(i)); } if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; typename MyShmDeque::iterator it; typename MyShmDeque::const_iterator cit = it; (void)cit; shmdeque->erase(shmdeque->begin()++); stddeque->erase(stddeque->begin()++); if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; shmdeque->erase(shmdeque->begin()); stddeque->erase(stddeque->begin()); 
if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; { //Initialize values IntType aux_vect[50]; for(int i = 0; i < 50; ++i){ IntType move_me (-1); aux_vect[i] = boost::move(move_me); } int aux_vect2[50]; for(int i = 0; i < 50; ++i){ aux_vect2[i] = -1; } shmdeque->insert(shmdeque->end() ,::boost::make_move_iterator(&aux_vect[0]) ,::boost::make_move_iterator(aux_vect + 50)); stddeque->insert(stddeque->end(), aux_vect2, aux_vect2 + 50); if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; for(int i = 0, j = static_cast<int>(shmdeque->size()); i < j; ++i){ shmdeque->erase(shmdeque->begin()); stddeque->erase(stddeque->begin()); } if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; } { IntType aux_vect[50]; for(int i = 0; i < 50; ++i){ IntType move_me(-1); aux_vect[i] = boost::move(move_me); } int aux_vect2[50]; for(int i = 0; i < 50; ++i){ aux_vect2[i] = -1; } shmdeque->insert(shmdeque->begin() ,::boost::make_move_iterator(&aux_vect[0]) ,::boost::make_move_iterator(aux_vect + 50)); stddeque->insert(stddeque->begin(), aux_vect2, aux_vect2 + 50); if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; } if(!copyable_only(shmdeque, stddeque ,ipcdetail::bool_<!ipcdetail::is_same<IntType, test::movable_int>::value>())){ return false; } shmdeque->erase(shmdeque->begin()); stddeque->erase(stddeque->begin()); if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; for(int i = 0; i < max; ++i){ IntType move_me(i); shmdeque->insert(shmdeque->begin(), boost::move(move_me)); stddeque->insert(stddeque->begin(), i); } if(!test::CheckEqualContainers(shmdeque, stddeque)) return false; //Test insertion from list { std::list<int> l(50, int(1)); shmdeque->insert(shmdeque->begin(), l.begin(), l.end()); stddeque->insert(stddeque->begin(), l.begin(), l.end()); if(!test::CheckEqualContainers(shmdeque, stddeque)) return 1; shmdeque->assign(l.begin(), l.end()); stddeque->assign(l.begin(), l.end()); 
if(!test::CheckEqualContainers(shmdeque, stddeque)) return 1; } shmdeque->resize(100); stddeque->resize(100); if(!test::CheckEqualContainers(shmdeque, stddeque)) return 1; shmdeque->resize(200); stddeque->resize(200); if(!test::CheckEqualContainers(shmdeque, stddeque)) return 1; segment.template destroy<MyShmDeque>("MyShmDeque"); delete stddeque; segment.shrink_to_fit_indexes(); if(!segment.all_memory_deallocated()) return false; }/* catch(std::exception &ex){ std::cout << ex.what() << std::endl; return false; }*/ std::cout << std::endl << "Test OK!" << std::endl; }/* catch(...){ shared_memory_object::remove(shMemName); throw; }*/ shared_memory_object::remove(shMemName); return true; } int main () { if(!do_test<int, allocator>()) return 1; if(!do_test<test::movable_int, allocator>()) return 1; if(!do_test<test::copyable_int, allocator>()) return 1; if(!do_test<int, test::allocator_v1>()) return 1; const test::EmplaceOptions Options = (test::EmplaceOptions)(test::EMPLACE_BACK | test::EMPLACE_FRONT | test::EMPLACE_BEFORE); if(!boost::interprocess::test::test_emplace < deque<test::EmplaceInt>, Options>()) return 1; return 0; } #include <boost/interprocess/detail/config_end.hpp>
gpl-2.0
PriceElectronics/linux-imx
drivers/net/ethernet/freescale/fec.c
35
44592
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include <asm/cacheflush.h>

#ifndef CONFIG_ARM
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif

#include "fec.h"

/* Buffer-address alignment mask required by the DMA engine. */
#if defined(CONFIG_ARM)
#define FEC_ALIGNMENT	0xf
#else
#define FEC_ALIGNMENT	0x3
#endif

#define DRIVER_NAME	"fec"

/* Controller is ENET-MAC */
#define FEC_QUIRK_ENET_MAC	(1 << 0)
/* Controller needs driver to swap frame */
#define FEC_QUIRK_SWAP_FRAME	(1 << 1)
/* Controller uses gasket */
#define FEC_QUIRK_USE_GASKET	(1 << 2)
/* Controller has GBIT support */
#define FEC_QUIRK_HAS_GBIT	(1 << 3)

/* Per-SoC variants; driver_data carries the quirk bits above. */
static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = FEC_QUIRK_USE_GASKET,
	}, {
		.name = "imx27-fec",
		.driver_data = 0,
	}, {
		.name = "imx28-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
	}, {
		.name = "imx6q-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

/* MAC address optionally supplied on the kernel command line. */
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets it MAC address out of local flash memory.
 * if this is non-zero then assume it is the address to get MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are power of two, so it it best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */

#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif

/* Interrupt events/masks. */
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

/* Only frame-complete and MII events are serviced by this driver. */
#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)

/* The FEC stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520

/* This device has up to three irqs on some platforms */
#define FEC_IRQ_NUM		3

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	void __iomem *hwp;

	struct net_device *netdev;

	struct clk *clk;

	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct	sk_buff* tx_skbuff[TX_RING_SIZE];
	struct	sk_buff* rx_skbuff[RX_RING_SIZE];
	ushort	skb_cur;	/* next free tx_skbuff slot */
	ushort	skb_dirty;	/* oldest in-flight tx_skbuff slot */

	/* CPM dual port RAM relative addresses */
	dma_addr_t	bd_dma;
	/* Address of Rx and Tx buffers */
	struct bufdesc	*rx_bd_base;
	struct bufdesc	*tx_bd_base;
	/* The next free ring entry */
	struct bufdesc	*cur_rx, *cur_tx;
	/* The ring entries to be free()ed */
	struct bufdesc	*dirty_tx;

	uint	tx_full;
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;

	struct	platform_device *pdev;

	int	opened;
	int	dev_id;

	/* Phylib and MDIO interface */
	struct	mii_bus *mii_bus;
	struct	phy_device *phy_dev;
	int	mii_timeout;
	uint	phy_speed;
	phy_interface_t	phy_interface;
	int	link;
	int	full_duplex;
	struct	completion mdio_done;
	int	irq[FEC_IRQ_NUM];
};

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

/* Number of MDIO buses registered by this driver (fec0 may be shared). */
static int mii_cnt;

/*
 * Byte-swap a buffer in place, 32 bits at a time, for controllers with
 * the FEC_QUIRK_SWAP_FRAME endianness quirk.  Rounds len up to a
 * multiple of 4, so the buffer must have that much room.
 */
static void *swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < (len + 3) / 4; i++, buf++)
		*buf = cpu_to_be32(*buf);

	return bufaddr;
}

/*
 * Queue one skb on the next free Tx buffer descriptor and kick the
 * controller.  Bounce-copies unaligned data, honours the frame-swap
 * quirk, and stops the queue when the ring fills.
 */
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short	status;
	unsigned long flags;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since ndev->tbusy should be set.
		 */
		printk("%s: tx queue full!.\n", ndev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/*
	 * Some design made an incorrect assumption on endian mode of
	 * the system that it's running on. As the result, driver has to
	 * swap every frame going to and coming from the controller.
	 */
	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
		swap_buffer(bufaddr, skb->len);

	/* Save skb pointer */
	fep->tx_skbuff[fep->skb_cur] = skb;

	ndev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp++;

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(ndev);
	}

	fep->cur_tx = bdp;

	skb_tx_timestamp(skb);

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return NETDEV_TX_OK;
}

/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *ndev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int i;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = 0x2; /* ETHEREN */

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/*
	 * enet-mac reset will reset mac address registers too,
	 * so need to reconfigure it.
	 */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
	}

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Reset all multicast. */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base. */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
			fep->hwp + FEC_X_DES_START);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Enable MII mode */
	if (duplex) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);
		else
			rcntl &= ~(1 << 8);

		/* 1G, 100M or 10M */
		if (fep->phy_dev) {
			if (fep->phy_dev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (fep->phy_dev->speed == SPEED_100)
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	/* Enable interrupts we wish to service */
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}

/*
 * Stop the controller: attempt a graceful transmit stop (only possible
 * with link), then reset, keeping ENET/MII alive on enet-mac parts.
 */
static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop : Graceful transmit stop did not complete !\n");
	}

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}

/* netdev tx-timeout handler: count the error and restart the MAC. */
static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	ndev->stats.tx_errors++;

	fec_restart(ndev, fep->full_duplex);
	netif_wake_queue(ndev);
}

/*
 * Reclaim completed Tx descriptors: update stats, free skbs and wake
 * the queue if it was stopped.  Called from the interrupt handler.
 */
static void
fec_enet_tx(struct net_device *ndev)
{
	struct	fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;

	fep = netdev_priv(ndev);
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(ndev))
				netif_wake_queue(ndev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *ndev)
{
	struct	fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	spin_lock(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					ndev->name);
			ndev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, ndev);
			if (!skb_defer_rx_timestamp(skb))
				netif_rx(skb);
		}

		/* Hand the buffer back to the hardware. */
		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;

		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	spin_unlock(&fep->hw_lock);
}

/*
 * Top-level interrupt handler: acknowledge and dispatch RXF/TXF/MII
 * events, looping until no events remain pending.
 */
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(ndev);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(ndev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}

/* ------------------------------------------------------------------------- */
static void __inline__ fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	unsigned char *iap, tmpaddr[ETH_ALEN];

	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

#ifdef CONFIG_OF
	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (mac)
				iap = (unsigned char *) mac;
		}
	}
#endif

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((unsigned long *) &tmpaddr[0]) =
			be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
		*((unsigned short *) &tmpaddr[4]) =
			be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using macaddr */
	if (iap == macaddr)
		ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}

/* ------------------------------------------------------------------------- */

/*
 * Phy section
 */

/*
 * phylib callback: restart or stop the MAC when the PHY reports a
 * duplex or link change, and print the new status.
 */
static void fec_enet_adjust_link(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = fep->phy_dev;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&fep->hw_lock, flags);

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
		goto spin_unlock;
	}

	/* Duplex link change */
	if (phy_dev->link) {
		if (fep->full_duplex != phy_dev->duplex) {
			fec_restart(ndev, phy_dev->duplex);
			/* prevent unnecessary second fec_restart() below */
			fep->link = phy_dev->link;
			status_change = 1;
		}
	}

	/* Link on or off change */
	if (phy_dev->link != fep->link) {
		fep->link = phy_dev->link;
		if (phy_dev->link)
			fec_restart(ndev, phy_dev->duplex);
		else
			fec_stop(ndev);
		status_change = 1;
	}

spin_unlock:
	spin_unlock_irqrestore(&fep->hw_lock, flags);

	if (status_change)
		phy_print_status(phy_dev);
}

/*
 * MDIO bus read: start an MMFR read transaction and wait (with timeout)
 * for the MII-done interrupt to complete it.
 */
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		printk(KERN_ERR "FEC: MDIO read timeout\n");
		return -ETIMEDOUT;
	}

	/* return value */
	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}

/*
 * MDIO bus write: start an MMFR write transaction and wait (with
 * timeout) for the MII-done interrupt to complete it.
 */
static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		printk(KERN_ERR "FEC: MDIO write timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/* No controller-level MDIO reset is needed. */
static int fec_enet_mdio_reset(struct mii_bus *bus)
{
	return 0;
}

/*
 * Find the PHY attached to this interface on the MDIO bus, connect it
 * via phylib, and mask its capabilities to what the MAC supports.
 */
static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	fep->phy_dev = NULL;

	/* check for attached phy */
	for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
		if ((fep->mii_bus->phy_mask & (1 << phy_id)))
			continue;
		if (fep->mii_bus->phy_map[phy_id] == NULL)
			continue;
		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
			continue;
		/* skip as many found PHYs as our device index */
		if (dev_id--)
			continue;
		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
		break;
	}

	if (phy_id >= PHY_MAX_ADDR) {
		printk(KERN_INFO
			"%s: no PHY, assuming direct connection to switch\n",
			ndev->name);
		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
		phy_id = 0;
	}

	snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
	phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
			      fep->phy_interface);
	if (IS_ERR(phy_dev)) {
		printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT)
		phy_dev->supported &= PHY_GBIT_FEATURES;
	else
		phy_dev->supported &= PHY_BASIC_FEATURES;

	phy_dev->advertising = phy_dev->supported;

	fep->phy_dev = phy_dev;
	fep->link = 0;
	fep->full_duplex = 0;

	printk(KERN_INFO
		"%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		ndev->name,
		fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
		fep->phy_dev->irq);

	return 0;
}

static int fec_enet_mii_init(struct platform_device *pdev)
{
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int err = -ENXIO, i;

	/*
	 * The dual fec interfaces are not equivalent with enet-mac.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
	}

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
	 * document.
*/ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000); if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) fep->phy_speed--; fep->phy_speed <<= 1; writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); fep->mii_bus = mdiobus_alloc(); if (fep->mii_bus == NULL) { err = -ENOMEM; goto err_out; } fep->mii_bus->name = "fec_enet_mii_bus"; fep->mii_bus->read = fec_enet_mdio_read; fep->mii_bus->write = fec_enet_mdio_write; fep->mii_bus->reset = fec_enet_mdio_reset; snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, fep->dev_id + 1); fep->mii_bus->priv = fep; fep->mii_bus->parent = &pdev->dev; fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!fep->mii_bus->irq) { err = -ENOMEM; goto err_out_free_mdiobus; } for (i = 0; i < PHY_MAX_ADDR; i++) fep->mii_bus->irq[i] = PHY_POLL; if (mdiobus_register(fep->mii_bus)) goto err_out_free_mdio_irq; mii_cnt++; /* save fec0 mii_bus */ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) fec0_mii_bus = fep->mii_bus; return 0; err_out_free_mdio_irq: kfree(fep->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(fep->mii_bus); err_out: return err; } static void fec_enet_mii_remove(struct fec_enet_private *fep) { if (--mii_cnt == 0) { mdiobus_unregister(fep->mii_bus); kfree(fep->mii_bus->irq); mdiobus_free(fep->mii_bus); } } static int fec_enet_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) { struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phydev = fep->phy_dev; if (!phydev) return -ENODEV; return phy_ethtool_gset(phydev, cmd); } static int fec_enet_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) { struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phydev = fep->phy_dev; if (!phydev) return -ENODEV; return phy_ethtool_sset(phydev, cmd); } static void fec_enet_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct fec_enet_private *fep = netdev_priv(ndev); strcpy(info->driver, fep->pdev->dev.driver->name); 
strcpy(info->version, "Revision: 1.0"); strcpy(info->bus_info, dev_name(&ndev->dev)); } static const struct ethtool_ops fec_enet_ethtool_ops = { .get_settings = fec_enet_get_settings, .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, .get_link = ethtool_op_get_link, }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phydev = fep->phy_dev; if (!netif_running(ndev)) return -EINVAL; if (!phydev) return -ENODEV; return phy_mii_ioctl(phydev, rq, cmd); } static void fec_enet_free_buffers(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); int i; struct sk_buff *skb; struct bufdesc *bdp; bdp = fep->rx_bd_base; for (i = 0; i < RX_RING_SIZE; i++) { skb = fep->rx_skbuff[i]; if (bdp->cbd_bufaddr) dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); if (skb) dev_kfree_skb(skb); bdp++; } bdp = fep->tx_bd_base; for (i = 0; i < TX_RING_SIZE; i++) kfree(fep->tx_bounce[i]); } static int fec_enet_alloc_buffers(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); int i; struct sk_buff *skb; struct bufdesc *bdp; bdp = fep->rx_bd_base; for (i = 0; i < RX_RING_SIZE; i++) { skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE); if (!skb) { fec_enet_free_buffers(ndev); return -ENOMEM; } fep->rx_skbuff[i] = skb; bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); bdp->cbd_sc = BD_ENET_RX_EMPTY; bdp++; } /* Set the last buffer to wrap. */ bdp--; bdp->cbd_sc |= BD_SC_WRAP; bdp = fep->tx_bd_base; for (i = 0; i < TX_RING_SIZE; i++) { fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); bdp->cbd_sc = 0; bdp->cbd_bufaddr = 0; bdp++; } /* Set the last buffer to wrap. 
*/ bdp--; bdp->cbd_sc |= BD_SC_WRAP; return 0; } static int fec_enet_open(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); int ret; /* I should reset the ring buffers here, but I don't yet know * a simple way to do that. */ ret = fec_enet_alloc_buffers(ndev); if (ret) return ret; /* Probe and connect to PHY when open the interface */ ret = fec_enet_mii_probe(ndev); if (ret) { fec_enet_free_buffers(ndev); return ret; } phy_start(fep->phy_dev); netif_start_queue(ndev); fep->opened = 1; return 0; } static int fec_enet_close(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); /* Don't know what to do yet. */ fep->opened = 0; netif_stop_queue(ndev); fec_stop(ndev); if (fep->phy_dev) { phy_stop(fep->phy_dev); phy_disconnect(fep->phy_dev); } fec_enet_free_buffers(ndev); return 0; } /* Set or clear the multicast filter for this adaptor. * Skeleton taken from sunlance driver. * The CPM Ethernet implementation allows Multicast as well as individual * MAC address filtering. Some of the drivers check to make sure it is * a group multicast address, and discard those that are not. I guess I * will do the same for now, but just remove the test if you want * individual filtering as well (do the upper net layers want or support * this kind of feature?). 
*/ #define HASH_BITS 6 /* #bits in hash */ #define CRC32_POLY 0xEDB88320 static void set_multicast_list(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct netdev_hw_addr *ha; unsigned int i, bit, data, crc, tmp; unsigned char hash; if (ndev->flags & IFF_PROMISC) { tmp = readl(fep->hwp + FEC_R_CNTRL); tmp |= 0x8; writel(tmp, fep->hwp + FEC_R_CNTRL); return; } tmp = readl(fep->hwp + FEC_R_CNTRL); tmp &= ~0x8; writel(tmp, fep->hwp + FEC_R_CNTRL); if (ndev->flags & IFF_ALLMULTI) { /* Catch all multicast addresses, so set the * filter to all 1's */ writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); return; } /* Clear filter and add the addresses in hash register */ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); netdev_for_each_mc_addr(ha, ndev) { /* calculate crc32 value of mac address */ crc = 0xffffffff; for (i = 0; i < ndev->addr_len; i++) { data = ha->addr[i]; for (bit = 0; bit < 8; bit++, data >>= 1) { crc = (crc >> 1) ^ (((crc ^ data) & 1) ? CRC32_POLY : 0); } } /* only upper 6 bits (HASH_BITS) are used * which point to specific bit in he hash registers */ hash = (crc >> (32 - HASH_BITS)) & 0x3f; if (hash > 31) { tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); tmp |= 1 << (hash - 32); writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); } else { tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); tmp |= 1 << hash; writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); } } } /* Set a MAC change in hardware. 
*/ static int fec_set_mac_address(struct net_device *ndev, void *p) { struct fec_enet_private *fep = netdev_priv(ndev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), fep->hwp + FEC_ADDR_LOW); writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), fep->hwp + FEC_ADDR_HIGH); return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER /* * fec_poll_controller: FEC Poll controller function * @dev: The FEC network adapter * * Polled functionality used by netconsole and others in non interrupt mode * */ void fec_poll_controller(struct net_device *dev) { int i; struct fec_enet_private *fep = netdev_priv(dev); for (i = 0; i < FEC_IRQ_NUM; i++) { if (fep->irq[i] > 0) { disable_irq(fep->irq[i]); fec_enet_interrupt(fep->irq[i], dev); enable_irq(fep->irq[i]); } } } #endif static const struct net_device_ops fec_netdev_ops = { .ndo_open = fec_enet_open, .ndo_stop = fec_enet_close, .ndo_start_xmit = fec_enet_start_xmit, .ndo_set_rx_mode = set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, .ndo_do_ioctl = fec_enet_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = fec_poll_controller, #endif }; /* * XXX: We need to clean up on failure exits here. * */ static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct bufdesc *cbd_base; struct bufdesc *bdp; int i; /* Allocate memory for buffer descriptors. */ cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, GFP_KERNEL); if (!cbd_base) { printk("FEC: allocate descriptor memory failed?\n"); return -ENOMEM; } spin_lock_init(&fep->hw_lock); fep->netdev = ndev; /* Get the Ethernet address */ fec_get_mac(ndev); /* Set receive and transmit descriptor base. 
*/ fep->rx_bd_base = cbd_base; fep->tx_bd_base = cbd_base + RX_RING_SIZE; /* The FEC Ethernet specific entries in the device structure */ ndev->watchdog_timeo = TX_TIMEOUT; ndev->netdev_ops = &fec_netdev_ops; ndev->ethtool_ops = &fec_enet_ethtool_ops; /* Initialize the receive buffer descriptors. */ bdp = fep->rx_bd_base; for (i = 0; i < RX_RING_SIZE; i++) { /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = 0; bdp++; } /* Set the last buffer to wrap */ bdp--; bdp->cbd_sc |= BD_SC_WRAP; /* ...and the same for transmit */ bdp = fep->tx_bd_base; for (i = 0; i < TX_RING_SIZE; i++) { /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = 0; bdp->cbd_bufaddr = 0; bdp++; } /* Set the last buffer to wrap */ bdp--; bdp->cbd_sc |= BD_SC_WRAP; fec_restart(ndev, 0); return 0; } #ifdef CONFIG_OF static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; if (np) return of_get_phy_mode(np); return -ENODEV; } static void __devinit fec_reset_phy(struct platform_device *pdev) { int err, phy_reset; struct device_node *np = pdev->dev.of_node; if (!np) return; phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset"); if (err) { pr_debug("FEC: failed to get gpio phy-reset: %d\n", err); return; } msleep(1); gpio_set_value(phy_reset, 1); } #else /* CONFIG_OF */ static inline int fec_get_phy_mode_dt(struct platform_device *pdev) { return -ENODEV; } static inline void fec_reset_phy(struct platform_device *pdev) { /* * In case of platform probe, the reset has been done * by machine code. 
*/ } #endif /* CONFIG_OF */ static int __devinit fec_probe(struct platform_device *pdev) { struct fec_enet_private *fep; struct fec_platform_data *pdata; struct net_device *ndev; int i, irq, ret = 0; struct resource *r; const struct of_device_id *of_id; static int dev_id; of_id = of_match_device(fec_dt_ids, &pdev->dev); if (of_id) pdev->id_entry = of_id->data; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) return -ENXIO; r = request_mem_region(r->start, resource_size(r), pdev->name); if (!r) return -EBUSY; /* Init network device */ ndev = alloc_etherdev(sizeof(struct fec_enet_private)); if (!ndev) { ret = -ENOMEM; goto failed_alloc_etherdev; } SET_NETDEV_DEV(ndev, &pdev->dev); /* setup board info structure */ fep = netdev_priv(ndev); fep->hwp = ioremap(r->start, resource_size(r)); fep->pdev = pdev; fep->dev_id = dev_id++; if (!fep->hwp) { ret = -ENOMEM; goto failed_ioremap; } platform_set_drvdata(pdev, ndev); ret = fec_get_phy_mode_dt(pdev); if (ret < 0) { pdata = pdev->dev.platform_data; if (pdata) fep->phy_interface = pdata->phy; else fep->phy_interface = PHY_INTERFACE_MODE_MII; } else { fep->phy_interface = ret; } fec_reset_phy(pdev); for (i = 0; i < FEC_IRQ_NUM; i++) { irq = platform_get_irq(pdev, i); if (irq < 0) { if (i) break; ret = irq; goto failed_irq; } ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); if (ret) { while (--i >= 0) { irq = platform_get_irq(pdev, i); free_irq(irq, ndev); } goto failed_irq; } } fep->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(fep->clk)) { ret = PTR_ERR(fep->clk); goto failed_clk; } clk_prepare_enable(fep->clk); ret = fec_enet_init(ndev); if (ret) goto failed_init; ret = fec_enet_mii_init(pdev); if (ret) goto failed_mii_init; /* Carrier starts down, phylib will bring it up */ netif_carrier_off(ndev); ret = register_netdev(ndev); if (ret) goto failed_register; return 0; failed_register: fec_enet_mii_remove(fep); failed_mii_init: failed_init: clk_disable_unprepare(fep->clk); 
clk_put(fep->clk); failed_clk: for (i = 0; i < FEC_IRQ_NUM; i++) { irq = platform_get_irq(pdev, i); if (irq > 0) free_irq(irq, ndev); } failed_irq: iounmap(fep->hwp); failed_ioremap: free_netdev(ndev); failed_alloc_etherdev: release_mem_region(r->start, resource_size(r)); return ret; } static int __devexit fec_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); struct resource *r; int i; unregister_netdev(ndev); fec_enet_mii_remove(fep); for (i = 0; i < FEC_IRQ_NUM; i++) { int irq = platform_get_irq(pdev, i); if (irq > 0) free_irq(irq, ndev); } clk_disable_unprepare(fep->clk); clk_put(fep->clk); iounmap(fep->hwp); free_netdev(ndev); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); BUG_ON(!r); release_mem_region(r->start, resource_size(r)); platform_set_drvdata(pdev, NULL); return 0; } #ifdef CONFIG_PM static int fec_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); if (netif_running(ndev)) { fec_stop(ndev); netif_device_detach(ndev); } clk_disable_unprepare(fep->clk); return 0; } static int fec_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); clk_prepare_enable(fep->clk); if (netif_running(ndev)) { fec_restart(ndev, fep->full_duplex); netif_device_attach(ndev); } return 0; } static const struct dev_pm_ops fec_pm_ops = { .suspend = fec_suspend, .resume = fec_resume, .freeze = fec_suspend, .thaw = fec_resume, .poweroff = fec_suspend, .restore = fec_resume, }; #endif static struct platform_driver fec_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &fec_pm_ops, #endif .of_match_table = fec_dt_ids, }, .id_table = fec_devtype, .probe = fec_probe, .remove = __devexit_p(fec_drv_remove), }; static int __init fec_enet_module_init(void) { printk(KERN_INFO "FEC Ethernet Driver\n"); 
return platform_driver_register(&fec_driver); } static void __exit fec_enet_cleanup(void) { platform_driver_unregister(&fec_driver); } module_exit(fec_enet_cleanup); module_init(fec_enet_module_init); MODULE_LICENSE("GPL");
gpl-2.0
Cpasjuste/android_kernel_lg_p999
drivers/gpu/drm/i915/i915_dma.c
291
44501
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- */ /* * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include "drmP.h" #include "drm.h" #include "drm_crtc_helper.h" #include "drm_fb_helper.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" #include "i915_trace.h" #include <linux/vgaarb.h> /* Really want an OS-independent resettable timer. Would like to have * this loop run for (eg) 3 sec, but have the timer reset every time * the head pointer changes, so that EBUSY only happens if the ring * actually stalls for (eg) 3 seconds. */ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_ring_buffer_t *ring = &(dev_priv->ring); u32 acthd_reg = IS_I965G(dev) ? 
ACTHD_I965 : ACTHD; u32 last_acthd = I915_READ(acthd_reg); u32 acthd; u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; int i; trace_i915_ring_wait_begin (dev); for (i = 0; i < 100000; i++) { ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; acthd = I915_READ(acthd_reg); ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->Size; if (ring->space >= n) { trace_i915_ring_wait_end (dev); return 0; } if (dev->primary->master) { struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; } if (ring->head != last_head) i = 0; if (acthd != last_acthd) i = 0; last_head = ring->head; last_acthd = acthd; msleep_interruptible(10); } trace_i915_ring_wait_end (dev); return -EBUSY; } /* As a ringbuffer is only allowed to wrap between instructions, fill * the tail with NOOPs. */ int i915_wrap_ring(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; volatile unsigned int *virt; int rem; rem = dev_priv->ring.Size - dev_priv->ring.tail; if (dev_priv->ring.space < rem) { int ret = i915_wait_ring(dev, rem, __func__); if (ret) return ret; } dev_priv->ring.space -= rem; virt = (unsigned int *) (dev_priv->ring.virtual_start + dev_priv->ring.tail); rem /= 4; while (rem--) *virt++ = MI_NOOP; dev_priv->ring.tail = 0; return 0; } /** * Sets up the hardware status page for devices that need a physical address * in the register. 
*/ static int i915_init_phys_hws(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; /* Program Hardware Status Page */ dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); if (!dev_priv->status_page_dmah) { DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; memset(dev_priv->hw_status_page, 0, PAGE_SIZE); I915_WRITE(HWS_PGA, dev_priv->dma_status_page); DRM_DEBUG_DRIVER("Enabled hardware status page\n"); return 0; } /** * Frees the hardware status page, whether it's a physical address or a virtual * address set up by the X Server. */ static void i915_free_hws(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; if (dev_priv->status_page_dmah) { drm_pci_free(dev, dev_priv->status_page_dmah); dev_priv->status_page_dmah = NULL; } if (dev_priv->status_gfx_addr) { dev_priv->status_gfx_addr = 0; drm_core_ioremapfree(&dev_priv->hws_map, dev); } /* Need to rewrite hardware status page */ I915_WRITE(HWS_PGA, 0x1ffff000); } void i915_kernel_lost_context(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; drm_i915_ring_buffer_t *ring = &(dev_priv->ring); /* * We should never lose context on the ring with modesetting * as we don't expose it to userspace */ if (drm_core_check_feature(dev, DRIVER_MODESET)) return; ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->Size; if (!dev->primary->master) return; master_priv = dev->primary->master->driver_priv; if (ring->head == ring->tail && master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; } static int i915_dma_cleanup(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; /* 
Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private * is freed, it's too late. */ if (dev->irq_enabled) drm_irq_uninstall(dev); if (dev_priv->ring.virtual_start) { drm_core_ioremapfree(&dev_priv->ring.map, dev); dev_priv->ring.virtual_start = NULL; dev_priv->ring.map.handle = NULL; dev_priv->ring.map.size = 0; } /* Clear the HWS virtual address at teardown */ if (I915_NEED_GFX_HWS(dev)) i915_free_hws(dev); return 0; } static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; master_priv->sarea = drm_getsarea(dev); if (master_priv->sarea) { master_priv->sarea_priv = (drm_i915_sarea_t *) ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); } else { DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n"); } if (init->ring_size != 0) { if (dev_priv->ring.ring_obj != NULL) { i915_dma_cleanup(dev); DRM_ERROR("Client tried to initialize ringbuffer in " "GEM mode\n"); return -EINVAL; } dev_priv->ring.Size = init->ring_size; dev_priv->ring.map.offset = init->ring_start; dev_priv->ring.map.size = init->ring_size; dev_priv->ring.map.type = 0; dev_priv->ring.map.flags = 0; dev_priv->ring.map.mtrr = 0; drm_core_ioremap_wc(&dev_priv->ring.map, dev); if (dev_priv->ring.map.handle == NULL) { i915_dma_cleanup(dev); DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); return -ENOMEM; } } dev_priv->ring.virtual_start = dev_priv->ring.map.handle; dev_priv->cpp = init->cpp; dev_priv->back_offset = init->back_offset; dev_priv->front_offset = init->front_offset; dev_priv->current_page = 0; if (master_priv->sarea_priv) master_priv->sarea_priv->pf_current_page = 0; /* Allow hardware batchbuffers unless told otherwise. 
*/ dev_priv->allow_batchbuffer = 1; return 0; } static int i915_dma_resume(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; DRM_DEBUG_DRIVER("%s\n", __func__); if (dev_priv->ring.map.handle == NULL) { DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); return -ENOMEM; } /* Program Hardware Status Page */ if (!dev_priv->hw_status_page) { DRM_ERROR("Can not find hardware status page\n"); return -EINVAL; } DRM_DEBUG_DRIVER("hw status page @ %p\n", dev_priv->hw_status_page); if (dev_priv->status_gfx_addr != 0) I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); else I915_WRITE(HWS_PGA, dev_priv->dma_status_page); DRM_DEBUG_DRIVER("Enabled hardware status page\n"); return 0; } static int i915_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_init_t *init = data; int retcode = 0; switch (init->func) { case I915_INIT_DMA: retcode = i915_initialize(dev, init); break; case I915_CLEANUP_DMA: retcode = i915_dma_cleanup(dev); break; case I915_RESUME_DMA: retcode = i915_dma_resume(dev); break; default: retcode = -EINVAL; break; } return retcode; } /* Implement basically the same security restrictions as hardware does * for MI_BATCH_NON_SECURE. These can be made stricter at any time. * * Most of the calculations below involve calculating the size of a * particular instruction. It's important to get the size right as * that tells us where the next instruction to check is. Any illegal * instruction detected will be given a size of zero, which is a * signal to abort the rest of the buffer. 
*/ static int do_validate_cmd(int cmd) { switch (((cmd >> 29) & 0x7)) { case 0x0: switch ((cmd >> 23) & 0x3f) { case 0x0: return 1; /* MI_NOOP */ case 0x4: return 1; /* MI_FLUSH */ default: return 0; /* disallow everything else */ } break; case 0x1: return 0; /* reserved */ case 0x2: return (cmd & 0xff) + 2; /* 2d commands */ case 0x3: if (((cmd >> 24) & 0x1f) <= 0x18) return 1; switch ((cmd >> 24) & 0x1f) { case 0x1c: return 1; case 0x1d: switch ((cmd >> 16) & 0xff) { case 0x3: return (cmd & 0x1f) + 2; case 0x4: return (cmd & 0xf) + 2; default: return (cmd & 0xffff) + 2; } case 0x1e: if (cmd & (1 << 23)) return (cmd & 0xffff) + 1; else return 1; case 0x1f: if ((cmd & (1 << 23)) == 0) /* inline vertices */ return (cmd & 0x1ffff) + 2; else if (cmd & (1 << 17)) /* indirect random */ if ((cmd & 0xffff) == 0) return 0; /* unknown length, too hard */ else return (((cmd & 0xffff) + 1) / 2) + 1; else return 2; /* indirect sequential */ default: return 0; } default: return 0; } return 0; } static int validate_cmd(int cmd) { int ret = do_validate_cmd(cmd); /* printk("validate_cmd( %x ): %d\n", cmd, ret); */ return ret; } static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; int i; RING_LOCALS; if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) return -EINVAL; BEGIN_LP_RING((dwords+1)&~1); for (i = 0; i < dwords;) { int cmd, sz; cmd = buffer[i]; if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) return -EINVAL; OUT_RING(cmd); while (++i, --sz) { OUT_RING(buffer[i]); } } if (dwords & 1) OUT_RING(0); ADVANCE_LP_RING(); return 0; } int i915_emit_box(struct drm_device *dev, struct drm_clip_rect *boxes, int i, int DR1, int DR4) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_clip_rect box = boxes[i]; RING_LOCALS; if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { DRM_ERROR("Bad box %d,%d..%d,%d\n", box.x1, box.y1, box.x2, box.y2); return -EINVAL; } if 
(IS_I965G(dev)) { BEGIN_LP_RING(4); OUT_RING(GFX_OP_DRAWRECT_INFO_I965); OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); OUT_RING(DR4); ADVANCE_LP_RING(); } else { BEGIN_LP_RING(6); OUT_RING(GFX_OP_DRAWRECT_INFO); OUT_RING(DR1); OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); OUT_RING(DR4); OUT_RING(0); ADVANCE_LP_RING(); } return 0; } /* XXX: Emitting the counter should really be moved to part of the IRQ * emit. For now, do it in both places: */ static void i915_emit_breadcrumb(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; RING_LOCALS; dev_priv->counter++; if (dev_priv->counter > 0x7FFFFFFFUL) dev_priv->counter = 0; if (master_priv->sarea_priv) master_priv->sarea_priv->last_enqueue = dev_priv->counter; BEGIN_LP_RING(4); OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); } static int i915_dispatch_cmdbuffer(struct drm_device * dev, drm_i915_cmdbuffer_t *cmd, struct drm_clip_rect *cliprects, void *cmdbuf) { int nbox = cmd->num_cliprects; int i = 0, count, ret; if (cmd->sz & 0x3) { DRM_ERROR("alignment"); return -EINVAL; } i915_kernel_lost_context(dev); count = nbox ? 
nbox : 1; for (i = 0; i < count; i++) { if (i < nbox) { ret = i915_emit_box(dev, cliprects, i, cmd->DR1, cmd->DR4); if (ret) return ret; } ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); if (ret) return ret; } i915_emit_breadcrumb(dev); return 0; } static int i915_dispatch_batchbuffer(struct drm_device * dev, drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects) { drm_i915_private_t *dev_priv = dev->dev_private; int nbox = batch->num_cliprects; int i = 0, count; RING_LOCALS; if ((batch->start | batch->used) & 0x7) { DRM_ERROR("alignment"); return -EINVAL; } i915_kernel_lost_context(dev); count = nbox ? nbox : 1; for (i = 0; i < count; i++) { if (i < nbox) { int ret = i915_emit_box(dev, cliprects, i, batch->DR1, batch->DR4); if (ret) return ret; } if (!IS_I830(dev) && !IS_845G(dev)) { BEGIN_LP_RING(2); if (IS_I965G(dev)) { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); OUT_RING(batch->start); } else { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); OUT_RING(batch->start | MI_BATCH_NON_SECURE); } ADVANCE_LP_RING(); } else { BEGIN_LP_RING(4); OUT_RING(MI_BATCH_BUFFER); OUT_RING(batch->start | MI_BATCH_NON_SECURE); OUT_RING(batch->start + batch->used - 4); OUT_RING(0); ADVANCE_LP_RING(); } } i915_emit_breadcrumb(dev); return 0; } static int i915_dispatch_flip(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; RING_LOCALS; if (!master_priv->sarea_priv) return -EINVAL; DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", __func__, dev_priv->current_page, master_priv->sarea_priv->pf_current_page); i915_kernel_lost_context(dev); BEGIN_LP_RING(2); OUT_RING(MI_FLUSH | MI_READ_FLUSH); OUT_RING(0); ADVANCE_LP_RING(); BEGIN_LP_RING(6); OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); OUT_RING(0); if (dev_priv->current_page == 0) { OUT_RING(dev_priv->back_offset); dev_priv->current_page = 1; } else { OUT_RING(dev_priv->front_offset); 
dev_priv->current_page = 0; } OUT_RING(0); ADVANCE_LP_RING(); BEGIN_LP_RING(2); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); OUT_RING(0); ADVANCE_LP_RING(); master_priv->sarea_priv->last_enqueue = dev_priv->counter++; BEGIN_LP_RING(4); OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); master_priv->sarea_priv->pf_current_page = dev_priv->current_page; return 0; } static int i915_quiescent(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; i915_kernel_lost_context(dev); return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__); } static int i915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { int ret; RING_LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->struct_mutex); ret = i915_quiescent(dev); mutex_unlock(&dev->struct_mutex); return ret; } static int i915_batchbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv; drm_i915_batchbuffer_t *batch = data; int ret; struct drm_clip_rect *cliprects = NULL; if (!dev_priv->allow_batchbuffer) { DRM_ERROR("Batchbuffer ioctl disabled\n"); return -EINVAL; } DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n", batch->start, batch->used, batch->num_cliprects); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); if (batch->num_cliprects < 0) return -EINVAL; if (batch->num_cliprects) { cliprects = kcalloc(batch->num_cliprects, sizeof(struct drm_clip_rect), GFP_KERNEL); if (cliprects == NULL) return -ENOMEM; ret = copy_from_user(cliprects, batch->cliprects, batch->num_cliprects * sizeof(struct drm_clip_rect)); if (ret != 0) goto fail_free; } mutex_lock(&dev->struct_mutex); ret = 
i915_dispatch_batchbuffer(dev, batch, cliprects); mutex_unlock(&dev->struct_mutex); if (sarea_priv) sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); fail_free: kfree(cliprects); return ret; } static int i915_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv; drm_i915_cmdbuffer_t *cmdbuf = data; struct drm_clip_rect *cliprects = NULL; void *batch_data; int ret; DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); if (cmdbuf->num_cliprects < 0) return -EINVAL; batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL); if (batch_data == NULL) return -ENOMEM; ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); if (ret != 0) goto fail_batch_free; if (cmdbuf->num_cliprects) { cliprects = kcalloc(cmdbuf->num_cliprects, sizeof(struct drm_clip_rect), GFP_KERNEL); if (cliprects == NULL) goto fail_batch_free; ret = copy_from_user(cliprects, cmdbuf->cliprects, cmdbuf->num_cliprects * sizeof(struct drm_clip_rect)); if (ret != 0) goto fail_clip_free; } mutex_lock(&dev->struct_mutex); ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); mutex_unlock(&dev->struct_mutex); if (ret) { DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); goto fail_clip_free; } if (sarea_priv) sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); fail_clip_free: kfree(cliprects); fail_batch_free: kfree(batch_data); return ret; } static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { int ret; DRM_DEBUG_DRIVER("%s\n", __func__); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->struct_mutex); ret = i915_dispatch_flip(dev); mutex_unlock(&dev->struct_mutex); return ret; } static int 
i915_getparam(struct drm_device *dev, void *data,
	      struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	/* Result is handed back through the user pointer in the request. */
	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

/* DRM_I915_SETPARAM ioctl: let userspace tweak driver knobs. */
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		/* Obsolete: MI_BATCH_BUFFER_START is always used now. */
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	return 0;
}

/*
 * DRM_I915_HWS_ADDR ioctl: map the userspace-provided hardware status
 * page into the kernel (chips that keep the HWS in graphics memory).
 */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if
(!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* Under KMS the kernel owns the status page; silently ignore. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	/* Zero the page and point the hardware at it. */
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
				dev_priv->status_gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
				dev_priv->hw_status_page);
	return 0;
}

/*
 * Look up the host bridge (device 0:0.0), which carries the GMCH
 * configuration registers we need for stolen-memory probing.
 * Takes a reference that must be dropped with pci_dev_put().
 */
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/**
 * i915_probe_agp - get AGP bootup configuration
 * @dev: DRM device
 * @aperture_size: returns AGP aperture configured size
 * @preallocated_size: returns size of BIOS preallocated AGP space
 * @start: returns offset of the first usable byte of stolen space
 *
 * Since Intel integrated graphics are UMA, the BIOS has to set aside
 * some RAM for the framebuffer at early boot.  This code figures out
 * how much was set aside so we can use it for our own purposes.
 */
static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
			  uint32_t *preallocated_size,
			  uint32_t *start)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 tmp = 0;
	unsigned long overhead;
	unsigned long stolen;

	/* Get the fb aperture size and "stolen" memory amount. */
	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);

	*aperture_size = 1024 * 1024;
	*preallocated_size = 1024 * 1024;

	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_INTEL_82830_CGC:
	case PCI_DEVICE_ID_INTEL_82845G_IG:
	case PCI_DEVICE_ID_INTEL_82855GM_IG:
	case PCI_DEVICE_ID_INTEL_82865_IG:
		/* Pre-9xx: the aperture size is encoded in GMCH_CTRL. */
		if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
			*aperture_size *= 64;
		else
			*aperture_size *= 128;
		break;
	default:
		/* 9xx supports large sizes, just look at the length */
		*aperture_size = pci_resource_len(dev->pdev, 2);
		break;
	}

	/*
	 * Some of the preallocated space is taken by the GTT
	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
	 */
	if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
		overhead = 4096;
	else
		overhead = (*aperture_size / 1024) + 4096;

	/* Decode the BIOS-configured stolen memory size. */
	switch (tmp & INTEL_GMCH_GMS_MASK) {
	case INTEL_855_GMCH_GMS_DISABLED:
		DRM_ERROR("video memory is disabled\n");
		return -1;
	case INTEL_855_GMCH_GMS_STOLEN_1M:
		stolen = 1 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_4M:
		stolen = 4 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_8M:
		stolen = 8 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_16M:
		stolen = 16 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_32M:
		stolen = 32 * 1024 * 1024;
		break;
	case INTEL_915G_GMCH_GMS_STOLEN_48M:
		stolen = 48 * 1024 * 1024;
		break;
	case INTEL_915G_GMCH_GMS_STOLEN_64M:
		stolen = 64 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_128M:
		stolen = 128 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_256M:
		stolen = 256 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_96M:
		stolen = 96 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_160M:
		stolen = 160 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_224M:
		stolen = 224
* 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_352M:
		stolen = 352 * 1024 * 1024;
		break;
	default:
		DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
			tmp & INTEL_GMCH_GMS_MASK);
		return -1;
	}

	/* Report the space left after GTT/popup overhead. */
	*preallocated_size = stolen - overhead;
	*start = overhead;

	return 0;
}

#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
#define PTE_MAPPING_TYPE_MASK		(3 << 1)
#define PTE_VALID			(1 << 0)

/**
 * i915_gtt_to_phys - take a GTT address and turn it into a physical one
 * @dev: drm device
 * @gtt_addr: address to translate
 *
 * Some chip functions require allocations from stolen space but need the
 * physical address of the memory in question.  We use this routine
 * to get a physical address suitable for register programming from a given
 * GTT address.
 */
static unsigned long i915_gtt_to_phys(struct drm_device *dev,
				      unsigned long gtt_addr)
{
	unsigned long *gtt;
	unsigned long entry, phys;
	int gtt_bar = IS_I9XX(dev) ? 0 : 1;
	int gtt_offset, gtt_size;

	/* Locate the GTT within the chip's BARs; layout differs per gen. */
	if (IS_I965G(dev)) {
		if (IS_G4X(dev) || IS_IGDNG(dev)) {
			gtt_offset = 2*1024*1024;
			gtt_size = 2*1024*1024;
		} else {
			gtt_offset = 512*1024;
			gtt_size = 512*1024;
		}
	} else {
		gtt_bar = 3;
		gtt_offset = 0;
		gtt_size = pci_resource_len(dev->pdev, gtt_bar);
	}

	gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
			 gtt_size);
	if (!gtt) {
		DRM_ERROR("ioremap of GTT failed\n");
		return 0;
	}

	/* One 4-byte PTE per 4K page.
	 * NOTE(review): gtt is unsigned long *, so the + here is scaled by
	 * sizeof(long); presumably only correct on 32-bit — verify on 64-bit. */
	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));

	DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);

	/* Mask out these reserved bits on this hardware. */
	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
	    IS_I945G(dev) || IS_I945GM(dev)) {
		entry &= ~PTE_ADDRESS_MASK_HIGH;
	}

	/* If it's not a mapping type we know, then bail.
*/ if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) { iounmap(gtt); return 0; } if (!(entry & PTE_VALID)) { DRM_ERROR("bad GTT entry in stolen space\n"); iounmap(gtt); return 0; } iounmap(gtt); phys =(entry & PTE_ADDRESS_MASK) | ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); return phys; } static void i915_warn_stolen(struct drm_device *dev) { DRM_ERROR("not enough stolen space for compressed buffer, disabling\n"); DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); } static void i915_setup_compression(struct drm_device *dev, int size) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_mm_node *compressed_fb, *compressed_llb; unsigned long cfb_base; unsigned long ll_base = 0; /* Leave 1M for line length buffer & misc. */ compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); if (!compressed_fb) { i915_warn_stolen(dev); return; } compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); if (!compressed_fb) { i915_warn_stolen(dev); return; } cfb_base = i915_gtt_to_phys(dev, compressed_fb->start); if (!cfb_base) { DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); drm_mm_put_block(compressed_fb); } if (!IS_GM45(dev)) { compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, 4096, 0); if (!compressed_llb) { i915_warn_stolen(dev); return; } compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096); if (!compressed_llb) { i915_warn_stolen(dev); return; } ll_base = i915_gtt_to_phys(dev, compressed_llb->start); if (!ll_base) { DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); drm_mm_put_block(compressed_fb); drm_mm_put_block(compressed_llb); } } dev_priv->cfb_size = size; if (IS_GM45(dev)) { g4x_disable_fbc(dev); I915_WRITE(DPFC_CB_BASE, compressed_fb->start); } else { i8xx_disable_fbc(dev); 
I915_WRITE(FBC_CFB_BASE, cfb_base);
		I915_WRITE(FBC_LL_BASE, ll_base);
	}

	DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
		  ll_base, size >> 20);
}

/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/*
 * KMS half of driver load: set up stolen-space allocator, ring buffer,
 * FBC, BIOS tables, VGA arbitration, output discovery and IRQs.
 */
static int i915_load_modeset_init(struct drm_device *dev,
				  unsigned long prealloc_start,
				  unsigned long prealloc_size,
				  unsigned long agp_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fb_bar = IS_I9XX(dev) ? 2 : 0;
	int ret = 0;

	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
		0xff000000;

	/* Chips without a real cursor plane need physically-addressed
	 * cursor memory; gen4/G33 can use GTT addresses. */
	if (IS_MOBILE(dev) || IS_I9XX(dev))
		dev_priv->cursor_needs_physical = true;
	else
		dev_priv->cursor_needs_physical = false;

	if (IS_I965G(dev) || IS_G33(dev))
		dev_priv->cursor_needs_physical = false;

	/* Basic memrange allocator for stolen space (aka vram) */
	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	/* Let GEM Manage from end of prealloc space to end of aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
 */
	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_init_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		goto out;

	/* Try to set up FBC with a reasonable compressed buffer size */
	if (I915_HAS_FBC(dev) && i915_powersave) {
		int cfb_size;

		/* Try to get an 8M buffer... */
		if (prealloc_size > (9*1024*1024))
			cfb_size = 8*1024*1024;
		else /* fall back to 7/8 of the stolen space */
			cfb_size = prealloc_size * 7 / 8;
		i915_setup_compression(dev, cfb_size);
	}

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	ret = intel_init_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret)
		goto destroy_ringbuffer;

	intel_modeset_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto destroy_ringbuffer;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	/*
	 * Initialize the hardware status page IRQ location.
 */
	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));

	drm_helper_initial_config(dev);

	return 0;

destroy_ringbuffer:
	i915_gem_cleanup_ringbuffer(dev);
out:
	return ret;
}

/* Allocate per-master (X server) private data. */
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

/* Free per-master private data. */
void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}

/*
 * Read FSB and memory clock frequencies from CLKCFG (IGD/Pineview only;
 * other chips leave the cached values untouched).
 */
static void i915_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	if (!IS_IGD(dev))
		return;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq =  667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	resource_size_t base, size;
	int ret = 0, mmio_bar = IS_I9XX(dev) ?
0 : 1;
	uint32_t agp_size, prealloc_size, prealloc_start;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;

	/* Add register map (needed for suspend/resume) */
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	dev_priv->regs = ioremap(base, size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base,
				     dev->agp->agp_info.aper_size * 1024*1024);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 dev->agp->agp_info.aper_size *
					 1024 * 1024,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}

	ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
	if (ret)
		goto out_iomapfree;

	dev_priv->wq = create_workqueue("i915");
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_iomapfree;
	}

	/* enable GEM by default */
	dev_priv->has_gem = 1;

	/* A BIOS that steals most of the aperture leaves too little for
	 * GEM to be useful; fall back to the legacy paths. */
	if (prealloc_size > agp_size * 3 / 4) {
		DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
			  "memory stolen.\n",
			  prealloc_size / 1024, agp_size / 1024);
		DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
			  "updating the BIOS to fix).\n");
		dev_priv->has_gem = 0;
	}

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_IGDNG(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret != 0)
			goto out_workqueue_free;
	}

	i915_get_mem_freq(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	spin_lock_init(&dev_priv->user_irq_lock);
	spin_lock_init(&dev_priv->error_lock);
	dev_priv->user_irq_refcount = 0;
	dev_priv->trace_irq_seqno = 0;

	ret = drm_vblank_init(dev, I915_NUM_PIPE);

	if (ret) {
		(void) i915_driver_unload(dev);
		return ret;
	}

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev, prealloc_start,
					     prealloc_size, agp_size);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_workqueue_free;
		}
	}

	/* Must be done after probing outputs */
	/* FIXME: verify on IGDNG */
	if (!IS_IGDNG(dev))
		intel_opregion_init(dev, 0);

	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
		    (unsigned long) dev);
	return 0;

out_workqueue_free:
	destroy_workqueue(dev_priv->wq);
out_iomapfree:
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
	iounmap(dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kfree(dev_priv);
	return ret;
}

/* Tear down everything i915_driver_load() set up, in reverse order. */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	destroy_workqueue(dev_priv->wq);
	del_timer_sync(&dev_priv->hangcheck_timer);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_irq_uninstall(dev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	if (dev_priv->regs != NULL)
		iounmap(dev_priv->regs);

	if (!IS_IGDNG(dev))
		intel_opregion_free(dev, 0);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		i915_gem_free_all_phys_object(dev);

		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		drm_mm_takedown(&dev_priv->vram);
		i915_gem_lastclose(dev);
	}
pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}

/* Allocate per-client (file descriptor) private data. */
int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv;

	DRM_DEBUG_DRIVER("\n");
	i915_file_priv = (struct drm_i915_file_private *)
	    kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);

	if (!i915_file_priv)
		return -ENOMEM;

	file_priv->driver_priv = i915_file_priv;

	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_helper_restore();
		return;
	}

	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}

/* Release a client's GEM and AGP-heap resources on close. */
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	i915_gem_release(dev, file_priv);
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}

/* Free the per-client private data allocated in i915_driver_open(). */
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	kfree(i915_file_priv);
}

/* Ioctl dispatch table; indices must match the DRM_I915_* numbers. */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	/* GEM ioctls: mostly unprivileged (flag 0) by design. */
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}
gpl-2.0