repo_name
string
path
string
copies
string
size
string
content
string
license
string
obek/linux-sunxi
modules/wifi/nano-c047.12/WiFiEngine/wifi_drv/src/mibtable.c
100
5726
/* Copyright 2010 by Nanoradio AB */ /* $Id: mibtable.c 18654 2011-04-11 13:46:16Z joda $ */ #include "wifi_engine_internal.h" #include "mib_idefs.h" struct mib_table { size_t size; /* size in bytes of table */ uint32_t vaddr; /* target address of first entry */ uint32_t rootindex; /* index of the main table */ uint32_t rootentries; /* number of entries of main table */ mib_object_entry_t table[1]; }; static struct mib_table *mibtable; #define MIBENTRYSIZE 8 #define ADDRVALID(A, T) ((A) >= (T)->vaddr \ && (A) - (T)->vaddr + MIBENTRYSIZE < (T)->size \ && (((A) - (T)->vaddr) & (MIBENTRYSIZE - 1)) == 0) #define ADDRTOINDEX(A, T) (((A) - (T)->vaddr) / MIBENTRYSIZE) #define GETFIELD(N, F) (((N) & MIB_##F##_MASK) >> MIB_##F##_OFFSET) #define GETADDR(N) ((N) & 0x3fffff) int wei_have_mibtable(void) { return mibtable != NULL; } #if DE_MIB_TABLE_SUPPORT == CFG_ON int wei_get_mib_object(const mib_id_t *mib_id, mib_object_entry_t *entry) { mib_object_reference_type_t type; mib_object_size_description_t sdesc; uint32_t addr; uint32_t size; unsigned int mib_id_index = 0; unsigned int component; unsigned int final_component; unsigned int index; unsigned int nentries; mib_object_entry_t *oe; if(mibtable == NULL || mib_id == NULL || entry == NULL) { return WIFI_ENGINE_FAILURE; } index = mibtable->rootindex; nentries = mibtable->rootentries; component = (unsigned int)(mib_id->octets[0]) - 1; while(1) { final_component = (mib_id_index >= MIB_IDENTIFIER_MAX_LENGTH - 1) || (mib_id->octets[mib_id_index + 1] == 0); if(component >= nentries) { DE_TRACE_STATIC(TR_MIB, "component too large\n"); return WIFI_ENGINE_FAILURE; } index += component; if(index >= mibtable->size / MIBENTRYSIZE) { DE_TRACE_STATIC(TR_MIB, "bad address\n"); return WIFI_ENGINE_FAILURE; } oe = &mibtable->table[index]; type = (mib_object_reference_type_t)GETFIELD(oe->storage_description, REFERENCE_TYPE); sdesc = (mib_object_size_description_t)GETFIELD(oe->storage_description, OBJECT_SIZE_DESCRIPTION); addr = 
GETADDR(oe->reference); size = GETFIELD(oe->storage_description, OBJECT_SIZE); if(type == MIB_OBJECT_REFERENCE_TYPE_MIB_TABLE) { if(final_component) { DE_TRACE_STATIC(TR_MIB, "non-leaf\n"); return WIFI_ENGINE_FAILURE; } if(sdesc != MIB_OBJECT_SIZE_DESCRIPTION_FIXED_SIZE) { DE_TRACE_STATIC(TR_MIB, "unexpected size description type\n"); return WIFI_ENGINE_FAILURE; } if(!ADDRVALID(addr, mibtable)) { DE_TRACE_STATIC(TR_MIB, "bad address\n"); return WIFI_ENGINE_FAILURE; } index = ADDRTOINDEX(addr, mibtable); nentries = size; mib_id_index++; component = (unsigned int)(mib_id->octets[mib_id_index]) - 1; continue; } else if(type == MIB_OBJECT_REFERENCE_TYPE_MIB_SUBTABLE) { if(sdesc != MIB_OBJECT_SIZE_DESCRIPTION_FIXED_SIZE) { DE_TRACE_STATIC(TR_MIB, "unexpected size description type\n"); return WIFI_ENGINE_FAILURE; } if(size != 2) { DE_TRACE_INT(TR_MIB, "unexpected subtable size (%u)\n", size); return WIFI_ENGINE_FAILURE; } if(!ADDRVALID(addr, mibtable)) { DE_TRACE_STATIC(TR_MIB, "bad address\n"); return WIFI_ENGINE_FAILURE; } index = ADDRTOINDEX(addr, mibtable); nentries = size; if(final_component) { component = 0; } else { component = 1; } continue; } else { if(!final_component) { DE_TRACE_STATIC(TR_MIB, "leaf with more components\n"); return WIFI_ENGINE_FAILURE; } *entry = *oe; break; } } return WIFI_ENGINE_SUCCESS; } #endif /* DE_MIB_TABLE_SUPPORT */ void wei_free_mibtable(void) { struct mib_table *tmp = mibtable; mibtable = NULL; if(tmp != NULL) DriverEnvironment_Free(tmp); } #if DE_MIB_TABLE_SUPPORT == CFG_ON int WiFiEngine_RegisterMIBTable(const void *table, size_t size, uint32_t vaddr) { struct mib_table *tmp; mib_object_reference_type_t type; mib_object_size_description_t sdesc; uint32_t addr; uint32_t esize; tmp = DriverEnvironment_Malloc(sizeof(*tmp) - sizeof(tmp->table) + size); if(tmp == NULL) { DE_TRACE_STATIC(TR_ALWAYS, "failed to allocate memory for MIB table\n"); return WIFI_ENGINE_FAILURE_RESOURCES; } DE_MEMCPY(tmp->table, table, size); tmp->size = size; 
tmp->vaddr = vaddr; type = (mib_object_reference_type_t)GETFIELD(tmp->table[0].storage_description, REFERENCE_TYPE); sdesc = (mib_object_size_description_t)GETFIELD(tmp->table[0].storage_description, OBJECT_SIZE_DESCRIPTION); addr = GETADDR(tmp->table[0].reference); esize = GETFIELD(tmp->table[0].storage_description, OBJECT_SIZE); if(type != MIB_OBJECT_REFERENCE_TYPE_MIB_TABLE || sdesc != MIB_OBJECT_SIZE_DESCRIPTION_FIXED_SIZE || !ADDRVALID(addr, tmp)) { DE_TRACE_INT5(TR_ALWAYS, "bad MIB table format t:%x s:%x a:%x e:%x v:%x\n", type, sdesc, addr, esize, ADDRVALID(addr, tmp)); DriverEnvironment_Free(tmp); return WIFI_ENGINE_FAILURE_INVALID_DATA; } tmp->rootindex = ADDRTOINDEX(addr, tmp); tmp->rootentries = esize; wei_free_mibtable(); mibtable = tmp; return WIFI_ENGINE_SUCCESS; } #endif /* DE_MIB_TABLE_SUPPORT */
gpl-2.0
DrKLO/Telegram
TMessagesProj/jni/opus/silk/float/noise_shape_analysis_FLP.c
100
15313
/*********************************************************************** Copyright (c) 2006-2011, Skype Limited. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of Internet Society, IETF or IETF Trust, nor the names of specific contributors, may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***********************************************************************/ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "main_FLP.h" #include "tuning_parameters.h" /* Compute gain to make warped filter coefficients have a zero mean log frequency response on a */ /* non-warped frequency scale. (So that it can be implemented with a minimum-phase monic filter.) 
*/ /* Note: A monic filter is one with the first coefficient equal to 1.0. In Silk we omit the first */ /* coefficient in an array of coefficients, for monic filters. */ static OPUS_INLINE silk_float warped_gain( const silk_float *coefs, silk_float lambda, opus_int order ) { opus_int i; silk_float gain; lambda = -lambda; gain = coefs[ order - 1 ]; for( i = order - 2; i >= 0; i-- ) { gain = lambda * gain + coefs[ i ]; } return (silk_float)( 1.0f / ( 1.0f - lambda * gain ) ); } /* Convert warped filter coefficients to monic pseudo-warped coefficients and limit maximum */ /* amplitude of monic warped coefficients by using bandwidth expansion on the true coefficients */ static OPUS_INLINE void warped_true2monic_coefs( silk_float *coefs, silk_float lambda, silk_float limit, opus_int order ) { opus_int i, iter, ind = 0; silk_float tmp, maxabs, chirp, gain; /* Convert to monic coefficients */ for( i = order - 1; i > 0; i-- ) { coefs[ i - 1 ] -= lambda * coefs[ i ]; } gain = ( 1.0f - lambda * lambda ) / ( 1.0f + lambda * coefs[ 0 ] ); for( i = 0; i < order; i++ ) { coefs[ i ] *= gain; } /* Limit */ for( iter = 0; iter < 10; iter++ ) { /* Find maximum absolute value */ maxabs = -1.0f; for( i = 0; i < order; i++ ) { tmp = silk_abs_float( coefs[ i ] ); if( tmp > maxabs ) { maxabs = tmp; ind = i; } } if( maxabs <= limit ) { /* Coefficients are within range - done */ return; } /* Convert back to true warped coefficients */ for( i = 1; i < order; i++ ) { coefs[ i - 1 ] += lambda * coefs[ i ]; } gain = 1.0f / gain; for( i = 0; i < order; i++ ) { coefs[ i ] *= gain; } /* Apply bandwidth expansion */ chirp = 0.99f - ( 0.8f + 0.1f * iter ) * ( maxabs - limit ) / ( maxabs * ( ind + 1 ) ); silk_bwexpander_FLP( coefs, order, chirp ); /* Convert to monic warped coefficients */ for( i = order - 1; i > 0; i-- ) { coefs[ i - 1 ] -= lambda * coefs[ i ]; } gain = ( 1.0f - lambda * lambda ) / ( 1.0f + lambda * coefs[ 0 ] ); for( i = 0; i < order; i++ ) { coefs[ i ] *= gain; } } silk_assert( 0 
); } static OPUS_INLINE void limit_coefs( silk_float *coefs, silk_float limit, opus_int order ) { opus_int i, iter, ind = 0; silk_float tmp, maxabs, chirp; for( iter = 0; iter < 10; iter++ ) { /* Find maximum absolute value */ maxabs = -1.0f; for( i = 0; i < order; i++ ) { tmp = silk_abs_float( coefs[ i ] ); if( tmp > maxabs ) { maxabs = tmp; ind = i; } } if( maxabs <= limit ) { /* Coefficients are within range - done */ return; } /* Apply bandwidth expansion */ chirp = 0.99f - ( 0.8f + 0.1f * iter ) * ( maxabs - limit ) / ( maxabs * ( ind + 1 ) ); silk_bwexpander_FLP( coefs, order, chirp ); } silk_assert( 0 ); } /* Compute noise shaping coefficients and initial gain values */ void silk_noise_shape_analysis_FLP( silk_encoder_state_FLP *psEnc, /* I/O Encoder state FLP */ silk_encoder_control_FLP *psEncCtrl, /* I/O Encoder control FLP */ const silk_float *pitch_res, /* I LPC residual from pitch analysis */ const silk_float *x /* I Input signal [frame_length + la_shape] */ ) { silk_shape_state_FLP *psShapeSt = &psEnc->sShape; opus_int k, nSamples, nSegs; silk_float SNR_adj_dB, HarmShapeGain, Tilt; silk_float nrg, log_energy, log_energy_prev, energy_variation; silk_float BWExp, gain_mult, gain_add, strength, b, warping; silk_float x_windowed[ SHAPE_LPC_WIN_MAX ]; silk_float auto_corr[ MAX_SHAPE_LPC_ORDER + 1 ]; silk_float rc[ MAX_SHAPE_LPC_ORDER + 1 ]; const silk_float *x_ptr, *pitch_res_ptr; /* Point to start of first LPC analysis block */ x_ptr = x - psEnc->sCmn.la_shape; /****************/ /* GAIN CONTROL */ /****************/ SNR_adj_dB = psEnc->sCmn.SNR_dB_Q7 * ( 1 / 128.0f ); /* Input quality is the average of the quality in the lowest two VAD bands */ psEncCtrl->input_quality = 0.5f * ( psEnc->sCmn.input_quality_bands_Q15[ 0 ] + psEnc->sCmn.input_quality_bands_Q15[ 1 ] ) * ( 1.0f / 32768.0f ); /* Coding quality level, between 0.0 and 1.0 */ psEncCtrl->coding_quality = silk_sigmoid( 0.25f * ( SNR_adj_dB - 20.0f ) ); if( psEnc->sCmn.useCBR == 0 ) { /* Reduce 
coding SNR during low speech activity */ b = 1.0f - psEnc->sCmn.speech_activity_Q8 * ( 1.0f / 256.0f ); SNR_adj_dB -= BG_SNR_DECR_dB * psEncCtrl->coding_quality * ( 0.5f + 0.5f * psEncCtrl->input_quality ) * b * b; } if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /* Reduce gains for periodic signals */ SNR_adj_dB += HARM_SNR_INCR_dB * psEnc->LTPCorr; } else { /* For unvoiced signals and low-quality input, adjust the quality slower than SNR_dB setting */ SNR_adj_dB += ( -0.4f * psEnc->sCmn.SNR_dB_Q7 * ( 1 / 128.0f ) + 6.0f ) * ( 1.0f - psEncCtrl->input_quality ); } /*************************/ /* SPARSENESS PROCESSING */ /*************************/ /* Set quantizer offset */ if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /* Initially set to 0; may be overruled in process_gains(..) */ psEnc->sCmn.indices.quantOffsetType = 0; } else { /* Sparseness measure, based on relative fluctuations of energy per 2 milliseconds */ nSamples = 2 * psEnc->sCmn.fs_kHz; energy_variation = 0.0f; log_energy_prev = 0.0f; pitch_res_ptr = pitch_res; nSegs = silk_SMULBB( SUB_FRAME_LENGTH_MS, psEnc->sCmn.nb_subfr ) / 2; for( k = 0; k < nSegs; k++ ) { nrg = ( silk_float )nSamples + ( silk_float )silk_energy_FLP( pitch_res_ptr, nSamples ); log_energy = silk_log2( nrg ); if( k > 0 ) { energy_variation += silk_abs_float( log_energy - log_energy_prev ); } log_energy_prev = log_energy; pitch_res_ptr += nSamples; } /* Set quantization offset depending on sparseness measure */ if( energy_variation > ENERGY_VARIATION_THRESHOLD_QNT_OFFSET * (nSegs-1) ) { psEnc->sCmn.indices.quantOffsetType = 0; } else { psEnc->sCmn.indices.quantOffsetType = 1; } } /*******************************/ /* Control bandwidth expansion */ /*******************************/ /* More BWE for signals with high prediction gain */ strength = FIND_PITCH_WHITE_NOISE_FRACTION * psEncCtrl->predGain; /* between 0.0 and 1.0 */ BWExp = BANDWIDTH_EXPANSION / ( 1.0f + strength * strength ); /* Slightly more warping in analysis 
will move quantization noise up in frequency, where it's better masked */ warping = (silk_float)psEnc->sCmn.warping_Q16 / 65536.0f + 0.01f * psEncCtrl->coding_quality; /********************************************/ /* Compute noise shaping AR coefs and gains */ /********************************************/ for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) { /* Apply window: sine slope followed by flat part followed by cosine slope */ opus_int shift, slope_part, flat_part; flat_part = psEnc->sCmn.fs_kHz * 3; slope_part = ( psEnc->sCmn.shapeWinLength - flat_part ) / 2; silk_apply_sine_window_FLP( x_windowed, x_ptr, 1, slope_part ); shift = slope_part; silk_memcpy( x_windowed + shift, x_ptr + shift, flat_part * sizeof(silk_float) ); shift += flat_part; silk_apply_sine_window_FLP( x_windowed + shift, x_ptr + shift, 2, slope_part ); /* Update pointer: next LPC analysis block */ x_ptr += psEnc->sCmn.subfr_length; if( psEnc->sCmn.warping_Q16 > 0 ) { /* Calculate warped auto correlation */ silk_warped_autocorrelation_FLP( auto_corr, x_windowed, warping, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder ); } else { /* Calculate regular auto correlation */ silk_autocorrelation_FLP( auto_corr, x_windowed, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder + 1 ); } /* Add white noise, as a fraction of energy */ auto_corr[ 0 ] += auto_corr[ 0 ] * SHAPE_WHITE_NOISE_FRACTION + 1.0f; /* Convert correlations to prediction coefficients, and compute residual energy */ nrg = silk_schur_FLP( rc, auto_corr, psEnc->sCmn.shapingLPCOrder ); silk_k2a_FLP( &psEncCtrl->AR[ k * MAX_SHAPE_LPC_ORDER ], rc, psEnc->sCmn.shapingLPCOrder ); psEncCtrl->Gains[ k ] = ( silk_float )sqrt( nrg ); if( psEnc->sCmn.warping_Q16 > 0 ) { /* Adjust gain for warping */ psEncCtrl->Gains[ k ] *= warped_gain( &psEncCtrl->AR[ k * MAX_SHAPE_LPC_ORDER ], warping, psEnc->sCmn.shapingLPCOrder ); } /* Bandwidth expansion for synthesis filter shaping */ silk_bwexpander_FLP( &psEncCtrl->AR[ k * MAX_SHAPE_LPC_ORDER ], 
psEnc->sCmn.shapingLPCOrder, BWExp ); if( psEnc->sCmn.warping_Q16 > 0 ) { /* Convert to monic warped prediction coefficients and limit absolute values */ warped_true2monic_coefs( &psEncCtrl->AR[ k * MAX_SHAPE_LPC_ORDER ], warping, 3.999f, psEnc->sCmn.shapingLPCOrder ); } else { /* Limit absolute values */ limit_coefs( &psEncCtrl->AR[ k * MAX_SHAPE_LPC_ORDER ], 3.999f, psEnc->sCmn.shapingLPCOrder ); } } /*****************/ /* Gain tweaking */ /*****************/ /* Increase gains during low speech activity */ gain_mult = (silk_float)pow( 2.0f, -0.16f * SNR_adj_dB ); gain_add = (silk_float)pow( 2.0f, 0.16f * MIN_QGAIN_DB ); for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) { psEncCtrl->Gains[ k ] *= gain_mult; psEncCtrl->Gains[ k ] += gain_add; } /************************************************/ /* Control low-frequency shaping and noise tilt */ /************************************************/ /* Less low frequency shaping for noisy inputs */ strength = LOW_FREQ_SHAPING * ( 1.0f + LOW_QUALITY_LOW_FREQ_SHAPING_DECR * ( psEnc->sCmn.input_quality_bands_Q15[ 0 ] * ( 1.0f / 32768.0f ) - 1.0f ) ); strength *= psEnc->sCmn.speech_activity_Q8 * ( 1.0f / 256.0f ); if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /* Reduce low frequencies quantization noise for periodic signals, depending on pitch lag */ /*f = 400; freqz([1, -0.98 + 2e-4 * f], [1, -0.97 + 7e-4 * f], 2^12, Fs); axis([0, 1000, -10, 1])*/ for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) { b = 0.2f / psEnc->sCmn.fs_kHz + 3.0f / psEncCtrl->pitchL[ k ]; psEncCtrl->LF_MA_shp[ k ] = -1.0f + b; psEncCtrl->LF_AR_shp[ k ] = 1.0f - b - b * strength; } Tilt = - HP_NOISE_COEF - (1 - HP_NOISE_COEF) * HARM_HP_NOISE_COEF * psEnc->sCmn.speech_activity_Q8 * ( 1.0f / 256.0f ); } else { b = 1.3f / psEnc->sCmn.fs_kHz; psEncCtrl->LF_MA_shp[ 0 ] = -1.0f + b; psEncCtrl->LF_AR_shp[ 0 ] = 1.0f - b - b * strength * 0.6f; for( k = 1; k < psEnc->sCmn.nb_subfr; k++ ) { psEncCtrl->LF_MA_shp[ k ] = psEncCtrl->LF_MA_shp[ 0 ]; psEncCtrl->LF_AR_shp[ 
k ] = psEncCtrl->LF_AR_shp[ 0 ]; } Tilt = -HP_NOISE_COEF; } /****************************/ /* HARMONIC SHAPING CONTROL */ /****************************/ if( USE_HARM_SHAPING && psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /* Harmonic noise shaping */ HarmShapeGain = HARMONIC_SHAPING; /* More harmonic noise shaping for high bitrates or noisy input */ HarmShapeGain += HIGH_RATE_OR_LOW_QUALITY_HARMONIC_SHAPING * ( 1.0f - ( 1.0f - psEncCtrl->coding_quality ) * psEncCtrl->input_quality ); /* Less harmonic noise shaping for less periodic signals */ HarmShapeGain *= ( silk_float )sqrt( psEnc->LTPCorr ); } else { HarmShapeGain = 0.0f; } /*************************/ /* Smooth over subframes */ /*************************/ for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) { psShapeSt->HarmShapeGain_smth += SUBFR_SMTH_COEF * ( HarmShapeGain - psShapeSt->HarmShapeGain_smth ); psEncCtrl->HarmShapeGain[ k ] = psShapeSt->HarmShapeGain_smth; psShapeSt->Tilt_smth += SUBFR_SMTH_COEF * ( Tilt - psShapeSt->Tilt_smth ); psEncCtrl->Tilt[ k ] = psShapeSt->Tilt_smth; } }
gpl-2.0
sakerhsia/linux
drivers/clk/h8300/clk-h8s2678.c
356
3143
/* * H8S2678 clock driver * * Copyright 2015 Yoshinori Sato <ysato@users.sourceforge.jp> */ #include <linux/clk-provider.h> #include <linux/err.h> #include <linux/device.h> #include <linux/of_address.h> #include <linux/slab.h> static DEFINE_SPINLOCK(clklock); #define MAX_FREQ 33333333 #define MIN_FREQ 8000000 struct pll_clock { struct clk_hw hw; void __iomem *sckcr; void __iomem *pllcr; }; #define to_pll_clock(_hw) container_of(_hw, struct pll_clock, hw) static unsigned long pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct pll_clock *pll_clock = to_pll_clock(hw); int mul = 1 << (readb(pll_clock->pllcr) & 3); return parent_rate * mul; } static long pll_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { int i, m = -1; long offset[3]; if (rate > MAX_FREQ) rate = MAX_FREQ; if (rate < MIN_FREQ) rate = MIN_FREQ; for (i = 0; i < 3; i++) offset[i] = abs(rate - (*prate * (1 << i))); for (i = 0; i < 3; i++) if (m < 0) m = i; else m = (offset[i] < offset[m])?i:m; return *prate * (1 << m); } static int pll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { int pll; unsigned char val; unsigned long flags; struct pll_clock *pll_clock = to_pll_clock(hw); pll = ((rate / parent_rate) / 2) & 0x03; spin_lock_irqsave(&clklock, flags); val = readb(pll_clock->sckcr); val |= 0x08; writeb(val, pll_clock->sckcr); val = readb(pll_clock->pllcr); val &= ~0x03; val |= pll; writeb(val, pll_clock->pllcr); spin_unlock_irqrestore(&clklock, flags); return 0; } static const struct clk_ops pll_ops = { .recalc_rate = pll_recalc_rate, .round_rate = pll_round_rate, .set_rate = pll_set_rate, }; static void __init h8s2678_pll_clk_setup(struct device_node *node) { int num_parents; struct clk *clk; const char *clk_name = node->name; const char *parent_name; struct pll_clock *pll_clock; struct clk_init_data init; num_parents = of_clk_get_parent_count(node); if (num_parents < 1) { pr_err("%s: no parent found", clk_name); return; } 
pll_clock = kzalloc(sizeof(*pll_clock), GFP_KERNEL); if (!pll_clock) return; pll_clock->sckcr = of_iomap(node, 0); if (pll_clock->sckcr == NULL) { pr_err("%s: failed to map divide register", clk_name); goto free_clock; } pll_clock->pllcr = of_iomap(node, 1); if (pll_clock->pllcr == NULL) { pr_err("%s: failed to map multiply register", clk_name); goto unmap_sckcr; } parent_name = of_clk_get_parent_name(node, 0); init.name = clk_name; init.ops = &pll_ops; init.flags = CLK_IS_BASIC; init.parent_names = &parent_name; init.num_parents = 1; pll_clock->hw.init = &init; clk = clk_register(NULL, &pll_clock->hw); if (IS_ERR(clk)) { pr_err("%s: failed to register %s div clock (%ld)\n", __func__, clk_name, PTR_ERR(clk)); goto unmap_pllcr; } of_clk_add_provider(node, of_clk_src_simple_get, clk); return; unmap_pllcr: iounmap(pll_clock->pllcr); unmap_sckcr: iounmap(pll_clock->sckcr); free_clock: kfree(pll_clock); } CLK_OF_DECLARE(h8s2678_div_clk, "renesas,h8s2678-pll-clock", h8s2678_pll_clk_setup);
gpl-2.0
Victor-android/kernel_u8800
drivers/serial/8250_gsc.c
612
3620
/* * Serial Device Initialisation for Lasi/Asp/Wax/Dino * * (c) Copyright Matthew Wilcox <willy@debian.org> 2001-2002 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/module.h> #include <linux/serial_core.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/types.h> #include <asm/hardware.h> #include <asm/parisc-device.h> #include <asm/io.h> #include "8250.h" static int __init serial_init_chip(struct parisc_device *dev) { struct uart_port port; unsigned long address; int err; if (!dev->irq) { /* We find some unattached serial ports by walking native * busses. These should be silently ignored. Otherwise, * what we have here is a missing parent device, so tell * the user what they're missing. */ if (parisc_parent(dev)->id.hw_type != HPHW_IOA) printk(KERN_INFO "Serial: device 0x%llx not configured.\n" "Enable support for Wax, Lasi, Asp or Dino.\n", (unsigned long long)dev->hpa.start); return -ENODEV; } address = dev->hpa.start; if (dev->id.sversion != 0x8d) address += 0x800; memset(&port, 0, sizeof(port)); port.iotype = UPIO_MEM; /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. 
*/ port.uartclk = 7272727; port.mapbase = address; port.membase = ioremap_nocache(address, 16); port.irq = dev->irq; port.flags = UPF_BOOT_AUTOCONF; port.dev = &dev->dev; err = serial8250_register_port(&port); if (err < 0) { printk(KERN_WARNING "serial8250_register_port returned error %d\n", err); iounmap(port.membase); return err; } return 0; } static struct parisc_device_id serial_tbl[] = { { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 }, { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c }, { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d }, { 0 } }; /* Hack. Some machines have SERIAL_0 attached to Lasi and SERIAL_1 * attached to Dino. Unfortunately, Dino appears before Lasi in the device * tree. To ensure that ttyS0 == SERIAL_0, we register two drivers; one * which only knows about Lasi and then a second which will find all the * other serial ports. HPUX ignores this problem. */ static struct parisc_device_id lasi_tbl[] = { { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03B, 0x0008C }, /* C1xx/C1xxL */ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03C, 0x0008C }, /* B132L */ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03D, 0x0008C }, /* B160L */ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03E, 0x0008C }, /* B132L+ */ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03F, 0x0008C }, /* B180L+ */ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x046, 0x0008C }, /* Rocky2 120 */ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x047, 0x0008C }, /* Rocky2 150 */ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x04E, 0x0008C }, /* Kiji L2 132 */ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x056, 0x0008C }, /* Raven+ */ { 0 } }; MODULE_DEVICE_TABLE(parisc, serial_tbl); static struct parisc_driver lasi_driver = { .name = "serial_1", .id_table = lasi_tbl, .probe = serial_init_chip, }; static struct parisc_driver serial_driver = { .name = "serial", .id_table = serial_tbl, .probe = serial_init_chip, }; static int __init probe_serial_gsc(void) { register_parisc_driver(&lasi_driver); register_parisc_driver(&serial_driver); return 0; } 
module_init(probe_serial_gsc); MODULE_LICENSE("GPL");
gpl-2.0
NAM-IL/LINUX-rpi-4.2.y
arch/arc/kernel/troubleshoot.c
612
9161
/* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as */ #include <linux/ptrace.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/kdev_t.h> #include <linux/fs_struct.h> #include <linux/proc_fs.h> #include <linux/file.h> #include <asm/arcregs.h> #include <asm/irqflags.h> /* * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) * -Prints 3 regs per line and a CR. * -To continue, callee regs right after scratch, special handling of CR */ static noinline void print_reg_file(long *reg_rev, int start_num) { unsigned int i; char buf[512]; int n = 0, len = sizeof(buf); for (i = start_num; i < start_num + 13; i++) { n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t", i, (unsigned long)*reg_rev); if (((i + 1) % 3) == 0) n += scnprintf(buf + n, len - n, "\n"); /* because pt_regs has regs reversed: r12..r0, r25..r13 */ if (is_isa_arcv2() && start_num == 0) reg_rev++; else reg_rev--; } if (start_num != 0) n += scnprintf(buf + n, len - n, "\n\n"); /* To continue printing callee regs on same line as scratch regs */ if (start_num == 0) pr_info("%s", buf); else pr_cont("%s\n", buf); } static void show_callee_regs(struct callee_regs *cregs) { print_reg_file(&(cregs->r13), 13); } static void print_task_path_n_nm(struct task_struct *tsk, char *buf) { char *path_nm = NULL; struct mm_struct *mm; struct file *exe_file; mm = get_task_mm(tsk); if (!mm) goto done; exe_file = get_mm_exe_file(mm); mmput(mm); if (exe_file) { path_nm = file_path(exe_file, buf, 255); fput(exe_file); } done: pr_info("Path: %s\n", !IS_ERR(path_nm) ? 
path_nm : "?"); } static void show_faulting_vma(unsigned long address, char *buf) { struct vm_area_struct *vma; struct inode *inode; unsigned long ino = 0; dev_t dev = 0; char *nm = buf; struct mm_struct *active_mm = current->active_mm; /* can't use print_vma_addr() yet as it doesn't check for * non-inclusive vma */ down_read(&active_mm->mmap_sem); vma = find_vma(active_mm, address); /* check against the find_vma( ) behaviour which returns the next VMA * if the container VMA is not found */ if (vma && (vma->vm_start <= address)) { struct file *file = vma->vm_file; if (file) { nm = file_path(file, buf, PAGE_SIZE - 1); inode = file_inode(vma->vm_file); dev = inode->i_sb->s_dev; ino = inode->i_ino; } pr_info(" @off 0x%lx in [%s]\n" " VMA: 0x%08lx to 0x%08lx\n", vma->vm_start < TASK_UNMAPPED_BASE ? address : address - vma->vm_start, nm, vma->vm_start, vma->vm_end); } else pr_info(" @No matching VMA found\n"); up_read(&active_mm->mmap_sem); } static void show_ecr_verbose(struct pt_regs *regs) { unsigned int vec, cause_code; unsigned long address; pr_info("\n[ECR ]: 0x%08lx => ", regs->event); /* For Data fault, this is data address not instruction addr */ address = current->thread.fault_address; vec = regs->ecr_vec; cause_code = regs->ecr_cause; /* For DTLB Miss or ProtV, display the memory involved too */ if (vec == ECR_V_DTLB_MISS) { pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n", (cause_code == 0x01) ? "Read" : ((cause_code == 0x02) ? "Write" : "EX"), address, regs->ret); } else if (vec == ECR_V_ITLB_MISS) { pr_cont("Insn could not be fetched\n"); } else if (vec == ECR_V_MACH_CHK) { pr_cont("%s\n", (cause_code == 0x0) ? "Double Fault" : "Other Fatal Err"); } else if (vec == ECR_V_PROTV) { if (cause_code == ECR_C_PROTV_INST_FETCH) pr_cont("Execute from Non-exec Page\n"); else if (cause_code == ECR_C_PROTV_MISALIG_DATA) pr_cont("Misaligned r/w from 0x%08lx\n", address); else pr_cont("%s access not allowed on page\n", (cause_code == 0x01) ? 
"Read" : ((cause_code == 0x02) ? "Write" : "EX")); } else if (vec == ECR_V_INSN_ERR) { pr_cont("Illegal Insn\n"); #ifdef CONFIG_ISA_ARCV2 } else if (vec == ECR_V_MEM_ERR) { if (cause_code == 0x00) pr_cont("Bus Error from Insn Mem\n"); else if (cause_code == 0x10) pr_cont("Bus Error from Data Mem\n"); else pr_cont("Bus Error, check PRM\n"); #endif } else { pr_cont("Check Programmer's Manual\n"); } } /************************************************************************ * API called by rest of kernel ***********************************************************************/ void show_regs(struct pt_regs *regs) { struct task_struct *tsk = current; struct callee_regs *cregs; char *buf; buf = (char *)__get_free_page(GFP_TEMPORARY); if (!buf) return; print_task_path_n_nm(tsk, buf); show_regs_print_info(KERN_INFO); show_ecr_verbose(regs); pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n", current->thread.fault_address, (void *)regs->blink, (void *)regs->ret); if (user_mode(regs)) show_faulting_vma(regs->ret, buf); /* faulting code, not data */ pr_info("[STAT32]: 0x%08lx", regs->status32); #define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit" " : "" #ifdef CONFIG_ISA_ARCOMPACT pr_cont(" : %2s%2s%2s%2s%2s%2s%2s\n", (regs->status32 & STATUS_U_MASK) ? "U " : "K ", STS_BIT(regs, DE), STS_BIT(regs, AE), STS_BIT(regs, A2), STS_BIT(regs, A1), STS_BIT(regs, E2), STS_BIT(regs, E1)); #else pr_cont(" : %2s%2s%2s%2s\n", STS_BIT(regs, IE), (regs->status32 & STATUS_U_MASK) ? 
"U " : "K ", STS_BIT(regs, DE), STS_BIT(regs, AE)); #endif pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n", regs->bta, regs->sp, regs->fp); pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n", regs->lp_start, regs->lp_end, regs->lp_count); /* print regs->r0 thru regs->r12 * Sequential printing was generating horrible code */ print_reg_file(&(regs->r0), 0); /* If Callee regs were saved, display them too */ cregs = (struct callee_regs *)current->thread.callee_reg; if (cregs) show_callee_regs(cregs); free_page((unsigned long)buf); } void show_kernel_fault_diag(const char *str, struct pt_regs *regs, unsigned long address) { current->thread.fault_address = address; /* Caller and Callee regs */ show_regs(regs); /* Show stack trace if this Fatality happened in kernel mode */ if (!user_mode(regs)) show_stacktrace(current, regs); } #ifdef CONFIG_DEBUG_FS #include <linux/module.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/namei.h> #include <linux/debugfs.h> static struct dentry *test_dentry; static struct dentry *test_dir; static struct dentry *test_u32_dentry; static u32 clr_on_read = 1; #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT u32 numitlb, numdtlb, num_pte_not_present; static int fill_display_data(char *kbuf) { size_t num = 0; num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb); num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb); num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present); if (clr_on_read) numitlb = numdtlb = num_pte_not_present = 0; return num; } static int tlb_stats_open(struct inode *inode, struct file *file) { file->private_data = (void *)__get_free_page(GFP_KERNEL); return 0; } /* called on user read(): display the couters */ static ssize_t tlb_stats_output(struct file *file, /* file descriptor */ char __user *user_buf, /* user buffer */ size_t len, /* length of buffer */ loff_t *offset) /* offset in the file */ { size_t num; char *kbuf = (char 
*)file->private_data; /* All of the data can he shoved in one iteration */ if (*offset != 0) return 0; num = fill_display_data(kbuf); /* simple_read_from_buffer() is helper for copy to user space It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset @3 (offset) into the user space address starting at @1 (user_buf). @5 (len) is max size of user buffer */ return simple_read_from_buffer(user_buf, num, offset, kbuf, len); } /* called on user write : clears the counters */ static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf, size_t length, loff_t *offset) { numitlb = numdtlb = num_pte_not_present = 0; return length; } static int tlb_stats_close(struct inode *inode, struct file *file) { free_page((unsigned long)(file->private_data)); return 0; } static const struct file_operations tlb_stats_file_ops = { .read = tlb_stats_output, .write = tlb_stats_clear, .open = tlb_stats_open, .release = tlb_stats_close }; #endif static int __init arc_debugfs_init(void) { test_dir = debugfs_create_dir("arc", NULL); #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL, &tlb_stats_file_ops); #endif test_u32_dentry = debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read); return 0; } module_init(arc_debugfs_init); static void __exit arc_debugfs_exit(void) { debugfs_remove(test_u32_dentry); debugfs_remove(test_dentry); debugfs_remove(test_dir); } module_exit(arc_debugfs_exit); #endif
gpl-2.0
NamelessRom/android_kernel_nvidia_shieldtablet
drivers/input/mousedev.c
868
26453
/* * Input driver to ExplorerPS/2 device driver module. * * Copyright (c) 1999-2002 Vojtech Pavlik * Copyright (c) 2004 Dmitry Torokhov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MOUSEDEV_MINOR_BASE 32 #define MOUSEDEV_MINORS 31 #define MOUSEDEV_MIX 63 #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/random.h> #include <linux/major.h> #include <linux/device.h> #include <linux/cdev.h> #include <linux/kernel.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Mouse (ExplorerPS/2) device interfaces"); MODULE_LICENSE("GPL"); #ifndef CONFIG_INPUT_MOUSEDEV_SCREEN_X #define CONFIG_INPUT_MOUSEDEV_SCREEN_X 1024 #endif #ifndef CONFIG_INPUT_MOUSEDEV_SCREEN_Y #define CONFIG_INPUT_MOUSEDEV_SCREEN_Y 768 #endif static int xres = CONFIG_INPUT_MOUSEDEV_SCREEN_X; module_param(xres, uint, 0644); MODULE_PARM_DESC(xres, "Horizontal screen resolution"); static int yres = CONFIG_INPUT_MOUSEDEV_SCREEN_Y; module_param(yres, uint, 0644); MODULE_PARM_DESC(yres, "Vertical screen resolution"); static unsigned tap_time = 200; module_param(tap_time, uint, 0644); MODULE_PARM_DESC(tap_time, "Tap time for touchpads in absolute mode (msecs)"); struct mousedev_hw_data { int dx, dy, dz; int x, y; int abs_event; unsigned long buttons; }; struct mousedev { int open; struct input_handle handle; wait_queue_head_t wait; struct list_head client_list; spinlock_t client_lock; /* protects client_list */ struct mutex mutex; struct device dev; struct cdev cdev; bool exist; bool is_mixdev; struct list_head mixdev_node; bool opened_by_mixdev; struct mousedev_hw_data packet; unsigned int pkt_count; int old_x[4], old_y[4]; int frac_dx, frac_dy; unsigned long touch; }; enum mousedev_emul { 
MOUSEDEV_EMUL_PS2, MOUSEDEV_EMUL_IMPS, MOUSEDEV_EMUL_EXPS }; struct mousedev_motion { int dx, dy, dz; unsigned long buttons; }; #define PACKET_QUEUE_LEN 16 struct mousedev_client { struct fasync_struct *fasync; struct mousedev *mousedev; struct list_head node; struct mousedev_motion packets[PACKET_QUEUE_LEN]; unsigned int head, tail; spinlock_t packet_lock; int pos_x, pos_y; signed char ps2[6]; unsigned char ready, buffer, bufsiz; unsigned char imexseq, impsseq; enum mousedev_emul mode; unsigned long last_buttons; }; #define MOUSEDEV_SEQ_LEN 6 static unsigned char mousedev_imps_seq[] = { 0xf3, 200, 0xf3, 100, 0xf3, 80 }; static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 }; static struct mousedev *mousedev_mix; static LIST_HEAD(mousedev_mix_list); static void mixdev_open_devices(void); static void mixdev_close_devices(void); #define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03]) #define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03]) static void mousedev_touchpad_event(struct input_dev *dev, struct mousedev *mousedev, unsigned int code, int value) { int size, tmp; enum { FRACTION_DENOM = 128 }; switch (code) { case ABS_X: fx(0) = value; if (mousedev->touch && mousedev->pkt_count >= 2) { size = input_abs_get_max(dev, ABS_X) - input_abs_get_min(dev, ABS_X); if (size == 0) size = 256 * 2; tmp = ((value - fx(2)) * 256 * FRACTION_DENOM) / size; tmp += mousedev->frac_dx; mousedev->packet.dx = tmp / FRACTION_DENOM; mousedev->frac_dx = tmp - mousedev->packet.dx * FRACTION_DENOM; } break; case ABS_Y: fy(0) = value; if (mousedev->touch && mousedev->pkt_count >= 2) { /* use X size for ABS_Y to keep the same scale */ size = input_abs_get_max(dev, ABS_X) - input_abs_get_min(dev, ABS_X); if (size == 0) size = 256 * 2; tmp = -((value - fy(2)) * 256 * FRACTION_DENOM) / size; tmp += mousedev->frac_dy; mousedev->packet.dy = tmp / FRACTION_DENOM; mousedev->frac_dy = tmp - mousedev->packet.dy * FRACTION_DENOM; } break; } } static void 
mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev, unsigned int code, int value) { int min, max, size; switch (code) { case ABS_X: min = input_abs_get_min(dev, ABS_X); max = input_abs_get_max(dev, ABS_X); size = max - min; if (size == 0) size = xres ? : 1; value = clamp(value, min, max); mousedev->packet.x = ((value - min) * xres) / size; mousedev->packet.abs_event = 1; break; case ABS_Y: min = input_abs_get_min(dev, ABS_Y); max = input_abs_get_max(dev, ABS_Y); size = max - min; if (size == 0) size = yres ? : 1; value = clamp(value, min, max); mousedev->packet.y = yres - ((value - min) * yres) / size; mousedev->packet.abs_event = 1; break; } } static void mousedev_rel_event(struct mousedev *mousedev, unsigned int code, int value) { switch (code) { case REL_X: mousedev->packet.dx += value; break; case REL_Y: mousedev->packet.dy -= value; break; case REL_WHEEL: mousedev->packet.dz -= value; break; } } static void mousedev_key_event(struct mousedev *mousedev, unsigned int code, int value) { int index; switch (code) { case BTN_TOUCH: case BTN_0: case BTN_LEFT: index = 0; break; case BTN_STYLUS: case BTN_1: case BTN_RIGHT: index = 1; break; case BTN_2: case BTN_FORWARD: case BTN_STYLUS2: case BTN_MIDDLE: index = 2; break; case BTN_3: case BTN_BACK: case BTN_SIDE: index = 3; break; case BTN_4: case BTN_EXTRA: index = 4; break; default: return; } if (value) { set_bit(index, &mousedev->packet.buttons); set_bit(index, &mousedev_mix->packet.buttons); } else { clear_bit(index, &mousedev->packet.buttons); clear_bit(index, &mousedev_mix->packet.buttons); } } static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_hw_data *packet) { struct mousedev_client *client; struct mousedev_motion *p; unsigned int new_head; int wake_readers = 0; rcu_read_lock(); list_for_each_entry_rcu(client, &mousedev->client_list, node) { /* Just acquire the lock, interrupts already disabled */ spin_lock(&client->packet_lock); p = &client->packets[client->head]; 
if (client->ready && p->buttons != mousedev->packet.buttons) { new_head = (client->head + 1) % PACKET_QUEUE_LEN; if (new_head != client->tail) { p = &client->packets[client->head = new_head]; memset(p, 0, sizeof(struct mousedev_motion)); } } if (packet->abs_event) { p->dx += packet->x - client->pos_x; p->dy += packet->y - client->pos_y; client->pos_x = packet->x; client->pos_y = packet->y; } client->pos_x += packet->dx; client->pos_x = client->pos_x < 0 ? 0 : (client->pos_x >= xres ? xres : client->pos_x); client->pos_y += packet->dy; client->pos_y = client->pos_y < 0 ? 0 : (client->pos_y >= yres ? yres : client->pos_y); p->dx += packet->dx; p->dy += packet->dy; p->dz += packet->dz; p->buttons = mousedev->packet.buttons; if (p->dx || p->dy || p->dz || p->buttons != client->last_buttons) client->ready = 1; spin_unlock(&client->packet_lock); if (client->ready) { kill_fasync(&client->fasync, SIGIO, POLL_IN); wake_readers = 1; } } rcu_read_unlock(); if (wake_readers) wake_up_interruptible(&mousedev->wait); } static void mousedev_touchpad_touch(struct mousedev *mousedev, int value) { if (!value) { if (mousedev->touch && time_before(jiffies, mousedev->touch + msecs_to_jiffies(tap_time))) { /* * Toggle left button to emulate tap. * We rely on the fact that mousedev_mix always has 0 * motion packet so we won't mess current position. 
*/ set_bit(0, &mousedev->packet.buttons); set_bit(0, &mousedev_mix->packet.buttons); mousedev_notify_readers(mousedev, &mousedev_mix->packet); mousedev_notify_readers(mousedev_mix, &mousedev_mix->packet); clear_bit(0, &mousedev->packet.buttons); clear_bit(0, &mousedev_mix->packet.buttons); } mousedev->touch = mousedev->pkt_count = 0; mousedev->frac_dx = 0; mousedev->frac_dy = 0; } else if (!mousedev->touch) mousedev->touch = jiffies; } static void mousedev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) { struct mousedev *mousedev = handle->private; switch (type) { case EV_ABS: /* Ignore joysticks */ if (test_bit(BTN_TRIGGER, handle->dev->keybit)) return; if (test_bit(BTN_TOOL_FINGER, handle->dev->keybit)) mousedev_touchpad_event(handle->dev, mousedev, code, value); else mousedev_abs_event(handle->dev, mousedev, code, value); break; case EV_REL: mousedev_rel_event(mousedev, code, value); break; case EV_KEY: if (value != 2) { if (code == BTN_TOUCH && test_bit(BTN_TOOL_FINGER, handle->dev->keybit)) mousedev_touchpad_touch(mousedev, value); else mousedev_key_event(mousedev, code, value); } break; case EV_SYN: if (code == SYN_REPORT) { if (mousedev->touch) { mousedev->pkt_count++; /* * Input system eats duplicate events, * but we need all of them to do correct * averaging so apply present one forward */ fx(0) = fx(1); fy(0) = fy(1); } mousedev_notify_readers(mousedev, &mousedev->packet); mousedev_notify_readers(mousedev_mix, &mousedev->packet); mousedev->packet.dx = mousedev->packet.dy = mousedev->packet.dz = 0; mousedev->packet.abs_event = 0; } break; } } static int mousedev_fasync(int fd, struct file *file, int on) { struct mousedev_client *client = file->private_data; return fasync_helper(fd, file, on, &client->fasync); } static void mousedev_free(struct device *dev) { struct mousedev *mousedev = container_of(dev, struct mousedev, dev); input_put_device(mousedev->handle.dev); kfree(mousedev); } static int 
mousedev_open_device(struct mousedev *mousedev) { int retval; retval = mutex_lock_interruptible(&mousedev->mutex); if (retval) return retval; if (mousedev->is_mixdev) mixdev_open_devices(); else if (!mousedev->exist) retval = -ENODEV; else if (!mousedev->open++) { retval = input_open_device(&mousedev->handle); if (retval) mousedev->open--; } mutex_unlock(&mousedev->mutex); return retval; } static void mousedev_close_device(struct mousedev *mousedev) { mutex_lock(&mousedev->mutex); if (mousedev->is_mixdev) mixdev_close_devices(); else if (mousedev->exist && !--mousedev->open) input_close_device(&mousedev->handle); mutex_unlock(&mousedev->mutex); } /* * Open all available devices so they can all be multiplexed in one. * stream. Note that this function is called with mousedev_mix->mutex * held. */ static void mixdev_open_devices(void) { struct mousedev *mousedev; if (mousedev_mix->open++) return; list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { if (!mousedev->opened_by_mixdev) { if (mousedev_open_device(mousedev)) continue; mousedev->opened_by_mixdev = true; } } } /* * Close all devices that were opened as part of multiplexed * device. Note that this function is called with mousedev_mix->mutex * held. 
*/ static void mixdev_close_devices(void) { struct mousedev *mousedev; if (--mousedev_mix->open) return; list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { if (mousedev->opened_by_mixdev) { mousedev->opened_by_mixdev = false; mousedev_close_device(mousedev); } } } static void mousedev_attach_client(struct mousedev *mousedev, struct mousedev_client *client) { spin_lock(&mousedev->client_lock); list_add_tail_rcu(&client->node, &mousedev->client_list); spin_unlock(&mousedev->client_lock); } static void mousedev_detach_client(struct mousedev *mousedev, struct mousedev_client *client) { spin_lock(&mousedev->client_lock); list_del_rcu(&client->node); spin_unlock(&mousedev->client_lock); synchronize_rcu(); } static int mousedev_release(struct inode *inode, struct file *file) { struct mousedev_client *client = file->private_data; struct mousedev *mousedev = client->mousedev; mousedev_detach_client(mousedev, client); kfree(client); mousedev_close_device(mousedev); return 0; } static int mousedev_open(struct inode *inode, struct file *file) { struct mousedev_client *client; struct mousedev *mousedev; int error; #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX if (imajor(inode) == MISC_MAJOR) mousedev = mousedev_mix; else #endif mousedev = container_of(inode->i_cdev, struct mousedev, cdev); client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL); if (!client) return -ENOMEM; spin_lock_init(&client->packet_lock); client->pos_x = xres / 2; client->pos_y = yres / 2; client->mousedev = mousedev; mousedev_attach_client(mousedev, client); error = mousedev_open_device(mousedev); if (error) goto err_free_client; file->private_data = client; nonseekable_open(inode, file); return 0; err_free_client: mousedev_detach_client(mousedev, client); kfree(client); return error; } static inline int mousedev_limit_delta(int delta, int limit) { return delta > limit ? limit : (delta < -limit ? 
-limit : delta); } static void mousedev_packet(struct mousedev_client *client, signed char *ps2_data) { struct mousedev_motion *p = &client->packets[client->tail]; ps2_data[0] = 0x08 | ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07); ps2_data[1] = mousedev_limit_delta(p->dx, 127); ps2_data[2] = mousedev_limit_delta(p->dy, 127); p->dx -= ps2_data[1]; p->dy -= ps2_data[2]; switch (client->mode) { case MOUSEDEV_EMUL_EXPS: ps2_data[3] = mousedev_limit_delta(p->dz, 7); p->dz -= ps2_data[3]; ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1); client->bufsiz = 4; break; case MOUSEDEV_EMUL_IMPS: ps2_data[0] |= ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); ps2_data[3] = mousedev_limit_delta(p->dz, 127); p->dz -= ps2_data[3]; client->bufsiz = 4; break; case MOUSEDEV_EMUL_PS2: default: ps2_data[0] |= ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); p->dz = 0; client->bufsiz = 3; break; } if (!p->dx && !p->dy && !p->dz) { if (client->tail == client->head) { client->ready = 0; client->last_buttons = p->buttons; } else client->tail = (client->tail + 1) % PACKET_QUEUE_LEN; } } static void mousedev_generate_response(struct mousedev_client *client, int command) { client->ps2[0] = 0xfa; /* ACK */ switch (command) { case 0xeb: /* Poll */ mousedev_packet(client, &client->ps2[1]); client->bufsiz++; /* account for leading ACK */ break; case 0xf2: /* Get ID */ switch (client->mode) { case MOUSEDEV_EMUL_PS2: client->ps2[1] = 0; break; case MOUSEDEV_EMUL_IMPS: client->ps2[1] = 3; break; case MOUSEDEV_EMUL_EXPS: client->ps2[1] = 4; break; } client->bufsiz = 2; break; case 0xe9: /* Get info */ client->ps2[1] = 0x60; client->ps2[2] = 3; client->ps2[3] = 200; client->bufsiz = 4; break; case 0xff: /* Reset */ client->impsseq = client->imexseq = 0; client->mode = MOUSEDEV_EMUL_PS2; client->ps2[1] = 0xaa; client->ps2[2] = 0x00; client->bufsiz = 3; break; default: client->bufsiz = 1; break; } client->buffer = client->bufsiz; } static ssize_t 
mousedev_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct mousedev_client *client = file->private_data; unsigned char c; unsigned int i; for (i = 0; i < count; i++) { if (get_user(c, buffer + i)) return -EFAULT; spin_lock_irq(&client->packet_lock); if (c == mousedev_imex_seq[client->imexseq]) { if (++client->imexseq == MOUSEDEV_SEQ_LEN) { client->imexseq = 0; client->mode = MOUSEDEV_EMUL_EXPS; } } else client->imexseq = 0; if (c == mousedev_imps_seq[client->impsseq]) { if (++client->impsseq == MOUSEDEV_SEQ_LEN) { client->impsseq = 0; client->mode = MOUSEDEV_EMUL_IMPS; } } else client->impsseq = 0; mousedev_generate_response(client, c); spin_unlock_irq(&client->packet_lock); } kill_fasync(&client->fasync, SIGIO, POLL_IN); wake_up_interruptible(&client->mousedev->wait); return count; } static ssize_t mousedev_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct mousedev_client *client = file->private_data; struct mousedev *mousedev = client->mousedev; signed char data[sizeof(client->ps2)]; int retval = 0; if (!client->ready && !client->buffer && mousedev->exist && (file->f_flags & O_NONBLOCK)) return -EAGAIN; retval = wait_event_interruptible(mousedev->wait, !mousedev->exist || client->ready || client->buffer); if (retval) return retval; if (!mousedev->exist) return -ENODEV; spin_lock_irq(&client->packet_lock); if (!client->buffer && client->ready) { mousedev_packet(client, client->ps2); client->buffer = client->bufsiz; } if (count > client->buffer) count = client->buffer; memcpy(data, client->ps2 + client->bufsiz - client->buffer, count); client->buffer -= count; spin_unlock_irq(&client->packet_lock); if (copy_to_user(buffer, data, count)) return -EFAULT; return count; } /* No kernel lock - fine */ static unsigned int mousedev_poll(struct file *file, poll_table *wait) { struct mousedev_client *client = file->private_data; struct mousedev *mousedev = client->mousedev; unsigned int mask; 
poll_wait(file, &mousedev->wait, wait); mask = mousedev->exist ? POLLOUT | POLLWRNORM : POLLHUP | POLLERR; if (client->ready || client->buffer) mask |= POLLIN | POLLRDNORM; return mask; } static const struct file_operations mousedev_fops = { .owner = THIS_MODULE, .read = mousedev_read, .write = mousedev_write, .poll = mousedev_poll, .open = mousedev_open, .release = mousedev_release, .fasync = mousedev_fasync, .llseek = noop_llseek, }; /* * Mark device non-existent. This disables writes, ioctls and * prevents new users from opening the device. Already posted * blocking reads will stay, however new ones will fail. */ static void mousedev_mark_dead(struct mousedev *mousedev) { mutex_lock(&mousedev->mutex); mousedev->exist = false; mutex_unlock(&mousedev->mutex); } /* * Wake up users waiting for IO so they can disconnect from * dead device. */ static void mousedev_hangup(struct mousedev *mousedev) { struct mousedev_client *client; spin_lock(&mousedev->client_lock); list_for_each_entry(client, &mousedev->client_list, node) kill_fasync(&client->fasync, SIGIO, POLL_HUP); spin_unlock(&mousedev->client_lock); wake_up_interruptible(&mousedev->wait); } static void mousedev_cleanup(struct mousedev *mousedev) { struct input_handle *handle = &mousedev->handle; mousedev_mark_dead(mousedev); mousedev_hangup(mousedev); cdev_del(&mousedev->cdev); /* mousedev is marked dead so no one else accesses mousedev->open */ if (mousedev->open) input_close_device(handle); } static int mousedev_reserve_minor(bool mixdev) { int minor; if (mixdev) { minor = input_get_new_minor(MOUSEDEV_MIX, 1, false); if (minor < 0) pr_err("failed to reserve mixdev minor: %d\n", minor); } else { minor = input_get_new_minor(MOUSEDEV_MINOR_BASE, MOUSEDEV_MINORS, true); if (minor < 0) pr_err("failed to reserve new minor: %d\n", minor); } return minor; } static struct mousedev *mousedev_create(struct input_dev *dev, struct input_handler *handler, bool mixdev) { struct mousedev *mousedev; int minor; int error; minor 
= mousedev_reserve_minor(mixdev); if (minor < 0) { error = minor; goto err_out; } mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL); if (!mousedev) { error = -ENOMEM; goto err_free_minor; } INIT_LIST_HEAD(&mousedev->client_list); INIT_LIST_HEAD(&mousedev->mixdev_node); spin_lock_init(&mousedev->client_lock); mutex_init(&mousedev->mutex); lockdep_set_subclass(&mousedev->mutex, mixdev ? SINGLE_DEPTH_NESTING : 0); init_waitqueue_head(&mousedev->wait); if (mixdev) { dev_set_name(&mousedev->dev, "mice"); } else { int dev_no = minor; /* Normalize device number if it falls into legacy range */ if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS) dev_no -= MOUSEDEV_MINOR_BASE; dev_set_name(&mousedev->dev, "mouse%d", dev_no); } mousedev->exist = true; mousedev->is_mixdev = mixdev; mousedev->handle.dev = input_get_device(dev); mousedev->handle.name = dev_name(&mousedev->dev); mousedev->handle.handler = handler; mousedev->handle.private = mousedev; mousedev->dev.class = &input_class; if (dev) mousedev->dev.parent = &dev->dev; mousedev->dev.devt = MKDEV(INPUT_MAJOR, minor); mousedev->dev.release = mousedev_free; device_initialize(&mousedev->dev); if (!mixdev) { error = input_register_handle(&mousedev->handle); if (error) goto err_free_mousedev; } cdev_init(&mousedev->cdev, &mousedev_fops); mousedev->cdev.kobj.parent = &mousedev->dev.kobj; error = cdev_add(&mousedev->cdev, mousedev->dev.devt, 1); if (error) goto err_unregister_handle; error = device_add(&mousedev->dev); if (error) goto err_cleanup_mousedev; return mousedev; err_cleanup_mousedev: mousedev_cleanup(mousedev); err_unregister_handle: if (!mixdev) input_unregister_handle(&mousedev->handle); err_free_mousedev: put_device(&mousedev->dev); err_free_minor: input_free_minor(minor); err_out: return ERR_PTR(error); } static void mousedev_destroy(struct mousedev *mousedev) { device_del(&mousedev->dev); mousedev_cleanup(mousedev); input_free_minor(MINOR(mousedev->dev.devt)); if (!mousedev->is_mixdev) 
input_unregister_handle(&mousedev->handle); put_device(&mousedev->dev); } static int mixdev_add_device(struct mousedev *mousedev) { int retval; retval = mutex_lock_interruptible(&mousedev_mix->mutex); if (retval) return retval; if (mousedev_mix->open) { retval = mousedev_open_device(mousedev); if (retval) goto out; mousedev->opened_by_mixdev = true; } get_device(&mousedev->dev); list_add_tail(&mousedev->mixdev_node, &mousedev_mix_list); out: mutex_unlock(&mousedev_mix->mutex); return retval; } static void mixdev_remove_device(struct mousedev *mousedev) { mutex_lock(&mousedev_mix->mutex); if (mousedev->opened_by_mixdev) { mousedev->opened_by_mixdev = false; mousedev_close_device(mousedev); } list_del_init(&mousedev->mixdev_node); mutex_unlock(&mousedev_mix->mutex); put_device(&mousedev->dev); } static int mousedev_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { struct mousedev *mousedev; int error; mousedev = mousedev_create(dev, handler, false); if (IS_ERR(mousedev)) return PTR_ERR(mousedev); error = mixdev_add_device(mousedev); if (error) { mousedev_destroy(mousedev); return error; } return 0; } static void mousedev_disconnect(struct input_handle *handle) { struct mousedev *mousedev = handle->private; mixdev_remove_device(mousedev); mousedev_destroy(mousedev); } static const struct input_device_id mousedev_ids[] = { { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_RELBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) }, .keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) }, .relbit = { BIT_MASK(REL_X) | BIT_MASK(REL_Y) }, }, /* A mouse like device, at least one button, two relative axes */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_RELBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) }, .relbit = { BIT_MASK(REL_WHEEL) }, }, /* A separate scrollwheel */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | 
INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) }, .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }, }, /* A tablet like device, at least touch detection, two absolute axes */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) }, .keybit = { [BIT_WORD(BTN_TOOL_FINGER)] = BIT_MASK(BTN_TOOL_FINGER) }, .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) | BIT_MASK(ABS_PRESSURE) | BIT_MASK(ABS_TOOL_WIDTH) }, }, /* A touchpad */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) }, .keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) }, .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }, }, /* Mouse-like device with absolute X and Y but ordinary clicks, like hp ILO2 High Performance mouse */ { }, /* Terminating entry */ }; MODULE_DEVICE_TABLE(input, mousedev_ids); static struct input_handler mousedev_handler = { .event = mousedev_event, .connect = mousedev_connect, .disconnect = mousedev_disconnect, .legacy_minors = true, .minor = MOUSEDEV_MINOR_BASE, .name = "mousedev", .id_table = mousedev_ids, }; #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX #include <linux/miscdevice.h> static struct miscdevice psaux_mouse = { .minor = PSMOUSE_MINOR, .name = "psaux", .fops = &mousedev_fops, }; static bool psaux_registered; static void __init mousedev_psaux_register(void) { int error; error = misc_register(&psaux_mouse); if (error) pr_warn("could not register psaux device, error: %d\n", error); else psaux_registered = true; } static void __exit mousedev_psaux_unregister(void) { if (psaux_registered) misc_deregister(&psaux_mouse); } #else static inline void mousedev_psaux_register(void) { } static inline void mousedev_psaux_unregister(void) { } #endif static int __init mousedev_init(void) { int error; mousedev_mix = 
mousedev_create(NULL, &mousedev_handler, true); if (IS_ERR(mousedev_mix)) return PTR_ERR(mousedev_mix); error = input_register_handler(&mousedev_handler); if (error) { mousedev_destroy(mousedev_mix); return error; } mousedev_psaux_register(); pr_info("PS/2 mouse device common for all mice\n"); return 0; } static void __exit mousedev_exit(void) { mousedev_psaux_unregister(); input_unregister_handler(&mousedev_handler); mousedev_destroy(mousedev_mix); } module_init(mousedev_init); module_exit(mousedev_exit);
gpl-2.0
Jianwei-Wang/linux-drm-fsl-dcu
fs/squashfs/file.c
2148
14483
/* * Squashfs - a compressed read only filesystem for Linux * * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 * Phillip Lougher <phillip@squashfs.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2, * or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * file.c */ /* * This file contains code for handling regular files. A regular file * consists of a sequence of contiguous compressed blocks, and/or a * compressed fragment block (tail-end packed block). The compressed size * of each datablock is stored in a block list contained within the * file inode (itself stored in one or more compressed metadata blocks). * * To speed up access to datablocks when reading 'large' files (256 Mbytes or * larger), the code implements an index cache that caches the mapping from * block index to datablock location on disk. * * The index cache allows Squashfs to handle large files (up to 1.75 TiB) while * retaining a simple and space-efficient block list on disk. The cache * is split into slots, caching up to eight 224 GiB files (128 KiB blocks). * Larger files use multiple slots, with 1.75 TiB files using all 8 slots. * The index cache is designed to be memory efficient, and by default uses * 16 KiB. 
*/ #include <linux/fs.h> #include <linux/vfs.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/pagemap.h> #include <linux/mutex.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" #include "squashfs_fs_i.h" #include "squashfs.h" /* * Locate cache slot in range [offset, index] for specified inode. If * there's more than one return the slot closest to index. */ static struct meta_index *locate_meta_index(struct inode *inode, int offset, int index) { struct meta_index *meta = NULL; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; int i; mutex_lock(&msblk->meta_index_mutex); TRACE("locate_meta_index: index %d, offset %d\n", index, offset); if (msblk->meta_index == NULL) goto not_allocated; for (i = 0; i < SQUASHFS_META_SLOTS; i++) { if (msblk->meta_index[i].inode_number == inode->i_ino && msblk->meta_index[i].offset >= offset && msblk->meta_index[i].offset <= index && msblk->meta_index[i].locked == 0) { TRACE("locate_meta_index: entry %d, offset %d\n", i, msblk->meta_index[i].offset); meta = &msblk->meta_index[i]; offset = meta->offset; } } if (meta) meta->locked = 1; not_allocated: mutex_unlock(&msblk->meta_index_mutex); return meta; } /* * Find and initialise an empty cache slot for index offset. */ static struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip) { struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; struct meta_index *meta = NULL; int i; mutex_lock(&msblk->meta_index_mutex); TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip); if (msblk->meta_index == NULL) { /* * First time cache index has been used, allocate and * initialise. The cache index could be allocated at * mount time but doing it here means it is allocated only * if a 'large' file is read. 
*/ msblk->meta_index = kcalloc(SQUASHFS_META_SLOTS, sizeof(*(msblk->meta_index)), GFP_KERNEL); if (msblk->meta_index == NULL) { ERROR("Failed to allocate meta_index\n"); goto failed; } for (i = 0; i < SQUASHFS_META_SLOTS; i++) { msblk->meta_index[i].inode_number = 0; msblk->meta_index[i].locked = 0; } msblk->next_meta_index = 0; } for (i = SQUASHFS_META_SLOTS; i && msblk->meta_index[msblk->next_meta_index].locked; i--) msblk->next_meta_index = (msblk->next_meta_index + 1) % SQUASHFS_META_SLOTS; if (i == 0) { TRACE("empty_meta_index: failed!\n"); goto failed; } TRACE("empty_meta_index: returned meta entry %d, %p\n", msblk->next_meta_index, &msblk->meta_index[msblk->next_meta_index]); meta = &msblk->meta_index[msblk->next_meta_index]; msblk->next_meta_index = (msblk->next_meta_index + 1) % SQUASHFS_META_SLOTS; meta->inode_number = inode->i_ino; meta->offset = offset; meta->skip = skip; meta->entries = 0; meta->locked = 1; failed: mutex_unlock(&msblk->meta_index_mutex); return meta; } static void release_meta_index(struct inode *inode, struct meta_index *meta) { struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; mutex_lock(&msblk->meta_index_mutex); meta->locked = 0; mutex_unlock(&msblk->meta_index_mutex); } /* * Read the next n blocks from the block list, starting from * metadata block <start_block, offset>. 
*/ static long long read_indexes(struct super_block *sb, int n, u64 *start_block, int *offset) { int err, i; long long block = 0; __le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); if (blist == NULL) { ERROR("read_indexes: Failed to allocate block_list\n"); return -ENOMEM; } while (n) { int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2); err = squashfs_read_metadata(sb, blist, start_block, offset, blocks << 2); if (err < 0) { ERROR("read_indexes: reading block [%llx:%x]\n", *start_block, *offset); goto failure; } for (i = 0; i < blocks; i++) { int size = le32_to_cpu(blist[i]); block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size); } n -= blocks; } kfree(blist); return block; failure: kfree(blist); return err; } /* * Each cache index slot has SQUASHFS_META_ENTRIES, each of which * can cache one index -> datablock/blocklist-block mapping. We wish * to distribute these over the length of the file, entry[0] maps index x, * entry[1] maps index x + skip, entry[2] maps index x + 2 * skip, and so on. * The larger the file, the greater the skip factor. The skip factor is * limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure * the number of metadata blocks that need to be read fits into the cache. * If the skip factor is limited in this way then the file will use multiple * slots. */ static inline int calculate_skip(int blocks) { int skip = blocks / ((SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES); return min(SQUASHFS_CACHED_BLKS - 1, skip + 1); } /* * Search and grow the index cache for the specified inode, returning the * on-disk locations of the datablock and block list metadata block * <index_block, index_offset> for index (scaled to nearest cache index). 
*/ static int fill_meta_index(struct inode *inode, int index, u64 *index_block, int *index_offset, u64 *data_block) { struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; int skip = calculate_skip(i_size_read(inode) >> msblk->block_log); int offset = 0; struct meta_index *meta; struct meta_entry *meta_entry; u64 cur_index_block = squashfs_i(inode)->block_list_start; int cur_offset = squashfs_i(inode)->offset; u64 cur_data_block = squashfs_i(inode)->start; int err, i; /* * Scale index to cache index (cache slot entry) */ index /= SQUASHFS_META_INDEXES * skip; while (offset < index) { meta = locate_meta_index(inode, offset + 1, index); if (meta == NULL) { meta = empty_meta_index(inode, offset + 1, skip); if (meta == NULL) goto all_done; } else { offset = index < meta->offset + meta->entries ? index : meta->offset + meta->entries - 1; meta_entry = &meta->meta_entry[offset - meta->offset]; cur_index_block = meta_entry->index_block + msblk->inode_table; cur_offset = meta_entry->offset; cur_data_block = meta_entry->data_block; TRACE("get_meta_index: offset %d, meta->offset %d, " "meta->entries %d\n", offset, meta->offset, meta->entries); TRACE("get_meta_index: index_block 0x%llx, offset 0x%x" " data_block 0x%llx\n", cur_index_block, cur_offset, cur_data_block); } /* * If necessary grow cache slot by reading block list. Cache * slot is extended up to index or to the end of the slot, in * which case further slots will be used. */ for (i = meta->offset + meta->entries; i <= index && i < meta->offset + SQUASHFS_META_ENTRIES; i++) { int blocks = skip * SQUASHFS_META_INDEXES; long long res = read_indexes(inode->i_sb, blocks, &cur_index_block, &cur_offset); if (res < 0) { if (meta->entries == 0) /* * Don't leave an empty slot on read * error allocated to this inode... 
*/ meta->inode_number = 0; err = res; goto failed; } cur_data_block += res; meta_entry = &meta->meta_entry[i - meta->offset]; meta_entry->index_block = cur_index_block - msblk->inode_table; meta_entry->offset = cur_offset; meta_entry->data_block = cur_data_block; meta->entries++; offset++; } TRACE("get_meta_index: meta->offset %d, meta->entries %d\n", meta->offset, meta->entries); release_meta_index(inode, meta); } all_done: *index_block = cur_index_block; *index_offset = cur_offset; *data_block = cur_data_block; /* * Scale cache index (cache slot entry) to index */ return offset * SQUASHFS_META_INDEXES * skip; failed: release_meta_index(inode, meta); return err; } /* * Get the on-disk location and compressed size of the datablock * specified by index. Fill_meta_index() does most of the work. */ static int read_blocklist(struct inode *inode, int index, u64 *block) { u64 start; long long blks; int offset; __le32 size; int res = fill_meta_index(inode, index, &start, &offset, block); TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset" " 0x%x, block 0x%llx\n", res, index, start, offset, *block); if (res < 0) return res; /* * res contains the index of the mapping returned by fill_meta_index(), * this will likely be less than the desired index (because the * meta_index cache works at a higher granularity). Read any * extra block indexes needed. */ if (res < index) { blks = read_indexes(inode->i_sb, index - res, &start, &offset); if (blks < 0) return (int) blks; *block += blks; } /* * Read length of block specified by index. 
*/ res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset, sizeof(size)); if (res < 0) return res; return le32_to_cpu(size); } /* Copy data into page cache */ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, int bytes, int offset) { struct inode *inode = page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; void *pageaddr; int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; int start_index = page->index & ~mask, end_index = start_index | mask; /* * Loop copying datablock into pages. As the datablock likely covers * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly * grab the pages from the page cache, except for the page that we've * been called to fill. */ for (i = start_index; i <= end_index && bytes > 0; i++, bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { struct page *push_page; int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0; TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail); push_page = (i == page->index) ? 
page : grab_cache_page_nowait(page->mapping, i); if (!push_page) continue; if (PageUptodate(push_page)) goto skip_page; pageaddr = kmap_atomic(push_page); squashfs_copy_data(pageaddr, buffer, offset, avail); memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); kunmap_atomic(pageaddr); flush_dcache_page(push_page); SetPageUptodate(push_page); skip_page: unlock_page(push_page); if (i != page->index) page_cache_release(push_page); } } /* Read datablock stored packed inside a fragment (tail-end packed block) */ static int squashfs_readpage_fragment(struct page *page) { struct inode *inode = page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_size); int res = buffer->error; if (res) ERROR("Unable to read page, block %llx, size %x\n", squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_size); else squashfs_copy_cache(page, buffer, i_size_read(inode) & (msblk->block_size - 1), squashfs_i(inode)->fragment_offset); squashfs_cache_put(buffer); return res; } static int squashfs_readpage_sparse(struct page *page, int index, int file_end) { struct inode *inode = page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; int bytes = index == file_end ? 
(i_size_read(inode) & (msblk->block_size - 1)) : msblk->block_size; squashfs_copy_cache(page, NULL, bytes, 0); return 0; } static int squashfs_readpage(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT); int file_end = i_size_read(inode) >> msblk->block_log; int res; void *pageaddr; TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n", page->index, squashfs_i(inode)->start); if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)) goto out; if (index < file_end || squashfs_i(inode)->fragment_block == SQUASHFS_INVALID_BLK) { u64 block = 0; int bsize = read_blocklist(inode, index, &block); if (bsize < 0) goto error_out; if (bsize == 0) res = squashfs_readpage_sparse(page, index, file_end); else res = squashfs_readpage_block(page, block, bsize); } else res = squashfs_readpage_fragment(page); if (!res) return 0; error_out: SetPageError(page); out: pageaddr = kmap_atomic(page); memset(pageaddr, 0, PAGE_CACHE_SIZE); kunmap_atomic(pageaddr); flush_dcache_page(page); if (!PageError(page)) SetPageUptodate(page); unlock_page(page); return 0; } const struct address_space_operations squashfs_aops = { .readpage = squashfs_readpage };
gpl-2.0
johnnyslt/OLD_android_kernel_shooter
arch/sparc/kernel/unaligned_64.c
2148
17603
/* * unaligned.c: Unaligned load/store trap handling with special * cases for the kernel to do them more quickly. * * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net) * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/module.h> #include <asm/asi.h> #include <asm/ptrace.h> #include <asm/pstate.h> #include <asm/processor.h> #include <asm/system.h> #include <asm/uaccess.h> #include <linux/smp.h> #include <linux/bitops.h> #include <linux/perf_event.h> #include <linux/ratelimit.h> #include <linux/bitops.h> #include <asm/fpumacro.h> enum direction { load, /* ld, ldd, ldh, ldsh */ store, /* st, std, sth, stsh */ both, /* Swap, ldstub, cas, ... */ fpld, fpst, invalid, }; static inline enum direction decode_direction(unsigned int insn) { unsigned long tmp = (insn >> 21) & 1; if (!tmp) return load; else { switch ((insn>>19)&0xf) { case 15: /* swap* */ return both; default: return store; } } } /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */ static inline int decode_access_size(struct pt_regs *regs, unsigned int insn) { unsigned int tmp; tmp = ((insn >> 19) & 0xf); if (tmp == 11 || tmp == 14) /* ldx/stx */ return 8; tmp &= 3; if (!tmp) return 4; else if (tmp == 3) return 16; /* ldd/std - Although it is actually 8 */ else if (tmp == 2) return 2; else { printk("Impossible unaligned trap. insn=%08x\n", insn); die_if_kernel("Byte sized unaligned access?!?!", regs); /* GCC should never warn that control reaches the end * of this function without returning a value because * die_if_kernel() is marked with attribute 'noreturn'. * Alas, some versions do... 
*/ return 0; } } static inline int decode_asi(unsigned int insn, struct pt_regs *regs) { if (insn & 0x800000) { if (insn & 0x2000) return (unsigned char)(regs->tstate >> 24); /* %asi */ else return (unsigned char)(insn >> 5); /* imm_asi */ } else return ASI_P; } /* 0x400000 = signed, 0 = unsigned */ static inline int decode_signedness(unsigned int insn) { return (insn & 0x400000); } static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, unsigned int rd, int from_kernel) { if (rs2 >= 16 || rs1 >= 16 || rd >= 16) { if (from_kernel != 0) __asm__ __volatile__("flushw"); else flushw_user(); } } static inline long sign_extend_imm13(long imm) { return imm << 51 >> 51; } static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) { unsigned long value; if (reg < 16) return (!reg ? 0 : regs->u_regs[reg]); if (regs->tstate & TSTATE_PRIV) { struct reg_window *win; win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); value = win->locals[reg - 16]; } else if (test_thread_flag(TIF_32BIT)) { struct reg_window32 __user *win32; win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); get_user(value, &win32->locals[reg - 16]); } else { struct reg_window __user *win; win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); get_user(value, &win->locals[reg - 16]); } return value; } static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) { if (reg < 16) return &regs->u_regs[reg]; if (regs->tstate & TSTATE_PRIV) { struct reg_window *win; win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); return &win->locals[reg - 16]; } else if (test_thread_flag(TIF_32BIT)) { struct reg_window32 *win32; win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP])); return (unsigned long *)&win32->locals[reg - 16]; } else { struct reg_window *win; win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); return &win->locals[reg - 16]; } } unsigned long 
compute_effective_address(struct pt_regs *regs, unsigned int insn, unsigned int rd) { unsigned int rs1 = (insn >> 14) & 0x1f; unsigned int rs2 = insn & 0x1f; int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; if (insn & 0x2000) { maybe_flush_windows(rs1, 0, rd, from_kernel); return (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); } else { maybe_flush_windows(rs1, rs2, rd, from_kernel); return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); } } /* This is just to make gcc think die_if_kernel does return... */ static void __used unaligned_panic(char *str, struct pt_regs *regs) { die_if_kernel(str, regs); } extern int do_int_load(unsigned long *dest_reg, int size, unsigned long *saddr, int is_signed, int asi); extern int __do_int_store(unsigned long *dst_addr, int size, unsigned long src_val, int asi); static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr, struct pt_regs *regs, int asi, int orig_asi) { unsigned long zero = 0; unsigned long *src_val_p = &zero; unsigned long src_val; if (size == 16) { size = 8; zero = (((long)(reg_num ? 
(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | (unsigned)fetch_reg(reg_num + 1, regs); } else if (reg_num) { src_val_p = fetch_reg_addr(reg_num, regs); } src_val = *src_val_p; if (unlikely(asi != orig_asi)) { switch (size) { case 2: src_val = swab16(src_val); break; case 4: src_val = swab32(src_val); break; case 8: src_val = swab64(src_val); break; case 16: default: BUG(); break; } } return __do_int_store(dst_addr, size, src_val, asi); } static inline void advance(struct pt_regs *regs) { regs->tpc = regs->tnpc; regs->tnpc += 4; if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } } static inline int floating_point_load_or_store_p(unsigned int insn) { return (insn >> 24) & 1; } static inline int ok_for_kernel(unsigned int insn) { return !floating_point_load_or_store_p(insn); } static void kernel_mna_trap_fault(int fixup_tstate_asi) { struct pt_regs *regs = current_thread_info()->kern_una_regs; unsigned int insn = current_thread_info()->kern_una_insn; const struct exception_table_entry *entry; entry = search_exception_tables(regs->tpc); if (!entry) { unsigned long address; address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); if (address < PAGE_SIZE) { printk(KERN_ALERT "Unable to handle kernel NULL " "pointer dereference in mna handler"); } else printk(KERN_ALERT "Unable to handle kernel paging " "request in mna handler"); printk(KERN_ALERT " at virtual address %016lx\n",address); printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n", (current->mm ? CTX_HWBITS(current->mm->context) : CTX_HWBITS(current->active_mm->context))); printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n", (current->mm ? 
(unsigned long) current->mm->pgd : (unsigned long) current->active_mm->pgd)); die_if_kernel("Oops", regs); /* Not reached */ } regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; if (fixup_tstate_asi) { regs->tstate &= ~TSTATE_ASI; regs->tstate |= (ASI_AIUS << 24UL); } } static void log_unaligned(struct pt_regs *regs) { static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); if (__ratelimit(&ratelimit)) { printk("Kernel unaligned access at TPC[%lx] %pS\n", regs->tpc, (void *) regs->tpc); } } asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir = decode_direction(insn); int size = decode_access_size(regs, insn); int orig_asi, asi; current_thread_info()->kern_una_regs = regs; current_thread_info()->kern_una_insn = insn; orig_asi = asi = decode_asi(insn, regs); /* If this is a {get,put}_user() on an unaligned userspace pointer, * just signal a fault and do not log the event. */ if (asi == ASI_AIUS) { kernel_mna_trap_fault(0); return; } log_unaligned(regs); if (!ok_for_kernel(insn) || dir == both) { printk("Unsupported unaligned load/store trap for kernel " "at <%016lx>.\n", regs->tpc); unaligned_panic("Kernel does fpu/atomic " "unaligned load/store.", regs); kernel_mna_trap_fault(0); } else { unsigned long addr, *reg_addr; int err; addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); switch (asi) { case ASI_NL: case ASI_AIUPL: case ASI_AIUSL: case ASI_PL: case ASI_SL: case ASI_PNFL: case ASI_SNFL: asi &= ~0x08; break; } switch (dir) { case load: reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs); err = do_int_load(reg_addr, size, (unsigned long *) addr, decode_signedness(insn), asi); if (likely(!err) && unlikely(asi != orig_asi)) { unsigned long val_in = *reg_addr; switch (size) { case 2: val_in = swab16(val_in); break; case 4: val_in = swab32(val_in); break; case 8: val_in = swab64(val_in); break; case 16: default: BUG(); break; } *reg_addr = 
val_in; } break; case store: err = do_int_store(((insn>>25)&0x1f), size, (unsigned long *) addr, regs, asi, orig_asi); break; default: panic("Impossible kernel unaligned trap."); /* Not reached... */ } if (unlikely(err)) kernel_mna_trap_fault(1); else advance(regs); } } int handle_popc(u32 insn, struct pt_regs *regs) { int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; int ret, rd = ((insn >> 25) & 0x1f); u64 value; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); if (insn & 0x2000) { maybe_flush_windows(0, 0, rd, from_kernel); value = sign_extend_imm13(insn); } else { maybe_flush_windows(0, insn & 0x1f, rd, from_kernel); value = fetch_reg(insn & 0x1f, regs); } ret = hweight64(value); if (rd < 16) { if (rd) regs->u_regs[rd] = ret; } else { if (test_thread_flag(TIF_32BIT)) { struct reg_window32 __user *win32; win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); put_user(ret, &win32->locals[rd - 16]); } else { struct reg_window __user *win; win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); put_user(ret, &win->locals[rd - 16]); } } advance(regs); return 1; } extern void do_fpother(struct pt_regs *regs); extern void do_privact(struct pt_regs *regs); extern void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar); extern void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx); int handle_ldf_stq(u32 insn, struct pt_regs *regs) { unsigned long addr = compute_effective_address(regs, insn, 0); int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); struct fpustate *f = FPUSTATE; int asi = decode_asi(insn, regs); int flag = (freg < 32) ? 
FPRS_DL : FPRS_DU; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); save_and_clear_fpu(); current_thread_info()->xfsr[0] &= ~0x1c000; if (freg & 3) { current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */; do_fpother(regs); return 0; } if (insn & 0x200000) { /* STQ */ u64 first = 0, second = 0; if (current_thread_info()->fpsaved[0] & flag) { first = *(u64 *)&f->regs[freg]; second = *(u64 *)&f->regs[freg+2]; } if (asi < 0x80) { do_privact(regs); return 1; } switch (asi) { case ASI_P: case ASI_S: break; case ASI_PL: case ASI_SL: { /* Need to convert endians */ u64 tmp = __swab64p(&first); first = __swab64p(&second); second = tmp; break; } default: if (tlb_type == hypervisor) sun4v_data_access_exception(regs, addr, 0); else spitfire_data_access_exception(regs, 0, addr); return 1; } if (put_user (first >> 32, (u32 __user *)addr) || __put_user ((u32)first, (u32 __user *)(addr + 4)) || __put_user (second >> 32, (u32 __user *)(addr + 8)) || __put_user ((u32)second, (u32 __user *)(addr + 12))) { if (tlb_type == hypervisor) sun4v_data_access_exception(regs, addr, 0); else spitfire_data_access_exception(regs, 0, addr); return 1; } } else { /* LDF, LDDF, LDQF */ u32 data[4] __attribute__ ((aligned(8))); int size, i; int err; if (asi < 0x80) { do_privact(regs); return 1; } else if (asi > ASI_SNFL) { if (tlb_type == hypervisor) sun4v_data_access_exception(regs, addr, 0); else spitfire_data_access_exception(regs, 0, addr); return 1; } switch (insn & 0x180000) { case 0x000000: size = 1; break; case 0x100000: size = 4; break; default: size = 2; break; } for (i = 0; i < size; i++) data[i] = 0; err = get_user (data[0], (u32 __user *) addr); if (!err) { for (i = 1; i < size; i++) err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); } if (err && !(asi & 0x2 /* NF */)) { if (tlb_type == hypervisor) sun4v_data_access_exception(regs, addr, 0); else spitfire_data_access_exception(regs, 0, addr); return 1; } if (asi & 0x8) /* Little */ { u64 tmp; switch 
(size) { case 1: data[0] = le32_to_cpup(data + 0); break; default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0)); break; case 4: tmp = le64_to_cpup((u64 *)(data + 0)); *(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2)); *(u64 *)(data + 2) = tmp; break; } } if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) { current_thread_info()->fpsaved[0] = FPRS_FEF; current_thread_info()->gsr[0] = 0; } if (!(current_thread_info()->fpsaved[0] & flag)) { if (freg < 32) memset(f->regs, 0, 32*sizeof(u32)); else memset(f->regs+32, 0, 32*sizeof(u32)); } memcpy(f->regs + freg, data, size * 4); current_thread_info()->fpsaved[0] |= flag; } advance(regs); return 1; } void handle_ld_nf(u32 insn, struct pt_regs *regs) { int rd = ((insn >> 25) & 0x1f); int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; unsigned long *reg; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); maybe_flush_windows(0, 0, rd, from_kernel); reg = fetch_reg_addr(rd, regs); if (from_kernel || rd < 16) { reg[0] = 0; if ((insn & 0x780000) == 0x180000) reg[1] = 0; } else if (test_thread_flag(TIF_32BIT)) { put_user(0, (int __user *) reg); if ((insn & 0x780000) == 0x180000) put_user(0, ((int __user *) reg) + 1); } else { put_user(0, (unsigned long __user *) reg); if ((insn & 0x780000) == 0x180000) put_user(0, (unsigned long __user *) reg + 1); } advance(regs); } void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; u64 value; u8 freg; int flag; struct fpustate *f = FPUSTATE; if (tstate & TSTATE_PRIV) die_if_kernel("lddfmna from kernel", regs); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { int asi = decode_asi(insn, regs); u32 first, second; int err; if ((asi > ASI_SNFL) || (asi < ASI_P)) goto daex; first = second = 0; err = get_user(first, (u32 __user *)sfar); if (!err) err = 
get_user(second, (u32 __user *)(sfar + 4)); if (err) { if (!(asi & 0x2)) goto daex; first = second = 0; } save_and_clear_fpu(); freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); value = (((u64)first) << 32) | second; if (asi & 0x8) /* Little */ value = __swab64p(&value); flag = (freg < 32) ? FPRS_DL : FPRS_DU; if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) { current_thread_info()->fpsaved[0] = FPRS_FEF; current_thread_info()->gsr[0] = 0; } if (!(current_thread_info()->fpsaved[0] & flag)) { if (freg < 32) memset(f->regs, 0, 32*sizeof(u32)); else memset(f->regs+32, 0, 32*sizeof(u32)); } *(u64 *)(f->regs + freg) = value; current_thread_info()->fpsaved[0] |= flag; } else { daex: if (tlb_type == hypervisor) sun4v_data_access_exception(regs, sfar, sfsr); else spitfire_data_access_exception(regs, sfsr, sfar); return; } advance(regs); } void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; u64 value; u8 freg; int flag; struct fpustate *f = FPUSTATE; if (tstate & TSTATE_PRIV) die_if_kernel("stdfmna from kernel", regs); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { int asi = decode_asi(insn, regs); freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); value = 0; flag = (freg < 32) ? 
FPRS_DL : FPRS_DU; if ((asi > ASI_SNFL) || (asi < ASI_P)) goto daex; save_and_clear_fpu(); if (current_thread_info()->fpsaved[0] & flag) value = *(u64 *)&f->regs[freg]; switch (asi) { case ASI_P: case ASI_S: break; case ASI_PL: case ASI_SL: value = __swab64p(&value); break; default: goto daex; } if (put_user (value >> 32, (u32 __user *) sfar) || __put_user ((u32)value, (u32 __user *)(sfar + 4))) goto daex; } else { daex: if (tlb_type == hypervisor) sun4v_data_access_exception(regs, sfar, sfsr); else spitfire_data_access_exception(regs, sfsr, sfar); return; } advance(regs); }
gpl-2.0
bemolxd/android_kernel_x2xtreme-test
drivers/watchdog/i6300esb.c
2404
12730
/* * i6300esb: Watchdog timer driver for Intel 6300ESB chipset * * (c) Copyright 2004 Google Inc. * (c) Copyright 2005 David Härdeman <david@2gen.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * based on i810-tco.c which is in turn based on softdog.c * * The timer is implemented in the following I/O controller hubs: * (See the intel documentation on http://developer.intel.com.) * 6300ESB chip : document number 300641-004 * * 2004YYZZ Ross Biro * Initial version 0.01 * 2004YYZZ Ross Biro * Version 0.02 * 20050210 David Härdeman <david@2gen.com> * Ported driver to kernel 2.6 */ /* * Includes, defines, variables, module parameters, ... */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/uaccess.h> #include <linux/io.h> /* Module and version information */ #define ESB_VERSION "0.05" #define ESB_MODULE_NAME "i6300ESB timer" #define ESB_DRIVER_NAME ESB_MODULE_NAME ", v" ESB_VERSION /* PCI configuration registers */ #define ESB_CONFIG_REG 0x60 /* Config register */ #define ESB_LOCK_REG 0x68 /* WDT lock register */ /* Memory mapped registers */ #define ESB_TIMER1_REG (BASEADDR + 0x00)/* Timer1 value after each reset */ #define ESB_TIMER2_REG (BASEADDR + 0x04)/* Timer2 value after each reset */ #define ESB_GINTSR_REG (BASEADDR + 0x08)/* General Interrupt Status Register */ #define ESB_RELOAD_REG (BASEADDR + 0x0c)/* Reload register */ /* Lock register bits */ #define ESB_WDT_FUNC (0x01 << 2) /* Watchdog functionality */ #define ESB_WDT_ENABLE (0x01 << 1) /* Enable WDT */ #define ESB_WDT_LOCK (0x01 << 0) /* Lock 
(nowayout) */ /* Config register bits */ #define ESB_WDT_REBOOT (0x01 << 5) /* Enable reboot on timeout */ #define ESB_WDT_FREQ (0x01 << 2) /* Decrement frequency */ #define ESB_WDT_INTTYPE (0x03 << 0) /* Interrupt type on timer1 timeout */ /* Reload register bits */ #define ESB_WDT_TIMEOUT (0x01 << 9) /* Watchdog timed out */ #define ESB_WDT_RELOAD (0x01 << 8) /* prevent timeout */ /* Magic constants */ #define ESB_UNLOCK1 0x80 /* Step 1 to unlock reset registers */ #define ESB_UNLOCK2 0x86 /* Step 2 to unlock reset registers */ /* internal variables */ static void __iomem *BASEADDR; static DEFINE_SPINLOCK(esb_lock); /* Guards the hardware */ static unsigned long timer_alive; static struct pci_dev *esb_pci; static unsigned short triggered; /* The status of the watchdog upon boot */ static char esb_expect_close; /* We can only use 1 card due to the /dev/watchdog restriction */ static int cards_found; /* module parameters */ /* 30 sec default heartbeat (1 < heartbeat < 2*1023) */ #define WATCHDOG_HEARTBEAT 30 static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */ module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (1<heartbeat<2046, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* * Some i6300ESB specific functions */ /* * Prepare for reloading the timer by unlocking the proper registers. * This is performed by first writing 0x80 followed by 0x86 to the * reload register. After this the appropriate registers can be written * to once before they need to be unlocked again. 
*/ static inline void esb_unlock_registers(void) { writew(ESB_UNLOCK1, ESB_RELOAD_REG); writew(ESB_UNLOCK2, ESB_RELOAD_REG); } static int esb_timer_start(void) { u8 val; spin_lock(&esb_lock); esb_unlock_registers(); writew(ESB_WDT_RELOAD, ESB_RELOAD_REG); /* Enable or Enable + Lock? */ val = ESB_WDT_ENABLE | (nowayout ? ESB_WDT_LOCK : 0x00); pci_write_config_byte(esb_pci, ESB_LOCK_REG, val); spin_unlock(&esb_lock); return 0; } static int esb_timer_stop(void) { u8 val; spin_lock(&esb_lock); /* First, reset timers as suggested by the docs */ esb_unlock_registers(); writew(ESB_WDT_RELOAD, ESB_RELOAD_REG); /* Then disable the WDT */ pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x0); pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val); spin_unlock(&esb_lock); /* Returns 0 if the timer was disabled, non-zero otherwise */ return val & ESB_WDT_ENABLE; } static void esb_timer_keepalive(void) { spin_lock(&esb_lock); esb_unlock_registers(); writew(ESB_WDT_RELOAD, ESB_RELOAD_REG); /* FIXME: Do we need to flush anything here? */ spin_unlock(&esb_lock); } static int esb_timer_set_heartbeat(int time) { u32 val; if (time < 0x1 || time > (2 * 0x03ff)) return -EINVAL; spin_lock(&esb_lock); /* We shift by 9, so if we are passed a value of 1 sec, * val will be 1 << 9 = 512, then write that to two * timers => 2 * 512 = 1024 (which is decremented at 1KHz) */ val = time << 9; /* Write timer 1 */ esb_unlock_registers(); writel(val, ESB_TIMER1_REG); /* Write timer 2 */ esb_unlock_registers(); writel(val, ESB_TIMER2_REG); /* Reload */ esb_unlock_registers(); writew(ESB_WDT_RELOAD, ESB_RELOAD_REG); /* FIXME: Do we need to flush everything out? 
*/ /* Done */ heartbeat = time; spin_unlock(&esb_lock); return 0; } /* * /dev/watchdog handling */ static int esb_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ if (test_and_set_bit(0, &timer_alive)) return -EBUSY; /* Reload and activate timer */ esb_timer_start(); return nonseekable_open(inode, file); } static int esb_release(struct inode *inode, struct file *file) { /* Shut off the timer. */ if (esb_expect_close == 42) esb_timer_stop(); else { pr_crit("Unexpected close, not stopping watchdog!\n"); esb_timer_keepalive(); } clear_bit(0, &timer_alive); esb_expect_close = 0; return 0; } static ssize_t esb_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (len) { if (!nowayout) { size_t i; /* note: just in case someone wrote the magic character * five months ago... */ esb_expect_close = 0; /* scan to see whether or not we got the * magic character */ for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') esb_expect_close = 42; } } /* someone wrote to us, we should reload the timer */ esb_timer_keepalive(); } return len; } static long esb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int new_options, retval = -EINVAL; int new_heartbeat; void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .firmware_version = 0, .identity = ESB_MODULE_NAME, }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? 
		/*
		 * NOTE(review): this is the tail of esb_ioctl(); the
		 * function head (WDIOC_GETSUPPORT handling and the
		 * declarations of p/new_options/new_heartbeat/retval)
		 * lies before this chunk.
		 */
		-EFAULT : 0;
	case WDIOC_GETSTATUS:
		return put_user(0, p);
	case WDIOC_GETBOOTSTATUS:
		/* 'triggered' records whether the last reset came from the WDT */
		return put_user(triggered, p);
	case WDIOC_SETOPTIONS:
	{
		if (get_user(new_options, p))
			return -EFAULT;

		if (new_options & WDIOS_DISABLECARD) {
			esb_timer_stop();
			retval = 0;
		}

		if (new_options & WDIOS_ENABLECARD) {
			esb_timer_start();
			retval = 0;
		}
		return retval;
	}
	case WDIOC_KEEPALIVE:
		esb_timer_keepalive();
		return 0;

	case WDIOC_SETTIMEOUT:
	{
		if (get_user(new_heartbeat, p))
			return -EFAULT;
		if (esb_timer_set_heartbeat(new_heartbeat))
			return -EINVAL;
		esb_timer_keepalive();
		/* Fall */
	}
	case WDIOC_GETTIMEOUT:
		return put_user(heartbeat, p);
	default:
		return -ENOTTY;
	}
}

/*
 * Kernel Interfaces
 */

static const struct file_operations esb_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.write = esb_write,
	.unlocked_ioctl = esb_ioctl,
	.open = esb_open,
	.release = esb_release,
};

static struct miscdevice esb_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &esb_fops,
};

/*
 * Data for PCI driver interface
 */
static DEFINE_PCI_DEVICE_TABLE(esb_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), },
	{ 0, },			/* End of list */
};
MODULE_DEVICE_TABLE(pci, esb_pci_tbl);

/*
 * Init & exit routines
 */

/*
 * Enable the PCI device, claim BAR 0 and ioremap it.
 * Returns 1 on success (and sets esb_pci/BASEADDR), 0 on failure,
 * unwinding whatever was acquired via the goto cleanup chain.
 */
static unsigned char esb_getdevice(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev)) {
		pr_err("failed to enable device\n");
		goto err_devput;
	}

	if (pci_request_region(pdev, 0, ESB_MODULE_NAME)) {
		pr_err("failed to request region\n");
		goto err_disable;
	}

	BASEADDR = pci_ioremap_bar(pdev, 0);
	if (BASEADDR == NULL) {
		/* Something's wrong here, BASEADDR has to be set */
		pr_err("failed to get BASEADDR\n");
		goto err_release;
	}

	/* Done */
	esb_pci = pdev;
	return 1;

err_release:
	pci_release_region(pdev, 0);
err_disable:
	pci_disable_device(pdev);
err_devput:
	return 0;
}

/*
 * Program the watchdog's config/lock/reload registers into a known,
 * stopped state and record (in 'triggered') whether the previous
 * reset was caused by a watchdog timeout.
 */
static void esb_initdevice(void)
{
	u8 val1;
	u16 val2;

	/*
	 * Config register:
	 * Bit 5 : 0 = Enable WDT_OUTPUT
	 * Bit 2 : 0 = set the timer frequency to the PCI clock
	 * divided by 2^15 (approx 1KHz).
	 * Bits 1:0 : 11 = WDT_INT_TYPE Disabled.
	 * The watchdog has two timers, it can be setup so that the
	 * expiry of timer1 results in an interrupt and the expiry of
	 * timer2 results in a reboot. We set it to not generate
	 * any interrupts as there is not much we can do with it
	 * right now.
	 */
	pci_write_config_word(esb_pci, ESB_CONFIG_REG, 0x0003);

	/* Check that the WDT isn't already locked */
	pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1);
	if (val1 & ESB_WDT_LOCK)
		pr_warn("nowayout already set\n");

	/* Set the timer to watchdog mode and disable it for now */
	pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00);

	/* Check if the watchdog was previously triggered */
	esb_unlock_registers();
	val2 = readw(ESB_RELOAD_REG);
	if (val2 & ESB_WDT_TIMEOUT)
		triggered = WDIOF_CARDRESET;

	/* Reset WDT_TIMEOUT flag and timers */
	esb_unlock_registers();
	writew((ESB_WDT_TIMEOUT | ESB_WDT_RELOAD), ESB_RELOAD_REG);

	/* And set the correct timeout value */
	esb_timer_set_heartbeat(heartbeat);
}

/*
 * PCI probe: single-instance driver — the second and later matching
 * devices are rejected with -ENODEV.  Validates the module's
 * 'heartbeat' parameter, initializes the hardware stopped, then
 * exposes it through the watchdog misc device.
 */
static int esb_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	int ret;

	cards_found++;
	if (cards_found == 1)
		pr_info("Intel 6300ESB WatchDog Timer Driver v%s\n",
			ESB_VERSION);

	if (cards_found > 1) {
		pr_err("This driver only supports 1 device\n");
		return -ENODEV;
	}

	/* Check whether or not the hardware watchdog is there */
	if (!esb_getdevice(pdev) || esb_pci == NULL)
		return -ENODEV;

	/* Check that the heartbeat value is within its range;
	   if not reset to the default */
	if (heartbeat < 0x1 || heartbeat > 2 * 0x03ff) {
		heartbeat = WATCHDOG_HEARTBEAT;
		pr_info("heartbeat value must be 1<heartbeat<2046, using %d\n",
			heartbeat);
	}

	/* Initialize the watchdog and make sure it does not run */
	esb_initdevice();

	/* Register the watchdog so that userspace has access to it */
	ret = misc_register(&esb_miscdev);
	if (ret != 0) {
		pr_err("cannot register miscdev on minor=%d (err=%d)\n",
			WATCHDOG_MINOR, ret);
		goto err_unmap;
	}
	pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
		BASEADDR, heartbeat, nowayout);
	return 0;

err_unmap:
	iounmap(BASEADDR);
	pci_release_region(esb_pci, 0);
	pci_disable_device(esb_pci);
	esb_pci = NULL;
	return ret;
}

/*
 * PCI remove: stop the timer (unless nowayout), then tear down the
 * misc device and the PCI resources acquired in esb_getdevice().
 */
static void esb_remove(struct pci_dev *pdev)
{
	/* Stop the timer before we leave */
	if (!nowayout)
		esb_timer_stop();

	/* Deregister */
	misc_deregister(&esb_miscdev);
	iounmap(BASEADDR);
	pci_release_region(esb_pci, 0);
	pci_disable_device(esb_pci);
	esb_pci = NULL;
}

/* Quiesce the watchdog on system shutdown so it cannot fire mid-reboot. */
static void esb_shutdown(struct pci_dev *pdev)
{
	esb_timer_stop();
}

static struct pci_driver esb_driver = {
	.name = ESB_MODULE_NAME,
	.id_table = esb_pci_tbl,
	.probe = esb_probe,
	.remove = esb_remove,
	.shutdown = esb_shutdown,
};

module_pci_driver(esb_driver);

MODULE_AUTHOR("Ross Biro and David Härdeman");
MODULE_DESCRIPTION("Watchdog driver for Intel 6300ESB chipsets");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
sigma-random/linux-source-3.13.2-cgc
drivers/pinctrl/spear/pinctrl-spear1340.c
2916
50957
/*
 * Driver for the ST Microelectronics SPEAr1340 pinmux
 *
 * Copyright (C) 2012 ST Microelectronics
 * Viresh Kumar <viresh.linux@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "pinctrl-spear.h"

#define DRIVER_NAME "spear1340-pinmux"

/* pins */
static const struct pinctrl_pin_desc spear1340_pins[] = {
	SPEAR_PIN_0_TO_101,
	SPEAR_PIN_102_TO_245,
	PINCTRL_PIN(246, "PLGPIO246"),
	PINCTRL_PIN(247, "PLGPIO247"),
	PINCTRL_PIN(248, "PLGPIO248"),
	PINCTRL_PIN(249, "PLGPIO249"),
	PINCTRL_PIN(250, "PLGPIO250"),
	PINCTRL_PIN(251, "PLGPIO251"),
};

/* In SPEAr1340 there are two levels of pad muxing */
/* - pads as gpio OR peripherals */
/* NOTE(review): values look like register offsets within the SoC misc
 * register space — confirm against the SPEAr1340 reference manual. */
#define PAD_FUNCTION_EN_1	0x668
#define PAD_FUNCTION_EN_2	0x66C
#define PAD_FUNCTION_EN_3	0x670
#define PAD_FUNCTION_EN_4	0x674
#define PAD_FUNCTION_EN_5	0x690
#define PAD_FUNCTION_EN_6	0x694
#define PAD_FUNCTION_EN_7	0x698
#define PAD_FUNCTION_EN_8	0x69C

/* - If peripherals, then primary OR alternate peripheral */
#define PAD_SHARED_IP_EN_1	0x6A0
#define PAD_SHARED_IP_EN_2	0x6A4

/*
 * Macro's for first level of pmx - pads as gpio OR peripherals. There are 8
 * registers with 32 bits each for handling gpio pads, register 8 has only 26
 * relevant bits.
 */
/* macro's for making pads as gpio's */
#define PADS_AS_GPIO_REG0_MASK			0xFFFFFFFE
#define PADS_AS_GPIO_REGS_MASK			0xFFFFFFFF
#define PADS_AS_GPIO_REG7_MASK			0x07FFFFFF

/* macro's for making pads as peripherals */
#define FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK	0x00000FFE
#define UART0_ENH_AND_GPT_REG0_MASK		0x0003F000
#define PWM1_AND_KBD_COL5_REG0_MASK		0x00040000
#define I2C1_REG0_MASK				0x01080000
#define SPDIF_IN_REG0_MASK			0x00100000
#define PWM2_AND_GPT0_TMR0_CPT_REG0_MASK	0x00400000
#define PWM3_AND_GPT0_TMR1_CLK_REG0_MASK	0x00800000
#define PWM0_AND_SSP0_CS1_REG0_MASK		0x02000000
#define VIP_AND_CAM3_REG0_MASK			0xFC200000
#define VIP_AND_CAM3_REG1_MASK			0x0000000F
#define VIP_REG1_MASK				0x00001EF0
#define VIP_AND_CAM2_REG1_MASK			0x007FE100
#define VIP_AND_CAM1_REG1_MASK			0xFF800000
#define VIP_AND_CAM1_REG2_MASK			0x00000003
#define VIP_AND_CAM0_REG2_MASK			0x00001FFC
#define SMI_REG2_MASK				0x0021E000
#define SSP0_REG2_MASK				0x001E0000
#define TS_AND_SSP0_CS2_REG2_MASK		0x00400000
#define UART0_REG2_MASK				0x01800000
#define UART1_REG2_MASK				0x06000000
#define I2S_IN_REG2_MASK			0xF8000000
#define DEVS_GRP_AND_MIPHY_DBG_REG3_MASK	0x000001FE
#define I2S_OUT_REG3_MASK			0x000001EF
#define I2S_IN_REG3_MASK			0x00000010
#define GMAC_REG3_MASK				0xFFFFFE00
#define GMAC_REG4_MASK				0x0000001F
#define DEVS_GRP_AND_MIPHY_DBG_REG4_MASK	0x7FFFFF20
#define SSP0_CS3_REG4_MASK			0x00000020
#define I2C0_REG4_MASK				0x000000C0
#define CEC0_REG4_MASK				0x00000100
#define CEC1_REG4_MASK				0x00000200
#define SPDIF_OUT_REG4_MASK			0x00000400
#define CLCD_REG4_MASK				0x7FFFF800
#define CLCD_AND_ARM_TRACE_REG4_MASK		0x80000000
#define CLCD_AND_ARM_TRACE_REG5_MASK		0xFFFFFFFF
#define CLCD_AND_ARM_TRACE_REG6_MASK		0x00000001
#define FSMC_PNOR_AND_MCIF_REG6_MASK		0x073FFFFE
#define MCIF_REG6_MASK				0xF8C00000
#define MCIF_REG7_MASK				0x000043FF
#define FSMC_8BIT_REG7_MASK			0x07FFBC00

/* other registers */
#define PERIP_CFG			0x42C
/* PERIP_CFG register masks */
#define SSP_CS_CTL_HW			0
#define SSP_CS_CTL_SW			1
#define SSP_CS_CTL_MASK			1
#define SSP_CS_CTL_SHIFT		21
#define SSP_CS_VAL_MASK			1
#define SSP_CS_VAL_SHIFT		20
#define SSP_CS_SEL_CS0			0
#define SSP_CS_SEL_CS1			1
#define SSP_CS_SEL_CS2			2
#define SSP_CS_SEL_MASK			3
#define SSP_CS_SEL_SHIFT		18

#define I2S_CHNL_2_0			(0)
#define I2S_CHNL_3_1			(1)
#define I2S_CHNL_5_1			(2)
#define I2S_CHNL_7_1			(3)
#define I2S_CHNL_PLAY_SHIFT		(4)
#define I2S_CHNL_PLAY_MASK		(3 << 4)
#define I2S_CHNL_REC_SHIFT		(6)
#define I2S_CHNL_REC_MASK		(3 << 6)

#define SPDIF_OUT_ENB_MASK		(1 << 2)
#define SPDIF_OUT_ENB_SHIFT		2

#define MCIF_SEL_SD			1
#define MCIF_SEL_CF			2
#define MCIF_SEL_XD			3
#define MCIF_SEL_MASK			3
#define MCIF_SEL_SHIFT			0

#define GMAC_CLK_CFG			0x248
#define GMAC_PHY_IF_GMII_VAL		(0 << 3)
#define GMAC_PHY_IF_RGMII_VAL		(1 << 3)
#define GMAC_PHY_IF_SGMII_VAL		(2 << 3)
#define GMAC_PHY_IF_RMII_VAL		(4 << 3)
#define GMAC_PHY_IF_SEL_MASK		(7 << 3)
#define GMAC_PHY_INPUT_ENB_VAL		0
#define GMAC_PHY_SYNT_ENB_VAL		1
#define GMAC_PHY_CLK_MASK		1
#define GMAC_PHY_CLK_SHIFT		2
#define GMAC_PHY_125M_PAD_VAL		0
#define GMAC_PHY_PLL2_VAL		1
#define GMAC_PHY_OSC3_VAL		2
#define GMAC_PHY_INPUT_CLK_MASK		3
#define GMAC_PHY_INPUT_CLK_SHIFT	0

#define PCIE_SATA_CFG			0x424
/* PCIE CFG MASks */
#define PCIE_CFG_DEVICE_PRESENT		(1 << 11)
#define PCIE_CFG_POWERUP_RESET		(1 << 10)
#define PCIE_CFG_CORE_CLK_EN		(1 << 9)
#define PCIE_CFG_AUX_CLK_EN		(1 << 8)
#define SATA_CFG_TX_CLK_EN		(1 << 4)
#define SATA_CFG_RX_CLK_EN		(1 << 3)
#define SATA_CFG_POWERUP_RESET		(1 << 2)
#define SATA_CFG_PM_CLK_EN		(1 << 1)
#define PCIE_SATA_SEL_PCIE		(0)
#define PCIE_SATA_SEL_SATA		(1)
#define SATA_PCIE_CFG_MASK		0xF1F
#define PCIE_CFG_VAL	(PCIE_SATA_SEL_PCIE | PCIE_CFG_AUX_CLK_EN | \
			PCIE_CFG_CORE_CLK_EN | PCIE_CFG_POWERUP_RESET |\
			PCIE_CFG_DEVICE_PRESENT)
#define SATA_CFG_VAL	(PCIE_SATA_SEL_SATA | SATA_CFG_PM_CLK_EN | \
			SATA_CFG_POWERUP_RESET | SATA_CFG_RX_CLK_EN | \
			SATA_CFG_TX_CLK_EN)

/* Macro's for second level of pmx - pads as primary OR alternate peripheral */
/* Write 0 to enable FSMC_16_BIT */
#define KBD_ROW_COL_MASK		(1 << 0)
/* Write 0 to enable UART0_ENH */
#define GPT_MASK			(1 << 1) /* Only clk & cpt */
/* Write 0 to enable PWM1 */
#define KBD_COL5_MASK			(1 << 2)
/* Write 0 to enable PWM2 */
#define GPT0_TMR0_CPT_MASK		(1 << 3) /* Only clk & cpt */
/* Write 0 to enable PWM3 */
#define GPT0_TMR1_CLK_MASK		(1 << 4) /* Only clk & cpt */
/* Write 0 to enable PWM0 */
#define SSP0_CS1_MASK			(1 << 5)
/* Write 0 to enable VIP */
#define CAM3_MASK			(1 << 6)
/* Write 0 to enable VIP */
#define CAM2_MASK			(1 << 7)
/* Write 0 to enable VIP */
#define CAM1_MASK			(1 << 8)
/* Write 0 to enable VIP */
#define CAM0_MASK			(1 << 9)
/* Write 0 to enable TS */
#define SSP0_CS2_MASK			(1 << 10)
/* Write 0 to enable FSMC PNOR */
#define MCIF_MASK			(1 << 11)
/* Write 0 to enable CLCD */
#define ARM_TRACE_MASK			(1 << 12)
/* Write 0 to enable I2S, SSP0_CS2, CEC0, 1, SPDIF out, CLCD */
#define MIPHY_DBG_MASK			(1 << 13)

/*
 * Pad multiplexing for making all pads as gpio's. This is done to override the
 * values passed from bootloader and start from scratch.
 */
static const unsigned pads_as_gpio_pins[] = { 12, 88, 89, 251 };
/* Clear every PAD_FUNCTION_EN_* bit so all pads start out as plain GPIO. */
static struct spear_muxreg pads_as_gpio_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PADS_AS_GPIO_REG0_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_2,
		.mask = PADS_AS_GPIO_REGS_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_3,
		.mask = PADS_AS_GPIO_REGS_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_4,
		.mask = PADS_AS_GPIO_REGS_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_5,
		.mask = PADS_AS_GPIO_REGS_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_6,
		.mask = PADS_AS_GPIO_REGS_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_7,
		.mask = PADS_AS_GPIO_REGS_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_8,
		.mask = PADS_AS_GPIO_REG7_MASK,
		.val = 0x0,
	},
};
static struct spear_modemux pads_as_gpio_modemux[] = {
	{
		.muxregs = pads_as_gpio_muxreg,
		.nmuxregs = ARRAY_SIZE(pads_as_gpio_muxreg),
	},
};
static struct spear_pingroup pads_as_gpio_pingroup = {
	.name = "pads_as_gpio_grp",
	.pins = pads_as_gpio_pins,
	.npins = ARRAY_SIZE(pads_as_gpio_pins),
	.modemuxs = pads_as_gpio_modemux,
	.nmodemuxs = ARRAY_SIZE(pads_as_gpio_modemux),
};
static const char *const pads_as_gpio_grps[] = { "pads_as_gpio_grp" };
static struct spear_function pads_as_gpio_function = {
	.name = "pads_as_gpio",
	.groups = pads_as_gpio_grps,
	.ngroups = ARRAY_SIZE(pads_as_gpio_grps),
};

/* Pad multiplexing for fsmc_8bit device */
static const unsigned fsmc_8bit_pins[] = { 233, 234, 235, 236, 238, 239,
	240, 241, 242, 243, 244, 245, 246, 247, 248, 249 };
static struct spear_muxreg fsmc_8bit_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_8,
		.mask = FSMC_8BIT_REG7_MASK,
		.val = FSMC_8BIT_REG7_MASK,
	}
};
static struct spear_modemux fsmc_8bit_modemux[] = {
	{
		.muxregs = fsmc_8bit_muxreg,
		.nmuxregs = ARRAY_SIZE(fsmc_8bit_muxreg),
	},
};
static struct spear_pingroup fsmc_8bit_pingroup = {
	.name = "fsmc_8bit_grp",
	.pins = fsmc_8bit_pins,
	.npins = ARRAY_SIZE(fsmc_8bit_pins),
	.modemuxs = fsmc_8bit_modemux,
	.nmodemuxs = ARRAY_SIZE(fsmc_8bit_modemux),
};

/* Pad multiplexing for fsmc_16bit device */
/* Shared-IP bit written 0 selects FSMC over the keyboard row/col pads
 * (see the "Write 0 to enable ..." notes on the *_MASK defines). */
static const unsigned fsmc_16bit_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
	10 };
static struct spear_muxreg fsmc_16bit_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = KBD_ROW_COL_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
		.val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
	},
};
static struct spear_modemux fsmc_16bit_modemux[] = {
	{
		.muxregs = fsmc_16bit_muxreg,
		.nmuxregs = ARRAY_SIZE(fsmc_16bit_muxreg),
	},
};
static struct spear_pingroup fsmc_16bit_pingroup = {
	.name = "fsmc_16bit_grp",
	.pins = fsmc_16bit_pins,
	.npins = ARRAY_SIZE(fsmc_16bit_pins),
	.modemuxs = fsmc_16bit_modemux,
	.nmodemuxs = ARRAY_SIZE(fsmc_16bit_modemux),
};

/* pad multiplexing for fsmc_pnor device */
static const unsigned fsmc_pnor_pins[] = { 192, 193, 194, 195, 196, 197,
	198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
	212, 215, 216, 217 };
static struct spear_muxreg fsmc_pnor_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = MCIF_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_7,
		.mask = FSMC_PNOR_AND_MCIF_REG6_MASK,
		.val = FSMC_PNOR_AND_MCIF_REG6_MASK,
	},
};
static struct spear_modemux fsmc_pnor_modemux[] = {
	{
		.muxregs = fsmc_pnor_muxreg,
		.nmuxregs = ARRAY_SIZE(fsmc_pnor_muxreg),
	},
};
static struct spear_pingroup fsmc_pnor_pingroup = {
	.name = "fsmc_pnor_grp",
	.pins = fsmc_pnor_pins,
	.npins = ARRAY_SIZE(fsmc_pnor_pins),
	.modemuxs = fsmc_pnor_modemux,
	.nmodemuxs = ARRAY_SIZE(fsmc_pnor_modemux),
};

static const char *const fsmc_grps[] = { "fsmc_8bit_grp", "fsmc_16bit_grp",
	"fsmc_pnor_grp" };
static struct spear_function fsmc_function = {
	.name = "fsmc",
	.groups = fsmc_grps,
	.ngroups = ARRAY_SIZE(fsmc_grps),
};

/* pad multiplexing for keyboard rows-cols device */
static const unsigned keyboard_row_col_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10 };
static struct spear_muxreg keyboard_row_col_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = KBD_ROW_COL_MASK,
		.val = KBD_ROW_COL_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
		.val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
	},
};
static struct spear_modemux keyboard_row_col_modemux[] = {
	{
		.muxregs = keyboard_row_col_muxreg,
		.nmuxregs = ARRAY_SIZE(keyboard_row_col_muxreg),
	},
};
static struct spear_pingroup keyboard_row_col_pingroup = {
	.name = "keyboard_row_col_grp",
	.pins = keyboard_row_col_pins,
	.npins = ARRAY_SIZE(keyboard_row_col_pins),
	.modemuxs = keyboard_row_col_modemux,
	.nmodemuxs = ARRAY_SIZE(keyboard_row_col_modemux),
};

/* pad multiplexing for keyboard col5 device */
static const unsigned keyboard_col5_pins[] = { 17 };
static struct spear_muxreg keyboard_col5_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = KBD_COL5_MASK,
		.val = KBD_COL5_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PWM1_AND_KBD_COL5_REG0_MASK,
		.val = PWM1_AND_KBD_COL5_REG0_MASK,
	},
};
static struct spear_modemux keyboard_col5_modemux[] = {
	{
		.muxregs = keyboard_col5_muxreg,
		.nmuxregs = ARRAY_SIZE(keyboard_col5_muxreg),
	},
};
static struct spear_pingroup keyboard_col5_pingroup = {
	.name = "keyboard_col5_grp",
	.pins = keyboard_col5_pins,
	.npins = ARRAY_SIZE(keyboard_col5_pins),
	.modemuxs = keyboard_col5_modemux,
	.nmodemuxs = ARRAY_SIZE(keyboard_col5_modemux),
};

static const char *const keyboard_grps[] = { "keyboard_row_col_grp",
	"keyboard_col5_grp" };
static struct spear_function keyboard_function = {
	.name = "keyboard",
	.groups = keyboard_grps,
	.ngroups = ARRAY_SIZE(keyboard_grps),
};

/* pad multiplexing for spdif_in device */
static const unsigned spdif_in_pins[] = { 19 };
static struct spear_muxreg spdif_in_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = SPDIF_IN_REG0_MASK,
		.val = SPDIF_IN_REG0_MASK,
	},
};
static struct spear_modemux spdif_in_modemux[] = {
	{
		.muxregs = spdif_in_muxreg,
		.nmuxregs = ARRAY_SIZE(spdif_in_muxreg),
	},
};
static struct spear_pingroup spdif_in_pingroup = {
	.name = "spdif_in_grp",
	.pins = spdif_in_pins,
	.npins = ARRAY_SIZE(spdif_in_pins),
	.modemuxs = spdif_in_modemux,
	.nmodemuxs = ARRAY_SIZE(spdif_in_modemux),
};
static const char *const spdif_in_grps[] = { "spdif_in_grp" };
static struct spear_function spdif_in_function = {
	.name = "spdif_in",
	.groups = spdif_in_grps,
	.ngroups = ARRAY_SIZE(spdif_in_grps),
};

/* pad multiplexing for spdif_out device */
static const unsigned spdif_out_pins[] = { 137 };
static struct spear_muxreg spdif_out_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_5,
		.mask = SPDIF_OUT_REG4_MASK,
		.val = SPDIF_OUT_REG4_MASK,
	}, {
		.reg = PERIP_CFG,
		.mask = SPDIF_OUT_ENB_MASK,
		.val = SPDIF_OUT_ENB_MASK,
	}
};
static struct spear_modemux spdif_out_modemux[] = {
	{
		.muxregs = spdif_out_muxreg,
		.nmuxregs = ARRAY_SIZE(spdif_out_muxreg),
	},
};
static struct spear_pingroup spdif_out_pingroup = {
	.name = "spdif_out_grp",
	.pins = spdif_out_pins,
	.npins = ARRAY_SIZE(spdif_out_pins),
	.modemuxs = spdif_out_modemux,
	.nmodemuxs = ARRAY_SIZE(spdif_out_modemux),
};
static const char *const spdif_out_grps[] = { "spdif_out_grp" };
static struct spear_function spdif_out_function = {
	.name = "spdif_out",
	.groups = spdif_out_grps,
	.ngroups = ARRAY_SIZE(spdif_out_grps),
};

/* pad multiplexing for gpt_0_1 device */
static const unsigned gpt_0_1_pins[] = { 11, 12, 13, 14, 15, 16, 21, 22 };
static struct spear_muxreg gpt_0_1_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
		.val = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = UART0_ENH_AND_GPT_REG0_MASK |
			PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
			PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
		.val = UART0_ENH_AND_GPT_REG0_MASK |
			PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
			PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
	},
};
static struct spear_modemux gpt_0_1_modemux[] = {
	{
		.muxregs = gpt_0_1_muxreg,
		.nmuxregs = ARRAY_SIZE(gpt_0_1_muxreg),
	},
};
static struct spear_pingroup gpt_0_1_pingroup = {
	.name = "gpt_0_1_grp",
	.pins = gpt_0_1_pins,
	.npins = ARRAY_SIZE(gpt_0_1_pins),
	.modemuxs = gpt_0_1_modemux,
	.nmodemuxs = ARRAY_SIZE(gpt_0_1_modemux),
};
static const char *const gpt_0_1_grps[] = { "gpt_0_1_grp" };
static struct spear_function gpt_0_1_function = {
	.name = "gpt_0_1",
	.groups = gpt_0_1_grps,
	.ngroups = ARRAY_SIZE(gpt_0_1_grps),
};

/* pad multiplexing for pwm0 device */
static const unsigned pwm0_pins[] = { 24 };
static struct spear_muxreg pwm0_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = SSP0_CS1_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PWM0_AND_SSP0_CS1_REG0_MASK,
		.val = PWM0_AND_SSP0_CS1_REG0_MASK,
	},
};
static struct spear_modemux pwm0_modemux[] = {
	{
		.muxregs = pwm0_muxreg,
		.nmuxregs = ARRAY_SIZE(pwm0_muxreg),
	},
};
static struct spear_pingroup pwm0_pingroup = {
	.name = "pwm0_grp",
	.pins = pwm0_pins,
	.npins = ARRAY_SIZE(pwm0_pins),
	.modemuxs = pwm0_modemux,
	.nmodemuxs = ARRAY_SIZE(pwm0_modemux),
};

/* pad multiplexing for pwm1 device */
static const unsigned pwm1_pins[] = { 17 };
static struct spear_muxreg pwm1_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = KBD_COL5_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PWM1_AND_KBD_COL5_REG0_MASK,
		.val = PWM1_AND_KBD_COL5_REG0_MASK,
	},
};
static struct spear_modemux pwm1_modemux[] = {
	{
		.muxregs = pwm1_muxreg,
		.nmuxregs = ARRAY_SIZE(pwm1_muxreg),
	},
};
static struct spear_pingroup pwm1_pingroup = {
	.name = "pwm1_grp",
	.pins = pwm1_pins,
	.npins = ARRAY_SIZE(pwm1_pins),
	.modemuxs = pwm1_modemux,
	.nmodemuxs = ARRAY_SIZE(pwm1_modemux),
};

/* pad multiplexing for pwm2 device */
static const unsigned pwm2_pins[] = { 21 };
static struct spear_muxreg pwm2_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = GPT0_TMR0_CPT_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
		.val = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
	},
};
static struct spear_modemux pwm2_modemux[] = {
	{
		.muxregs = pwm2_muxreg,
		.nmuxregs = ARRAY_SIZE(pwm2_muxreg),
	},
};
static struct spear_pingroup pwm2_pingroup = {
	.name = "pwm2_grp",
	.pins = pwm2_pins,
	.npins = ARRAY_SIZE(pwm2_pins),
	.modemuxs = pwm2_modemux,
	.nmodemuxs = ARRAY_SIZE(pwm2_modemux),
};

/* pad multiplexing for pwm3 device */
static const unsigned pwm3_pins[] = { 22 };
static struct spear_muxreg pwm3_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = GPT0_TMR1_CLK_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
		.val = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
	},
};
static struct spear_modemux pwm3_modemux[] = {
	{
		.muxregs = pwm3_muxreg,
		.nmuxregs = ARRAY_SIZE(pwm3_muxreg),
	},
};
static struct spear_pingroup pwm3_pingroup = {
	.name = "pwm3_grp",
	.pins = pwm3_pins,
	.npins = ARRAY_SIZE(pwm3_pins),
	.modemuxs = pwm3_modemux,
	.nmodemuxs = ARRAY_SIZE(pwm3_modemux),
};

static const char *const pwm_grps[] = { "pwm0_grp", "pwm1_grp", "pwm2_grp",
	"pwm3_grp" };
static struct spear_function pwm_function = {
	.name = "pwm",
	.groups = pwm_grps,
	.ngroups = ARRAY_SIZE(pwm_grps),
};

/* pad multiplexing for vip_mux device */
static const unsigned vip_mux_pins[] = { 35, 36, 37, 38, 40, 41, 42, 43 };
static struct spear_muxreg vip_mux_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_2,
		.mask = VIP_REG1_MASK,
		.val = VIP_REG1_MASK,
	},
};
static struct spear_modemux vip_mux_modemux[] = {
	{
		.muxregs = vip_mux_muxreg,
		.nmuxregs = ARRAY_SIZE(vip_mux_muxreg),
	},
};
static struct spear_pingroup vip_mux_pingroup = {
	.name = "vip_mux_grp",
	.pins = vip_mux_pins,
	.npins = ARRAY_SIZE(vip_mux_pins),
	.modemuxs = vip_mux_modemux,
	.nmodemuxs = ARRAY_SIZE(vip_mux_modemux),
};

/* pad multiplexing for vip_mux_cam0 (disables cam0) device */
static const unsigned vip_mux_cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71,
	72, 73, 74, 75 };
static struct spear_muxreg vip_mux_cam0_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = CAM0_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_3,
		.mask = VIP_AND_CAM0_REG2_MASK,
		.val = VIP_AND_CAM0_REG2_MASK,
	},
};
static struct spear_modemux vip_mux_cam0_modemux[] = {
	{
		.muxregs = vip_mux_cam0_muxreg,
		.nmuxregs = ARRAY_SIZE(vip_mux_cam0_muxreg),
	},
};
static struct spear_pingroup vip_mux_cam0_pingroup = {
	.name =
		"vip_mux_cam0_grp",
	.pins = vip_mux_cam0_pins,
	.npins = ARRAY_SIZE(vip_mux_cam0_pins),
	.modemuxs = vip_mux_cam0_modemux,
	.nmodemuxs = ARRAY_SIZE(vip_mux_cam0_modemux),
};

/* pad multiplexing for vip_mux_cam1 (disables cam1) device */
static const unsigned vip_mux_cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60,
	61, 62, 63, 64 };
static struct spear_muxreg vip_mux_cam1_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = CAM1_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_2,
		.mask = VIP_AND_CAM1_REG1_MASK,
		.val = VIP_AND_CAM1_REG1_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_3,
		.mask = VIP_AND_CAM1_REG2_MASK,
		.val = VIP_AND_CAM1_REG2_MASK,
	},
};
static struct spear_modemux vip_mux_cam1_modemux[] = {
	{
		.muxregs = vip_mux_cam1_muxreg,
		.nmuxregs = ARRAY_SIZE(vip_mux_cam1_muxreg),
	},
};
static struct spear_pingroup vip_mux_cam1_pingroup = {
	.name = "vip_mux_cam1_grp",
	.pins = vip_mux_cam1_pins,
	.npins = ARRAY_SIZE(vip_mux_cam1_pins),
	.modemuxs = vip_mux_cam1_modemux,
	.nmodemuxs = ARRAY_SIZE(vip_mux_cam1_modemux),
};

/* pad multiplexing for vip_mux_cam2 (disables cam2) device */
static const unsigned vip_mux_cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49,
	50, 51, 52, 53 };
static struct spear_muxreg vip_mux_cam2_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = CAM2_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_2,
		.mask = VIP_AND_CAM2_REG1_MASK,
		.val = VIP_AND_CAM2_REG1_MASK,
	},
};
static struct spear_modemux vip_mux_cam2_modemux[] = {
	{
		.muxregs = vip_mux_cam2_muxreg,
		.nmuxregs = ARRAY_SIZE(vip_mux_cam2_muxreg),
	},
};
static struct spear_pingroup vip_mux_cam2_pingroup = {
	.name = "vip_mux_cam2_grp",
	.pins = vip_mux_cam2_pins,
	.npins = ARRAY_SIZE(vip_mux_cam2_pins),
	.modemuxs = vip_mux_cam2_modemux,
	.nmodemuxs = ARRAY_SIZE(vip_mux_cam2_modemux),
};

/* pad multiplexing for vip_mux_cam3 (disables cam3) device */
static const unsigned vip_mux_cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30,
	31, 32, 33, 34 };
static struct spear_muxreg vip_mux_cam3_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask =
		= "uart0_grp",
	.pins = uart0_pins,
	.npins = ARRAY_SIZE(uart0_pins),
	.modemuxs = uart0_modemux,
	.nmodemuxs = ARRAY_SIZE(uart0_modemux),
};

/* pad multiplexing for uart0_enh device */
/* Shared-IP GPT bit written 0 selects the enhanced UART0 signals
 * (see the "Write 0 to enable UART0_ENH" note on GPT_MASK). */
static const unsigned uart0_enh_pins[] = { 11, 12, 13, 14, 15, 16 };
static struct spear_muxreg uart0_enh_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = GPT_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = UART0_ENH_AND_GPT_REG0_MASK,
		.val = UART0_ENH_AND_GPT_REG0_MASK,
	},
};
static struct spear_modemux uart0_enh_modemux[] = {
	{
		.muxregs = uart0_enh_muxreg,
		.nmuxregs = ARRAY_SIZE(uart0_enh_muxreg),
	},
};
static struct spear_pingroup uart0_enh_pingroup = {
	.name = "uart0_enh_grp",
	.pins = uart0_enh_pins,
	.npins = ARRAY_SIZE(uart0_enh_pins),
	.modemuxs = uart0_enh_modemux,
	.nmodemuxs = ARRAY_SIZE(uart0_enh_modemux),
};

static const char *const uart0_grps[] = { "uart0_grp", "uart0_enh_grp" };
static struct spear_function uart0_function = {
	.name = "uart0",
	.groups = uart0_grps,
	.ngroups = ARRAY_SIZE(uart0_grps),
};

/* pad multiplexing for uart1 device */
static const unsigned uart1_pins[] = { 88, 89 };
static struct spear_muxreg uart1_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_3,
		.mask = UART1_REG2_MASK,
		.val = UART1_REG2_MASK,
	},
};
static struct spear_modemux uart1_modemux[] = {
	{
		.muxregs = uart1_muxreg,
		.nmuxregs = ARRAY_SIZE(uart1_muxreg),
	},
};
static struct spear_pingroup uart1_pingroup = {
	.name = "uart1_grp",
	.pins = uart1_pins,
	.npins = ARRAY_SIZE(uart1_pins),
	.modemuxs = uart1_modemux,
	.nmodemuxs = ARRAY_SIZE(uart1_modemux),
};
static const char *const uart1_grps[] = { "uart1_grp" };
static struct spear_function uart1_function = {
	.name = "uart1",
	.groups = uart1_grps,
	.ngroups = ARRAY_SIZE(uart1_grps),
};

/* pad multiplexing for i2s_in device */
static const unsigned i2s_in_pins[] = { 90, 91, 92, 93, 94, 99 };
static struct spear_muxreg i2s_in_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_3,
		.mask = I2S_IN_REG2_MASK,
		.val = I2S_IN_REG2_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_4,
		.mask = I2S_IN_REG3_MASK,
		.val = I2S_IN_REG3_MASK,
	},
};
static struct spear_modemux i2s_in_modemux[] = {
	{
		.muxregs = i2s_in_muxreg,
		.nmuxregs = ARRAY_SIZE(i2s_in_muxreg),
	},
};
static struct spear_pingroup i2s_in_pingroup = {
	.name = "i2s_in_grp",
	.pins = i2s_in_pins,
	.npins = ARRAY_SIZE(i2s_in_pins),
	.modemuxs = i2s_in_modemux,
	.nmodemuxs = ARRAY_SIZE(i2s_in_modemux),
};

/* pad multiplexing for i2s_out device */
static const unsigned i2s_out_pins[] = { 95, 96, 97, 98, 100, 101, 102,
	103 };
static struct spear_muxreg i2s_out_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_4,
		.mask = I2S_OUT_REG3_MASK,
		.val = I2S_OUT_REG3_MASK,
	},
};
static struct spear_modemux i2s_out_modemux[] = {
	{
		.muxregs = i2s_out_muxreg,
		.nmuxregs = ARRAY_SIZE(i2s_out_muxreg),
	},
};
static struct spear_pingroup i2s_out_pingroup = {
	.name = "i2s_out_grp",
	.pins = i2s_out_pins,
	.npins = ARRAY_SIZE(i2s_out_pins),
	.modemuxs = i2s_out_modemux,
	.nmodemuxs = ARRAY_SIZE(i2s_out_modemux),
};

static const char *const i2s_grps[] = { "i2s_in_grp", "i2s_out_grp" };
static struct spear_function i2s_function = {
	.name = "i2s",
	.groups = i2s_grps,
	.ngroups = ARRAY_SIZE(i2s_grps),
};

/* pad multiplexing for gmac device */
/* All gmac_* pin groups below share this one pin list; only the
 * GMAC_CLK_CFG PHY-interface selector differs per group. */
static const unsigned gmac_pins[] = { 104, 105, 106, 107, 108, 109, 110,
	111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
	125, 126, 127, 128, 129, 130, 131 };
/* Common mux writes enabling the GMAC pads in PAD_FUNCTION_EN_4/5. */
#define GMAC_MUXREG				\
	{					\
		.reg = PAD_FUNCTION_EN_4,	\
		.mask = GMAC_REG3_MASK,		\
		.val = GMAC_REG3_MASK,		\
	}, {					\
		.reg = PAD_FUNCTION_EN_5,	\
		.mask = GMAC_REG4_MASK,		\
		.val = GMAC_REG4_MASK,		\
	}

/* pad multiplexing for gmii device */
static struct spear_muxreg gmii_muxreg[] = {
	GMAC_MUXREG,
	{
		.reg = GMAC_CLK_CFG,
		.mask = GMAC_PHY_IF_SEL_MASK,
		.val = GMAC_PHY_IF_GMII_VAL,
	},
};
static struct spear_modemux gmii_modemux[] = {
	{
		.muxregs = gmii_muxreg,
		.nmuxregs = ARRAY_SIZE(gmii_muxreg),
	},
};
static struct spear_pingroup gmii_pingroup = {
	.name = "gmii_grp",
	.pins = gmac_pins,
	.npins = ARRAY_SIZE(gmac_pins),
	.modemuxs = gmii_modemux,
	.nmodemuxs = ARRAY_SIZE(gmii_modemux),
};

/* pad multiplexing for rgmii device */
static struct spear_muxreg rgmii_muxreg[] = {
	GMAC_MUXREG,
	{
		.reg = GMAC_CLK_CFG,
		.mask = GMAC_PHY_IF_SEL_MASK,
		.val = GMAC_PHY_IF_RGMII_VAL,
	},
};
static struct spear_modemux rgmii_modemux[] = {
	{
		.muxregs = rgmii_muxreg,
		.nmuxregs = ARRAY_SIZE(rgmii_muxreg),
	},
};
static struct spear_pingroup rgmii_pingroup = {
	.name = "rgmii_grp",
	.pins = gmac_pins,
	.npins = ARRAY_SIZE(gmac_pins),
	.modemuxs = rgmii_modemux,
	.nmodemuxs = ARRAY_SIZE(rgmii_modemux),
};

/* pad multiplexing for rmii device */
static struct spear_muxreg rmii_muxreg[] = {
	GMAC_MUXREG,
	{
		.reg = GMAC_CLK_CFG,
		.mask = GMAC_PHY_IF_SEL_MASK,
		.val = GMAC_PHY_IF_RMII_VAL,
	},
};
static struct spear_modemux rmii_modemux[] = {
	{
		.muxregs = rmii_muxreg,
		.nmuxregs = ARRAY_SIZE(rmii_muxreg),
	},
};
static struct spear_pingroup rmii_pingroup = {
	.name = "rmii_grp",
	.pins = gmac_pins,
	.npins = ARRAY_SIZE(gmac_pins),
	.modemuxs = rmii_modemux,
	.nmodemuxs = ARRAY_SIZE(rmii_modemux),
};

/* pad multiplexing for sgmii device */
static struct spear_muxreg sgmii_muxreg[] = {
	GMAC_MUXREG,
	{
		.reg = GMAC_CLK_CFG,
		.mask = GMAC_PHY_IF_SEL_MASK,
		.val = GMAC_PHY_IF_SGMII_VAL,
	},
};
static struct spear_modemux sgmii_modemux[] = {
	{
		.muxregs = sgmii_muxreg,
		.nmuxregs = ARRAY_SIZE(sgmii_muxreg),
	},
};
static struct spear_pingroup sgmii_pingroup = {
	.name = "sgmii_grp",
	.pins = gmac_pins,
	.npins = ARRAY_SIZE(gmac_pins),
	.modemuxs = sgmii_modemux,
	.nmodemuxs = ARRAY_SIZE(sgmii_modemux),
};

static const char *const gmac_grps[] = { "gmii_grp", "rgmii_grp",
	"rmii_grp", "sgmii_grp" };
static struct spear_function gmac_function = {
	.name = "gmac",
	.groups = gmac_grps,
	.ngroups = ARRAY_SIZE(gmac_grps),
};

/* pad multiplexing for i2c0 device */
static const unsigned i2c0_pins[] = { 133, 134 };
static struct spear_muxreg i2c0_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_5,
		.mask = I2C0_REG4_MASK,
		.val = I2C0_REG4_MASK,
	},
};
static struct spear_modemux i2c0_modemux[] = { /* NOTE(review): this definition continues beyond the end of this chunk */
{ .muxregs = i2c0_muxreg, .nmuxregs = ARRAY_SIZE(i2c0_muxreg), }, }; static struct spear_pingroup i2c0_pingroup = { .name = "i2c0_grp", .pins = i2c0_pins, .npins = ARRAY_SIZE(i2c0_pins), .modemuxs = i2c0_modemux, .nmodemuxs = ARRAY_SIZE(i2c0_modemux), }; static const char *const i2c0_grps[] = { "i2c0_grp" }; static struct spear_function i2c0_function = { .name = "i2c0", .groups = i2c0_grps, .ngroups = ARRAY_SIZE(i2c0_grps), }; /* pad multiplexing for i2c1 device */ static const unsigned i2c1_pins[] = { 18, 23 }; static struct spear_muxreg i2c1_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, .mask = I2C1_REG0_MASK, .val = I2C1_REG0_MASK, }, }; static struct spear_modemux i2c1_modemux[] = { { .muxregs = i2c1_muxreg, .nmuxregs = ARRAY_SIZE(i2c1_muxreg), }, }; static struct spear_pingroup i2c1_pingroup = { .name = "i2c1_grp", .pins = i2c1_pins, .npins = ARRAY_SIZE(i2c1_pins), .modemuxs = i2c1_modemux, .nmodemuxs = ARRAY_SIZE(i2c1_modemux), }; static const char *const i2c1_grps[] = { "i2c1_grp" }; static struct spear_function i2c1_function = { .name = "i2c1", .groups = i2c1_grps, .ngroups = ARRAY_SIZE(i2c1_grps), }; /* pad multiplexing for cec0 device */ static const unsigned cec0_pins[] = { 135 }; static struct spear_muxreg cec0_muxreg[] = { { .reg = PAD_FUNCTION_EN_5, .mask = CEC0_REG4_MASK, .val = CEC0_REG4_MASK, }, }; static struct spear_modemux cec0_modemux[] = { { .muxregs = cec0_muxreg, .nmuxregs = ARRAY_SIZE(cec0_muxreg), }, }; static struct spear_pingroup cec0_pingroup = { .name = "cec0_grp", .pins = cec0_pins, .npins = ARRAY_SIZE(cec0_pins), .modemuxs = cec0_modemux, .nmodemuxs = ARRAY_SIZE(cec0_modemux), }; static const char *const cec0_grps[] = { "cec0_grp" }; static struct spear_function cec0_function = { .name = "cec0", .groups = cec0_grps, .ngroups = ARRAY_SIZE(cec0_grps), }; /* pad multiplexing for cec1 device */ static const unsigned cec1_pins[] = { 136 }; static struct spear_muxreg cec1_muxreg[] = { { .reg = PAD_FUNCTION_EN_5, .mask = CEC1_REG4_MASK, .val = 
CEC1_REG4_MASK, }, }; static struct spear_modemux cec1_modemux[] = { { .muxregs = cec1_muxreg, .nmuxregs = ARRAY_SIZE(cec1_muxreg), }, }; static struct spear_pingroup cec1_pingroup = { .name = "cec1_grp", .pins = cec1_pins, .npins = ARRAY_SIZE(cec1_pins), .modemuxs = cec1_modemux, .nmodemuxs = ARRAY_SIZE(cec1_modemux), }; static const char *const cec1_grps[] = { "cec1_grp" }; static struct spear_function cec1_function = { .name = "cec1", .groups = cec1_grps, .ngroups = ARRAY_SIZE(cec1_grps), }; /* pad multiplexing for mcif devices */ static const unsigned mcif_pins[] = { 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 237 }; #define MCIF_MUXREG \ { \ .reg = PAD_SHARED_IP_EN_1, \ .mask = MCIF_MASK, \ .val = MCIF_MASK, \ }, { \ .reg = PAD_FUNCTION_EN_7, \ .mask = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK, \ .val = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK, \ }, { \ .reg = PAD_FUNCTION_EN_8, \ .mask = MCIF_REG7_MASK, \ .val = MCIF_REG7_MASK, \ } /* Pad multiplexing for sdhci device */ static struct spear_muxreg sdhci_muxreg[] = { MCIF_MUXREG, { .reg = PERIP_CFG, .mask = MCIF_SEL_MASK, .val = MCIF_SEL_SD, }, }; static struct spear_modemux sdhci_modemux[] = { { .muxregs = sdhci_muxreg, .nmuxregs = ARRAY_SIZE(sdhci_muxreg), }, }; static struct spear_pingroup sdhci_pingroup = { .name = "sdhci_grp", .pins = mcif_pins, .npins = ARRAY_SIZE(mcif_pins), .modemuxs = sdhci_modemux, .nmodemuxs = ARRAY_SIZE(sdhci_modemux), }; static const char *const sdhci_grps[] = { "sdhci_grp" }; static struct spear_function sdhci_function = { .name = "sdhci", .groups = sdhci_grps, .ngroups = ARRAY_SIZE(sdhci_grps), }; /* Pad multiplexing for cf device */ static struct spear_muxreg cf_muxreg[] = { MCIF_MUXREG, { .reg = PERIP_CFG, .mask = MCIF_SEL_MASK, .val = MCIF_SEL_CF, }, }; static struct spear_modemux cf_modemux[] = { { .muxregs = 
cf_muxreg, .nmuxregs = ARRAY_SIZE(cf_muxreg), }, }; static struct spear_pingroup cf_pingroup = { .name = "cf_grp", .pins = mcif_pins, .npins = ARRAY_SIZE(mcif_pins), .modemuxs = cf_modemux, .nmodemuxs = ARRAY_SIZE(cf_modemux), }; static const char *const cf_grps[] = { "cf_grp" }; static struct spear_function cf_function = { .name = "cf", .groups = cf_grps, .ngroups = ARRAY_SIZE(cf_grps), }; /* Pad multiplexing for xd device */ static struct spear_muxreg xd_muxreg[] = { MCIF_MUXREG, { .reg = PERIP_CFG, .mask = MCIF_SEL_MASK, .val = MCIF_SEL_XD, }, }; static struct spear_modemux xd_modemux[] = { { .muxregs = xd_muxreg, .nmuxregs = ARRAY_SIZE(xd_muxreg), }, }; static struct spear_pingroup xd_pingroup = { .name = "xd_grp", .pins = mcif_pins, .npins = ARRAY_SIZE(mcif_pins), .modemuxs = xd_modemux, .nmodemuxs = ARRAY_SIZE(xd_modemux), }; static const char *const xd_grps[] = { "xd_grp" }; static struct spear_function xd_function = { .name = "xd", .groups = xd_grps, .ngroups = ARRAY_SIZE(xd_grps), }; /* pad multiplexing for clcd device */ static const unsigned clcd_pins[] = { 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191 }; static struct spear_muxreg clcd_muxreg[] = { { .reg = PAD_SHARED_IP_EN_1, .mask = ARM_TRACE_MASK | MIPHY_DBG_MASK, .val = 0, }, { .reg = PAD_FUNCTION_EN_5, .mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK, .val = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK, }, { .reg = PAD_FUNCTION_EN_6, .mask = CLCD_AND_ARM_TRACE_REG5_MASK, .val = CLCD_AND_ARM_TRACE_REG5_MASK, }, { .reg = PAD_FUNCTION_EN_7, .mask = CLCD_AND_ARM_TRACE_REG6_MASK, .val = CLCD_AND_ARM_TRACE_REG6_MASK, }, }; static struct spear_modemux clcd_modemux[] = { { .muxregs = clcd_muxreg, .nmuxregs = ARRAY_SIZE(clcd_muxreg), }, }; static struct spear_pingroup clcd_pingroup 
= { .name = "clcd_grp", .pins = clcd_pins, .npins = ARRAY_SIZE(clcd_pins), .modemuxs = clcd_modemux, .nmodemuxs = ARRAY_SIZE(clcd_modemux), }; /* Disable cld runtime to save panel damage */ static struct spear_muxreg clcd_sleep_muxreg[] = { { .reg = PAD_SHARED_IP_EN_1, .mask = ARM_TRACE_MASK | MIPHY_DBG_MASK, .val = 0, }, { .reg = PAD_FUNCTION_EN_5, .mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK, .val = 0x0, }, { .reg = PAD_FUNCTION_EN_6, .mask = CLCD_AND_ARM_TRACE_REG5_MASK, .val = 0x0, }, { .reg = PAD_FUNCTION_EN_7, .mask = CLCD_AND_ARM_TRACE_REG6_MASK, .val = 0x0, }, }; static struct spear_modemux clcd_sleep_modemux[] = { { .muxregs = clcd_sleep_muxreg, .nmuxregs = ARRAY_SIZE(clcd_sleep_muxreg), }, }; static struct spear_pingroup clcd_sleep_pingroup = { .name = "clcd_sleep_grp", .pins = clcd_pins, .npins = ARRAY_SIZE(clcd_pins), .modemuxs = clcd_sleep_modemux, .nmodemuxs = ARRAY_SIZE(clcd_sleep_modemux), }; static const char *const clcd_grps[] = { "clcd_grp", "clcd_sleep_grp" }; static struct spear_function clcd_function = { .name = "clcd", .groups = clcd_grps, .ngroups = ARRAY_SIZE(clcd_grps), }; /* pad multiplexing for arm_trace device */ static const unsigned arm_trace_pins[] = { 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200 }; static struct spear_muxreg arm_trace_muxreg[] = { { .reg = PAD_SHARED_IP_EN_1, .mask = ARM_TRACE_MASK, .val = ARM_TRACE_MASK, }, { .reg = PAD_FUNCTION_EN_5, .mask = CLCD_AND_ARM_TRACE_REG4_MASK, .val = CLCD_AND_ARM_TRACE_REG4_MASK, }, { .reg = PAD_FUNCTION_EN_6, .mask = CLCD_AND_ARM_TRACE_REG5_MASK, .val = CLCD_AND_ARM_TRACE_REG5_MASK, }, { .reg = PAD_FUNCTION_EN_7, .mask = CLCD_AND_ARM_TRACE_REG6_MASK, .val = CLCD_AND_ARM_TRACE_REG6_MASK, }, }; static struct spear_modemux arm_trace_modemux[] = { { .muxregs = arm_trace_muxreg, .nmuxregs = 
ARRAY_SIZE(arm_trace_muxreg), }, }; static struct spear_pingroup arm_trace_pingroup = { .name = "arm_trace_grp", .pins = arm_trace_pins, .npins = ARRAY_SIZE(arm_trace_pins), .modemuxs = arm_trace_modemux, .nmodemuxs = ARRAY_SIZE(arm_trace_modemux), }; static const char *const arm_trace_grps[] = { "arm_trace_grp" }; static struct spear_function arm_trace_function = { .name = "arm_trace", .groups = arm_trace_grps, .ngroups = ARRAY_SIZE(arm_trace_grps), }; /* pad multiplexing for miphy_dbg device */ static const unsigned miphy_dbg_pins[] = { 96, 97, 98, 99, 100, 101, 102, 103, 132, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157 }; static struct spear_muxreg miphy_dbg_muxreg[] = { { .reg = PAD_SHARED_IP_EN_1, .mask = MIPHY_DBG_MASK, .val = MIPHY_DBG_MASK, }, { .reg = PAD_FUNCTION_EN_5, .mask = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK, .val = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK, }, }; static struct spear_modemux miphy_dbg_modemux[] = { { .muxregs = miphy_dbg_muxreg, .nmuxregs = ARRAY_SIZE(miphy_dbg_muxreg), }, }; static struct spear_pingroup miphy_dbg_pingroup = { .name = "miphy_dbg_grp", .pins = miphy_dbg_pins, .npins = ARRAY_SIZE(miphy_dbg_pins), .modemuxs = miphy_dbg_modemux, .nmodemuxs = ARRAY_SIZE(miphy_dbg_modemux), }; static const char *const miphy_dbg_grps[] = { "miphy_dbg_grp" }; static struct spear_function miphy_dbg_function = { .name = "miphy_dbg", .groups = miphy_dbg_grps, .ngroups = ARRAY_SIZE(miphy_dbg_grps), }; /* pad multiplexing for pcie device */ static const unsigned pcie_pins[] = { 250 }; static struct spear_muxreg pcie_muxreg[] = { { .reg = PCIE_SATA_CFG, .mask = SATA_PCIE_CFG_MASK, .val = PCIE_CFG_VAL, }, }; static struct spear_modemux pcie_modemux[] = { { .muxregs = pcie_muxreg, .nmuxregs = ARRAY_SIZE(pcie_muxreg), }, }; static struct spear_pingroup pcie_pingroup = { .name = "pcie_grp", .pins = pcie_pins, .npins = ARRAY_SIZE(pcie_pins), .modemuxs = pcie_modemux, .nmodemuxs = 
ARRAY_SIZE(pcie_modemux), }; static const char *const pcie_grps[] = { "pcie_grp" }; static struct spear_function pcie_function = { .name = "pcie", .groups = pcie_grps, .ngroups = ARRAY_SIZE(pcie_grps), }; /* pad multiplexing for sata device */ static const unsigned sata_pins[] = { 250 }; static struct spear_muxreg sata_muxreg[] = { { .reg = PCIE_SATA_CFG, .mask = SATA_PCIE_CFG_MASK, .val = SATA_CFG_VAL, }, }; static struct spear_modemux sata_modemux[] = { { .muxregs = sata_muxreg, .nmuxregs = ARRAY_SIZE(sata_muxreg), }, }; static struct spear_pingroup sata_pingroup = { .name = "sata_grp", .pins = sata_pins, .npins = ARRAY_SIZE(sata_pins), .modemuxs = sata_modemux, .nmodemuxs = ARRAY_SIZE(sata_modemux), }; static const char *const sata_grps[] = { "sata_grp" }; static struct spear_function sata_function = { .name = "sata", .groups = sata_grps, .ngroups = ARRAY_SIZE(sata_grps), }; /* pingroups */ static struct spear_pingroup *spear1340_pingroups[] = { &pads_as_gpio_pingroup, &fsmc_8bit_pingroup, &fsmc_16bit_pingroup, &fsmc_pnor_pingroup, &keyboard_row_col_pingroup, &keyboard_col5_pingroup, &spdif_in_pingroup, &spdif_out_pingroup, &gpt_0_1_pingroup, &pwm0_pingroup, &pwm1_pingroup, &pwm2_pingroup, &pwm3_pingroup, &vip_mux_pingroup, &vip_mux_cam0_pingroup, &vip_mux_cam1_pingroup, &vip_mux_cam2_pingroup, &vip_mux_cam3_pingroup, &cam0_pingroup, &cam1_pingroup, &cam2_pingroup, &cam3_pingroup, &smi_pingroup, &ssp0_pingroup, &ssp0_cs1_pingroup, &ssp0_cs2_pingroup, &ssp0_cs3_pingroup, &uart0_pingroup, &uart0_enh_pingroup, &uart1_pingroup, &i2s_in_pingroup, &i2s_out_pingroup, &gmii_pingroup, &rgmii_pingroup, &rmii_pingroup, &sgmii_pingroup, &i2c0_pingroup, &i2c1_pingroup, &cec0_pingroup, &cec1_pingroup, &sdhci_pingroup, &cf_pingroup, &xd_pingroup, &clcd_sleep_pingroup, &clcd_pingroup, &arm_trace_pingroup, &miphy_dbg_pingroup, &pcie_pingroup, &sata_pingroup, }; /* functions */ static struct spear_function *spear1340_functions[] = { &pads_as_gpio_function, &fsmc_function, 
&keyboard_function, &spdif_in_function, &spdif_out_function, &gpt_0_1_function, &pwm_function, &vip_function, &cam0_function, &cam1_function, &cam2_function, &cam3_function, &smi_function, &ssp0_function, &uart0_function, &uart1_function, &i2s_function, &gmac_function, &i2c0_function, &i2c1_function, &cec0_function, &cec1_function, &sdhci_function, &cf_function, &xd_function, &clcd_function, &arm_trace_function, &miphy_dbg_function, &pcie_function, &sata_function, }; static void gpio_request_endisable(struct spear_pmx *pmx, int pin, bool enable) { unsigned int regoffset, regindex, bitoffset; unsigned int val; /* pin++ as gpio configuration starts from 2nd bit of base register */ pin++; regindex = pin / 32; bitoffset = pin % 32; if (regindex <= 3) regoffset = PAD_FUNCTION_EN_1 + regindex * sizeof(int *); else regoffset = PAD_FUNCTION_EN_5 + (regindex - 4) * sizeof(int *); val = pmx_readl(pmx, regoffset); if (enable) val &= ~(0x1 << bitoffset); else val |= 0x1 << bitoffset; pmx_writel(pmx, val, regoffset); } static struct spear_pinctrl_machdata spear1340_machdata = { .pins = spear1340_pins, .npins = ARRAY_SIZE(spear1340_pins), .groups = spear1340_pingroups, .ngroups = ARRAY_SIZE(spear1340_pingroups), .functions = spear1340_functions, .nfunctions = ARRAY_SIZE(spear1340_functions), .gpio_request_endisable = gpio_request_endisable, .modes_supported = false, }; static struct of_device_id spear1340_pinctrl_of_match[] = { { .compatible = "st,spear1340-pinmux", }, {}, }; static int spear1340_pinctrl_probe(struct platform_device *pdev) { return spear_pinctrl_probe(pdev, &spear1340_machdata); } static int spear1340_pinctrl_remove(struct platform_device *pdev) { return spear_pinctrl_remove(pdev); } static struct platform_driver spear1340_pinctrl_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = spear1340_pinctrl_of_match, }, .probe = spear1340_pinctrl_probe, .remove = spear1340_pinctrl_remove, }; static int __init spear1340_pinctrl_init(void) 
{ return platform_driver_register(&spear1340_pinctrl_driver); } arch_initcall(spear1340_pinctrl_init); static void __exit spear1340_pinctrl_exit(void) { platform_driver_unregister(&spear1340_pinctrl_driver); } module_exit(spear1340_pinctrl_exit); MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
gpl-2.0
Split-Screen/android_kernel_samsung_i9300
drivers/pci/hotplug/cpci_hotplug_core.c
4196
17239
/* * CompactPCI Hot Plug Driver * * Copyright (C) 2002,2005 SOMA Networks, Inc. * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <scottm@somanetworks.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/atomic.h> #include <linux/delay.h> #include <linux/kthread.h> #include "cpci_hotplug.h" #define DRIVER_AUTHOR "Scott Murray <scottm@somanetworks.com>" #define DRIVER_DESC "CompactPCI Hot Plug Core" #define MY_NAME "cpci_hotplug" #define dbg(format, arg...) \ do { \ if (cpci_debug) \ printk (KERN_DEBUG "%s: " format "\n", \ MY_NAME , ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) #define warn(format, arg...) 
printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg) /* local variables */ static DECLARE_RWSEM(list_rwsem); static LIST_HEAD(slot_list); static int slots; static atomic_t extracting; int cpci_debug; static struct cpci_hp_controller *controller; static struct task_struct *cpci_thread; static int thread_finished; static int enable_slot(struct hotplug_slot *slot); static int disable_slot(struct hotplug_slot *slot); static int set_attention_status(struct hotplug_slot *slot, u8 value); static int get_power_status(struct hotplug_slot *slot, u8 * value); static int get_attention_status(struct hotplug_slot *slot, u8 * value); static int get_adapter_status(struct hotplug_slot *slot, u8 * value); static int get_latch_status(struct hotplug_slot *slot, u8 * value); static struct hotplug_slot_ops cpci_hotplug_slot_ops = { .enable_slot = enable_slot, .disable_slot = disable_slot, .set_attention_status = set_attention_status, .get_power_status = get_power_status, .get_attention_status = get_attention_status, .get_adapter_status = get_adapter_status, .get_latch_status = get_latch_status, }; static int update_latch_status(struct hotplug_slot *hotplug_slot, u8 value) { struct hotplug_slot_info info; memcpy(&info, hotplug_slot->info, sizeof(struct hotplug_slot_info)); info.latch_status = value; return pci_hp_change_slot_info(hotplug_slot, &info); } static int update_adapter_status(struct hotplug_slot *hotplug_slot, u8 value) { struct hotplug_slot_info info; memcpy(&info, hotplug_slot->info, sizeof(struct hotplug_slot_info)); info.adapter_status = value; return pci_hp_change_slot_info(hotplug_slot, &info); } static int enable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; int retval = 0; dbg("%s - physical_slot = %s", __func__, slot_name(slot)); if (controller->ops->set_power) retval = controller->ops->set_power(slot, 1); return retval; } static int disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; 
int retval = 0; dbg("%s - physical_slot = %s", __func__, slot_name(slot)); down_write(&list_rwsem); /* Unconfigure device */ dbg("%s - unconfiguring slot %s", __func__, slot_name(slot)); if ((retval = cpci_unconfigure_slot(slot))) { err("%s - could not unconfigure slot %s", __func__, slot_name(slot)); goto disable_error; } dbg("%s - finished unconfiguring slot %s", __func__, slot_name(slot)); /* Clear EXT (by setting it) */ if (cpci_clear_ext(slot)) { err("%s - could not clear EXT for slot %s", __func__, slot_name(slot)); retval = -ENODEV; goto disable_error; } cpci_led_on(slot); if (controller->ops->set_power) if ((retval = controller->ops->set_power(slot, 0))) goto disable_error; if (update_adapter_status(slot->hotplug_slot, 0)) warn("failure to update adapter file"); if (slot->extracting) { slot->extracting = 0; atomic_dec(&extracting); } disable_error: up_write(&list_rwsem); return retval; } static u8 cpci_get_power_status(struct slot *slot) { u8 power = 1; if (controller->ops->get_power) power = controller->ops->get_power(slot); return power; } static int get_power_status(struct hotplug_slot *hotplug_slot, u8 * value) { struct slot *slot = hotplug_slot->private; *value = cpci_get_power_status(slot); return 0; } static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value) { struct slot *slot = hotplug_slot->private; *value = cpci_get_attention_status(slot); return 0; } static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { return cpci_set_attention_status(hotplug_slot->private, status); } static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 * value) { *value = hotplug_slot->info->adapter_status; return 0; } static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 * value) { *value = hotplug_slot->info->latch_status; return 0; } static void release_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; kfree(slot->hotplug_slot->info); kfree(slot->hotplug_slot); if 
(slot->dev) pci_dev_put(slot->dev); kfree(slot); } #define SLOT_NAME_SIZE 6 int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last) { struct slot *slot; struct hotplug_slot *hotplug_slot; struct hotplug_slot_info *info; char name[SLOT_NAME_SIZE]; int status = -ENOMEM; int i; if (!(controller && bus)) return -ENODEV; /* * Create a structure for each slot, and register that slot * with the pci_hotplug subsystem. */ for (i = first; i <= last; ++i) { slot = kzalloc(sizeof (struct slot), GFP_KERNEL); if (!slot) goto error; hotplug_slot = kzalloc(sizeof (struct hotplug_slot), GFP_KERNEL); if (!hotplug_slot) goto error_slot; slot->hotplug_slot = hotplug_slot; info = kzalloc(sizeof (struct hotplug_slot_info), GFP_KERNEL); if (!info) goto error_hpslot; hotplug_slot->info = info; slot->bus = bus; slot->number = i; slot->devfn = PCI_DEVFN(i, 0); snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i); hotplug_slot->private = slot; hotplug_slot->release = &release_slot; hotplug_slot->ops = &cpci_hotplug_slot_ops; /* * Initialize the slot info structure with some known * good values. 
*/ dbg("initializing slot %s", name); info->power_status = cpci_get_power_status(slot); info->attention_status = cpci_get_attention_status(slot); dbg("registering slot %s", name); status = pci_hp_register(slot->hotplug_slot, bus, i, name); if (status) { err("pci_hp_register failed with error %d", status); goto error_info; } dbg("slot registered with name: %s", slot_name(slot)); /* Add slot to our internal list */ down_write(&list_rwsem); list_add(&slot->slot_list, &slot_list); slots++; up_write(&list_rwsem); } return 0; error_info: kfree(info); error_hpslot: kfree(hotplug_slot); error_slot: kfree(slot); error: return status; } int cpci_hp_unregister_bus(struct pci_bus *bus) { struct slot *slot; struct slot *tmp; int status = 0; down_write(&list_rwsem); if (!slots) { up_write(&list_rwsem); return -1; } list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) { if (slot->bus == bus) { list_del(&slot->slot_list); slots--; dbg("deregistering slot %s", slot_name(slot)); status = pci_hp_deregister(slot->hotplug_slot); if (status) { err("pci_hp_deregister failed with error %d", status); break; } } } up_write(&list_rwsem); return status; } /* This is the interrupt mode interrupt handler */ static irqreturn_t cpci_hp_intr(int irq, void *data) { dbg("entered cpci_hp_intr"); /* Check to see if it was our interrupt */ if ((controller->irq_flags & IRQF_SHARED) && !controller->ops->check_irq(controller->dev_id)) { dbg("exited cpci_hp_intr, not our interrupt"); return IRQ_NONE; } /* Disable ENUM interrupt */ controller->ops->disable_irq(); /* Trigger processing by the event thread */ wake_up_process(cpci_thread); return IRQ_HANDLED; } /* * According to PICMG 2.1 R2.0, section 6.3.2, upon * initialization, the system driver shall clear the * INS bits of the cold-inserted devices. 
*/ static int init_slots(int clear_ins) { struct slot *slot; struct pci_dev* dev; dbg("%s - enter", __func__); down_read(&list_rwsem); if (!slots) { up_read(&list_rwsem); return -1; } list_for_each_entry(slot, &slot_list, slot_list) { dbg("%s - looking at slot %s", __func__, slot_name(slot)); if (clear_ins && cpci_check_and_clear_ins(slot)) dbg("%s - cleared INS for slot %s", __func__, slot_name(slot)); dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0)); if (dev) { if (update_adapter_status(slot->hotplug_slot, 1)) warn("failure to update adapter file"); if (update_latch_status(slot->hotplug_slot, 1)) warn("failure to update latch file"); slot->dev = dev; } } up_read(&list_rwsem); dbg("%s - exit", __func__); return 0; } static int check_slots(void) { struct slot *slot; int extracted; int inserted; u16 hs_csr; down_read(&list_rwsem); if (!slots) { up_read(&list_rwsem); err("no slots registered, shutting down"); return -1; } extracted = inserted = 0; list_for_each_entry(slot, &slot_list, slot_list) { dbg("%s - looking at slot %s", __func__, slot_name(slot)); if (cpci_check_and_clear_ins(slot)) { /* * Some broken hardware (e.g. PLX 9054AB) asserts * ENUM# twice... 
*/ if (slot->dev) { warn("slot %s already inserted", slot_name(slot)); inserted++; continue; } /* Process insertion */ dbg("%s - slot %s inserted", __func__, slot_name(slot)); /* GSM, debug */ hs_csr = cpci_get_hs_csr(slot); dbg("%s - slot %s HS_CSR (1) = %04x", __func__, slot_name(slot), hs_csr); /* Configure device */ dbg("%s - configuring slot %s", __func__, slot_name(slot)); if (cpci_configure_slot(slot)) { err("%s - could not configure slot %s", __func__, slot_name(slot)); continue; } dbg("%s - finished configuring slot %s", __func__, slot_name(slot)); /* GSM, debug */ hs_csr = cpci_get_hs_csr(slot); dbg("%s - slot %s HS_CSR (2) = %04x", __func__, slot_name(slot), hs_csr); if (update_latch_status(slot->hotplug_slot, 1)) warn("failure to update latch file"); if (update_adapter_status(slot->hotplug_slot, 1)) warn("failure to update adapter file"); cpci_led_off(slot); /* GSM, debug */ hs_csr = cpci_get_hs_csr(slot); dbg("%s - slot %s HS_CSR (3) = %04x", __func__, slot_name(slot), hs_csr); inserted++; } else if (cpci_check_ext(slot)) { /* Process extraction request */ dbg("%s - slot %s extracted", __func__, slot_name(slot)); /* GSM, debug */ hs_csr = cpci_get_hs_csr(slot); dbg("%s - slot %s HS_CSR = %04x", __func__, slot_name(slot), hs_csr); if (!slot->extracting) { if (update_latch_status(slot->hotplug_slot, 0)) { warn("failure to update latch file"); } slot->extracting = 1; atomic_inc(&extracting); } extracted++; } else if (slot->extracting) { hs_csr = cpci_get_hs_csr(slot); if (hs_csr == 0xffff) { /* * Hmmm, we're likely hosed at this point, should we * bother trying to tell the driver or not? 
*/ err("card in slot %s was improperly removed", slot_name(slot)); if (update_adapter_status(slot->hotplug_slot, 0)) warn("failure to update adapter file"); slot->extracting = 0; atomic_dec(&extracting); } } } up_read(&list_rwsem); dbg("inserted=%d, extracted=%d, extracting=%d", inserted, extracted, atomic_read(&extracting)); if (inserted || extracted) return extracted; else if (!atomic_read(&extracting)) { err("cannot find ENUM# source, shutting down"); return -1; } return 0; } /* This is the interrupt mode worker thread body */ static int event_thread(void *data) { int rc; dbg("%s - event thread started", __func__); while (1) { dbg("event thread sleeping"); set_current_state(TASK_INTERRUPTIBLE); schedule(); if (kthread_should_stop()) break; do { rc = check_slots(); if (rc > 0) { /* Give userspace a chance to handle extraction */ msleep(500); } else if (rc < 0) { dbg("%s - error checking slots", __func__); thread_finished = 1; goto out; } } while (atomic_read(&extracting) && !kthread_should_stop()); if (kthread_should_stop()) break; /* Re-enable ENUM# interrupt */ dbg("%s - re-enabling irq", __func__); controller->ops->enable_irq(); } out: return 0; } /* This is the polling mode worker thread body */ static int poll_thread(void *data) { int rc; while (1) { if (kthread_should_stop() || signal_pending(current)) break; if (controller->ops->query_enum()) { do { rc = check_slots(); if (rc > 0) { /* Give userspace a chance to handle extraction */ msleep(500); } else if (rc < 0) { dbg("%s - error checking slots", __func__); thread_finished = 1; goto out; } } while (atomic_read(&extracting) && !kthread_should_stop()); } msleep(100); } out: return 0; } static int cpci_start_thread(void) { if (controller->irq) cpci_thread = kthread_run(event_thread, NULL, "cpci_hp_eventd"); else cpci_thread = kthread_run(poll_thread, NULL, "cpci_hp_polld"); if (IS_ERR(cpci_thread)) { err("Can't start up our thread"); return PTR_ERR(cpci_thread); } thread_finished = 0; return 0; } static 
void cpci_stop_thread(void) { kthread_stop(cpci_thread); thread_finished = 1; } int cpci_hp_register_controller(struct cpci_hp_controller *new_controller) { int status = 0; if (controller) return -1; if (!(new_controller && new_controller->ops)) return -EINVAL; if (new_controller->irq) { if (!(new_controller->ops->enable_irq && new_controller->ops->disable_irq)) status = -EINVAL; if (request_irq(new_controller->irq, cpci_hp_intr, new_controller->irq_flags, MY_NAME, new_controller->dev_id)) { err("Can't get irq %d for the hotplug cPCI controller", new_controller->irq); status = -ENODEV; } dbg("%s - acquired controller irq %d", __func__, new_controller->irq); } if (!status) controller = new_controller; return status; } static void cleanup_slots(void) { struct slot *slot; struct slot *tmp; /* * Unregister all of our slots with the pci_hotplug subsystem, * and free up all memory that we had allocated. */ down_write(&list_rwsem); if (!slots) goto cleanup_null; list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) { list_del(&slot->slot_list); pci_hp_deregister(slot->hotplug_slot); } cleanup_null: up_write(&list_rwsem); return; } int cpci_hp_unregister_controller(struct cpci_hp_controller *old_controller) { int status = 0; if (controller) { if (!thread_finished) cpci_stop_thread(); if (controller->irq) free_irq(controller->irq, controller->dev_id); controller = NULL; cleanup_slots(); } else status = -ENODEV; return status; } int cpci_hp_start(void) { static int first = 1; int status; dbg("%s - enter", __func__); if (!controller) return -ENODEV; down_read(&list_rwsem); if (list_empty(&slot_list)) { up_read(&list_rwsem); return -ENODEV; } up_read(&list_rwsem); status = init_slots(first); if (first) first = 0; if (status) return status; status = cpci_start_thread(); if (status) return status; dbg("%s - thread started", __func__); if (controller->irq) { /* Start enum interrupt processing */ dbg("%s - enabling irq", __func__); controller->ops->enable_irq(); } dbg("%s - 
exit", __func__); return 0; } int cpci_hp_stop(void) { if (!controller) return -ENODEV; if (controller->irq) { /* Stop enum interrupt processing */ dbg("%s - disabling irq", __func__); controller->ops->disable_irq(); } cpci_stop_thread(); return 0; } int __init cpci_hotplug_init(int debug) { cpci_debug = debug; return 0; } void __exit cpci_hotplug_exit(void) { /* * Clean everything up. */ cpci_hp_stop(); cpci_hp_unregister_controller(controller); } EXPORT_SYMBOL_GPL(cpci_hp_register_controller); EXPORT_SYMBOL_GPL(cpci_hp_unregister_controller); EXPORT_SYMBOL_GPL(cpci_hp_register_bus); EXPORT_SYMBOL_GPL(cpci_hp_unregister_bus); EXPORT_SYMBOL_GPL(cpci_hp_start); EXPORT_SYMBOL_GPL(cpci_hp_stop);
gpl-2.0
vivilyu/android_kernel_huawei_c8813
arch/powerpc/kernel/vdso.c
4452
21083
/* * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/elf.h> #include <linux/security.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/mmu_context.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/sections.h> #include <asm/firmware.h> #include <asm/vdso.h> #include <asm/vdso_datapage.h> #include "setup.h" #undef DEBUG #ifdef DEBUG #define DBG(fmt...) printk(fmt) #else #define DBG(fmt...) #endif /* Max supported size for symbol names */ #define MAX_SYMNAME 64 /* The alignment of the vDSO */ #define VDSO_ALIGNMENT (1 << 16) extern char vdso32_start, vdso32_end; static void *vdso32_kbase = &vdso32_start; static unsigned int vdso32_pages; static struct page **vdso32_pagelist; unsigned long vdso32_sigtramp; unsigned long vdso32_rt_sigtramp; #ifdef CONFIG_PPC64 extern char vdso64_start, vdso64_end; static void *vdso64_kbase = &vdso64_start; static unsigned int vdso64_pages; static struct page **vdso64_pagelist; unsigned long vdso64_rt_sigtramp; #endif /* CONFIG_PPC64 */ static int vdso_ready; /* * The vdso data page (aka. systemcfg for old ppc64 fans) is here. 
* Once the early boot kernel code no longer needs to muck around * with it, it will become dynamically allocated */ static union { struct vdso_data data; u8 page[PAGE_SIZE]; } vdso_data_store __page_aligned_data; struct vdso_data *vdso_data = &vdso_data_store.data; /* Format of the patch table */ struct vdso_patch_def { unsigned long ftr_mask, ftr_value; const char *gen_name; const char *fix_name; }; /* Table of functions to patch based on the CPU type/revision * * Currently, we only change sync_dicache to do nothing on processors * with a coherent icache */ static struct vdso_patch_def vdso_patches[] = { { CPU_FTR_COHERENT_ICACHE, CPU_FTR_COHERENT_ICACHE, "__kernel_sync_dicache", "__kernel_sync_dicache_p5" }, { CPU_FTR_USE_TB, 0, "__kernel_gettimeofday", NULL }, { CPU_FTR_USE_TB, 0, "__kernel_clock_gettime", NULL }, { CPU_FTR_USE_TB, 0, "__kernel_clock_getres", NULL }, { CPU_FTR_USE_TB, 0, "__kernel_get_tbfreq", NULL }, }; /* * Some infos carried around for each of them during parsing at * boot time. 
*/ struct lib32_elfinfo { Elf32_Ehdr *hdr; /* ptr to ELF */ Elf32_Sym *dynsym; /* ptr to .dynsym section */ unsigned long dynsymsize; /* size of .dynsym section */ char *dynstr; /* ptr to .dynstr section */ unsigned long text; /* offset of .text section in .so */ }; struct lib64_elfinfo { Elf64_Ehdr *hdr; Elf64_Sym *dynsym; unsigned long dynsymsize; char *dynstr; unsigned long text; }; #ifdef __DEBUG static void dump_one_vdso_page(struct page *pg, struct page *upg) { printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT), page_count(pg), pg->flags); if (upg && !IS_ERR(upg) /* && pg != upg*/) { printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg) << PAGE_SHIFT), page_count(upg), upg->flags); } printk("\n"); } static void dump_vdso_pages(struct vm_area_struct * vma) { int i; if (!vma || is_32bit_task()) { printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase); for (i=0; i<vdso32_pages; i++) { struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); struct page *upg = (vma && vma->vm_mm) ? follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0) : NULL; dump_one_vdso_page(pg, upg); } } if (!vma || !is_32bit_task()) { printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase); for (i=0; i<vdso64_pages; i++) { struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE); struct page *upg = (vma && vma->vm_mm) ? 
follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0) : NULL; dump_one_vdso_page(pg, upg); } } } #endif /* DEBUG */ /* * This is called from binfmt_elf, we create the special vma for the * vDSO and insert it into the mm struct tree */ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; struct page **vdso_pagelist; unsigned long vdso_pages; unsigned long vdso_base; int rc; if (!vdso_ready) return 0; #ifdef CONFIG_PPC64 if (is_32bit_task()) { vdso_pagelist = vdso32_pagelist; vdso_pages = vdso32_pages; vdso_base = VDSO32_MBASE; } else { vdso_pagelist = vdso64_pagelist; vdso_pages = vdso64_pages; /* * On 64bit we don't have a preferred map address. This * allows get_unmapped_area to find an area near other mmaps * and most likely share a SLB entry. */ vdso_base = 0; } #else vdso_pagelist = vdso32_pagelist; vdso_pages = vdso32_pages; vdso_base = VDSO32_MBASE; #endif current->mm->context.vdso_base = 0; /* vDSO has a problem and was disabled, just don't "enable" it for the * process */ if (vdso_pages == 0) return 0; /* Add a page to the vdso size for the data page */ vdso_pages ++; /* * pick a base address for the vDSO in process space. We try to put it * at vdso_base which is the "natural" base for it, but we might fail * and end up putting it elsewhere. * Add enough to the size so that the result can be aligned. */ down_write(&mm->mmap_sem); vdso_base = get_unmapped_area(NULL, vdso_base, (vdso_pages << PAGE_SHIFT) + ((VDSO_ALIGNMENT - 1) & PAGE_MASK), 0, 0); if (IS_ERR_VALUE(vdso_base)) { rc = vdso_base; goto fail_mmapsem; } /* Add required alignment. */ vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT); /* * Put vDSO base into mm struct. We need to do this before calling * install_special_mapping or the perf counter mmap tracking code * will fail to recognise it as a vDSO (since arch_vma_name fails). 
*/ current->mm->context.vdso_base = vdso_base; /* * our vma flags don't have VM_WRITE so by default, the process isn't * allowed to write those pages. * gdb can break that with ptrace interface, and thus trigger COW on * those pages but it's then your responsibility to never do that on * the "data" page of the vDSO or you'll stop getting kernel updates * and your nice userland gettimeofday will be totally dead. * It's fine to use that for setting breakpoints in the vDSO code * pages though. */ rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdso_pagelist); if (rc) { current->mm->context.vdso_base = 0; goto fail_mmapsem; } up_write(&mm->mmap_sem); return 0; fail_mmapsem: up_write(&mm->mmap_sem); return rc; } const char *arch_vma_name(struct vm_area_struct *vma) { if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base) return "[vdso]"; return NULL; } static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname, unsigned long *size) { Elf32_Shdr *sechdrs; unsigned int i; char *secnames; /* Grab section headers and strings so we can tell who is who */ sechdrs = (void *)ehdr + ehdr->e_shoff; secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset; /* Find the section they want */ for (i = 1; i < ehdr->e_shnum; i++) { if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) { if (size) *size = sechdrs[i].sh_size; return (void *)ehdr + sechdrs[i].sh_offset; } } *size = 0; return NULL; } static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib, const char *symname) { unsigned int i; char name[MAX_SYMNAME], *c; for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) { if (lib->dynsym[i].st_name == 0) continue; strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, MAX_SYMNAME); c = strchr(name, '@'); if (c) *c = 0; if (strcmp(symname, name) == 0) return &lib->dynsym[i]; } return NULL; } /* Note that we assume the section is .text and the symbol is relative to * the 
library base */ static unsigned long __init find_function32(struct lib32_elfinfo *lib, const char *symname) { Elf32_Sym *sym = find_symbol32(lib, symname); if (sym == NULL) { printk(KERN_WARNING "vDSO32: function %s not found !\n", symname); return 0; } return sym->st_value - VDSO32_LBASE; } static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32, struct lib64_elfinfo *v64, const char *orig, const char *fix) { Elf32_Sym *sym32_gen, *sym32_fix; sym32_gen = find_symbol32(v32, orig); if (sym32_gen == NULL) { printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig); return -1; } if (fix == NULL) { sym32_gen->st_name = 0; return 0; } sym32_fix = find_symbol32(v32, fix); if (sym32_fix == NULL) { printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix); return -1; } sym32_gen->st_value = sym32_fix->st_value; sym32_gen->st_size = sym32_fix->st_size; sym32_gen->st_info = sym32_fix->st_info; sym32_gen->st_other = sym32_fix->st_other; sym32_gen->st_shndx = sym32_fix->st_shndx; return 0; } #ifdef CONFIG_PPC64 static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname, unsigned long *size) { Elf64_Shdr *sechdrs; unsigned int i; char *secnames; /* Grab section headers and strings so we can tell who is who */ sechdrs = (void *)ehdr + ehdr->e_shoff; secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset; /* Find the section they want */ for (i = 1; i < ehdr->e_shnum; i++) { if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) { if (size) *size = sechdrs[i].sh_size; return (void *)ehdr + sechdrs[i].sh_offset; } } if (size) *size = 0; return NULL; } static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib, const char *symname) { unsigned int i; char name[MAX_SYMNAME], *c; for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) { if (lib->dynsym[i].st_name == 0) continue; strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, MAX_SYMNAME); c = strchr(name, '@'); if (c) *c = 0; if (strcmp(symname, name) == 0) return &lib->dynsym[i]; } return 
NULL; } /* Note that we assume the section is .text and the symbol is relative to * the library base */ static unsigned long __init find_function64(struct lib64_elfinfo *lib, const char *symname) { Elf64_Sym *sym = find_symbol64(lib, symname); if (sym == NULL) { printk(KERN_WARNING "vDSO64: function %s not found !\n", symname); return 0; } #ifdef VDS64_HAS_DESCRIPTORS return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) - VDSO64_LBASE; #else return sym->st_value - VDSO64_LBASE; #endif } static int __init vdso_do_func_patch64(struct lib32_elfinfo *v32, struct lib64_elfinfo *v64, const char *orig, const char *fix) { Elf64_Sym *sym64_gen, *sym64_fix; sym64_gen = find_symbol64(v64, orig); if (sym64_gen == NULL) { printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig); return -1; } if (fix == NULL) { sym64_gen->st_name = 0; return 0; } sym64_fix = find_symbol64(v64, fix); if (sym64_fix == NULL) { printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix); return -1; } sym64_gen->st_value = sym64_fix->st_value; sym64_gen->st_size = sym64_fix->st_size; sym64_gen->st_info = sym64_fix->st_info; sym64_gen->st_other = sym64_fix->st_other; sym64_gen->st_shndx = sym64_fix->st_shndx; return 0; } #endif /* CONFIG_PPC64 */ static __init int vdso_do_find_sections(struct lib32_elfinfo *v32, struct lib64_elfinfo *v64) { void *sect; /* * Locate symbol tables & text section */ v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize); v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL); if (v32->dynsym == NULL || v32->dynstr == NULL) { printk(KERN_ERR "vDSO32: required symbol section not found\n"); return -1; } sect = find_section32(v32->hdr, ".text", NULL); if (sect == NULL) { printk(KERN_ERR "vDSO32: the .text section was not found\n"); return -1; } v32->text = sect - vdso32_kbase; #ifdef CONFIG_PPC64 v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize); v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL); if (v64->dynsym == NULL || 
v64->dynstr == NULL) { printk(KERN_ERR "vDSO64: required symbol section not found\n"); return -1; } sect = find_section64(v64->hdr, ".text", NULL); if (sect == NULL) { printk(KERN_ERR "vDSO64: the .text section was not found\n"); return -1; } v64->text = sect - vdso64_kbase; #endif /* CONFIG_PPC64 */ return 0; } static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32, struct lib64_elfinfo *v64) { /* * Find signal trampolines */ #ifdef CONFIG_PPC64 vdso64_rt_sigtramp = find_function64(v64, "__kernel_sigtramp_rt64"); #endif vdso32_sigtramp = find_function32(v32, "__kernel_sigtramp32"); vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32"); } static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32, struct lib64_elfinfo *v64) { Elf32_Sym *sym32; #ifdef CONFIG_PPC64 Elf64_Sym *sym64; sym64 = find_symbol64(v64, "__kernel_datapage_offset"); if (sym64 == NULL) { printk(KERN_ERR "vDSO64: Can't find symbol " "__kernel_datapage_offset !\n"); return -1; } *((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) = (vdso64_pages << PAGE_SHIFT) - (sym64->st_value - VDSO64_LBASE); #endif /* CONFIG_PPC64 */ sym32 = find_symbol32(v32, "__kernel_datapage_offset"); if (sym32 == NULL) { printk(KERN_ERR "vDSO32: Can't find symbol " "__kernel_datapage_offset !\n"); return -1; } *((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) = (vdso32_pages << PAGE_SHIFT) - (sym32->st_value - VDSO32_LBASE); return 0; } static __init int vdso_fixup_features(struct lib32_elfinfo *v32, struct lib64_elfinfo *v64) { void *start32; unsigned long size32; #ifdef CONFIG_PPC64 void *start64; unsigned long size64; start64 = find_section64(v64->hdr, "__ftr_fixup", &size64); if (start64) do_feature_fixups(cur_cpu_spec->cpu_features, start64, start64 + size64); start64 = find_section64(v64->hdr, "__mmu_ftr_fixup", &size64); if (start64) do_feature_fixups(cur_cpu_spec->mmu_features, start64, start64 + size64); start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64); 
if (start64) do_feature_fixups(powerpc_firmware_features, start64, start64 + size64); start64 = find_section64(v64->hdr, "__lwsync_fixup", &size64); if (start64) do_lwsync_fixups(cur_cpu_spec->cpu_features, start64, start64 + size64); #endif /* CONFIG_PPC64 */ start32 = find_section32(v32->hdr, "__ftr_fixup", &size32); if (start32) do_feature_fixups(cur_cpu_spec->cpu_features, start32, start32 + size32); start32 = find_section32(v32->hdr, "__mmu_ftr_fixup", &size32); if (start32) do_feature_fixups(cur_cpu_spec->mmu_features, start32, start32 + size32); #ifdef CONFIG_PPC64 start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32); if (start32) do_feature_fixups(powerpc_firmware_features, start32, start32 + size32); #endif /* CONFIG_PPC64 */ start32 = find_section32(v32->hdr, "__lwsync_fixup", &size32); if (start32) do_lwsync_fixups(cur_cpu_spec->cpu_features, start32, start32 + size32); return 0; } static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32, struct lib64_elfinfo *v64) { int i; for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) { struct vdso_patch_def *patch = &vdso_patches[i]; int match = (cur_cpu_spec->cpu_features & patch->ftr_mask) == patch->ftr_value; if (!match) continue; DBG("replacing %s with %s...\n", patch->gen_name, patch->fix_name ? "NONE" : patch->fix_name); /* * Patch the 32 bits and 64 bits symbols. Note that we do not * patch the "." symbol on 64 bits. * It would be easy to do, but doesn't seem to be necessary, * patching the OPD symbol is enough. 
*/ vdso_do_func_patch32(v32, v64, patch->gen_name, patch->fix_name); #ifdef CONFIG_PPC64 vdso_do_func_patch64(v32, v64, patch->gen_name, patch->fix_name); #endif /* CONFIG_PPC64 */ } return 0; } static __init int vdso_setup(void) { struct lib32_elfinfo v32; struct lib64_elfinfo v64; v32.hdr = vdso32_kbase; #ifdef CONFIG_PPC64 v64.hdr = vdso64_kbase; #endif if (vdso_do_find_sections(&v32, &v64)) return -1; if (vdso_fixup_datapage(&v32, &v64)) return -1; if (vdso_fixup_features(&v32, &v64)) return -1; if (vdso_fixup_alt_funcs(&v32, &v64)) return -1; vdso_setup_trampolines(&v32, &v64); return 0; } /* * Called from setup_arch to initialize the bitmap of available * syscalls in the systemcfg page */ static void __init vdso_setup_syscall_map(void) { unsigned int i; extern unsigned long *sys_call_table; extern unsigned long sys_ni_syscall; for (i = 0; i < __NR_syscalls; i++) { #ifdef CONFIG_PPC64 if (sys_call_table[i*2] != sys_ni_syscall) vdso_data->syscall_map_64[i >> 5] |= 0x80000000UL >> (i & 0x1f); if (sys_call_table[i*2+1] != sys_ni_syscall) vdso_data->syscall_map_32[i >> 5] |= 0x80000000UL >> (i & 0x1f); #else /* CONFIG_PPC64 */ if (sys_call_table[i] != sys_ni_syscall) vdso_data->syscall_map_32[i >> 5] |= 0x80000000UL >> (i & 0x1f); #endif /* CONFIG_PPC64 */ } } static int __init vdso_init(void) { int i; #ifdef CONFIG_PPC64 /* * Fill up the "systemcfg" stuff for backward compatibility */ strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64"); vdso_data->version.major = SYSTEMCFG_MAJOR; vdso_data->version.minor = SYSTEMCFG_MINOR; vdso_data->processor = mfspr(SPRN_PVR); /* * Fake the old platform number for pSeries and add * in LPAR bit if necessary */ vdso_data->platform = 0x100; if (firmware_has_feature(FW_FEATURE_LPAR)) vdso_data->platform |= 1; vdso_data->physicalMemorySize = memblock_phys_mem_size(); vdso_data->dcache_size = ppc64_caches.dsize; vdso_data->dcache_line_size = ppc64_caches.dline_size; vdso_data->icache_size = ppc64_caches.isize; 
vdso_data->icache_line_size = ppc64_caches.iline_size; /* XXXOJN: Blocks should be added to ppc64_caches and used instead */ vdso_data->dcache_block_size = ppc64_caches.dline_size; vdso_data->icache_block_size = ppc64_caches.iline_size; vdso_data->dcache_log_block_size = ppc64_caches.log_dline_size; vdso_data->icache_log_block_size = ppc64_caches.log_iline_size; /* * Calculate the size of the 64 bits vDSO */ vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT; DBG("vdso64_kbase: %p, 0x%x pages\n", vdso64_kbase, vdso64_pages); #else vdso_data->dcache_block_size = L1_CACHE_BYTES; vdso_data->dcache_log_block_size = L1_CACHE_SHIFT; vdso_data->icache_block_size = L1_CACHE_BYTES; vdso_data->icache_log_block_size = L1_CACHE_SHIFT; #endif /* CONFIG_PPC64 */ /* * Calculate the size of the 32 bits vDSO */ vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT; DBG("vdso32_kbase: %p, 0x%x pages\n", vdso32_kbase, vdso32_pages); /* * Setup the syscall map in the vDOS */ vdso_setup_syscall_map(); /* * Initialize the vDSO images in memory, that is do necessary * fixups of vDSO symbols, locate trampolines, etc... 
*/ if (vdso_setup()) { printk(KERN_ERR "vDSO setup failure, not enabled !\n"); vdso32_pages = 0; #ifdef CONFIG_PPC64 vdso64_pages = 0; #endif return 0; } /* Make sure pages are in the correct state */ vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 2), GFP_KERNEL); BUG_ON(vdso32_pagelist == NULL); for (i = 0; i < vdso32_pages; i++) { struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); ClearPageReserved(pg); get_page(pg); vdso32_pagelist[i] = pg; } vdso32_pagelist[i++] = virt_to_page(vdso_data); vdso32_pagelist[i] = NULL; #ifdef CONFIG_PPC64 vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 2), GFP_KERNEL); BUG_ON(vdso64_pagelist == NULL); for (i = 0; i < vdso64_pages; i++) { struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE); ClearPageReserved(pg); get_page(pg); vdso64_pagelist[i] = pg; } vdso64_pagelist[i++] = virt_to_page(vdso_data); vdso64_pagelist[i] = NULL; #endif /* CONFIG_PPC64 */ get_page(virt_to_page(vdso_data)); smp_wmb(); vdso_ready = 1; return 0; } arch_initcall(vdso_init); int in_gate_area_no_mm(unsigned long addr) { return 0; } int in_gate_area(struct mm_struct *mm, unsigned long addr) { return 0; } struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { return NULL; }
gpl-2.0
MiCode/mi2_kernel
arch/parisc/kernel/pci.c
4452
7249
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1997, 1998 Ralf Baechle * Copyright (C) 1999 SuSE GmbH * Copyright (C) 1999-2001 Hewlett-Packard Company * Copyright (C) 1999-2001 Grant Grundler */ #include <linux/eisa.h> #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/types.h> #include <asm/io.h> #include <asm/superio.h> #define DEBUG_RESOURCES 0 #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBGC(x...) printk(KERN_DEBUG x) #else # define DBGC(x...) #endif #if DEBUG_RESOURCES #define DBG_RES(x...) printk(KERN_DEBUG x) #else #define DBG_RES(x...) #endif /* To be used as: mdelay(pci_post_reset_delay); * * post_reset is the time the kernel should stall to prevent anyone from * accessing the PCI bus once #RESET is de-asserted. * PCI spec somewhere says 1 second but with multi-PCI bus systems, * this makes the boot time much longer than necessary. * 20ms seems to work for all the HP PCI implementations to date. * * #define pci_post_reset_delay 50 */ struct pci_port_ops *pci_port __read_mostly; struct pci_bios_ops *pci_bios __read_mostly; static int pci_hba_count __read_mostly; /* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data. */ #define PCI_HBA_MAX 32 static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __read_mostly; /******************************************************************** ** ** I/O port space support ** *********************************************************************/ /* EISA port numbers and PCI port numbers share the same interface. Some * machines have both EISA and PCI adapters installed. Rather than turn * pci_port into an array, we reserve bus 0 for EISA and call the EISA * routines if the access is to a port on bus 0. We don't want to fix * EISA and ISA drivers which assume port space is <= 0xffff. 
*/ #ifdef CONFIG_EISA #define EISA_IN(size) if (EISA_bus && (b == 0)) return eisa_in##size(addr) #define EISA_OUT(size) if (EISA_bus && (b == 0)) return eisa_out##size(d, addr) #else #define EISA_IN(size) #define EISA_OUT(size) #endif #define PCI_PORT_IN(type, size) \ u##size in##type (int addr) \ { \ int b = PCI_PORT_HBA(addr); \ EISA_IN(size); \ if (!parisc_pci_hba[b]) return (u##size) -1; \ return pci_port->in##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr)); \ } \ EXPORT_SYMBOL(in##type); PCI_PORT_IN(b, 8) PCI_PORT_IN(w, 16) PCI_PORT_IN(l, 32) #define PCI_PORT_OUT(type, size) \ void out##type (u##size d, int addr) \ { \ int b = PCI_PORT_HBA(addr); \ EISA_OUT(size); \ if (!parisc_pci_hba[b]) return; \ pci_port->out##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr), d); \ } \ EXPORT_SYMBOL(out##type); PCI_PORT_OUT(b, 8) PCI_PORT_OUT(w, 16) PCI_PORT_OUT(l, 32) /* * BIOS32 replacement. */ static int __init pcibios_init(void) { if (!pci_bios) return -1; if (pci_bios->init) { pci_bios->init(); } else { printk(KERN_WARNING "pci_bios != NULL but init() is!\n"); } /* Set the CLS for PCI as early as possible. */ pci_cache_line_size = pci_dfl_cache_line_size; return 0; } /* Called from pci_do_scan_bus() *after* walking a bus but before walking PPBs. */ void pcibios_fixup_bus(struct pci_bus *bus) { if (pci_bios->fixup_bus) { pci_bios->fixup_bus(bus); } else { printk(KERN_WARNING "pci_bios != NULL but fixup_bus() is!\n"); } } char *pcibios_setup(char *str) { return str; } /* * Called by pci_set_master() - a driver interface. * * Legacy PDC guarantees to set: * Map Memory BAR's into PA IO space. * Map Expansion ROM BAR into one common PA IO space per bus. * Map IO BAR's into PCI IO space. * Command (see below) * Cache Line Size * Latency Timer * Interrupt Line * PPB: secondary latency timer, io/mmio base/limit, * bus numbers, bridge control * */ void pcibios_set_master(struct pci_dev *dev) { u8 lat; /* If someone already mucked with this, don't touch it. 
*/ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); if (lat >= 16) return; /* ** HP generally has fewer devices on the bus than other architectures. ** upper byte is PCI_LATENCY_TIMER. */ pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, (0x80 << 8) | pci_cache_line_size); } void __init pcibios_init_bus(struct pci_bus *bus) { struct pci_dev *dev = bus->self; unsigned short bridge_ctl; /* We deal only with pci controllers and pci-pci bridges. */ if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) return; /* PCI-PCI bridge - set the cache line and default latency (32) for primary and secondary buses. */ pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32); pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl); bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl); } /* * pcibios align resources() is called every time generic PCI code * wants to generate a new address. The process of looking for * an available address, each candidate is first "aligned" and * then checked if the resource is available until a match is found. * * Since we are just checking candidates, don't use any fields other * than res->start. */ resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t alignment) { resource_size_t mask, align, start = res->start; DBG_RES("pcibios_align_resource(%s, (%p) [%lx,%lx]/%x, 0x%lx, 0x%lx)\n", pci_name(((struct pci_dev *) data)), res->parent, res->start, res->end, (int) res->flags, size, alignment); /* If it's not IO, then it's gotta be MEM */ align = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; /* Align to largest of MIN or input size */ mask = max(alignment, align) - 1; start += mask; start &= ~mask; return start; } /* * A driver is enabling the device. We make sure that all the appropriate * bits are set to allow the device to operate as the driver is expecting. 
* We enable the port IO and memory IO bits if the device has any BARs of * that type, and we enable the PERR and SERR bits unconditionally. * Drivers that do not need parity (eg graphics and possibly networking) * can clear these bits if they want. */ int pcibios_enable_device(struct pci_dev *dev, int mask) { int err; u16 cmd, old_cmd; err = pci_enable_resources(dev, mask); if (err < 0) return err; pci_read_config_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; cmd |= (PCI_COMMAND_SERR | PCI_COMMAND_PARITY); #if 0 /* If bridge/bus controller has FBB enabled, child must too. */ if (dev->bus->bridge_ctl & PCI_BRIDGE_CTL_FAST_BACK) cmd |= PCI_COMMAND_FAST_BACK; #endif if (cmd != old_cmd) { dev_info(&dev->dev, "enabling SERR and PARITY (%04x -> %04x)\n", old_cmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } return 0; } /* PA-RISC specific */ void pcibios_register_hba(struct pci_hba_data *hba) { if (pci_hba_count >= PCI_HBA_MAX) { printk(KERN_ERR "PCI: Too many Host Bus Adapters\n"); return; } parisc_pci_hba[pci_hba_count] = hba; hba->hba_num = pci_hba_count++; } subsys_initcall(pcibios_init);
gpl-2.0
Schischu/android_kernel_samsung_lt03lte
drivers/staging/iio/accel/adis16220_core.c
4964
16689
/*
 * ADIS16220 Programmable Digital Vibration Sensor driver
 *
 * Copyright 2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/module.h>

#include "../iio.h"
#include "../sysfs.h"
#include "adis16220.h"

#define DRIVER_NAME		"adis16220"

/**
 * adis16220_spi_write_reg_8() - write single byte to a register
 * @indio_dev: iio device associated with child of actual device
 * @reg_address: the address of the register to be written
 * @val: the value to write
 **/
static int adis16220_spi_write_reg_8(struct iio_dev *indio_dev,
		u8 reg_address,
		u8 val)
{
	int ret;
	struct adis16220_state *st = iio_priv(indio_dev);

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16220_WRITE_REG(reg_address);
	st->tx[1] = val;

	ret = spi_write(st->us, st->tx, 2);
	mutex_unlock(&st->buf_lock);

	return ret;
}

/**
 * adis16220_spi_write_reg_16() - write 2 bytes to a pair of registers
 * @indio_dev: iio device associated with child of actual device
 * @lower_reg_address: the address of the lower of the two registers. Second
 * register is assumed to have address one greater.
 * @value: value to be written
 *
 * The device uses 8-bit registers, so a 16-bit value is written as two
 * byte writes in a single SPI message (low byte first).
 **/
static int adis16220_spi_write_reg_16(struct iio_dev *indio_dev,
		u8 lower_reg_address,
		u16 value)
{
	int ret;
	struct spi_message msg;
	struct adis16220_state *st = iio_priv(indio_dev);
	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.delay_usecs = 35,	/* per-transfer stall time */
		}, {
			.tx_buf = st->tx + 2,
			.bits_per_word = 8,
			.len = 2,
			.delay_usecs = 35,
		},
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16220_WRITE_REG(lower_reg_address);
	st->tx[1] = value & 0xFF;
	st->tx[2] = ADIS16220_WRITE_REG(lower_reg_address + 1);
	st->tx[3] = (value >> 8) & 0xFF;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);

	return ret;
}

/**
 * adis16220_spi_read_reg_16() - read 2 bytes from a 16-bit register
 * @indio_dev: iio device associated with child of actual device
 * @lower_reg_address: the address of the lower of the two registers. Second
 * register is assumed to have address one greater.
 * @val: somewhere to pass back the value read
 **/
static int adis16220_spi_read_reg_16(struct iio_dev *indio_dev,
		u8 lower_reg_address,
		u16 *val)
{
	struct spi_message msg;
	struct adis16220_state *st = iio_priv(indio_dev);
	int ret;
	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.delay_usecs = 35,
		}, {
			.rx_buf = st->rx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.delay_usecs = 35,
		},
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16220_READ_REG(lower_reg_address);
	st->tx[1] = 0;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	if (ret) {
		dev_err(&st->us->dev,
			"problem when reading 16 bit register 0x%02X",
			lower_reg_address);
		goto error_ret;
	}
	/* Device returns MSB first. */
	*val = (st->rx[0] << 8) | st->rx[1];

error_ret:
	mutex_unlock(&st->buf_lock);
	return ret;
}

/* sysfs show: read a 16-bit register (address in the iio_dev_attr). */
static ssize_t adis16220_read_16bit(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	ssize_t ret;
	s16 val = 0;

	/* Take the iio_dev status lock */
	mutex_lock(&indio_dev->mlock);
	ret = adis16220_spi_read_reg_16(indio_dev, this_attr->address,
					(u16 *)&val);
	mutex_unlock(&indio_dev->mlock);
	if (ret)
		return ret;
	return sprintf(buf, "%d\n", val);
}

/* sysfs store: write a 16-bit register (address in the iio_dev_attr). */
static ssize_t adis16220_write_16bit(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	u16 val;

	ret = kstrtou16(buf, 10, &val);
	if (ret)
		goto error_ret;
	ret = adis16220_spi_write_reg_16(indio_dev, this_attr->address, val);

error_ret:
	return ret ? ret : len;
}

/* Kick off a manual data capture and wait for it to complete. */
static int adis16220_capture(struct iio_dev *indio_dev)
{
	int ret;

	 /* initiates a manual data capture */
	ret = adis16220_spi_write_reg_16(indio_dev, ADIS16220_GLOB_CMD, 0xBF08);
	if (ret)
		dev_err(&indio_dev->dev, "problem beginning capture");

	msleep(10); /* delay for capture to finish */

	return ret;
}

/* Issue a software reset via the global command register. */
static int adis16220_reset(struct iio_dev *indio_dev)
{
	int ret;

	ret = adis16220_spi_write_reg_8(indio_dev,
			ADIS16220_GLOB_CMD,
			ADIS16220_GLOB_CMD_SW_RESET);
	if (ret)
		dev_err(&indio_dev->dev, "problem resetting device");

	return ret;
}

/* sysfs store for "reset": any truthy value triggers a software reset. */
static ssize_t adis16220_write_reset(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	ret = adis16220_reset(indio_dev);
	if (ret)
		return ret;
	return len;
}

/* sysfs store for "capture": any truthy value starts a capture. */
static ssize_t adis16220_write_capture(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	ret = adis16220_capture(indio_dev);
	if (ret)
		return ret;

	return len;
}

/*
 * Read DIAG_STAT, log every flagged error condition and return the
 * (masked) error bits so 0 means "healthy".
 */
static int adis16220_check_status(struct iio_dev *indio_dev)
{
	u16 status;
	int ret;

	ret = adis16220_spi_read_reg_16(indio_dev, ADIS16220_DIAG_STAT,
					&status);
	if (ret < 0) {
		dev_err(&indio_dev->dev, "Reading status failed\n");
		goto error_ret;
	}
	ret = status & 0x7F;	/* only the low 7 bits carry error flags */

	if (status & ADIS16220_DIAG_STAT_VIOLATION)
		dev_err(&indio_dev->dev,
			"Capture period violation/interruption\n");
	if (status & ADIS16220_DIAG_STAT_SPI_FAIL)
		dev_err(&indio_dev->dev, "SPI failure\n");
	if (status & ADIS16220_DIAG_STAT_FLASH_UPT)
		dev_err(&indio_dev->dev, "Flash update failed\n");
	if (status & ADIS16220_DIAG_STAT_POWER_HIGH)
		dev_err(&indio_dev->dev, "Power supply above 3.625V\n");
	if (status & ADIS16220_DIAG_STAT_POWER_LOW)
		dev_err(&indio_dev->dev, "Power supply below 3.15V\n");

error_ret:
	return ret;
}

/* Start the built-in self test; result is reported via DIAG_STAT. */
static int adis16220_self_test(struct iio_dev *indio_dev)
{
	int ret;

	ret = adis16220_spi_write_reg_16(indio_dev,
			ADIS16220_MSC_CTRL,
			ADIS16220_MSC_CTRL_SELF_TEST_EN);
	if (ret) {
		dev_err(&indio_dev->dev, "problem starting self test");
		goto err_ret;
	}

	adis16220_check_status(indio_dev);

err_ret:
	return ret;
}

/* Bring the device to a sane state: self test, then status check with
 * one reset-and-retry before giving up. */
static int adis16220_initial_setup(struct iio_dev *indio_dev)
{
	int ret;

	/* Do self test */
	ret = adis16220_self_test(indio_dev);
	if (ret) {
		dev_err(&indio_dev->dev, "self test failure");
		goto err_ret;
	}

	/* Read status register to check the result */
	ret = adis16220_check_status(indio_dev);
	if (ret) {
		adis16220_reset(indio_dev);
		dev_err(&indio_dev->dev, "device not playing ball -> reset");
		msleep(ADIS16220_STARTUP_DELAY);
		ret = adis16220_check_status(indio_dev);
		if (ret) {
			dev_err(&indio_dev->dev, "giving up");
			goto err_ret;
		}
	}

err_ret:
	return ret;
}

/*
 * Read @count bytes of 16-bit samples from one of the capture buffers,
 * starting at byte offset @off.  @addr selects which capture buffer
 * register is read.  Returns the number of bytes copied to @buf.
 */
static ssize_t adis16220_capture_buffer_read(struct iio_dev *indio_dev,
					char *buf,
					loff_t off,
					size_t count,
					int addr)
{
	struct adis16220_state *st = iio_priv(indio_dev);
	struct spi_message msg;
	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.delay_usecs = 25,
		}, {
			.tx_buf = st->tx,
			.rx_buf = st->rx,
			.bits_per_word = 8,
			.cs_change = 1,
			.delay_usecs = 25,
		},
	};
	int ret;
	int i;

	if (unlikely(!count))
		return count;

	/* Offsets and counts are in whole 16-bit samples. */
	if ((off >= ADIS16220_CAPTURE_SIZE) || (count & 1) || (off & 1))
		return -EINVAL;

	if (off + count > ADIS16220_CAPTURE_SIZE)
		count = ADIS16220_CAPTURE_SIZE - off;

	/*
	 * Write the begin position of the capture buffer.  CAPT_PNTR is a
	 * sample index, so convert the byte offset with a right shift.
	 * (The original code had "off > 1" — a boolean — here, which
	 * pointed every non-zero read at sample 1.)
	 */
	ret = adis16220_spi_write_reg_16(indio_dev,
					ADIS16220_CAPT_PNTR,
					off >> 1);
	if (ret)
		return -EIO;

	/* read count/2 values from capture buffer */
	mutex_lock(&st->buf_lock);

	/*
	 * NOTE(review): this fills count bytes of st->tx — confirm the tx
	 * buffer in adis16220_state is sized for ADIS16220_CAPTURE_SIZE.
	 */
	for (i = 0; i < count; i += 2) {
		st->tx[i] = ADIS16220_READ_REG(addr);
		st->tx[i + 1] = 0;
	}
	xfers[1].len = count;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	if (ret) {
		mutex_unlock(&st->buf_lock);
		return -EIO;
	}

	memcpy(buf, st->rx, count);

	mutex_unlock(&st->buf_lock);
	return count;
}

/* Binary sysfs read for the acceleration capture buffer. */
static ssize_t adis16220_accel_bin_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				char *buf,
				loff_t off,
				size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return adis16220_capture_buffer_read(indio_dev, buf,
					off, count,
					ADIS16220_CAPT_BUFA);
}

static struct bin_attribute accel_bin = {
	.attr = {
		.name = "accel_bin",
		.mode = S_IRUGO,
	},
	.read = adis16220_accel_bin_read,
	.size = ADIS16220_CAPTURE_SIZE,
};

/* Binary sysfs read for the AIN1 capture buffer. */
static ssize_t adis16220_adc1_bin_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				char *buf, loff_t off,
				size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return adis16220_capture_buffer_read(indio_dev, buf,
					off, count,
					ADIS16220_CAPT_BUF1);
}

static struct bin_attribute adc1_bin = {
	.attr = {
		.name = "in0_bin",
		.mode = S_IRUGO,
	},
	.read = adis16220_adc1_bin_read,
	.size = ADIS16220_CAPTURE_SIZE,
};

/* Binary sysfs read for the AIN2 capture buffer. */
static ssize_t adis16220_adc2_bin_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				char *buf, loff_t off,
				size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return adis16220_capture_buffer_read(indio_dev, buf,
					off, count,
					ADIS16220_CAPT_BUF2);
}

static struct bin_attribute adc2_bin = {
	.attr = {
		.name = "in1_bin",
		.mode = S_IRUGO,
	},
	.read = adis16220_adc2_bin_read,
	.size = ADIS16220_CAPTURE_SIZE,
};

static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL,
		adis16220_write_reset, 0);

#define IIO_DEV_ATTR_CAPTURE(_store)				\
	IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0)

static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture);

#define IIO_DEV_ATTR_CAPTURE_COUNT(_mode, _show, _store, _addr)		\
	IIO_DEVICE_ATTR(capture_count, _mode, _show, _store, _addr)

static IIO_DEV_ATTR_CAPTURE_COUNT(S_IWUSR | S_IRUGO,
		adis16220_read_16bit,
		adis16220_write_16bit,
		ADIS16220_CAPT_PNTR);

enum adis16220_channel {
	in_supply, in_1, in_2, accel, temp
};

struct adis16220_address_spec {
	u8 addr;	/* register address */
	u8 bits;	/* number of valid data bits */
	bool sign;	/* true if the value is two's complement */
};

/* Address / bits / signed, indexed by [channel][info-kind]:
 * 0 = raw value, 1 = offset/null register, 2 = peak register. */
static const struct adis16220_address_spec adis16220_addresses[][3] = {
	[in_supply] = { { ADIS16220_CAPT_SUPPLY, 12, 0 }, },
	[in_1] = { { ADIS16220_CAPT_BUF1, 16, 1 },
		   { ADIS16220_AIN1_NULL, 16, 1 },
		   { ADIS16220_CAPT_PEAK1, 16, 1 }, },
	[in_2] = { { ADIS16220_CAPT_BUF2, 16, 1 },
		   { ADIS16220_AIN2_NULL, 16, 1 },
		   { ADIS16220_CAPT_PEAK2, 16, 1 }, },
	[accel] = { { ADIS16220_CAPT_BUFA, 16, 1 },
		    { ADIS16220_ACCL_NULL, 16, 1 },
		    { ADIS16220_CAPT_PEAKA, 16, 1 }, },
	[temp] = { { ADIS16220_CAPT_TEMP, 12, 0 }, }
};

static int adis16220_read_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      int *val, int *val2,
			      long mask)
{
	int ret = -EINVAL;
	int addrind = 0;
	u16 uval;
	s16 sval;
	u8 bits;

	switch (mask) {
	case 0:
		addrind = 0;
		break;
	case IIO_CHAN_INFO_OFFSET:
		if (chan->type == IIO_TEMP) {
			*val = 25;
			return IIO_VAL_INT;
		}
		addrind = 1;
		break;
	case IIO_CHAN_INFO_PEAK:
		addrind = 2;
		break;
	case IIO_CHAN_INFO_SCALE:
		*val = 0;
		switch (chan->type) {
		case IIO_TEMP:
			*val2 = -470000;
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_ACCEL:
			*val2 = 1887042;
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_VOLTAGE:
			if (chan->channel == 0)
				/*
				 * Originally written as 0012221 — an
				 * accidental octal literal (5265).  The
				 * digits were clearly meant as decimal.
				 */
				*val2 = 12221;
			else /* Should really be dependent on VDD */
				*val2 = 305;
			return IIO_VAL_INT_PLUS_MICRO;
		default:
			return -EINVAL;
		}
	default:
		return -EINVAL;
	}
	if (adis16220_addresses[chan->address][addrind].sign) {
		ret = adis16220_spi_read_reg_16(indio_dev,
						adis16220_addresses[chan
								    ->address]
						[addrind].addr,
						(u16 *)&sval);
		if (ret)
			return ret;
		bits = adis16220_addresses[chan->address][addrind].bits;
		/* Mask to valid bits, then sign-extend from bit (bits-1). */
		sval &= (1 << bits) - 1;
		sval = (s16)(sval << (16 - bits)) >> (16 - bits);
		*val = sval;
		return IIO_VAL_INT;
	} else {
		ret = adis16220_spi_read_reg_16(indio_dev,
						adis16220_addresses[chan
								    ->address]
						[addrind].addr,
						&uval);
		if (ret)
			return ret;
		bits = adis16220_addresses[chan->address][addrind].bits;
		uval &= (1 << bits) - 1;
		*val = uval;
		return IIO_VAL_INT;
	}
}

static const struct iio_chan_spec adis16220_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.extend_name = "supply",
		.info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
		.address = in_supply,
	}, {
		.type = IIO_ACCEL,
		.info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
		IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
		IIO_CHAN_INFO_PEAK_SEPARATE_BIT,
		.address = accel,
	}, {
		.type = IIO_TEMP,
		.indexed = 1,
		.channel = 0,
		.info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
		IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
		.address = temp,
	}, {
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 1,
		.info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
		IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
		.address = in_1,
	}, {
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 2,
		.address = in_2,
	}
};

static struct attribute *adis16220_attributes[] = {
	&iio_dev_attr_reset.dev_attr.attr,
	&iio_dev_attr_capture.dev_attr.attr,
	&iio_dev_attr_capture_count.dev_attr.attr,
	NULL
};

static const struct attribute_group adis16220_attribute_group = {
	.attrs = adis16220_attributes,
};

static const struct iio_info adis16220_info = {
	.attrs = &adis16220_attribute_group,
	.driver_module = THIS_MODULE,
	.read_raw = &adis16220_read_raw,
};

static int __devinit adis16220_probe(struct spi_device *spi)
{
	int ret;
	struct adis16220_state *st;
	struct iio_dev *indio_dev;

	/* setup the industrialio driver allocated elements */
	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	st = iio_priv(indio_dev);
	/* this is only used for removal purposes */
	spi_set_drvdata(spi, indio_dev);

	st->us = spi;
	mutex_init(&st->buf_lock);

	indio_dev->name = spi->dev.driver->name;
	indio_dev->dev.parent = &spi->dev;
	indio_dev->info = &adis16220_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = adis16220_channels;
	indio_dev->num_channels = ARRAY_SIZE(adis16220_channels);

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_free_dev;

	ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin);
	if (ret)
		goto error_unregister_dev;

	ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc1_bin);
	if (ret)
		goto error_rm_accel_bin;

	ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc2_bin);
	if (ret)
		goto error_rm_adc1_bin;

	/* Get the device into a sane initial state */
	ret = adis16220_initial_setup(indio_dev);
	if (ret)
		goto error_rm_adc2_bin;
	return 0;

error_rm_adc2_bin:
	sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
error_rm_adc1_bin:
	sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
error_rm_accel_bin:
	sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
error_unregister_dev:
	iio_device_unregister(indio_dev);
error_free_dev:
	iio_free_device(indio_dev);
error_ret:
	return ret;
}

static int adis16220_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);

	flush_scheduled_work();

	sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
	sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
	sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
	iio_device_unregister(indio_dev);
	iio_free_device(indio_dev);

	return 0;
}

static struct spi_driver adis16220_driver = {
	.driver = {
		.name = "adis16220",
		.owner = THIS_MODULE,
	},
	.probe = adis16220_probe,
	.remove = __devexit_p(adis16220_remove),
};
module_spi_driver(adis16220_driver);

MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16220 Digital Vibration Sensor");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:adis16220");
gpl-2.0
SaatvikShukla/android_kernel_sony_msm8974-GPE
drivers/staging/iio/accel/adis16209_core.c
4964
13433
/*
 * ADIS16209 Programmable Digital Vibration Sensor driver
 *
 * Copyright 2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/module.h>

#include "../iio.h"
#include "../sysfs.h"
#include "../buffer.h"
#include "adis16209.h"

#define DRIVER_NAME		"adis16209"

/**
 * adis16209_spi_write_reg_8() - write single byte to a register
 * @indio_dev: iio device associated with actual device
 * @reg_address: the address of the register to be written
 * @val: the value to write
 **/
static int adis16209_spi_write_reg_8(struct iio_dev *indio_dev,
		u8 reg_address,
		u8 val)
{
	int ret;
	struct adis16209_state *st = iio_priv(indio_dev);

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16209_WRITE_REG(reg_address);
	st->tx[1] = val;

	ret = spi_write(st->us, st->tx, 2);
	mutex_unlock(&st->buf_lock);

	return ret;
}

/**
 * adis16209_spi_write_reg_16() - write 2 bytes to a pair of registers
 * @indio_dev: iio device associated actual device
 * @lower_reg_address: the address of the lower of the two registers. Second
 * register is assumed to have address one greater.
 * @value: value to be written
 *
 * Two byte writes (low byte first) in one SPI message.
 **/
static int adis16209_spi_write_reg_16(struct iio_dev *indio_dev,
		u8 lower_reg_address,
		u16 value)
{
	int ret;
	struct spi_message msg;
	struct adis16209_state *st = iio_priv(indio_dev);
	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.delay_usecs = 30,
		}, {
			.tx_buf = st->tx + 2,
			.bits_per_word = 8,
			.len = 2,
			.delay_usecs = 30,
		},
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16209_WRITE_REG(lower_reg_address);
	st->tx[1] = value & 0xFF;
	st->tx[2] = ADIS16209_WRITE_REG(lower_reg_address + 1);
	st->tx[3] = (value >> 8) & 0xFF;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);

	return ret;
}

/**
 * adis16209_spi_read_reg_16() - read 2 bytes from a 16-bit register
 * @indio_dev: iio device associated with device
 * @lower_reg_address: the address of the lower of the two registers. Second
 * register is assumed to have address one greater.
 * @val: somewhere to pass back the value read
 **/
static int adis16209_spi_read_reg_16(struct iio_dev *indio_dev,
		u8 lower_reg_address,
		u16 *val)
{
	struct spi_message msg;
	struct adis16209_state *st = iio_priv(indio_dev);
	int ret;
	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.delay_usecs = 30,
		}, {
			.rx_buf = st->rx,
			.bits_per_word = 8,
			.len = 2,
			.delay_usecs = 30,
		},
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16209_READ_REG(lower_reg_address);
	st->tx[1] = 0;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	if (ret) {
		dev_err(&st->us->dev,
			"problem when reading 16 bit register 0x%02X",
			lower_reg_address);
		goto error_ret;
	}
	/* Device returns MSB first. */
	*val = (st->rx[0] << 8) | st->rx[1];

error_ret:
	mutex_unlock(&st->buf_lock);
	return ret;
}

/* Issue a software reset via the global command register. */
static int adis16209_reset(struct iio_dev *indio_dev)
{
	int ret;

	ret = adis16209_spi_write_reg_8(indio_dev,
			ADIS16209_GLOB_CMD,
			ADIS16209_GLOB_CMD_SW_RESET);
	if (ret)
		dev_err(&indio_dev->dev, "problem resetting device");

	return ret;
}

/* sysfs store for "reset": accepts '1'/'y'/'Y' as a trigger. */
static ssize_t adis16209_write_reset(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	if (len < 1)
		return -EINVAL;
	switch (buf[0]) {
	case '1':
	case 'y':
	case 'Y':
		return adis16209_reset(indio_dev);
	}
	return -EINVAL;
}

/* Enable/disable the data-ready interrupt on DIO2 (active high). */
int adis16209_set_irq(struct iio_dev *indio_dev, bool enable)
{
	int ret = 0;
	u16 msc;

	ret = adis16209_spi_read_reg_16(indio_dev, ADIS16209_MSC_CTRL, &msc);
	if (ret)
		goto error_ret;

	msc |= ADIS16209_MSC_CTRL_ACTIVE_HIGH;
	msc &= ~ADIS16209_MSC_CTRL_DATA_RDY_DIO2;
	if (enable)
		msc |= ADIS16209_MSC_CTRL_DATA_RDY_EN;
	else
		msc &= ~ADIS16209_MSC_CTRL_DATA_RDY_EN;

	ret = adis16209_spi_write_reg_16(indio_dev, ADIS16209_MSC_CTRL, msc);

error_ret:
	return ret;
}

/*
 * Read DIAG_STAT, log every flagged error condition and return the
 * (masked) error bits so 0 means "healthy".
 */
static int adis16209_check_status(struct iio_dev *indio_dev)
{
	u16 status;
	int ret;

	ret = adis16209_spi_read_reg_16(indio_dev, ADIS16209_DIAG_STAT,
					&status);
	if (ret < 0) {
		dev_err(&indio_dev->dev, "Reading status failed\n");
		goto error_ret;
	}
	ret = status & 0x1F;	/* only the low 5 bits carry error flags */

	if (status & ADIS16209_DIAG_STAT_SELFTEST_FAIL)
		dev_err(&indio_dev->dev, "Self test failure\n");
	if (status & ADIS16209_DIAG_STAT_SPI_FAIL)
		dev_err(&indio_dev->dev, "SPI failure\n");
	if (status & ADIS16209_DIAG_STAT_FLASH_UPT)
		dev_err(&indio_dev->dev, "Flash update failed\n");
	if (status & ADIS16209_DIAG_STAT_POWER_HIGH)
		dev_err(&indio_dev->dev, "Power supply above 3.625V\n");
	if (status & ADIS16209_DIAG_STAT_POWER_LOW)
		dev_err(&indio_dev->dev, "Power supply below 3.15V\n");

error_ret:
	return ret;
}

/* Start the built-in self test; result is reported via DIAG_STAT. */
static int adis16209_self_test(struct iio_dev *indio_dev)
{
	int ret;

	ret = adis16209_spi_write_reg_16(indio_dev,
			ADIS16209_MSC_CTRL,
			ADIS16209_MSC_CTRL_SELF_TEST_EN);
	if (ret) {
		dev_err(&indio_dev->dev, "problem starting self test");
		goto err_ret;
	}

	adis16209_check_status(indio_dev);

err_ret:
	return ret;
}

/* Bring the device to a sane state: irq off, self test, status check
 * with one reset-and-retry before giving up. */
static int adis16209_initial_setup(struct iio_dev *indio_dev)
{
	int ret;

	/* Disable IRQ */
	ret = adis16209_set_irq(indio_dev, false);
	if (ret) {
		dev_err(&indio_dev->dev, "disable irq failed");
		goto err_ret;
	}

	/* Do self test */
	ret = adis16209_self_test(indio_dev);
	if (ret) {
		dev_err(&indio_dev->dev, "self test failure");
		goto err_ret;
	}

	/* Read status register to check the result */
	ret = adis16209_check_status(indio_dev);
	if (ret) {
		adis16209_reset(indio_dev);
		dev_err(&indio_dev->dev, "device not playing ball -> reset");
		msleep(ADIS16209_STARTUP_DELAY);
		ret = adis16209_check_status(indio_dev);
		if (ret) {
			dev_err(&indio_dev->dev, "giving up");
			goto err_ret;
		}
	}

err_ret:
	return ret;
}

enum adis16209_chan {
	in_supply,
	temp,
	accel_x,
	accel_y,
	incli_x,
	incli_y,
	in_aux,
	rot,
};

/* [channel] = { output register, calibration/null register } */
static const u8 adis16209_addresses[8][2] = {
	[in_supply] = { ADIS16209_SUPPLY_OUT },
	[in_aux] = { ADIS16209_AUX_ADC },
	[accel_x] = { ADIS16209_XACCL_OUT, ADIS16209_XACCL_NULL },
	[accel_y] = { ADIS16209_YACCL_OUT, ADIS16209_YACCL_NULL },
	[incli_x] = { ADIS16209_XINCL_OUT, ADIS16209_XINCL_NULL },
	[incli_y] = { ADIS16209_YINCL_OUT, ADIS16209_YINCL_NULL },
	[rot] = { ADIS16209_ROT_OUT },
	[temp] = { ADIS16209_TEMP_OUT },
};

static int adis16209_write_raw(struct iio_dev *indio_dev,
			       struct iio_chan_spec const *chan,
			       int val,
			       int val2,
			       long mask)
{
	int bits;
	s16 val16;
	u8 addr;

	switch (mask) {
	case IIO_CHAN_INFO_CALIBBIAS:
		switch (chan->type) {
		case IIO_ACCEL:
		case IIO_INCLI:
			bits = 14;
			break;
		default:
			return -EINVAL;
		}
		val16 = val & ((1 << bits) - 1);
		addr = adis16209_addresses[chan->address][1];
		return adis16209_spi_write_reg_16(indio_dev, addr, val16);
	}
	return -EINVAL;
}

static int adis16209_read_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      int *val, int *val2,
			      long mask)
{
	int ret;
	int bits;
	u8 addr;
	s16 val16;

	switch (mask) {
	case 0:
		mutex_lock(&indio_dev->mlock);
		addr = adis16209_addresses[chan->address][0];
		/*
		 * Cast needed: the register helper takes u16 *, the raw
		 * value is then sign-extended below for signed channels.
		 * (Passing &val16 directly is an incompatible-pointer-type
		 * error with -Werror.)
		 */
		ret = adis16209_spi_read_reg_16(indio_dev, addr,
						(u16 *)&val16);
		if (ret) {
			mutex_unlock(&indio_dev->mlock);
			return ret;
		}

		if (val16 & ADIS16209_ERROR_ACTIVE) {
			ret = adis16209_check_status(indio_dev);
			if (ret) {
				mutex_unlock(&indio_dev->mlock);
				return ret;
			}
		}
		val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
		if (chan->scan_type.sign == 's')
			val16 = (s16)(val16 <<
				      (16 - chan->scan_type.realbits)) >>
				(16 - chan->scan_type.realbits);
		*val = val16;
		mutex_unlock(&indio_dev->mlock);
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_VOLTAGE:
			*val = 0;
			if (chan->channel == 0)
				*val2 = 305180;
			else
				*val2 = 610500;
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_TEMP:
			*val = 0;
			*val2 = -470000;
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_ACCEL:
			*val = 0;
			*val2 = 2394;
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_INCLI:
			*val = 0;
			*val2 = 436;
			return IIO_VAL_INT_PLUS_MICRO;
		default:
			return -EINVAL;
		}
		break;
	case IIO_CHAN_INFO_OFFSET:
		*val = 25;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_CALIBBIAS:
		switch (chan->type) {
		case IIO_ACCEL:
			bits = 14;
			break;
		default:
			return -EINVAL;
		}
		mutex_lock(&indio_dev->mlock);
		addr = adis16209_addresses[chan->address][1];
		ret = adis16209_spi_read_reg_16(indio_dev, addr,
						(u16 *)&val16);
		if (ret) {
			mutex_unlock(&indio_dev->mlock);
			return ret;
		}
		/* Mask to valid bits, then sign-extend. */
		val16 &= (1 << bits) - 1;
		val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
		*val = val16;
		mutex_unlock(&indio_dev->mlock);
		return IIO_VAL_INT;
	}
	return -EINVAL;
}

static struct iio_chan_spec adis16209_channels[] = {
	IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
		 IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
		 in_supply, ADIS16209_SCAN_SUPPLY,
		 IIO_ST('u', 14, 16, 0), 0),
	IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
		 IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
		 IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
		 temp, ADIS16209_SCAN_TEMP,
		 IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
		 IIO_CHAN_INFO_SCALE_SHARED_BIT |
		 IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
		 accel_x, ADIS16209_SCAN_ACC_X,
		 IIO_ST('s', 14, 16, 0), 0),
	IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
		 IIO_CHAN_INFO_SCALE_SHARED_BIT |
		 IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
		 accel_y, ADIS16209_SCAN_ACC_Y,
		 IIO_ST('s', 14, 16, 0), 0),
	IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
		 IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
		 in_aux, ADIS16209_SCAN_AUX_ADC,
		 IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
		 IIO_CHAN_INFO_SCALE_SHARED_BIT,
		 incli_x, ADIS16209_SCAN_INCLI_X,
		 IIO_ST('s', 14, 16, 0), 0),
	IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
		 IIO_CHAN_INFO_SCALE_SHARED_BIT,
		 incli_y, ADIS16209_SCAN_INCLI_Y,
		 IIO_ST('s', 14, 16, 0), 0),
	IIO_CHAN(IIO_ROT, 0, 1, 0, NULL, 0, IIO_MOD_X,
		 0,
		 rot, ADIS16209_SCAN_ROT,
		 IIO_ST('s', 14, 16, 0), 0),
	IIO_CHAN_SOFT_TIMESTAMP(8)
};

static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16209_write_reset, 0);

static struct attribute *adis16209_attributes[] = {
	&iio_dev_attr_reset.dev_attr.attr,
	NULL
};

static const struct attribute_group adis16209_attribute_group = {
	.attrs = adis16209_attributes,
};

static const struct iio_info adis16209_info = {
	.attrs = &adis16209_attribute_group,
	.read_raw = &adis16209_read_raw,
	.write_raw = &adis16209_write_raw,
	.driver_module = THIS_MODULE,
};

static int __devinit adis16209_probe(struct spi_device *spi)
{
	int ret;
	struct adis16209_state *st;
	struct iio_dev *indio_dev;

	/* setup the industrialio driver allocated elements */
	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	st = iio_priv(indio_dev);
	/* this is only used for removal purposes */
	spi_set_drvdata(spi, indio_dev);
	st->us = spi;
	mutex_init(&st->buf_lock);

	indio_dev->name = spi->dev.driver->name;
	indio_dev->dev.parent = &spi->dev;
	indio_dev->info = &adis16209_info;
	indio_dev->channels = adis16209_channels;
	indio_dev->num_channels = ARRAY_SIZE(adis16209_channels);
	indio_dev->modes = INDIO_DIRECT_MODE;

	ret = adis16209_configure_ring(indio_dev);
	if (ret)
		goto error_free_dev;

	ret = iio_buffer_register(indio_dev,
				  adis16209_channels,
				  ARRAY_SIZE(adis16209_channels));
	if (ret) {
		printk(KERN_ERR "failed to initialize the ring\n");
		goto error_unreg_ring_funcs;
	}

	if (spi->irq) {
		ret = adis16209_probe_trigger(indio_dev);
		if (ret)
			goto error_uninitialize_ring;
	}

	/* Get the device into a sane initial state */
	ret = adis16209_initial_setup(indio_dev);
	if (ret)
		goto error_remove_trigger;
	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_remove_trigger;
	return 0;

error_remove_trigger:
	adis16209_remove_trigger(indio_dev);
error_uninitialize_ring:
	iio_buffer_unregister(indio_dev);
error_unreg_ring_funcs:
	adis16209_unconfigure_ring(indio_dev);
error_free_dev:
	iio_free_device(indio_dev);
error_ret:
	return ret;
}

static int adis16209_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);

	flush_scheduled_work();

	iio_device_unregister(indio_dev);
	adis16209_remove_trigger(indio_dev);
	iio_buffer_unregister(indio_dev);
	adis16209_unconfigure_ring(indio_dev);
	iio_free_device(indio_dev);

	return 0;
}

static struct spi_driver adis16209_driver = {
	.driver = {
		.name = "adis16209",
		.owner = THIS_MODULE,
	},
	.probe = adis16209_probe,
	.remove = __devexit_p(adis16209_remove),
};
module_spi_driver(adis16209_driver);

MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16209 Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:adis16209");
gpl-2.0
davidmueller13/arter97_bb
drivers/platform/x86/classmate-laptop.c
8036
17418
/*
 * Copyright (C) 2009 Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <acpi/acpi_drivers.h>
#include <linux/backlight.h>
#include <linux/input.h>
#include <linux/rfkill.h>

MODULE_LICENSE("GPL");

/* Per-device state for the accelerometer sub-driver. */
struct cmpc_accel {
	int sensitivity;
};

#define CMPC_ACCEL_SENSITIVITY_DEFAULT		5

/* ACPI hardware IDs for the Classmate PC sub-devices. */
#define CMPC_ACCEL_HID		"ACCE0000"
#define CMPC_TABLET_HID		"TBLT0000"
#define CMPC_IPML_HID	"IPML200"
#define CMPC_KEYS_HID		"FnBT0000"

/*
 * Generic input device code.
 */

typedef void (*input_device_init)(struct input_dev *dev);

/*
 * Allocate and register an input device for an ACPI notify device,
 * storing it as the ACPI device's drvdata.  @idev_init customizes the
 * input device (event bits, open/close hooks) before registration.
 */
static int cmpc_add_acpi_notify_device(struct acpi_device *acpi, char *name,
				       input_device_init idev_init)
{
	struct input_dev *inputdev;
	int error;

	inputdev = input_allocate_device();
	if (!inputdev)
		return -ENOMEM;
	inputdev->name = name;
	inputdev->dev.parent = &acpi->dev;
	idev_init(inputdev);
	error = input_register_device(inputdev);
	if (error) {
		input_free_device(inputdev);
		return error;
	}
	dev_set_drvdata(&acpi->dev, inputdev);
	return 0;
}

/* Unregister (and thereby free) the input device created above. */
static int cmpc_remove_acpi_notify_device(struct acpi_device *acpi)
{
	struct input_dev *inputdev = dev_get_drvdata(&acpi->dev);
	input_unregister_device(inputdev);
	return 0;
}

/*
 * Accelerometer code.
 */

/* Evaluate ACMD(0x3, ...) to start accelerometer event reporting. */
static acpi_status cmpc_start_accel(acpi_handle handle)
{
	union acpi_object param[2];
	struct acpi_object_list input;
	acpi_status status;

	param[0].type = ACPI_TYPE_INTEGER;
	param[0].integer.value = 0x3;
	param[1].type = ACPI_TYPE_INTEGER;
	/*
	 * NOTE(review): param[1].integer.value is never set here (and in
	 * cmpc_stop_accel below) — presumably the firmware ignores the
	 * second argument for these commands; confirm against the DSDT.
	 */
	input.count = 2;
	input.pointer = param;
	status = acpi_evaluate_object(handle, "ACMD", &input, NULL);
	return status;
}

/* Evaluate ACMD(0x4, ...) to stop accelerometer event reporting. */
static acpi_status cmpc_stop_accel(acpi_handle handle)
{
	union acpi_object param[2];
	struct acpi_object_list input;
	acpi_status status;

	param[0].type = ACPI_TYPE_INTEGER;
	param[0].integer.value = 0x4;
	param[1].type = ACPI_TYPE_INTEGER;
	input.count = 2;
	input.pointer = param;
	status = acpi_evaluate_object(handle, "ACMD", &input, NULL);
	return status;
}

/* Evaluate ACMD(0x02, val) to set the accelerometer sensitivity. */
static acpi_status cmpc_accel_set_sensitivity(acpi_handle handle, int val)
{
	union acpi_object param[2];
	struct acpi_object_list input;

	param[0].type = ACPI_TYPE_INTEGER;
	param[0].integer.value = 0x02;
	param[1].type = ACPI_TYPE_INTEGER;
	param[1].integer.value = val;
	input.count = 2;
	input.pointer = param;
	return acpi_evaluate_object(handle, "ACMD", &input, NULL);
}

/*
 * Evaluate ACMD(0x01, ...) and extract the x/y/z readings from the
 * returned buffer object.  On success the returned buffer is freed.
 */
static acpi_status cmpc_get_accel(acpi_handle handle,
				  unsigned char *x,
				  unsigned char *y,
				  unsigned char *z)
{
	union acpi_object param[2];
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, 0 };
	unsigned char *locs;
	acpi_status status;

	param[0].type = ACPI_TYPE_INTEGER;
	param[0].integer.value = 0x01;
	param[1].type = ACPI_TYPE_INTEGER;
	input.count = 2;
	input.pointer = param;
	status = acpi_evaluate_object(handle, "ACMD", &input, &output);
	if (ACPI_SUCCESS(status)) {
		union acpi_object *obj;
		/*
		 * NOTE(review): obj->type is not checked before the buffer
		 * dereference — assumes firmware always returns a buffer
		 * of at least 3 bytes; verify against the DSDT.
		 */
		obj = output.pointer;
		locs = obj->buffer.pointer;
		*x = locs[0];
		*y = locs[1];
		*z = locs[2];
		kfree(output.pointer);
	}
	return status;
}

/* ACPI notify handler: 0x81 signals new accelerometer data. */
static void cmpc_accel_handler(struct acpi_device *dev, u32 event)
{
	if (event == 0x81) {
		unsigned char x, y, z;
		acpi_status status;

		status = cmpc_get_accel(dev->handle, &x, &y, &z);
		if (ACPI_SUCCESS(status)) {
			struct input_dev *inputdev = dev_get_drvdata(&dev->dev);

			input_report_abs(inputdev, ABS_X, x);
			input_report_abs(inputdev, ABS_Y, y);
			input_report_abs(inputdev, ABS_Z, z);
			input_sync(inputdev);
		}
	}
}

/* sysfs show for the cached sensitivity value. */
static ssize_t cmpc_accel_sensitivity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct acpi_device *acpi;
	struct input_dev *inputdev;
	struct cmpc_accel *accel;

	acpi = to_acpi_device(dev);
	inputdev = dev_get_drvdata(&acpi->dev);
	accel = dev_get_drvdata(&inputdev->dev);

	return sprintf(buf, "%d\n", accel->sensitivity);
}

/* sysfs store: parse, cache, and push the sensitivity to firmware. */
static ssize_t cmpc_accel_sensitivity_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct acpi_device *acpi;
	struct input_dev *inputdev;
	struct cmpc_accel *accel;
	unsigned long sensitivity;
	int r;

	acpi = to_acpi_device(dev);
	inputdev = dev_get_drvdata(&acpi->dev);
	accel = dev_get_drvdata(&inputdev->dev);

	r = strict_strtoul(buf, 0, &sensitivity);
	if (r)
		return r;

	accel->sensitivity = sensitivity;
	cmpc_accel_set_sensitivity(acpi->handle, sensitivity);

	return strnlen(buf, count);
}

static struct device_attribute cmpc_accel_sensitivity_attr = {
	.attr = { .name = "sensitivity", .mode = 0660 },
	.show = cmpc_accel_sensitivity_show,
	.store = cmpc_accel_sensitivity_store
};

/* input open hook: start firmware-side accelerometer reporting. */
static int cmpc_accel_open(struct input_dev *input)
{
	struct acpi_device *acpi;

	acpi = to_acpi_device(input->dev.parent);
	if (ACPI_SUCCESS(cmpc_start_accel(acpi->handle)))
		return 0;
	return -EIO;
}

/* input close hook: stop firmware-side accelerometer reporting. */
static void cmpc_accel_close(struct input_dev *input)
{
	struct acpi_device *acpi;

	acpi = to_acpi_device(input->dev.parent);
	cmpc_stop_accel(acpi->handle);
}

/* Configure the accel input device: 3 absolute axes, 0-255 range. */
static void cmpc_accel_idev_init(struct input_dev *inputdev)
{
	set_bit(EV_ABS, inputdev->evbit);
	input_set_abs_params(inputdev, ABS_X, 0, 255, 8, 0);
	input_set_abs_params(inputdev, ABS_Y, 0, 255, 8, 0);
	input_set_abs_params(inputdev, ABS_Z, 0, 255, 8, 0);
	inputdev->open = cmpc_accel_open;
	inputdev->close = cmpc_accel_close;
}

/* .add: allocate state, create the sensitivity attr and input device. */
static int cmpc_accel_add(struct acpi_device *acpi)
{
	int error;
	struct input_dev *inputdev;
	struct cmpc_accel *accel;

	accel = kmalloc(sizeof(*accel), GFP_KERNEL);
	if (!accel)
		return -ENOMEM;

	accel->sensitivity = CMPC_ACCEL_SENSITIVITY_DEFAULT;
	cmpc_accel_set_sensitivity(acpi->handle, accel->sensitivity);

	error = device_create_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
	if (error)
		goto failed_file;

	error = cmpc_add_acpi_notify_device(acpi, "cmpc_accel",
					    cmpc_accel_idev_init);
	if (error)
		goto failed_input;

	inputdev = dev_get_drvdata(&acpi->dev);
	dev_set_drvdata(&inputdev->dev, accel);
	return 0;

failed_input:
	device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
failed_file:
	kfree(accel);
	return error;
}

/* .remove: tear down attr and input device.
 * NOTE(review): the cmpc_accel allocation fetched into 'accel' is never
 * kfree()d here — looks like a leak; confirm against upstream history. */
static int cmpc_accel_remove(struct acpi_device *acpi, int type)
{
	struct input_dev *inputdev;
	struct cmpc_accel *accel;

	inputdev = dev_get_drvdata(&acpi->dev);
	accel = dev_get_drvdata(&inputdev->dev);

	device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
	return cmpc_remove_acpi_notify_device(acpi);
}

static const struct acpi_device_id cmpc_accel_device_ids[] = {
	{CMPC_ACCEL_HID, 0},
	{"", 0}
};

static struct acpi_driver cmpc_accel_acpi_driver = {
	.owner = THIS_MODULE,
	.name = "cmpc_accel",
	.class = "cmpc_accel",
	.ids = cmpc_accel_device_ids,
	.ops = {
		.add = cmpc_accel_add,
		.remove = cmpc_accel_remove,
		.notify = cmpc_accel_handler,
	}
};

/*
 * Tablet mode code.
 */

/* Evaluate TCMD(0x01) to read the current tablet-mode state. */
static acpi_status cmpc_get_tablet(acpi_handle handle,
				   unsigned long long *value)
{
	union acpi_object param;
	struct acpi_object_list input;
	unsigned long long output;
	acpi_status status;

	param.type = ACPI_TYPE_INTEGER;
	param.integer.value = 0x01;
	input.count = 1;
	input.pointer = &param;
	status = acpi_evaluate_integer(handle, "TCMD", &input, &output);
	if (ACPI_SUCCESS(status))
		*value = output;
	return status;
}

/* ACPI notify handler: 0x81 signals a tablet-mode switch change.
 * Firmware reports the inverse of SW_TABLET_MODE, hence !val. */
static void cmpc_tablet_handler(struct acpi_device *dev, u32 event)
{
	unsigned long long val = 0;
	struct input_dev *inputdev = dev_get_drvdata(&dev->dev);

	if (event == 0x81) {
		if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val)))
			input_report_switch(inputdev, SW_TABLET_MODE, !val);
	}
}

/* Configure the tablet input device and report the initial state. */
static void cmpc_tablet_idev_init(struct input_dev *inputdev)
{
	unsigned long long val = 0;
	struct acpi_device *acpi;

	set_bit(EV_SW, inputdev->evbit);
	set_bit(SW_TABLET_MODE, inputdev->swbit);

	acpi = to_acpi_device(inputdev->dev.parent);
	if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val)))
		input_report_switch(inputdev, SW_TABLET_MODE, !val);
}

static int cmpc_tablet_add(struct acpi_device *acpi)
{
	return cmpc_add_acpi_notify_device(acpi, "cmpc_tablet",
					   cmpc_tablet_idev_init);
}

static int cmpc_tablet_remove(struct acpi_device *acpi, int type)
{
	return cmpc_remove_acpi_notify_device(acpi);
}

/* Re-report the switch state after resume (it may have changed). */
static int cmpc_tablet_resume(struct acpi_device *acpi)
{
	struct input_dev *inputdev = dev_get_drvdata(&acpi->dev);
	unsigned long long val = 0;
	if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val)))
		input_report_switch(inputdev, SW_TABLET_MODE, !val);
	return 0;
}

static const struct acpi_device_id cmpc_tablet_device_ids[] = {
	{CMPC_TABLET_HID, 0},
	{"", 0}
};

static struct acpi_driver cmpc_tablet_acpi_driver = {
	.owner = THIS_MODULE,
	.name = "cmpc_tablet",
	.class = "cmpc_tablet",
	.ids = cmpc_tablet_device_ids,
	.ops = {
		.add = cmpc_tablet_add,
		.remove = cmpc_tablet_remove,
		.resume = cmpc_tablet_resume,
		.notify = cmpc_tablet_handler,
	}
};

/*
 * Backlight code.
 */

/* Evaluate GRDI(0xC0) to read the current brightness. */
static acpi_status cmpc_get_brightness(acpi_handle handle,
				       unsigned long long *value)
{
	union acpi_object param;
	struct acpi_object_list input;
	unsigned long long output;
	acpi_status status;

	param.type = ACPI_TYPE_INTEGER;
	param.integer.value = 0xC0;
	input.count = 1;
	input.pointer = &param;
	status = acpi_evaluate_integer(handle, "GRDI", &input, &output);
	if (ACPI_SUCCESS(status))
		*value = output;
	return status;
}

/* Evaluate GWRI(0xC0, value) to set the brightness. */
static acpi_status cmpc_set_brightness(acpi_handle handle,
				       unsigned long long value)
{
	union acpi_object param[2];
	struct acpi_object_list input;
	acpi_status status;
	unsigned long long output;

	param[0].type = ACPI_TYPE_INTEGER;
	param[0].integer.value = 0xC0;
	param[1].type = ACPI_TYPE_INTEGER;
	param[1].integer.value = value;
	input.count = 2;
	input.pointer = param;
	status = acpi_evaluate_integer(handle, "GWRI", &input, &output);
	return status;
}

/* backlight_ops.get_brightness: -1 on ACPI failure. */
static int cmpc_bl_get_brightness(struct backlight_device *bd)
{
	acpi_status status;
	acpi_handle handle;
	unsigned long long brightness;

	handle = bl_get_data(bd);
	status = cmpc_get_brightness(handle, &brightness);
	if (ACPI_SUCCESS(status))
		return brightness;
	else
		return -1;
}

/* backlight_ops.update_status: push props.brightness to firmware. */
static int cmpc_bl_update_status(struct backlight_device *bd)
{
	acpi_status status;
	acpi_handle handle;

	handle = bl_get_data(bd);
	status = cmpc_set_brightness(handle, bd->props.brightness);
	if (ACPI_SUCCESS(status))
		return 0;
	else
		return -1;
}

static const struct backlight_ops cmpc_bl_ops = {
	.get_brightness = cmpc_bl_get_brightness,
	.update_status = cmpc_bl_update_status
};

/*
 * RFKILL code.
 */
*/ static acpi_status cmpc_get_rfkill_wlan(acpi_handle handle, unsigned long long *value) { union acpi_object param; struct acpi_object_list input; unsigned long long output; acpi_status status; param.type = ACPI_TYPE_INTEGER; param.integer.value = 0xC1; input.count = 1; input.pointer = &param; status = acpi_evaluate_integer(handle, "GRDI", &input, &output); if (ACPI_SUCCESS(status)) *value = output; return status; } static acpi_status cmpc_set_rfkill_wlan(acpi_handle handle, unsigned long long value) { union acpi_object param[2]; struct acpi_object_list input; acpi_status status; unsigned long long output; param[0].type = ACPI_TYPE_INTEGER; param[0].integer.value = 0xC1; param[1].type = ACPI_TYPE_INTEGER; param[1].integer.value = value; input.count = 2; input.pointer = param; status = acpi_evaluate_integer(handle, "GWRI", &input, &output); return status; } static void cmpc_rfkill_query(struct rfkill *rfkill, void *data) { acpi_status status; acpi_handle handle; unsigned long long state; bool blocked; handle = data; status = cmpc_get_rfkill_wlan(handle, &state); if (ACPI_SUCCESS(status)) { blocked = state & 1 ? false : true; rfkill_set_sw_state(rfkill, blocked); } } static int cmpc_rfkill_block(void *data, bool blocked) { acpi_status status; acpi_handle handle; unsigned long long state; bool is_blocked; handle = data; status = cmpc_get_rfkill_wlan(handle, &state); if (ACPI_FAILURE(status)) return -ENODEV; /* Check if we really need to call cmpc_set_rfkill_wlan */ is_blocked = state & 1 ? false : true; if (is_blocked != blocked) { state = blocked ? 0 : 1; status = cmpc_set_rfkill_wlan(handle, state); if (ACPI_FAILURE(status)) return -ENODEV; } return 0; } static const struct rfkill_ops cmpc_rfkill_ops = { .query = cmpc_rfkill_query, .set_block = cmpc_rfkill_block, }; /* * Common backlight and rfkill code. 
*/ struct ipml200_dev { struct backlight_device *bd; struct rfkill *rf; }; static int cmpc_ipml_add(struct acpi_device *acpi) { int retval; struct ipml200_dev *ipml; struct backlight_properties props; ipml = kmalloc(sizeof(*ipml), GFP_KERNEL); if (ipml == NULL) return -ENOMEM; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = 7; ipml->bd = backlight_device_register("cmpc_bl", &acpi->dev, acpi->handle, &cmpc_bl_ops, &props); if (IS_ERR(ipml->bd)) { retval = PTR_ERR(ipml->bd); goto out_bd; } ipml->rf = rfkill_alloc("cmpc_rfkill", &acpi->dev, RFKILL_TYPE_WLAN, &cmpc_rfkill_ops, acpi->handle); /* * If RFKILL is disabled, rfkill_alloc will return ERR_PTR(-ENODEV). * This is OK, however, since all other uses of the device will not * derefence it. */ if (ipml->rf) { retval = rfkill_register(ipml->rf); if (retval) { rfkill_destroy(ipml->rf); ipml->rf = NULL; } } dev_set_drvdata(&acpi->dev, ipml); return 0; out_bd: kfree(ipml); return retval; } static int cmpc_ipml_remove(struct acpi_device *acpi, int type) { struct ipml200_dev *ipml; ipml = dev_get_drvdata(&acpi->dev); backlight_device_unregister(ipml->bd); if (ipml->rf) { rfkill_unregister(ipml->rf); rfkill_destroy(ipml->rf); } kfree(ipml); return 0; } static const struct acpi_device_id cmpc_ipml_device_ids[] = { {CMPC_IPML_HID, 0}, {"", 0} }; static struct acpi_driver cmpc_ipml_acpi_driver = { .owner = THIS_MODULE, .name = "cmpc", .class = "cmpc", .ids = cmpc_ipml_device_ids, .ops = { .add = cmpc_ipml_add, .remove = cmpc_ipml_remove } }; /* * Extra keys code. 
*/ static int cmpc_keys_codes[] = { KEY_UNKNOWN, KEY_WLAN, KEY_SWITCHVIDEOMODE, KEY_BRIGHTNESSDOWN, KEY_BRIGHTNESSUP, KEY_VENDOR, KEY_UNKNOWN, KEY_CAMERA, KEY_BACK, KEY_FORWARD, KEY_MAX }; static void cmpc_keys_handler(struct acpi_device *dev, u32 event) { struct input_dev *inputdev; int code = KEY_MAX; if ((event & 0x0F) < ARRAY_SIZE(cmpc_keys_codes)) code = cmpc_keys_codes[event & 0x0F]; inputdev = dev_get_drvdata(&dev->dev); input_report_key(inputdev, code, !(event & 0x10)); input_sync(inputdev); } static void cmpc_keys_idev_init(struct input_dev *inputdev) { int i; set_bit(EV_KEY, inputdev->evbit); for (i = 0; cmpc_keys_codes[i] != KEY_MAX; i++) set_bit(cmpc_keys_codes[i], inputdev->keybit); } static int cmpc_keys_add(struct acpi_device *acpi) { return cmpc_add_acpi_notify_device(acpi, "cmpc_keys", cmpc_keys_idev_init); } static int cmpc_keys_remove(struct acpi_device *acpi, int type) { return cmpc_remove_acpi_notify_device(acpi); } static const struct acpi_device_id cmpc_keys_device_ids[] = { {CMPC_KEYS_HID, 0}, {"", 0} }; static struct acpi_driver cmpc_keys_acpi_driver = { .owner = THIS_MODULE, .name = "cmpc_keys", .class = "cmpc_keys", .ids = cmpc_keys_device_ids, .ops = { .add = cmpc_keys_add, .remove = cmpc_keys_remove, .notify = cmpc_keys_handler, } }; /* * General init/exit code. 
*/ static int cmpc_init(void) { int r; r = acpi_bus_register_driver(&cmpc_keys_acpi_driver); if (r) goto failed_keys; r = acpi_bus_register_driver(&cmpc_ipml_acpi_driver); if (r) goto failed_bl; r = acpi_bus_register_driver(&cmpc_tablet_acpi_driver); if (r) goto failed_tablet; r = acpi_bus_register_driver(&cmpc_accel_acpi_driver); if (r) goto failed_accel; return r; failed_accel: acpi_bus_unregister_driver(&cmpc_tablet_acpi_driver); failed_tablet: acpi_bus_unregister_driver(&cmpc_ipml_acpi_driver); failed_bl: acpi_bus_unregister_driver(&cmpc_keys_acpi_driver); failed_keys: return r; } static void cmpc_exit(void) { acpi_bus_unregister_driver(&cmpc_accel_acpi_driver); acpi_bus_unregister_driver(&cmpc_tablet_acpi_driver); acpi_bus_unregister_driver(&cmpc_ipml_acpi_driver); acpi_bus_unregister_driver(&cmpc_keys_acpi_driver); } module_init(cmpc_init); module_exit(cmpc_exit); static const struct acpi_device_id cmpc_device_ids[] = { {CMPC_ACCEL_HID, 0}, {CMPC_TABLET_HID, 0}, {CMPC_IPML_HID, 0}, {CMPC_KEYS_HID, 0}, {"", 0} }; MODULE_DEVICE_TABLE(acpi, cmpc_device_ids);
gpl-2.0
Californication/lge-kernel-msm7x27-ICS-JB
Documentation/prctl/disable-tsc-ctxt-sw-stress-test.c
12900
1724
/*
 * Tests for prctl(PR_GET_TSC, ...) / prctl(PR_SET_TSC, ...)
 *
 * Tests if the control register is updated correctly
 * at context switches
 *
 * Warning: this test will cause a very high load for a few seconds
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <wait.h>

#include <sys/prctl.h>
#include <linux/prctl.h>

/* Get/set the process' ability to use the timestamp counter instruction */
#ifndef PR_GET_TSC
#define PR_GET_TSC 25
#define PR_SET_TSC 26
# define PR_TSC_ENABLE		1   /* allow the use of the timestamp counter */
# define PR_TSC_SIGSEGV		2   /* throw a SIGSEGV instead of reading the TSC */
#endif

/* Read the x86 time-stamp counter (works on both i386 and x86_64). */
static uint64_t rdtsc(void)
{
	uint32_t lo, hi;
	/* We cannot use "=A", since this would use %rax on x86_64 */
	__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
	return (uint64_t)hi << 32 | lo;
}

/* SIGSEGV is expected while the TSC is disabled: swallow it and return. */
static void sigsegv_expect(int sig)
{
	(void)sig;
}

/*
 * Child that disables the TSC and then executes rdtsc: every read must
 * fault into sigsegv_expect(), so the instruction restarts and faults
 * again until the alarm(10) terminates the child.  Reaching the fprintf
 * means rdtsc() completed while disabled -- a test failure.
 */
static void segvtask(void)
{
	if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) < 0)
	{
		perror("prctl");
		/* was exit(0): an error path must not report success */
		exit(EXIT_FAILURE);
	}
	signal(SIGSEGV, sigsegv_expect);
	alarm(10);
	rdtsc();
	fprintf(stderr, "FATAL ERROR, rdtsc() succeeded while disabled\n");
	exit(EXIT_FAILURE);
}

/* A SIGSEGV here means rdtsc() faulted although the TSC was enabled. */
static void sigsegv_fail(int sig)
{
	(void)sig;
	fprintf(stderr, "FATAL ERROR, rdtsc() failed while enabled\n");
	exit(EXIT_FAILURE);
}

/*
 * Child that enables the TSC and spins on rdtsc() until the alarm fires;
 * any fault is fatal.  The spinning children force frequent context
 * switches against the segvtask() children, which is the point of the
 * stress test.
 */
static void rdtsctask(void)
{
	if (prctl(PR_SET_TSC, PR_TSC_ENABLE) < 0)
	{
		perror("prctl");
		exit(EXIT_FAILURE);
	}
	signal(SIGSEGV, sigsegv_fail);
	alarm(10);
	for (;;)
		rdtsc();
}

int main(int argc, char **argv)
{
	int n_tasks = 100, i;

	(void)argc;
	(void)argv;

	fprintf(stderr, "[No further output means we're allright]\n");

	/* Alternate TSC-disabled and TSC-enabled children. */
	for (i = 0; i < n_tasks; i++)
		if (fork() == 0)
		{
			if (i & 1)
				segvtask();
			else
				rdtsctask();
		}

	for (i = 0; i < n_tasks; i++)
		wait(NULL);

	exit(0);
}
gpl-2.0
jefby/uboot-arndale-octa-hyp
board/armltd/integrator/integrator.c
101
4680
/*
 * (C) Copyright 2002
 * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
 * Marius Groeger <mgroeger@sysgo.de>
 *
 * (C) Copyright 2002
 * David Mueller, ELSOFT AG, <d.mueller@elsoft.ch>
 *
 * (C) Copyright 2003
 * Texas Instruments, <www.ti.com>
 * Kshitij Gupta <Kshitij@ti.com>
 *
 * (C) Copyright 2004
 * ARM Ltd.
 * Philippe Robin, <philippe.robin@arm.com>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <netdev.h>
#include <asm/io.h>
#include "arm-ebi.h"
#include "integrator-sc.h"

DECLARE_GLOBAL_DATA_PTR;

void peripheral_power_enable(void);

#if defined(CONFIG_SHOW_BOOT_PROGRESS)
/* Print a boot milestone on the console. */
void show_boot_progress(int progress)
{
	printf("Boot reached stage %d\n", progress);
}
#endif

#define COMP_MODE_ENABLE ((unsigned int)0x0000EAEF)

/*
 * Miscellaneous platform dependent initialisations:
 * set the machine type and boot-parameter address, unprotect the
 * flash memory, and turn the instruction cache on.
 */
int board_init(void)
{
	u32 reg;

	/* arch number of Integrator Board */
#ifdef CONFIG_ARCH_CINTEGRATOR
	gd->bd->bi_arch_number = MACH_TYPE_CINTEGRATOR;
#else
	gd->bd->bi_arch_number = MACH_TYPE_INTEGRATOR;
#endif

	/* address of boot parameters */
	gd->bd->bi_boot_params = 0x00000100;

	gd->flags = 0;

#ifdef CONFIG_CM_REMAP
	extern void cm_remap(void);
	cm_remap();	/* remaps writeable memory to 0x00000000 */
#endif

#ifdef CONFIG_ARCH_CINTEGRATOR
	/*
	 * Flash protection on the Integrator/CP is in a simple register:
	 * enable Vpp and writes in one go.
	 */
	reg = readl(CP_FLASHPROG);
	reg |= (CP_FLASHPROG_FLVPPEN | CP_FLASHPROG_FLWREN);
	writel(reg, CP_FLASHPROG);
#else
	/*
	 * The Integrator/AP has some special protection mechanisms
	 * for the external memories, first the External Bus Interface (EBI)
	 * then the system controller (SC).
	 *
	 * The system comes up with the flash memory non-writable and
	 * configuration locked. If we want U-Boot to be used for flash
	 * access we cannot have the flash memory locked.
	 */
	writel(EBI_UNLOCK_MAGIC, EBI_BASE + EBI_LOCK_REG);
	reg = readl(EBI_BASE + EBI_CSR1_REG);
	reg &= EBI_CSR_WREN_MASK;
	reg |= EBI_CSR_WREN_ENABLE;
	writel(reg, EBI_BASE + EBI_CSR1_REG);
	writel(0, EBI_BASE + EBI_LOCK_REG);

	/*
	 * Set up the system controller to remove write protection from
	 * the flash memory and enable Vpp
	 */
	writel(SC_CTRL_FLASHVPP | SC_CTRL_FLASHWP, SC_CTRLS);
#endif

	icache_enable();

	return 0;
}

int misc_init_r(void)
{
	/* Skip image verification by default on this board. */
	setenv("verify", "n");
	return 0;
}

/*
 * The Integrator remaps the Flash memory to 0x00000000 and executes U-Boot
 * from there, which means we cannot test the RAM underneath the ROM at this
 * point. It will be unmapped later on, when we are executing from the
 * relocated in RAM U-Boot. We simply assume that this RAM is usable if the
 * RAM on higher addresses works fine.
 */
#define REMAPPED_FLASH_SZ 0x40000

int dram_init(void)
{
	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
#ifdef CONFIG_CM_SPD_DETECT
	{
		extern void dram_query(void);
		u32 sdram_reg;
		u32 size_shift;

		dram_query();	/* Assembler accesses to CM registers */
				/* Queries the SPD values */

		/* Obtain the SDRAM size from the CM SDRAM register */
		sdram_reg = readl(CM_BASE + OS_SDRAM);
		/* Register	SDRAM size
		 *
		 * 0xXXXXXXbbb000bb	16 MB
		 * 0xXXXXXXbbb001bb	32 MB
		 * 0xXXXXXXbbb010bb	64 MB
		 * 0xXXXXXXbbb011bb	128 MB
		 * 0xXXXXXXbbb100bb	256 MB
		 *
		 */
		size_shift = ((sdram_reg & 0x0000001C) / 4) % 4;
		gd->ram_size = get_ram_size((long *) CONFIG_SYS_SDRAM_BASE +
					    REMAPPED_FLASH_SZ,
					    0x01000000 << size_shift);
	}
#else
	gd->ram_size = get_ram_size((long *) CONFIG_SYS_SDRAM_BASE +
				    REMAPPED_FLASH_SZ,
				    PHYS_SDRAM_1_SIZE);
#endif /* CM_SPD_DETECT */
	/* We only have one bank of RAM, set it to whatever was detected */
	gd->bd->bi_dram[0].size = gd->ram_size;

	return 0;
}

#ifdef CONFIG_CMD_NET
int board_eth_init(bd_t *bis)
{
	int rc = 0;
#ifdef CONFIG_SMC91111
	rc = smc91111_initialize(0, CONFIG_SMC91111_BASE);
#endif
	rc += pci_eth_init(bis);
	return rc;
}
#endif
gpl-2.0
akhilnarang/ThugLife_sprout
drivers/misc/mediatek/fmradio/mt6626/pub/mt6626_fm_link.c
101
5909
/* mt6626_fm_link.c
 *
 * (C) Copyright 2009
 * MediaTek <www.MediaTek.com>
 * Hongcheng <hongcheng.xia@MediaTek.com>
 *
 * MT6626 FM Radio Driver -- setup data link
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>

#include "fm_typedef.h"
#include "fm_dbg.h"
#include "fm_err.h"
#include "fm_stdlib.h"
#include "mt6626_fm.h"
#include "mt6626_fm_link.h"
#include "mt6626_fm_reg.h"

/* these functions are defined after Linux2.6.32 */
static int fm_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id);
static int fm_i2c_detect(struct i2c_client *client, int kind, struct i2c_board_info *info);
static int fm_i2c_remove(struct i2c_client *client);

static const struct i2c_device_id fm_i2c_id = { MT6626_DEV, 0 };
static unsigned short force[] = { MT6626_I2C_PORT, MT6626_SLAVE_ADDR, I2C_CLIENT_END, I2C_CLIENT_END };
static const unsigned short *const forces[] = { force, NULL };
static struct i2c_client_address_data addr_data = { .forces = forces };

struct i2c_driver MT6626_driver = {
	.probe = fm_i2c_probe,
	.remove = fm_i2c_remove,
	.detect = fm_i2c_detect,
	.driver.name = MT6626_DEV,
	.id_table = &fm_i2c_id,
	.address_data = &addr_data,
};

/* i2c client bound by fm_i2c_probe(); used by all register accessors below */
static struct i2c_client *g_client;

/* Remember the client the i2c core matched for us. */
static int fm_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	int ret = 0;

	WCN_DBG(FM_NTC | LINK, "%s\n", __func__);
	g_client = client;
	return ret;
}

/* Fill in the device type so the i2c core can instantiate the client. */
static int fm_i2c_detect(struct i2c_client *client, int kind, struct i2c_board_info *info)
{
	WCN_DBG(FM_NTC | LINK, "%s\n", __func__);
	strcpy(info->type, MT6626_DEV);
	return 0;
}

static int fm_i2c_remove(struct i2c_client *client)
{
	WCN_DBG(FM_NTC | LINK, "%s\n", __func__);
	return 0;
}

/* Event object shared between the interrupt parser and waiters. */
static struct fm_link_event *link_event;

/*
 * fm_link_setup() - allocate the link event and register the i2c driver.
 * Returns 0 (i2c_add_driver result) on success, -1 on allocation failure.
 */
fm_s32 fm_link_setup(void *data)
{
	link_event = kzalloc(sizeof(struct fm_link_event), GFP_KERNEL);
	if (!link_event) {
		WCN_DBG(FM_ALT | LINK, "kzalloc(fm_link_event) -ENOMEM\n");
		return -1;
	}

	link_event->ln_event = fm_flag_event_create("ln_evt");
	if (!link_event->ln_event) {
		WCN_DBG(FM_ALT | LINK, "create mt6626_ln_event failed\n");
		fm_free(link_event);
		link_event = NULL;	/* don't leave a dangling pointer for fm_link_release() */
		return -1;
	}

	fm_flag_event_get(link_event->ln_event);
	WCN_DBG(FM_NTC | LINK, "fm link setup\n");
	return i2c_add_driver(&MT6626_driver);
}

/*
 * fm_link_release() - drop the link event and unregister the i2c driver.
 *
 * Fix: the original dereferenced link_event->ln_event *before* its
 * NULL check, which would oops if fm_link_setup() had failed; the
 * dereference now lives inside the guard and the pointer is cleared
 * after freeing so a double release is harmless.
 */
fm_s32 fm_link_release(void)
{
	if (link_event) {
		fm_flag_event_put(link_event->ln_event);
		fm_free(link_event);
		link_event = NULL;
	}

	WCN_DBG(FM_NTC | LINK, "fm link release\n");
	i2c_del_driver(&MT6626_driver);
	return 0;
}

/*
 * fm_ctrl_rx
 * the low level func to read a register
 * @addr - register address
 * @val - the pointer of target buf
 * If success, return 0; else error code
 */
fm_s32 fm_ctrl_rx(fm_u8 addr, fm_u16 *val)
{
	fm_s32 n;
	fm_u8 b[2] = { 0 };

	/* first, send addr to MT6626 */
	n = i2c_master_send(g_client, (fm_u8 *) &addr, 1);
	if (n < 0) {
		WCN_DBG(FM_ALT | LINK, "rx 1, [addr=0x%02X] [err=%d]\n", addr, n);
		return -1;
	}

	/* second, receive two byte from MT6626 */
	n = i2c_master_recv(g_client, b, 2);
	if (n < 0) {
		WCN_DBG(FM_ALT | LINK, "rx 2, [addr=0x%02X] [err=%d]\n", addr, n);
		return -2;
	}

	/* registers are big-endian on the wire */
	*val = ((fm_u16) b[0] << 8 | (fm_u16) b[1]);
	return 0;
}

/*
 * fm_ctrl_tx
 * the low level func to write a register
 * @addr - register address
 * @val - value will be written in the register
 * If success, return 0; else error code
 */
fm_s32 fm_ctrl_tx(fm_u8 addr, fm_u16 val)
{
	fm_s32 n;
	fm_u8 b[3];

	/* one message: address byte followed by the value, MSB first */
	b[0] = addr;
	b[1] = (fm_u8) (val >> 8);
	b[2] = (fm_u8) (val & 0xFF);

	n = i2c_master_send(g_client, b, 3);
	if (n < 0) {
		WCN_DBG(FM_ALT | LINK, "tx, [addr=0x%02X] [err=%d]\n", addr, n);
		return -1;
	}

	return 0;
}

/*
 * fm_cmd_tx() - send cmd to FM firmware and wait event
 * @buf - send buffer
 * @len - the length of cmd
 * @mask - the event flag mask
 * @cnt - the retry counter
 * @timeout - timeout per cmd
 * Return 0, if success; error code, if failed
 *
 * NOTE(review): intentionally a stub on this chip -- commands go over
 * i2c via fm_ctrl_rx/fm_ctrl_tx instead.
 */
fm_s32 fm_cmd_tx(fm_u8 *buf, fm_u16 len, fm_s32 mask, fm_s32 cnt, fm_s32 timeout,
		 fm_s32(*callback) (struct fm_res_ctx *result))
{
	return 0;
}

/*
 * fm_wait_stc_done() - block until the STC-done flag is signalled.
 * @sec - timeout handed to FM_EVENT_WAIT_TIMEOUT
 * Returns fm_true on success, fm_false on timeout.
 */
fm_bool fm_wait_stc_done(fm_u32 sec)
{
	fm_s32 ret_time = 0;

	ret_time = FM_EVENT_WAIT_TIMEOUT(link_event->ln_event, FLAG_TEST, sec);
	if (!ret_time) {
		WCN_DBG(FM_WAR | LINK, "wait stc done fail\n");
		return fm_false;
	} else {
		WCN_DBG(FM_DBG | LINK, "wait stc done ok\n");
	}

	FM_EVENT_CLR(link_event->ln_event, FLAG_TEST);
	return fm_true;
}

/*
 * fm_event_parser() - read the interrupt register, acknowledge the bits
 * that are set, wake STC waiters and dispatch RDS data to @rds_parser.
 */
fm_s32 fm_event_parser(fm_s32(*rds_parser) (struct rds_rx_t *, fm_s32))
{
	fm_u16 tmp_reg;

	fm_ctrl_rx(FM_MAIN_INTR, &tmp_reg);

	if (tmp_reg & FM_INTR_STC_DONE) {
		/* clear status flag */
		fm_ctrl_tx(FM_MAIN_INTR, tmp_reg | FM_INTR_STC_DONE);
		FM_EVENT_SEND(link_event->ln_event, FLAG_TEST);
	}

	if (tmp_reg & FM_INTR_RDS) {
		/* clear status flag */
		fm_ctrl_tx(FM_MAIN_INTR, tmp_reg | FM_INTR_RDS);

		/*Handle the RDS data that we get */
		if (rds_parser) {
			rds_parser(NULL, 0);	/* mt6626 rds lib will get rds raw data by itself */
		} else {
			WCN_DBG(FM_WAR | LINK, "no method to parse RDS data\n");
		}
	}

	return 0;
}

/* Force-wake anyone blocked in fm_wait_stc_done(). */
fm_s32 fm_force_active_event(fm_u32 mask)
{
	FM_EVENT_SEND(link_event->ln_event, FLAG_TEST);
	return 0;
}
gpl-2.0
adrianguenter/linux-odroidc2
drivers/iio/adc/ti-ads1015.c
101
17828
/* * ADS1015 - Texas Instruments Analog-to-Digital Converter * * Copyright (c) 2016, Intel Corporation. * * This file is subject to the terms and conditions of version 2 of * the GNU General Public License. See the file COPYING in the main * directory of this archive for more details. * * IIO driver for ADS1015 ADC 7-bit I2C slave address: * * 0x48 - ADDR connected to Ground * * 0x49 - ADDR connected to Vdd * * 0x4A - ADDR connected to SDA * * 0x4B - ADDR connected to SCL */ #include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/regmap.h> #include <linux/pm_runtime.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/i2c/ads1015.h> #include <linux/iio/iio.h> #include <linux/iio/types.h> #include <linux/iio/sysfs.h> #include <linux/iio/buffer.h> #include <linux/iio/triggered_buffer.h> #include <linux/iio/trigger_consumer.h> #define ADS1015_DRV_NAME "ads1015" #define ADS1015_CONV_REG 0x00 #define ADS1015_CFG_REG 0x01 #define ADS1015_CFG_DR_SHIFT 5 #define ADS1015_CFG_MOD_SHIFT 8 #define ADS1015_CFG_PGA_SHIFT 9 #define ADS1015_CFG_MUX_SHIFT 12 #define ADS1015_CFG_DR_MASK GENMASK(7, 5) #define ADS1015_CFG_MOD_MASK BIT(8) #define ADS1015_CFG_PGA_MASK GENMASK(11, 9) #define ADS1015_CFG_MUX_MASK GENMASK(14, 12) /* device operating modes */ #define ADS1015_CONTINUOUS 0 #define ADS1015_SINGLESHOT 1 #define ADS1015_SLEEP_DELAY_MS 2000 #define ADS1015_DEFAULT_PGA 2 #define ADS1015_DEFAULT_DATA_RATE 4 #define ADS1015_DEFAULT_CHAN 0 enum { ADS1015, ADS1115, }; enum ads1015_channels { ADS1015_AIN0_AIN1 = 0, ADS1015_AIN0_AIN3, ADS1015_AIN1_AIN3, ADS1015_AIN2_AIN3, ADS1015_AIN0, ADS1015_AIN1, ADS1015_AIN2, ADS1015_AIN3, ADS1015_TIMESTAMP, }; static const unsigned int ads1015_data_rate[] = { 128, 250, 490, 920, 1600, 2400, 3300, 3300 }; static const unsigned int ads1115_data_rate[] = { 8, 16, 32, 64, 128, 250, 475, 860 }; static const struct { int scale; int uscale; } ads1015_scale[] = { {3, 0}, {2, 0}, {1, 0}, {0, 500000}, {0, 
250000}, {0, 125000}, {0, 125000}, {0, 125000}, }; #define ADS1015_V_CHAN(_chan, _addr) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .address = _addr, \ .channel = _chan, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = _addr, \ .scan_type = { \ .sign = 's', \ .realbits = 12, \ .storagebits = 16, \ .shift = 4, \ .endianness = IIO_CPU, \ }, \ .datasheet_name = "AIN"#_chan, \ } #define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) { \ .type = IIO_VOLTAGE, \ .differential = 1, \ .indexed = 1, \ .address = _addr, \ .channel = _chan, \ .channel2 = _chan2, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = _addr, \ .scan_type = { \ .sign = 's', \ .realbits = 12, \ .storagebits = 16, \ .shift = 4, \ .endianness = IIO_CPU, \ }, \ .datasheet_name = "AIN"#_chan"-AIN"#_chan2, \ } #define ADS1115_V_CHAN(_chan, _addr) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .address = _addr, \ .channel = _chan, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = _addr, \ .scan_type = { \ .sign = 's', \ .realbits = 16, \ .storagebits = 16, \ .endianness = IIO_CPU, \ }, \ .datasheet_name = "AIN"#_chan, \ } #define ADS1115_V_DIFF_CHAN(_chan, _chan2, _addr) { \ .type = IIO_VOLTAGE, \ .differential = 1, \ .indexed = 1, \ .address = _addr, \ .channel = _chan, \ .channel2 = _chan2, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = _addr, \ .scan_type = { \ .sign = 's', \ .realbits = 16, \ .storagebits = 16, \ .endianness = IIO_CPU, \ }, \ .datasheet_name = "AIN"#_chan"-AIN"#_chan2, \ } struct ads1015_data { struct regmap *regmap; /* * Protects ADC ops, e.g: concurrent sysfs/buffered * data reads, configuration updates */ struct mutex lock; struct ads1015_channel_data channel_data[ADS1015_CHANNELS]; unsigned 
int *data_rate; }; static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg) { return (reg == ADS1015_CFG_REG); } static const struct regmap_config ads1015_regmap_config = { .reg_bits = 8, .val_bits = 16, .max_register = ADS1015_CFG_REG, .writeable_reg = ads1015_is_writeable_reg, }; static const struct iio_chan_spec ads1015_channels[] = { ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1), ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3), ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3), ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3), ADS1015_V_CHAN(0, ADS1015_AIN0), ADS1015_V_CHAN(1, ADS1015_AIN1), ADS1015_V_CHAN(2, ADS1015_AIN2), ADS1015_V_CHAN(3, ADS1015_AIN3), IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), }; static const struct iio_chan_spec ads1115_channels[] = { ADS1115_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1), ADS1115_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3), ADS1115_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3), ADS1115_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3), ADS1115_V_CHAN(0, ADS1015_AIN0), ADS1115_V_CHAN(1, ADS1015_AIN1), ADS1115_V_CHAN(2, ADS1015_AIN2), ADS1115_V_CHAN(3, ADS1015_AIN3), IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), }; static int ads1015_set_power_state(struct ads1015_data *data, bool on) { int ret; struct device *dev = regmap_get_device(data->regmap); if (on) { ret = pm_runtime_get_sync(dev); if (ret < 0) pm_runtime_put_noidle(dev); } else { pm_runtime_mark_last_busy(dev); ret = pm_runtime_put_autosuspend(dev); } return ret; } static int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) { int ret, pga, dr, conv_time; bool change; if (chan < 0 || chan >= ADS1015_CHANNELS) return -EINVAL; pga = data->channel_data[chan].pga; dr = data->channel_data[chan].data_rate; ret = regmap_update_bits_check(data->regmap, ADS1015_CFG_REG, ADS1015_CFG_MUX_MASK | ADS1015_CFG_PGA_MASK, chan << ADS1015_CFG_MUX_SHIFT | pga << ADS1015_CFG_PGA_SHIFT, &change); if (ret < 0) return ret; if (change) { conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]); 
usleep_range(conv_time, conv_time + 1); } return regmap_read(data->regmap, ADS1015_CONV_REG, val); } static irqreturn_t ads1015_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ads1015_data *data = iio_priv(indio_dev); s16 buf[8]; /* 1x s16 ADC val + 3x s16 padding + 4x s16 timestamp */ int chan, ret, res; memset(buf, 0, sizeof(buf)); mutex_lock(&data->lock); chan = find_first_bit(indio_dev->active_scan_mask, indio_dev->masklength); ret = ads1015_get_adc_result(data, chan, &res); if (ret < 0) { mutex_unlock(&data->lock); goto err; } buf[0] = res; mutex_unlock(&data->lock); iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns(indio_dev)); err: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; } static int ads1015_set_scale(struct ads1015_data *data, int chan, int scale, int uscale) { int i, ret, rindex = -1; for (i = 0; i < ARRAY_SIZE(ads1015_scale); i++) if (ads1015_scale[i].scale == scale && ads1015_scale[i].uscale == uscale) { rindex = i; break; } if (rindex < 0) return -EINVAL; ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG, ADS1015_CFG_PGA_MASK, rindex << ADS1015_CFG_PGA_SHIFT); if (ret < 0) return ret; data->channel_data[chan].pga = rindex; return 0; } static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate) { int i, ret, rindex = -1; for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++) if (data->data_rate[i] == rate) { rindex = i; break; } if (rindex < 0) return -EINVAL; ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG, ADS1015_CFG_DR_MASK, rindex << ADS1015_CFG_DR_SHIFT); if (ret < 0) return ret; data->channel_data[chan].data_rate = rindex; return 0; } static int ads1015_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { int ret, idx; struct ads1015_data *data = iio_priv(indio_dev); mutex_lock(&indio_dev->mlock); mutex_lock(&data->lock); switch (mask) { case IIO_CHAN_INFO_RAW: { 
int shift = chan->scan_type.shift; if (iio_buffer_enabled(indio_dev)) { ret = -EBUSY; break; } ret = ads1015_set_power_state(data, true); if (ret < 0) break; ret = ads1015_get_adc_result(data, chan->address, val); if (ret < 0) { ads1015_set_power_state(data, false); break; } *val = sign_extend32(*val >> shift, 15 - shift); ret = ads1015_set_power_state(data, false); if (ret < 0) break; ret = IIO_VAL_INT; break; } case IIO_CHAN_INFO_SCALE: idx = data->channel_data[chan->address].pga; *val = ads1015_scale[idx].scale; *val2 = ads1015_scale[idx].uscale; ret = IIO_VAL_INT_PLUS_MICRO; break; case IIO_CHAN_INFO_SAMP_FREQ: idx = data->channel_data[chan->address].data_rate; *val = data->data_rate[idx]; ret = IIO_VAL_INT; break; default: ret = -EINVAL; break; } mutex_unlock(&data->lock); mutex_unlock(&indio_dev->mlock); return ret; } static int ads1015_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { struct ads1015_data *data = iio_priv(indio_dev); int ret; mutex_lock(&data->lock); switch (mask) { case IIO_CHAN_INFO_SCALE: ret = ads1015_set_scale(data, chan->address, val, val2); break; case IIO_CHAN_INFO_SAMP_FREQ: ret = ads1015_set_data_rate(data, chan->address, val); break; default: ret = -EINVAL; break; } mutex_unlock(&data->lock); return ret; } static int ads1015_buffer_preenable(struct iio_dev *indio_dev) { return ads1015_set_power_state(iio_priv(indio_dev), true); } static int ads1015_buffer_postdisable(struct iio_dev *indio_dev) { return ads1015_set_power_state(iio_priv(indio_dev), false); } static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = { .preenable = ads1015_buffer_preenable, .postenable = iio_triggered_buffer_postenable, .predisable = iio_triggered_buffer_predisable, .postdisable = ads1015_buffer_postdisable, .validate_scan_mask = &iio_validate_scan_mask_onehot, }; static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125"); static 
IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available, sampling_frequency_available, "128 250 490 920 1600 2400 3300"); static IIO_CONST_ATTR_NAMED(ads1115_sampling_frequency_available, sampling_frequency_available, "8 16 32 64 128 250 475 860"); static struct attribute *ads1015_attributes[] = { &iio_const_attr_scale_available.dev_attr.attr, &iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr, NULL, }; static const struct attribute_group ads1015_attribute_group = { .attrs = ads1015_attributes, }; static struct attribute *ads1115_attributes[] = { &iio_const_attr_scale_available.dev_attr.attr, &iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr, NULL, }; static const struct attribute_group ads1115_attribute_group = { .attrs = ads1115_attributes, }; static struct iio_info ads1015_info = { .driver_module = THIS_MODULE, .read_raw = ads1015_read_raw, .write_raw = ads1015_write_raw, .attrs = &ads1015_attribute_group, }; static struct iio_info ads1115_info = { .driver_module = THIS_MODULE, .read_raw = ads1015_read_raw, .write_raw = ads1015_write_raw, .attrs = &ads1115_attribute_group, }; #ifdef CONFIG_OF static int ads1015_get_channels_config_of(struct i2c_client *client) { struct iio_dev *indio_dev = i2c_get_clientdata(client); struct ads1015_data *data = iio_priv(indio_dev); struct device_node *node; if (!client->dev.of_node || !of_get_next_child(client->dev.of_node, NULL)) return -EINVAL; for_each_child_of_node(client->dev.of_node, node) { u32 pval; unsigned int channel; unsigned int pga = ADS1015_DEFAULT_PGA; unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE; if (of_property_read_u32(node, "reg", &pval)) { dev_err(&client->dev, "invalid reg on %s\n", node->full_name); continue; } channel = pval; if (channel >= ADS1015_CHANNELS) { dev_err(&client->dev, "invalid channel index %d on %s\n", channel, node->full_name); continue; } if (!of_property_read_u32(node, "ti,gain", &pval)) { pga = pval; if (pga > 6) { dev_err(&client->dev, "invalid 
gain on %s\n", node->full_name); of_node_put(node); return -EINVAL; } } if (!of_property_read_u32(node, "ti,datarate", &pval)) { data_rate = pval; if (data_rate > 7) { dev_err(&client->dev, "invalid data_rate on %s\n", node->full_name); of_node_put(node); return -EINVAL; } } data->channel_data[channel].pga = pga; data->channel_data[channel].data_rate = data_rate; } return 0; } #endif static void ads1015_get_channels_config(struct i2c_client *client) { unsigned int k; struct iio_dev *indio_dev = i2c_get_clientdata(client); struct ads1015_data *data = iio_priv(indio_dev); struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev); /* prefer platform data */ if (pdata) { memcpy(data->channel_data, pdata->channel_data, sizeof(data->channel_data)); return; } #ifdef CONFIG_OF if (!ads1015_get_channels_config_of(client)) return; #endif /* fallback on default configuration */ for (k = 0; k < ADS1015_CHANNELS; ++k) { data->channel_data[k].pga = ADS1015_DEFAULT_PGA; data->channel_data[k].data_rate = ADS1015_DEFAULT_DATA_RATE; } } static int ads1015_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct iio_dev *indio_dev; struct ads1015_data *data; int ret; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) return -ENOMEM; data = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); mutex_init(&data->lock); indio_dev->dev.parent = &client->dev; indio_dev->dev.of_node = client->dev.of_node; indio_dev->name = ADS1015_DRV_NAME; indio_dev->modes = INDIO_DIRECT_MODE; switch (id->driver_data) { case ADS1015: indio_dev->channels = ads1015_channels; indio_dev->num_channels = ARRAY_SIZE(ads1015_channels); indio_dev->info = &ads1015_info; data->data_rate = (unsigned int *) &ads1015_data_rate; break; case ADS1115: indio_dev->channels = ads1115_channels; indio_dev->num_channels = ARRAY_SIZE(ads1115_channels); indio_dev->info = &ads1115_info; data->data_rate = (unsigned int *) &ads1115_data_rate; break; } /* we need to keep 
this ABI the same as used by hwmon ADS1015 driver */ ads1015_get_channels_config(client); data->regmap = devm_regmap_init_i2c(client, &ads1015_regmap_config); if (IS_ERR(data->regmap)) { dev_err(&client->dev, "Failed to allocate register map\n"); return PTR_ERR(data->regmap); } ret = iio_triggered_buffer_setup(indio_dev, NULL, ads1015_trigger_handler, &ads1015_buffer_setup_ops); if (ret < 0) { dev_err(&client->dev, "iio triggered buffer setup failed\n"); return ret; } ret = pm_runtime_set_active(&client->dev); if (ret) goto err_buffer_cleanup; pm_runtime_set_autosuspend_delay(&client->dev, ADS1015_SLEEP_DELAY_MS); pm_runtime_use_autosuspend(&client->dev); pm_runtime_enable(&client->dev); ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "Failed to register IIO device\n"); goto err_buffer_cleanup; } return 0; err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); return ret; } static int ads1015_remove(struct i2c_client *client) { struct iio_dev *indio_dev = i2c_get_clientdata(client); struct ads1015_data *data = iio_priv(indio_dev); iio_device_unregister(indio_dev); pm_runtime_disable(&client->dev); pm_runtime_set_suspended(&client->dev); pm_runtime_put_noidle(&client->dev); iio_triggered_buffer_cleanup(indio_dev); /* power down single shot mode */ return regmap_update_bits(data->regmap, ADS1015_CFG_REG, ADS1015_CFG_MOD_MASK, ADS1015_SINGLESHOT << ADS1015_CFG_MOD_SHIFT); } #ifdef CONFIG_PM static int ads1015_runtime_suspend(struct device *dev) { struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct ads1015_data *data = iio_priv(indio_dev); return regmap_update_bits(data->regmap, ADS1015_CFG_REG, ADS1015_CFG_MOD_MASK, ADS1015_SINGLESHOT << ADS1015_CFG_MOD_SHIFT); } static int ads1015_runtime_resume(struct device *dev) { struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct ads1015_data *data = iio_priv(indio_dev); return regmap_update_bits(data->regmap, ADS1015_CFG_REG, ADS1015_CFG_MOD_MASK, 
ADS1015_CONTINUOUS << ADS1015_CFG_MOD_SHIFT); } #endif static const struct dev_pm_ops ads1015_pm_ops = { SET_RUNTIME_PM_OPS(ads1015_runtime_suspend, ads1015_runtime_resume, NULL) }; static const struct i2c_device_id ads1015_id[] = { {"ads1015", ADS1015}, {"ads1115", ADS1115}, {} }; MODULE_DEVICE_TABLE(i2c, ads1015_id); static struct i2c_driver ads1015_driver = { .driver = { .name = ADS1015_DRV_NAME, .pm = &ads1015_pm_ops, }, .probe = ads1015_probe, .remove = ads1015_remove, .id_table = ads1015_id, }; module_i2c_driver(ads1015_driver); MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>"); MODULE_DESCRIPTION("Texas Instruments ADS1015 ADC driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
minghuadev/chromeos-kernel-3-8
drivers/parisc/dino.c
101
31190
/* ** DINO manager ** ** (c) Copyright 1999 Red Hat Software ** (c) Copyright 1999 SuSE GmbH ** (c) Copyright 1999,2000 Hewlett-Packard Company ** (c) Copyright 2000 Grant Grundler ** (c) Copyright 2006 Helge Deller ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** This module provides access to Dino PCI bus (config/IOport spaces) ** and helps manage Dino IRQ lines. ** ** Dino interrupt handling is a bit complicated. ** Dino always writes to the broadcast EIR via irr0 for now. ** (BIG WARNING: using broadcast EIR is a really bad thing for SMP!) ** Only one processor interrupt is used for the 11 IRQ line ** inputs to dino. ** ** The different between Built-in Dino and Card-Mode ** dino is in chip initialization and pci device initialization. ** ** Linux drivers can only use Card-Mode Dino if pci devices I/O port ** BARs are configured and used by the driver. Programming MMIO address ** requires substantial knowledge of available Host I/O address ranges ** is currently not supported. Port/Config accessor functions are the ** same. "BIOS" differences are handled within the existing routines. */ /* Changes : ** 2001-06-14 : Clement Moyroud (moyroudc@esiee.fr) ** - added support for the integrated RS232. */ /* ** TODO: create a virtual address for each Dino HPA. ** GSC code might be able to do this since IODC data tells us ** how many pages are used. PCI subsystem could (must?) do this ** for PCI drivers devices which implement/use MMIO registers. 
*/ #include <linux/delay.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> /* for struct irqaction */ #include <linux/spinlock.h> /* for spinlock_t and prototypes */ #include <asm/pdc.h> #include <asm/page.h> #include <asm/io.h> #include <asm/hardware.h> #include "gsc.h" #undef DINO_DEBUG #ifdef DINO_DEBUG #define DBG(x...) printk(x) #else #define DBG(x...) #endif /* ** Config accessor functions only pass in the 8-bit bus number ** and not the 8-bit "PCI Segment" number. Each Dino will be ** assigned a PCI bus number based on "when" it's discovered. ** ** The "secondary" bus number is set to this before calling ** pci_scan_bus(). If any PPB's are present, the scan will ** discover them and update the "secondary" and "subordinate" ** fields in Dino's pci_bus structure. ** ** Changes in the configuration *will* result in a different ** bus number for each dino. 
*/ #define is_card_dino(id) ((id)->hw_type == HPHW_A_DMA) #define is_cujo(id) ((id)->hversion == 0x682) #define DINO_IAR0 0x004 #define DINO_IODC_ADDR 0x008 #define DINO_IODC_DATA_0 0x008 #define DINO_IODC_DATA_1 0x008 #define DINO_IRR0 0x00C #define DINO_IAR1 0x010 #define DINO_IRR1 0x014 #define DINO_IMR 0x018 #define DINO_IPR 0x01C #define DINO_TOC_ADDR 0x020 #define DINO_ICR 0x024 #define DINO_ILR 0x028 #define DINO_IO_COMMAND 0x030 #define DINO_IO_STATUS 0x034 #define DINO_IO_CONTROL 0x038 #define DINO_IO_GSC_ERR_RESP 0x040 #define DINO_IO_ERR_INFO 0x044 #define DINO_IO_PCI_ERR_RESP 0x048 #define DINO_IO_FBB_EN 0x05c #define DINO_IO_ADDR_EN 0x060 #define DINO_PCI_ADDR 0x064 #define DINO_CONFIG_DATA 0x068 #define DINO_IO_DATA 0x06c #define DINO_MEM_DATA 0x070 /* Dino 3.x only */ #define DINO_GSC2X_CONFIG 0x7b4 #define DINO_GMASK 0x800 #define DINO_PAMR 0x804 #define DINO_PAPR 0x808 #define DINO_DAMODE 0x80c #define DINO_PCICMD 0x810 #define DINO_PCISTS 0x814 #define DINO_MLTIM 0x81c #define DINO_BRDG_FEAT 0x820 #define DINO_PCIROR 0x824 #define DINO_PCIWOR 0x828 #define DINO_TLTIM 0x830 #define DINO_IRQS 11 /* bits 0-10 are architected */ #define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */ #define DINO_LOCAL_IRQS (DINO_IRQS+1) #define DINO_MASK_IRQ(x) (1<<(x)) #define PCIINTA 0x001 #define PCIINTB 0x002 #define PCIINTC 0x004 #define PCIINTD 0x008 #define PCIINTE 0x010 #define PCIINTF 0x020 #define GSCEXTINT 0x040 /* #define xxx 0x080 - bit 7 is "default" */ /* #define xxx 0x100 - bit 8 not used */ /* #define xxx 0x200 - bit 9 not used */ #define RS232INT 0x400 struct dino_device { struct pci_hba_data hba; /* 'C' inheritance - must be first */ spinlock_t dinosaur_pen; unsigned long txn_addr; /* EIR addr to generate interrupt */ u32 txn_data; /* EIR data assign to each dino */ u32 imr; /* IRQ's which are enabled */ int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */ #ifdef DINO_DEBUG unsigned int dino_irr0; /* save most recent IRQ line 
stat */ #endif }; /* Looks nice and keeps the compiler happy */ #define DINO_DEV(d) ((struct dino_device *) d) /* * Dino Configuration Space Accessor Functions */ #define DINO_CFG_TOK(bus,dfn,pos) ((u32) ((bus)<<16 | (dfn)<<8 | (pos))) /* * keep the current highest bus count to assist in allocating busses. This * tries to keep a global bus count total so that when we discover an * entirely new bus, it can be given a unique bus number. */ static int dino_current_bus = 0; static int dino_cfg_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct dino_device *d = DINO_DEV(parisc_walk_tree(bus->bridge)); u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start; u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3); void __iomem *base_addr = d->hba.base_addr; unsigned long flags; DBG("%s: %p, %d, %d, %d\n", __func__, base_addr, devfn, where, size); spin_lock_irqsave(&d->dinosaur_pen, flags); /* tell HW which CFG address */ __raw_writel(v, base_addr + DINO_PCI_ADDR); /* generate cfg read cycle */ if (size == 1) { *val = readb(base_addr + DINO_CONFIG_DATA + (where & 3)); } else if (size == 2) { *val = readw(base_addr + DINO_CONFIG_DATA + (where & 2)); } else if (size == 4) { *val = readl(base_addr + DINO_CONFIG_DATA); } spin_unlock_irqrestore(&d->dinosaur_pen, flags); return 0; } /* * Dino address stepping "feature": * When address stepping, Dino attempts to drive the bus one cycle too soon * even though the type of cycle (config vs. MMIO) might be different. * The read of Ven/Prod ID is harmless and avoids Dino's address stepping. */ static int dino_cfg_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct dino_device *d = DINO_DEV(parisc_walk_tree(bus->bridge)); u32 local_bus = (bus->parent == NULL) ? 
0 : bus->busn_res.start; u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3); void __iomem *base_addr = d->hba.base_addr; unsigned long flags; DBG("%s: %p, %d, %d, %d\n", __func__, base_addr, devfn, where, size); spin_lock_irqsave(&d->dinosaur_pen, flags); /* avoid address stepping feature */ __raw_writel(v & 0xffffff00, base_addr + DINO_PCI_ADDR); __raw_readl(base_addr + DINO_CONFIG_DATA); /* tell HW which CFG address */ __raw_writel(v, base_addr + DINO_PCI_ADDR); /* generate cfg read cycle */ if (size == 1) { writeb(val, base_addr + DINO_CONFIG_DATA + (where & 3)); } else if (size == 2) { writew(val, base_addr + DINO_CONFIG_DATA + (where & 2)); } else if (size == 4) { writel(val, base_addr + DINO_CONFIG_DATA); } spin_unlock_irqrestore(&d->dinosaur_pen, flags); return 0; } static struct pci_ops dino_cfg_ops = { .read = dino_cfg_read, .write = dino_cfg_write, }; /* * Dino "I/O Port" Space Accessor Functions * * Many PCI devices don't require use of I/O port space (eg Tulip, * NCR720) since they export the same registers to both MMIO and * I/O port space. Performance is going to stink if drivers use * I/O port instead of MMIO. 
*/ #define DINO_PORT_IN(type, size, mask) \ static u##size dino_in##size (struct pci_hba_data *d, u16 addr) \ { \ u##size v; \ unsigned long flags; \ spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \ /* tell HW which IO Port address */ \ __raw_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \ /* generate I/O PORT read cycle */ \ v = read##type(d->base_addr+DINO_IO_DATA+(addr&mask)); \ spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \ return v; \ } DINO_PORT_IN(b, 8, 3) DINO_PORT_IN(w, 16, 2) DINO_PORT_IN(l, 32, 0) #define DINO_PORT_OUT(type, size, mask) \ static void dino_out##size (struct pci_hba_data *d, u16 addr, u##size val) \ { \ unsigned long flags; \ spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \ /* tell HW which IO port address */ \ __raw_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \ /* generate cfg write cycle */ \ write##type(val, d->base_addr+DINO_IO_DATA+(addr&mask)); \ spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \ } DINO_PORT_OUT(b, 8, 3) DINO_PORT_OUT(w, 16, 2) DINO_PORT_OUT(l, 32, 0) static struct pci_port_ops dino_port_ops = { .inb = dino_in8, .inw = dino_in16, .inl = dino_in32, .outb = dino_out8, .outw = dino_out16, .outl = dino_out32 }; static void dino_mask_irq(struct irq_data *d) { struct dino_device *dino_dev = irq_data_get_irq_chip_data(d); int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS); DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq); /* Clear the matching bit in the IMR register */ dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq)); __raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR); } static void dino_unmask_irq(struct irq_data *d) { struct dino_device *dino_dev = irq_data_get_irq_chip_data(d); int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS); u32 tmp; DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq); /* ** clear pending IRQ bits ** ** This does NOT change ILR state! 
** See comment below for ILR usage. */ __raw_readl(dino_dev->hba.base_addr+DINO_IPR); /* set the matching bit in the IMR register */ dino_dev->imr |= DINO_MASK_IRQ(local_irq); /* used in dino_isr() */ __raw_writel( dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR); /* Emulate "Level Triggered" Interrupt ** Basically, a driver is blowing it if the IRQ line is asserted ** while the IRQ is disabled. But tulip.c seems to do that.... ** Give 'em a kluge award and a nice round of applause! ** ** The gsc_write will generate an interrupt which invokes dino_isr(). ** dino_isr() will read IPR and find nothing. But then catch this ** when it also checks ILR. */ tmp = __raw_readl(dino_dev->hba.base_addr+DINO_ILR); if (tmp & DINO_MASK_IRQ(local_irq)) { DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n", __func__, tmp); gsc_writel(dino_dev->txn_data, dino_dev->txn_addr); } } static struct irq_chip dino_interrupt_type = { .name = "GSC-PCI", .irq_unmask = dino_unmask_irq, .irq_mask = dino_mask_irq, }; /* * Handle a Processor interrupt generated by Dino. * * ilr_loop counter is a kluge to prevent a "stuck" IRQ line from * wedging the CPU. Could be removed or made optional at some point. */ static irqreturn_t dino_isr(int irq, void *intr_dev) { struct dino_device *dino_dev = intr_dev; u32 mask; int ilr_loop = 100; /* read and acknowledge pending interrupts */ #ifdef DINO_DEBUG dino_dev->dino_irr0 = #endif mask = __raw_readl(dino_dev->hba.base_addr+DINO_IRR0) & DINO_IRR_MASK; if (mask == 0) return IRQ_NONE; ilr_again: do { int local_irq = __ffs(mask); int irq = dino_dev->global_irq[local_irq]; DBG(KERN_DEBUG "%s(%d, %p) mask 0x%x\n", __func__, irq, intr_dev, mask); generic_handle_irq(irq); mask &= ~(1 << local_irq); } while (mask); /* Support for level triggered IRQ lines. ** ** Dropping this support would make this routine *much* faster. ** But since PCI requires level triggered IRQ line to share lines... 
** device drivers may assume lines are level triggered (and not ** edge triggered like EISA/ISA can be). */ mask = __raw_readl(dino_dev->hba.base_addr+DINO_ILR) & dino_dev->imr; if (mask) { if (--ilr_loop > 0) goto ilr_again; printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n", dino_dev->hba.base_addr, mask); return IRQ_NONE; } return IRQ_HANDLED; } static void dino_assign_irq(struct dino_device *dino, int local_irq, int *irqp) { int irq = gsc_assign_irq(&dino_interrupt_type, dino); if (irq == NO_IRQ) return; *irqp = irq; dino->global_irq[local_irq] = irq; } static void dino_choose_irq(struct parisc_device *dev, void *ctrl) { int irq; struct dino_device *dino = ctrl; switch (dev->id.sversion) { case 0x00084: irq = 8; break; /* PS/2 */ case 0x0008c: irq = 10; break; /* RS232 */ case 0x00096: irq = 8; break; /* PS/2 */ default: return; /* Unknown */ } dino_assign_irq(dino, irq, &dev->irq); } /* * Cirrus 6832 Cardbus reports wrong irq on RDI Tadpole PARISC Laptop (deller@gmx.de) * (the irqs are off-by-one, not sure yet if this is a cirrus, dino-hardware or dino-driver problem...) */ static void quirk_cirrus_cardbus(struct pci_dev *dev) { u8 new_irq = dev->irq - 1; printk(KERN_INFO "PCI: Cirrus Cardbus IRQ fixup for %s, from %d to %d\n", pci_name(dev), dev->irq, new_irq); dev->irq = new_irq; } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus ); static void __init dino_bios_init(void) { DBG("dino_bios_init\n"); } /* * dino_card_setup - Set up the memory space for a Dino in card mode. * @bus: the bus under this dino * * Claim an 8MB chunk of unused IO space and call the generic PCI routines * to set up the addresses of the devices on this bus. 
*/ #define _8MB 0x00800000UL static void __init dino_card_setup(struct pci_bus *bus, void __iomem *base_addr) { int i; struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge)); struct resource *res; char name[128]; int size; res = &dino_dev->hba.lmmio_space; res->flags = IORESOURCE_MEM; size = scnprintf(name, sizeof(name), "Dino LMMIO (%s)", dev_name(bus->bridge)); res->name = kmalloc(size+1, GFP_KERNEL); if(res->name) strcpy((char *)res->name, name); else res->name = dino_dev->hba.lmmio_space.name; if (ccio_allocate_resource(dino_dev->hba.dev, res, _8MB, F_EXTEND(0xf0000000UL) | _8MB, F_EXTEND(0xffffffffUL) &~ _8MB, _8MB) < 0) { struct pci_dev *dev, *tmp; printk(KERN_ERR "Dino: cannot attach bus %s\n", dev_name(bus->bridge)); /* kill the bus, we can't do anything with it */ list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { list_del(&dev->bus_list); } return; } bus->resource[1] = res; bus->resource[0] = &(dino_dev->hba.io_space); /* Now tell dino what range it has */ for (i = 1; i < 31; i++) { if (res->start == F_EXTEND(0xf0000000UL | (i * _8MB))) break; } DBG("DINO GSC WRITE i=%d, start=%lx, dino addr = %p\n", i, res->start, base_addr + DINO_IO_ADDR_EN); __raw_writel(1 << i, base_addr + DINO_IO_ADDR_EN); } static void __init dino_card_fixup(struct pci_dev *dev) { u32 irq_pin; /* ** REVISIT: card-mode PCI-PCI expansion chassis do exist. ** Not sure they were ever productized. ** Die here since we'll die later in dino_inb() anyway. */ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { panic("Card-Mode Dino: PCI-PCI Bridge not supported\n"); } /* ** Set Latency Timer to 0xff (not a shared bus) ** Set CACHELINE_SIZE. */ dino_cfg_write(dev->bus, dev->devfn, PCI_CACHE_LINE_SIZE, 2, 0xff00 | L1_CACHE_BYTES/4); /* ** Program INT_LINE for card-mode devices. ** The cards are hardwired according to this algorithm. ** And it doesn't matter if PPB's are present or not since ** the IRQ lines bypass the PPB. 
** ** "-1" converts INTA-D (1-4) to PCIINTA-D (0-3) range. ** The additional "-1" adjusts for skewing the IRQ<->slot. */ dino_cfg_read(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 1, &irq_pin); dev->irq = pci_swizzle_interrupt_pin(dev, irq_pin) - 1; /* Shouldn't really need to do this but it's in case someone tries ** to bypass PCI services and look at the card themselves. */ dino_cfg_write(dev->bus, dev->devfn, PCI_INTERRUPT_LINE, 1, dev->irq); } /* The alignment contraints for PCI bridges under dino */ #define DINO_BRIDGE_ALIGN 0x100000 static void __init dino_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev; struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge)); DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n", __func__, bus, bus->busn_res.start, bus->bridge->platform_data); /* Firmware doesn't set up card-mode dino, so we have to */ if (is_card_dino(&dino_dev->hba.dev->id)) { dino_card_setup(bus, dino_dev->hba.base_addr); } else if (bus->parent) { int i; pci_read_bridge_bases(bus); for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { if((bus->self->resource[i].flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0) continue; if(bus->self->resource[i].flags & IORESOURCE_MEM) { /* There's a quirk to alignment of * bridge memory resources: the start * is the alignment and start-end is * the size. 
However, firmware will * have assigned start and end, so we * need to take this into account */ bus->self->resource[i].end = bus->self->resource[i].end - bus->self->resource[i].start + DINO_BRIDGE_ALIGN; bus->self->resource[i].start = DINO_BRIDGE_ALIGN; } DBG("DEBUG %s assigning %d [0x%lx,0x%lx]\n", dev_name(&bus->self->dev), i, bus->self->resource[i].start, bus->self->resource[i].end); WARN_ON(pci_assign_resource(bus->self, i)); DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n", dev_name(&bus->self->dev), i, bus->self->resource[i].start, bus->self->resource[i].end); } } list_for_each_entry(dev, &bus->devices, bus_list) { if (is_card_dino(&dino_dev->hba.dev->id)) dino_card_fixup(dev); /* ** P2PB's only have 2 BARs, no IRQs. ** I'd like to just ignore them for now. */ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) continue; /* null out the ROM resource if there is one (we don't * care about an expansion rom on parisc, since it * usually contains (x86) bios code) */ dev->resource[PCI_ROM_RESOURCE].flags = 0; if(dev->irq == 255) { #define DINO_FIX_UNASSIGNED_INTERRUPTS #ifdef DINO_FIX_UNASSIGNED_INTERRUPTS /* This code tries to assign an unassigned * interrupt. 
Leave it disabled unless you * *really* know what you're doing since the * pin<->interrupt line mapping varies by bus * and machine */ u32 irq_pin; dino_cfg_read(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 1, &irq_pin); irq_pin = pci_swizzle_interrupt_pin(dev, irq_pin) - 1; printk(KERN_WARNING "Device %s has undefined IRQ, " "setting to %d\n", pci_name(dev), irq_pin); dino_cfg_write(dev->bus, dev->devfn, PCI_INTERRUPT_LINE, 1, irq_pin); dino_assign_irq(dino_dev, irq_pin, &dev->irq); #else dev->irq = 65535; printk(KERN_WARNING "Device %s has unassigned IRQ\n", pci_name(dev)); #endif } else { /* Adjust INT_LINE for that busses region */ dino_assign_irq(dino_dev, dev->irq, &dev->irq); } } } static struct pci_bios_ops dino_bios_ops = { .init = dino_bios_init, .fixup_bus = dino_fixup_bus }; /* * Initialise a DINO controller chip */ static void __init dino_card_init(struct dino_device *dino_dev) { u32 brdg_feat = 0x00784e05; unsigned long status; status = __raw_readl(dino_dev->hba.base_addr+DINO_IO_STATUS); if (status & 0x0000ff80) { __raw_writel(0x00000005, dino_dev->hba.base_addr+DINO_IO_COMMAND); udelay(1); } __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_GMASK); __raw_writel(0x00000001, dino_dev->hba.base_addr+DINO_IO_FBB_EN); __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_ICR); #if 1 /* REVISIT - should be a runtime check (eg if (CPU_IS_PCX_L) ...) */ /* ** PCX-L processors don't support XQL like Dino wants it. ** PCX-L2 ignore XQL signal and it doesn't matter. */ brdg_feat &= ~0x4; /* UXQL */ #endif __raw_writel( brdg_feat, dino_dev->hba.base_addr+DINO_BRDG_FEAT); /* ** Don't enable address decoding until we know which I/O range ** currently is available from the host. Only affects MMIO ** and not I/O port space. 
*/ __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_IO_ADDR_EN); __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_DAMODE); __raw_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIROR); __raw_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIWOR); __raw_writel(0x00000040, dino_dev->hba.base_addr+DINO_MLTIM); __raw_writel(0x00000080, dino_dev->hba.base_addr+DINO_IO_CONTROL); __raw_writel(0x0000008c, dino_dev->hba.base_addr+DINO_TLTIM); /* Disable PAMR before writing PAPR */ __raw_writel(0x0000007e, dino_dev->hba.base_addr+DINO_PAMR); __raw_writel(0x0000007f, dino_dev->hba.base_addr+DINO_PAPR); __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_PAMR); /* ** Dino ERS encourages enabling FBB (0x6f). ** We can't until we know *all* devices below us can support it. ** (Something in device configuration header tells us). */ __raw_writel(0x0000004f, dino_dev->hba.base_addr+DINO_PCICMD); /* Somewhere, the PCI spec says give devices 1 second ** to recover from the #RESET being de-asserted. ** Experience shows most devices only need 10ms. ** This short-cut speeds up booting significantly. */ mdelay(pci_post_reset_delay); } static int __init dino_bridge_init(struct dino_device *dino_dev, const char *name) { unsigned long io_addr; int result, i, count=0; struct resource *res, *prevres = NULL; /* * Decoding IO_ADDR_EN only works for Built-in Dino * since PDC has already initialized this. 
*/ io_addr = __raw_readl(dino_dev->hba.base_addr + DINO_IO_ADDR_EN); if (io_addr == 0) { printk(KERN_WARNING "%s: No PCI devices enabled.\n", name); return -ENODEV; } res = &dino_dev->hba.lmmio_space; for (i = 0; i < 32; i++) { unsigned long start, end; if((io_addr & (1 << i)) == 0) continue; start = F_EXTEND(0xf0000000UL) | (i << 23); end = start + 8 * 1024 * 1024 - 1; DBG("DINO RANGE %d is at 0x%lx-0x%lx\n", count, start, end); if(prevres && prevres->end + 1 == start) { prevres->end = end; } else { if(count >= DINO_MAX_LMMIO_RESOURCES) { printk(KERN_ERR "%s is out of resource windows for range %d (0x%lx-0x%lx)\n", name, count, start, end); break; } prevres = res; res->start = start; res->end = end; res->flags = IORESOURCE_MEM; res->name = kmalloc(64, GFP_KERNEL); if(res->name) snprintf((char *)res->name, 64, "%s LMMIO %d", name, count); res++; count++; } } res = &dino_dev->hba.lmmio_space; for(i = 0; i < DINO_MAX_LMMIO_RESOURCES; i++) { if(res[i].flags == 0) break; result = ccio_request_resource(dino_dev->hba.dev, &res[i]); if (result < 0) { printk(KERN_ERR "%s: failed to claim PCI Bus address " "space %d (0x%lx-0x%lx)!\n", name, i, (unsigned long)res[i].start, (unsigned long)res[i].end); return result; } } return 0; } static int __init dino_common_init(struct parisc_device *dev, struct dino_device *dino_dev, const char *name) { int status; u32 eim; struct gsc_irq gsc_irq; struct resource *res; pcibios_register_hba(&dino_dev->hba); pci_bios = &dino_bios_ops; /* used by pci_scan_bus() */ pci_port = &dino_port_ops; /* ** Note: SMP systems can make use of IRR1/IAR1 registers ** But it won't buy much performance except in very ** specific applications/configurations. Note Dino ** still only has 11 IRQ input lines - just map some of them ** to a different processor. 
*/ dev->irq = gsc_alloc_irq(&gsc_irq); dino_dev->txn_addr = gsc_irq.txn_addr; dino_dev->txn_data = gsc_irq.txn_data; eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; /* ** Dino needs a PA "IRQ" to get a processor's attention. ** arch/parisc/kernel/irq.c returns an EIRR bit. */ if (dev->irq < 0) { printk(KERN_WARNING "%s: gsc_alloc_irq() failed\n", name); return 1; } status = request_irq(dev->irq, dino_isr, 0, name, dino_dev); if (status) { printk(KERN_WARNING "%s: request_irq() failed with %d\n", name, status); return 1; } /* Support the serial port which is sometimes attached on built-in * Dino / Cujo chips. */ gsc_fixup_irqs(dev, dino_dev, dino_choose_irq); /* ** This enables DINO to generate interrupts when it sees ** any of its inputs *change*. Just asserting an IRQ ** before it's enabled (ie unmasked) isn't good enough. */ __raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0); /* ** Some platforms don't clear Dino's IRR0 register at boot time. ** Reading will clear it now. */ __raw_readl(dino_dev->hba.base_addr+DINO_IRR0); /* allocate I/O Port resource region */ res = &dino_dev->hba.io_space; if (!is_cujo(&dev->id)) { res->name = "Dino I/O Port"; } else { res->name = "Cujo I/O Port"; } res->start = HBA_PORT_BASE(dino_dev->hba.hba_num); res->end = res->start + (HBA_PORT_SPACE_SIZE - 1); res->flags = IORESOURCE_IO; /* do not mark it busy ! 
*/ if (request_resource(&ioport_resource, res) < 0) { printk(KERN_ERR "%s: request I/O Port region failed " "0x%lx/%lx (hpa 0x%p)\n", name, (unsigned long)res->start, (unsigned long)res->end, dino_dev->hba.base_addr); return 1; } return 0; } #define CUJO_RAVEN_ADDR F_EXTEND(0xf1000000UL) #define CUJO_FIREHAWK_ADDR F_EXTEND(0xf1604000UL) #define CUJO_RAVEN_BADPAGE 0x01003000UL #define CUJO_FIREHAWK_BADPAGE 0x01607000UL static const char *dino_vers[] = { "2.0", "2.1", "3.0", "3.1" }; static const char *cujo_vers[] = { "1.0", "2.0" }; void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp); /* ** Determine if dino should claim this chip (return 0) or not (return 1). ** If so, initialize the chip appropriately (card-mode vs bridge mode). ** Much of the initialization is common though. */ static int __init dino_probe(struct parisc_device *dev) { struct dino_device *dino_dev; // Dino specific control struct const char *version = "unknown"; char *name; int is_cujo = 0; LIST_HEAD(resources); struct pci_bus *bus; unsigned long hpa = dev->hpa.start; int max; name = "Dino"; if (is_card_dino(&dev->id)) { version = "3.x (card mode)"; } else { if (!is_cujo(&dev->id)) { if (dev->id.hversion_rev < 4) { version = dino_vers[dev->id.hversion_rev]; } } else { name = "Cujo"; is_cujo = 1; if (dev->id.hversion_rev < 2) { version = cujo_vers[dev->id.hversion_rev]; } } } printk("%s version %s found at 0x%lx\n", name, version, hpa); if (!request_mem_region(hpa, PAGE_SIZE, name)) { printk(KERN_ERR "DINO: Hey! 
Someone took my MMIO space (0x%ld)!\n", hpa); return 1; } /* Check for bugs */ if (is_cujo && dev->id.hversion_rev == 1) { #ifdef CONFIG_IOMMU_CCIO printk(KERN_WARNING "Enabling Cujo 2.0 bug workaround\n"); if (hpa == (unsigned long)CUJO_RAVEN_ADDR) { ccio_cujo20_fixup(dev, CUJO_RAVEN_BADPAGE); } else if (hpa == (unsigned long)CUJO_FIREHAWK_ADDR) { ccio_cujo20_fixup(dev, CUJO_FIREHAWK_BADPAGE); } else { printk("Don't recognise Cujo at address 0x%lx, not enabling workaround\n", hpa); } #endif } else if (!is_cujo && !is_card_dino(&dev->id) && dev->id.hversion_rev < 3) { printk(KERN_WARNING "The GSCtoPCI (Dino hrev %d) bus converter found may exhibit\n" "data corruption. See Service Note Numbers: A4190A-01, A4191A-01.\n" "Systems shipped after Aug 20, 1997 will not exhibit this problem.\n" "Models affected: C180, C160, C160L, B160L, and B132L workstations.\n\n", dev->id.hversion_rev); /* REVISIT: why are C200/C240 listed in the README table but not ** "Models affected"? Could be an omission in the original literature. 
*/ } dino_dev = kzalloc(sizeof(struct dino_device), GFP_KERNEL); if (!dino_dev) { printk("dino_init_chip - couldn't alloc dino_device\n"); return 1; } dino_dev->hba.dev = dev; dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096); dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */ spin_lock_init(&dino_dev->dinosaur_pen); dino_dev->hba.iommu = ccio_get_iommu(dev); if (is_card_dino(&dev->id)) { dino_card_init(dino_dev); } else { dino_bridge_init(dino_dev, name); } if (dino_common_init(dev, dino_dev, name)) return 1; dev->dev.platform_data = dino_dev; pci_add_resource_offset(&resources, &dino_dev->hba.io_space, HBA_PORT_BASE(dino_dev->hba.hba_num)); if (dino_dev->hba.lmmio_space.flags) pci_add_resource_offset(&resources, &dino_dev->hba.lmmio_space, dino_dev->hba.lmmio_space_offset); if (dino_dev->hba.elmmio_space.flags) pci_add_resource_offset(&resources, &dino_dev->hba.elmmio_space, dino_dev->hba.lmmio_space_offset); if (dino_dev->hba.gmmio_space.flags) pci_add_resource(&resources, &dino_dev->hba.gmmio_space); dino_dev->hba.bus_num.start = dino_current_bus; dino_dev->hba.bus_num.end = 255; dino_dev->hba.bus_num.flags = IORESOURCE_BUS; pci_add_resource(&resources, &dino_dev->hba.bus_num); /* ** It's not used to avoid chicken/egg problems ** with configuration accessor functions. 
*/ dino_dev->hba.hba_bus = bus = pci_create_root_bus(&dev->dev, dino_current_bus, &dino_cfg_ops, NULL, &resources); if (!bus) { printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (duplicate bus number %d?)\n", dev_name(&dev->dev), dino_current_bus); pci_free_resource_list(&resources); /* increment the bus number in case of duplicates */ dino_current_bus++; return 0; } max = pci_scan_child_bus(bus); pci_bus_update_busn_res_end(bus, max); /* This code *depends* on scanning being single threaded * if it isn't, this global bus number count will fail */ dino_current_bus = max + 1; pci_bus_assign_resources(bus); pci_bus_add_devices(bus); return 0; } /* * Normally, we would just test sversion. But the Elroy PCI adapter has * the same sversion as Dino, so we have to check hversion as well. * Unfortunately, the J2240 PDC reports the wrong hversion for the first * Dino, so we have to test for Dino, Cujo and Dino-in-a-J2240. * For card-mode Dino, most machines report an sversion of 9D. But 715 * and 725 firmware misreport it as 0x08080 for no adequately explained * reason. */ static struct parisc_device_id dino_tbl[] = { { HPHW_A_DMA, HVERSION_REV_ANY_ID, 0x004, 0x0009D },/* Card-mode Dino */ { HPHW_A_DMA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x08080 }, /* XXX */ { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x680, 0xa }, /* Bridge-mode Dino */ { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x682, 0xa }, /* Bridge-mode Cujo */ { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x05d, 0xa }, /* Dino in a J2240 */ { 0, } }; static struct parisc_driver dino_driver = { .name = "dino", .id_table = dino_tbl, .probe = dino_probe, }; /* * One time initialization to let the world know Dino is here. * This is the only routine which is NOT static. * Must be called exactly once before pci_init(). */ int __init dino_init(void) { register_parisc_driver(&dino_driver); return 0; }
gpl-2.0
jemmy655/linux
drivers/media/platform/davinci/vpif_capture.c
613
44614
/* * Copyright (C) 2009 Texas Instruments Inc * Copyright (C) 2014 Lad, Prabhakar <prabhakar.csengg@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * TODO : add support for VBI & HBI data service * add static buffer allocation */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <media/v4l2-ioctl.h> #include "vpif.h" #include "vpif_capture.h" MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(VPIF_CAPTURE_VERSION); #define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg) #define vpif_dbg(level, debug, fmt, arg...) \ v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg) static int debug = 1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level 0-1"); #define VPIF_DRIVER_NAME "vpif_capture" /* global variables */ static struct vpif_device vpif_obj = { {NULL} }; static struct device *vpif_dev; static void vpif_calculate_offsets(struct channel_obj *ch); static void vpif_config_addr(struct channel_obj *ch, int muxmode); static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] = { {1, 1} }; /* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. 
*/ static int ycmux_mode; static inline struct vpif_cap_buffer *to_vpif_buffer(struct vb2_buffer *vb) { return container_of(vb, struct vpif_cap_buffer, vb); } /** * vpif_buffer_prepare : callback function for buffer prepare * @vb: ptr to vb2_buffer * * This is the callback function for buffer prepare when vb2_qbuf() * function is called. The buffer is prepared and user space virtual address * or user address is converted into physical address */ static int vpif_buffer_prepare(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; struct channel_obj *ch = vb2_get_drv_priv(q); struct common_obj *common; unsigned long addr; vpif_dbg(2, debug, "vpif_buffer_prepare\n"); common = &ch->common[VPIF_VIDEO_INDEX]; vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage); if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) return -EINVAL; vb->v4l2_buf.field = common->fmt.fmt.pix.field; addr = vb2_dma_contig_plane_dma_addr(vb, 0); if (!IS_ALIGNED((addr + common->ytop_off), 8) || !IS_ALIGNED((addr + common->ybtm_off), 8) || !IS_ALIGNED((addr + common->ctop_off), 8) || !IS_ALIGNED((addr + common->cbtm_off), 8)) { vpif_dbg(1, debug, "offset is not aligned\n"); return -EINVAL; } return 0; } /** * vpif_buffer_queue_setup : Callback function for buffer setup. * @vq: vb2_queue ptr * @fmt: v4l2 format * @nbuffers: ptr to number of buffers requested by application * @nplanes:: contains number of distinct video planes needed to hold a frame * @sizes[]: contains the size (in bytes) of each plane. 
* @alloc_ctxs: ptr to allocation context * * This callback function is called when reqbuf() is called to adjust * the buffer count and buffer size */ static int vpif_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct channel_obj *ch = vb2_get_drv_priv(vq); struct common_obj *common; common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_buffer_setup\n"); if (fmt && fmt->fmt.pix.sizeimage < common->fmt.fmt.pix.sizeimage) return -EINVAL; if (vq->num_buffers + *nbuffers < 3) *nbuffers = 3 - vq->num_buffers; *nplanes = 1; sizes[0] = fmt ? fmt->fmt.pix.sizeimage : common->fmt.fmt.pix.sizeimage; alloc_ctxs[0] = common->alloc_ctx; /* Calculate the offset for Y and C data in the buffer */ vpif_calculate_offsets(ch); return 0; } /** * vpif_buffer_queue : Callback function to add buffer to DMA queue * @vb: ptr to vb2_buffer */ static void vpif_buffer_queue(struct vb2_buffer *vb) { struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue); struct vpif_cap_buffer *buf = to_vpif_buffer(vb); struct common_obj *common; unsigned long flags; common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_buffer_queue\n"); spin_lock_irqsave(&common->irqlock, flags); /* add the buffer to the DMA queue */ list_add_tail(&buf->list, &common->dma_queue); spin_unlock_irqrestore(&common->irqlock, flags); } /** * vpif_start_streaming : Starts the DMA engine for streaming * @vb: ptr to vb2_buffer * @count: number of buffers */ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count) { struct vpif_capture_config *vpif_config_data = vpif_dev->platform_data; struct channel_obj *ch = vb2_get_drv_priv(vq); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_params *vpif = &ch->vpifparams; struct vpif_cap_buffer *buf, *tmp; unsigned long addr, flags; int ret; spin_lock_irqsave(&common->irqlock, flags); /* Initialize field_id */ 
ch->field_id = 0; /* configure 1 or 2 channel mode */ if (vpif_config_data->setup_input_channel_mode) { ret = vpif_config_data-> setup_input_channel_mode(vpif->std_info.ycmux_mode); if (ret < 0) { vpif_dbg(1, debug, "can't set vpif channel mode\n"); goto err; } } ret = v4l2_subdev_call(ch->sd, video, s_stream, 1); if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) { vpif_dbg(1, debug, "stream on failed in subdev\n"); goto err; } /* Call vpif_set_params function to set the parameters and addresses */ ret = vpif_set_video_params(vpif, ch->channel_id); if (ret < 0) { vpif_dbg(1, debug, "can't set video params\n"); goto err; } ycmux_mode = ret; vpif_config_addr(ch, ret); /* Get the next frame from the buffer queue */ common->cur_frm = common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); /* Remove buffer from the buffer queue */ list_del(&common->cur_frm->list); spin_unlock_irqrestore(&common->irqlock, flags); addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0); common->set_addr(addr + common->ytop_off, addr + common->ybtm_off, addr + common->ctop_off, addr + common->cbtm_off); /** * Set interrupt for both the fields in VPIF Register enable channel in * VPIF register */ channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1; if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { channel0_intr_assert(); channel0_intr_enable(1); enable_channel0(1); } if (VPIF_CHANNEL1_VIDEO == ch->channel_id || ycmux_mode == 2) { channel1_intr_assert(); channel1_intr_enable(1); enable_channel1(1); } return 0; err: list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) { list_del(&buf->list); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); } spin_unlock_irqrestore(&common->irqlock, flags); return ret; } /** * vpif_stop_streaming : Stop the DMA engine * @vq: ptr to vb2_queue * * This callback stops the DMA engine and any remaining buffers * in the DMA queue are released. 
*/ static void vpif_stop_streaming(struct vb2_queue *vq) { struct channel_obj *ch = vb2_get_drv_priv(vq); struct common_obj *common; unsigned long flags; int ret; common = &ch->common[VPIF_VIDEO_INDEX]; /* Disable channel as per its device type and channel id */ if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { enable_channel0(0); channel0_intr_enable(0); } if (VPIF_CHANNEL1_VIDEO == ch->channel_id || ycmux_mode == 2) { enable_channel1(0); channel1_intr_enable(0); } ycmux_mode = 0; ret = v4l2_subdev_call(ch->sd, video, s_stream, 0); if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) vpif_dbg(1, debug, "stream off failed in subdev\n"); /* release all active buffers */ spin_lock_irqsave(&common->irqlock, flags); if (common->cur_frm == common->next_frm) { vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR); } else { if (common->cur_frm != NULL) vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR); if (common->next_frm != NULL) vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR); } while (!list_empty(&common->dma_queue)) { common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); list_del(&common->next_frm->list); vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR); } spin_unlock_irqrestore(&common->irqlock, flags); } static struct vb2_ops video_qops = { .queue_setup = vpif_buffer_queue_setup, .buf_prepare = vpif_buffer_prepare, .start_streaming = vpif_start_streaming, .stop_streaming = vpif_stop_streaming, .buf_queue = vpif_buffer_queue, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; /** * vpif_process_buffer_complete: process a completed buffer * @common: ptr to common channel object * * This function time stamp the buffer and mark it as DONE. 
It also * wake up any process waiting on the QUEUE and set the next buffer * as current */ static void vpif_process_buffer_complete(struct common_obj *common) { v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp); vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_DONE); /* Make curFrm pointing to nextFrm */ common->cur_frm = common->next_frm; } /** * vpif_schedule_next_buffer: set next buffer address for capture * @common : ptr to common channel object * * This function will get next buffer from the dma queue and * set the buffer address in the vpif register for capture. * the buffer is marked active */ static void vpif_schedule_next_buffer(struct common_obj *common) { unsigned long addr = 0; spin_lock(&common->irqlock); common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); /* Remove that buffer from the buffer queue */ list_del(&common->next_frm->list); spin_unlock(&common->irqlock); addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0); /* Set top and bottom field addresses in VPIF registers */ common->set_addr(addr + common->ytop_off, addr + common->ybtm_off, addr + common->ctop_off, addr + common->cbtm_off); } /** * vpif_channel_isr : ISR handler for vpif capture * @irq: irq number * @dev_id: dev_id ptr * * It changes status of the captured buffer, takes next buffer from the queue * and sets its address in VPIF registers */ static irqreturn_t vpif_channel_isr(int irq, void *dev_id) { struct vpif_device *dev = &vpif_obj; struct common_obj *common; struct channel_obj *ch; int channel_id = 0; int fid = -1, i; channel_id = *(int *)(dev_id); if (!vpif_intr_status(channel_id)) return IRQ_NONE; ch = dev->dev[channel_id]; for (i = 0; i < VPIF_NUMBER_OF_OBJECTS; i++) { common = &ch->common[i]; /* skip If streaming is not started in this channel */ /* Check the field format */ if (1 == ch->vpifparams.std_info.frm_fmt) { /* Progressive mode */ spin_lock(&common->irqlock); if (list_empty(&common->dma_queue)) { 
spin_unlock(&common->irqlock); continue; } spin_unlock(&common->irqlock); if (!channel_first_int[i][channel_id]) vpif_process_buffer_complete(common); channel_first_int[i][channel_id] = 0; vpif_schedule_next_buffer(common); channel_first_int[i][channel_id] = 0; } else { /** * Interlaced mode. If it is first interrupt, ignore * it */ if (channel_first_int[i][channel_id]) { channel_first_int[i][channel_id] = 0; continue; } if (0 == i) { ch->field_id ^= 1; /* Get field id from VPIF registers */ fid = vpif_channel_getfid(ch->channel_id); if (fid != ch->field_id) { /** * If field id does not match stored * field id, make them in sync */ if (0 == fid) ch->field_id = fid; return IRQ_HANDLED; } } /* device field id and local field id are in sync */ if (0 == fid) { /* this is even field */ if (common->cur_frm == common->next_frm) continue; /* mark the current buffer as done */ vpif_process_buffer_complete(common); } else if (1 == fid) { /* odd field */ spin_lock(&common->irqlock); if (list_empty(&common->dma_queue) || (common->cur_frm != common->next_frm)) { spin_unlock(&common->irqlock); continue; } spin_unlock(&common->irqlock); vpif_schedule_next_buffer(common); } } } return IRQ_HANDLED; } /** * vpif_update_std_info() - update standard related info * @ch: ptr to channel object * * For a given standard selected by application, update values * in the device data structures */ static int vpif_update_std_info(struct channel_obj *ch) { struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_params *vpifparams = &ch->vpifparams; const struct vpif_channel_config_params *config; struct vpif_channel_config_params *std_info = &vpifparams->std_info; struct video_obj *vid_ch = &ch->video; int index; vpif_dbg(2, debug, "vpif_update_std_info\n"); for (index = 0; index < vpif_ch_params_count; index++) { config = &vpif_ch_params[index]; if (config->hd_sd == 0) { vpif_dbg(2, debug, "SD format\n"); if (config->stdid & vid_ch->stdid) { memcpy(std_info, config, 
sizeof(*config)); break; } } else { vpif_dbg(2, debug, "HD format\n"); if (!memcmp(&config->dv_timings, &vid_ch->dv_timings, sizeof(vid_ch->dv_timings))) { memcpy(std_info, config, sizeof(*config)); break; } } } /* standard not found */ if (index == vpif_ch_params_count) return -EINVAL; common->fmt.fmt.pix.width = std_info->width; common->width = std_info->width; common->fmt.fmt.pix.height = std_info->height; common->height = std_info->height; common->fmt.fmt.pix.sizeimage = common->height * common->width * 2; common->fmt.fmt.pix.bytesperline = std_info->width; vpifparams->video_params.hpitch = std_info->width; vpifparams->video_params.storage_mode = std_info->frm_fmt; if (vid_ch->stdid) common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; else common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709; if (ch->vpifparams.std_info.frm_fmt) common->fmt.fmt.pix.field = V4L2_FIELD_NONE; else common->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED; if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8; else common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P; common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; return 0; } /** * vpif_calculate_offsets : This function calculates buffers offsets * @ch : ptr to channel object * * This function calculates buffer offsets for Y and C in the top and * bottom field */ static void vpif_calculate_offsets(struct channel_obj *ch) { unsigned int hpitch, sizeimage; struct video_obj *vid_ch = &(ch->video); struct vpif_params *vpifparams = &ch->vpifparams; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; enum v4l2_field field = common->fmt.fmt.pix.field; vpif_dbg(2, debug, "vpif_calculate_offsets\n"); if (V4L2_FIELD_ANY == field) { if (vpifparams->std_info.frm_fmt) vid_ch->buf_field = V4L2_FIELD_NONE; else vid_ch->buf_field = V4L2_FIELD_INTERLACED; } else vid_ch->buf_field = common->fmt.fmt.pix.field; sizeimage = common->fmt.fmt.pix.sizeimage; hpitch = 
common->fmt.fmt.pix.bytesperline; if ((V4L2_FIELD_NONE == vid_ch->buf_field) || (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) { /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */ common->ytop_off = 0; common->ybtm_off = hpitch; common->ctop_off = sizeimage / 2; common->cbtm_off = sizeimage / 2 + hpitch; } else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) { /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */ common->ytop_off = 0; common->ybtm_off = sizeimage / 4; common->ctop_off = sizeimage / 2; common->cbtm_off = common->ctop_off + sizeimage / 4; } else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) { /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */ common->ybtm_off = 0; common->ytop_off = sizeimage / 4; common->cbtm_off = sizeimage / 2; common->ctop_off = common->cbtm_off + sizeimage / 4; } if ((V4L2_FIELD_NONE == vid_ch->buf_field) || (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) vpifparams->video_params.storage_mode = 1; else vpifparams->video_params.storage_mode = 0; if (1 == vpifparams->std_info.frm_fmt) vpifparams->video_params.hpitch = common->fmt.fmt.pix.bytesperline; else { if ((field == V4L2_FIELD_ANY) || (field == V4L2_FIELD_INTERLACED)) vpifparams->video_params.hpitch = common->fmt.fmt.pix.bytesperline * 2; else vpifparams->video_params.hpitch = common->fmt.fmt.pix.bytesperline; } ch->vpifparams.video_params.stdid = vpifparams->std_info.stdid; } /** * vpif_get_default_field() - Get default field type based on interface * @vpif_params - ptr to vpif params */ static inline enum v4l2_field vpif_get_default_field( struct vpif_interface *iface) { return (iface->if_type == VPIF_IF_RAW_BAYER) ? 
V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED;
}

/**
 * vpif_config_addr() - function to configure buffer address in vpif
 * @ch - channel ptr
 * @muxmode - channel mux mode
 *
 * Selects which register-write helper common->set_addr points at:
 * channel 1 always uses its own helper; channel 0 uses the YC non-mux
 * helper when running in 2-channel mux mode, the plain helper otherwise.
 */
static void vpif_config_addr(struct channel_obj *ch, int muxmode)
{
	struct common_obj *common;

	vpif_dbg(2, debug, "vpif_config_addr\n");

	common = &(ch->common[VPIF_VIDEO_INDEX]);

	if (VPIF_CHANNEL1_VIDEO == ch->channel_id)
		common->set_addr = ch1_set_videobuf_addr;
	else if (2 == muxmode)
		common->set_addr = ch0_set_videobuf_addr_yc_nmux;
	else
		common->set_addr = ch0_set_videobuf_addr;
}

/**
 * vpif_input_to_subdev() - Maps input to sub device
 * @vpif_cfg - global config ptr
 * @chan_cfg - channel config ptr
 * @input_index - Given input index from application
 *
 * lookup the sub device information for a given input index.
 * we report all the inputs to application. inputs table also
 * has sub device name for the each input
 *
 * Returns the index into @vpif_cfg->subdev_info on success, or -1 when
 * the input has no subdev name or no matching sub device is registered
 * (some inputs are routed directly to VPIF with no sub device at all).
 */
static int vpif_input_to_subdev(
		struct vpif_capture_config *vpif_cfg,
		struct vpif_capture_chan_config *chan_cfg,
		int input_index)
{
	struct vpif_subdev_info *subdev_info;
	const char *subdev_name;
	int i;

	vpif_dbg(2, debug, "vpif_input_to_subdev\n");

	subdev_name = chan_cfg->inputs[input_index].subdev_name;
	if (subdev_name == NULL)
		return -1;

	/* loop through the sub device list to get the sub device info */
	for (i = 0; i < vpif_cfg->subdev_count; i++) {
		subdev_info = &vpif_cfg->subdev_info[i];
		if (!strcmp(subdev_info->name, subdev_name))
			return i;
	}
	return -1;
}

/**
 * vpif_set_input() - Select an input
 * @vpif_cfg - global config ptr
 * @ch - channel
 * @index - Given input index from application
 *
 * Select the given input.
 */
static int vpif_set_input(
		struct vpif_capture_config *vpif_cfg,
		struct channel_obj *ch,
		int index)
{
	struct vpif_capture_chan_config *chan_cfg =
			&vpif_cfg->chan_config[ch->channel_id];
	struct vpif_subdev_info *subdev_info = NULL;
	struct v4l2_subdev *sd = NULL;
	u32 input = 0, output = 0;
	int sd_index;
	int ret;

	/* sd_index < 0 means this input has no sub device attached; the
	 * input-path and routing steps below are then skipped entirely. */
	sd_index = vpif_input_to_subdev(vpif_cfg, chan_cfg, index);
	if (sd_index >= 0) {
		sd = vpif_obj.sd[sd_index];
		subdev_info = &vpif_cfg->subdev_info[sd_index];
	}

	/* first setup input path from sub device to vpif */
	if (sd && vpif_cfg->setup_input_path) {
		ret = vpif_cfg->setup_input_path(ch->channel_id,
				subdev_info->name);
		if (ret < 0) {
			vpif_dbg(1, debug, "couldn't setup input path for the" \
				" sub device %s, for input index %d\n",
				subdev_info->name, index);
			return ret;
		}
	}

	/* program the sub device's signal routing for this input */
	if (sd) {
		input = chan_cfg->inputs[index].input_route;
		output = chan_cfg->inputs[index].output_route;
		ret = v4l2_subdev_call(sd, video, s_routing,
				input, output, 0);
		/* -ENOIOCTLCMD only means the subdev has no s_routing op */
		if (ret < 0 && ret != -ENOIOCTLCMD) {
			vpif_dbg(1, debug, "Failed to set input\n");
			return ret;
		}
	}
	ch->input_idx = index;
	ch->sd = sd;
	/* copy interface parameters to vpif */
	ch->vpifparams.iface = chan_cfg->vpif_if;

	/* update tvnorms from the sub device input info */
	ch->video_dev.tvnorms = chan_cfg->inputs[index].input.std;
	return 0;
}

/**
 * vpif_querystd() - querystd handler
 * @file: file ptr
 * @priv: file handle
 * @std_id: ptr to std id
 *
 * This function is called to detect standard at the selected input
 */
static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	int ret = 0;

	vpif_dbg(2, debug, "vpif_querystd\n");

	/* Call querystd function of decoder device */
	ret = v4l2_subdev_call(ch->sd, video, querystd, std_id);
	/* a missing op or missing device is reported as "no signal data" */
	if (ret == -ENOIOCTLCMD || ret == -ENODEV)
		return -ENODATA;
	if (ret) {
		vpif_dbg(1, debug, "Failed to query standard for sub devices\n");
		return ret;
	}

	return 0;
}

/**
 * vpif_g_std() - get STD
handler
 * @file: file ptr
 * @priv: file handle
 * @std_id: ptr to std id
 */
static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;

	vpif_dbg(2, debug, "vpif_g_std\n");

	/* no inputs configured for this channel */
	if (config->chan_config[ch->channel_id].inputs == NULL)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	/* STD ioctls apply only to inputs advertising STD capability */
	if (input.capabilities != V4L2_IN_CAP_STD)
		return -ENODATA;

	*std = ch->video.stdid;
	return 0;
}

/**
 * vpif_s_std() - set STD handler
 * @file: file ptr
 * @priv: file handle
 * @std_id: ptr to std id
 */
static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;
	int ret;

	vpif_dbg(2, debug, "vpif_s_std\n");

	if (config->chan_config[ch->channel_id].inputs == NULL)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_STD)
		return -ENODATA;

	/* the standard cannot change while buffers are queued */
	if (vb2_is_busy(&common->buffer_queue))
		return -EBUSY;

	/* Call encoder subdevice function to set the standard */
	ch->video.stdid = std_id;
	/* an analog standard and DV timings are mutually exclusive */
	memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));

	/* Get the information about the standard */
	if (vpif_update_std_info(ch)) {
		vpif_err("Error getting the standard info\n");
		return -EINVAL;
	}

	/* set standard in the sub device */
	ret = v4l2_subdev_call(ch->sd, video, s_std, std_id);
	if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) {
		vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
		return ret;
	}
	return 0;
}
/**
 * vpif_enum_input() - ENUMINPUT handler
 * @file: file ptr
 * @priv: file handle
 * @input: ptr to input structure
 *
 * Copies the platform-provided description for the requested input index
 * into @input; returns -EINVAL when the index is out of range.
 */
static int vpif_enum_input(struct file *file, void *priv,
				struct v4l2_input *input)
{

	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct vpif_capture_chan_config *chan_cfg;

	chan_cfg = &config->chan_config[ch->channel_id];

	if (input->index >= chan_cfg->input_count)
		return -EINVAL;

	memcpy(input, &chan_cfg->inputs[input->index].input,
		sizeof(*input));
	return 0;
}

/**
 * vpif_g_input() - Get INPUT handler
 * @file: file ptr
 * @priv: file handle
 * @index: ptr to input index
 */
static int vpif_g_input(struct file *file, void *priv, unsigned int *index)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);

	*index = ch->input_idx;
	return 0;
}

/**
 * vpif_s_input() - Set INPUT handler
 * @file: file ptr
 * @priv: file handle
 * @index: input index
 */
static int vpif_s_input(struct file *file, void *priv, unsigned int index)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct vpif_capture_chan_config *chan_cfg;

	chan_cfg = &config->chan_config[ch->channel_id];

	if (index >= chan_cfg->input_count)
		return -EINVAL;

	/* switching inputs is not allowed while streaming */
	if (vb2_is_busy(&common->buffer_queue))
		return -EBUSY;

	return vpif_set_input(config, ch, index);
}

/**
 * vpif_enum_fmt_vid_cap() - ENUM_FMT handler
 * @file: file ptr
 * @priv: file handle
 * @fmt: ptr to format descriptor (only index 0 is valid)
 */
static int vpif_enum_fmt_vid_cap(struct file *file, void  *priv,
					struct v4l2_fmtdesc *fmt)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);

	if (fmt->index != 0) {
		vpif_dbg(1, debug, "Invalid format index\n");
		return -EINVAL;
	}

	/* Fill in the information about format
*/ if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) { fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; strcpy(fmt->description, "Raw Mode -Bayer Pattern GrRBGb"); fmt->pixelformat = V4L2_PIX_FMT_SBGGR8; } else { fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; strcpy(fmt->description, "YCbCr4:2:2 YC Planar"); fmt->pixelformat = V4L2_PIX_FMT_YUV422P; } return 0; } /** * vpif_try_fmt_vid_cap() - TRY_FMT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]); struct vpif_params *vpif_params = &ch->vpifparams; /* * to supress v4l-compliance warnings silently correct * the pixelformat */ if (vpif_params->iface.if_type == VPIF_IF_RAW_BAYER) { if (pixfmt->pixelformat != V4L2_PIX_FMT_SBGGR8) pixfmt->pixelformat = V4L2_PIX_FMT_SBGGR8; } else { if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P) pixfmt->pixelformat = V4L2_PIX_FMT_YUV422P; } common->fmt.fmt.pix.pixelformat = pixfmt->pixelformat; vpif_update_std_info(ch); pixfmt->field = common->fmt.fmt.pix.field; pixfmt->colorspace = common->fmt.fmt.pix.colorspace; pixfmt->bytesperline = common->fmt.fmt.pix.width; pixfmt->width = common->fmt.fmt.pix.width; pixfmt->height = common->fmt.fmt.pix.height; pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height * 2; pixfmt->priv = 0; return 0; } /** * vpif_g_fmt_vid_cap() - Set INPUT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; /* Check the validity of the buffer type */ if 
(common->fmt.type != fmt->type) return -EINVAL; /* Fill in the information about format */ *fmt = common->fmt; return 0; } /** * vpif_s_fmt_vid_cap() - Set FMT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; int ret; vpif_dbg(2, debug, "%s\n", __func__); if (vb2_is_busy(&common->buffer_queue)) return -EBUSY; ret = vpif_try_fmt_vid_cap(file, priv, fmt); if (ret) return ret; /* store the format in the channel object */ common->fmt = *fmt; return 0; } /** * vpif_querycap() - QUERYCAP handler * @file: file ptr * @priv: file handle * @cap: ptr to v4l2_capability structure */ static int vpif_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct vpif_capture_config *config = vpif_dev->platform_data; cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; strlcpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", dev_name(vpif_dev)); strlcpy(cap->card, config->card_name, sizeof(cap->card)); return 0; } /** * vpif_enum_dv_timings() - ENUM_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: input timings */ static int vpif_enum_dv_timings(struct file *file, void *priv, struct v4l2_enum_dv_timings *timings) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct vpif_capture_chan_config *chan_cfg; struct v4l2_input input; int ret; if (config->chan_config[ch->channel_id].inputs == NULL) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; input = chan_cfg->inputs[ch->input_idx].input; if 
(input.capabilities != V4L2_IN_CAP_DV_TIMINGS) return -ENODATA; timings->pad = 0; ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings); if (ret == -ENOIOCTLCMD || ret == -ENODEV) return -EINVAL; return ret; } /** * vpif_query_dv_timings() - QUERY_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: input timings */ static int vpif_query_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct vpif_capture_chan_config *chan_cfg; struct v4l2_input input; int ret; if (config->chan_config[ch->channel_id].inputs == NULL) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; input = chan_cfg->inputs[ch->input_idx].input; if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS) return -ENODATA; ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings); if (ret == -ENOIOCTLCMD || ret == -ENODEV) return -ENODATA; return ret; } /** * vpif_s_dv_timings() - S_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: digital video timings */ static int vpif_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct vpif_params *vpifparams = &ch->vpifparams; struct vpif_channel_config_params *std_info = &vpifparams->std_info; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct video_obj *vid_ch = &ch->video; struct v4l2_bt_timings *bt = &vid_ch->dv_timings.bt; struct vpif_capture_chan_config *chan_cfg; struct v4l2_input input; int ret; if (config->chan_config[ch->channel_id].inputs == NULL) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; input = chan_cfg->inputs[ch->input_idx].input; if (input.capabilities != 
V4L2_IN_CAP_DV_TIMINGS) return -ENODATA; if (timings->type != V4L2_DV_BT_656_1120) { vpif_dbg(2, debug, "Timing type not defined\n"); return -EINVAL; } if (vb2_is_busy(&common->buffer_queue)) return -EBUSY; /* Configure subdevice timings, if any */ ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings); if (ret == -ENOIOCTLCMD || ret == -ENODEV) ret = 0; if (ret < 0) { vpif_dbg(2, debug, "Error setting custom DV timings\n"); return ret; } if (!(timings->bt.width && timings->bt.height && (timings->bt.hbackporch || timings->bt.hfrontporch || timings->bt.hsync) && timings->bt.vfrontporch && (timings->bt.vbackporch || timings->bt.vsync))) { vpif_dbg(2, debug, "Timings for width, height, " "horizontal back porch, horizontal sync, " "horizontal front porch, vertical back porch, " "vertical sync and vertical back porch " "must be defined\n"); return -EINVAL; } vid_ch->dv_timings = *timings; /* Configure video port timings */ std_info->eav2sav = V4L2_DV_BT_BLANKING_WIDTH(bt) - 8; std_info->sav2eav = bt->width; std_info->l1 = 1; std_info->l3 = bt->vsync + bt->vbackporch + 1; std_info->vsize = V4L2_DV_BT_FRAME_HEIGHT(bt); if (bt->interlaced) { if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) { std_info->l5 = std_info->vsize/2 - (bt->vfrontporch - 1); std_info->l7 = std_info->vsize/2 + 1; std_info->l9 = std_info->l7 + bt->il_vsync + bt->il_vbackporch + 1; std_info->l11 = std_info->vsize - (bt->il_vfrontporch - 1); } else { vpif_dbg(2, debug, "Required timing values for " "interlaced BT format missing\n"); return -EINVAL; } } else { std_info->l5 = std_info->vsize - (bt->vfrontporch - 1); } strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME); std_info->width = bt->width; std_info->height = bt->height; std_info->frm_fmt = bt->interlaced ? 
0 : 1; std_info->ycmux_mode = 0; std_info->capture_format = 0; std_info->vbi_supported = 0; std_info->hd_sd = 1; std_info->stdid = 0; vid_ch->stdid = 0; return 0; } /** * vpif_g_dv_timings() - G_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: digital video timings */ static int vpif_g_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct video_obj *vid_ch = &ch->video; struct vpif_capture_chan_config *chan_cfg; struct v4l2_input input; if (config->chan_config[ch->channel_id].inputs == NULL) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; input = chan_cfg->inputs[ch->input_idx].input; if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS) return -ENODATA; *timings = vid_ch->dv_timings; return 0; } /* * vpif_log_status() - Status information * @file: file ptr * @priv: file handle * * Returns zero. 
*/ static int vpif_log_status(struct file *filep, void *priv) { /* status for sub devices */ v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status); return 0; } /* vpif capture ioctl operations */ static const struct v4l2_ioctl_ops vpif_ioctl_ops = { .vidioc_querycap = vpif_querycap, .vidioc_enum_fmt_vid_cap = vpif_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vpif_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vpif_s_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vpif_try_fmt_vid_cap, .vidioc_enum_input = vpif_enum_input, .vidioc_s_input = vpif_s_input, .vidioc_g_input = vpif_g_input, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_querystd = vpif_querystd, .vidioc_s_std = vpif_s_std, .vidioc_g_std = vpif_g_std, .vidioc_enum_dv_timings = vpif_enum_dv_timings, .vidioc_query_dv_timings = vpif_query_dv_timings, .vidioc_s_dv_timings = vpif_s_dv_timings, .vidioc_g_dv_timings = vpif_g_dv_timings, .vidioc_log_status = vpif_log_status, }; /* vpif file operations */ static struct v4l2_file_operations vpif_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, .poll = vb2_fop_poll }; /** * initialize_vpif() - Initialize vpif data structures * * Allocate memory for data structures and initialize them */ static int initialize_vpif(void) { int err, i, j; int free_channel_objects_index; /* Allocate memory for six channel objects */ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { vpif_obj.dev[i] = kzalloc(sizeof(*vpif_obj.dev[i]), GFP_KERNEL); /* If memory allocation fails, return error */ if (!vpif_obj.dev[i]) { free_channel_objects_index = i; err = -ENOMEM; goto vpif_init_free_channel_objects; } } return 0; vpif_init_free_channel_objects: for (j = 0; j < 
free_channel_objects_index; j++) kfree(vpif_obj.dev[j]); return err; } static int vpif_async_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_subdev *asd) { int i; for (i = 0; i < vpif_obj.config->subdev_count; i++) if (!strcmp(vpif_obj.config->subdev_info[i].name, subdev->name)) { vpif_obj.sd[i] = subdev; return 0; } return -EINVAL; } static int vpif_probe_complete(void) { struct common_obj *common; struct video_device *vdev; struct channel_obj *ch; struct vb2_queue *q; int j, err, k; for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) { ch = vpif_obj.dev[j]; ch->channel_id = j; common = &(ch->common[VPIF_VIDEO_INDEX]); spin_lock_init(&common->irqlock); mutex_init(&common->lock); /* select input 0 */ err = vpif_set_input(vpif_obj.config, ch, 0); if (err) goto probe_out; /* set initial format */ ch->video.stdid = V4L2_STD_525_60; memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); vpif_update_std_info(ch); /* Initialize vb2 queue */ q = &common->buffer_queue; q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; q->drv_priv = ch; q->ops = &video_qops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct vpif_cap_buffer); q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->min_buffers_needed = 1; q->lock = &common->lock; err = vb2_queue_init(q); if (err) { vpif_err("vpif_capture: vb2_queue_init() failed\n"); goto probe_out; } common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev); if (IS_ERR(common->alloc_ctx)) { vpif_err("Failed to get the context\n"); err = PTR_ERR(common->alloc_ctx); goto probe_out; } INIT_LIST_HEAD(&common->dma_queue); /* Initialize the video_device structure */ vdev = &ch->video_dev; strlcpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name)); vdev->release = video_device_release_empty; vdev->fops = &vpif_fops; vdev->ioctl_ops = &vpif_ioctl_ops; vdev->v4l2_dev = &vpif_obj.v4l2_dev; vdev->vfl_dir = VFL_DIR_RX; vdev->queue = q; vdev->lock = 
&common->lock; video_set_drvdata(&ch->video_dev, ch); err = video_register_device(vdev, VFL_TYPE_GRABBER, (j ? 1 : 0)); if (err) goto probe_out; } v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n"); return 0; probe_out: for (k = 0; k < j; k++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[k]; common = &ch->common[k]; vb2_dma_contig_cleanup_ctx(common->alloc_ctx); /* Unregister video device */ video_unregister_device(&ch->video_dev); } kfree(vpif_obj.sd); v4l2_device_unregister(&vpif_obj.v4l2_dev); return err; } static int vpif_async_complete(struct v4l2_async_notifier *notifier) { return vpif_probe_complete(); } /** * vpif_probe : This function probes the vpif capture driver * @pdev: platform device pointer * * This creates device entries by register itself to the V4L2 driver and * initializes fields of each channel objects */ static __init int vpif_probe(struct platform_device *pdev) { struct vpif_subdev_info *subdevdata; struct i2c_adapter *i2c_adap; struct resource *res; int subdev_count; int res_idx = 0; int i, err; vpif_dev = &pdev->dev; err = initialize_vpif(); if (err) { v4l2_err(vpif_dev->driver, "Error initializing vpif\n"); return err; } err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev); if (err) { v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n"); return err; } while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) { err = devm_request_irq(&pdev->dev, res->start, vpif_channel_isr, IRQF_SHARED, VPIF_DRIVER_NAME, (void *)(&vpif_obj.dev[res_idx]-> channel_id)); if (err) { err = -EINVAL; goto vpif_unregister; } res_idx++; } vpif_obj.config = pdev->dev.platform_data; subdev_count = vpif_obj.config->subdev_count; vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count, GFP_KERNEL); if (vpif_obj.sd == NULL) { vpif_err("unable to allocate memory for subdevice pointers\n"); err = -ENOMEM; goto vpif_unregister; } if (!vpif_obj.config->asd_sizes) { i2c_adap = i2c_get_adapter(1); for (i = 
0; i < subdev_count; i++) { subdevdata = &vpif_obj.config->subdev_info[i]; vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev, i2c_adap, &subdevdata-> board_info, NULL); if (!vpif_obj.sd[i]) { vpif_err("Error registering v4l2 subdevice\n"); err = -ENODEV; goto probe_subdev_out; } v4l2_info(&vpif_obj.v4l2_dev, "registered sub device %s\n", subdevdata->name); } vpif_probe_complete(); } else { vpif_obj.notifier.subdevs = vpif_obj.config->asd; vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0]; vpif_obj.notifier.bound = vpif_async_bound; vpif_obj.notifier.complete = vpif_async_complete; err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev, &vpif_obj.notifier); if (err) { vpif_err("Error registering async notifier\n"); err = -EINVAL; goto probe_subdev_out; } } return 0; probe_subdev_out: /* free sub devices memory */ kfree(vpif_obj.sd); vpif_unregister: v4l2_device_unregister(&vpif_obj.v4l2_dev); return err; } /** * vpif_remove() - driver remove handler * @device: ptr to platform device structure * * The vidoe device is unregistered */ static int vpif_remove(struct platform_device *device) { struct common_obj *common; struct channel_obj *ch; int i; v4l2_device_unregister(&vpif_obj.v4l2_dev); kfree(vpif_obj.sd); /* un-register device */ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; common = &ch->common[VPIF_VIDEO_INDEX]; vb2_dma_contig_cleanup_ctx(common->alloc_ctx); /* Unregister video device */ video_unregister_device(&ch->video_dev); kfree(vpif_obj.dev[i]); } return 0; } #ifdef CONFIG_PM_SLEEP /** * vpif_suspend: vpif device suspend */ static int vpif_suspend(struct device *dev) { struct common_obj *common; struct channel_obj *ch; int i; for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; common = &ch->common[VPIF_VIDEO_INDEX]; if (!vb2_start_streaming_called(&common->buffer_queue)) continue; 
mutex_lock(&common->lock); /* Disable channel */ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) { enable_channel0(0); channel0_intr_enable(0); } if (ch->channel_id == VPIF_CHANNEL1_VIDEO || ycmux_mode == 2) { enable_channel1(0); channel1_intr_enable(0); } mutex_unlock(&common->lock); } return 0; } /* * vpif_resume: vpif device suspend */ static int vpif_resume(struct device *dev) { struct common_obj *common; struct channel_obj *ch; int i; for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; common = &ch->common[VPIF_VIDEO_INDEX]; if (!vb2_start_streaming_called(&common->buffer_queue)) continue; mutex_lock(&common->lock); /* Enable channel */ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) { enable_channel0(1); channel0_intr_enable(1); } if (ch->channel_id == VPIF_CHANNEL1_VIDEO || ycmux_mode == 2) { enable_channel1(1); channel1_intr_enable(1); } mutex_unlock(&common->lock); } return 0; } #endif static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume); static __refdata struct platform_driver vpif_driver = { .driver = { .name = VPIF_DRIVER_NAME, .pm = &vpif_pm_ops, }, .probe = vpif_probe, .remove = vpif_remove, }; module_platform_driver(vpif_driver);
gpl-2.0
championswimmer/android_kernel_sony_huashan
arch/arm/mach-msm/qdsp5v2/audio_qcelp_in.c
613
41176
/* * qcelp audio input device * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <asm/atomic.h> #include <asm/ioctls.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/dma-mapping.h> #include <linux/msm_audio_qcp.h> #include <linux/android_pmem.h> #include <linux/memory_alloc.h> #include <mach/msm_adsp.h> #include <mach/iommu.h> #include <mach/iommu_domains.h> #include <mach/socinfo.h> #include <mach/qdsp5v2/qdsp5audreccmdi.h> #include <mach/qdsp5v2/qdsp5audrecmsg.h> #include <mach/qdsp5v2/audpreproc.h> #include <mach/qdsp5v2/audio_dev_ctl.h> #include <mach/debug_mm.h> #include <mach/msm_memtypes.h> #define META_OUT_SIZE 24 /* FRAME_NUM must be a power of two */ #define FRAME_NUM 8 #define QCELP_FRAME_SIZE 36 /* 36 bytes data */ #define FRAME_SIZE (22 * 2) /* 36 bytes data */ /* 36 bytes data + 24 meta field*/ #define NT_FRAME_SIZE (QCELP_FRAME_SIZE + META_OUT_SIZE) #define DMASZ (NT_FRAME_SIZE * FRAME_NUM) #define OUT_FRAME_NUM (2) #define OUT_BUFFER_SIZE (4 * 1024 + META_OUT_SIZE) #define BUFFER_SIZE (OUT_BUFFER_SIZE * OUT_FRAME_NUM) #define AUDPREPROC_QCELP_EOS_FLG_OFFSET 0x0A #define AUDPREPROC_QCELP_EOS_FLG_MASK 0x01 #define AUDPREPROC_QCELP_EOS_NONE 0x0 /* No EOS detected */ #define AUDPREPROC_QCELP_EOS_SET 0x1 /* EOS set in meta field */ struct buffer { void *data; uint32_t size; uint32_t read; 
uint32_t addr; uint32_t used; uint32_t mfield_sz; }; struct audio_in { struct buffer in[FRAME_NUM]; spinlock_t dsp_lock; atomic_t in_bytes; atomic_t in_samples; struct mutex lock; struct mutex read_lock; wait_queue_head_t wait; wait_queue_head_t wait_enable; /*write section*/ struct buffer out[OUT_FRAME_NUM]; uint8_t out_head; uint8_t out_tail; uint8_t out_needed; /* number of buffers the dsp is waiting for */ uint32_t out_count; struct mutex write_lock; wait_queue_head_t write_wait; int32_t out_phys; /* physical address of write buffer */ char *out_data; int mfield; /* meta field embedded in data */ int wflush; /*write flush */ int rflush; /*read flush*/ int out_frame_cnt; struct msm_adsp_module *audrec; struct audrec_session_info session_info; /*audrec session info*/ /* configuration to use on next enable */ uint32_t buffer_size; /* Frame size (36 bytes) */ uint32_t samp_rate; uint32_t channel_mode; uint32_t enc_type; struct msm_audio_qcelp_enc_config cfg; uint32_t rec_mode; uint32_t dsp_cnt; uint32_t in_head; /* next buffer dsp will write */ uint32_t in_tail; /* next buffer read() will read */ uint32_t in_count; /* number of buffers available to read() */ uint32_t mode; uint32_t eos_ack; uint32_t flush_ack; const char *module_name; unsigned queue_ids; uint16_t enc_id; uint16_t source; /* Encoding source bit mask */ uint32_t device_events; uint32_t in_call; uint32_t dev_cnt; int voice_state; spinlock_t dev_lock; /* data allocated for various buffers */ char *data; dma_addr_t phys; void *map_v_read; void *map_v_write; int opened; int enabled; int running; int stopped; /* set when stopped, cleared on flush */ char *build_id; }; struct audio_frame { uint16_t frame_count_lsw; uint16_t frame_count_msw; uint16_t frame_length; uint16_t erased_pcm; unsigned char raw_bitstream[]; /* samples */ } __attribute__((packed)); struct audio_frame_nt { uint16_t metadata_len; uint16_t frame_count_lsw; uint16_t frame_count_msw; uint16_t frame_length; uint16_t erased_pcm; uint16_t 
reserved; uint16_t time_stamp_dword_lsw; uint16_t time_stamp_dword_msw; uint16_t time_stamp_lsw; uint16_t time_stamp_msw; uint16_t nflag_lsw; uint16_t nflag_msw; unsigned char raw_bitstream[]; /* samples */ } __attribute__((packed)); struct qcelp_encoded_meta_out { uint16_t metadata_len; uint16_t time_stamp_dword_lsw; uint16_t time_stamp_dword_msw; uint16_t time_stamp_lsw; uint16_t time_stamp_msw; uint16_t nflag_lsw; uint16_t nflag_msw; }; /* Audrec Queue command sent macro's */ #define audrec_send_bitstreamqueue(audio, cmd, len) \ msm_adsp_write(audio->audrec, ((audio->queue_ids & 0xFFFF0000) >> 16),\ cmd, len) #define audrec_send_audrecqueue(audio, cmd, len) \ msm_adsp_write(audio->audrec, (audio->queue_ids & 0x0000FFFF),\ cmd, len) /* DSP command send functions */ static int audqcelp_in_enc_config(struct audio_in *audio, int enable); static int audqcelp_in_param_config(struct audio_in *audio); static int audqcelp_in_mem_config(struct audio_in *audio); static int audqcelp_in_record_config(struct audio_in *audio, int enable); static int audqcelp_dsp_read_buffer(struct audio_in *audio, uint32_t read_cnt); static void audqcelp_in_get_dsp_frames(struct audio_in *audio); static int audpcm_config(struct audio_in *audio); static void audqcelp_out_flush(struct audio_in *audio); static int audpreproc_cmd_cfg_routing_mode(struct audio_in *audio); static void audpreproc_pcm_send_data(struct audio_in *audio, unsigned needed); static void audqcelp_nt_in_get_dsp_frames(struct audio_in *audio); static void audqcelp_in_flush(struct audio_in *audio); static void qcelp_in_listener(u32 evt_id, union auddev_evt_data *evt_payload, void *private_data) { struct audio_in *audio = (struct audio_in *) private_data; unsigned long flags; MM_DBG("evt_id = 0x%8x\n", evt_id); switch (evt_id) { case AUDDEV_EVT_DEV_RDY: { MM_DBG("AUDDEV_EVT_DEV_RDY\n"); spin_lock_irqsave(&audio->dev_lock, flags); audio->dev_cnt++; if (!audio->in_call) audio->source |= (0x1 << evt_payload->routing_id); 
spin_unlock_irqrestore(&audio->dev_lock, flags); if ((audio->running == 1) && (audio->enabled == 1) && (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)) audqcelp_in_record_config(audio, 1); } break; case AUDDEV_EVT_DEV_RLS: { MM_DBG("AUDDEV_EVT_DEV_RLS\n"); spin_lock_irqsave(&audio->dev_lock, flags); audio->dev_cnt--; if (!audio->in_call) audio->source &= ~(0x1 << evt_payload->routing_id); spin_unlock_irqrestore(&audio->dev_lock, flags); if ((!audio->running) || (!audio->enabled)) break; if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) { /* Turn of as per source */ if (audio->source) audqcelp_in_record_config(audio, 1); else /* Turn off all */ audqcelp_in_record_config(audio, 0); } } break; case AUDDEV_EVT_VOICE_STATE_CHG: { MM_DBG("AUDDEV_EVT_VOICE_STATE_CHG, state = %d\n", evt_payload->voice_state); audio->voice_state = evt_payload->voice_state; if (audio->in_call && audio->running && (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)) { if (audio->voice_state == VOICE_STATE_INCALL) audqcelp_in_record_config(audio, 1); else if (audio->voice_state == VOICE_STATE_OFFCALL) { audqcelp_in_record_config(audio, 0); wake_up(&audio->wait); } } break; } default: MM_ERR("wrong event %d\n", evt_id); break; } } /* ------------------- dsp preproc event handler--------------------- */ static void audpreproc_dsp_event(void *data, unsigned id, void *msg) { struct audio_in *audio = data; switch (id) { case AUDPREPROC_ERROR_MSG: { struct audpreproc_err_msg *err_msg = msg; MM_ERR("ERROR_MSG: stream id %d err idx %d\n", err_msg->stream_id, err_msg->aud_preproc_err_idx); /* Error case */ wake_up(&audio->wait_enable); break; } case AUDPREPROC_CMD_CFG_DONE_MSG: { MM_DBG("CMD_CFG_DONE_MSG \n"); break; } case AUDPREPROC_CMD_ENC_CFG_DONE_MSG: { struct audpreproc_cmd_enc_cfg_done_msg *enc_cfg_msg = msg; MM_DBG("CMD_ENC_CFG_DONE_MSG: stream id %d enc type \ 0x%8x\n", enc_cfg_msg->stream_id, enc_cfg_msg->rec_enc_type); /* Encoder enable success */ if (enc_cfg_msg->rec_enc_type & ENCODE_ENABLE) { if(audio->mode == 
MSM_AUD_ENC_MODE_NONTUNNEL) { MM_DBG("routing command\n"); audpreproc_cmd_cfg_routing_mode(audio); } else { audqcelp_in_param_config(audio); } } else { /* Encoder disable success */ audio->running = 0; if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) audqcelp_in_record_config(audio, 0); else wake_up(&audio->wait_enable); } break; } case AUDPREPROC_CMD_ENC_PARAM_CFG_DONE_MSG: { MM_DBG("CMD_ENC_PARAM_CFG_DONE_MSG\n"); if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) audqcelp_in_mem_config(audio); else audpcm_config(audio); break; } case AUDPREPROC_CMD_ROUTING_MODE_DONE_MSG: { struct audpreproc_cmd_routing_mode_done\ *routing_cfg_done_msg = msg; if (routing_cfg_done_msg->configuration == 0) { MM_INFO("routing configuration failed\n"); audio->running = 0; } else audqcelp_in_param_config(audio); break; } case AUDPREPROC_AFE_CMD_AUDIO_RECORD_CFG_DONE_MSG: { MM_DBG("AFE_CMD_AUDIO_RECORD_CFG_DONE_MSG \n"); wake_up(&audio->wait_enable); break; } default: MM_ERR("Unknown Event id %d\n", id); } } /* ------------------- dsp audrec event handler--------------------- */ static void audrec_dsp_event(void *data, unsigned id, size_t len, void (*getevent)(void *ptr, size_t len)) { struct audio_in *audio = data; switch (id) { case AUDREC_CMD_MEM_CFG_DONE_MSG: { MM_DBG("CMD_MEM_CFG_DONE MSG DONE\n"); audio->running = 1; if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) { if ((!audio->in_call && (audio->dev_cnt > 0)) || (audio->in_call && (audio->voice_state \ == VOICE_STATE_INCALL))) audqcelp_in_record_config(audio, 1); } else { audpreproc_pcm_send_data(audio, 1); wake_up(&audio->wait_enable); } break; } case AUDREC_FATAL_ERR_MSG: { struct audrec_fatal_err_msg fatal_err_msg; getevent(&fatal_err_msg, AUDREC_FATAL_ERR_MSG_LEN); MM_ERR("FATAL_ERR_MSG: err id %d\n", fatal_err_msg.audrec_err_id); /* Error stop the encoder */ audio->stopped = 1; wake_up(&audio->wait); if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) wake_up(&audio->write_wait); break; } case AUDREC_UP_PACKET_READY_MSG: { struct 
audrec_up_pkt_ready_msg pkt_ready_msg; getevent(&pkt_ready_msg, AUDREC_UP_PACKET_READY_MSG_LEN); MM_DBG("UP_PACKET_READY_MSG: write cnt lsw %d \ write cnt msw %d read cnt lsw %d read cnt msw %d \n",\ pkt_ready_msg.audrec_packet_write_cnt_lsw, \ pkt_ready_msg.audrec_packet_write_cnt_msw, \ pkt_ready_msg.audrec_up_prev_read_cnt_lsw, \ pkt_ready_msg.audrec_up_prev_read_cnt_msw); audqcelp_in_get_dsp_frames(audio); break; } case AUDREC_CMD_PCM_BUFFER_PTR_UPDATE_ARM_TO_ENC_MSG: { MM_DBG("ptr_update recieved from DSP\n"); audpreproc_pcm_send_data(audio, 1); break; } case AUDREC_CMD_PCM_CFG_ARM_TO_ENC_DONE_MSG: { MM_ERR("AUDREC_CMD_PCM_CFG_ARM_TO_ENC_DONE_MSG"); audqcelp_in_mem_config(audio); break; } case AUDREC_UP_NT_PACKET_READY_MSG: { struct audrec_up_nt_packet_ready_msg pkt_ready_msg; getevent(&pkt_ready_msg, AUDREC_UP_NT_PACKET_READY_MSG_LEN); MM_DBG("UP_NT_PACKET_READY_MSG: write cnt lsw %d \ write cnt msw %d read cnt lsw %d read cnt msw %d \n",\ pkt_ready_msg.audrec_packetwrite_cnt_lsw, \ pkt_ready_msg.audrec_packetwrite_cnt_msw, \ pkt_ready_msg.audrec_upprev_readcount_lsw, \ pkt_ready_msg.audrec_upprev_readcount_msw); audqcelp_nt_in_get_dsp_frames(audio); break; } case AUDREC_CMD_EOS_ACK_MSG: { MM_DBG("eos ack recieved\n"); break; } case AUDREC_CMD_FLUSH_DONE_MSG: { audio->wflush = 0; audio->rflush = 0; audio->flush_ack = 1; wake_up(&audio->write_wait); MM_DBG("flush ack recieved\n"); break; } case ADSP_MESSAGE_ID: { MM_DBG("Received ADSP event:module audrectask\n"); break; } default: MM_ERR("Unknown Event id %d\n", id); } } static void audqcelp_in_get_dsp_frames(struct audio_in *audio) { struct audio_frame *frame; uint32_t index; unsigned long flags; MM_DBG("head = %d\n", audio->in_head); index = audio->in_head; frame = (void *) (((char *)audio->in[index].data) - \ sizeof(*frame)); spin_lock_irqsave(&audio->dsp_lock, flags); audio->in[index].size = frame->frame_length; /* statistics of read */ atomic_add(audio->in[index].size, &audio->in_bytes); atomic_add(1, 
&audio->in_samples); audio->in_head = (audio->in_head + 1) & (FRAME_NUM - 1); /* If overflow, move the tail index foward. */ if (audio->in_head == audio->in_tail) { MM_ERR("Error! not able to keep up the read\n"); audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1); MM_ERR("in_count = %d\n", audio->in_count); } else audio->in_count++; audqcelp_dsp_read_buffer(audio, audio->dsp_cnt++); spin_unlock_irqrestore(&audio->dsp_lock, flags); wake_up(&audio->wait); } static void audqcelp_nt_in_get_dsp_frames(struct audio_in *audio) { struct audio_frame_nt *nt_frame; uint32_t index; unsigned long flags; MM_DBG("head = %d\n", audio->in_head); index = audio->in_head; nt_frame = (void *) (((char *)audio->in[index].data) - \ sizeof(struct audio_frame_nt)); spin_lock_irqsave(&audio->dsp_lock, flags); audio->in[index].size = nt_frame->frame_length; /* statistics of read */ atomic_add(audio->in[index].size, &audio->in_bytes); atomic_add(1, &audio->in_samples); audio->in_head = (audio->in_head + 1) & (FRAME_NUM - 1); /* If overflow, move the tail index foward. */ if (audio->in_head == audio->in_tail) MM_DBG("Error! 
not able to keep up the read\n"); else audio->in_count++; spin_unlock_irqrestore(&audio->dsp_lock, flags); wake_up(&audio->wait); } struct msm_adsp_ops audrec_qcelp_adsp_ops = { .event = audrec_dsp_event, }; static int audpreproc_pcm_buffer_ptr_refresh(struct audio_in *audio, unsigned idx, unsigned len) { struct audrec_cmd_pcm_buffer_ptr_refresh_arm_enc cmd; if (len == META_OUT_SIZE) len = len / 2; else len = (len + META_OUT_SIZE) / 2; MM_DBG("len = %d\n", len); memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDREC_CMD_PCM_BUFFER_PTR_REFRESH_ARM_TO_ENC; cmd.num_buffers = 1; if (cmd.num_buffers == 1) { cmd.buf_address_length[0] = (audio->out[idx].addr & 0xffff0000) >> 16; cmd.buf_address_length[1] = (audio->out[idx].addr & 0x0000ffff); cmd.buf_address_length[2] = (len & 0xffff0000) >> 16; cmd.buf_address_length[3] = (len & 0x0000ffff); } audio->out_frame_cnt++; return audrec_send_audrecqueue(audio, (void *)&cmd, (unsigned int)sizeof(cmd)); } static int audpcm_config(struct audio_in *audio) { struct audrec_cmd_pcm_cfg_arm_to_enc cmd; MM_DBG("\n"); memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDREC_CMD_PCM_CFG_ARM_TO_ENC; cmd.config_update_flag = AUDREC_PCM_CONFIG_UPDATE_FLAG_ENABLE; cmd.enable_flag = AUDREC_ENABLE_FLAG_VALUE; cmd.sampling_freq = audio->samp_rate; if (!audio->channel_mode) cmd.channels = 1; else cmd.channels = 2; cmd.frequency_of_intimation = 1; cmd.max_number_of_buffers = OUT_FRAME_NUM; return audrec_send_audrecqueue(audio, (void *)&cmd, (unsigned int)sizeof(cmd)); } static int audpreproc_cmd_cfg_routing_mode(struct audio_in *audio) { struct audpreproc_audrec_cmd_routing_mode cmd; MM_DBG("\n"); memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDPREPROC_AUDREC_CMD_ROUTING_MODE; cmd.stream_id = audio->enc_id; if (audio->mode == MSM_ADSP_ENC_MODE_NON_TUNNEL) cmd.routing_mode = 1; return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd)); } static int audqcelp_in_enc_config(struct audio_in *audio, int enable) { struct audpreproc_audrec_cmd_enc_cfg cmd; memset(&cmd, 
0, sizeof(cmd)); if (audio->build_id[17] == '1') { cmd.cmd_id = AUDPREPROC_AUDREC_CMD_ENC_CFG_2; MM_ERR("sending AUDPREPROC_AUDREC_CMD_ENC_CFG_2 command"); } else { cmd.cmd_id = AUDPREPROC_AUDREC_CMD_ENC_CFG; MM_ERR("sending AUDPREPROC_AUDREC_CMD_ENC_CFG command"); } cmd.stream_id = audio->enc_id; if (enable) cmd.audrec_enc_type = audio->enc_type | ENCODE_ENABLE; else cmd.audrec_enc_type &= ~(ENCODE_ENABLE); return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd)); } static int audqcelp_in_param_config(struct audio_in *audio) { struct audpreproc_audrec_cmd_parm_cfg_qcelp13k cmd; memset(&cmd, 0, sizeof(cmd)); cmd.common.cmd_id = AUDPREPROC_AUDREC_CMD_PARAM_CFG; cmd.common.stream_id = audio->enc_id; cmd.enc_min_rate = audio->cfg.min_bit_rate; cmd.enc_max_rate = audio->cfg.max_bit_rate; cmd.rate_modulation_cmd = 0; /* Default set to 0 */ cmd.reduced_rate_level = 0; /* Default set to 0 */ return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd)); } /* To Do: msm_snddev_route_enc(audio->enc_id); */ static int audqcelp_in_record_config(struct audio_in *audio, int enable) { struct audpreproc_afe_cmd_audio_record_cfg cmd; memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDPREPROC_AFE_CMD_AUDIO_RECORD_CFG; cmd.stream_id = audio->enc_id; if (enable) cmd.destination_activity = AUDIO_RECORDING_TURN_ON; else cmd.destination_activity = AUDIO_RECORDING_TURN_OFF; cmd.source_mix_mask = audio->source; if (audio->enc_id == 2) { if ((cmd.source_mix_mask & INTERNAL_CODEC_TX_SOURCE_MIX_MASK) || (cmd.source_mix_mask & AUX_CODEC_TX_SOURCE_MIX_MASK) || (cmd.source_mix_mask & VOICE_UL_SOURCE_MIX_MASK) || (cmd.source_mix_mask & VOICE_DL_SOURCE_MIX_MASK)) { cmd.pipe_id = SOURCE_PIPE_1; } if (cmd.source_mix_mask & AUDPP_A2DP_PIPE_SOURCE_MIX_MASK) cmd.pipe_id |= SOURCE_PIPE_0; } MM_DBG("stream_id %x destination_activity %x \ source_mix_mask %x pipe_id %x",\ cmd.stream_id, cmd.destination_activity, cmd.source_mix_mask, cmd.pipe_id); return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd)); } static 
int audqcelp_in_mem_config(struct audio_in *audio) { struct audrec_cmd_arecmem_cfg cmd; uint16_t *data = (void *) audio->data; int n; memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDREC_CMD_MEM_CFG_CMD; cmd.audrec_up_pkt_intm_count = 1; cmd.audrec_ext_pkt_start_addr_msw = audio->phys >> 16; cmd.audrec_ext_pkt_start_addr_lsw = audio->phys; cmd.audrec_ext_pkt_buf_number = FRAME_NUM; MM_DBG("audio->phys = %x\n", audio->phys); /* prepare buffer pointers: * T:36 bytes qcelp ppacket + 4 halfword header * NT:36 bytes qcelp packet + 12 halfword header */ for (n = 0; n < FRAME_NUM; n++) { if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) { audio->in[n].data = data + 4; data += (FRAME_SIZE/2); MM_DBG("0x%8x\n", (int)(audio->in[n].data - 8)); } else { audio->in[n].data = data + 12; data += ((QCELP_FRAME_SIZE) / 2) + 12; MM_DBG("0x%8x\n", (int)(audio->in[n].data - 24)); } } return audrec_send_audrecqueue(audio, &cmd, sizeof(cmd)); } static int audqcelp_dsp_read_buffer(struct audio_in *audio, uint32_t read_cnt) { struct up_audrec_packet_ext_ptr cmd; memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = UP_AUDREC_PACKET_EXT_PTR; cmd.audrec_up_curr_read_count_msw = read_cnt >> 16; cmd.audrec_up_curr_read_count_lsw = read_cnt; return audrec_send_bitstreamqueue(audio, &cmd, sizeof(cmd)); } static int audqcelp_flush_command(struct audio_in *audio) { struct audrec_cmd_flush cmd; MM_DBG("\n"); memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDREC_CMD_FLUSH; return audrec_send_audrecqueue(audio, &cmd, sizeof(cmd)); } /* must be called with audio->lock held */ static int audqcelp_in_enable(struct audio_in *audio) { if (audio->enabled) return 0; if (audpreproc_enable(audio->enc_id, &audpreproc_dsp_event, audio)) { MM_ERR("msm_adsp_enable(audpreproc) failed\n"); return -ENODEV; } if (msm_adsp_enable(audio->audrec)) { MM_ERR("msm_adsp_enable(audrec) failed\n"); audpreproc_disable(audio->enc_id, audio); return -ENODEV; } audio->enabled = 1; audqcelp_in_enc_config(audio, 1); return 0; } /* must be called with 
audio->lock held */ static int audqcelp_in_disable(struct audio_in *audio) { if (audio->enabled) { audio->enabled = 0; audqcelp_in_enc_config(audio, 0); wake_up(&audio->wait); wait_event_interruptible_timeout(audio->wait_enable, audio->running == 0, 1*HZ); msm_adsp_disable(audio->audrec); audpreproc_disable(audio->enc_id, audio); } return 0; } static void audqcelp_ioport_reset(struct audio_in *audio) { /* Make sure read/write thread are free from * sleep and knowing that system is not able * to process io request at the moment */ wake_up(&audio->write_wait); mutex_lock(&audio->write_lock); audqcelp_in_flush(audio); mutex_unlock(&audio->write_lock); wake_up(&audio->wait); mutex_lock(&audio->read_lock); audqcelp_out_flush(audio); mutex_unlock(&audio->read_lock); } static void audqcelp_in_flush(struct audio_in *audio) { int i; audio->dsp_cnt = 0; audio->in_head = 0; audio->in_tail = 0; audio->in_count = 0; audio->eos_ack = 0; for (i = 0; i < FRAME_NUM; i++) { audio->in[i].size = 0; audio->in[i].read = 0; } MM_DBG("in_bytes %d\n", atomic_read(&audio->in_bytes)); MM_DBG("in_samples %d\n", atomic_read(&audio->in_samples)); atomic_set(&audio->in_bytes, 0); atomic_set(&audio->in_samples, 0); } static void audqcelp_out_flush(struct audio_in *audio) { int i; audio->out_head = 0; audio->out_tail = 0; audio->out_count = 0; for (i = 0; i < OUT_FRAME_NUM; i++) { audio->out[i].size = 0; audio->out[i].read = 0; audio->out[i].used = 0; } } /* ------------------- device --------------------- */ static long audqcelp_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct audio_in *audio = file->private_data; int rc = 0; MM_DBG("\n"); if (cmd == AUDIO_GET_STATS) { struct msm_audio_stats stats; stats.byte_count = atomic_read(&audio->in_bytes); stats.sample_count = atomic_read(&audio->in_samples); if (copy_to_user((void *) arg, &stats, sizeof(stats))) return -EFAULT; return rc; } mutex_lock(&audio->lock); switch (cmd) { case AUDIO_START: { uint32_t freq; freq = 48000; 
MM_DBG("AUDIO_START\n"); if (audio->in_call && (audio->voice_state != VOICE_STATE_INCALL)) { rc = -EPERM; break; } rc = msm_snddev_request_freq(&freq, audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("sample rate configured %d\n", freq); if (rc < 0) { MM_DBG(" Sample rate can not be set, return code %d\n", rc); msm_snddev_withdraw_freq(audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("msm_snddev_withdraw_freq\n"); break; } /*update aurec session info in audpreproc layer*/ audio->session_info.session_id = audio->enc_id; audio->session_info.sampling_freq = audio->samp_rate; audpreproc_update_audrec_info(&audio->session_info); rc = audqcelp_in_enable(audio); if (!rc) { rc = wait_event_interruptible_timeout(audio->wait_enable, audio->running != 0, 1*HZ); MM_DBG("state %d rc = %d\n", audio->running, rc); if (audio->running == 0) rc = -ENODEV; else rc = 0; } audio->stopped = 0; break; } case AUDIO_STOP: { /*reset the sampling frequency information at audpreproc layer*/ audio->session_info.sampling_freq = 0; audpreproc_update_audrec_info(&audio->session_info); rc = audqcelp_in_disable(audio); rc = msm_snddev_withdraw_freq(audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("msm_snddev_withdraw_freq\n"); audio->stopped = 1; break; } case AUDIO_FLUSH: { MM_DBG("AUDIO_FLUSH\n"); audio->rflush = 1; audio->wflush = 1; audqcelp_ioport_reset(audio); if (audio->running) { audqcelp_flush_command(audio); rc = wait_event_interruptible(audio->write_wait, !audio->wflush); if (rc < 0) { MM_ERR("AUDIO_FLUSH interrupted\n"); rc = -EINTR; } } else { audio->rflush = 0; audio->wflush = 0; } break; } case AUDIO_SET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } /* Allow only single frame */ if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) { if (cfg.buffer_size != (FRAME_SIZE - 8)) { rc = -EINVAL; break; } } else { if (cfg.buffer_size != (QCELP_FRAME_SIZE + 14)) { rc = -EINVAL; break; } } 
audio->buffer_size = cfg.buffer_size; break; } case AUDIO_GET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; memset(&cfg, 0, sizeof(cfg)); cfg.buffer_size = audio->buffer_size; cfg.buffer_count = FRAME_NUM; if (copy_to_user((void *) arg, &cfg, sizeof(cfg))) rc = -EFAULT; break; } case AUDIO_GET_QCELP_ENC_CONFIG: { if (copy_to_user((void *) arg, &audio->cfg, sizeof(audio->cfg))) rc = -EFAULT; break; } case AUDIO_SET_QCELP_ENC_CONFIG: { struct msm_audio_qcelp_enc_config cfg; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } MM_DBG("0X%8x, 0x%8x, 0x%8x\n", cfg.min_bit_rate, \ cfg.max_bit_rate, cfg.cdma_rate); if (cfg.min_bit_rate > CDMA_RATE_FULL || \ cfg.min_bit_rate < CDMA_RATE_EIGHTH) { MM_ERR("invalid min bitrate\n"); rc = -EFAULT; break; } if (cfg.max_bit_rate > CDMA_RATE_FULL || \ cfg.max_bit_rate < CDMA_RATE_EIGHTH) { MM_ERR("invalid max bitrate\n"); rc = -EFAULT; break; } /* Recording Does not support Erase and Blank */ if (cfg.cdma_rate > CDMA_RATE_FULL || cfg.cdma_rate < CDMA_RATE_EIGHTH) { MM_ERR("invalid qcelp cdma rate\n"); rc = -EFAULT; break; } memcpy(&audio->cfg, &cfg, sizeof(cfg)); break; } case AUDIO_GET_CONFIG: { struct msm_audio_config cfg; memset(&cfg, 0, sizeof(cfg)); cfg.buffer_size = OUT_BUFFER_SIZE; cfg.buffer_count = OUT_FRAME_NUM; cfg.sample_rate = audio->samp_rate; cfg.channel_count = audio->channel_mode; if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) rc = -EFAULT; break; } case AUDIO_SET_INCALL: { struct msm_voicerec_mode cfg; unsigned long flags; if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) { if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } if (cfg.rec_mode != VOC_REC_BOTH && cfg.rec_mode != VOC_REC_UPLINK && cfg.rec_mode != VOC_REC_DOWNLINK) { MM_ERR("invalid rec_mode\n"); rc = -EINVAL; break; } else { spin_lock_irqsave(&audio->dev_lock, flags); if (cfg.rec_mode == VOC_REC_UPLINK) audio->source = \ VOICE_UL_SOURCE_MIX_MASK; else if (cfg.rec_mode == VOC_REC_DOWNLINK) 
audio->source = \ VOICE_DL_SOURCE_MIX_MASK; else audio->source = \ VOICE_DL_SOURCE_MIX_MASK | VOICE_UL_SOURCE_MIX_MASK ; audio->in_call = 1; spin_unlock_irqrestore(&audio->dev_lock, flags); } } break; } case AUDIO_GET_SESSION_ID: { if (copy_to_user((void *) arg, &audio->enc_id, sizeof(unsigned short))) { rc = -EFAULT; } break; } default: rc = -EINVAL; } mutex_unlock(&audio->lock); return rc; } static ssize_t audqcelp_in_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct audio_in *audio = file->private_data; unsigned long flags; const char __user *start = buf; void *data; uint32_t index; uint32_t size; int rc = 0; struct qcelp_encoded_meta_out meta_field; struct audio_frame_nt *nt_frame; MM_DBG(" count = %d\n", count); mutex_lock(&audio->read_lock); while (count > 0) { rc = wait_event_interruptible( audio->wait, (audio->in_count > 0) || audio->stopped || audio->rflush || ((audio->mode == MSM_AUD_ENC_MODE_TUNNEL) && audio->in_call && audio->running && (audio->voice_state == VOICE_STATE_OFFCALL))); if (rc < 0) break; if (audio->rflush) { rc = -EBUSY; break; } if (audio->stopped && !audio->in_count) { MM_DBG("Driver in stop state, No more buffer to read"); rc = 0;/* End of File */ break; } else if ((audio->mode == MSM_AUD_ENC_MODE_TUNNEL) && audio->in_call && audio->running && (audio->voice_state \ == VOICE_STATE_OFFCALL)) { MM_DBG("Not Permitted Voice Terminated\n"); rc = -EPERM; /* Voice Call stopped */ break; } index = audio->in_tail; data = (uint8_t *) audio->in[index].data; size = audio->in[index].size; if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) { nt_frame = (struct audio_frame_nt *)(data - sizeof(struct audio_frame_nt)); memcpy((char *)&meta_field.time_stamp_dword_lsw, (char *)&nt_frame->time_stamp_dword_lsw, (sizeof(struct qcelp_encoded_meta_out) - \ sizeof(uint16_t))); meta_field.metadata_len = sizeof(struct qcelp_encoded_meta_out); if (copy_to_user((char *)start, (char *)&meta_field, sizeof(struct qcelp_encoded_meta_out))) { rc = 
-EFAULT; break; } if (nt_frame->nflag_lsw & 0x0001) { MM_ERR("recieved EOS in read call\n"); audio->eos_ack = 1; } buf += sizeof(struct qcelp_encoded_meta_out); count -= sizeof(struct qcelp_encoded_meta_out); } if (count >= size) { if (copy_to_user(buf, data, size)) { rc = -EFAULT; break; } spin_lock_irqsave(&audio->dsp_lock, flags); if (index != audio->in_tail) { /* overrun -- data is * invalid and we need to retry */ spin_unlock_irqrestore(&audio->dsp_lock, flags); continue; } audio->in[index].size = 0; audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1); audio->in_count--; spin_unlock_irqrestore(&audio->dsp_lock, flags); count -= size; buf += size; if ((audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL)) { if (!audio->eos_ack) { MM_DBG("sending read ptr command\ %d %d\n", audio->dsp_cnt, audio->in_tail); audqcelp_dsp_read_buffer(audio, audio->dsp_cnt++); } } } else { MM_ERR("short read\n"); break; } break; } mutex_unlock(&audio->read_lock); if (buf > start) return buf - start; return rc; } static void audpreproc_pcm_send_data(struct audio_in *audio, unsigned needed) { struct buffer *frame; unsigned long flags; MM_DBG("\n"); spin_lock_irqsave(&audio->dsp_lock, flags); if (!audio->running) goto done; if (needed && !audio->wflush) { /* We were called from the callback because the DSP * requested more data. Note that the DSP does want * more data, and if a buffer was in-flight, mark it * as available (since the DSP must now be done with * it). */ audio->out_needed = 1; frame = audio->out + audio->out_tail; if (frame->used == 0xffffffff) { MM_DBG("frame %d free\n", audio->out_tail); frame->used = 0; audio->out_tail ^= 1; wake_up(&audio->write_wait); } } if (audio->out_needed) { /* If the DSP currently wants data and we have a * buffer available, we will send it and reset * the needed flag. 
We'll mark the buffer as in-flight * so that it won't be recycled until the next buffer * is requested */ frame = audio->out + audio->out_tail; if (frame->used) { BUG_ON(frame->used == 0xffffffff); audpreproc_pcm_buffer_ptr_refresh(audio, audio->out_tail, frame->used); frame->used = 0xffffffff; audio->out_needed = 0; } } done: spin_unlock_irqrestore(&audio->dsp_lock, flags); } static int audqcelp_in_fsync(struct file *file, loff_t ppos1, loff_t ppos2, int datasync) { struct audio_in *audio = file->private_data; int rc = 0; MM_DBG("\n"); /* Macro prints the file name and function */ if (!audio->running || (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)) { rc = -EINVAL; goto done_nolock; } mutex_lock(&audio->write_lock); rc = wait_event_interruptible(audio->write_wait, audio->wflush); MM_DBG("waked on by some event audio->wflush = %d\n", audio->wflush); if (rc < 0) goto done; else if (audio->wflush) { rc = -EBUSY; goto done; } done: mutex_unlock(&audio->write_lock); done_nolock: return rc; } int audpreproc_qcelp_process_eos(struct audio_in *audio, const char __user *buf_start, unsigned short mfield_size) { struct buffer *frame; int rc = 0; frame = audio->out + audio->out_head; rc = wait_event_interruptible(audio->write_wait, (audio->out_needed && audio->out[0].used == 0 && audio->out[1].used == 0) || (audio->stopped) || (audio->wflush)); if (rc < 0) goto done; if (audio->stopped || audio->wflush) { rc = -EBUSY; goto done; } if (copy_from_user(frame->data, buf_start, mfield_size)) { rc = -EFAULT; goto done; } frame->mfield_sz = mfield_size; audio->out_head ^= 1; frame->used = mfield_size; MM_DBG("copying meta_out frame->used = %d\n", frame->used); audpreproc_pcm_send_data(audio, 0); done: return rc; } static ssize_t audqcelp_in_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct audio_in *audio = file->private_data; const char __user *start = buf; struct buffer *frame; char *cpy_ptr; int rc = 0, eos_condition = AUDPREPROC_QCELP_EOS_NONE; 
unsigned short mfield_size = 0; int write_count = 0; MM_DBG("cnt=%d\n", count); if (count & 1) return -EINVAL; if (audio->mode != MSM_AUD_ENC_MODE_NONTUNNEL) return -EINVAL; mutex_lock(&audio->write_lock); frame = audio->out + audio->out_head; /* if supplied count is more than driver buffer size * then only copy driver buffer size */ if (count > frame->size) count = frame->size; write_count = count; cpy_ptr = frame->data; rc = wait_event_interruptible(audio->write_wait, (frame->used == 0) || (audio->stopped) || (audio->wflush)); if (rc < 0) goto error; if (audio->stopped || audio->wflush) { rc = -EBUSY; goto error; } if (audio->mfield) { if (buf == start) { /* Processing beginning of user buffer */ if (__get_user(mfield_size, (unsigned short __user *) buf)) { rc = -EFAULT; goto error; } else if (mfield_size > count) { rc = -EINVAL; goto error; } MM_DBG("mf offset_val %x\n", mfield_size); if (copy_from_user(cpy_ptr, buf, mfield_size)) { rc = -EFAULT; goto error; } /* Check if EOS flag is set and buffer has * contains just meta field */ if (cpy_ptr[AUDPREPROC_QCELP_EOS_FLG_OFFSET] & AUDPREPROC_QCELP_EOS_FLG_MASK) { eos_condition = AUDPREPROC_QCELP_EOS_SET; MM_DBG("EOS SET\n"); if (mfield_size == count) { buf += mfield_size; eos_condition = 0; goto exit; } else cpy_ptr[AUDPREPROC_QCELP_EOS_FLG_OFFSET] &= ~AUDPREPROC_QCELP_EOS_FLG_MASK; } cpy_ptr += mfield_size; count -= mfield_size; buf += mfield_size; } else { mfield_size = 0; MM_DBG("continuous buffer\n"); } frame->mfield_sz = mfield_size; } MM_DBG("copying the stream count = %d\n", count); if (copy_from_user(cpy_ptr, buf, count)) { rc = -EFAULT; goto error; } exit: frame->used = count; audio->out_head ^= 1; if (!audio->flush_ack) audpreproc_pcm_send_data(audio, 0); else { audpreproc_pcm_send_data(audio, 1); audio->flush_ack = 0; } if (eos_condition == AUDPREPROC_QCELP_EOS_SET) rc = audpreproc_qcelp_process_eos(audio, start, mfield_size); mutex_unlock(&audio->write_lock); return write_count; error: 
mutex_unlock(&audio->write_lock); return rc; } static int audqcelp_in_release(struct inode *inode, struct file *file) { struct audio_in *audio = file->private_data; mutex_lock(&audio->lock); audio->in_call = 0; /* with draw frequency for session incase not stopped the driver */ msm_snddev_withdraw_freq(audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); auddev_unregister_evt_listner(AUDDEV_CLNT_ENC, audio->enc_id); /*reset the sampling frequency information at audpreproc layer*/ audio->session_info.sampling_freq = 0; audpreproc_update_audrec_info(&audio->session_info); audqcelp_in_disable(audio); audqcelp_in_flush(audio); msm_adsp_put(audio->audrec); audpreproc_aenc_free(audio->enc_id); audio->audrec = NULL; audio->opened = 0; if (audio->data) { iounmap(audio->map_v_read); free_contiguous_memory_by_paddr(audio->phys); audio->data = NULL; } if (audio->out_data) { iounmap(audio->map_v_write); free_contiguous_memory_by_paddr(audio->out_phys); audio->out_data = NULL; } mutex_unlock(&audio->lock); return 0; } struct audio_in the_audio_qcelp_in; static int audqcelp_in_open(struct inode *inode, struct file *file) { struct audio_in *audio = &the_audio_qcelp_in; int rc; int encid; mutex_lock(&audio->lock); if (audio->opened) { rc = -EBUSY; goto done; } audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K); if (audio->phys) { audio->map_v_read = ioremap(audio->phys, DMASZ); if (IS_ERR(audio->map_v_read)) { MM_ERR("could not map DMA buffers\n"); rc = -ENOMEM; free_contiguous_memory_by_paddr(audio->phys); goto done; } audio->data = audio->map_v_read; } else { MM_ERR("could not allocate DMA buffers\n"); rc = -ENOMEM; goto done; } MM_DBG("Memory addr = 0x%8x phy addr = 0x%8x\n",\ (int) audio->data, (int) audio->phys); if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { audio->mode = MSM_AUD_ENC_MODE_NONTUNNEL; MM_DBG("Opened for non tunnel mode encoding\n"); } else if (!(file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { audio->mode = 
MSM_AUD_ENC_MODE_TUNNEL; MM_DBG("Opened for tunnel mode encoding\n"); } else { MM_ERR("Invalid mode\n"); rc = -EACCES; goto done; } /* Settings will be re-config at AUDIO_SET_CONFIG, * but at least we need to have initial config */ if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) audio->buffer_size = (QCELP_FRAME_SIZE + 14); else audio->buffer_size = (FRAME_SIZE - 8); audio->enc_type = ENC_TYPE_V13K | audio->mode; audio->samp_rate = 8000; audio->channel_mode = AUDREC_CMD_MODE_MONO; audio->cfg.cdma_rate = CDMA_RATE_FULL; audio->cfg.min_bit_rate = CDMA_RATE_FULL; audio->cfg.max_bit_rate = CDMA_RATE_FULL; audio->source = INTERNAL_CODEC_TX_SOURCE_MIX_MASK; audio->rec_mode = VOC_REC_UPLINK; encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name, &audio->queue_ids); if (encid < 0) { MM_ERR("No free encoder available\n"); rc = -ENODEV; goto done; } audio->enc_id = encid; rc = msm_adsp_get(audio->module_name, &audio->audrec, &audrec_qcelp_adsp_ops, audio); if (rc) { audpreproc_aenc_free(audio->enc_id); goto done; } audio->stopped = 0; audio->source = 0; audio->wflush = 0; audio->rflush = 0; audio->flush_ack = 0; audqcelp_in_flush(audio); audqcelp_out_flush(audio); audio->out_phys = allocate_contiguous_ebi_nomap(BUFFER_SIZE, SZ_4K); if (!audio->out_phys) { MM_ERR("could not allocate write buffers\n"); rc = -ENOMEM; goto evt_error; } else { audio->map_v_write = ioremap(audio->out_phys, BUFFER_SIZE); if (IS_ERR(audio->map_v_write)) { MM_ERR("could not map write buffers\n"); rc = -ENOMEM; free_contiguous_memory_by_paddr(audio->out_phys); goto evt_error; } audio->out_data = audio->map_v_write; MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n", audio->out_phys, (int)audio->out_data); } /* Initialize buffer */ audio->out[0].data = audio->out_data + 0; audio->out[0].addr = audio->out_phys + 0; audio->out[0].size = OUT_BUFFER_SIZE; audio->out[1].data = audio->out_data + OUT_BUFFER_SIZE; audio->out[1].addr = audio->out_phys + OUT_BUFFER_SIZE; audio->out[1].size = 
OUT_BUFFER_SIZE; MM_DBG("audio->out[0].data = %d audio->out[1].data = %d", (unsigned int)audio->out[0].data, (unsigned int)audio->out[1].data); audio->device_events = AUDDEV_EVT_DEV_RDY | AUDDEV_EVT_DEV_RLS | AUDDEV_EVT_VOICE_STATE_CHG; audio->voice_state = msm_get_voice_state(); rc = auddev_register_evt_listner(audio->device_events, AUDDEV_CLNT_ENC, audio->enc_id, qcelp_in_listener, (void *) audio); if (rc) { MM_ERR("failed to register device event listener\n"); iounmap(audio->map_v_write); free_contiguous_memory_by_paddr(audio->out_phys); goto evt_error; } audio->mfield = META_OUT_SIZE; file->private_data = audio; audio->opened = 1; audio->out_frame_cnt++; audio->build_id = socinfo_get_build_id(); MM_DBG("Modem build id = %s\n", audio->build_id); done: mutex_unlock(&audio->lock); return rc; evt_error: msm_adsp_put(audio->audrec); audpreproc_aenc_free(audio->enc_id); mutex_unlock(&audio->lock); return rc; } static const struct file_operations audio_in_fops = { .owner = THIS_MODULE, .open = audqcelp_in_open, .release = audqcelp_in_release, .read = audqcelp_in_read, .write = audqcelp_in_write, .fsync = audqcelp_in_fsync, .unlocked_ioctl = audqcelp_in_ioctl, }; struct miscdevice audio_qcelp_in_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_qcelp_in", .fops = &audio_in_fops, }; static int __init audqcelp_in_init(void) { mutex_init(&the_audio_qcelp_in.lock); mutex_init(&the_audio_qcelp_in.read_lock); spin_lock_init(&the_audio_qcelp_in.dsp_lock); spin_lock_init(&the_audio_qcelp_in.dev_lock); init_waitqueue_head(&the_audio_qcelp_in.wait); init_waitqueue_head(&the_audio_qcelp_in.wait_enable); mutex_init(&the_audio_qcelp_in.write_lock); init_waitqueue_head(&the_audio_qcelp_in.write_wait); return misc_register(&audio_qcelp_in_misc); } device_initcall(audqcelp_in_init);
gpl-2.0
zparallax/amplitude_kernel_tw
drivers/usb/serial/visor.c
869
21696
/* * USB HandSpring Visor, Palm m50x, and Sony Clie driver * (supports all of the Palm OS USB devices) * * Copyright (C) 1999 - 2004 * Greg Kroah-Hartman (greg@kroah.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * See Documentation/usb/usb-serial.txt for more information on using this * driver * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/usb/cdc.h> #include "visor.h" /* * Version Information */ #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>" #define DRIVER_DESC "USB HandSpring Visor / Palm OS driver" /* function prototypes for a handspring visor */ static int visor_open(struct tty_struct *tty, struct usb_serial_port *port); static void visor_close(struct usb_serial_port *port); static int visor_probe(struct usb_serial *serial, const struct usb_device_id *id); static int visor_calc_num_ports(struct usb_serial *serial); static void visor_read_int_callback(struct urb *urb); static int clie_3_5_startup(struct usb_serial *serial); static int treo_attach(struct usb_serial *serial); static int clie_5_attach(struct usb_serial *serial); static int palm_os_3_probe(struct usb_serial *serial, const struct usb_device_id *id); static int palm_os_4_probe(struct usb_serial *serial, const struct usb_device_id *id); /* Parameters that may be passed into the module. 
*/ static bool debug; static __u16 vendor; static __u16 product; static struct usb_device_id id_table [] = { { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID), .driver_info = (kernel_ulong_t)&palm_os_3_probe }, { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO600_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(GSPDA_VENDOR_ID, GSPDA_XPLORE_M68_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M500_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M505_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M515_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_I705_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M100_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M125_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M130_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_TREO_650), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_S360_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_1_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NX60_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { 
USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NZ90V_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_TJ25_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(TAPWAVE_VENDOR_ID, TAPWAVE_ZODIAC_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(GARMIN_VENDOR_ID, GARMIN_IQUE_3600_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(ACEECA_VENDOR_ID, ACEECA_MEZ1000_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_7135_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(FOSSIL_VENDOR_ID, FOSSIL_ABACUS_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { }, /* optional parameter entry */ { } /* Terminating entry */ }; static struct usb_device_id clie_id_5_table [] = { { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_UX50_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { }, /* optional parameter entry */ { } /* Terminating entry */ }; static struct usb_device_id clie_id_3_5_table [] = { { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) }, { } /* Terminating entry */ }; static struct usb_device_id id_table_combined [] = { { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID) }, { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO_ID) }, { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO600_ID) }, { USB_DEVICE(GSPDA_VENDOR_ID, GSPDA_XPLORE_M68_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M500_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M505_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M515_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_I705_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M100_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M125_ID) }, { 
USB_DEVICE(PALM_VENDOR_ID, PALM_M130_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_TREO_650) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_S360_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_1_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NX60_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NZ90V_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_UX50_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_TJ25_ID) }, { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID) }, { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID) }, { USB_DEVICE(TAPWAVE_VENDOR_ID, TAPWAVE_ZODIAC_ID) }, { USB_DEVICE(GARMIN_VENDOR_ID, GARMIN_IQUE_3600_ID) }, { USB_DEVICE(ACEECA_VENDOR_ID, ACEECA_MEZ1000_ID) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_7135_ID) }, { USB_DEVICE(FOSSIL_VENDOR_ID, FOSSIL_ABACUS_ID) }, { }, /* optional parameter entry */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table_combined); static struct usb_driver visor_driver = { .name = "visor", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .id_table = id_table_combined, }; /* All of the device info needed for the Handspring Visor, and Palm 4.0 devices */ static struct usb_serial_driver handspring_device = { .driver = { .owner = THIS_MODULE, .name = "visor", }, .description = "Handspring Visor / Palm OS", .id_table = id_table, .num_ports = 2, .bulk_out_size = 256, .open = visor_open, .close = visor_close, .throttle = usb_serial_generic_throttle, .unthrottle = usb_serial_generic_unthrottle, .attach = treo_attach, .probe = visor_probe, .calc_num_ports = visor_calc_num_ports, .read_int_callback = visor_read_int_callback, }; /* All of the device info needed for the Clie UX50, TH55 Palm 5.0 devices */ static struct usb_serial_driver clie_5_device = { .driver = { .owner = 
THIS_MODULE, .name = "clie_5", }, .description = "Sony Clie 5.0", .id_table = clie_id_5_table, .num_ports = 2, .bulk_out_size = 256, .open = visor_open, .close = visor_close, .throttle = usb_serial_generic_throttle, .unthrottle = usb_serial_generic_unthrottle, .attach = clie_5_attach, .probe = visor_probe, .calc_num_ports = visor_calc_num_ports, .read_int_callback = visor_read_int_callback, }; /* device info for the Sony Clie OS version 3.5 */ static struct usb_serial_driver clie_3_5_device = { .driver = { .owner = THIS_MODULE, .name = "clie_3.5", }, .description = "Sony Clie 3.5", .id_table = clie_id_3_5_table, .num_ports = 1, .bulk_out_size = 256, .open = visor_open, .close = visor_close, .throttle = usb_serial_generic_throttle, .unthrottle = usb_serial_generic_unthrottle, .attach = clie_3_5_startup, }; static struct usb_serial_driver * const serial_drivers[] = { &handspring_device, &clie_5_device, &clie_3_5_device, NULL }; /****************************************************************************** * Handspring Visor specific driver functions ******************************************************************************/ static int visor_open(struct tty_struct *tty, struct usb_serial_port *port) { int result = 0; dbg("%s - port %d", __func__, port->number); if (!port->read_urb) { /* this is needed for some brain dead Sony devices */ dev_err(&port->dev, "Device lied about number of ports, please use a lower one.\n"); return -ENODEV; } /* Start reading from the device */ result = usb_serial_generic_open(tty, port); if (result) goto exit; if (port->interrupt_in_urb) { dbg("%s - adding interrupt input for treo", __func__); result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) dev_err(&port->dev, "%s - failed submitting interrupt urb, error %d\n", __func__, result); } exit: return result; } static void visor_close(struct usb_serial_port *port) { unsigned char *transfer_buffer; dbg("%s - port %d", __func__, port->number); /* shutdown our urbs */ 
usb_serial_generic_close(port); usb_kill_urb(port->interrupt_in_urb); mutex_lock(&port->serial->disc_mutex); if (!port->serial->disconnected) { /* Try to send shutdown message, unless the device is gone */ transfer_buffer = kmalloc(0x12, GFP_KERNEL); if (transfer_buffer) { usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), VISOR_CLOSE_NOTIFICATION, 0xc2, 0x0000, 0x0000, transfer_buffer, 0x12, 300); kfree(transfer_buffer); } } mutex_unlock(&port->serial->disc_mutex); } static void visor_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; int status = urb->status; int result; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dbg("%s - urb shutting down with status: %d", __func__, status); return; default: dbg("%s - nonzero urb status received: %d", __func__, status); goto exit; } /* * This information is still unknown what it can be used for. * If anyone has an idea, please let the author know... * * Rumor has it this endpoint is used to notify when data * is ready to be read from the bulk ones. 
*/ usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length, urb->transfer_buffer); exit: result = usb_submit_urb(urb, GFP_ATOMIC); if (result) dev_err(&urb->dev->dev, "%s - Error %d submitting interrupt urb\n", __func__, result); } static int palm_os_3_probe(struct usb_serial *serial, const struct usb_device_id *id) { struct device *dev = &serial->dev->dev; struct visor_connection_info *connection_info; unsigned char *transfer_buffer; char *string; int retval = 0; int i; int num_ports = 0; dbg("%s", __func__); transfer_buffer = kmalloc(sizeof(*connection_info), GFP_KERNEL); if (!transfer_buffer) { dev_err(dev, "%s - kmalloc(%Zd) failed.\n", __func__, sizeof(*connection_info)); return -ENOMEM; } /* send a get connection info request */ retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), VISOR_GET_CONNECTION_INFORMATION, 0xc2, 0x0000, 0x0000, transfer_buffer, sizeof(*connection_info), 300); if (retval < 0) { dev_err(dev, "%s - error %d getting connection information\n", __func__, retval); goto exit; } if (retval == sizeof(*connection_info)) { connection_info = (struct visor_connection_info *) transfer_buffer; num_ports = le16_to_cpu(connection_info->num_ports); for (i = 0; i < num_ports; ++i) { switch ( connection_info->connections[i].port_function_id) { case VISOR_FUNCTION_GENERIC: string = "Generic"; break; case VISOR_FUNCTION_DEBUGGER: string = "Debugger"; break; case VISOR_FUNCTION_HOTSYNC: string = "HotSync"; break; case VISOR_FUNCTION_CONSOLE: string = "Console"; break; case VISOR_FUNCTION_REMOTE_FILE_SYS: string = "Remote File System"; break; default: string = "unknown"; break; } dev_info(dev, "%s: port %d, is for %s use\n", serial->type->description, connection_info->connections[i].port, string); } } /* * Handle devices that report invalid stuff here. 
*/ if (num_ports == 0 || num_ports > 2) { dev_warn(dev, "%s: No valid connect info available\n", serial->type->description); num_ports = 2; } dev_info(dev, "%s: Number of ports: %d\n", serial->type->description, num_ports); /* * save off our num_ports info so that we can use it in the * calc_num_ports callback */ usb_set_serial_data(serial, (void *)(long)num_ports); /* ask for the number of bytes available, but ignore the response as it is broken */ retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), VISOR_REQUEST_BYTES_AVAILABLE, 0xc2, 0x0000, 0x0005, transfer_buffer, 0x02, 300); if (retval < 0) dev_err(dev, "%s - error %d getting bytes available request\n", __func__, retval); retval = 0; exit: kfree(transfer_buffer); return retval; } static int palm_os_4_probe(struct usb_serial *serial, const struct usb_device_id *id) { struct device *dev = &serial->dev->dev; struct palm_ext_connection_info *connection_info; unsigned char *transfer_buffer; int retval; dbg("%s", __func__); transfer_buffer = kmalloc(sizeof(*connection_info), GFP_KERNEL); if (!transfer_buffer) { dev_err(dev, "%s - kmalloc(%Zd) failed.\n", __func__, sizeof(*connection_info)); return -ENOMEM; } retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), PALM_GET_EXT_CONNECTION_INFORMATION, 0xc2, 0x0000, 0x0000, transfer_buffer, sizeof(*connection_info), 300); if (retval < 0) dev_err(dev, "%s - error %d getting connection info\n", __func__, retval); else usb_serial_debug_data(debug, &serial->dev->dev, __func__, retval, transfer_buffer); kfree(transfer_buffer); return 0; } static int visor_probe(struct usb_serial *serial, const struct usb_device_id *id) { int retval = 0; int (*startup)(struct usb_serial *serial, const struct usb_device_id *id); dbg("%s", __func__); /* * some Samsung Android phones in modem mode have the same ID * as SPH-I500, but they are ACM devices, so dont bind to them */ if (id->idVendor == SAMSUNG_VENDOR_ID && id->idProduct == SAMSUNG_SPH_I500_ID && 
serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM && serial->dev->descriptor.bDeviceSubClass == USB_CDC_SUBCLASS_ACM) return -ENODEV; if (serial->dev->actconfig->desc.bConfigurationValue != 1) { dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", serial->dev->actconfig->desc.bConfigurationValue); return -ENODEV; } if (id->driver_info) { startup = (void *)id->driver_info; retval = startup(serial, id); } return retval; } static int visor_calc_num_ports(struct usb_serial *serial) { int num_ports = (int)(long)(usb_get_serial_data(serial)); if (num_ports) usb_set_serial_data(serial, NULL); return num_ports; } static int clie_3_5_startup(struct usb_serial *serial) { struct device *dev = &serial->dev->dev; int result; u8 *data; dbg("%s", __func__); data = kmalloc(1, GFP_KERNEL); if (!data) return -ENOMEM; /* * Note that PEG-300 series devices expect the following two calls. */ /* get the config number */ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), USB_REQ_GET_CONFIGURATION, USB_DIR_IN, 0, 0, data, 1, 3000); if (result < 0) { dev_err(dev, "%s: get config number failed: %d\n", __func__, result); goto out; } if (result != 1) { dev_err(dev, "%s: get config number bad return length: %d\n", __func__, result); result = -EIO; goto out; } /* get the interface number */ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), USB_REQ_GET_INTERFACE, USB_DIR_IN | USB_RECIP_INTERFACE, 0, 0, data, 1, 3000); if (result < 0) { dev_err(dev, "%s: get interface number failed: %d\n", __func__, result); goto out; } if (result != 1) { dev_err(dev, "%s: get interface number bad return length: %d\n", __func__, result); result = -EIO; goto out; } result = 0; out: kfree(data); return result; } static int treo_attach(struct usb_serial *serial) { struct usb_serial_port *swap_port; /* Only do this endpoint hack for the Handspring devices with * interrupt in endpoints, which for now are the Treo devices. 
*/ if (!((le16_to_cpu(serial->dev->descriptor.idVendor) == HANDSPRING_VENDOR_ID) || (le16_to_cpu(serial->dev->descriptor.idVendor) == KYOCERA_VENDOR_ID)) || (serial->num_interrupt_in == 0)) return 0; dbg("%s", __func__); /* * It appears that Treos and Kyoceras want to use the * 1st bulk in endpoint to communicate with the 2nd bulk out endpoint, * so let's swap the 1st and 2nd bulk in and interrupt endpoints. * Note that swapping the bulk out endpoints would break lots of * apps that want to communicate on the second port. */ #define COPY_PORT(dest, src) \ do { \ int i; \ \ for (i = 0; i < ARRAY_SIZE(src->read_urbs); ++i) { \ dest->read_urbs[i] = src->read_urbs[i]; \ dest->read_urbs[i]->context = dest; \ dest->bulk_in_buffers[i] = src->bulk_in_buffers[i]; \ } \ dest->read_urb = src->read_urb; \ dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\ dest->bulk_in_buffer = src->bulk_in_buffer; \ dest->bulk_in_size = src->bulk_in_size; \ dest->interrupt_in_urb = src->interrupt_in_urb; \ dest->interrupt_in_urb->context = dest; \ dest->interrupt_in_endpointAddress = \ src->interrupt_in_endpointAddress;\ dest->interrupt_in_buffer = src->interrupt_in_buffer; \ } while (0); swap_port = kmalloc(sizeof(*swap_port), GFP_KERNEL); if (!swap_port) return -ENOMEM; COPY_PORT(swap_port, serial->port[0]); COPY_PORT(serial->port[0], serial->port[1]); COPY_PORT(serial->port[1], swap_port); kfree(swap_port); return 0; } static int clie_5_attach(struct usb_serial *serial) { struct usb_serial_port *port; unsigned int pipe; int j; dbg("%s", __func__); /* TH55 registers 2 ports. Communication in from the UX50/TH55 uses bulk_in_endpointAddress from port 0. 
Communication out to the UX50/TH55 uses bulk_out_endpointAddress from port 1 Lets do a quick and dirty mapping */ /* some sanity check */ if (serial->num_ports < 2) return -1; /* port 0 now uses the modified endpoint Address */ port = serial->port[0]; port->bulk_out_endpointAddress = serial->port[1]->bulk_out_endpointAddress; pipe = usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress); for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) port->write_urbs[j]->pipe = pipe; return 0; } static int __init visor_init(void) { int i, retval; /* Only if parameters were passed to us */ if (vendor > 0 && product > 0) { struct usb_device_id usb_dev_temp[] = { { USB_DEVICE(vendor, product), .driver_info = (kernel_ulong_t) &palm_os_4_probe } }; /* Find the last entry in id_table */ for (i = 0;; i++) { if (id_table[i].idVendor == 0) { id_table[i] = usb_dev_temp[0]; break; } } /* Find the last entry in id_table_combined */ for (i = 0;; i++) { if (id_table_combined[i].idVendor == 0) { id_table_combined[i] = usb_dev_temp[0]; break; } } printk(KERN_INFO KBUILD_MODNAME ": Untested USB device specified at time of module insertion\n"); printk(KERN_INFO KBUILD_MODNAME ": Warning: This is not guaranteed to work\n"); printk(KERN_INFO KBUILD_MODNAME ": Using a newer kernel is preferred to this method\n"); printk(KERN_INFO KBUILD_MODNAME ": Adding Palm OS protocol 4.x support for unknown device: 0x%x/0x%x\n", vendor, product); } retval = usb_serial_register_drivers(&visor_driver, serial_drivers); if (retval == 0) printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n"); return retval; } static void __exit visor_exit (void) { usb_serial_deregister_drivers(&visor_driver, serial_drivers); } module_init(visor_init); module_exit(visor_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not"); module_param(vendor, ushort, 0); MODULE_PARM_DESC(vendor, "User specified 
vendor ID"); module_param(product, ushort, 0); MODULE_PARM_DESC(product, "User specified product ID");
gpl-2.0
tenfar/pyramid-gb-kernel
arch/um/kernel/time.c
1381
2601
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <linux/clockchips.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/threads.h> #include <asm/irq.h> #include <asm/param.h> #include "kern_util.h" #include "os.h" void timer_handler(int sig, struct uml_pt_regs *regs) { unsigned long flags; local_irq_save(flags); do_IRQ(TIMER_IRQ, regs); local_irq_restore(flags); } static void itimer_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: set_interval(); break; case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_ONESHOT: disable_timer(); break; case CLOCK_EVT_MODE_RESUME: break; } } static int itimer_next_event(unsigned long delta, struct clock_event_device *evt) { return timer_one_shot(delta + 1); } static struct clock_event_device itimer_clockevent = { .name = "itimer", .rating = 250, .cpumask = cpu_all_mask, .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_mode = itimer_set_mode, .set_next_event = itimer_next_event, .shift = 32, .irq = 0, }; static irqreturn_t um_timer(int irq, void *dev) { (*itimer_clockevent.event_handler)(&itimer_clockevent); return IRQ_HANDLED; } static cycle_t itimer_read(struct clocksource *cs) { return os_nsecs() / 1000; } static struct clocksource itimer_clocksource = { .name = "itimer", .rating = 300, .read = itimer_read, .mask = CLOCKSOURCE_MASK(64), .mult = 1000, .shift = 0, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static void __init setup_itimer(void) { int err; err = request_irq(TIMER_IRQ, um_timer, IRQF_DISABLED, "timer", NULL); if (err != 0) printk(KERN_ERR "register_timer : request_irq failed - " "errno = %d\n", -err); itimer_clockevent.mult = div_sc(HZ, NSEC_PER_SEC, 32); itimer_clockevent.max_delta_ns = clockevent_delta2ns(60 * HZ, &itimer_clockevent); itimer_clockevent.min_delta_ns = clockevent_delta2ns(1, 
&itimer_clockevent); err = clocksource_register(&itimer_clocksource); if (err) { printk(KERN_ERR "clocksource_register returned %d\n", err); return; } clockevents_register_device(&itimer_clockevent); } void __init time_init(void) { long long nsecs; timer_init(); nsecs = os_nsecs(); set_normalized_timespec(&wall_to_monotonic, -nsecs / NSEC_PER_SEC, -nsecs % NSEC_PER_SEC); set_normalized_timespec(&xtime, nsecs / NSEC_PER_SEC, nsecs % NSEC_PER_SEC); late_time_init = setup_itimer; }
gpl-2.0
NooNameR/k3_bravo
drivers/gpu/drm/radeon/r600_blit_shaders.c
1637
14592
/* * Copyright 2009 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Alex Deucher <alexander.deucher@amd.com> */ #include <linux/types.h> #include <linux/kernel.h> /* * R6xx+ cards need to use the 3D engine to blit data which requires * quite a bit of hw state setup. Rather than pull the whole 3D driver * (which normally generates the 3D state) into the DRM, we opt to use * statically generated state tables. The regsiter state and shaders * were hand generated to support blitting functionality. See the 3D * driver or documentation for descriptions of the registers and * shader instructions. 
*/ const u32 r6xx_default_state[] = { 0xc0002400, /* START_3D_CMDBUF */ 0x00000000, 0xc0012800, /* CONTEXT_CONTROL */ 0x80000000, 0x80000000, 0xc0016800, 0x00000010, 0x00008000, /* WAIT_UNTIL */ 0xc0016800, 0x00000542, 0x07000003, /* TA_CNTL_AUX */ 0xc0016800, 0x000005c5, 0x00000000, /* VC_ENHANCE */ 0xc0016800, 0x00000363, 0x00000000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */ 0xc0016800, 0x0000060c, 0x82000000, /* DB_DEBUG */ 0xc0016800, 0x0000060e, 0x01020204, /* DB_WATERMARKS */ 0xc0026f00, 0x00000000, 0x00000000, /* SQ_VTX_BASE_VTX_LOC */ 0x00000000, /* SQ_VTX_START_INST_LOC */ 0xc0096900, 0x0000022a, 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0016900, 0x00000004, 0x00000000, /* DB_DEPTH_INFO */ 0xc0026900, 0x0000000a, 0x00000000, /* DB_STENCIL_CLEAR */ 0x00000000, /* DB_DEPTH_CLEAR */ 0xc0016900, 0x00000200, 0x00000000, /* DB_DEPTH_CONTROL */ 0xc0026900, 0x00000343, 0x00000060, /* DB_RENDER_CONTROL */ 0x00000040, /* DB_RENDER_OVERRIDE */ 0xc0016900, 0x00000351, 0x0000aa00, /* DB_ALPHA_TO_MASK */ 0xc00f6900, 0x00000100, 0x00000800, /* VGT_MAX_VTX_INDX */ 0x00000000, /* VGT_MIN_VTX_INDX */ 0x00000000, /* VGT_INDX_OFFSET */ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */ 0x00000000, /* SX_ALPHA_TEST_CONTROL */ 0x00000000, /* CB_BLEND_RED */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, /* CB_FOG_RED */ 0x00000000, 0x00000000, 0x00000000, /* DB_STENCILREFMASK */ 0x00000000, /* DB_STENCILREFMASK_BF */ 0x00000000, /* SX_ALPHA_REF */ 0xc0046900, 0x0000030c, 0x01000000, /* CB_CLRCMP_CNTL */ 0x00000000, 0x00000000, 0x00000000, 0xc0046900, 0x00000048, 0x3f800000, /* CB_CLEAR_RED */ 0x00000000, 0x3f800000, 0x3f800000, 0xc0016900, 0x00000080, 0x00000000, /* PA_SC_WINDOW_OFFSET */ 0xc00a6900, 0x00000083, 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */ 0x00000000, /* PA_SC_CLIPRECT_0_TL */ 0x20002000, 0x00000000, 0x20002000, 0x00000000, 0x20002000, 0x00000000, 0x20002000, 0x00000000, /* 
PA_SC_EDGERULE */ 0xc0406900, 0x00000094, 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */ 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0xc0026900, 0x00000292, 0x00000000, /* PA_SC_MPASS_PS_CNTL */ 0x00004010, /* PA_SC_MODE_CNTL */ 0xc0096900, 0x00000300, 0x00000000, /* PA_SC_LINE_CNTL */ 0x00000000, /* PA_SC_AA_CONFIG */ 0x0000002d, /* PA_SU_VTX_CNTL */ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ 0x3f800000, 0x3f800000, 0x3f800000, 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */ 0x00000000, 0xc0016900, 0x00000312, 0xffffffff, /* PA_SC_AA_MASK */ 0xc0066900, 0x0000037e, 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */ 0xc0046900, 0x000001b6, 0x00000000, /* SPI_INPUT_Z */ 0x00000000, /* SPI_FOG_CNTL */ 0x00000000, /* SPI_FOG_FUNC_SCALE */ 0x00000000, /* SPI_FOG_FUNC_BIAS */ 0xc0016900, 0x00000225, 0x00000000, /* SQ_PGM_START_FS */ 0xc0016900, 0x00000229, 0x00000000, /* SQ_PGM_RESOURCES_FS */ 0xc0016900, 0x00000237, 0x00000000, /* SQ_PGM_CF_OFFSET_FS */ 0xc0026900, 
0x000002a8, 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */ 0xc0116900, 0x00000280, 0x00000000, /* PA_SU_POINT_SIZE */ 0x00000000, /* PA_SU_POINT_MINMAX */ 0x00000008, /* PA_SU_LINE_CNTL */ 0x00000000, /* PA_SC_LINE_STIPPLE */ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ 0x00000000, /* VGT_HOS_CNTL */ 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */ 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */ 0x00000000, /* VGT_HOS_REUSE_DEPTH */ 0x00000000, /* VGT_GROUP_PRIM_TYPE */ 0x00000000, /* VGT_GROUP_FIRST_DECR */ 0x00000000, /* VGT_GROUP_DECR */ 0x00000000, /* VGT_GROUP_VECT_0_CNTL */ 0x00000000, /* VGT_GROUP_VECT_1_CNTL */ 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */ 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */ 0x00000000, /* VGT_GS_MODE */ 0xc0016900, 0x000002a1, 0x00000000, /* VGT_PRIMITIVEID_EN */ 0xc0016900, 0x000002a5, 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */ 0xc0036900, 0x000002ac, 0x00000000, /* VGT_STRMOUT_EN */ 0x00000000, /* VGT_REUSE_OFF */ 0x00000000, /* VGT_VTX_CNT_EN */ 0xc0016900, 0x000002c8, 0x00000000, /* VGT_STRMOUT_BUFFER_EN */ 0xc0076900, 0x00000202, 0x00cc0000, /* CB_COLOR_CONTROL */ 0x00000210, /* DB_SHADER_CNTL */ 0x00010000, /* PA_CL_CLIP_CNTL */ 0x00000244, /* PA_SU_SC_MODE_CNTL */ 0x00000100, /* PA_CL_VTE_CNTL */ 0x00000000, /* PA_CL_VS_OUT_CNTL */ 0x00000000, /* PA_CL_NANINF_CNTL */ 0xc0026900, 0x0000008e, 0x0000000f, /* CB_TARGET_MASK */ 0x0000000f, /* CB_SHADER_MASK */ 0xc0016900, 0x000001e8, 0x00000001, /* CB_SHADER_CONTROL */ 0xc0016900, 0x00000185, 0x00000000, /* SPI_VS_OUT_ID_0 */ 0xc0016900, 0x00000191, 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */ 0xc0056900, 0x000001b1, 0x00000000, /* SPI_VS_OUT_CONFIG */ 0x00000000, /* SPI_THREAD_GROUPING */ 0x00000001, /* SPI_PS_IN_CONTROL_0 */ 0x00000000, /* SPI_PS_IN_CONTROL_1 */ 0x00000000, /* SPI_INTERP_CONTROL_0 */ 0xc0036e00, /* SET_SAMPLER */ 0x00000000, 0x00000012, 0x00000000, 0x00000000, }; const u32 r7xx_default_state[] = { 0xc0012800, /* CONTEXT_CONTROL */ 0x80000000, 
0x80000000, 0xc0016800, 0x00000010, 0x00008000, /* WAIT_UNTIL */ 0xc0016800, 0x00000542, 0x07000002, /* TA_CNTL_AUX */ 0xc0016800, 0x000005c5, 0x00000000, /* VC_ENHANCE */ 0xc0016800, 0x00000363, 0x00004000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */ 0xc0016800, 0x0000060c, 0x00000000, /* DB_DEBUG */ 0xc0016800, 0x0000060e, 0x00420204, /* DB_WATERMARKS */ 0xc0026f00, 0x00000000, 0x00000000, /* SQ_VTX_BASE_VTX_LOC */ 0x00000000, /* SQ_VTX_START_INST_LOC */ 0xc0096900, 0x0000022a, 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0016900, 0x00000004, 0x00000000, /* DB_DEPTH_INFO */ 0xc0026900, 0x0000000a, 0x00000000, /* DB_STENCIL_CLEAR */ 0x00000000, /* DB_DEPTH_CLEAR */ 0xc0016900, 0x00000200, 0x00000000, /* DB_DEPTH_CONTROL */ 0xc0026900, 0x00000343, 0x00000060, /* DB_RENDER_CONTROL */ 0x00000000, /* DB_RENDER_OVERRIDE */ 0xc0016900, 0x00000351, 0x0000aa00, /* DB_ALPHA_TO_MASK */ 0xc0096900, 0x00000100, 0x00000800, /* VGT_MAX_VTX_INDX */ 0x00000000, /* VGT_MIN_VTX_INDX */ 0x00000000, /* VGT_INDX_OFFSET */ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */ 0x00000000, /* SX_ALPHA_TEST_CONTROL */ 0x00000000, /* CB_BLEND_RED */ 0x00000000, 0x00000000, 0x00000000, 0xc0036900, 0x0000010c, 0x00000000, /* DB_STENCILREFMASK */ 0x00000000, /* DB_STENCILREFMASK_BF */ 0x00000000, /* SX_ALPHA_REF */ 0xc0046900, 0x0000030c, /* CB_CLRCMP_CNTL */ 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0xc0016900, 0x00000080, 0x00000000, /* PA_SC_WINDOW_OFFSET */ 0xc00a6900, 0x00000083, 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */ 0x00000000, /* PA_SC_CLIPRECT_0_TL */ 0x20002000, 0x00000000, 0x20002000, 0x00000000, 0x20002000, 0x00000000, 0x20002000, 0xaaaaaaaa, /* PA_SC_EDGERULE */ 0xc0406900, 0x00000094, 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */ 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 
0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0x00000000, 0x3f800000, 0xc0026900, 0x00000292, 0x00000000, /* PA_SC_MPASS_PS_CNTL */ 0x00514000, /* PA_SC_MODE_CNTL */ 0xc0096900, 0x00000300, 0x00000000, /* PA_SC_LINE_CNTL */ 0x00000000, /* PA_SC_AA_CONFIG */ 0x0000002d, /* PA_SU_VTX_CNTL */ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ 0x3f800000, 0x3f800000, 0x3f800000, 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */ 0x00000000, 0xc0016900, 0x00000312, 0xffffffff, /* PA_SC_AA_MASK */ 0xc0066900, 0x0000037e, 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */ 0xc0046900, 0x000001b6, 0x00000000, /* SPI_INPUT_Z */ 0x00000000, /* SPI_FOG_CNTL */ 0x00000000, /* SPI_FOG_FUNC_SCALE */ 0x00000000, /* SPI_FOG_FUNC_BIAS */ 0xc0016900, 0x00000225, 0x00000000, /* SQ_PGM_START_FS */ 0xc0016900, 0x00000229, 0x00000000, /* SQ_PGM_RESOURCES_FS */ 0xc0016900, 0x00000237, 0x00000000, /* SQ_PGM_CF_OFFSET_FS */ 0xc0026900, 0x000002a8, 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */ 0xc0116900, 0x00000280, 0x00000000, /* PA_SU_POINT_SIZE */ 0x00000000, /* PA_SU_POINT_MINMAX */ 0x00000008, /* PA_SU_LINE_CNTL */ 0x00000000, /* PA_SC_LINE_STIPPLE 
*/ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ 0x00000000, /* VGT_HOS_CNTL */ 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */ 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */ 0x00000000, /* VGT_HOS_REUSE_DEPTH */ 0x00000000, /* VGT_GROUP_PRIM_TYPE */ 0x00000000, /* VGT_GROUP_FIRST_DECR */ 0x00000000, /* VGT_GROUP_DECR */ 0x00000000, /* VGT_GROUP_VECT_0_CNTL */ 0x00000000, /* VGT_GROUP_VECT_1_CNTL */ 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */ 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */ 0x00000000, /* VGT_GS_MODE */ 0xc0016900, 0x000002a1, 0x00000000, /* VGT_PRIMITIVEID_EN */ 0xc0016900, 0x000002a5, 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */ 0xc0036900, 0x000002ac, 0x00000000, /* VGT_STRMOUT_EN */ 0x00000000, /* VGT_REUSE_OFF */ 0x00000000, /* VGT_VTX_CNT_EN */ 0xc0016900, 0x000002c8, 0x00000000, /* VGT_STRMOUT_BUFFER_EN */ 0xc0076900, 0x00000202, 0x00cc0000, /* CB_COLOR_CONTROL */ 0x00000210, /* DB_SHADER_CNTL */ 0x00010000, /* PA_CL_CLIP_CNTL */ 0x00000244, /* PA_SU_SC_MODE_CNTL */ 0x00000100, /* PA_CL_VTE_CNTL */ 0x00000000, /* PA_CL_VS_OUT_CNTL */ 0x00000000, /* PA_CL_NANINF_CNTL */ 0xc0026900, 0x0000008e, 0x0000000f, /* CB_TARGET_MASK */ 0x0000000f, /* CB_SHADER_MASK */ 0xc0016900, 0x000001e8, 0x00000001, /* CB_SHADER_CONTROL */ 0xc0016900, 0x00000185, 0x00000000, /* SPI_VS_OUT_ID_0 */ 0xc0016900, 0x00000191, 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */ 0xc0056900, 0x000001b1, 0x00000000, /* SPI_VS_OUT_CONFIG */ 0x00000001, /* SPI_THREAD_GROUPING */ 0x00000001, /* SPI_PS_IN_CONTROL_0 */ 0x00000000, /* SPI_PS_IN_CONTROL_1 */ 0x00000000, /* SPI_INTERP_CONTROL_0 */ 0xc0036e00, /* SET_SAMPLER */ 0x00000000, 0x00000012, 0x00000000, 0x00000000, }; /* same for r6xx/r7xx */ const u32 r6xx_vs[] = { 0x00000004, 0x81000000, 0x0000203c, 0x94000b08, 0x00004000, 0x14200b1a, 0x00000000, 0x00000000, 0x3c000000, 0x68cd1000, #ifdef __BIG_ENDIAN 0x000a0000, #else 0x00080000, #endif 0x00000000, }; const u32 r6xx_ps[] = { 0x00000002, 0x80800000, 0x00000000, 0x94200688, 0x00000010, 0x000d1000, 0xb0800000, 
0x00000000, }; const u32 r6xx_ps_size = ARRAY_SIZE(r6xx_ps); const u32 r6xx_vs_size = ARRAY_SIZE(r6xx_vs); const u32 r6xx_default_size = ARRAY_SIZE(r6xx_default_state); const u32 r7xx_default_size = ARRAY_SIZE(r7xx_default_state);
gpl-2.0
mathur/rohan.kernel.op3
arch/powerpc/platforms/pseries/io_event_irq.c
1637
5218
/* * Copyright 2010 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/errno.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/list.h> #include <linux/notifier.h> #include <asm/machdep.h> #include <asm/rtas.h> #include <asm/irq.h> #include <asm/io_event_irq.h> #include "pseries.h" /* * IO event interrupt is a mechanism provided by RTAS to return * information about hardware error and non-error events. Device * drivers can register their event handlers to receive events. * Device drivers are expected to use atomic_notifier_chain_register() * and atomic_notifier_chain_unregister() to register and unregister * their event handlers. Since multiple IO event types and scopes * share an IO event interrupt, the event handlers are called one * by one until the IO event is claimed by one of the handlers. * The event handlers are expected to return NOTIFY_OK if the * event is handled by the event handler or NOTIFY_DONE if the * event does not belong to the handler. * * Usage: * * Notifier function: * #include <asm/io_event_irq.h> * int event_handler(struct notifier_block *nb, unsigned long val, void *data) { * p = (struct pseries_io_event_sect_data *) data; * if (! 
is_my_event(p->scope, p->event_type)) return NOTIFY_DONE; * : * : * return NOTIFY_OK; * } * struct notifier_block event_nb = { * .notifier_call = event_handler, * } * * Registration: * atomic_notifier_chain_register(&pseries_ioei_notifier_list, &event_nb); * * Unregistration: * atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &event_nb); */ ATOMIC_NOTIFIER_HEAD(pseries_ioei_notifier_list); EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list); static int ioei_check_exception_token; static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned; /** * Find the data portion of an IO Event section from event log. * @elog: RTAS error/event log. * * Return: * pointer to a valid IO event section data. NULL if not found. */ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog) { struct pseries_errorlog *sect; /* We should only ever get called for io-event interrupts, but if * we do get called for another type then something went wrong so * make some noise about it. * RTAS_TYPE_IO only exists in extended event log version 6 or later. * No need to check event log version. */ if (unlikely(rtas_error_type(elog) != RTAS_TYPE_IO)) { printk_once(KERN_WARNING"io_event_irq: Unexpected event type %d", rtas_error_type(elog)); return NULL; } sect = get_pseries_errorlog(elog, PSERIES_ELOG_SECT_ID_IO_EVENT); if (unlikely(!sect)) { printk_once(KERN_WARNING "io_event_irq: RTAS extended event " "log does not contain an IO Event section. " "Could be a bug in system firmware!\n"); return NULL; } return (struct pseries_io_event *) &sect->data; } /* * PAPR: * - check-exception returns the first found error or event and clear that * error or event so it is reported once. * - Each interrupt returns one event. If a plateform chooses to report * multiple events through a single interrupt, it must ensure that the * interrupt remains asserted until check-exception has been used to * process all out-standing events for that interrupt. 
* * Implementation notes: * - Events must be processed in the order they are returned. Hence, * sequential in nature. * - The owner of an event is determined by combinations of scope, * event type, and sub-type. There is no easy way to pre-sort clients * by scope or event type alone. For example, Torrent ISR route change * event is reported with scope 0x00 (Not Applicatable) rather than * 0x3B (Torrent-hub). It is better to let the clients to identify * who owns the event. */ static irqreturn_t ioei_interrupt(int irq, void *dev_id) { struct pseries_io_event *event; int rtas_rc; for (;;) { rtas_rc = rtas_call(ioei_check_exception_token, 6, 1, NULL, RTAS_VECTOR_EXTERNAL_INTERRUPT, virq_to_hw(irq), RTAS_IO_EVENTS, 1 /* Time Critical */, __pa(ioei_rtas_buf), RTAS_DATA_BUF_SIZE); if (rtas_rc != 0) break; event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf); if (!event) continue; atomic_notifier_call_chain(&pseries_ioei_notifier_list, 0, event); } return IRQ_HANDLED; } static int __init ioei_init(void) { struct device_node *np; ioei_check_exception_token = rtas_token("check-exception"); if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) return -ENODEV; np = of_find_node_by_path("/event-sources/ibm,io-events"); if (np) { request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT"); pr_info("IBM I/O event interrupts enabled\n"); of_node_put(np); } else { return -ENODEV; } return 0; } machine_subsys_initcall(pseries, ioei_init);
gpl-2.0
sayeed99/flareM_old
drivers/staging/echo/echo.c
2149
19972
/* * SpanDSP - a series of DSP components for telephony * * echo.c - A line echo canceller. This code is being developed * against and partially complies with G168. * * Written by Steve Underwood <steveu@coppice.org> * and David Rowe <david_at_rowetel_dot_com> * * Copyright (C) 2001, 2003 Steve Underwood, 2007 David Rowe * * Based on a bit from here, a bit from there, eye of toad, ear of * bat, 15 years of failed attempts by David and a few fried brain * cells. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /*! \file */ /* Implementation Notes David Rowe April 2007 This code started life as Steve's NLMS algorithm with a tap rotation algorithm to handle divergence during double talk. I added a Geigel Double Talk Detector (DTD) [2] and performed some G168 tests. However I had trouble meeting the G168 requirements, especially for double talk - there were always cases where my DTD failed, for example where near end speech was under the 6dB threshold required for declaring double talk. So I tried a two path algorithm [1], which has so far given better results. The original tap rotation/Geigel algorithm is available in SVN http://svn.rowetel.com/software/oslec/tags/before_16bit. It's probably possible to make it work if some one wants to put some serious work into it. At present no special treatment is provided for tones, which generally cause NLMS algorithms to diverge. 
Initial runs of a subset of the G168 tests for tones (e.g ./echo_test 6) show the current algorithm is passing OK, which is kind of surprising. The full set of tests needs to be performed to confirm this result. One other interesting change is that I have managed to get the NLMS code to work with 16 bit coefficients, rather than the original 32 bit coefficents. This reduces the MIPs and storage required. I evaulated the 16 bit port using g168_tests.sh and listening tests on 4 real-world samples. I also attempted the implementation of a block based NLMS update [2] but although this passes g168_tests.sh it didn't converge well on the real-world samples. I have no idea why, perhaps a scaling problem. The block based code is also available in SVN http://svn.rowetel.com/software/oslec/tags/before_16bit. If this code can be debugged, it will lead to further reduction in MIPS, as the block update code maps nicely onto DSP instruction sets (it's a dot product) compared to the current sample-by-sample update. Steve also has some nice notes on echo cancellers in echo.h References: [1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo Path Models", IEEE Transactions on communications, COM-25, No. 6, June 1977. http://www.rowetel.com/images/echo/dual_path_paper.pdf [2] The classic, very useful paper that tells you how to actually build a real world echo canceller: Messerschmitt, Hedberg, Cole, Haoui, Winship, "Digital Voice Echo Canceller with a TMS320020, http://www.rowetel.com/images/echo/spra129.pdf [3] I have written a series of blog posts on this work, here is Part 1: http://www.rowetel.com/blog/?p=18 [4] The source code http://svn.rowetel.com/software/oslec/ [5] A nice reference on LMS filters: http://en.wikipedia.org/wiki/Least_mean_squares_filter Credits: Thanks to Steve Underwood, Jean-Marc Valin, and Ramakrishnan Muthukrishnan for their suggestions and email discussions. 
Thanks also to those people who collected echo samples for me such as Mark, Pawel, and Pavel. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include "echo.h" #define MIN_TX_POWER_FOR_ADAPTION 64 #define MIN_RX_POWER_FOR_ADAPTION 64 #define DTD_HANGOVER 600 /* 600 samples, or 75ms */ #define DC_LOG2BETA 3 /* log2() of DC filter Beta */ /* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */ #ifdef __bfin__ static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift) { int i; int offset1; int offset2; int factor; int exp; int16_t *phist; int n; if (shift > 0) factor = clean << shift; else factor = clean >> -shift; /* Update the FIR taps */ offset2 = ec->curr_pos; offset1 = ec->taps - offset2; phist = &ec->fir_state_bg.history[offset2]; /* st: and en: help us locate the assembler in echo.s */ /* asm("st:"); */ n = ec->taps; for (i = 0; i < n; i++) { exp = *phist++ * factor; ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); } /* asm("en:"); */ /* Note the asm for the inner loop above generated by Blackfin gcc 4.1.1 is pretty good (note even parallel instructions used): R0 = W [P0++] (X); R0 *= R2; R0 = R0 + R3 (NS) || R1 = W [P1] (X) || nop; R0 >>>= 15; R0 = R0 + R1; W [P1++] = R0; A block based update algorithm would be much faster but the above can't be improved on much. Every instruction saved in the loop above is 2 MIPs/ch! The for loop above is where the Blackfin spends most of it's time - about 17 MIPs/ch measured with speedtest.c with 256 taps (32ms). Write-back and Write-through cache gave about the same performance. */ } /* IDEAS for further optimisation of lms_adapt_bg(): 1/ The rounding is quite costly. Could we keep as 32 bit coeffs then make filter pluck the MS 16-bits of the coeffs when filtering? However this would lower potential optimisation of filter, as I think the dual-MAC architecture requires packed 16 bit coeffs. 
2/ Block based update would be more efficient, as per comments above, could use dual MAC architecture. 3/ Look for same sample Blackfin LMS code, see if we can get dual-MAC packing. 4/ Execute the whole e/c in a block of say 20ms rather than sample by sample. Processing a few samples every ms is inefficient. */ #else static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift) { int i; int offset1; int offset2; int factor; int exp; if (shift > 0) factor = clean << shift; else factor = clean >> -shift; /* Update the FIR taps */ offset2 = ec->curr_pos; offset1 = ec->taps - offset2; for (i = ec->taps - 1; i >= offset1; i--) { exp = (ec->fir_state_bg.history[i - offset1] * factor); ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); } for (; i >= 0; i--) { exp = (ec->fir_state_bg.history[i + offset2] * factor); ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); } } #endif static inline int top_bit(unsigned int bits) { if (bits == 0) return -1; else return (int)fls((int32_t) bits) - 1; } struct oslec_state *oslec_create(int len, int adaption_mode) { struct oslec_state *ec; int i; const int16_t *history; ec = kzalloc(sizeof(*ec), GFP_KERNEL); if (!ec) return NULL; ec->taps = len; ec->log2taps = top_bit(len); ec->curr_pos = ec->taps - 1; ec->fir_taps16[0] = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); if (!ec->fir_taps16[0]) goto error_oom_0; ec->fir_taps16[1] = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); if (!ec->fir_taps16[1]) goto error_oom_1; history = fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps); if (!history) goto error_state; history = fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps); if (!history) goto error_state_bg; for (i = 0; i < 5; i++) ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0; ec->cng_level = 1000; oslec_adaption_mode(ec, adaption_mode); ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); if (!ec->snapshot) goto error_snap; ec->cond_met = 0; ec->Pstates = 0; 
ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0; ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0; ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; ec->Lbgn = ec->Lbgn_acc = 0; ec->Lbgn_upper = 200; ec->Lbgn_upper_acc = ec->Lbgn_upper << 13; return ec; error_snap: fir16_free(&ec->fir_state_bg); error_state_bg: fir16_free(&ec->fir_state); error_state: kfree(ec->fir_taps16[1]); error_oom_1: kfree(ec->fir_taps16[0]); error_oom_0: kfree(ec); return NULL; } EXPORT_SYMBOL_GPL(oslec_create); void oslec_free(struct oslec_state *ec) { int i; fir16_free(&ec->fir_state); fir16_free(&ec->fir_state_bg); for (i = 0; i < 2; i++) kfree(ec->fir_taps16[i]); kfree(ec->snapshot); kfree(ec); } EXPORT_SYMBOL_GPL(oslec_free); void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode) { ec->adaption_mode = adaption_mode; } EXPORT_SYMBOL_GPL(oslec_adaption_mode); void oslec_flush(struct oslec_state *ec) { int i; ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0; ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0; ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; ec->Lbgn = ec->Lbgn_acc = 0; ec->Lbgn_upper = 200; ec->Lbgn_upper_acc = ec->Lbgn_upper << 13; ec->nonupdate_dwell = 0; fir16_flush(&ec->fir_state); fir16_flush(&ec->fir_state_bg); ec->fir_state.curr_pos = ec->taps - 1; ec->fir_state_bg.curr_pos = ec->taps - 1; for (i = 0; i < 2; i++) memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t)); ec->curr_pos = ec->taps - 1; ec->Pstates = 0; } EXPORT_SYMBOL_GPL(oslec_flush); void oslec_snapshot(struct oslec_state *ec) { memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps * sizeof(int16_t)); } EXPORT_SYMBOL_GPL(oslec_snapshot); /* Dual Path Echo Canceller */ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx) { int32_t echo_value; int clean_bg; int tmp; int tmp1; /* * Input scaling was found be required to prevent problems when tx * starts clipping. Another possible way to handle this would be the * filter coefficent scaling. 
*/ ec->tx = tx; ec->rx = rx; tx >>= 1; rx >>= 1; /* * Filter DC, 3dB point is 160Hz (I think), note 32 bit precision * required otherwise values do not track down to 0. Zero at DC, Pole * at (1-Beta) on real axis. Some chip sets (like Si labs) don't * need this, but something like a $10 X100P card does. Any DC really * slows down convergence. * * Note: removes some low frequency from the signal, this reduces the * speech quality when listening to samples through headphones but may * not be obvious through a telephone handset. * * Note that the 3dB frequency in radians is approx Beta, e.g. for Beta * = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz. */ if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) { tmp = rx << 15; /* * Make sure the gain of the HPF is 1.0. This can still * saturate a little under impulse conditions, and it might * roll to 32768 and need clipping on sustained peak level * signals. However, the scale of such clipping is small, and * the error due to any saturation should not markedly affect * the downstream processing. */ tmp -= (tmp >> 4); ec->rx_1 += -(ec->rx_1 >> DC_LOG2BETA) + tmp - ec->rx_2; /* * hard limit filter to prevent clipping. Note that at this * stage rx should be limited to +/- 16383 due to right shift * above */ tmp1 = ec->rx_1 >> 15; if (tmp1 > 16383) tmp1 = 16383; if (tmp1 < -16383) tmp1 = -16383; rx = tmp1; ec->rx_2 = tmp; } /* Block average of power in the filter states. Used for adaption power calculation. */ { int new, old; /* efficient "out with the old and in with the new" algorithm so we don't have to recalculate over the whole block of samples. 
*/ new = (int)tx * (int)tx; old = (int)ec->fir_state.history[ec->fir_state.curr_pos] * (int)ec->fir_state.history[ec->fir_state.curr_pos]; ec->Pstates += ((new - old) + (1 << (ec->log2taps - 1))) >> ec->log2taps; if (ec->Pstates < 0) ec->Pstates = 0; } /* Calculate short term average levels using simple single pole IIRs */ ec->Ltxacc += abs(tx) - ec->Ltx; ec->Ltx = (ec->Ltxacc + (1 << 4)) >> 5; ec->Lrxacc += abs(rx) - ec->Lrx; ec->Lrx = (ec->Lrxacc + (1 << 4)) >> 5; /* Foreground filter */ ec->fir_state.coeffs = ec->fir_taps16[0]; echo_value = fir16(&ec->fir_state, tx); ec->clean = rx - echo_value; ec->Lcleanacc += abs(ec->clean) - ec->Lclean; ec->Lclean = (ec->Lcleanacc + (1 << 4)) >> 5; /* Background filter */ echo_value = fir16(&ec->fir_state_bg, tx); clean_bg = rx - echo_value; ec->Lclean_bgacc += abs(clean_bg) - ec->Lclean_bg; ec->Lclean_bg = (ec->Lclean_bgacc + (1 << 4)) >> 5; /* Background Filter adaption */ /* Almost always adap bg filter, just simple DT and energy detection to minimise adaption in cases of strong double talk. However this is not critical for the dual path algorithm. */ ec->factor = 0; ec->shift = 0; if ((ec->nonupdate_dwell == 0)) { int P, logP, shift; /* Determine: f = Beta * clean_bg_rx/P ------ (1) where P is the total power in the filter states. The Boffins have shown that if we obey (1) we converge quickly and avoid instability. The correct factor f must be in Q30, as this is the fixed point format required by the lms_adapt_bg() function, therefore the scaled version of (1) is: (2^30) * f = (2^30) * Beta * clean_bg_rx/P factor = (2^30) * Beta * clean_bg_rx/P ----- (2) We have chosen Beta = 0.25 by experiment, so: factor = (2^30) * (2^-2) * clean_bg_rx/P (30 - 2 - log2(P)) factor = clean_bg_rx 2 ----- (3) To avoid a divide we approximate log2(P) as top_bit(P), which returns the position of the highest non-zero bit in P. This approximation introduces an error as large as a factor of 2, but the algorithm seems to handle it OK. 
Come to think of it a divide may not be a big deal on a modern DSP, so its probably worth checking out the cycles for a divide versus a top_bit() implementation. */ P = MIN_TX_POWER_FOR_ADAPTION + ec->Pstates; logP = top_bit(P) + ec->log2taps; shift = 30 - 2 - logP; ec->shift = shift; lms_adapt_bg(ec, clean_bg, shift); } /* very simple DTD to make sure we dont try and adapt with strong near end speech */ ec->adapt = 0; if ((ec->Lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->Lrx > ec->Ltx)) ec->nonupdate_dwell = DTD_HANGOVER; if (ec->nonupdate_dwell) ec->nonupdate_dwell--; /* Transfer logic */ /* These conditions are from the dual path paper [1], I messed with them a bit to improve performance. */ if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) && (ec->nonupdate_dwell == 0) && /* (ec->Lclean_bg < 0.875*ec->Lclean) */ (8 * ec->Lclean_bg < 7 * ec->Lclean) && /* (ec->Lclean_bg < 0.125*ec->Ltx) */ (8 * ec->Lclean_bg < ec->Ltx)) { if (ec->cond_met == 6) { /* * BG filter has had better results for 6 consecutive * samples */ ec->adapt = 1; memcpy(ec->fir_taps16[0], ec->fir_taps16[1], ec->taps * sizeof(int16_t)); } else ec->cond_met++; } else ec->cond_met = 0; /* Non-Linear Processing */ ec->clean_nlp = ec->clean; if (ec->adaption_mode & ECHO_CAN_USE_NLP) { /* * Non-linear processor - a fancy way to say "zap small * signals, to avoid residual echo due to (uLaw/ALaw) * non-linearity in the channel.". */ if ((16 * ec->Lclean < ec->Ltx)) { /* * Our e/c has improved echo by at least 24 dB (each * factor of 2 is 6dB, so 2*2*2*2=16 is the same as * 6+6+6+6=24dB) */ if (ec->adaption_mode & ECHO_CAN_USE_CNG) { ec->cng_level = ec->Lbgn; /* * Very elementary comfort noise generation. * Just random numbers rolled off very vaguely * Hoth-like. DR: This noise doesn't sound * quite right to me - I suspect there are some * overflow issues in the filtering as it's too * "crackly". * TODO: debug this, maybe just play noise at * high level or look at spectrum. 
*/ ec->cng_rndnum = 1664525U * ec->cng_rndnum + 1013904223U; ec->cng_filter = ((ec->cng_rndnum & 0xFFFF) - 32768 + 5 * ec->cng_filter) >> 3; ec->clean_nlp = (ec->cng_filter * ec->cng_level * 8) >> 14; } else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) { /* This sounds much better than CNG */ if (ec->clean_nlp > ec->Lbgn) ec->clean_nlp = ec->Lbgn; if (ec->clean_nlp < -ec->Lbgn) ec->clean_nlp = -ec->Lbgn; } else { /* * just mute the residual, doesn't sound very * good, used mainly in G168 tests */ ec->clean_nlp = 0; } } else { /* * Background noise estimator. I tried a few * algorithms here without much luck. This very simple * one seems to work best, we just average the level * using a slow (1 sec time const) filter if the * current level is less than a (experimentally * derived) constant. This means we dont include high * level signals like near end speech. When combined * with CNG or especially CLIP seems to work OK. */ if (ec->Lclean < 40) { ec->Lbgn_acc += abs(ec->clean) - ec->Lbgn; ec->Lbgn = (ec->Lbgn_acc + (1 << 11)) >> 12; } } } /* Roll around the taps buffer */ if (ec->curr_pos <= 0) ec->curr_pos = ec->taps; ec->curr_pos--; if (ec->adaption_mode & ECHO_CAN_DISABLE) ec->clean_nlp = rx; /* Output scaled back up again to match input scaling */ return (int16_t) ec->clean_nlp << 1; } EXPORT_SYMBOL_GPL(oslec_update); /* This function is separated from the echo canceller is it is usually called as part of the tx process. See rx HP (DC blocking) filter above, it's the same design. Some soft phones send speech signals with a lot of low frequency energy, e.g. down to 20Hz. This can make the hybrid non-linear which causes the echo canceller to fall over. This filter can help by removing any low frequency before it gets to the tx port of the hybrid. It can also help by removing and DC in the tx signal. DC is bad for LMS algorithms. 
This is one of the classic DC removal filters, adjusted to provide sufficient bass rolloff to meet the above requirement to protect hybrids from things that upset them. The difference between successive samples produces a lousy HPF, and then a suitably placed pole flattens things out. The final result is a nicely rolled off bass end. The filtering is implemented with extended fractional precision, which noise shapes things, giving very clean DC removal. */ int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx) { int tmp; int tmp1; if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) { tmp = tx << 15; /* * Make sure the gain of the HPF is 1.0. The first can still * saturate a little under impulse conditions, and it might * roll to 32768 and need clipping on sustained peak level * signals. However, the scale of such clipping is small, and * the error due to any saturation should not markedly affect * the downstream processing. */ tmp -= (tmp >> 4); ec->tx_1 += -(ec->tx_1 >> DC_LOG2BETA) + tmp - ec->tx_2; tmp1 = ec->tx_1 >> 15; if (tmp1 > 32767) tmp1 = 32767; if (tmp1 < -32767) tmp1 = -32767; tx = tmp1; ec->tx_2 = tmp; } return tx; } EXPORT_SYMBOL_GPL(oslec_hpf_tx); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Rowe"); MODULE_DESCRIPTION("Open Source Line Echo Canceller"); MODULE_VERSION("0.3.0");
gpl-2.0
yuzaipiaofei/android_kernel_cyanogen_msm8916
drivers/acpi/button.c
2149
12387
/* * button.c - ACPI Button Driver * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/input.h> #include <linux/slab.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <acpi/button.h> #define PREFIX "ACPI: " #define ACPI_BUTTON_CLASS "button" #define ACPI_BUTTON_FILE_INFO "info" #define ACPI_BUTTON_FILE_STATE "state" #define ACPI_BUTTON_TYPE_UNKNOWN 0x00 #define ACPI_BUTTON_NOTIFY_STATUS 0x80 #define ACPI_BUTTON_SUBCLASS_POWER "power" #define ACPI_BUTTON_HID_POWER "PNP0C0C" #define ACPI_BUTTON_DEVICE_NAME_POWER "Power Button" #define ACPI_BUTTON_TYPE_POWER 0x01 #define ACPI_BUTTON_SUBCLASS_SLEEP "sleep" #define ACPI_BUTTON_HID_SLEEP "PNP0C0E" #define ACPI_BUTTON_DEVICE_NAME_SLEEP "Sleep Button" #define ACPI_BUTTON_TYPE_SLEEP 0x03 #define ACPI_BUTTON_SUBCLASS_LID "lid" #define ACPI_BUTTON_HID_LID "PNP0C0D" #define ACPI_BUTTON_DEVICE_NAME_LID "Lid 
Switch" #define ACPI_BUTTON_TYPE_LID 0x05 #define _COMPONENT ACPI_BUTTON_COMPONENT ACPI_MODULE_NAME("button"); MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI Button Driver"); MODULE_LICENSE("GPL"); static const struct acpi_device_id button_device_ids[] = { {ACPI_BUTTON_HID_LID, 0}, {ACPI_BUTTON_HID_SLEEP, 0}, {ACPI_BUTTON_HID_SLEEPF, 0}, {ACPI_BUTTON_HID_POWER, 0}, {ACPI_BUTTON_HID_POWERF, 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, button_device_ids); static int acpi_button_add(struct acpi_device *device); static int acpi_button_remove(struct acpi_device *device); static void acpi_button_notify(struct acpi_device *device, u32 event); #ifdef CONFIG_PM_SLEEP static int acpi_button_resume(struct device *dev); #endif static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume); static struct acpi_driver acpi_button_driver = { .name = "button", .class = ACPI_BUTTON_CLASS, .ids = button_device_ids, .ops = { .add = acpi_button_add, .remove = acpi_button_remove, .notify = acpi_button_notify, }, .drv.pm = &acpi_button_pm, }; struct acpi_button { unsigned int type; struct input_dev *input; char phys[32]; /* for input device */ unsigned long pushed; bool wakeup_enabled; }; static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); static struct acpi_device *lid_device; /* -------------------------------------------------------------------------- FS Interface (/proc) -------------------------------------------------------------------------- */ static struct proc_dir_entry *acpi_button_dir; static struct proc_dir_entry *acpi_lid_dir; static int acpi_button_state_seq_show(struct seq_file *seq, void *offset) { struct acpi_device *device = seq->private; acpi_status status; unsigned long long state; status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); seq_printf(seq, "state: %s\n", ACPI_FAILURE(status) ? "unsupported" : (state ? 
"open" : "closed")); return 0; } static int acpi_button_state_open_fs(struct inode *inode, struct file *file) { return single_open(file, acpi_button_state_seq_show, PDE_DATA(inode)); } static const struct file_operations acpi_button_state_fops = { .owner = THIS_MODULE, .open = acpi_button_state_open_fs, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int acpi_button_add_fs(struct acpi_device *device) { struct acpi_button *button = acpi_driver_data(device); struct proc_dir_entry *entry = NULL; int ret = 0; /* procfs I/F for ACPI lid device only */ if (button->type != ACPI_BUTTON_TYPE_LID) return 0; if (acpi_button_dir || acpi_lid_dir) { printk(KERN_ERR PREFIX "More than one Lid device found!\n"); return -EEXIST; } /* create /proc/acpi/button */ acpi_button_dir = proc_mkdir(ACPI_BUTTON_CLASS, acpi_root_dir); if (!acpi_button_dir) return -ENODEV; /* create /proc/acpi/button/lid */ acpi_lid_dir = proc_mkdir(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir); if (!acpi_lid_dir) { ret = -ENODEV; goto remove_button_dir; } /* create /proc/acpi/button/lid/LID/ */ acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), acpi_lid_dir); if (!acpi_device_dir(device)) { ret = -ENODEV; goto remove_lid_dir; } /* create /proc/acpi/button/lid/LID/state */ entry = proc_create_data(ACPI_BUTTON_FILE_STATE, S_IRUGO, acpi_device_dir(device), &acpi_button_state_fops, device); if (!entry) { ret = -ENODEV; goto remove_dev_dir; } done: return ret; remove_dev_dir: remove_proc_entry(acpi_device_bid(device), acpi_lid_dir); acpi_device_dir(device) = NULL; remove_lid_dir: remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir); remove_button_dir: remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir); goto done; } static int acpi_button_remove_fs(struct acpi_device *device) { struct acpi_button *button = acpi_driver_data(device); if (button->type != ACPI_BUTTON_TYPE_LID) return 0; remove_proc_entry(ACPI_BUTTON_FILE_STATE, acpi_device_dir(device)); 
remove_proc_entry(acpi_device_bid(device), acpi_lid_dir); acpi_device_dir(device) = NULL; remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir); remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir); return 0; } /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ int acpi_lid_notifier_register(struct notifier_block *nb) { return blocking_notifier_chain_register(&acpi_lid_notifier, nb); } EXPORT_SYMBOL(acpi_lid_notifier_register); int acpi_lid_notifier_unregister(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb); } EXPORT_SYMBOL(acpi_lid_notifier_unregister); int acpi_lid_open(void) { acpi_status status; unsigned long long state; if (!lid_device) return -ENODEV; status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL, &state); if (ACPI_FAILURE(status)) return -ENODEV; return !!state; } EXPORT_SYMBOL(acpi_lid_open); static int acpi_lid_send_state(struct acpi_device *device) { struct acpi_button *button = acpi_driver_data(device); unsigned long long state; acpi_status status; int ret; status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); if (ACPI_FAILURE(status)) return -ENODEV; /* input layer checks if event is redundant */ input_report_switch(button->input, SW_LID, !state); input_sync(button->input); if (state) pm_wakeup_event(&device->dev, 0); ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); if (ret == NOTIFY_DONE) ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); if (ret == NOTIFY_DONE || ret == NOTIFY_OK) { /* * It is also regarded as success if the notifier_chain * returns NOTIFY_OK or NOTIFY_DONE. 
*/ ret = 0; } return ret; } static void acpi_button_notify(struct acpi_device *device, u32 event) { struct acpi_button *button = acpi_driver_data(device); struct input_dev *input; switch (event) { case ACPI_FIXED_HARDWARE_EVENT: event = ACPI_BUTTON_NOTIFY_STATUS; /* fall through */ case ACPI_BUTTON_NOTIFY_STATUS: input = button->input; if (button->type == ACPI_BUTTON_TYPE_LID) { acpi_lid_send_state(device); } else { int keycode = test_bit(KEY_SLEEP, input->keybit) ? KEY_SLEEP : KEY_POWER; input_report_key(input, keycode, 1); input_sync(input); input_report_key(input, keycode, 0); input_sync(input); pm_wakeup_event(&device->dev, 0); } acpi_bus_generate_proc_event(device, event, ++button->pushed); break; default: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported event [0x%x]\n", event)); break; } } #ifdef CONFIG_PM_SLEEP static int acpi_button_resume(struct device *dev) { struct acpi_device *device = to_acpi_device(dev); struct acpi_button *button = acpi_driver_data(device); if (button->type == ACPI_BUTTON_TYPE_LID) return acpi_lid_send_state(device); return 0; } #endif static int acpi_button_add(struct acpi_device *device) { struct acpi_button *button; struct input_dev *input; const char *hid = acpi_device_hid(device); char *name, *class; int error; button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL); if (!button) return -ENOMEM; device->driver_data = button; button->input = input = input_allocate_device(); if (!input) { error = -ENOMEM; goto err_free_button; } name = acpi_device_name(device); class = acpi_device_class(device); if (!strcmp(hid, ACPI_BUTTON_HID_POWER) || !strcmp(hid, ACPI_BUTTON_HID_POWERF)) { button->type = ACPI_BUTTON_TYPE_POWER; strcpy(name, ACPI_BUTTON_DEVICE_NAME_POWER); sprintf(class, "%s/%s", ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER); } else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEP) || !strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) { button->type = ACPI_BUTTON_TYPE_SLEEP; strcpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP); sprintf(class, "%s/%s", 
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP); } else if (!strcmp(hid, ACPI_BUTTON_HID_LID)) { button->type = ACPI_BUTTON_TYPE_LID; strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID); sprintf(class, "%s/%s", ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID); } else { printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid); error = -ENODEV; goto err_free_input; } error = acpi_button_add_fs(device); if (error) goto err_free_input; snprintf(button->phys, sizeof(button->phys), "%s/button/input0", hid); input->name = name; input->phys = button->phys; input->id.bustype = BUS_HOST; input->id.product = button->type; input->dev.parent = &device->dev; switch (button->type) { case ACPI_BUTTON_TYPE_POWER: input->evbit[0] = BIT_MASK(EV_KEY); set_bit(KEY_POWER, input->keybit); break; case ACPI_BUTTON_TYPE_SLEEP: input->evbit[0] = BIT_MASK(EV_KEY); set_bit(KEY_SLEEP, input->keybit); break; case ACPI_BUTTON_TYPE_LID: input->evbit[0] = BIT_MASK(EV_SW); set_bit(SW_LID, input->swbit); break; } error = input_register_device(input); if (error) goto err_remove_fs; if (button->type == ACPI_BUTTON_TYPE_LID) { acpi_lid_send_state(device); /* * This assumes there's only one lid device, or if there are * more we only care about the last one... 
*/ lid_device = device; } if (device->wakeup.flags.valid) { /* Button's GPE is run-wake GPE */ acpi_enable_gpe(device->wakeup.gpe_device, device->wakeup.gpe_number); if (!device_may_wakeup(&device->dev)) { device_set_wakeup_enable(&device->dev, true); button->wakeup_enabled = true; } } printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); return 0; err_remove_fs: acpi_button_remove_fs(device); err_free_input: input_free_device(input); err_free_button: kfree(button); return error; } static int acpi_button_remove(struct acpi_device *device) { struct acpi_button *button = acpi_driver_data(device); if (device->wakeup.flags.valid) { acpi_disable_gpe(device->wakeup.gpe_device, device->wakeup.gpe_number); if (button->wakeup_enabled) device_set_wakeup_enable(&device->dev, false); } acpi_button_remove_fs(device); input_unregister_device(button->input); kfree(button); return 0; } module_acpi_driver(acpi_button_driver);
gpl-2.0
CyanideL/android_kernel_asus_grouper
fs/debugfs/inode.c
2405
15570
/* * file.c - part of debugfs, a tiny little debug file system * * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2004 IBM Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * debugfs is for people to use instead of /proc or /sys. * See Documentation/DocBook/kernel-api for more details. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/kobject.h> #include <linux/namei.h> #include <linux/debugfs.h> #include <linux/fsnotify.h> #include <linux/string.h> #include <linux/magic.h> #include <linux/slab.h> static struct vfsmount *debugfs_mount; static int debugfs_mount_count; static bool debugfs_registered; static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev, void *data, const struct file_operations *fops) { struct inode *inode = new_inode(sb); if (inode) { inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; switch (mode & S_IFMT) { default: init_special_inode(inode, mode, dev); break; case S_IFREG: inode->i_fop = fops ? fops : &debugfs_file_operations; inode->i_private = data; break; case S_IFLNK: inode->i_op = &debugfs_link_operations; inode->i_fop = fops; inode->i_private = data; break; case S_IFDIR: inode->i_op = &simple_dir_inode_operations; inode->i_fop = fops ? fops : &simple_dir_operations; inode->i_private = data; /* directory inodes start off with i_nlink == 2 * (for "." 
entry) */ inc_nlink(inode); break; } } return inode; } /* SMP-safe */ static int debugfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev, void *data, const struct file_operations *fops) { struct inode *inode; int error = -EPERM; if (dentry->d_inode) return -EEXIST; inode = debugfs_get_inode(dir->i_sb, mode, dev, data, fops); if (inode) { d_instantiate(dentry, inode); dget(dentry); error = 0; } return error; } static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode, void *data, const struct file_operations *fops) { int res; mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; res = debugfs_mknod(dir, dentry, mode, 0, data, fops); if (!res) { inc_nlink(dir); fsnotify_mkdir(dir, dentry); } return res; } static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode, void *data, const struct file_operations *fops) { mode = (mode & S_IALLUGO) | S_IFLNK; return debugfs_mknod(dir, dentry, mode, 0, data, fops); } static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode, void *data, const struct file_operations *fops) { int res; mode = (mode & S_IALLUGO) | S_IFREG; res = debugfs_mknod(dir, dentry, mode, 0, data, fops); if (!res) fsnotify_create(dir, dentry); return res; } static inline int debugfs_positive(struct dentry *dentry) { return dentry->d_inode && !d_unhashed(dentry); } static int debug_fill_super(struct super_block *sb, void *data, int silent) { static struct tree_descr debug_files[] = {{""}}; return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files); } static struct dentry *debug_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_single(fs_type, flags, data, debug_fill_super); } static struct file_system_type debug_fs_type = { .owner = THIS_MODULE, .name = "debugfs", .mount = debug_mount, .kill_sb = kill_litter_super, }; static int debugfs_create_by_name(const char *name, mode_t mode, struct dentry *parent, struct dentry **dentry, void *data, const 
struct file_operations *fops) { int error = 0; /* If the parent is not specified, we create it in the root. * We need the root dentry to do this, which is in the super * block. A pointer to that is in the struct vfsmount that we * have around. */ if (!parent) parent = debugfs_mount->mnt_sb->s_root; *dentry = NULL; mutex_lock(&parent->d_inode->i_mutex); *dentry = lookup_one_len(name, parent, strlen(name)); if (!IS_ERR(*dentry)) { switch (mode & S_IFMT) { case S_IFDIR: error = debugfs_mkdir(parent->d_inode, *dentry, mode, data, fops); break; case S_IFLNK: error = debugfs_link(parent->d_inode, *dentry, mode, data, fops); break; default: error = debugfs_create(parent->d_inode, *dentry, mode, data, fops); break; } dput(*dentry); } else error = PTR_ERR(*dentry); mutex_unlock(&parent->d_inode->i_mutex); return error; } /** * debugfs_create_file - create a file in the debugfs filesystem * @name: a pointer to a string containing the name of the file to create. * @mode: the permission that the file should have. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this paramater is NULL, then the * file will be created in the root of the debugfs filesystem. * @data: a pointer to something that the caller will want to get to later * on. The inode.i_private pointer will point to this value on * the open() call. * @fops: a pointer to a struct file_operations that should be used for * this file. * * This is the basic "create a file" function for debugfs. It allows for a * wide range of flexibility in creating a file, or a directory (if you want * to create a directory, the debugfs_create_dir() function is * recommended to be used instead.) * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here.) If an error occurs, %NULL will be returned. 
* * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *debugfs_create_file(const char *name, mode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { struct dentry *dentry = NULL; int error; pr_debug("debugfs: creating file '%s'\n",name); error = simple_pin_fs(&debug_fs_type, &debugfs_mount, &debugfs_mount_count); if (error) goto exit; error = debugfs_create_by_name(name, mode, parent, &dentry, data, fops); if (error) { dentry = NULL; simple_release_fs(&debugfs_mount, &debugfs_mount_count); goto exit; } exit: return dentry; } EXPORT_SYMBOL_GPL(debugfs_create_file); /** * debugfs_create_dir - create a directory in the debugfs filesystem * @name: a pointer to a string containing the name of the directory to * create. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this paramater is NULL, then the * directory will be created in the root of the debugfs filesystem. * * This function creates a directory in debugfs with the given name. * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here.) If an error occurs, %NULL will be returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) { return debugfs_create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, parent, NULL, NULL); } EXPORT_SYMBOL_GPL(debugfs_create_dir); /** * debugfs_create_symlink- create a symbolic link in the debugfs filesystem * @name: a pointer to a string containing the name of the symbolic link to * create. * @parent: a pointer to the parent dentry for this symbolic link. This * should be a directory dentry if set. 
If this paramater is NULL, * then the symbolic link will be created in the root of the debugfs * filesystem. * @target: a pointer to a string containing the path to the target of the * symbolic link. * * This function creates a symbolic link with the given name in debugfs that * links to the given target path. * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the symbolic * link is to be removed (no automatic cleanup happens if your module is * unloaded, you are responsible here.) If an error occurs, %NULL will be * returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *target) { struct dentry *result; char *link; link = kstrdup(target, GFP_KERNEL); if (!link) return NULL; result = debugfs_create_file(name, S_IFLNK | S_IRWXUGO, parent, link, NULL); if (!result) kfree(link); return result; } EXPORT_SYMBOL_GPL(debugfs_create_symlink); static int __debugfs_remove(struct dentry *dentry, struct dentry *parent) { int ret = 0; if (debugfs_positive(dentry)) { if (dentry->d_inode) { dget(dentry); switch (dentry->d_inode->i_mode & S_IFMT) { case S_IFDIR: ret = simple_rmdir(parent->d_inode, dentry); break; case S_IFLNK: kfree(dentry->d_inode->i_private); /* fall through */ default: simple_unlink(parent->d_inode, dentry); break; } if (!ret) d_delete(dentry); dput(dentry); } } return ret; } /** * debugfs_remove - removes a file or directory from the debugfs filesystem * @dentry: a pointer to a the dentry of the file or directory to be * removed. * * This function removes a file or directory in debugfs that was previously * created with a call to another debugfs function (like * debugfs_create_file() or variants thereof.) 
* * This function is required to be called in order for the file to be * removed, no automatic cleanup of files will happen when a module is * removed, you are responsible here. */ void debugfs_remove(struct dentry *dentry) { struct dentry *parent; int ret; if (!dentry) return; parent = dentry->d_parent; if (!parent || !parent->d_inode) return; mutex_lock(&parent->d_inode->i_mutex); ret = __debugfs_remove(dentry, parent); mutex_unlock(&parent->d_inode->i_mutex); if (!ret) simple_release_fs(&debugfs_mount, &debugfs_mount_count); } EXPORT_SYMBOL_GPL(debugfs_remove); /** * debugfs_remove_recursive - recursively removes a directory * @dentry: a pointer to a the dentry of the directory to be removed. * * This function recursively removes a directory tree in debugfs that * was previously created with a call to another debugfs function * (like debugfs_create_file() or variants thereof.) * * This function is required to be called in order for the file to be * removed, no automatic cleanup of files will happen when a module is * removed, you are responsible here. */ void debugfs_remove_recursive(struct dentry *dentry) { struct dentry *child; struct dentry *parent; if (!dentry) return; parent = dentry->d_parent; if (!parent || !parent->d_inode) return; parent = dentry; mutex_lock(&parent->d_inode->i_mutex); while (1) { /* * When all dentries under "parent" has been removed, * walk up the tree until we reach our starting point. */ if (list_empty(&parent->d_subdirs)) { mutex_unlock(&parent->d_inode->i_mutex); if (parent == dentry) break; parent = parent->d_parent; mutex_lock(&parent->d_inode->i_mutex); } child = list_entry(parent->d_subdirs.next, struct dentry, d_u.d_child); next_sibling: /* * If "child" isn't empty, walk down the tree and * remove all its descendants first. 
*/ if (!list_empty(&child->d_subdirs)) { mutex_unlock(&parent->d_inode->i_mutex); parent = child; mutex_lock(&parent->d_inode->i_mutex); continue; } __debugfs_remove(child, parent); if (parent->d_subdirs.next == &child->d_u.d_child) { /* * Try the next sibling. */ if (child->d_u.d_child.next != &parent->d_subdirs) { child = list_entry(child->d_u.d_child.next, struct dentry, d_u.d_child); goto next_sibling; } /* * Avoid infinite loop if we fail to remove * one dentry. */ mutex_unlock(&parent->d_inode->i_mutex); break; } simple_release_fs(&debugfs_mount, &debugfs_mount_count); } parent = dentry->d_parent; mutex_lock(&parent->d_inode->i_mutex); __debugfs_remove(dentry, parent); mutex_unlock(&parent->d_inode->i_mutex); simple_release_fs(&debugfs_mount, &debugfs_mount_count); } EXPORT_SYMBOL_GPL(debugfs_remove_recursive); /** * debugfs_rename - rename a file/directory in the debugfs filesystem * @old_dir: a pointer to the parent dentry for the renamed object. This * should be a directory dentry. * @old_dentry: dentry of an object to be renamed. * @new_dir: a pointer to the parent dentry where the object should be * moved. This should be a directory dentry. * @new_name: a pointer to a string containing the target name. * * This function renames a file/directory in debugfs. The target must not * exist for rename to succeed. * * This function will return a pointer to old_dentry (which is updated to * reflect renaming) if it succeeds. If an error occurs, %NULL will be * returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name) { int error; struct dentry *dentry = NULL, *trap; const char *old_name; trap = lock_rename(new_dir, old_dir); /* Source or destination directories don't exist? */ if (!old_dir->d_inode || !new_dir->d_inode) goto exit; /* Source does not exist, cyclic rename, or mountpoint? 
*/ if (!old_dentry->d_inode || old_dentry == trap || d_mountpoint(old_dentry)) goto exit; dentry = lookup_one_len(new_name, new_dir, strlen(new_name)); /* Lookup failed, cyclic rename or target exists? */ if (IS_ERR(dentry) || dentry == trap || dentry->d_inode) goto exit; old_name = fsnotify_oldname_init(old_dentry->d_name.name); error = simple_rename(old_dir->d_inode, old_dentry, new_dir->d_inode, dentry); if (error) { fsnotify_oldname_free(old_name); goto exit; } d_move(old_dentry, dentry); fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name, S_ISDIR(old_dentry->d_inode->i_mode), NULL, old_dentry); fsnotify_oldname_free(old_name); unlock_rename(new_dir, old_dir); dput(dentry); return old_dentry; exit: if (dentry && !IS_ERR(dentry)) dput(dentry); unlock_rename(new_dir, old_dir); return NULL; } EXPORT_SYMBOL_GPL(debugfs_rename); /** * debugfs_initialized - Tells whether debugfs has been registered */ bool debugfs_initialized(void) { return debugfs_registered; } EXPORT_SYMBOL_GPL(debugfs_initialized); static struct kobject *debug_kobj; static int __init debugfs_init(void) { int retval; debug_kobj = kobject_create_and_add("debug", kernel_kobj); if (!debug_kobj) return -EINVAL; retval = register_filesystem(&debug_fs_type); if (retval) kobject_put(debug_kobj); else debugfs_registered = true; return retval; } core_initcall(debugfs_init);
gpl-2.0
libcg/raw_kernel_tw
arch/powerpc/lib/checksum_wrappers_64.c
2917
2380
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2010 * * Author: Anton Blanchard <anton@au.ibm.com> */ #include <linux/module.h> #include <linux/compiler.h> #include <linux/types.h> #include <asm/checksum.h> #include <asm/uaccess.h> __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) { unsigned int csum; might_sleep(); *err_ptr = 0; if (!len) { csum = 0; goto out; } if (unlikely((len < 0) || !access_ok(VERIFY_READ, src, len))) { *err_ptr = -EFAULT; csum = (__force unsigned int)sum; goto out; } csum = csum_partial_copy_generic((void __force *)src, dst, len, sum, err_ptr, NULL); if (unlikely(*err_ptr)) { int missing = __copy_from_user(dst, src, len); if (missing) { memset(dst + len - missing, 0, missing); *err_ptr = -EFAULT; } else { *err_ptr = 0; } csum = csum_partial(dst, len, sum); } out: return (__force __wsum)csum; } EXPORT_SYMBOL(csum_and_copy_from_user); __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, __wsum sum, int *err_ptr) { unsigned int csum; might_sleep(); *err_ptr = 0; if (!len) { csum = 0; goto out; } if (unlikely((len < 0) || !access_ok(VERIFY_WRITE, dst, len))) { *err_ptr = -EFAULT; csum = -1; /* invalid checksum */ goto out; } csum = csum_partial_copy_generic(src, (void __force *)dst, len, sum, NULL, 
err_ptr); if (unlikely(*err_ptr)) { csum = csum_partial(src, len, sum); if (copy_to_user(dst, src, len)) { *err_ptr = -EFAULT; csum = -1; /* invalid checksum */ } } out: return (__force __wsum)csum; } EXPORT_SYMBOL(csum_and_copy_to_user);
gpl-2.0
civato/V30B-SithLord
drivers/ata/pata_mpc52xx.c
2917
26737
/* * drivers/ata/pata_mpc52xx.c * * libata driver for the Freescale MPC52xx on-chip IDE interface * * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com> * Copyright (C) 2003 Mipsys - Benjamin Herrenschmidt * * UDMA support based on patches by Freescale (Bernard Kuhn, John Rigby), * Domen Puncer and Tim Yamin. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/delay.h> #include <linux/libata.h> #include <linux/of_platform.h> #include <linux/types.h> #include <asm/cacheflush.h> #include <asm/prom.h> #include <asm/mpc52xx.h> #include <sysdev/bestcomm/bestcomm.h> #include <sysdev/bestcomm/bestcomm_priv.h> #include <sysdev/bestcomm/ata.h> #define DRV_NAME "mpc52xx_ata" /* Private structures used by the driver */ struct mpc52xx_ata_timings { u32 pio1; u32 pio2; u32 mdma1; u32 mdma2; u32 udma1; u32 udma2; u32 udma3; u32 udma4; u32 udma5; int using_udma; }; struct mpc52xx_ata_priv { unsigned int ipb_period; struct mpc52xx_ata __iomem *ata_regs; phys_addr_t ata_regs_pa; int ata_irq; struct mpc52xx_ata_timings timings[2]; int csel; /* DMA */ struct bcom_task *dmatsk; const struct udmaspec *udmaspec; const struct mdmaspec *mdmaspec; int mpc52xx_ata_dma_last_write; int waiting_for_dma; }; /* ATAPI-4 PIO specs (in ns) */ static const u16 ataspec_t0[5] = {600, 383, 240, 180, 120}; static const u16 ataspec_t1[5] = { 70, 50, 30, 30, 25}; static const u16 ataspec_t2_8[5] = {290, 290, 290, 80, 70}; static const u16 ataspec_t2_16[5] = {165, 125, 100, 80, 70}; static const u16 ataspec_t2i[5] = { 0, 0, 0, 70, 25}; static const u16 ataspec_t4[5] = { 30, 20, 15, 10, 10}; static const u16 ataspec_ta[5] = { 35, 35, 35, 35, 35}; #define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c))) /* ======================================================================== */ /* ATAPI-4 MDMA 
specs (in clocks) */ struct mdmaspec { u8 t0M; u8 td; u8 th; u8 tj; u8 tkw; u8 tm; u8 tn; }; static const struct mdmaspec mdmaspec66[3] = { { .t0M = 32, .td = 15, .th = 2, .tj = 2, .tkw = 15, .tm = 4, .tn = 1 }, { .t0M = 10, .td = 6, .th = 1, .tj = 1, .tkw = 4, .tm = 2, .tn = 1 }, { .t0M = 8, .td = 5, .th = 1, .tj = 1, .tkw = 2, .tm = 2, .tn = 1 }, }; static const struct mdmaspec mdmaspec132[3] = { { .t0M = 64, .td = 29, .th = 3, .tj = 3, .tkw = 29, .tm = 7, .tn = 2 }, { .t0M = 20, .td = 11, .th = 2, .tj = 1, .tkw = 7, .tm = 4, .tn = 1 }, { .t0M = 16, .td = 10, .th = 2, .tj = 1, .tkw = 4, .tm = 4, .tn = 1 }, }; /* ATAPI-4 UDMA specs (in clocks) */ struct udmaspec { u8 tcyc; u8 t2cyc; u8 tds; u8 tdh; u8 tdvs; u8 tdvh; u8 tfs; u8 tli; u8 tmli; u8 taz; u8 tzah; u8 tenv; u8 tsr; u8 trfs; u8 trp; u8 tack; u8 tss; }; static const struct udmaspec udmaspec66[6] = { { .tcyc = 8, .t2cyc = 16, .tds = 1, .tdh = 1, .tdvs = 5, .tdvh = 1, .tfs = 16, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, .tsr = 3, .trfs = 5, .trp = 11, .tack = 2, .tss = 4, }, { .tcyc = 5, .t2cyc = 11, .tds = 1, .tdh = 1, .tdvs = 4, .tdvh = 1, .tfs = 14, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, .tsr = 2, .trfs = 5, .trp = 9, .tack = 2, .tss = 4, }, { .tcyc = 4, .t2cyc = 8, .tds = 1, .tdh = 1, .tdvs = 3, .tdvh = 1, .tfs = 12, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, .tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4, }, { .tcyc = 3, .t2cyc = 6, .tds = 1, .tdh = 1, .tdvs = 2, .tdvh = 1, .tfs = 9, .tli = 7, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, .tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4, }, { .tcyc = 2, .t2cyc = 4, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1, .tfs = 8, .tli = 8, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, .tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4, }, { .tcyc = 2, .t2cyc = 2, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1, .tfs = 6, .tli = 5, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, .tsr = 2, .trfs = 4, .trp = 6, .tack = 2, .tss = 4, }, }; static 
const struct udmaspec udmaspec132[6] = { { .tcyc = 15, .t2cyc = 31, .tds = 2, .tdh = 1, .tdvs = 10, .tdvh = 1, .tfs = 30, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3, .tsr = 7, .trfs = 10, .trp = 22, .tack = 3, .tss = 7, }, { .tcyc = 10, .t2cyc = 21, .tds = 2, .tdh = 1, .tdvs = 7, .tdvh = 1, .tfs = 27, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3, .tsr = 4, .trfs = 10, .trp = 17, .tack = 3, .tss = 7, }, { .tcyc = 6, .t2cyc = 12, .tds = 1, .tdh = 1, .tdvs = 5, .tdvh = 1, .tfs = 23, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3, .tsr = 3, .trfs = 8, .trp = 14, .tack = 3, .tss = 7, }, { .tcyc = 7, .t2cyc = 12, .tds = 1, .tdh = 1, .tdvs = 3, .tdvh = 1, .tfs = 15, .tli = 13, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3, .tsr = 3, .trfs = 8, .trp = 14, .tack = 3, .tss = 7, }, { .tcyc = 2, .t2cyc = 5, .tds = 0, .tdh = 0, .tdvs = 1, .tdvh = 1, .tfs = 16, .tli = 14, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, .tsr = 2, .trfs = 7, .trp = 13, .tack = 2, .tss = 6, }, { .tcyc = 3, .t2cyc = 6, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1, .tfs = 12, .tli = 10, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3, .tsr = 3, .trfs = 7, .trp = 12, .tack = 3, .tss = 7, }, }; /* ======================================================================== */ /* Bit definitions inside the registers */ #define MPC52xx_ATA_HOSTCONF_SMR 0x80000000UL /* State machine reset */ #define MPC52xx_ATA_HOSTCONF_FR 0x40000000UL /* FIFO Reset */ #define MPC52xx_ATA_HOSTCONF_IE 0x02000000UL /* Enable interrupt in PIO */ #define MPC52xx_ATA_HOSTCONF_IORDY 0x01000000UL /* Drive supports IORDY protocol */ #define MPC52xx_ATA_HOSTSTAT_TIP 0x80000000UL /* Transaction in progress */ #define MPC52xx_ATA_HOSTSTAT_UREP 0x40000000UL /* UDMA Read Extended Pause */ #define MPC52xx_ATA_HOSTSTAT_RERR 0x02000000UL /* Read Error */ #define MPC52xx_ATA_HOSTSTAT_WERR 0x01000000UL /* Write Error */ #define MPC52xx_ATA_FIFOSTAT_EMPTY 0x01 /* FIFO Empty */ #define MPC52xx_ATA_FIFOSTAT_ERROR 0x40 /* FIFO Error */ 
#define MPC52xx_ATA_DMAMODE_WRITE 0x01 /* Write DMA */ #define MPC52xx_ATA_DMAMODE_READ 0x02 /* Read DMA */ #define MPC52xx_ATA_DMAMODE_UDMA 0x04 /* UDMA enabled */ #define MPC52xx_ATA_DMAMODE_IE 0x08 /* Enable drive interrupt to CPU in DMA mode */ #define MPC52xx_ATA_DMAMODE_FE 0x10 /* FIFO Flush enable in Rx mode */ #define MPC52xx_ATA_DMAMODE_FR 0x20 /* FIFO Reset */ #define MPC52xx_ATA_DMAMODE_HUT 0x40 /* Host UDMA burst terminate */ #define MAX_DMA_BUFFERS 128 #define MAX_DMA_BUFFER_SIZE 0x20000u /* Structure of the hardware registers */ struct mpc52xx_ata { /* Host interface registers */ u32 config; /* ATA + 0x00 Host configuration */ u32 host_status; /* ATA + 0x04 Host controller status */ u32 pio1; /* ATA + 0x08 PIO Timing 1 */ u32 pio2; /* ATA + 0x0c PIO Timing 2 */ u32 mdma1; /* ATA + 0x10 MDMA Timing 1 */ u32 mdma2; /* ATA + 0x14 MDMA Timing 2 */ u32 udma1; /* ATA + 0x18 UDMA Timing 1 */ u32 udma2; /* ATA + 0x1c UDMA Timing 2 */ u32 udma3; /* ATA + 0x20 UDMA Timing 3 */ u32 udma4; /* ATA + 0x24 UDMA Timing 4 */ u32 udma5; /* ATA + 0x28 UDMA Timing 5 */ u32 share_cnt; /* ATA + 0x2c ATA share counter */ u32 reserved0[3]; /* FIFO registers */ u32 fifo_data; /* ATA + 0x3c */ u8 fifo_status_frame; /* ATA + 0x40 */ u8 fifo_status; /* ATA + 0x41 */ u16 reserved7[1]; u8 fifo_control; /* ATA + 0x44 */ u8 reserved8[5]; u16 fifo_alarm; /* ATA + 0x4a */ u16 reserved9; u16 fifo_rdp; /* ATA + 0x4e */ u16 reserved10; u16 fifo_wrp; /* ATA + 0x52 */ u16 reserved11; u16 fifo_lfrdp; /* ATA + 0x56 */ u16 reserved12; u16 fifo_lfwrp; /* ATA + 0x5a */ /* Drive TaskFile registers */ u8 tf_control; /* ATA + 0x5c TASKFILE Control/Alt Status */ u8 reserved13[3]; u16 tf_data; /* ATA + 0x60 TASKFILE Data */ u16 reserved14; u8 tf_features; /* ATA + 0x64 TASKFILE Features/Error */ u8 reserved15[3]; u8 tf_sec_count; /* ATA + 0x68 TASKFILE Sector Count */ u8 reserved16[3]; u8 tf_sec_num; /* ATA + 0x6c TASKFILE Sector Number */ u8 reserved17[3]; u8 tf_cyl_low; /* ATA + 0x70 TASKFILE 
Cylinder Low */ u8 reserved18[3]; u8 tf_cyl_high; /* ATA + 0x74 TASKFILE Cylinder High */ u8 reserved19[3]; u8 tf_dev_head; /* ATA + 0x78 TASKFILE Device/Head */ u8 reserved20[3]; u8 tf_command; /* ATA + 0x7c TASKFILE Command/Status */ u8 dma_mode; /* ATA + 0x7d ATA Host DMA Mode configuration */ u8 reserved21[2]; }; /* ======================================================================== */ /* Aux fns */ /* ======================================================================== */ /* MPC52xx low level hw control */ static int mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio) { struct mpc52xx_ata_timings *timing = &priv->timings[dev]; unsigned int ipb_period = priv->ipb_period; u32 t0, t1, t2_8, t2_16, t2i, t4, ta; if ((pio < 0) || (pio > 4)) return -EINVAL; t0 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t0[pio]); t1 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t1[pio]); t2_8 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_8[pio]); t2_16 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_16[pio]); t2i = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2i[pio]); t4 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t4[pio]); ta = CALC_CLKCYC(ipb_period, 1000 * ataspec_ta[pio]); timing->pio1 = (t0 << 24) | (t2_8 << 16) | (t2_16 << 8) | (t2i); timing->pio2 = (t4 << 24) | (t1 << 16) | (ta << 8); return 0; } static int mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev, int speed) { struct mpc52xx_ata_timings *t = &priv->timings[dev]; const struct mdmaspec *s = &priv->mdmaspec[speed]; if (speed < 0 || speed > 2) return -EINVAL; t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm; t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8); t->using_udma = 0; return 0; } static int mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev, int speed) { struct mpc52xx_ata_timings *t = &priv->timings[dev]; const struct udmaspec *s = &priv->udmaspec[speed]; if (speed < 0 || speed > 2) return -EINVAL; 
t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh; t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli; t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) | ((u32)s->tenv << 8) | s->tsr; t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack; t->udma5 = (u32)s->tzah << 24; t->using_udma = 1; return 0; } static void mpc52xx_ata_apply_timings(struct mpc52xx_ata_priv *priv, int device) { struct mpc52xx_ata __iomem *regs = priv->ata_regs; struct mpc52xx_ata_timings *timing = &priv->timings[device]; out_be32(&regs->pio1, timing->pio1); out_be32(&regs->pio2, timing->pio2); out_be32(&regs->mdma1, timing->mdma1); out_be32(&regs->mdma2, timing->mdma2); out_be32(&regs->udma1, timing->udma1); out_be32(&regs->udma2, timing->udma2); out_be32(&regs->udma3, timing->udma3); out_be32(&regs->udma4, timing->udma4); out_be32(&regs->udma5, timing->udma5); priv->csel = device; } static int mpc52xx_ata_hw_init(struct mpc52xx_ata_priv *priv) { struct mpc52xx_ata __iomem *regs = priv->ata_regs; int tslot; /* Clear share_cnt (all sample code do this ...) 
*/ out_be32(&regs->share_cnt, 0); /* Configure and reset host */ out_be32(&regs->config, MPC52xx_ATA_HOSTCONF_IE | MPC52xx_ATA_HOSTCONF_IORDY | MPC52xx_ATA_HOSTCONF_SMR | MPC52xx_ATA_HOSTCONF_FR); udelay(10); out_be32(&regs->config, MPC52xx_ATA_HOSTCONF_IE | MPC52xx_ATA_HOSTCONF_IORDY); /* Set the time slot to 1us */ tslot = CALC_CLKCYC(priv->ipb_period, 1000000); out_be32(&regs->share_cnt, tslot << 16); /* Init timings to PIO0 */ memset(priv->timings, 0x00, 2*sizeof(struct mpc52xx_ata_timings)); mpc52xx_ata_compute_pio_timings(priv, 0, 0); mpc52xx_ata_compute_pio_timings(priv, 1, 0); mpc52xx_ata_apply_timings(priv, 0); return 0; } /* ======================================================================== */ /* libata driver */ /* ======================================================================== */ static void mpc52xx_ata_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct mpc52xx_ata_priv *priv = ap->host->private_data; int pio, rv; pio = adev->pio_mode - XFER_PIO_0; rv = mpc52xx_ata_compute_pio_timings(priv, adev->devno, pio); if (rv) { dev_err(ap->dev, "error: invalid PIO mode: %d\n", pio); return; } mpc52xx_ata_apply_timings(priv, adev->devno); } static void mpc52xx_ata_set_dmamode(struct ata_port *ap, struct ata_device *adev) { struct mpc52xx_ata_priv *priv = ap->host->private_data; int rv; if (adev->dma_mode >= XFER_UDMA_0) { int dma = adev->dma_mode - XFER_UDMA_0; rv = mpc52xx_ata_compute_udma_timings(priv, adev->devno, dma); } else { int dma = adev->dma_mode - XFER_MW_DMA_0; rv = mpc52xx_ata_compute_mdma_timings(priv, adev->devno, dma); } if (rv) { dev_alert(ap->dev, "Trying to select invalid DMA mode %d\n", adev->dma_mode); return; } mpc52xx_ata_apply_timings(priv, adev->devno); } static void mpc52xx_ata_dev_select(struct ata_port *ap, unsigned int device) { struct mpc52xx_ata_priv *priv = ap->host->private_data; if (device != priv->csel) mpc52xx_ata_apply_timings(priv, device); ata_sff_dev_select(ap, device); } static int 
mpc52xx_ata_build_dmatable(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mpc52xx_ata_priv *priv = ap->host->private_data; struct bcom_ata_bd *bd; unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE), si; struct scatterlist *sg; int count = 0; if (read) bcom_ata_rx_prepare(priv->dmatsk); else bcom_ata_tx_prepare(priv->dmatsk); for_each_sg(qc->sg, sg, qc->n_elem, si) { dma_addr_t cur_addr = sg_dma_address(sg); u32 cur_len = sg_dma_len(sg); while (cur_len) { unsigned int tc = min(cur_len, MAX_DMA_BUFFER_SIZE); bd = (struct bcom_ata_bd *) bcom_prepare_next_buffer(priv->dmatsk); if (read) { bd->status = tc; bd->src_pa = (__force u32) priv->ata_regs_pa + offsetof(struct mpc52xx_ata, fifo_data); bd->dst_pa = (__force u32) cur_addr; } else { bd->status = tc; bd->src_pa = (__force u32) cur_addr; bd->dst_pa = (__force u32) priv->ata_regs_pa + offsetof(struct mpc52xx_ata, fifo_data); } bcom_submit_next_buffer(priv->dmatsk, NULL); cur_addr += tc; cur_len -= tc; count++; if (count > MAX_DMA_BUFFERS) { dev_alert(ap->dev, "dma table" "too small\n"); goto use_pio_instead; } } } return 1; use_pio_instead: bcom_ata_reset_bd(priv->dmatsk); return 0; } static void mpc52xx_bmdma_setup(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mpc52xx_ata_priv *priv = ap->host->private_data; struct mpc52xx_ata __iomem *regs = priv->ata_regs; unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE); u8 dma_mode; if (!mpc52xx_ata_build_dmatable(qc)) dev_alert(ap->dev, "%s: %i, return 1?\n", __func__, __LINE__); /* Check FIFO is OK... 
*/ if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR) dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n", __func__, in_8(&priv->ata_regs->fifo_status)); if (read) { dma_mode = MPC52xx_ATA_DMAMODE_IE | MPC52xx_ATA_DMAMODE_READ | MPC52xx_ATA_DMAMODE_FE; /* Setup FIFO if direction changed */ if (priv->mpc52xx_ata_dma_last_write != 0) { priv->mpc52xx_ata_dma_last_write = 0; /* Configure FIFO with granularity to 7 */ out_8(&regs->fifo_control, 7); out_be16(&regs->fifo_alarm, 128); /* Set FIFO Reset bit (FR) */ out_8(&regs->dma_mode, MPC52xx_ATA_DMAMODE_FR); } } else { dma_mode = MPC52xx_ATA_DMAMODE_IE | MPC52xx_ATA_DMAMODE_WRITE; /* Setup FIFO if direction changed */ if (priv->mpc52xx_ata_dma_last_write != 1) { priv->mpc52xx_ata_dma_last_write = 1; /* Configure FIFO with granularity to 4 */ out_8(&regs->fifo_control, 4); out_be16(&regs->fifo_alarm, 128); } } if (priv->timings[qc->dev->devno].using_udma) dma_mode |= MPC52xx_ATA_DMAMODE_UDMA; out_8(&regs->dma_mode, dma_mode); priv->waiting_for_dma = ATA_DMA_ACTIVE; ata_wait_idle(ap); ap->ops->sff_exec_command(ap, &qc->tf); } static void mpc52xx_bmdma_start(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mpc52xx_ata_priv *priv = ap->host->private_data; bcom_set_task_auto_start(priv->dmatsk->tasknum, priv->dmatsk->tasknum); bcom_enable(priv->dmatsk); } static void mpc52xx_bmdma_stop(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mpc52xx_ata_priv *priv = ap->host->private_data; bcom_disable(priv->dmatsk); bcom_ata_reset_bd(priv->dmatsk); priv->waiting_for_dma = 0; /* Check FIFO is OK... */ if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR) dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n", __func__, in_8(&priv->ata_regs->fifo_status)); } static u8 mpc52xx_bmdma_status(struct ata_port *ap) { struct mpc52xx_ata_priv *priv = ap->host->private_data; /* Check FIFO is OK... 
*/ if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR) { dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n", __func__, in_8(&priv->ata_regs->fifo_status)); return priv->waiting_for_dma | ATA_DMA_ERR; } return priv->waiting_for_dma; } static irqreturn_t mpc52xx_ata_task_irq(int irq, void *vpriv) { struct mpc52xx_ata_priv *priv = vpriv; while (bcom_buffer_done(priv->dmatsk)) bcom_retrieve_buffer(priv->dmatsk, NULL, NULL); priv->waiting_for_dma |= ATA_DMA_INTR; return IRQ_HANDLED; } static struct scsi_host_template mpc52xx_ata_sht = { ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations mpc52xx_ata_port_ops = { .inherits = &ata_bmdma_port_ops, .sff_dev_select = mpc52xx_ata_dev_select, .set_piomode = mpc52xx_ata_set_piomode, .set_dmamode = mpc52xx_ata_set_dmamode, .bmdma_setup = mpc52xx_bmdma_setup, .bmdma_start = mpc52xx_bmdma_start, .bmdma_stop = mpc52xx_bmdma_stop, .bmdma_status = mpc52xx_bmdma_status, .qc_prep = ata_noop_qc_prep, }; static int __devinit mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv, unsigned long raw_ata_regs, int mwdma_mask, int udma_mask) { struct ata_host *host; struct ata_port *ap; struct ata_ioports *aio; host = ata_host_alloc(dev, 1); if (!host) return -ENOMEM; ap = host->ports[0]; ap->flags |= ATA_FLAG_SLAVE_POSS; ap->pio_mask = ATA_PIO4; ap->mwdma_mask = mwdma_mask; ap->udma_mask = udma_mask; ap->ops = &mpc52xx_ata_port_ops; host->private_data = priv; aio = &ap->ioaddr; aio->cmd_addr = NULL; /* Don't have a classic reg block */ aio->altstatus_addr = &priv->ata_regs->tf_control; aio->ctl_addr = &priv->ata_regs->tf_control; aio->data_addr = &priv->ata_regs->tf_data; aio->error_addr = &priv->ata_regs->tf_features; aio->feature_addr = &priv->ata_regs->tf_features; aio->nsect_addr = &priv->ata_regs->tf_sec_count; aio->lbal_addr = &priv->ata_regs->tf_sec_num; aio->lbam_addr = &priv->ata_regs->tf_cyl_low; aio->lbah_addr = &priv->ata_regs->tf_cyl_high; aio->device_addr = 
&priv->ata_regs->tf_dev_head; aio->status_addr = &priv->ata_regs->tf_command; aio->command_addr = &priv->ata_regs->tf_command; ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); /* activate host */ return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0, &mpc52xx_ata_sht); } static struct mpc52xx_ata_priv * mpc52xx_ata_remove_one(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); struct mpc52xx_ata_priv *priv = host->private_data; ata_host_detach(host); return priv; } /* ======================================================================== */ /* OF Platform driver */ /* ======================================================================== */ static int __devinit mpc52xx_ata_probe(struct platform_device *op) { unsigned int ipb_freq; struct resource res_mem; int ata_irq = 0; struct mpc52xx_ata __iomem *ata_regs; struct mpc52xx_ata_priv *priv = NULL; int rv, ret, task_irq = 0; int mwdma_mask = 0, udma_mask = 0; const __be32 *prop; int proplen; struct bcom_task *dmatsk = NULL; /* Get ipb frequency */ ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node); if (!ipb_freq) { dev_err(&op->dev, "could not determine IPB bus frequency\n"); return -ENODEV; } /* Get device base address from device tree, request the region * and ioremap it. */ rv = of_address_to_resource(op->dev.of_node, 0, &res_mem); if (rv) { dev_err(&op->dev, "could not determine device base address\n"); return rv; } if (!devm_request_mem_region(&op->dev, res_mem.start, sizeof(*ata_regs), DRV_NAME)) { dev_err(&op->dev, "error requesting register region\n"); return -EBUSY; } ata_regs = devm_ioremap(&op->dev, res_mem.start, sizeof(*ata_regs)); if (!ata_regs) { dev_err(&op->dev, "error mapping device registers\n"); rv = -ENOMEM; goto err; } /* * By default, all DMA modes are disabled for the MPC5200. Some * boards don't have the required signals routed to make DMA work. 
* Also, the MPC5200B has a silicon bug that causes data corruption * with UDMA if it is used at the same time as the LocalPlus bus. * * Instead of trying to guess what modes are usable, check the * ATA device tree node to find out what DMA modes work on the board. * UDMA/MWDMA modes can also be forced by adding "libata.force=<mode>" * to the kernel boot parameters. * * The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and * UDMA modes 0, 1 and 2. */ prop = of_get_property(op->dev.of_node, "mwdma-mode", &proplen); if ((prop) && (proplen >= 4)) mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1); prop = of_get_property(op->dev.of_node, "udma-mode", &proplen); if ((prop) && (proplen >= 4)) udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1); ata_irq = irq_of_parse_and_map(op->dev.of_node, 0); if (ata_irq == NO_IRQ) { dev_err(&op->dev, "error mapping irq\n"); return -EINVAL; } /* Prepare our private structure */ priv = devm_kzalloc(&op->dev, sizeof(*priv), GFP_ATOMIC); if (!priv) { dev_err(&op->dev, "error allocating private structure\n"); rv = -ENOMEM; goto err; } priv->ipb_period = 1000000000 / (ipb_freq / 1000); priv->ata_regs = ata_regs; priv->ata_regs_pa = res_mem.start; priv->ata_irq = ata_irq; priv->csel = -1; priv->mpc52xx_ata_dma_last_write = -1; if (ipb_freq/1000000 == 66) { priv->mdmaspec = mdmaspec66; priv->udmaspec = udmaspec66; } else { priv->mdmaspec = mdmaspec132; priv->udmaspec = udmaspec132; } /* Allocate a BestComm task for DMA */ dmatsk = bcom_ata_init(MAX_DMA_BUFFERS, MAX_DMA_BUFFER_SIZE); if (!dmatsk) { dev_err(&op->dev, "bestcomm initialization failed\n"); rv = -ENOMEM; goto err; } task_irq = bcom_get_task_irq(dmatsk); ret = request_irq(task_irq, &mpc52xx_ata_task_irq, IRQF_DISABLED, "ATA task", priv); if (ret) { dev_err(&op->dev, "error requesting DMA IRQ\n"); goto err; } priv->dmatsk = dmatsk; /* Init the hw */ rv = mpc52xx_ata_hw_init(priv); if (rv) { dev_err(&op->dev, "error initializing hardware\n"); goto err; } /* Register ourselves to 
libata */ rv = mpc52xx_ata_init_one(&op->dev, priv, res_mem.start, mwdma_mask, udma_mask); if (rv) { dev_err(&op->dev, "error registering with ATA layer\n"); goto err; } return 0; err: devm_release_mem_region(&op->dev, res_mem.start, sizeof(*ata_regs)); if (ata_irq) irq_dispose_mapping(ata_irq); if (task_irq) irq_dispose_mapping(task_irq); if (dmatsk) bcom_ata_release(dmatsk); if (ata_regs) devm_iounmap(&op->dev, ata_regs); if (priv) devm_kfree(&op->dev, priv); return rv; } static int mpc52xx_ata_remove(struct platform_device *op) { struct mpc52xx_ata_priv *priv; int task_irq; /* Deregister the ATA interface */ priv = mpc52xx_ata_remove_one(&op->dev); /* Clean up DMA */ task_irq = bcom_get_task_irq(priv->dmatsk); irq_dispose_mapping(task_irq); bcom_ata_release(priv->dmatsk); irq_dispose_mapping(priv->ata_irq); /* Clear up IO allocations */ devm_iounmap(&op->dev, priv->ata_regs); devm_release_mem_region(&op->dev, priv->ata_regs_pa, sizeof(*priv->ata_regs)); devm_kfree(&op->dev, priv); return 0; } #ifdef CONFIG_PM static int mpc52xx_ata_suspend(struct platform_device *op, pm_message_t state) { struct ata_host *host = dev_get_drvdata(&op->dev); return ata_host_suspend(host, state); } static int mpc52xx_ata_resume(struct platform_device *op) { struct ata_host *host = dev_get_drvdata(&op->dev); struct mpc52xx_ata_priv *priv = host->private_data; int rv; rv = mpc52xx_ata_hw_init(priv); if (rv) { dev_err(host->dev, "error initializing hardware\n"); return rv; } ata_host_resume(host); return 0; } #endif static struct of_device_id mpc52xx_ata_of_match[] = { { .compatible = "fsl,mpc5200-ata", }, { .compatible = "mpc5200-ata", }, {}, }; static struct platform_driver mpc52xx_ata_of_platform_driver = { .probe = mpc52xx_ata_probe, .remove = mpc52xx_ata_remove, #ifdef CONFIG_PM .suspend = mpc52xx_ata_suspend, .resume = mpc52xx_ata_resume, #endif .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .of_match_table = mpc52xx_ata_of_match, }, }; /* 
======================================================================== */ /* Module */ /* ======================================================================== */ static int __init mpc52xx_ata_init(void) { printk(KERN_INFO "ata: MPC52xx IDE/ATA libata driver\n"); return platform_driver_register(&mpc52xx_ata_of_platform_driver); } static void __exit mpc52xx_ata_exit(void) { platform_driver_unregister(&mpc52xx_ata_of_platform_driver); } module_init(mpc52xx_ata_init); module_exit(mpc52xx_ata_exit); MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>"); MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(of, mpc52xx_ata_of_match);
gpl-2.0
Kinoma/acorn_kernel
drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
3173
6933
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include <core/gpuobj.h> #include <core/option.h> #include <subdev/timer.h> #include <subdev/vm.h> #include "nv04.h" #define NV44_GART_SIZE (512 * 1024 * 1024) #define NV44_GART_PAGE ( 4 * 1024) /******************************************************************************* * VM map/unmap callbacks ******************************************************************************/ static void nv44_vm_fill(struct nouveau_gpuobj *pgt, dma_addr_t null, dma_addr_t *list, u32 pte, u32 cnt) { u32 base = (pte << 2) & ~0x0000000f; u32 tmp[4]; tmp[0] = nv_ro32(pgt, base + 0x0); tmp[1] = nv_ro32(pgt, base + 0x4); tmp[2] = nv_ro32(pgt, base + 0x8); tmp[3] = nv_ro32(pgt, base + 0xc); while (cnt--) { u32 addr = list ? 
(*list++ >> 12) : (null >> 12); switch (pte++ & 0x3) { case 0: tmp[0] &= ~0x07ffffff; tmp[0] |= addr; break; case 1: tmp[0] &= ~0xf8000000; tmp[0] |= addr << 27; tmp[1] &= ~0x003fffff; tmp[1] |= addr >> 5; break; case 2: tmp[1] &= ~0xffc00000; tmp[1] |= addr << 22; tmp[2] &= ~0x0001ffff; tmp[2] |= addr >> 10; break; case 3: tmp[2] &= ~0xfffe0000; tmp[2] |= addr << 17; tmp[3] &= ~0x00000fff; tmp[3] |= addr >> 15; break; } } nv_wo32(pgt, base + 0x0, tmp[0]); nv_wo32(pgt, base + 0x4, tmp[1]); nv_wo32(pgt, base + 0x8, tmp[2]); nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000); } static void nv44_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) { struct nv04_vmmgr_priv *priv = (void *)vma->vm->vmm; u32 tmp[4]; int i; if (pte & 3) { u32 max = 4 - (pte & 3); u32 part = (cnt > max) ? max : cnt; nv44_vm_fill(pgt, priv->null, list, pte, part); pte += part; list += part; cnt -= part; } while (cnt >= 4) { for (i = 0; i < 4; i++) tmp[i] = *list++ >> 12; nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27); nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22); nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17); nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000); cnt -= 4; } if (cnt) nv44_vm_fill(pgt, priv->null, list, pte, cnt); } static void nv44_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) { struct nv04_vmmgr_priv *priv = (void *)nouveau_vmmgr(pgt); if (pte & 3) { u32 max = 4 - (pte & 3); u32 part = (cnt > max) ? 
max : cnt; nv44_vm_fill(pgt, priv->null, NULL, pte, part); pte += part; cnt -= part; } while (cnt >= 4) { nv_wo32(pgt, pte++ * 4, 0x00000000); nv_wo32(pgt, pte++ * 4, 0x00000000); nv_wo32(pgt, pte++ * 4, 0x00000000); nv_wo32(pgt, pte++ * 4, 0x00000000); cnt -= 4; } if (cnt) nv44_vm_fill(pgt, priv->null, NULL, pte, cnt); } static void nv44_vm_flush(struct nouveau_vm *vm) { struct nv04_vmmgr_priv *priv = (void *)vm->vmm; nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE); nv_wr32(priv, 0x100808, 0x00000020); if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001)) nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808)); nv_wr32(priv, 0x100808, 0x00000000); } /******************************************************************************* * VMMGR subdev ******************************************************************************/ static int nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nouveau_device *device = nv_device(parent); struct nv04_vmmgr_priv *priv; int ret; if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) || !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, data, size, pobject); } ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART", "pciegart", &priv); *pobject = nv_object(priv); if (ret) return ret; priv->base.create = nv04_vm_create; priv->base.limit = NV44_GART_SIZE; priv->base.dma_bits = 39; priv->base.pgt_bits = 32 - 12; priv->base.spg_shift = 12; priv->base.lpg_shift = 12; priv->base.map_sg = nv44_vm_map_sg; priv->base.unmap = nv44_vm_unmap; priv->base.flush = nv44_vm_flush; priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null); if (!priv->nullp) { nv_error(priv, "unable to allocate dummy pages\n"); return -ENOMEM; } ret = nouveau_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096, &priv->vm); if (ret) return ret; ret = 
nouveau_gpuobj_new(nv_object(priv), NULL, (NV44_GART_SIZE / NV44_GART_PAGE) * 4, 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC, &priv->vm->pgt[0].obj[0]); priv->vm->pgt[0].refcount[0] = 1; if (ret) return ret; return 0; } static int nv44_vmmgr_init(struct nouveau_object *object) { struct nv04_vmmgr_priv *priv = (void *)object; struct nouveau_gpuobj *gart = priv->vm->pgt[0].obj[0]; u32 addr; int ret; ret = nouveau_vmmgr_init(&priv->base); if (ret) return ret; /* calculate vram address of this PRAMIN block, object must be * allocated on 512KiB alignment, and not exceed a total size * of 512KiB for this to work correctly */ addr = nv_rd32(priv, 0x10020c); addr -= ((gart->addr >> 19) + 1) << 19; nv_wr32(priv, 0x100850, 0x80000000); nv_wr32(priv, 0x100818, priv->null); nv_wr32(priv, 0x100804, NV44_GART_SIZE); nv_wr32(priv, 0x100850, 0x00008000); nv_mask(priv, 0x10008c, 0x00000200, 0x00000200); nv_wr32(priv, 0x100820, 0x00000000); nv_wr32(priv, 0x10082c, 0x00000001); nv_wr32(priv, 0x100800, addr | 0x00000010); return 0; } struct nouveau_oclass nv44_vmmgr_oclass = { .handle = NV_SUBDEV(VM, 0x44), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv44_vmmgr_ctor, .dtor = nv04_vmmgr_dtor, .init = nv44_vmmgr_init, .fini = _nouveau_vmmgr_fini, }, };
gpl-2.0
omnirom/android_kernel_lge_mako
arch/arm/mach-msm/irq-vic.c
3173
20022
/* * Copyright (C) 2007 Google, Inc. * Copyright (c) 2009, 2011 The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/timer.h> #include <linux/irq.h> #include <linux/io.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <asm/exception.h> #include <asm/cp15.h> #include <mach/hardware.h> #include <mach/msm_iomap.h> #include <mach/fiq.h> #include "fiq.h" #include "smd_private.h" enum { IRQ_DEBUG_SLEEP_INT_TRIGGER = 1U << 0, IRQ_DEBUG_SLEEP_INT = 1U << 1, IRQ_DEBUG_SLEEP_ABORT = 1U << 2, IRQ_DEBUG_SLEEP = 1U << 3, IRQ_DEBUG_SLEEP_REQUEST = 1U << 4, }; static int msm_irq_debug_mask; module_param_named(debug_mask, msm_irq_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); #define VIC_REG(off) (MSM_VIC_BASE + (off)) #define VIC_INT_TO_REG_ADDR(base, irq) (base + (irq / 32) * 4) #define VIC_INT_TO_REG_INDEX(irq) ((irq >> 5) & 3) #define VIC_INT_SELECT0 VIC_REG(0x0000) /* 1: FIQ, 0: IRQ */ #define VIC_INT_SELECT1 VIC_REG(0x0004) /* 1: FIQ, 0: IRQ */ #define VIC_INT_SELECT2 VIC_REG(0x0008) /* 1: FIQ, 0: IRQ */ #define VIC_INT_SELECT3 VIC_REG(0x000C) /* 1: FIQ, 0: IRQ */ #define VIC_INT_EN0 VIC_REG(0x0010) #define VIC_INT_EN1 VIC_REG(0x0014) #define VIC_INT_EN2 VIC_REG(0x0018) #define VIC_INT_EN3 VIC_REG(0x001C) #define VIC_INT_ENCLEAR0 VIC_REG(0x0020) #define VIC_INT_ENCLEAR1 VIC_REG(0x0024) #define VIC_INT_ENCLEAR2 VIC_REG(0x0028) #define VIC_INT_ENCLEAR3 VIC_REG(0x002C) #define 
VIC_INT_ENSET0 VIC_REG(0x0030) #define VIC_INT_ENSET1 VIC_REG(0x0034) #define VIC_INT_ENSET2 VIC_REG(0x0038) #define VIC_INT_ENSET3 VIC_REG(0x003C) #define VIC_INT_TYPE0 VIC_REG(0x0040) /* 1: EDGE, 0: LEVEL */ #define VIC_INT_TYPE1 VIC_REG(0x0044) /* 1: EDGE, 0: LEVEL */ #define VIC_INT_TYPE2 VIC_REG(0x0048) /* 1: EDGE, 0: LEVEL */ #define VIC_INT_TYPE3 VIC_REG(0x004C) /* 1: EDGE, 0: LEVEL */ #define VIC_INT_POLARITY0 VIC_REG(0x0050) /* 1: NEG, 0: POS */ #define VIC_INT_POLARITY1 VIC_REG(0x0054) /* 1: NEG, 0: POS */ #define VIC_INT_POLARITY2 VIC_REG(0x0058) /* 1: NEG, 0: POS */ #define VIC_INT_POLARITY3 VIC_REG(0x005C) /* 1: NEG, 0: POS */ #define VIC_NO_PEND_VAL VIC_REG(0x0060) #if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP) #define VIC_NO_PEND_VAL_FIQ VIC_REG(0x0064) #define VIC_INT_MASTEREN VIC_REG(0x0068) /* 1: IRQ, 2: FIQ */ #define VIC_CONFIG VIC_REG(0x006C) /* 1: USE SC VIC */ #else #define VIC_INT_MASTEREN VIC_REG(0x0064) /* 1: IRQ, 2: FIQ */ #define VIC_PROTECTION VIC_REG(0x006C) /* 1: ENABLE */ #define VIC_CONFIG VIC_REG(0x0068) /* 1: USE ARM1136 VIC */ #endif #define VIC_IRQ_STATUS0 VIC_REG(0x0080) #define VIC_IRQ_STATUS1 VIC_REG(0x0084) #define VIC_IRQ_STATUS2 VIC_REG(0x0088) #define VIC_IRQ_STATUS3 VIC_REG(0x008C) #define VIC_FIQ_STATUS0 VIC_REG(0x0090) #define VIC_FIQ_STATUS1 VIC_REG(0x0094) #define VIC_FIQ_STATUS2 VIC_REG(0x0098) #define VIC_FIQ_STATUS3 VIC_REG(0x009C) #define VIC_RAW_STATUS0 VIC_REG(0x00A0) #define VIC_RAW_STATUS1 VIC_REG(0x00A4) #define VIC_RAW_STATUS2 VIC_REG(0x00A8) #define VIC_RAW_STATUS3 VIC_REG(0x00AC) #define VIC_INT_CLEAR0 VIC_REG(0x00B0) #define VIC_INT_CLEAR1 VIC_REG(0x00B4) #define VIC_INT_CLEAR2 VIC_REG(0x00B8) #define VIC_INT_CLEAR3 VIC_REG(0x00BC) #define VIC_SOFTINT0 VIC_REG(0x00C0) #define VIC_SOFTINT1 VIC_REG(0x00C4) #define VIC_SOFTINT2 VIC_REG(0x00C8) #define VIC_SOFTINT3 VIC_REG(0x00CC) #define VIC_IRQ_VEC_RD VIC_REG(0x00D0) /* pending int # */ #define VIC_IRQ_VEC_PEND_RD VIC_REG(0x00D4) /* 
pending vector addr */ #define VIC_IRQ_VEC_WR VIC_REG(0x00D8) #if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP) #define VIC_FIQ_VEC_RD VIC_REG(0x00DC) #define VIC_FIQ_VEC_PEND_RD VIC_REG(0x00E0) #define VIC_FIQ_VEC_WR VIC_REG(0x00E4) #define VIC_IRQ_IN_SERVICE VIC_REG(0x00E8) #define VIC_IRQ_IN_STACK VIC_REG(0x00EC) #define VIC_FIQ_IN_SERVICE VIC_REG(0x00F0) #define VIC_FIQ_IN_STACK VIC_REG(0x00F4) #define VIC_TEST_BUS_SEL VIC_REG(0x00F8) #define VIC_IRQ_CTRL_CONFIG VIC_REG(0x00FC) #else #define VIC_IRQ_IN_SERVICE VIC_REG(0x00E0) #define VIC_IRQ_IN_STACK VIC_REG(0x00E4) #define VIC_TEST_BUS_SEL VIC_REG(0x00E8) #endif #define VIC_VECTPRIORITY(n) VIC_REG(0x0200+((n) * 4)) #define VIC_VECTADDR(n) VIC_REG(0x0400+((n) * 4)) #if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_FSM9XXX) #define VIC_NUM_REGS 4 #else #define VIC_NUM_REGS 2 #endif #if VIC_NUM_REGS == 2 #define DPRINT_REGS(base_reg, format, ...) \ printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \ readl(base_reg ## 0), readl(base_reg ## 1)) #define DPRINT_ARRAY(array, format, ...) \ printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \ array[0], array[1]) #elif VIC_NUM_REGS == 4 #define DPRINT_REGS(base_reg, format, ...) \ printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \ readl(base_reg ## 0), readl(base_reg ## 1), \ readl(base_reg ## 2), readl(base_reg ## 3)) #define DPRINT_ARRAY(array, format, ...) 
\ printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \ array[0], array[1], \ array[2], array[3]) #else #error "VIC_NUM_REGS set to illegal value" #endif static uint32_t msm_irq_smsm_wake_enable[2]; static struct { uint32_t int_en[2]; uint32_t int_type; uint32_t int_polarity; uint32_t int_select; } msm_irq_shadow_reg[VIC_NUM_REGS]; static uint32_t msm_irq_idle_disable[VIC_NUM_REGS]; #define SMSM_FAKE_IRQ (0xff) #if !defined(CONFIG_ARCH_FSM9XXX) static uint8_t msm_irq_to_smsm[NR_IRQS] = { #if !defined(CONFIG_ARCH_MSM7X27A) [INT_MDDI_EXT] = 1, [INT_MDDI_PRI] = 2, [INT_MDDI_CLIENT] = 3, #endif [INT_USB_OTG] = 4, [INT_PWB_I2C] = 5, [INT_SDC1_0] = 6, [INT_SDC1_1] = 7, [INT_SDC2_0] = 8, [INT_SDC2_1] = 9, [INT_ADSP_A9_A11] = 10, [INT_UART1] = 11, [INT_UART2] = 12, [INT_UART3] = 13, [INT_UART1_RX] = 14, [INT_UART2_RX] = 15, [INT_UART3_RX] = 16, [INT_UART1DM_IRQ] = 17, [INT_UART1DM_RX] = 18, [INT_KEYSENSE] = 19, #if !defined(CONFIG_ARCH_MSM7X30) [INT_AD_HSSD] = 20, #endif [INT_NAND_WR_ER_DONE] = 21, [INT_NAND_OP_DONE] = 22, [INT_TCHSCRN1] = 23, [INT_TCHSCRN2] = 24, [INT_TCHSCRN_SSBI] = 25, [INT_USB_HS] = 26, [INT_UART2DM_RX] = 27, [INT_UART2DM_IRQ] = 28, [INT_SDC4_1] = 29, [INT_SDC4_0] = 30, [INT_SDC3_1] = 31, [INT_SDC3_0] = 32, /* fake wakeup interrupts */ [INT_GPIO_GROUP1] = SMSM_FAKE_IRQ, [INT_GPIO_GROUP2] = SMSM_FAKE_IRQ, [INT_A9_M2A_0] = SMSM_FAKE_IRQ, [INT_A9_M2A_1] = SMSM_FAKE_IRQ, [INT_A9_M2A_5] = SMSM_FAKE_IRQ, [INT_GP_TIMER_EXP] = SMSM_FAKE_IRQ, [INT_DEBUG_TIMER_EXP] = SMSM_FAKE_IRQ, [INT_ADSP_A11] = SMSM_FAKE_IRQ, #ifdef CONFIG_ARCH_QSD8X50 [INT_SIRC_0] = SMSM_FAKE_IRQ, [INT_SIRC_1] = SMSM_FAKE_IRQ, #endif }; # else /* CONFIG_ARCH_FSM9XXX */ static uint8_t msm_irq_to_smsm[NR_IRQS] = { [INT_UART1] = 11, [INT_A9_M2A_0] = SMSM_FAKE_IRQ, [INT_A9_M2A_1] = SMSM_FAKE_IRQ, [INT_A9_M2A_5] = SMSM_FAKE_IRQ, [INT_GP_TIMER_EXP] = SMSM_FAKE_IRQ, [INT_DEBUG_TIMER_EXP] = SMSM_FAKE_IRQ, [INT_SIRC_0] = 10, [INT_ADSP_A11] = SMSM_FAKE_IRQ, }; #endif /* CONFIG_ARCH_FSM9XXX */ 
static inline void msm_irq_write_all_regs(void __iomem *base, unsigned int val) { int i; for (i = 0; i < VIC_NUM_REGS; i++) writel(val, base + (i * 4)); } static void msm_irq_ack(struct irq_data *d) { uint32_t mask; void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_CLEAR0, d->irq); mask = 1 << (d->irq & 31); writel(mask, reg); mb(); } static void msm_irq_disable(struct irq_data *d) { void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENCLEAR0, d->irq); unsigned index = VIC_INT_TO_REG_INDEX(d->irq); uint32_t mask = 1UL << (d->irq & 31); int smsm_irq = msm_irq_to_smsm[d->irq]; if (!(msm_irq_shadow_reg[index].int_en[1] & mask)) { msm_irq_shadow_reg[index].int_en[0] &= ~mask; writel(mask, reg); mb(); if (smsm_irq == 0) msm_irq_idle_disable[index] &= ~mask; else { mask = 1UL << (smsm_irq - 1); msm_irq_smsm_wake_enable[0] &= ~mask; } } } static void msm_irq_mask(struct irq_data *d) { void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENCLEAR0, d->irq); unsigned index = VIC_INT_TO_REG_INDEX(d->irq); uint32_t mask = 1UL << (d->irq & 31); int smsm_irq = msm_irq_to_smsm[d->irq]; msm_irq_shadow_reg[index].int_en[0] &= ~mask; writel(mask, reg); mb(); if (smsm_irq == 0) msm_irq_idle_disable[index] &= ~mask; else { mask = 1UL << (smsm_irq - 1); msm_irq_smsm_wake_enable[0] &= ~mask; } } static void msm_irq_unmask(struct irq_data *d) { void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, d->irq); unsigned index = VIC_INT_TO_REG_INDEX(d->irq); uint32_t mask = 1UL << (d->irq & 31); int smsm_irq = msm_irq_to_smsm[d->irq]; msm_irq_shadow_reg[index].int_en[0] |= mask; writel(mask, reg); mb(); if (smsm_irq == 0) msm_irq_idle_disable[index] |= mask; else { mask = 1UL << (smsm_irq - 1); msm_irq_smsm_wake_enable[0] |= mask; } } static int msm_irq_set_wake(struct irq_data *d, unsigned int on) { unsigned index = VIC_INT_TO_REG_INDEX(d->irq); uint32_t mask = 1UL << (d->irq & 31); int smsm_irq = msm_irq_to_smsm[d->irq]; if (smsm_irq == 0) { printk(KERN_ERR "msm_irq_set_wake: bad wakeup irq %d\n", 
d->irq); return -EINVAL; } if (on) msm_irq_shadow_reg[index].int_en[1] |= mask; else msm_irq_shadow_reg[index].int_en[1] &= ~mask; if (smsm_irq == SMSM_FAKE_IRQ) return 0; mask = 1UL << (smsm_irq - 1); if (on) msm_irq_smsm_wake_enable[1] |= mask; else msm_irq_smsm_wake_enable[1] &= ~mask; return 0; } static int msm_irq_set_type(struct irq_data *d, unsigned int flow_type) { void __iomem *treg = VIC_INT_TO_REG_ADDR(VIC_INT_TYPE0, d->irq); void __iomem *preg = VIC_INT_TO_REG_ADDR(VIC_INT_POLARITY0, d->irq); unsigned index = VIC_INT_TO_REG_INDEX(d->irq); int b = 1 << (d->irq & 31); uint32_t polarity; uint32_t type; polarity = msm_irq_shadow_reg[index].int_polarity; if (flow_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW)) polarity |= b; if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH)) polarity &= ~b; writel(polarity, preg); msm_irq_shadow_reg[index].int_polarity = polarity; type = msm_irq_shadow_reg[index].int_type; if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { type |= b; __irq_set_handler_locked(d->irq, handle_edge_irq); } if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) { type &= ~b; __irq_set_handler_locked(d->irq, handle_level_irq); } writel(type, treg); mb(); msm_irq_shadow_reg[index].int_type = type; return 0; } unsigned int msm_irq_pending(void) { unsigned int i, pending = 0; for (i = 0; (i < VIC_NUM_REGS) && !pending; i++) pending |= readl(VIC_IRQ_STATUS0 + (i * 4)); return pending; } int msm_irq_idle_sleep_allowed(void) { uint32_t i, disable = 0; if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_REQUEST) DPRINT_ARRAY(msm_irq_idle_disable, "msm_irq_idle_sleep_allowed: disable"); for (i = 0; i < VIC_NUM_REGS; i++) disable |= msm_irq_idle_disable[i]; return !disable; } /* * Prepare interrupt subsystem for entering sleep -- phase 1. * If modem_wake is true, return currently enabled interrupts in *irq_mask. 
*/ void msm_irq_enter_sleep1(bool modem_wake, int from_idle, uint32_t *irq_mask) { if (modem_wake) { *irq_mask = msm_irq_smsm_wake_enable[!from_idle]; if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP) printk(KERN_INFO "%s irq_mask %x\n", __func__, *irq_mask); } } /* * Prepare interrupt subsystem for entering sleep -- phase 2. * Detect any pending interrupts and configure interrupt hardware. * * Return value: * -EAGAIN: there are pending interrupt(s); interrupt configuration * is not changed. * 0: success */ int msm_irq_enter_sleep2(bool modem_wake, int from_idle) { int i, limit = 10; uint32_t pending[VIC_NUM_REGS]; if (from_idle && !modem_wake) return 0; /* edge triggered interrupt may get lost if this mode is used */ WARN_ON_ONCE(!modem_wake && !from_idle); if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP) DPRINT_REGS(VIC_IRQ_STATUS, "%s change irq, pend", __func__); for (i = 0; i < VIC_NUM_REGS; i++) { pending[i] = readl(VIC_IRQ_STATUS0 + (i * 4)); pending[i] &= msm_irq_shadow_reg[i].int_en[!from_idle]; } /* * Clear INT_A9_M2A_5 since requesting sleep triggers it. * In some arch e.g. FSM9XXX, INT_A9_M2A_5 may not be in the first set. 
*/ pending[INT_A9_M2A_5 / 32] &= ~(1U << (INT_A9_M2A_5 % 32)); for (i = 0; i < VIC_NUM_REGS; i++) { if (pending[i]) { if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_ABORT) DPRINT_ARRAY(pending, "%s abort", __func__); return -EAGAIN; } } msm_irq_write_all_regs(VIC_INT_EN0, 0); while (limit-- > 0) { int pend_irq; int irq = readl(VIC_IRQ_VEC_RD); if (irq == -1) break; pend_irq = readl(VIC_IRQ_VEC_PEND_RD); if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_INT) printk(KERN_INFO "%s cleared int %d (%d)\n", __func__, irq, pend_irq); } if (modem_wake) { struct irq_data d = { .irq = INT_A9_M2A_6 }; msm_irq_set_type(&d, IRQF_TRIGGER_RISING); __raw_writel(1U << (INT_A9_M2A_6 % 32), VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, INT_A9_M2A_6)); } else { for (i = 0; i < VIC_NUM_REGS; i++) writel(msm_irq_shadow_reg[i].int_en[1], VIC_INT_ENSET0 + (i * 4)); } mb(); return 0; } /* * Restore interrupt subsystem from sleep -- phase 1. * Configure interrupt hardware. */ void msm_irq_exit_sleep1(uint32_t irq_mask, uint32_t wakeup_reason, uint32_t pending_irqs) { int i; struct irq_data d = { .irq = INT_A9_M2A_6 }; msm_irq_ack(&d); for (i = 0; i < VIC_NUM_REGS; i++) { writel(msm_irq_shadow_reg[i].int_type, VIC_INT_TYPE0 + i * 4); writel(msm_irq_shadow_reg[i].int_polarity, VIC_INT_POLARITY0 + i * 4); writel(msm_irq_shadow_reg[i].int_en[0], VIC_INT_EN0 + i * 4); writel(msm_irq_shadow_reg[i].int_select, VIC_INT_SELECT0 + i * 4); } writel(3, VIC_INT_MASTEREN); mb(); if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP) DPRINT_REGS(VIC_IRQ_STATUS, "%s %x %x %x now", __func__, irq_mask, pending_irqs, wakeup_reason); } /* * Restore interrupt subsystem from sleep -- phase 2. * Poke the specified pending interrupts into interrupt hardware. 
*/ void msm_irq_exit_sleep2(uint32_t irq_mask, uint32_t wakeup_reason, uint32_t pending) { int i; if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP) DPRINT_REGS(VIC_IRQ_STATUS, "%s %x %x %x now", __func__, irq_mask, pending, wakeup_reason); for (i = 0; pending && i < ARRAY_SIZE(msm_irq_to_smsm); i++) { unsigned reg_offset = VIC_INT_TO_REG_ADDR(0, i); uint32_t reg_mask = 1UL << (i & 31); int smsm_irq = msm_irq_to_smsm[i]; uint32_t smsm_mask; if (smsm_irq == 0) continue; smsm_mask = 1U << (smsm_irq - 1); if (!(pending & smsm_mask)) continue; pending &= ~smsm_mask; if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_INT) DPRINT_REGS(VIC_IRQ_STATUS, "%s: irq %d still pending %x now", __func__, i, pending); #ifdef DEBUG_INTERRUPT_TRIGGER if (readl(VIC_IRQ_STATUS0 + reg_offset) & reg_mask) writel(reg_mask, VIC_INT_CLEAR0 + reg_offset); #endif if (readl(VIC_IRQ_STATUS0 + reg_offset) & reg_mask) continue; writel(reg_mask, VIC_SOFTINT0 + reg_offset); if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_INT_TRIGGER) DPRINT_REGS(VIC_IRQ_STATUS, "%s: irq %d need trigger, now", __func__, i); } mb(); } /* * Restore interrupt subsystem from sleep -- phase 3. * Print debug information. 
*/ void msm_irq_exit_sleep3(uint32_t irq_mask, uint32_t wakeup_reason, uint32_t pending_irqs) { if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP) DPRINT_REGS(VIC_IRQ_STATUS, "%s %x %x %x state %x now", __func__, irq_mask, pending_irqs, wakeup_reason, smsm_get_state(SMSM_MODEM_STATE)); } static struct irq_chip msm_irq_chip = { .name = "msm", .irq_disable = msm_irq_disable, .irq_ack = msm_irq_ack, .irq_mask = msm_irq_mask, .irq_unmask = msm_irq_unmask, .irq_set_wake = msm_irq_set_wake, .irq_set_type = msm_irq_set_type, }; void __init msm_init_irq(void) { unsigned n; /* select level interrupts */ msm_irq_write_all_regs(VIC_INT_TYPE0, 0); /* select highlevel interrupts */ msm_irq_write_all_regs(VIC_INT_POLARITY0, 0); /* select IRQ for all INTs */ msm_irq_write_all_regs(VIC_INT_SELECT0, 0); /* disable all INTs */ msm_irq_write_all_regs(VIC_INT_EN0, 0); /* don't use vic */ writel(0, VIC_CONFIG); for (n = 0; n < NR_MSM_IRQS; n++) { irq_set_chip_and_handler(n, &msm_irq_chip, handle_level_irq); set_irq_flags(n, IRQF_VALID); } /* enable interrupt controller */ writel(3, VIC_INT_MASTEREN); mb(); } static inline void msm_vic_handle_irq(void __iomem *base_addr, struct pt_regs *regs) { u32 irqnr; do { /* 0xD0 has irq# or old irq# if the irq has been handled * 0xD4 has irq# or -1 if none pending *but* if you just * read 0xD4 you never get the first irq for some reason */ irqnr = readl_relaxed(base_addr + 0xD0); irqnr = readl_relaxed(base_addr + 0xD4); if (irqnr == -1) break; handle_IRQ(irqnr, regs); } while (1); } /* enable imprecise aborts */ #define local_cpsie_enable() __asm__ __volatile__("cpsie a @ enable") asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs) { local_cpsie_enable(); msm_vic_handle_irq((void __iomem *)MSM_VIC_BASE, regs); } #if defined(CONFIG_MSM_FIQ_SUPPORT) void msm_trigger_irq(int irq) { void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_SOFTINT0, irq); uint32_t mask = 1UL << (irq & 31); writel(mask, reg); mb(); } void msm_fiq_enable(int irq) { 
struct irq_data d = { .irq = irq }; unsigned long flags; local_irq_save(flags); msm_irq_unmask(&d); local_irq_restore(flags); } void msm_fiq_disable(int irq) { struct irq_data d = { .irq = irq }; unsigned long flags; local_irq_save(flags); msm_irq_mask(&d); local_irq_restore(flags); } void msm_fiq_select(int irq) { void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_SELECT0, irq); unsigned index = VIC_INT_TO_REG_INDEX(irq); uint32_t mask = 1UL << (irq & 31); unsigned long flags; local_irq_save(flags); msm_irq_shadow_reg[index].int_select |= mask; writel(msm_irq_shadow_reg[index].int_select, reg); mb(); local_irq_restore(flags); } void msm_fiq_unselect(int irq) { void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_SELECT0, irq); unsigned index = VIC_INT_TO_REG_INDEX(irq); uint32_t mask = 1UL << (irq & 31); unsigned long flags; local_irq_save(flags); msm_irq_shadow_reg[index].int_select &= (!mask); writel(msm_irq_shadow_reg[index].int_select, reg); mb(); local_irq_restore(flags); } /* set_fiq_handler originally from arch/arm/kernel/fiq.c */ static void set_fiq_handler(void *start, unsigned int length) { memcpy((void *)0xffff001c, start, length); flush_icache_range(0xffff001c, 0xffff001c + length); if (!vectors_high()) flush_icache_range(0x1c, 0x1c + length); } static void (*fiq_func)(void *data, void *regs); static unsigned long long fiq_stack[256]; int msm_fiq_set_handler(void (*func)(void *data, void *regs), void *data) { unsigned long flags; int ret = -ENOMEM; local_irq_save(flags); if (fiq_func == 0) { fiq_func = func; fiq_glue_setup(func, data, fiq_stack + 255); set_fiq_handler(&fiq_glue, (&fiq_glue_end - &fiq_glue)); ret = 0; } local_irq_restore(flags); return ret; } #endif
gpl-2.0
gdetal/kernel_msm_mptcp
arch/parisc/kernel/smp.c
3941
10875
/* ** SMP Support ** ** Copyright (C) 1999 Walt Drummond <drummond@valinux.com> ** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com> ** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org> ** ** Lots of stuff stolen from arch/alpha/kernel/smp.c ** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^) ** ** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work. ** -grant (1/12/2001) ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. */ #include <linux/types.h> #include <linux/spinlock.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/kernel_stat.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/ftrace.h> #include <linux/cpu.h> #include <linux/atomic.h> #include <asm/current.h> #include <asm/delay.h> #include <asm/tlbflush.h> #include <asm/io.h> #include <asm/irq.h> /* for CPU_IRQ_REGION and friends */ #include <asm/mmu_context.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/unistd.h> #include <asm/cacheflush.h> #undef DEBUG_SMP #ifdef DEBUG_SMP static int smp_debug_lvl = 0; #define smp_debug(lvl, printargs...) \ if (lvl >= smp_debug_lvl) \ printk(printargs); #else #define smp_debug(lvl, ...) 
do { } while(0) #endif /* DEBUG_SMP */ volatile struct task_struct *smp_init_current_idle_task; /* track which CPU is booting */ static volatile int cpu_now_booting __cpuinitdata; static int parisc_max_cpus __cpuinitdata = 1; static DEFINE_PER_CPU(spinlock_t, ipi_lock); enum ipi_message_type { IPI_NOP=0, IPI_RESCHEDULE=1, IPI_CALL_FUNC, IPI_CALL_FUNC_SINGLE, IPI_CPU_START, IPI_CPU_STOP, IPI_CPU_TEST }; /********** SMP inter processor interrupt and communication routines */ #undef PER_CPU_IRQ_REGION #ifdef PER_CPU_IRQ_REGION /* XXX REVISIT Ignore for now. ** *May* need this "hook" to register IPI handler ** once we have perCPU ExtIntr switch tables. */ static void ipi_init(int cpuid) { #error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region if(cpu_online(cpuid) ) { switch_to_idle_task(current); } return; } #endif /* ** Yoink this CPU from the runnable list... ** */ static void halt_processor(void) { /* REVISIT : redirect I/O Interrupts to another CPU? */ /* REVISIT : does PM *know* this CPU isn't available? */ set_cpu_online(smp_processor_id(), false); local_irq_disable(); for (;;) ; } irqreturn_t __irq_entry ipi_interrupt(int irq, void *dev_id) { int this_cpu = smp_processor_id(); struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu); unsigned long ops; unsigned long flags; /* Count this now; we may make a call that never returns. */ p->ipi_count++; mb(); /* Order interrupt and bit testing. */ for (;;) { spinlock_t *lock = &per_cpu(ipi_lock, this_cpu); spin_lock_irqsave(lock, flags); ops = p->pending_ipi; p->pending_ipi = 0; spin_unlock_irqrestore(lock, flags); mb(); /* Order bit clearing and data access. 
*/ if (!ops) break; while (ops) { unsigned long which = ffz(~ops); ops &= ~(1 << which); switch (which) { case IPI_NOP: smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu); break; case IPI_RESCHEDULE: smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu); scheduler_ipi(); break; case IPI_CALL_FUNC: smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu); generic_smp_call_function_interrupt(); break; case IPI_CALL_FUNC_SINGLE: smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu); generic_smp_call_function_single_interrupt(); break; case IPI_CPU_START: smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu); break; case IPI_CPU_STOP: smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu); halt_processor(); break; case IPI_CPU_TEST: smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu); break; default: printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n", this_cpu, which); return IRQ_NONE; } /* Switch */ /* let in any pending interrupts */ local_irq_enable(); local_irq_disable(); } /* while (ops) */ } return IRQ_HANDLED; } static inline void ipi_send(int cpu, enum ipi_message_type op) { struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu); spinlock_t *lock = &per_cpu(ipi_lock, cpu); unsigned long flags; spin_lock_irqsave(lock, flags); p->pending_ipi |= 1 << op; gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa); spin_unlock_irqrestore(lock, flags); } static void send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op) { int cpu; for_each_cpu(cpu, mask) ipi_send(cpu, op); } static inline void send_IPI_single(int dest_cpu, enum ipi_message_type op) { BUG_ON(dest_cpu == NO_PROC_ID); ipi_send(dest_cpu, op); } static inline void send_IPI_allbutself(enum ipi_message_type op) { int i; for_each_online_cpu(i) { if (i != smp_processor_id()) send_IPI_single(i, op); } } inline void smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); } static inline void smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); } void 
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); } void smp_send_all_nop(void) { send_IPI_allbutself(IPI_NOP); } void arch_send_call_function_ipi_mask(const struct cpumask *mask) { send_IPI_mask(mask, IPI_CALL_FUNC); } void arch_send_call_function_single_ipi(int cpu) { send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); } /* * Flush all other CPU's tlb and then mine. Do this with on_each_cpu() * as we want to ensure all TLB's flushed before proceeding. */ void smp_flush_tlb_all(void) { on_each_cpu(flush_tlb_all_local, NULL, 1); } /* * Called by secondaries to update state and initialize CPU registers. */ static void __init smp_cpu_init(int cpunum) { extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */ extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */ extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */ /* Set modes and Enable floating point coprocessor */ (void) init_per_cpu(cpunum); disable_sr_hashing(); mb(); /* Well, support 2.4 linux scheme as well. */ if (cpu_online(cpunum)) { extern void machine_halt(void); /* arch/parisc.../process.c */ printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum); machine_halt(); } notify_cpu_starting(cpunum); ipi_call_lock(); set_cpu_online(cpunum, true); ipi_call_unlock(); /* Initialise the idle task for this CPU */ atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); init_IRQ(); /* make sure no IRQs are enabled or pending */ start_cpu_itimer(); } /* * Slaves start using C here. Indirectly called from smp_slave_stext. 
* Do what start_kernel() and main() do for boot strap processor (aka monarch) */ void __init smp_callin(void) { int slave_id = cpu_now_booting; smp_cpu_init(slave_id); preempt_disable(); flush_cache_all_local(); /* start with known state */ flush_tlb_all_local(NULL); local_irq_enable(); /* Interrupts have been off until now */ cpu_idle(); /* Wait for timer to schedule some work */ /* NOTREACHED */ panic("smp_callin() AAAAaaaaahhhh....\n"); } /* * Bring one cpu online. */ int __cpuinit smp_boot_one_cpu(int cpuid) { const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); struct task_struct *idle; long timeout; /* * Create an idle task for this CPU. Note the address wed* give * to kernel_thread is irrelevant -- it's going to start * where OS_BOOT_RENDEVZ vector in SAL says to start. But * this gets all the other task-y sort of data structures set * up like we wish. We need to pull the just created idle task * off the run queue and stuff it into the init_tasks[] array. * Sheesh . . . */ idle = fork_idle(cpuid); if (IS_ERR(idle)) panic("SMP: fork failed for CPU:%d", cpuid); task_thread_info(idle)->cpu = cpuid; /* Let _start know what logical CPU we're booting ** (offset into init_tasks[],cpu_data[]) */ cpu_now_booting = cpuid; /* ** boot strap code needs to know the task address since ** it also contains the process stack. */ smp_init_current_idle_task = idle ; mb(); printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa); /* ** This gets PDC to release the CPU from a very tight loop. ** ** From the PA-RISC 2.0 Firmware Architecture Reference Specification: ** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which ** is executed after receiving the rendezvous signal (an interrupt to ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the ** contents of memory are valid." */ gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa); mb(); /* * OK, wait a bit for that CPU to finish staggering about. 
* Slave will set a bit when it reaches smp_cpu_init(). * Once the "monarch CPU" sees the bit change, it can move on. */ for (timeout = 0; timeout < 10000; timeout++) { if(cpu_online(cpuid)) { /* Which implies Slave has started up */ cpu_now_booting = 0; smp_init_current_idle_task = NULL; goto alive ; } udelay(100); barrier(); } put_task_struct(idle); idle = NULL; printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid); return -1; alive: /* Remember the Slave data */ smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n", cpuid, timeout * 100); return 0; } void __init smp_prepare_boot_cpu(void) { int bootstrap_processor = per_cpu(cpu_data, 0).cpuid; /* Setup BSP mappings */ printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor); set_cpu_online(bootstrap_processor, true); set_cpu_present(bootstrap_processor, true); } /* ** inventory.c:do_inventory() hasn't yet been run and thus we ** don't 'discover' the additional CPUs until later. */ void __init smp_prepare_cpus(unsigned int max_cpus) { int cpu; for_each_possible_cpu(cpu) spin_lock_init(&per_cpu(ipi_lock, cpu)); init_cpu_present(cpumask_of(0)); parisc_max_cpus = max_cpus; if (!max_cpus) printk(KERN_INFO "SMP mode deactivated.\n"); } void smp_cpus_done(unsigned int cpu_max) { return; } int __cpuinit __cpu_up(unsigned int cpu) { if (cpu != 0 && cpu < parisc_max_cpus) smp_boot_one_cpu(cpu); return cpu_online(cpu) ? 0 : -ENOSYS; } #ifdef CONFIG_PROC_FS int __init setup_profiling_timer(unsigned int multiplier) { return -EINVAL; } #endif
gpl-2.0
Split-Screen/android_kernel_sony_msm8974
fs/jffs2/readinode.c
4709
44313
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/crc32.h> #include <linux/pagemap.h> #include <linux/mtd/mtd.h> #include <linux/compiler.h> #include "nodelist.h" /* * Check the data CRC of the node. * * Returns: 0 if the data CRC is correct; * 1 - if incorrect; * error code if an error occurred. */ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) { struct jffs2_raw_node_ref *ref = tn->fn->raw; int err = 0, pointed = 0; struct jffs2_eraseblock *jeb; unsigned char *buffer; uint32_t crc, ofs, len; size_t retlen; BUG_ON(tn->csize == 0); /* Calculate how many bytes were already checked */ ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); len = tn->csize; if (jffs2_is_writebuffered(c)) { int adj = ofs % c->wbuf_pagesize; if (likely(adj)) adj = c->wbuf_pagesize - adj; if (adj >= tn->csize) { dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", ref_offset(ref), tn->csize, ofs); goto adj_acc; } ofs += adj; len -= adj; } dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); #ifndef __ECOS /* TODO: instead, incapsulate point() stuff to jffs2_flash_read(), * adding and jffs2_flash_read_end() interface. 
*/ err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL); if (!err && retlen < len) { JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize); mtd_unpoint(c->mtd, ofs, retlen); } else if (err) { if (err != -EOPNOTSUPP) JFFS2_WARNING("MTD point failed: error code %d.\n", err); } else pointed = 1; /* succefully pointed to device */ #endif if (!pointed) { buffer = kmalloc(len, GFP_KERNEL); if (unlikely(!buffer)) return -ENOMEM; /* TODO: this is very frequent pattern, make it a separate * routine */ err = jffs2_flash_read(c, ofs, len, &retlen, buffer); if (err) { JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err); goto free_out; } if (retlen != len) { JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len); err = -EIO; goto free_out; } } /* Continue calculating CRC */ crc = crc32(tn->partial_crc, buffer, len); if(!pointed) kfree(buffer); #ifndef __ECOS else mtd_unpoint(c->mtd, ofs, len); #endif if (crc != tn->data_crc) { JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", ref_offset(ref), tn->data_crc, crc); return 1; } adj_acc: jeb = &c->blocks[ref->flash_offset / c->sector_size]; len = ref_totlen(c, jeb, ref); /* If it should be REF_NORMAL, it'll get marked as such when we build the fragtree, shortly. No need to worry about GC moving it while it's marked REF_PRISTINE -- GC won't happen till we've finished checking every inode anyway. */ ref->flash_offset |= REF_PRISTINE; /* * Mark the node as having been checked and fix the * accounting accordingly. 
*/ spin_lock(&c->erase_completion_lock); jeb->used_size += len; jeb->unchecked_size -= len; c->used_size += len; c->unchecked_size -= len; jffs2_dbg_acct_paranoia_check_nolock(c, jeb); spin_unlock(&c->erase_completion_lock); return 0; free_out: if(!pointed) kfree(buffer); #ifndef __ECOS else mtd_unpoint(c->mtd, ofs, len); #endif return err; } /* * Helper function for jffs2_add_older_frag_to_fragtree(). * * Checks the node if we are in the checking stage. */ static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) { int ret; BUG_ON(ref_obsolete(tn->fn->raw)); /* We only check the data CRC of unchecked nodes */ if (ref_flags(tn->fn->raw) != REF_UNCHECKED) return 0; dbg_readinode("check node %#04x-%#04x, phys offs %#08x\n", tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw)); ret = check_node_data(c, tn); if (unlikely(ret < 0)) { JFFS2_ERROR("check_node_data() returned error: %d.\n", ret); } else if (unlikely(ret > 0)) { dbg_readinode("CRC error, mark it obsolete.\n"); jffs2_mark_node_obsolete(c, tn->fn->raw); } return ret; } static struct jffs2_tmp_dnode_info *jffs2_lookup_tn(struct rb_root *tn_root, uint32_t offset) { struct rb_node *next; struct jffs2_tmp_dnode_info *tn = NULL; dbg_readinode("root %p, offset %d\n", tn_root, offset); next = tn_root->rb_node; while (next) { tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb); if (tn->fn->ofs < offset) next = tn->rb.rb_right; else if (tn->fn->ofs >= offset) next = tn->rb.rb_left; else break; } return tn; } static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) { jffs2_mark_node_obsolete(c, tn->fn->raw); jffs2_free_full_dnode(tn->fn); jffs2_free_tmp_dnode_info(tn); } /* * This function is used when we read an inode. Data nodes arrive in * arbitrary order -- they may be older or newer than the nodes which * are already in the tree. Where overlaps occur, the older node can * be discarded as long as the newer passes the CRC check. 
We don't * bother to keep track of holes in this rbtree, and neither do we deal * with frags -- we can have multiple entries starting at the same * offset, and the one with the smallest length will come first in the * ordering. * * Returns 0 if the node was handled (including marking it obsolete) * < 0 an if error occurred */ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c, struct jffs2_readinode_info *rii, struct jffs2_tmp_dnode_info *tn) { uint32_t fn_end = tn->fn->ofs + tn->fn->size; struct jffs2_tmp_dnode_info *this, *ptn; dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw)); /* If a node has zero dsize, we only have to keep if it if it might be the node with highest version -- i.e. the one which will end up as f->metadata. Note that such nodes won't be REF_UNCHECKED since there are no data to check anyway. */ if (!tn->fn->size) { if (rii->mdata_tn) { if (rii->mdata_tn->version < tn->version) { /* We had a candidate mdata node already */ dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version); jffs2_kill_tn(c, rii->mdata_tn); } else { dbg_readinode("kill new mdata with ver %d (older than existing %d\n", tn->version, rii->mdata_tn->version); jffs2_kill_tn(c, tn); return 0; } } rii->mdata_tn = tn; dbg_readinode("keep new mdata with ver %d\n", tn->version); return 0; } /* Find the earliest node which _may_ be relevant to this one */ this = jffs2_lookup_tn(&rii->tn_root, tn->fn->ofs); if (this) { /* If the node is coincident with another at a lower address, back up until the other node is found. It may be relevant */ while (this->overlapped) { ptn = tn_prev(this); if (!ptn) { /* * We killed a node which set the overlapped * flags during the scan. Fix it up. */ this->overlapped = 0; break; } this = ptn; } dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? 
"data" : "hole"); } while (this) { if (this->fn->ofs > fn_end) break; dbg_readinode("Ponder this ver %d, 0x%x-0x%x\n", this->version, this->fn->ofs, this->fn->size); if (this->version == tn->version) { /* Version number collision means REF_PRISTINE GC. Accept either of them as long as the CRC is correct. Check the one we have already... */ if (!check_tn_node(c, this)) { /* The one we already had was OK. Keep it and throw away the new one */ dbg_readinode("Like old node. Throw away new\n"); jffs2_kill_tn(c, tn); return 0; } else { /* Who cares if the new one is good; keep it for now anyway. */ dbg_readinode("Like new node. Throw away old\n"); rb_replace_node(&this->rb, &tn->rb, &rii->tn_root); jffs2_kill_tn(c, this); /* Same overlapping from in front and behind */ return 0; } } if (this->version < tn->version && this->fn->ofs >= tn->fn->ofs && this->fn->ofs + this->fn->size <= fn_end) { /* New node entirely overlaps 'this' */ if (check_tn_node(c, tn)) { dbg_readinode("new node bad CRC\n"); jffs2_kill_tn(c, tn); return 0; } /* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */ while (this && this->fn->ofs + this->fn->size <= fn_end) { struct jffs2_tmp_dnode_info *next = tn_next(this); if (this->version < tn->version) { tn_erase(this, &rii->tn_root); dbg_readinode("Kill overlapped ver %d, 0x%x-0x%x\n", this->version, this->fn->ofs, this->fn->ofs+this->fn->size); jffs2_kill_tn(c, this); } this = next; } dbg_readinode("Done killing overlapped nodes\n"); continue; } if (this->version > tn->version && this->fn->ofs <= tn->fn->ofs && this->fn->ofs+this->fn->size >= fn_end) { /* New node entirely overlapped by 'this' */ if (!check_tn_node(c, this)) { dbg_readinode("Good CRC on old node. Kill new\n"); jffs2_kill_tn(c, tn); return 0; } /* ... but 'this' was bad. Replace it... */ dbg_readinode("Bad CRC on old overlapping node. 
Kill it\n"); tn_erase(this, &rii->tn_root); jffs2_kill_tn(c, this); break; } this = tn_next(this); } /* We neither completely obsoleted nor were completely obsoleted by an earlier node. Insert into the tree */ { struct rb_node *parent; struct rb_node **link = &rii->tn_root.rb_node; struct jffs2_tmp_dnode_info *insert_point = NULL; while (*link) { parent = *link; insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); if (tn->fn->ofs > insert_point->fn->ofs) link = &insert_point->rb.rb_right; else if (tn->fn->ofs < insert_point->fn->ofs || tn->fn->size < insert_point->fn->size) link = &insert_point->rb.rb_left; else link = &insert_point->rb.rb_right; } rb_link_node(&tn->rb, &insert_point->rb, link); rb_insert_color(&tn->rb, &rii->tn_root); } /* If there's anything behind that overlaps us, note it */ this = tn_prev(tn); if (this) { while (1) { if (this->fn->ofs + this->fn->size > tn->fn->ofs) { dbg_readinode("Node is overlapped by %p (v %d, 0x%x-0x%x)\n", this, this->version, this->fn->ofs, this->fn->ofs+this->fn->size); tn->overlapped = 1; break; } if (!this->overlapped) break; ptn = tn_prev(this); if (!ptn) { /* * We killed a node which set the overlapped * flags during the scan. Fix it up. */ this->overlapped = 0; break; } this = ptn; } } /* If the new node overlaps anything ahead, note it */ this = tn_next(tn); while (this && this->fn->ofs < fn_end) { this->overlapped = 1; dbg_readinode("Node ver %d, 0x%x-0x%x is overlapped\n", this->version, this->fn->ofs, this->fn->ofs+this->fn->size); this = tn_next(this); } return 0; } /* Trivial function to remove the last node in the tree. Which by definition has no right-hand -- so can be removed just by making its only child (if any) take its place under its parent. */ static void eat_last(struct rb_root *root, struct rb_node *node) { struct rb_node *parent = rb_parent(node); struct rb_node **link; /* LAST! 
*/ BUG_ON(node->rb_right); if (!parent) link = &root->rb_node; else if (node == parent->rb_left) link = &parent->rb_left; else link = &parent->rb_right; *link = node->rb_left; /* Colour doesn't matter now. Only the parent pointer. */ if (node->rb_left) node->rb_left->rb_parent_color = node->rb_parent_color; } /* We put this in reverse order, so we can just use eat_last */ static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn) { struct rb_node **link = &ver_root->rb_node; struct rb_node *parent = NULL; struct jffs2_tmp_dnode_info *this_tn; while (*link) { parent = *link; this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); if (tn->version > this_tn->version) link = &parent->rb_left; else link = &parent->rb_right; } dbg_readinode("Link new node at %p (root is %p)\n", link, ver_root); rb_link_node(&tn->rb, parent, link); rb_insert_color(&tn->rb, ver_root); } /* Build final, normal fragtree from tn tree. It doesn't matter which order we add nodes to the real fragtree, as long as they don't overlap. And having thrown away the majority of overlapped nodes as we went, there really shouldn't be many sets of nodes which do overlap. If we start at the end, we can use the overlap markers -- we can just eat nodes which aren't overlapped, and when we encounter nodes which _do_ overlap we sort them all into a temporary tree in version order before replaying them. 
*/ static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_readinode_info *rii) { struct jffs2_tmp_dnode_info *pen, *last, *this; struct rb_root ver_root = RB_ROOT; uint32_t high_ver = 0; if (rii->mdata_tn) { dbg_readinode("potential mdata is ver %d at %p\n", rii->mdata_tn->version, rii->mdata_tn); high_ver = rii->mdata_tn->version; rii->latest_ref = rii->mdata_tn->fn->raw; } #ifdef JFFS2_DBG_READINODE_MESSAGES this = tn_last(&rii->tn_root); while (this) { dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs, this->fn->ofs+this->fn->size, this->overlapped); this = tn_prev(this); } #endif pen = tn_last(&rii->tn_root); while ((last = pen)) { pen = tn_prev(last); eat_last(&rii->tn_root, &last->rb); ver_insert(&ver_root, last); if (unlikely(last->overlapped)) { if (pen) continue; /* * We killed a node which set the overlapped * flags during the scan. Fix it up. */ last->overlapped = 0; } /* Now we have a bunch of nodes in reverse version order, in the tree at ver_root. Most of the time, there'll actually be only one node in the 'tree', in fact. 
*/ this = tn_last(&ver_root); while (this) { struct jffs2_tmp_dnode_info *vers_next; int ret; vers_next = tn_prev(this); eat_last(&ver_root, &this->rb); if (check_tn_node(c, this)) { dbg_readinode("node ver %d, 0x%x-0x%x failed CRC\n", this->version, this->fn->ofs, this->fn->ofs+this->fn->size); jffs2_kill_tn(c, this); } else { if (this->version > high_ver) { /* Note that this is different from the other highest_version, because this one is only counting _valid_ nodes which could give the latest inode metadata */ high_ver = this->version; rii->latest_ref = this->fn->raw; } dbg_readinode("Add %p (v %d, 0x%x-0x%x, ov %d) to fragtree\n", this, this->version, this->fn->ofs, this->fn->ofs+this->fn->size, this->overlapped); ret = jffs2_add_full_dnode_to_inode(c, f, this->fn); if (ret) { /* Free the nodes in vers_root; let the caller deal with the rest */ JFFS2_ERROR("Add node to tree failed %d\n", ret); while (1) { vers_next = tn_prev(this); if (check_tn_node(c, this)) jffs2_mark_node_obsolete(c, this->fn->raw); jffs2_free_full_dnode(this->fn); jffs2_free_tmp_dnode_info(this); this = vers_next; if (!this) break; eat_last(&ver_root, &vers_next->rb); } return ret; } jffs2_free_tmp_dnode_info(this); } this = vers_next; } } return 0; } static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) { struct rb_node *this; struct jffs2_tmp_dnode_info *tn; this = list->rb_node; /* Now at bottom of tree */ while (this) { if (this->rb_left) this = this->rb_left; else if (this->rb_right) this = this->rb_right; else { tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb); jffs2_free_full_dnode(tn->fn); jffs2_free_tmp_dnode_info(tn); this = rb_parent(this); if (!this) break; if (this->rb_left == &tn->rb) this->rb_left = NULL; else if (this->rb_right == &tn->rb) this->rb_right = NULL; else BUG(); } } *list = RB_ROOT; } static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) { struct jffs2_full_dirent *next; while (fd) { next = fd->next; jffs2_free_full_dirent(fd); fd = 
next; } } /* Returns first valid node after 'ref'. May return 'ref' */ static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref) { while (ref && ref->next_in_ino) { if (!ref_obsolete(ref)) return ref; dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)); ref = ref->next_in_ino; } return NULL; } /* * Helper function for jffs2_get_inode_nodes(). * It is called every time an directory entry node is found. * * Returns: 0 on success; * negative error code on failure. */ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_raw_dirent *rd, size_t read, struct jffs2_readinode_info *rii) { struct jffs2_full_dirent *fd; uint32_t crc; /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ BUG_ON(ref_obsolete(ref)); crc = crc32(0, rd, sizeof(*rd) - 8); if (unlikely(crc != je32_to_cpu(rd->node_crc))) { JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n", ref_offset(ref), je32_to_cpu(rd->node_crc), crc); jffs2_mark_node_obsolete(c, ref); return 0; } /* If we've never checked the CRCs on this node, check them now */ if (ref_flags(ref) == REF_UNCHECKED) { struct jffs2_eraseblock *jeb; int len; /* Sanity check */ if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) { JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n", ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen)); jffs2_mark_node_obsolete(c, ref); return 0; } jeb = &c->blocks[ref->flash_offset / c->sector_size]; len = ref_totlen(c, jeb, ref); spin_lock(&c->erase_completion_lock); jeb->used_size += len; jeb->unchecked_size -= len; c->used_size += len; c->unchecked_size -= len; ref->flash_offset = ref_offset(ref) | dirent_node_state(rd); spin_unlock(&c->erase_completion_lock); } fd = jffs2_alloc_full_dirent(rd->nsize + 1); if (unlikely(!fd)) return -ENOMEM; fd->raw = ref; fd->version = je32_to_cpu(rd->version); fd->ino = 
je32_to_cpu(rd->ino); fd->type = rd->type; if (fd->version > rii->highest_version) rii->highest_version = fd->version; /* Pick out the mctime of the latest dirent */ if(fd->version > rii->mctime_ver && je32_to_cpu(rd->mctime)) { rii->mctime_ver = fd->version; rii->latest_mctime = je32_to_cpu(rd->mctime); } /* * Copy as much of the name as possible from the raw * dirent we've already read from the flash. */ if (read > sizeof(*rd)) memcpy(&fd->name[0], &rd->name[0], min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) )); /* Do we need to copy any more of the name directly from the flash? */ if (rd->nsize + sizeof(*rd) > read) { /* FIXME: point() */ int err; int already = read - sizeof(*rd); err = jffs2_flash_read(c, (ref_offset(ref)) + read, rd->nsize - already, &read, &fd->name[already]); if (unlikely(read != rd->nsize - already) && likely(!err)) return -EIO; if (unlikely(err)) { JFFS2_ERROR("read remainder of name: error %d\n", err); jffs2_free_full_dirent(fd); return -EIO; } } fd->nhash = full_name_hash(fd->name, rd->nsize); fd->next = NULL; fd->name[rd->nsize] = '\0'; /* * Wheee. We now have a complete jffs2_full_dirent structure, with * the name in it and everything. Link it into the list */ jffs2_add_fd_to_list(c, fd, &rii->fds); return 0; } /* * Helper function for jffs2_get_inode_nodes(). * It is called every time an inode node is found. * * Returns: 0 on success (possibly after marking a bad node obsolete); * negative error code on failure. */ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_raw_inode *rd, int rdlen, struct jffs2_readinode_info *rii) { struct jffs2_tmp_dnode_info *tn; uint32_t len, csize; int ret = 0; uint32_t crc; /* Obsoleted. This cannot happen, surely? 
dwmw2 20020308 */ BUG_ON(ref_obsolete(ref)); crc = crc32(0, rd, sizeof(*rd) - 8); if (unlikely(crc != je32_to_cpu(rd->node_crc))) { JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n", ref_offset(ref), je32_to_cpu(rd->node_crc), crc); jffs2_mark_node_obsolete(c, ref); return 0; } tn = jffs2_alloc_tmp_dnode_info(); if (!tn) { JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn)); return -ENOMEM; } tn->partial_crc = 0; csize = je32_to_cpu(rd->csize); /* If we've never checked the CRCs on this node, check them now */ if (ref_flags(ref) == REF_UNCHECKED) { /* Sanity checks */ if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) || unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) { JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref)); jffs2_dbg_dump_node(c, ref_offset(ref)); jffs2_mark_node_obsolete(c, ref); goto free_out; } if (jffs2_is_writebuffered(c) && csize != 0) { /* At this point we are supposed to check the data CRC * of our unchecked node. But thus far, we do not * know whether the node is valid or obsolete. To * figure this out, we need to walk all the nodes of * the inode and build the inode fragtree. We don't * want to spend time checking data of nodes which may * later be found to be obsolete. So we put off the full * data CRC checking until we have read all the inode * nodes and have started building the fragtree. * * The fragtree is being built starting with nodes * having the highest version number, so we'll be able * to detect whether a node is valid (i.e., it is not * overlapped by a node with higher version) or not. * And we'll be able to check only those nodes, which * are not obsolete. * * Of course, this optimization only makes sense in case * of NAND flashes (or other flashes with * !jffs2_can_mark_obsolete()), since on NOR flashes * nodes are marked obsolete physically. 
* * Since NAND flashes (or other flashes with * jffs2_is_writebuffered(c)) are anyway read by * fractions of c->wbuf_pagesize, and we have just read * the node header, it is likely that the starting part * of the node data is also read when we read the * header. So we don't mind to check the CRC of the * starting part of the data of the node now, and check * the second part later (in jffs2_check_node_data()). * Of course, we will not need to re-read and re-check * the NAND page which we have just read. This is why we * read the whole NAND page at jffs2_get_inode_nodes(), * while we needed only the node header. */ unsigned char *buf; /* 'buf' will point to the start of data */ buf = (unsigned char *)rd + sizeof(*rd); /* len will be the read data length */ len = min_t(uint32_t, rdlen - sizeof(*rd), csize); tn->partial_crc = crc32(0, buf, len); dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize); /* If we actually calculated the whole data CRC * and it is wrong, drop the node. */ if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) { JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc)); jffs2_mark_node_obsolete(c, ref); goto free_out; } } else if (csize == 0) { /* * We checked the header CRC. If the node has no data, adjust * the space accounting now. For other nodes this will be done * later either when the node is marked obsolete or when its * data is checked. 
*/ struct jffs2_eraseblock *jeb; dbg_readinode("the node has no data.\n"); jeb = &c->blocks[ref->flash_offset / c->sector_size]; len = ref_totlen(c, jeb, ref); spin_lock(&c->erase_completion_lock); jeb->used_size += len; jeb->unchecked_size -= len; c->used_size += len; c->unchecked_size -= len; ref->flash_offset = ref_offset(ref) | REF_NORMAL; spin_unlock(&c->erase_completion_lock); } } tn->fn = jffs2_alloc_full_dnode(); if (!tn->fn) { JFFS2_ERROR("alloc fn failed\n"); ret = -ENOMEM; goto free_out; } tn->version = je32_to_cpu(rd->version); tn->fn->ofs = je32_to_cpu(rd->offset); tn->data_crc = je32_to_cpu(rd->data_crc); tn->csize = csize; tn->fn->raw = ref; tn->overlapped = 0; if (tn->version > rii->highest_version) rii->highest_version = tn->version; /* There was a bug where we wrote hole nodes out with csize/dsize swapped. Deal with it */ if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize) tn->fn->size = csize; else // normal case... tn->fn->size = je32_to_cpu(rd->dsize); dbg_readinode2("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n", ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize); ret = jffs2_add_tn_to_tree(c, rii, tn); if (ret) { jffs2_free_full_dnode(tn->fn); free_out: jffs2_free_tmp_dnode_info(tn); return ret; } #ifdef JFFS2_DBG_READINODE2_MESSAGES dbg_readinode2("After adding ver %d:\n", je32_to_cpu(rd->version)); tn = tn_first(&rii->tn_root); while (tn) { dbg_readinode2("%p: v %d r 0x%x-0x%x ov %d\n", tn, tn->version, tn->fn->ofs, tn->fn->ofs+tn->fn->size, tn->overlapped); tn = tn_next(tn); } #endif return 0; } /* * Helper function for jffs2_get_inode_nodes(). * It is called every time an unknown node is found. * * Returns: 0 on success; * negative error code on failure. 
*/ static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un) { /* We don't mark unknown nodes as REF_UNCHECKED */ if (ref_flags(ref) == REF_UNCHECKED) { JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n", ref_offset(ref)); JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n", je16_to_cpu(un->magic), je16_to_cpu(un->nodetype), je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc)); jffs2_mark_node_obsolete(c, ref); return 0; } un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype)); switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) { case JFFS2_FEATURE_INCOMPAT: JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n", je16_to_cpu(un->nodetype), ref_offset(ref)); /* EEP */ BUG(); break; case JFFS2_FEATURE_ROCOMPAT: JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n", je16_to_cpu(un->nodetype), ref_offset(ref)); BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO)); break; case JFFS2_FEATURE_RWCOMPAT_COPY: JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n", je16_to_cpu(un->nodetype), ref_offset(ref)); break; case JFFS2_FEATURE_RWCOMPAT_DELETE: JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n", je16_to_cpu(un->nodetype), ref_offset(ref)); jffs2_mark_node_obsolete(c, ref); return 0; } return 0; } /* * Helper function for jffs2_get_inode_nodes(). * The function detects whether more data should be read and reads it if yes. * * Returns: 0 on success; * negative error code on failure. 
*/ static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, int needed_len, int *rdlen, unsigned char *buf) { int err, to_read = needed_len - *rdlen; size_t retlen; uint32_t offs; if (jffs2_is_writebuffered(c)) { int rem = to_read % c->wbuf_pagesize; if (rem) to_read += c->wbuf_pagesize - rem; } /* We need to read more data */ offs = ref_offset(ref) + *rdlen; dbg_readinode("read more %d bytes\n", to_read); err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen); if (err) { JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", to_read, offs, err); return err; } if (retlen < to_read) { JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", offs, retlen, to_read); return -EIO; } *rdlen += to_read; return 0; } /* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated with this ino. Perform a preliminary ordering on data nodes, throwing away those which are completely obsoleted by newer ones. The naïve approach we use to take of just returning them _all_ in version order will cause us to run out of memory in certain degenerate cases. */ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_readinode_info *rii) { struct jffs2_raw_node_ref *ref, *valid_ref; unsigned char *buf = NULL; union jffs2_node_union *node; size_t retlen; int len, err; rii->mctime_ver = 0; dbg_readinode("ino #%u\n", f->inocache->ino); /* FIXME: in case of NOR and available ->point() this * needs to be fixed. */ len = sizeof(union jffs2_node_union) + c->wbuf_pagesize; buf = kmalloc(len, GFP_KERNEL); if (!buf) return -ENOMEM; spin_lock(&c->erase_completion_lock); valid_ref = jffs2_first_valid_node(f->inocache->nodes); if (!valid_ref && f->inocache->ino != 1) JFFS2_WARNING("Eep. 
No valid nodes for ino #%u.\n", f->inocache->ino); while (valid_ref) { /* We can hold a pointer to a non-obsolete node without the spinlock, but _obsolete_ nodes may disappear at any time, if the block they're in gets erased. So if we mark 'ref' obsolete while we're not holding the lock, it can go away immediately. For that reason, we find the next valid node first, before processing 'ref'. */ ref = valid_ref; valid_ref = jffs2_first_valid_node(ref->next_in_ino); spin_unlock(&c->erase_completion_lock); cond_resched(); /* * At this point we don't know the type of the node we're going * to read, so we do not know the size of its header. In order * to minimize the amount of flash IO we assume the header is * of size = JFFS2_MIN_NODE_HEADER. */ len = JFFS2_MIN_NODE_HEADER; if (jffs2_is_writebuffered(c)) { int end, rem; /* * We are about to read JFFS2_MIN_NODE_HEADER bytes, * but this flash has some minimal I/O unit. It is * possible that we'll need to read more soon, so read * up to the next min. I/O unit, in order not to * re-read the same min. I/O unit twice. */ end = ref_offset(ref) + len; rem = end % c->wbuf_pagesize; if (rem) end += c->wbuf_pagesize - rem; len = end - ref_offset(ref); } dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref)); /* FIXME: point() */ err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf); if (err) { JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ref_offset(ref), err); goto free_out; } if (retlen < len) { JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len); err = -EIO; goto free_out; } node = (union jffs2_node_union *)buf; /* No need to mask in the valid bit; it shouldn't be invalid */ if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) { JFFS2_NOTICE("Node header CRC failed at %#08x. 
{%04x,%04x,%08x,%08x}\n", ref_offset(ref), je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype), je32_to_cpu(node->u.totlen), je32_to_cpu(node->u.hdr_crc)); jffs2_dbg_dump_node(c, ref_offset(ref)); jffs2_mark_node_obsolete(c, ref); goto cont; } if (je16_to_cpu(node->u.magic) != JFFS2_MAGIC_BITMASK) { /* Not a JFFS2 node, whinge and move on */ JFFS2_NOTICE("Wrong magic bitmask 0x%04x in node header at %#08x.\n", je16_to_cpu(node->u.magic), ref_offset(ref)); jffs2_mark_node_obsolete(c, ref); goto cont; } switch (je16_to_cpu(node->u.nodetype)) { case JFFS2_NODETYPE_DIRENT: if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent) && len < sizeof(struct jffs2_raw_dirent)) { err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf); if (unlikely(err)) goto free_out; } err = read_direntry(c, ref, &node->d, retlen, rii); if (unlikely(err)) goto free_out; break; case JFFS2_NODETYPE_INODE: if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode) && len < sizeof(struct jffs2_raw_inode)) { err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf); if (unlikely(err)) goto free_out; } err = read_dnode(c, ref, &node->i, len, rii); if (unlikely(err)) goto free_out; break; default: if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node) && len < sizeof(struct jffs2_unknown_node)) { err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf); if (unlikely(err)) goto free_out; } err = read_unknown(c, ref, &node->u); if (unlikely(err)) goto free_out; } cont: spin_lock(&c->erase_completion_lock); } spin_unlock(&c->erase_completion_lock); kfree(buf); f->highest_version = rii->highest_version; dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n", f->inocache->ino, rii->highest_version, rii->latest_mctime, rii->mctime_ver); return 0; free_out: jffs2_free_tmp_dnode_info_list(&rii->tn_root); jffs2_free_full_dirent_list(rii->fds); rii->fds = NULL; kfree(buf); return err; } static int 
jffs2_do_read_inode_internal(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *latest_node) { struct jffs2_readinode_info rii; uint32_t crc, new_size; size_t retlen; int ret; dbg_readinode("ino #%u pino/nlink is %d\n", f->inocache->ino, f->inocache->pino_nlink); memset(&rii, 0, sizeof(rii)); /* Grab all nodes relevant to this ino */ ret = jffs2_get_inode_nodes(c, f, &rii); if (ret) { JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret); if (f->inocache->state == INO_STATE_READING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); return ret; } ret = jffs2_build_inode_fragtree(c, f, &rii); if (ret) { JFFS2_ERROR("Failed to build final fragtree for inode #%u: error %d\n", f->inocache->ino, ret); if (f->inocache->state == INO_STATE_READING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); jffs2_free_tmp_dnode_info_list(&rii.tn_root); /* FIXME: We could at least crc-check them all */ if (rii.mdata_tn) { jffs2_free_full_dnode(rii.mdata_tn->fn); jffs2_free_tmp_dnode_info(rii.mdata_tn); rii.mdata_tn = NULL; } return ret; } if (rii.mdata_tn) { if (rii.mdata_tn->fn->raw == rii.latest_ref) { f->metadata = rii.mdata_tn->fn; jffs2_free_tmp_dnode_info(rii.mdata_tn); } else { jffs2_kill_tn(c, rii.mdata_tn); } rii.mdata_tn = NULL; } f->dents = rii.fds; jffs2_dbg_fragtree_paranoia_check_nolock(f); if (unlikely(!rii.latest_ref)) { /* No data nodes for this inode. 
*/ if (f->inocache->ino != 1) { JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino); if (!rii.fds) { if (f->inocache->state == INO_STATE_READING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); return -EIO; } JFFS2_NOTICE("but it has children so we fake some modes for it\n"); } latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO); latest_node->version = cpu_to_je32(0); latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0); latest_node->isize = cpu_to_je32(0); latest_node->gid = cpu_to_je16(0); latest_node->uid = cpu_to_je16(0); if (f->inocache->state == INO_STATE_READING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT); return 0; } ret = jffs2_flash_read(c, ref_offset(rii.latest_ref), sizeof(*latest_node), &retlen, (void *)latest_node); if (ret || retlen != sizeof(*latest_node)) { JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n", ret, retlen, sizeof(*latest_node)); /* FIXME: If this fails, there seems to be a memory leak. Find it. */ mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return ret?ret:-EIO; } crc = crc32(0, latest_node, sizeof(*latest_node)-8); if (crc != je32_to_cpu(latest_node->node_crc)) { JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n", f->inocache->ino, ref_offset(rii.latest_ref)); mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return -EIO; } switch(jemode_to_cpu(latest_node->mode) & S_IFMT) { case S_IFDIR: if (rii.mctime_ver > je32_to_cpu(latest_node->version)) { /* The times in the latest_node are actually older than mctime in the latest dirent. Cheat. 
*/ latest_node->ctime = latest_node->mtime = cpu_to_je32(rii.latest_mctime); } break; case S_IFREG: /* If it was a regular file, truncate it to the latest node's isize */ new_size = jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize)); if (new_size != je32_to_cpu(latest_node->isize)) { JFFS2_WARNING("Truncating ino #%u to %d bytes failed because it only had %d bytes to start with!\n", f->inocache->ino, je32_to_cpu(latest_node->isize), new_size); latest_node->isize = cpu_to_je32(new_size); } break; case S_IFLNK: /* Hack to work around broken isize in old symlink code. Remove this when dwmw2 comes to his senses and stops symlinks from being an entirely gratuitous special case. */ if (!je32_to_cpu(latest_node->isize)) latest_node->isize = latest_node->dsize; if (f->inocache->state != INO_STATE_CHECKING) { /* Symlink's inode data is the target path. Read it and * keep in RAM to facilitate quick follow symlink * operation. */ f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL); if (!f->target) { JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize)); mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return -ENOMEM; } ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node), je32_to_cpu(latest_node->csize), &retlen, (char *)f->target); if (ret || retlen != je32_to_cpu(latest_node->csize)) { if (retlen != je32_to_cpu(latest_node->csize)) ret = -EIO; kfree(f->target); f->target = NULL; mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return ret; } f->target[je32_to_cpu(latest_node->csize)] = '\0'; dbg_readinode("symlink's target '%s' cached\n", f->target); } /* fall through... */ case S_IFBLK: case S_IFCHR: /* Certain inode types should have only one data node, and it's kept as the metadata node */ if (f->metadata) { JFFS2_ERROR("Argh. 
Special inode #%u with mode 0%o had metadata node\n", f->inocache->ino, jemode_to_cpu(latest_node->mode)); mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return -EIO; } if (!frag_first(&f->fragtree)) { JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n", f->inocache->ino, jemode_to_cpu(latest_node->mode)); mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return -EIO; } /* ASSERT: f->fraglist != NULL */ if (frag_next(frag_first(&f->fragtree))) { JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n", f->inocache->ino, jemode_to_cpu(latest_node->mode)); /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */ mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return -EIO; } /* OK. We're happy */ f->metadata = frag_first(&f->fragtree)->node; jffs2_free_node_frag(frag_first(&f->fragtree)); f->fragtree = RB_ROOT; break; } if (f->inocache->state == INO_STATE_READING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT); return 0; } /* Scan the list of all nodes present for this ino, build map of versions, etc. */ int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t ino, struct jffs2_raw_inode *latest_node) { dbg_readinode("read inode #%u\n", ino); retry_inocache: spin_lock(&c->inocache_lock); f->inocache = jffs2_get_ino_cache(c, ino); if (f->inocache) { /* Check its state. We may need to wait before we can use it */ switch(f->inocache->state) { case INO_STATE_UNCHECKED: case INO_STATE_CHECKEDABSENT: f->inocache->state = INO_STATE_READING; break; case INO_STATE_CHECKING: case INO_STATE_GC: /* If it's in either of these states, we need to wait for whoever's got it to finish and put it back. */ dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state); sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); goto retry_inocache; case INO_STATE_READING: case INO_STATE_PRESENT: /* Eep. This should never happen. 
It can happen if Linux calls read_inode() again before clear_inode() has finished though. */ JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state); /* Fail. That's probably better than allowing it to succeed */ f->inocache = NULL; break; default: BUG(); } } spin_unlock(&c->inocache_lock); if (!f->inocache && ino == 1) { /* Special case - no root inode on medium */ f->inocache = jffs2_alloc_inode_cache(); if (!f->inocache) { JFFS2_ERROR("cannot allocate inocache for root inode\n"); return -ENOMEM; } dbg_readinode("creating inocache for root inode\n"); memset(f->inocache, 0, sizeof(struct jffs2_inode_cache)); f->inocache->ino = f->inocache->pino_nlink = 1; f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; f->inocache->state = INO_STATE_READING; jffs2_add_ino_cache(c, f->inocache); } if (!f->inocache) { JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino); return -ENOENT; } return jffs2_do_read_inode_internal(c, f, latest_node); } int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) { struct jffs2_raw_inode n; struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL); int ret; if (!f) return -ENOMEM; mutex_init(&f->sem); mutex_lock(&f->sem); f->inocache = ic; ret = jffs2_do_read_inode_internal(c, f, &n); if (!ret) { mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); } kfree (f); return ret; } void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) { struct jffs2_full_dirent *fd, *fds; int deleted; jffs2_xattr_delete_inode(c, f->inocache); mutex_lock(&f->sem); deleted = f->inocache && !f->inocache->pino_nlink; if (f->inocache && f->inocache->state != INO_STATE_CHECKING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING); if (f->metadata) { if (deleted) jffs2_mark_node_obsolete(c, f->metadata->raw); jffs2_free_full_dnode(f->metadata); } jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); if (f->target) { kfree(f->target); f->target = 
NULL; } fds = f->dents; while(fds) { fd = fds; fds = fd->next; jffs2_free_full_dirent(fd); } if (f->inocache && f->inocache->state != INO_STATE_CHECKING) { jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); if (f->inocache->nodes == (void *)f->inocache) jffs2_del_ino_cache(c, f->inocache); } mutex_unlock(&f->sem); }
gpl-2.0
schqiushui/android_kernel_htc_msm8974
drivers/staging/iio/gyro/adxrs450_core.c
4965
10111
/* * ADXRS450/ADXRS453 Digital Output Gyroscope Driver * * Copyright 2011 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/list.h> #include <linux/module.h> #include "../iio.h" #include "../sysfs.h" #include "adxrs450.h" /** * adxrs450_spi_read_reg_16() - read 2 bytes from a register pair * @dev: device associated with child of actual iio_dev * @reg_address: the address of the lower of the two registers,which should be an even address, * Second register's address is reg_address + 1. * @val: somewhere to pass back the value read **/ static int adxrs450_spi_read_reg_16(struct iio_dev *indio_dev, u8 reg_address, u16 *val) { struct adxrs450_state *st = iio_priv(indio_dev); int ret; mutex_lock(&st->buf_lock); st->tx[0] = ADXRS450_READ_DATA | (reg_address >> 7); st->tx[1] = reg_address << 1; st->tx[2] = 0; st->tx[3] = 0; if (!(hweight32(be32_to_cpu(*(u32 *)st->tx)) & 1)) st->tx[3] |= ADXRS450_P; ret = spi_write(st->us, st->tx, 4); if (ret) { dev_err(&st->us->dev, "problem while reading 16 bit register 0x%02x\n", reg_address); goto error_ret; } ret = spi_read(st->us, st->rx, 4); if (ret) { dev_err(&st->us->dev, "problem while reading 16 bit register 0x%02x\n", reg_address); goto error_ret; } *val = (be32_to_cpu(*(u32 *)st->rx) >> 5) & 0xFFFF; error_ret: mutex_unlock(&st->buf_lock); return ret; } /** * adxrs450_spi_write_reg_16() - write 2 bytes data to a register pair * @dev: device associated with child of actual actual iio_dev * @reg_address: the address of the lower of the two registers,which should be an even address, * Second register's address is reg_address + 1. * @val: value to be written. 
**/ static int adxrs450_spi_write_reg_16(struct iio_dev *indio_dev, u8 reg_address, u16 val) { struct adxrs450_state *st = iio_priv(indio_dev); int ret; mutex_lock(&st->buf_lock); st->tx[0] = ADXRS450_WRITE_DATA | reg_address >> 7; st->tx[1] = reg_address << 1 | val >> 15; st->tx[2] = val >> 7; st->tx[3] = val << 1; if (!(hweight32(be32_to_cpu(*(u32 *)st->tx)) & 1)) st->tx[3] |= ADXRS450_P; ret = spi_write(st->us, st->tx, 4); if (ret) dev_err(&st->us->dev, "problem while writing 16 bit register 0x%02x\n", reg_address); msleep(1); /* enforce sequential transfer delay 0.1ms */ mutex_unlock(&st->buf_lock); return ret; } /** * adxrs450_spi_sensor_data() - read 2 bytes sensor data * @dev: device associated with child of actual iio_dev * @val: somewhere to pass back the value read **/ static int adxrs450_spi_sensor_data(struct iio_dev *indio_dev, s16 *val) { struct adxrs450_state *st = iio_priv(indio_dev); int ret; mutex_lock(&st->buf_lock); st->tx[0] = ADXRS450_SENSOR_DATA; st->tx[1] = 0; st->tx[2] = 0; st->tx[3] = 0; ret = spi_write(st->us, st->tx, 4); if (ret) { dev_err(&st->us->dev, "Problem while reading sensor data\n"); goto error_ret; } ret = spi_read(st->us, st->rx, 4); if (ret) { dev_err(&st->us->dev, "Problem while reading sensor data\n"); goto error_ret; } *val = (be32_to_cpu(*(u32 *)st->rx) >> 10) & 0xFFFF; error_ret: mutex_unlock(&st->buf_lock); return ret; } /** * adxrs450_spi_initial() - use for initializing procedure. 
* @st: device instance specific data * @val: somewhere to pass back the value read **/ static int adxrs450_spi_initial(struct adxrs450_state *st, u32 *val, char chk) { struct spi_message msg; int ret; struct spi_transfer xfers = { .tx_buf = st->tx, .rx_buf = st->rx, .bits_per_word = 8, .len = 4, }; mutex_lock(&st->buf_lock); st->tx[0] = ADXRS450_SENSOR_DATA; st->tx[1] = 0; st->tx[2] = 0; st->tx[3] = 0; if (chk) st->tx[3] |= (ADXRS450_CHK | ADXRS450_P); spi_message_init(&msg); spi_message_add_tail(&xfers, &msg); ret = spi_sync(st->us, &msg); if (ret) { dev_err(&st->us->dev, "Problem while reading initializing data\n"); goto error_ret; } *val = be32_to_cpu(*(u32 *)st->rx); error_ret: mutex_unlock(&st->buf_lock); return ret; } /* Recommended Startup Sequence by spec */ static int adxrs450_initial_setup(struct iio_dev *indio_dev) { u32 t; u16 data; int ret; struct adxrs450_state *st = iio_priv(indio_dev); msleep(ADXRS450_STARTUP_DELAY*2); ret = adxrs450_spi_initial(st, &t, 1); if (ret) return ret; if (t != 0x01) dev_warn(&st->us->dev, "The initial power on response " "is not correct! 
Restart without reset?\n"); msleep(ADXRS450_STARTUP_DELAY); ret = adxrs450_spi_initial(st, &t, 0); if (ret) return ret; msleep(ADXRS450_STARTUP_DELAY); ret = adxrs450_spi_initial(st, &t, 0); if (ret) return ret; if (((t & 0xff) | 0x01) != 0xff || ADXRS450_GET_ST(t) != 2) { dev_err(&st->us->dev, "The second response is not correct!\n"); return -EIO; } ret = adxrs450_spi_initial(st, &t, 0); if (ret) return ret; if (((t & 0xff) | 0x01) != 0xff || ADXRS450_GET_ST(t) != 2) { dev_err(&st->us->dev, "The third response is not correct!\n"); return -EIO; } ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_FAULT1, &data); if (ret) return ret; if (data & 0x0fff) { dev_err(&st->us->dev, "The device is not in normal status!\n"); return -EINVAL; } ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_PID1, &data); if (ret) return ret; dev_info(&st->us->dev, "The Part ID is 0x%x\n", data); ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_SNL, &data); if (ret) return ret; t = data; ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_SNH, &data); if (ret) return ret; t |= data << 16; dev_info(&st->us->dev, "The Serial Number is 0x%x\n", t); return 0; } static int adxrs450_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { int ret; switch (mask) { case IIO_CHAN_INFO_CALIBBIAS: ret = adxrs450_spi_write_reg_16(indio_dev, ADXRS450_DNC1, val & 0x3FF); break; default: ret = -EINVAL; break; } return ret; } static int adxrs450_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { int ret; s16 t; switch (mask) { case 0: switch (chan->type) { case IIO_ANGL_VEL: ret = adxrs450_spi_sensor_data(indio_dev, &t); if (ret) break; *val = t; ret = IIO_VAL_INT; break; case IIO_TEMP: ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_TEMP1, &t); if (ret) break; *val = (t >> 6) + 225; ret = IIO_VAL_INT; break; default: ret = -EINVAL; break; } break; case IIO_CHAN_INFO_SCALE: switch (chan->type) { case 
IIO_ANGL_VEL: *val = 0; *val2 = 218166; return IIO_VAL_INT_PLUS_NANO; case IIO_TEMP: *val = 200; *val2 = 0; return IIO_VAL_INT; default: return -EINVAL; } break; case IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW: ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_QUAD1, &t); if (ret) break; *val = t; ret = IIO_VAL_INT; break; case IIO_CHAN_INFO_CALIBBIAS: ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_DNC1, &t); if (ret) break; *val = t; ret = IIO_VAL_INT; break; default: ret = -EINVAL; break; } return ret; } static const struct iio_chan_spec adxrs450_channels[2][2] = { [ID_ADXRS450] = { { .type = IIO_ANGL_VEL, .modified = 1, .channel2 = IIO_MOD_Z, .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT | IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT | IIO_CHAN_INFO_SCALE_SEPARATE_BIT, }, { .type = IIO_TEMP, .indexed = 1, .channel = 0, .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, } }, [ID_ADXRS453] = { { .type = IIO_ANGL_VEL, .modified = 1, .channel2 = IIO_MOD_Z, .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT | IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT, }, { .type = IIO_TEMP, .indexed = 1, .channel = 0, .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, } }, }; static const struct iio_info adxrs450_info = { .driver_module = THIS_MODULE, .read_raw = &adxrs450_read_raw, .write_raw = &adxrs450_write_raw, }; static int __devinit adxrs450_probe(struct spi_device *spi) { int ret; struct adxrs450_state *st; struct iio_dev *indio_dev; /* setup the industrialio driver allocated elements */ indio_dev = iio_allocate_device(sizeof(*st)); if (indio_dev == NULL) { ret = -ENOMEM; goto error_ret; } st = iio_priv(indio_dev); st->us = spi; mutex_init(&st->buf_lock); /* This is only used for removal purposes */ spi_set_drvdata(spi, indio_dev); indio_dev->dev.parent = &spi->dev; indio_dev->info = &adxrs450_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = adxrs450_channels[spi_get_device_id(spi)->driver_data]; indio_dev->num_channels = 
ARRAY_SIZE(adxrs450_channels); indio_dev->name = spi->dev.driver->name; ret = iio_device_register(indio_dev); if (ret) goto error_free_dev; /* Get the device into a sane initial state */ ret = adxrs450_initial_setup(indio_dev); if (ret) goto error_initial; return 0; error_initial: iio_device_unregister(indio_dev); error_free_dev: iio_free_device(indio_dev); error_ret: return ret; } static int adxrs450_remove(struct spi_device *spi) { iio_device_unregister(spi_get_drvdata(spi)); iio_free_device(spi_get_drvdata(spi)); return 0; } static const struct spi_device_id adxrs450_id[] = { {"adxrs450", ID_ADXRS450}, {"adxrs453", ID_ADXRS453}, {} }; MODULE_DEVICE_TABLE(spi, adxrs450_id); static struct spi_driver adxrs450_driver = { .driver = { .name = "adxrs450", .owner = THIS_MODULE, }, .probe = adxrs450_probe, .remove = __devexit_p(adxrs450_remove), .id_table = adxrs450_id, }; module_spi_driver(adxrs450_driver); MODULE_AUTHOR("Cliff Cai <cliff.cai@xxxxxxxxxx>"); MODULE_DESCRIPTION("Analog Devices ADXRS450/ADXRS453 Gyroscope SPI driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
TKr/riches-mod-jlo-cm10
drivers/pci/iova.c
8037
11779
/* * Copyright © 2006-2009, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> */ #include <linux/iova.h> void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) { spin_lock_init(&iovad->iova_rbtree_lock); iovad->rbroot = RB_ROOT; iovad->cached32_node = NULL; iovad->dma_32bit_pfn = pfn_32bit; } static struct rb_node * __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) { if ((*limit_pfn != iovad->dma_32bit_pfn) || (iovad->cached32_node == NULL)) return rb_last(&iovad->rbroot); else { struct rb_node *prev_node = rb_prev(iovad->cached32_node); struct iova *curr_iova = container_of(iovad->cached32_node, struct iova, node); *limit_pfn = curr_iova->pfn_lo - 1; return prev_node; } } static void __cached_rbnode_insert_update(struct iova_domain *iovad, unsigned long limit_pfn, struct iova *new) { if (limit_pfn != iovad->dma_32bit_pfn) return; iovad->cached32_node = &new->node; } static void __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) { struct iova *cached_iova; struct rb_node *curr; if (!iovad->cached32_node) return; curr = iovad->cached32_node; cached_iova = container_of(curr, struct iova, node); if (free->pfn_lo >= cached_iova->pfn_lo) { struct rb_node *node = rb_next(&free->node); struct iova *iova = container_of(node, struct 
iova, node); /* only cache if it's below 32bit pfn */ if (node && iova->pfn_lo < iovad->dma_32bit_pfn) iovad->cached32_node = node; else iovad->cached32_node = NULL; } } /* Computes the padding size required, to make the * the start address naturally aligned on its size */ static int iova_get_pad_size(int size, unsigned int limit_pfn) { unsigned int pad_size = 0; unsigned int order = ilog2(size); if (order) pad_size = (limit_pfn + 1) % (1 << order); return pad_size; } static int __alloc_and_insert_iova_range(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, struct iova *new, bool size_aligned) { struct rb_node *prev, *curr = NULL; unsigned long flags; unsigned long saved_pfn; unsigned int pad_size = 0; /* Walk the tree backwards */ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); saved_pfn = limit_pfn; curr = __get_cached_rbnode(iovad, &limit_pfn); prev = curr; while (curr) { struct iova *curr_iova = container_of(curr, struct iova, node); if (limit_pfn < curr_iova->pfn_lo) goto move_left; else if (limit_pfn < curr_iova->pfn_hi) goto adjust_limit_pfn; else { if (size_aligned) pad_size = iova_get_pad_size(size, limit_pfn); if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn) break; /* found a free slot */ } adjust_limit_pfn: limit_pfn = curr_iova->pfn_lo - 1; move_left: prev = curr; curr = rb_prev(curr); } if (!curr) { if (size_aligned) pad_size = iova_get_pad_size(size, limit_pfn); if ((IOVA_START_PFN + size + pad_size) > limit_pfn) { spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return -ENOMEM; } } /* pfn_lo will point to size aligned address if size_aligned is set */ new->pfn_lo = limit_pfn - (size + pad_size) + 1; new->pfn_hi = new->pfn_lo + size - 1; /* Insert the new_iova into domain rbtree by holding writer lock */ /* Add new node and rebalance tree. */ { struct rb_node **entry, *parent = NULL; /* If we have 'prev', it's a valid place to start the insertion. Otherwise, start from the root. 
*/ if (prev) entry = &prev; else entry = &iovad->rbroot.rb_node; /* Figure out where to put new node */ while (*entry) { struct iova *this = container_of(*entry, struct iova, node); parent = *entry; if (new->pfn_lo < this->pfn_lo) entry = &((*entry)->rb_left); else if (new->pfn_lo > this->pfn_lo) entry = &((*entry)->rb_right); else BUG(); /* this should not happen */ } /* Add new node and rebalance tree. */ rb_link_node(&new->node, parent, entry); rb_insert_color(&new->node, &iovad->rbroot); } __cached_rbnode_insert_update(iovad, saved_pfn, new); spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return 0; } static void iova_insert_rbtree(struct rb_root *root, struct iova *iova) { struct rb_node **new = &(root->rb_node), *parent = NULL; /* Figure out where to put new node */ while (*new) { struct iova *this = container_of(*new, struct iova, node); parent = *new; if (iova->pfn_lo < this->pfn_lo) new = &((*new)->rb_left); else if (iova->pfn_lo > this->pfn_lo) new = &((*new)->rb_right); else BUG(); /* this should not happen */ } /* Add new node and rebalance tree. */ rb_link_node(&iova->node, parent, new); rb_insert_color(&iova->node, root); } /** * alloc_iova - allocates an iova * @iovad - iova domain in question * @size - size of page frames to allocate * @limit_pfn - max limit address * @size_aligned - set if size_aligned address range is required * This function allocates an iova in the range limit_pfn to IOVA_START_PFN * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned * flag is set then the allocated address iova->pfn_lo will be naturally * aligned on roundup_power_of_two(size). */ struct iova * alloc_iova(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool size_aligned) { struct iova *new_iova; int ret; new_iova = alloc_iova_mem(); if (!new_iova) return NULL; /* If size aligned is set then round the size to * to next power of two. 
*/ if (size_aligned) size = __roundup_pow_of_two(size); ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, new_iova, size_aligned); if (ret) { free_iova_mem(new_iova); return NULL; } return new_iova; } /** * find_iova - find's an iova for a given pfn * @iovad - iova domain in question. * pfn - page frame number * This function finds and returns an iova belonging to the * given doamin which matches the given pfn. */ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) { unsigned long flags; struct rb_node *node; /* Take the lock so that no other thread is manipulating the rbtree */ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); node = iovad->rbroot.rb_node; while (node) { struct iova *iova = container_of(node, struct iova, node); /* If pfn falls within iova's range, return iova */ if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) { spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); /* We are not holding the lock while this iova * is referenced by the caller as the same thread * which called this function also calls __free_iova() * and it is by desing that only one thread can possibly * reference a particular iova and hence no conflict. */ return iova; } if (pfn < iova->pfn_lo) node = node->rb_left; else if (pfn > iova->pfn_lo) node = node->rb_right; } spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return NULL; } /** * __free_iova - frees the given iova * @iovad: iova domain in question. * @iova: iova in question. * Frees the given iova belonging to the giving domain */ void __free_iova(struct iova_domain *iovad, struct iova *iova) { unsigned long flags; spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); __cached_rbnode_delete_update(iovad, iova); rb_erase(&iova->node, &iovad->rbroot); spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); free_iova_mem(iova); } /** * free_iova - finds and frees the iova for a given pfn * @iovad: - iova domain in question. 
* @pfn: - pfn that is allocated previously * This functions finds an iova for a given pfn and then * frees the iova from that domain. */ void free_iova(struct iova_domain *iovad, unsigned long pfn) { struct iova *iova = find_iova(iovad, pfn); if (iova) __free_iova(iovad, iova); } /** * put_iova_domain - destroys the iova doamin * @iovad: - iova domain in question. * All the iova's in that domain are destroyed. */ void put_iova_domain(struct iova_domain *iovad) { struct rb_node *node; unsigned long flags; spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); node = rb_first(&iovad->rbroot); while (node) { struct iova *iova = container_of(node, struct iova, node); rb_erase(node, &iovad->rbroot); free_iova_mem(iova); node = rb_first(&iovad->rbroot); } spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); } static int __is_range_overlap(struct rb_node *node, unsigned long pfn_lo, unsigned long pfn_hi) { struct iova *iova = container_of(node, struct iova, node); if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo)) return 1; return 0; } static struct iova * __insert_new_range(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi) { struct iova *iova; iova = alloc_iova_mem(); if (!iova) return iova; iova->pfn_hi = pfn_hi; iova->pfn_lo = pfn_lo; iova_insert_rbtree(&iovad->rbroot, iova); return iova; } static void __adjust_overlap_range(struct iova *iova, unsigned long *pfn_lo, unsigned long *pfn_hi) { if (*pfn_lo < iova->pfn_lo) iova->pfn_lo = *pfn_lo; if (*pfn_hi > iova->pfn_hi) *pfn_lo = iova->pfn_hi + 1; } /** * reserve_iova - reserves an iova in the given range * @iovad: - iova domain pointer * @pfn_lo: - lower page frame address * @pfn_hi:- higher pfn adderss * This function allocates reserves the address range from pfn_lo to pfn_hi so * that this address is not dished out as part of alloc_iova. 
*/ struct iova * reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi) { struct rb_node *node; unsigned long flags; struct iova *iova; unsigned int overlap = 0; spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { if (__is_range_overlap(node, pfn_lo, pfn_hi)) { iova = container_of(node, struct iova, node); __adjust_overlap_range(iova, &pfn_lo, &pfn_hi); if ((pfn_lo >= iova->pfn_lo) && (pfn_hi <= iova->pfn_hi)) goto finish; overlap = 1; } else if (overlap) break; } /* We are here either because this is the first reserver node * or need to insert remaining non overlap addr range */ iova = __insert_new_range(iovad, pfn_lo, pfn_hi); finish: spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return iova; } /** * copy_reserved_iova - copies the reserved between domains * @from: - source doamin from where to copy * @to: - destination domin where to copy * This function copies reserved iova's from one doamin to * other. */ void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) { unsigned long flags; struct rb_node *node; spin_lock_irqsave(&from->iova_rbtree_lock, flags); for (node = rb_first(&from->rbroot); node; node = rb_next(node)) { struct iova *iova = container_of(node, struct iova, node); struct iova *new_iova; new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi); if (!new_iova) printk(KERN_ERR "Reserve iova range %lx@%lx failed\n", iova->pfn_lo, iova->pfn_lo); } spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); }
gpl-2.0
andreya108/bindu-kernel-base
arch/x86/um/user-offsets.c
9061
2490
#include <stdio.h> #include <stddef.h> #include <signal.h> #include <sys/poll.h> #include <sys/mman.h> #include <sys/user.h> #define __FRAME_OFFSETS #include <asm/ptrace.h> #include <asm/types.h> #ifdef __i386__ #define __SYSCALL_I386(nr, sym, compat) [nr] = 1, static char syscalls[] = { #include <asm/syscalls_32.h> }; #else #define __SYSCALL_64(nr, sym, compat) [nr] = 1, #define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1, #define __SYSCALL_X32(nr, sym, compat) /* Not supported */ static char syscalls[] = { #include <asm/syscalls_64.h> }; #endif #define DEFINE(sym, val) \ asm volatile("\n->" #sym " %0 " #val : : "i" (val)) #define DEFINE_LONGS(sym, val) \ asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long))) void foo(void) { #ifdef __i386__ DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_fpregs_struct)); DEFINE_LONGS(HOST_FPX_SIZE, sizeof(struct user_fpxregs_struct)); DEFINE(HOST_IP, EIP); DEFINE(HOST_SP, UESP); DEFINE(HOST_EFLAGS, EFL); DEFINE(HOST_AX, EAX); DEFINE(HOST_BX, EBX); DEFINE(HOST_CX, ECX); DEFINE(HOST_DX, EDX); DEFINE(HOST_SI, ESI); DEFINE(HOST_DI, EDI); DEFINE(HOST_BP, EBP); DEFINE(HOST_CS, CS); DEFINE(HOST_SS, SS); DEFINE(HOST_DS, DS); DEFINE(HOST_FS, FS); DEFINE(HOST_ES, ES); DEFINE(HOST_GS, GS); DEFINE(HOST_ORIG_AX, ORIG_EAX); #else DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long)); DEFINE_LONGS(HOST_BX, RBX); DEFINE_LONGS(HOST_CX, RCX); DEFINE_LONGS(HOST_DI, RDI); DEFINE_LONGS(HOST_SI, RSI); DEFINE_LONGS(HOST_DX, RDX); DEFINE_LONGS(HOST_BP, RBP); DEFINE_LONGS(HOST_AX, RAX); DEFINE_LONGS(HOST_R8, R8); DEFINE_LONGS(HOST_R9, R9); DEFINE_LONGS(HOST_R10, R10); DEFINE_LONGS(HOST_R11, R11); DEFINE_LONGS(HOST_R12, R12); DEFINE_LONGS(HOST_R13, R13); DEFINE_LONGS(HOST_R14, R14); DEFINE_LONGS(HOST_R15, R15); DEFINE_LONGS(HOST_ORIG_AX, ORIG_RAX); DEFINE_LONGS(HOST_CS, CS); DEFINE_LONGS(HOST_SS, SS); DEFINE_LONGS(HOST_EFLAGS, EFLAGS); #if 0 DEFINE_LONGS(HOST_FS, FS); DEFINE_LONGS(HOST_GS, GS); DEFINE_LONGS(HOST_DS, DS); 
DEFINE_LONGS(HOST_ES, ES); #endif DEFINE_LONGS(HOST_IP, RIP); DEFINE_LONGS(HOST_SP, RSP); #endif DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct)); DEFINE(UM_POLLIN, POLLIN); DEFINE(UM_POLLPRI, POLLPRI); DEFINE(UM_POLLOUT, POLLOUT); DEFINE(UM_PROT_READ, PROT_READ); DEFINE(UM_PROT_WRITE, PROT_WRITE); DEFINE(UM_PROT_EXEC, PROT_EXEC); DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); DEFINE(NR_syscalls, sizeof(syscalls)); }
gpl-2.0
psachin/old.apc-rock-II-kernel
drivers/isdn/mISDN/fsm.c
9573
4508
/* * finite state machine implementation * * Author Karsten Keil <kkeil@novell.com> * * Thanks to Jan den Ouden * Fritz Elfert * Copyright 2008 by Karsten Keil <kkeil@novell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/string.h> #include "fsm.h" #define FSM_TIMER_DEBUG 0 void mISDN_FsmNew(struct Fsm *fsm, struct FsmNode *fnlist, int fncount) { int i; fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count * fsm->event_count, GFP_KERNEL); for (i = 0; i < fncount; i++) if ((fnlist[i].state >= fsm->state_count) || (fnlist[i].event >= fsm->event_count)) { printk(KERN_ERR "mISDN_FsmNew Error: %d st(%ld/%ld) ev(%ld/%ld)\n", i, (long)fnlist[i].state, (long)fsm->state_count, (long)fnlist[i].event, (long)fsm->event_count); } else fsm->jumpmatrix[fsm->state_count * fnlist[i].event + fnlist[i].state] = (FSMFNPTR) fnlist[i].routine; } EXPORT_SYMBOL(mISDN_FsmNew); void mISDN_FsmFree(struct Fsm *fsm) { kfree((void *) fsm->jumpmatrix); } EXPORT_SYMBOL(mISDN_FsmFree); int mISDN_FsmEvent(struct FsmInst *fi, int event, void *arg) { FSMFNPTR r; if ((fi->state >= fi->fsm->state_count) || (event >= fi->fsm->event_count)) { printk(KERN_ERR "mISDN_FsmEvent Error st(%ld/%ld) ev(%d/%ld)\n", (long)fi->state, (long)fi->fsm->state_count, event, (long)fi->fsm->event_count); return 1; } r = fi->fsm->jumpmatrix[fi->fsm->state_count * event + fi->state]; if (r) { if (fi->debug) fi->printdebug(fi, "State %s Event %s", fi->fsm->strState[fi->state], fi->fsm->strEvent[event]); r(fi, event, arg); return 0; } else { if 
(fi->debug) fi->printdebug(fi, "State %s Event %s no action", fi->fsm->strState[fi->state], fi->fsm->strEvent[event]); return 1; } } EXPORT_SYMBOL(mISDN_FsmEvent); void mISDN_FsmChangeState(struct FsmInst *fi, int newstate) { fi->state = newstate; if (fi->debug) fi->printdebug(fi, "ChangeState %s", fi->fsm->strState[newstate]); } EXPORT_SYMBOL(mISDN_FsmChangeState); static void FsmExpireTimer(struct FsmTimer *ft) { #if FSM_TIMER_DEBUG if (ft->fi->debug) ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft); #endif mISDN_FsmEvent(ft->fi, ft->event, ft->arg); } void mISDN_FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft) { ft->fi = fi; ft->tl.function = (void *) FsmExpireTimer; ft->tl.data = (long) ft; #if FSM_TIMER_DEBUG if (ft->fi->debug) ft->fi->printdebug(ft->fi, "mISDN_FsmInitTimer %lx", (long) ft); #endif init_timer(&ft->tl); } EXPORT_SYMBOL(mISDN_FsmInitTimer); void mISDN_FsmDelTimer(struct FsmTimer *ft, int where) { #if FSM_TIMER_DEBUG if (ft->fi->debug) ft->fi->printdebug(ft->fi, "mISDN_FsmDelTimer %lx %d", (long) ft, where); #endif del_timer(&ft->tl); } EXPORT_SYMBOL(mISDN_FsmDelTimer); int mISDN_FsmAddTimer(struct FsmTimer *ft, int millisec, int event, void *arg, int where) { #if FSM_TIMER_DEBUG if (ft->fi->debug) ft->fi->printdebug(ft->fi, "mISDN_FsmAddTimer %lx %d %d", (long) ft, millisec, where); #endif if (timer_pending(&ft->tl)) { if (ft->fi->debug) { printk(KERN_WARNING "mISDN_FsmAddTimer: timer already active!\n"); ft->fi->printdebug(ft->fi, "mISDN_FsmAddTimer already active!"); } return -1; } init_timer(&ft->tl); ft->event = event; ft->arg = arg; ft->tl.expires = jiffies + (millisec * HZ) / 1000; add_timer(&ft->tl); return 0; } EXPORT_SYMBOL(mISDN_FsmAddTimer); void mISDN_FsmRestartTimer(struct FsmTimer *ft, int millisec, int event, void *arg, int where) { #if FSM_TIMER_DEBUG if (ft->fi->debug) ft->fi->printdebug(ft->fi, "mISDN_FsmRestartTimer %lx %d %d", (long) ft, millisec, where); #endif if (timer_pending(&ft->tl)) del_timer(&ft->tl); 
init_timer(&ft->tl); ft->event = event; ft->arg = arg; ft->tl.expires = jiffies + (millisec * HZ) / 1000; add_timer(&ft->tl); } EXPORT_SYMBOL(mISDN_FsmRestartTimer);
gpl-2.0
rperier/linux
drivers/media/platform/exynos4-is/fimc-is.c
102
23303
// SPDX-License-Identifier: GPL-2.0-only /* * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. * * Authors: Sylwester Nawrocki <s.nawrocki@samsung.com> * Younghwan Joo <yhwan.joo@samsung.com> */ #define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__ #include <linux/device.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/firmware.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/of_graph.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/videodev2.h> #include <media/videobuf2-dma-contig.h> #include "media-dev.h" #include "fimc-is.h" #include "fimc-is-command.h" #include "fimc-is-errno.h" #include "fimc-is-i2c.h" #include "fimc-is-param.h" #include "fimc-is-regs.h" static char *fimc_is_clocks[ISS_CLKS_MAX] = { [ISS_CLK_PPMUISPX] = "ppmuispx", [ISS_CLK_PPMUISPMX] = "ppmuispmx", [ISS_CLK_LITE0] = "lite0", [ISS_CLK_LITE1] = "lite1", [ISS_CLK_MPLL] = "mpll", [ISS_CLK_ISP] = "isp", [ISS_CLK_DRC] = "drc", [ISS_CLK_FD] = "fd", [ISS_CLK_MCUISP] = "mcuisp", [ISS_CLK_GICISP] = "gicisp", [ISS_CLK_PWM_ISP] = "pwm_isp", [ISS_CLK_MCUCTL_ISP] = "mcuctl_isp", [ISS_CLK_UART] = "uart", [ISS_CLK_ISP_DIV0] = "ispdiv0", [ISS_CLK_ISP_DIV1] = "ispdiv1", [ISS_CLK_MCUISP_DIV0] = "mcuispdiv0", [ISS_CLK_MCUISP_DIV1] = "mcuispdiv1", [ISS_CLK_ACLK200] = "aclk200", [ISS_CLK_ACLK200_DIV] = "div_aclk200", [ISS_CLK_ACLK400MCUISP] = "aclk400mcuisp", [ISS_CLK_ACLK400MCUISP_DIV] = "div_aclk400mcuisp", }; static void fimc_is_put_clocks(struct fimc_is *is) { int i; for (i = 0; i < ISS_CLKS_MAX; i++) { if (IS_ERR(is->clocks[i])) continue; clk_put(is->clocks[i]); is->clocks[i] = ERR_PTR(-EINVAL); } } static int fimc_is_get_clocks(struct fimc_is *is) { int i, ret; for 
(i = 0; i < ISS_CLKS_MAX; i++) is->clocks[i] = ERR_PTR(-EINVAL); for (i = 0; i < ISS_CLKS_MAX; i++) { is->clocks[i] = clk_get(&is->pdev->dev, fimc_is_clocks[i]); if (IS_ERR(is->clocks[i])) { ret = PTR_ERR(is->clocks[i]); goto err; } } return 0; err: fimc_is_put_clocks(is); dev_err(&is->pdev->dev, "failed to get clock: %s\n", fimc_is_clocks[i]); return ret; } static int fimc_is_setup_clocks(struct fimc_is *is) { int ret; ret = clk_set_parent(is->clocks[ISS_CLK_ACLK200], is->clocks[ISS_CLK_ACLK200_DIV]); if (ret < 0) return ret; ret = clk_set_parent(is->clocks[ISS_CLK_ACLK400MCUISP], is->clocks[ISS_CLK_ACLK400MCUISP_DIV]); if (ret < 0) return ret; ret = clk_set_rate(is->clocks[ISS_CLK_ISP_DIV0], ACLK_AXI_FREQUENCY); if (ret < 0) return ret; ret = clk_set_rate(is->clocks[ISS_CLK_ISP_DIV1], ACLK_AXI_FREQUENCY); if (ret < 0) return ret; ret = clk_set_rate(is->clocks[ISS_CLK_MCUISP_DIV0], ATCLK_MCUISP_FREQUENCY); if (ret < 0) return ret; return clk_set_rate(is->clocks[ISS_CLK_MCUISP_DIV1], ATCLK_MCUISP_FREQUENCY); } static int fimc_is_enable_clocks(struct fimc_is *is) { int i, ret; for (i = 0; i < ISS_GATE_CLKS_MAX; i++) { if (IS_ERR(is->clocks[i])) continue; ret = clk_prepare_enable(is->clocks[i]); if (ret < 0) { dev_err(&is->pdev->dev, "clock %s enable failed\n", fimc_is_clocks[i]); for (--i; i >= 0; i--) clk_disable(is->clocks[i]); return ret; } pr_debug("enabled clock: %s\n", fimc_is_clocks[i]); } return 0; } static void fimc_is_disable_clocks(struct fimc_is *is) { int i; for (i = 0; i < ISS_GATE_CLKS_MAX; i++) { if (!IS_ERR(is->clocks[i])) { clk_disable_unprepare(is->clocks[i]); pr_debug("disabled clock: %s\n", fimc_is_clocks[i]); } } } static int fimc_is_parse_sensor_config(struct fimc_is *is, unsigned int index, struct device_node *node) { struct fimc_is_sensor *sensor = &is->sensor[index]; struct device_node *ep, *port; u32 tmp = 0; int ret; sensor->drvdata = fimc_is_sensor_get_drvdata(node); if (!sensor->drvdata) { dev_err(&is->pdev->dev, "no driver data found 
for: %pOF\n", node); return -EINVAL; } ep = of_graph_get_next_endpoint(node, NULL); if (!ep) return -ENXIO; port = of_graph_get_remote_port(ep); of_node_put(ep); if (!port) return -ENXIO; /* Use MIPI-CSIS channel id to determine the ISP I2C bus index. */ ret = of_property_read_u32(port, "reg", &tmp); if (ret < 0) { dev_err(&is->pdev->dev, "reg property not found at: %pOF\n", port); of_node_put(port); return ret; } of_node_put(port); sensor->i2c_bus = tmp - FIMC_INPUT_MIPI_CSI2_0; return 0; } static int fimc_is_register_subdevs(struct fimc_is *is) { struct device_node *i2c_bus, *child; int ret, index = 0; ret = fimc_isp_subdev_create(&is->isp); if (ret < 0) return ret; for_each_compatible_node(i2c_bus, NULL, FIMC_IS_I2C_COMPATIBLE) { for_each_available_child_of_node(i2c_bus, child) { ret = fimc_is_parse_sensor_config(is, index, child); if (ret < 0 || index >= FIMC_IS_SENSORS_NUM) { of_node_put(child); return ret; } index++; } } return 0; } static int fimc_is_unregister_subdevs(struct fimc_is *is) { fimc_isp_subdev_destroy(&is->isp); return 0; } static int fimc_is_load_setfile(struct fimc_is *is, char *file_name) { const struct firmware *fw; void *buf; int ret; ret = request_firmware(&fw, file_name, &is->pdev->dev); if (ret < 0) { dev_err(&is->pdev->dev, "firmware request failed (%d)\n", ret); return ret; } buf = is->memory.vaddr + is->setfile.base; memcpy(buf, fw->data, fw->size); fimc_is_mem_barrier(); is->setfile.size = fw->size; pr_debug("mem vaddr: %p, setfile buf: %p\n", is->memory.vaddr, buf); memcpy(is->fw.setfile_info, fw->data + fw->size - FIMC_IS_SETFILE_INFO_LEN, FIMC_IS_SETFILE_INFO_LEN - 1); is->fw.setfile_info[FIMC_IS_SETFILE_INFO_LEN - 1] = '\0'; is->setfile.state = 1; pr_debug("FIMC-IS setfile loaded: base: %#x, size: %zu B\n", is->setfile.base, fw->size); release_firmware(fw); return ret; } int fimc_is_cpu_set_power(struct fimc_is *is, int on) { unsigned int timeout = FIMC_IS_POWER_ON_TIMEOUT; if (on) { /* Disable watchdog */ mcuctl_write(0, is, 
REG_WDT_ISP); /* Cortex-A5 start address setting */ mcuctl_write(is->memory.addr, is, MCUCTL_REG_BBOAR); /* Enable and start Cortex-A5 */ pmuisp_write(0x18000, is, REG_PMU_ISP_ARM_OPTION); pmuisp_write(0x1, is, REG_PMU_ISP_ARM_CONFIGURATION); } else { /* A5 power off */ pmuisp_write(0x10000, is, REG_PMU_ISP_ARM_OPTION); pmuisp_write(0x0, is, REG_PMU_ISP_ARM_CONFIGURATION); while (pmuisp_read(is, REG_PMU_ISP_ARM_STATUS) & 1) { if (timeout == 0) return -ETIME; timeout--; udelay(1); } } return 0; } /* Wait until @bit of @is->state is set to @state in the interrupt handler. */ int fimc_is_wait_event(struct fimc_is *is, unsigned long bit, unsigned int state, unsigned int timeout) { int ret = wait_event_timeout(is->irq_queue, !state ^ test_bit(bit, &is->state), timeout); if (ret == 0) { dev_WARN(&is->pdev->dev, "%s() timed out\n", __func__); return -ETIME; } return 0; } int fimc_is_start_firmware(struct fimc_is *is) { struct device *dev = &is->pdev->dev; int ret; if (is->fw.f_w == NULL) { dev_err(dev, "firmware is not loaded\n"); return -EINVAL; } memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size); wmb(); ret = fimc_is_cpu_set_power(is, 1); if (ret < 0) return ret; ret = fimc_is_wait_event(is, IS_ST_A5_PWR_ON, 1, msecs_to_jiffies(FIMC_IS_FW_LOAD_TIMEOUT)); if (ret < 0) dev_err(dev, "FIMC-IS CPU power on failed\n"); return ret; } /* Allocate working memory for the FIMC-IS CPU. 
*/ static int fimc_is_alloc_cpu_memory(struct fimc_is *is) { struct device *dev = &is->pdev->dev; is->memory.vaddr = dma_alloc_coherent(dev, FIMC_IS_CPU_MEM_SIZE, &is->memory.addr, GFP_KERNEL); if (is->memory.vaddr == NULL) return -ENOMEM; is->memory.size = FIMC_IS_CPU_MEM_SIZE; dev_info(dev, "FIMC-IS CPU memory base: %pad\n", &is->memory.addr); if (((u32)is->memory.addr) & FIMC_IS_FW_ADDR_MASK) { dev_err(dev, "invalid firmware memory alignment: %#x\n", (u32)is->memory.addr); dma_free_coherent(dev, is->memory.size, is->memory.vaddr, is->memory.addr); return -EIO; } is->is_p_region = (struct is_region *)(is->memory.vaddr + FIMC_IS_CPU_MEM_SIZE - FIMC_IS_REGION_SIZE); is->is_dma_p_region = is->memory.addr + FIMC_IS_CPU_MEM_SIZE - FIMC_IS_REGION_SIZE; is->is_shared_region = (struct is_share_region *)(is->memory.vaddr + FIMC_IS_SHARED_REGION_OFFSET); return 0; } static void fimc_is_free_cpu_memory(struct fimc_is *is) { struct device *dev = &is->pdev->dev; if (is->memory.vaddr == NULL) return; dma_free_coherent(dev, is->memory.size, is->memory.vaddr, is->memory.addr); } static void fimc_is_load_firmware(const struct firmware *fw, void *context) { struct fimc_is *is = context; struct device *dev = &is->pdev->dev; void *buf; int ret; if (fw == NULL) { dev_err(dev, "firmware request failed\n"); return; } mutex_lock(&is->lock); if (fw->size < FIMC_IS_FW_SIZE_MIN || fw->size > FIMC_IS_FW_SIZE_MAX) { dev_err(dev, "wrong firmware size: %zu\n", fw->size); goto done; } is->fw.size = fw->size; ret = fimc_is_alloc_cpu_memory(is); if (ret < 0) { dev_err(dev, "failed to allocate FIMC-IS CPU memory\n"); goto done; } memcpy(is->memory.vaddr, fw->data, fw->size); wmb(); /* Read firmware description. 
*/ buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_DESC_LEN); memcpy(&is->fw.info, buf, FIMC_IS_FW_INFO_LEN); is->fw.info[FIMC_IS_FW_INFO_LEN] = 0; buf = (void *)(is->memory.vaddr + fw->size - FIMC_IS_FW_VER_LEN); memcpy(&is->fw.version, buf, FIMC_IS_FW_VER_LEN); is->fw.version[FIMC_IS_FW_VER_LEN - 1] = 0; is->fw.state = 1; dev_info(dev, "loaded firmware: %s, rev. %s\n", is->fw.info, is->fw.version); dev_dbg(dev, "FW size: %zu, DMA addr: %pad\n", fw->size, &is->memory.addr); is->is_shared_region->chip_id = 0xe4412; is->is_shared_region->chip_rev_no = 1; fimc_is_mem_barrier(); /* * FIXME: The firmware is not being released for now, as it is * needed around for copying to the IS working memory every * time before the Cortex-A5 is restarted. */ release_firmware(is->fw.f_w); is->fw.f_w = fw; done: mutex_unlock(&is->lock); } static int fimc_is_request_firmware(struct fimc_is *is, const char *fw_name) { return request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, fw_name, &is->pdev->dev, GFP_KERNEL, is, fimc_is_load_firmware); } /* General IS interrupt handler */ static void fimc_is_general_irq_handler(struct fimc_is *is) { is->i2h_cmd.cmd = mcuctl_read(is, MCUCTL_REG_ISSR(10)); switch (is->i2h_cmd.cmd) { case IHC_GET_SENSOR_NUM: fimc_is_hw_get_params(is, 1); fimc_is_hw_wait_intmsr0_intmsd0(is); fimc_is_hw_set_sensor_num(is); pr_debug("ISP FW version: %#x\n", is->i2h_cmd.args[0]); break; case IHC_SET_FACE_MARK: case IHC_FRAME_DONE: fimc_is_hw_get_params(is, 2); break; case IHC_SET_SHOT_MARK: case IHC_AA_DONE: case IH_REPLY_DONE: fimc_is_hw_get_params(is, 3); break; case IH_REPLY_NOT_DONE: fimc_is_hw_get_params(is, 4); break; case IHC_NOT_READY: break; default: pr_info("unknown command: %#x\n", is->i2h_cmd.cmd); } fimc_is_fw_clear_irq1(is, FIMC_IS_INT_GENERAL); switch (is->i2h_cmd.cmd) { case IHC_GET_SENSOR_NUM: fimc_is_hw_set_intgr0_gd0(is); set_bit(IS_ST_A5_PWR_ON, &is->state); break; case IHC_SET_SHOT_MARK: break; case IHC_SET_FACE_MARK: is->fd_header.count = 
is->i2h_cmd.args[0]; is->fd_header.index = is->i2h_cmd.args[1]; is->fd_header.offset = 0; break; case IHC_FRAME_DONE: break; case IHC_AA_DONE: pr_debug("AA_DONE - %d, %d, %d\n", is->i2h_cmd.args[0], is->i2h_cmd.args[1], is->i2h_cmd.args[2]); break; case IH_REPLY_DONE: pr_debug("ISR_DONE: args[0]: %#x\n", is->i2h_cmd.args[0]); switch (is->i2h_cmd.args[0]) { case HIC_PREVIEW_STILL...HIC_CAPTURE_VIDEO: /* Get CAC margin */ set_bit(IS_ST_CHANGE_MODE, &is->state); is->isp.cac_margin_x = is->i2h_cmd.args[1]; is->isp.cac_margin_y = is->i2h_cmd.args[2]; pr_debug("CAC margin (x,y): (%d,%d)\n", is->isp.cac_margin_x, is->isp.cac_margin_y); break; case HIC_STREAM_ON: clear_bit(IS_ST_STREAM_OFF, &is->state); set_bit(IS_ST_STREAM_ON, &is->state); break; case HIC_STREAM_OFF: clear_bit(IS_ST_STREAM_ON, &is->state); set_bit(IS_ST_STREAM_OFF, &is->state); break; case HIC_SET_PARAMETER: is->config[is->config_index].p_region_index[0] = 0; is->config[is->config_index].p_region_index[1] = 0; set_bit(IS_ST_BLOCK_CMD_CLEARED, &is->state); pr_debug("HIC_SET_PARAMETER\n"); break; case HIC_GET_PARAMETER: break; case HIC_SET_TUNE: break; case HIC_GET_STATUS: break; case HIC_OPEN_SENSOR: set_bit(IS_ST_OPEN_SENSOR, &is->state); pr_debug("data lanes: %d, settle line: %d\n", is->i2h_cmd.args[2], is->i2h_cmd.args[1]); break; case HIC_CLOSE_SENSOR: clear_bit(IS_ST_OPEN_SENSOR, &is->state); is->sensor_index = 0; break; case HIC_MSG_TEST: pr_debug("config MSG level completed\n"); break; case HIC_POWER_DOWN: clear_bit(IS_ST_PWR_SUBIP_ON, &is->state); break; case HIC_GET_SET_FILE_ADDR: is->setfile.base = is->i2h_cmd.args[1]; set_bit(IS_ST_SETFILE_LOADED, &is->state); break; case HIC_LOAD_SET_FILE: set_bit(IS_ST_SETFILE_LOADED, &is->state); break; } break; case IH_REPLY_NOT_DONE: pr_err("ISR_NDONE: %d: %#x, %s\n", is->i2h_cmd.args[0], is->i2h_cmd.args[1], fimc_is_strerr(is->i2h_cmd.args[1])); if (is->i2h_cmd.args[1] & IS_ERROR_TIME_OUT_FLAG) pr_err("IS_ERROR_TIME_OUT\n"); switch (is->i2h_cmd.args[1]) { 
case IS_ERROR_SET_PARAMETER: fimc_is_mem_barrier(); } switch (is->i2h_cmd.args[0]) { case HIC_SET_PARAMETER: is->config[is->config_index].p_region_index[0] = 0; is->config[is->config_index].p_region_index[1] = 0; set_bit(IS_ST_BLOCK_CMD_CLEARED, &is->state); break; } break; case IHC_NOT_READY: pr_err("IS control sequence error: Not Ready\n"); break; } wake_up(&is->irq_queue); } static irqreturn_t fimc_is_irq_handler(int irq, void *priv) { struct fimc_is *is = priv; unsigned long flags; u32 status; spin_lock_irqsave(&is->slock, flags); status = mcuctl_read(is, MCUCTL_REG_INTSR1); if (status & (1UL << FIMC_IS_INT_GENERAL)) fimc_is_general_irq_handler(is); if (status & (1UL << FIMC_IS_INT_FRAME_DONE_ISP)) fimc_isp_irq_handler(is); spin_unlock_irqrestore(&is->slock, flags); return IRQ_HANDLED; } static int fimc_is_hw_open_sensor(struct fimc_is *is, struct fimc_is_sensor *sensor) { struct sensor_open_extended *soe = (void *)&is->is_p_region->shared; fimc_is_hw_wait_intmsr0_intmsd0(is); soe->self_calibration_mode = 1; soe->actuator_type = 0; soe->mipi_lane_num = 0; soe->mclk = 0; soe->mipi_speed = 0; soe->fast_open_sensor = 0; soe->i2c_sclk = 88000000; fimc_is_mem_barrier(); /* * Some user space use cases hang up here without this * empirically chosen delay. */ udelay(100); mcuctl_write(HIC_OPEN_SENSOR, is, MCUCTL_REG_ISSR(0)); mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1)); mcuctl_write(sensor->drvdata->id, is, MCUCTL_REG_ISSR(2)); mcuctl_write(sensor->i2c_bus, is, MCUCTL_REG_ISSR(3)); mcuctl_write(is->is_dma_p_region, is, MCUCTL_REG_ISSR(4)); fimc_is_hw_set_intgr0_gd0(is); return fimc_is_wait_event(is, IS_ST_OPEN_SENSOR, 1, sensor->drvdata->open_timeout); } int fimc_is_hw_initialize(struct fimc_is *is) { static const int config_ids[] = { IS_SC_PREVIEW_STILL, IS_SC_PREVIEW_VIDEO, IS_SC_CAPTURE_STILL, IS_SC_CAPTURE_VIDEO }; struct device *dev = &is->pdev->dev; u32 prev_id; int i, ret; /* Sensor initialization. Only one sensor is currently supported. 
*/ ret = fimc_is_hw_open_sensor(is, &is->sensor[0]); if (ret < 0) return ret; /* Get the setfile address. */ fimc_is_hw_get_setfile_addr(is); ret = fimc_is_wait_event(is, IS_ST_SETFILE_LOADED, 1, FIMC_IS_CONFIG_TIMEOUT); if (ret < 0) { dev_err(dev, "get setfile address timed out\n"); return ret; } pr_debug("setfile.base: %#x\n", is->setfile.base); /* Load the setfile. */ fimc_is_load_setfile(is, FIMC_IS_SETFILE_6A3); clear_bit(IS_ST_SETFILE_LOADED, &is->state); fimc_is_hw_load_setfile(is); ret = fimc_is_wait_event(is, IS_ST_SETFILE_LOADED, 1, FIMC_IS_CONFIG_TIMEOUT); if (ret < 0) { dev_err(dev, "loading setfile timed out\n"); return ret; } pr_debug("setfile: base: %#x, size: %d\n", is->setfile.base, is->setfile.size); pr_info("FIMC-IS Setfile info: %s\n", is->fw.setfile_info); /* Check magic number. */ if (is->is_p_region->shared[MAX_SHARED_COUNT - 1] != FIMC_IS_MAGIC_NUMBER) { dev_err(dev, "magic number error!\n"); return -EIO; } pr_debug("shared region: %pad, parameter region: %pad\n", &is->memory.addr + FIMC_IS_SHARED_REGION_OFFSET, &is->is_dma_p_region); is->setfile.sub_index = 0; /* Stream off. */ fimc_is_hw_stream_off(is); ret = fimc_is_wait_event(is, IS_ST_STREAM_OFF, 1, FIMC_IS_CONFIG_TIMEOUT); if (ret < 0) { dev_err(dev, "stream off timeout\n"); return ret; } /* Preserve previous mode. */ prev_id = is->config_index; /* Set initial parameter values. 
*/ for (i = 0; i < ARRAY_SIZE(config_ids); i++) { is->config_index = config_ids[i]; fimc_is_set_initial_params(is); ret = fimc_is_itf_s_param(is, true); if (ret < 0) { is->config_index = prev_id; return ret; } } is->config_index = prev_id; set_bit(IS_ST_INIT_DONE, &is->state); dev_info(dev, "initialization sequence completed (%d)\n", is->config_index); return 0; } static int fimc_is_show(struct seq_file *s, void *data) { struct fimc_is *is = s->private; const u8 *buf = is->memory.vaddr + FIMC_IS_DEBUG_REGION_OFFSET; if (is->memory.vaddr == NULL) { dev_err(&is->pdev->dev, "firmware memory is not initialized\n"); return -EIO; } seq_printf(s, "%s\n", buf); return 0; } DEFINE_SHOW_ATTRIBUTE(fimc_is); static void fimc_is_debugfs_remove(struct fimc_is *is) { debugfs_remove_recursive(is->debugfs_entry); is->debugfs_entry = NULL; } static void fimc_is_debugfs_create(struct fimc_is *is) { is->debugfs_entry = debugfs_create_dir("fimc_is", NULL); debugfs_create_file("fw_log", S_IRUGO, is->debugfs_entry, is, &fimc_is_fops); } static int fimc_is_runtime_resume(struct device *dev); static int fimc_is_runtime_suspend(struct device *dev); static int fimc_is_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct fimc_is *is; struct resource res; struct device_node *node; int ret; is = devm_kzalloc(&pdev->dev, sizeof(*is), GFP_KERNEL); if (!is) return -ENOMEM; is->pdev = pdev; is->isp.pdev = pdev; init_waitqueue_head(&is->irq_queue); spin_lock_init(&is->slock); mutex_init(&is->lock); ret = of_address_to_resource(dev->of_node, 0, &res); if (ret < 0) return ret; is->regs = devm_ioremap_resource(dev, &res); if (IS_ERR(is->regs)) return PTR_ERR(is->regs); node = of_get_child_by_name(dev->of_node, "pmu"); if (!node) return -ENODEV; is->pmu_regs = of_iomap(node, 0); of_node_put(node); if (!is->pmu_regs) return -ENOMEM; is->irq = irq_of_parse_and_map(dev->of_node, 0); if (!is->irq) { dev_err(dev, "no irq found\n"); ret = -EINVAL; goto err_iounmap; } ret = 
fimc_is_get_clocks(is); if (ret < 0) goto err_iounmap; platform_set_drvdata(pdev, is); ret = request_irq(is->irq, fimc_is_irq_handler, 0, dev_name(dev), is); if (ret < 0) { dev_err(dev, "irq request failed\n"); goto err_clk; } pm_runtime_enable(dev); if (!pm_runtime_enabled(dev)) { ret = fimc_is_runtime_resume(dev); if (ret < 0) goto err_irq; } ret = pm_runtime_resume_and_get(dev); if (ret < 0) goto err_irq; vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32)); ret = devm_of_platform_populate(dev); if (ret < 0) goto err_pm; /* * Register FIMC-IS V4L2 subdevs to this driver. The video nodes * will be created within the subdev's registered() callback. */ ret = fimc_is_register_subdevs(is); if (ret < 0) goto err_pm; fimc_is_debugfs_create(is); ret = fimc_is_request_firmware(is, FIMC_IS_FW_FILENAME); if (ret < 0) goto err_dfs; pm_runtime_put_sync(dev); dev_dbg(dev, "FIMC-IS registered successfully\n"); return 0; err_dfs: fimc_is_debugfs_remove(is); fimc_is_unregister_subdevs(is); err_pm: pm_runtime_put_noidle(dev); if (!pm_runtime_enabled(dev)) fimc_is_runtime_suspend(dev); err_irq: free_irq(is->irq, is); err_clk: fimc_is_put_clocks(is); err_iounmap: iounmap(is->pmu_regs); return ret; } static int fimc_is_runtime_resume(struct device *dev) { struct fimc_is *is = dev_get_drvdata(dev); int ret; ret = fimc_is_setup_clocks(is); if (ret) return ret; return fimc_is_enable_clocks(is); } static int fimc_is_runtime_suspend(struct device *dev) { struct fimc_is *is = dev_get_drvdata(dev); fimc_is_disable_clocks(is); return 0; } #ifdef CONFIG_PM_SLEEP static int fimc_is_resume(struct device *dev) { /* TODO: */ return 0; } static int fimc_is_suspend(struct device *dev) { struct fimc_is *is = dev_get_drvdata(dev); /* TODO: */ if (test_bit(IS_ST_A5_PWR_ON, &is->state)) return -EBUSY; return 0; } #endif /* CONFIG_PM_SLEEP */ static int fimc_is_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct fimc_is *is = dev_get_drvdata(dev); pm_runtime_disable(dev); 
pm_runtime_set_suspended(dev); if (!pm_runtime_status_suspended(dev)) fimc_is_runtime_suspend(dev); free_irq(is->irq, is); fimc_is_unregister_subdevs(is); vb2_dma_contig_clear_max_seg_size(dev); fimc_is_put_clocks(is); iounmap(is->pmu_regs); fimc_is_debugfs_remove(is); release_firmware(is->fw.f_w); fimc_is_free_cpu_memory(is); return 0; } static const struct of_device_id fimc_is_of_match[] = { { .compatible = "samsung,exynos4212-fimc-is" }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, fimc_is_of_match); static const struct dev_pm_ops fimc_is_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(fimc_is_suspend, fimc_is_resume) SET_RUNTIME_PM_OPS(fimc_is_runtime_suspend, fimc_is_runtime_resume, NULL) }; static struct platform_driver fimc_is_driver = { .probe = fimc_is_probe, .remove = fimc_is_remove, .driver = { .of_match_table = fimc_is_of_match, .name = FIMC_IS_DRV_NAME, .pm = &fimc_is_pm_ops, } }; static int fimc_is_module_init(void) { int ret; ret = fimc_is_register_i2c_driver(); if (ret < 0) return ret; ret = platform_driver_register(&fimc_is_driver); if (ret < 0) fimc_is_unregister_i2c_driver(); return ret; } static void fimc_is_module_exit(void) { fimc_is_unregister_i2c_driver(); platform_driver_unregister(&fimc_is_driver); } module_init(fimc_is_module_init); module_exit(fimc_is_module_exit); MODULE_ALIAS("platform:" FIMC_IS_DRV_NAME); MODULE_AUTHOR("Younghwan Joo <yhwan.joo@samsung.com>"); MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
rborisov/u-boot-am335x
arch/arm/cpu/arm926ejs/davinci/dm365_lowlevel.c
102
13315
/*
 * SoC-specific lowlevel code for tms320dm365 and similar chips
 * Actually used for booting from NAND with nand_spl.
 *
 * Copyright (C) 2011
 * Heiko Schocher, DENX Software Engineering, hs@denx.de.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <common.h>
#include <nand.h>
#include <ns16550.h>
#include <post.h>
#include <asm/arch/dm365_lowlevel.h>
#include <asm/arch/hardware.h>

/*
 * Crude delay: spin for @loopcnt NOPs.  Used because no timer is set up
 * this early in the boot.
 */
void dm365_waitloop(unsigned long loopcnt)
{
	unsigned long n;

	for (n = 0; n < loopcnt; n++)
		asm(" NOP");
}

/*
 * Bring up PLL1 (system PLL) with the given multiplier and pre-divider.
 * The register sequence (bypass -> reset pulse -> program -> TINITZ
 * toggle -> dividers -> GOSET -> wait for lock -> enable) follows the
 * documented PLL controller bring-up order and must not be reordered.
 */
int dm365_pll1_init(unsigned long pllmult, unsigned long prediv)
{
	unsigned int clksrc = 0x0;

	/* Power up the PLL and select the clock mode */
	clrbits_le32(&dv_pll0_regs->pllctl, PLLCTL_PLLPWRDN);
	clrbits_le32(&dv_pll0_regs->pllctl, PLLCTL_RES_9);
	setbits_le32(&dv_pll0_regs->pllctl, clksrc << PLLCTL_CLOCK_MODE_SHIFT);

	/* PLLENSRC = 0: PLLEN is controlled through the MMR */
	clrbits_le32(&dv_pll0_regs->pllctl, PLLCTL_PLLENSRC);

	/* PLLEN = 0: bypass mode while programming */
	clrbits_le32(&dv_pll0_regs->pllctl, PLLCTL_PLLEN);

	dm365_waitloop(150);

	/* Pulse PLLRST: assert ... */
	setbits_le32(&dv_pll0_regs->pllctl, PLLCTL_PLLRST);
	dm365_waitloop(300);
	/* ... and release */
	clrbits_le32(&dv_pll0_regs->pllctl, PLLCTL_PLLRST);

	/* Multiplier and pre-divider for PLL1 */
	writel(pllmult, &dv_pll0_regs->pllm);
	writel(prediv, &dv_pll0_regs->prediv);

	/* TENABLE/TENABLEDIV/TINITZ handshake, four steps in order */
	writel(PLLSECCTL_STOPMODE | PLLSECCTL_TENABLEDIV | PLLSECCTL_TENABLE |
		PLLSECCTL_TINITZ, &dv_pll0_regs->secctl);
	writel(PLLSECCTL_STOPMODE | PLLSECCTL_TENABLEDIV | PLLSECCTL_TENABLE,
		&dv_pll0_regs->secctl);
	writel(PLLSECCTL_STOPMODE, &dv_pll0_regs->secctl);
	writel(PLLSECCTL_STOPMODE | PLLSECCTL_TINITZ, &dv_pll0_regs->secctl);

	/* Enable post-divider programming */
	writel(PLL_POSTDEN, &dv_pll0_regs->postdiv);

	/* Post-divider settings for PLL1 */
	writel(CONFIG_SYS_DM36x_PLL1_PLLDIV1, &dv_pll0_regs->plldiv1);
	writel(CONFIG_SYS_DM36x_PLL1_PLLDIV2, &dv_pll0_regs->plldiv2);
	writel(CONFIG_SYS_DM36x_PLL1_PLLDIV3, &dv_pll0_regs->plldiv3);
	writel(CONFIG_SYS_DM36x_PLL1_PLLDIV4, &dv_pll0_regs->plldiv4);
	writel(CONFIG_SYS_DM36x_PLL1_PLLDIV5, &dv_pll0_regs->plldiv5);
	writel(CONFIG_SYS_DM36x_PLL1_PLLDIV6, &dv_pll0_regs->plldiv6);
	writel(CONFIG_SYS_DM36x_PLL1_PLLDIV7, &dv_pll0_regs->plldiv7);
	writel(CONFIG_SYS_DM36x_PLL1_PLLDIV8, &dv_pll0_regs->plldiv8);
	writel(CONFIG_SYS_DM36x_PLL1_PLLDIV9, &dv_pll0_regs->plldiv9);

	dm365_waitloop(300);

	/* Kick off the divider transition */
	writel(PLLCMD_GOSET, &dv_pll0_regs->pllcmd);
	dm365_waitloop(300);

	/* Spin until PLL1 reports lock */
	while ((readl(&dv_sys_module_regs->pll0_config) & PLL0_LOCK) !=
			PLL0_LOCK)
		;

	/* Leave bypass: PLLEN = 1 */
	setbits_le32(&dv_pll0_regs->pllctl, PLLCTL_PLLEN);

	return 0;
}

/*
 * Bring up PLL2 (peripheral PLL).  Same ceremony as PLL1, but the
 * post-divider enable is written before the TINITZ handshake and only
 * dividers 1..5 exist.  Also programs the peripheral clock control
 * once both PLLs are running.
 */
int dm365_pll2_init(unsigned long pllm, unsigned long prediv)
{
	unsigned int clksrc = 0x0;

	/* Power up the PLL */
	clrbits_le32(&dv_pll1_regs->pllctl, PLLCTL_PLLPWRDN);

	/*
	 * Select the clock mode: on-chip oscillator or external clock on
	 * the MXI pin (VDB has input on MXI).
	 */
	clrbits_le32(&dv_pll1_regs->pllctl, PLLCTL_RES_9);
	setbits_le32(&dv_pll1_regs->pllctl, clksrc << PLLCTL_CLOCK_MODE_SHIFT);

	/* PLLENSRC = 0: PLLEN is controlled through the MMR */
	clrbits_le32(&dv_pll1_regs->pllctl, PLLCTL_PLLENSRC);

	/* PLLEN = 0: bypass mode while programming */
	clrbits_le32(&dv_pll1_regs->pllctl, PLLCTL_PLLEN);

	dm365_waitloop(50);

	/* Pulse PLLRST */
	setbits_le32(&dv_pll1_regs->pllctl, PLLCTL_PLLRST);
	dm365_waitloop(300);
	clrbits_le32(&dv_pll1_regs->pllctl, PLLCTL_PLLRST);

	/* Multiplier, pre-divider and post-divider enable for PLL2 */
	writel(pllm, &dv_pll1_regs->pllm);
	writel(prediv, &dv_pll1_regs->prediv);
	writel(PLL_POSTDEN, &dv_pll1_regs->postdiv);

	/* TENABLE/TENABLEDIV/TINITZ handshake, four steps in order */
	writel(PLLSECCTL_STOPMODE | PLLSECCTL_TENABLEDIV | PLLSECCTL_TENABLE |
		PLLSECCTL_TINITZ, &dv_pll1_regs->secctl);
	writel(PLLSECCTL_STOPMODE | PLLSECCTL_TENABLEDIV | PLLSECCTL_TENABLE,
		&dv_pll1_regs->secctl);
	writel(PLLSECCTL_STOPMODE, &dv_pll1_regs->secctl);
	writel(PLLSECCTL_STOPMODE | PLLSECCTL_TINITZ, &dv_pll1_regs->secctl);

	/* Post-divider settings for PLL2 */
	writel(CONFIG_SYS_DM36x_PLL2_PLLDIV1, &dv_pll1_regs->plldiv1);
	writel(CONFIG_SYS_DM36x_PLL2_PLLDIV2, &dv_pll1_regs->plldiv2);
	writel(CONFIG_SYS_DM36x_PLL2_PLLDIV3, &dv_pll1_regs->plldiv3);
	writel(CONFIG_SYS_DM36x_PLL2_PLLDIV4, &dv_pll1_regs->plldiv4);
	writel(CONFIG_SYS_DM36x_PLL2_PLLDIV5, &dv_pll1_regs->plldiv5);

	/* GoCmd so the post-dividers take effect */
	writel(PLLCMD_GOSET, &dv_pll1_regs->pllcmd);
	dm365_waitloop(150);

	/* Spin until PLL2 reports lock */
	while ((readl(&dv_sys_module_regs->pll1_config) & PLL1_LOCK) !=
			PLL1_LOCK)
		;

	dm365_waitloop(4100);

	/* Leave bypass: PLLEN = 1 */
	setbits_le32(&dv_pll1_regs->pllctl, PLLCTL_PLLEN);

	/* Must be done after both PLLs are up */
	writel(CONFIG_SYS_DM36x_PERI_CLK_CTRL,
		&dv_sys_module_regs->peri_clkctl);

	return 0;
}

/*
 * DDR2 controller bring-up: VTP I/O calibration, PHY configuration and
 * SDRAM timing/refresh programming, bracketed by PSC syncreset/enable
 * cycles of the DDR EMIF module.
 */
int dm365_ddr_setup(void)
{
	lpsc_on(DAVINCI_LPSC_DDR_EMIF);

	clrbits_le32(&dv_sys_module_regs->vtpiocr,
		VPTIO_IOPWRDN | VPTIO_CLRZ | VPTIO_LOCK | VPTIO_PWRDN);

	/* CLRZ (bit 13) starts the calibration */
	setbits_le32(&dv_sys_module_regs->vtpiocr, VPTIO_CLRZ);

	/* Wait for VTP READY */
	while (!(readl(&dv_sys_module_regs->vtpiocr) & VPTIO_RDY))
		;

	/* VTP_IOPWRDWN (bit 14) for the DDR input buffers */
	setbits_le32(&dv_sys_module_regs->vtpiocr, VPTIO_IOPWRDN);

	/* LOCK (bit 7) freezes the calibration result */
	setbits_le32(&dv_sys_module_regs->vtpiocr, VPTIO_LOCK);

	/*
	 * Power down VTP now that it is locked (bit 6); keep
	 * VTP_IOPWRDWN (bit 14) set for the DDR input buffers.
	 */
	setbits_le32(&dv_sys_module_regs->vtpiocr,
		VPTIO_IOPWRDN | VPTIO_PWRDN);

	/* Let the calibration settle */
	dm365_waitloop(150);

	/* Sync-reset the DDR2 module, then enable it again */
	lpsc_syncreset(DAVINCI_LPSC_DDR_EMIF);
	lpsc_on(DAVINCI_LPSC_DDR_EMIF);

	writel(CONFIG_SYS_DM36x_DDR2_DDRPHYCR, &dv_ddr2_regs_ctrl->ddrphycr);

	/* SDRAM bank config: unlock boot config, then timing registers */
	writel((CONFIG_SYS_DM36x_DDR2_SDBCR | DV_DDR_BOOTUNLOCK),
		&dv_ddr2_regs_ctrl->sdbcr);
	writel((CONFIG_SYS_DM36x_DDR2_SDBCR | DV_DDR_TIMUNLOCK),
		&dv_ddr2_regs_ctrl->sdbcr);

	/* SDRAM timing control registers 1 and 2 */
	writel(CONFIG_SYS_DM36x_DDR2_SDTIMR, &dv_ddr2_regs_ctrl->sdtimr);
	writel(CONFIG_SYS_DM36x_DDR2_SDTIMR2, &dv_ddr2_regs_ctrl->sdtimr2);

	writel(CONFIG_SYS_DM36x_DDR2_PBBPR, &dv_ddr2_regs_ctrl->pbbpr);

	/* Re-lock the bank config */
	writel(CONFIG_SYS_DM36x_DDR2_SDBCR, &dv_ddr2_regs_ctrl->sdbcr);

	/* SDRAM refresh control */
	writel(CONFIG_SYS_DM36x_DDR2_SDRCR, &dv_ddr2_regs_ctrl->sdrcr);

	lpsc_syncreset(DAVINCI_LPSC_DDR_EMIF);
	lpsc_on(DAVINCI_LPSC_DDR_EMIF);

	return 0;
}

/* Sync-reset the VPSS master module through the PSC and wait for it. */
static void dm365_vpss_sync_reset(void)
{
	unsigned int pd_num = 0;

	/* VPSS_CLKMD 1:1 */
	setbits_le32(&dv_sys_module_regs->vpss_clkctl,
		VPSS_CLK_CTL_VPSS_CLKMD);

	/* Request SYNCRESET state for the VPSS master LPSC */
	writel(((readl(&dv_psc_regs->mdctl[DAVINCI_LPSC_VPSSMASTER]) &
		~PSC_MD_STATE_MSK) | PSC_SYNCRESET),
		&dv_psc_regs->mdctl[DAVINCI_LPSC_VPSSMASTER]);

	/* Kick off the transition for power domain 0 */
	writel((1 << pd_num), &dv_psc_regs->ptcmd);

	/* Wait until the PSC reports "no transition in progress" */
	while ((readl(&dv_psc_regs->ptstat) >> pd_num) & PSC_GOSTAT)
		;

	/* ... and until the module actually reached SYNCRESET */
	while ((readl(&dv_psc_regs->mdstat[DAVINCI_LPSC_VPSSMASTER]) &
		PSC_MD_STATE_MSK) != PSC_SYNCRESET)
		;
}

/*
 * If this boot was caused by power-on or external warm reset, note that
 * in TMPBUF/TMPSTATUS and trigger a watchdog system reset (never
 * returns in that case).
 */
static void dm365_por_reset(void)
{
	struct davinci_timer *wdt =
		(struct davinci_timer *)DAVINCI_WDOG_BASE;

	if (readl(&dv_pll0_regs->rstype) &
		(PLL_RSTYPE_POR | PLL_RSTYPE_XWRST)) {
		dm365_vpss_sync_reset();

		writel(DV_TMPBUF_VAL, TMPBUF);
		setbits_le32(TMPSTATUS, FLAG_PORRST);
		writel(DV_WDT_ENABLE_SYS_RESET, &wdt->na1);
		writel(DV_WDT_TRIGGER_SYS_RESET, &wdt->na2);
		while (1)
			;
	}
}

/*
 * First pass after a watchdog reset: if the marker in TMPBUF is not yet
 * set, record the flags and force one more watchdog system reset (never
 * returns in that case).
 */
static void dm365_wdt_reset(void)
{
	struct davinci_timer *wdt =
		(struct davinci_timer *)DAVINCI_WDOG_BASE;

	if (readl(TMPBUF) != DV_TMPBUF_VAL) {
		writel(DV_TMPBUF_VAL, TMPBUF);
		setbits_le32(TMPSTATUS, FLAG_PORRST);
		setbits_le32(TMPSTATUS, FLAG_FLGOFF);

		dm365_waitloop(100);

		dm365_vpss_sync_reset();

		writel(DV_WDT_ENABLE_SYS_RESET, &wdt->na1);
		writel(DV_WDT_TRIGGER_SYS_RESET, &wdt->na2);
		while (1)
			;
	}
}

/* Clear the reset marker and flag that the watchdog dance is done. */
static void dm365_wdt_flag_on(void)
{
	/* VPSS_CLKMD 1:2 */
	clrbits_le32(&dv_sys_module_regs->vpss_clkctl,
		VPSS_CLK_CTL_VPSS_CLKMD);

	writel(0, TMPBUF);
	setbits_le32(TMPSTATUS, FLAG_FLGON);
}

/*
 * Enable all usable LPSC modules in three groups (the locked LPSCs
 * 29-37 are skipped), waiting after each group for the PSC transition
 * to complete and each module to report ENABLE.
 */
void dm365_psc_init(void)
{
	unsigned char lpsc;
	unsigned char lpsc_start;
	unsigned char lpsc_end, grp;
	unsigned int pd_num = 0;

	for (grp = 0; grp <= 2; grp++) {
		switch (grp) {
		case 0:
			/* Enable LPSC 3 to 28 SCR first */
			lpsc_start = DAVINCI_LPSC_VPSSMSTR;
			lpsc_end = DAVINCI_LPSC_TIMER1;
			break;
		case 1:
			/* Skip locked LPSCs [29-37] */
			lpsc_start = DAVINCI_LPSC_CFG5;
			lpsc_end = DAVINCI_LPSC_VPSSMASTER;
			break;
		default:
			lpsc_start = DAVINCI_LPSC_MJCP;
			lpsc_end = DAVINCI_LPSC_HDVICP;
			break;
		}

		/* NEXT = 0x3: request ENABLE for every LPSC in the group */
		for (lpsc = lpsc_start; lpsc <= lpsc_end; lpsc++)
			setbits_le32(&dv_psc_regs->mdctl[lpsc], PSC_ENABLE);

		/*
		 * PTCMD = (1 << pd_num): kick off the transition
		 * sequence for power domain 0.
		 */
		writel((1 << pd_num), &dv_psc_regs->ptcmd);

		/* Wait for GOSTAT = NO TRANSITION on power domain 0 */
		while ((readl(&dv_psc_regs->ptstat) >> pd_num) & PSC_GOSTAT)
			;

		/* Wait for MODSTAT = ENABLE on every LPSC in the group */
		for (lpsc = lpsc_start; lpsc <= lpsc_end; lpsc++)
			while ((readl(&dv_psc_regs->mdstat[lpsc]) &
				PSC_MD_STATE_MSK) != PSC_ENABLE)
				;
	}
}

/* Async EMIF timings and NAND chip-select setup. */
static void dm365_emif_init(void)
{
	writel(CONFIG_SYS_DM36x_AWCCR, &davinci_emif_regs->awccr);
	writel(CONFIG_SYS_DM36x_AB1CR, &davinci_emif_regs->ab1cr);

	setbits_le32(&davinci_emif_regs->nandfcr, DAVINCI_NANDFCR_CS2NAND);

	writel(CONFIG_SYS_DM36x_AB2CR, &davinci_emif_regs->ab2cr);

	return;
}

/*
 * Program one pinmux register: clear the bits in @mask, then set the
 * masked portion of @value.
 */
void dm365_pinmux_ctl(unsigned long offset, unsigned long mask,
	unsigned long value)
{
	clrbits_le32(&dv_sys_module_regs->pinmux[offset], mask);
	setbits_le32(&dv_sys_module_regs->pinmux[offset], (mask & value));
}

/* Board hook; boards override this to set up their GPIOs. */
__attribute__((weak))
void board_gpio_init(void)
{
	return;
}

#if defined(CONFIG_POST)
/* Minimal post_log stub for the SPL; POST output is discarded here. */
int post_log(char *format, ...)
{
	return 0;
}
#endif

/*
 * Full lowlevel SoC init: mask/clear interrupts, handle POR/watchdog
 * reset bookkeeping, PSC, pinmux, PLLs, board GPIOs, console UART,
 * DDR and EMIF.  @bootflag is currently unused.
 */
void dm36x_lowlevel_init(ulong bootflag)
{
	struct davinci_uart_ctrl_regs *uart_ctrl =
		(struct davinci_uart_ctrl_regs *)(CONFIG_SYS_NS16550_COM1 +
		DAVINCI_UART_CTRL_BASE);

	/* Mask all interrupts */
	writel(DV_AINTC_INTCTL_IDMODE, &dv_aintc_regs->intctl);
	writel(0x0, &dv_aintc_regs->eabase);
	writel(0x0, &dv_aintc_regs->eint0);
	writel(0x0, &dv_aintc_regs->eint1);

	/* Clear all pending interrupts */
	writel(0xffffffff, &dv_aintc_regs->fiq0);
	writel(0xffffffff, &dv_aintc_regs->fiq1);
	writel(0xffffffff, &dv_aintc_regs->irq0);
	writel(0xffffffff, &dv_aintc_regs->irq1);

	dm365_por_reset();
	dm365_wdt_reset();

	/* System PSC setup - enable all */
	dm365_psc_init();

	/* Pinmux setup */
	dm365_pinmux_ctl(0, 0xFFFFFFFF, CONFIG_SYS_DM36x_PINMUX0);
	dm365_pinmux_ctl(1, 0xFFFFFFFF, CONFIG_SYS_DM36x_PINMUX1);
	dm365_pinmux_ctl(2, 0xFFFFFFFF, CONFIG_SYS_DM36x_PINMUX2);
	dm365_pinmux_ctl(3, 0xFFFFFFFF, CONFIG_SYS_DM36x_PINMUX3);
	dm365_pinmux_ctl(4, 0xFFFFFFFF, CONFIG_SYS_DM36x_PINMUX4);

	/* PLL setup */
	dm365_pll1_init(CONFIG_SYS_DM36x_PLL1_PLLM,
		CONFIG_SYS_DM36x_PLL1_PREDIV);
	dm365_pll2_init(CONFIG_SYS_DM36x_PLL2_PLLM,
		CONFIG_SYS_DM36x_PLL2_PREDIV);

	/* GPIO setup */
	board_gpio_init();

	NS16550_init((NS16550_t)(CONFIG_SYS_NS16550_COM1),
		CONFIG_SYS_NS16550_CLK / 16 / CONFIG_BAUDRATE);

	/*
	 * Fix Power and Emulation Management Register
	 * see sprufh2.pdf page 38 Table 22
	 */
	writel((DAVINCI_UART_PWREMU_MGMT_FREE | DAVINCI_UART_PWREMU_MGMT_URRST |
		DAVINCI_UART_PWREMU_MGMT_UTRST),
		&uart_ctrl->pwremu_mgmt);

	puts("ddr init\n");
	dm365_ddr_setup();

	puts("emif init\n");
	dm365_emif_init();

	dm365_wdt_flag_on();

#if defined(CONFIG_POST)
	/*
	 * Do memory tests, calls arch_memory_failure_handle()
	 * if error detected.
	 */
	memory_post_test(0);
#endif
}
gpl-2.0
JacobTech/lge_kernel_e400-
drivers/mmc/host/sdhci-tegra.c
102
6393
/*
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include <mach/gpio.h>
#include <mach/sdhci.h>

#include "sdhci.h"
#include "sdhci-pltfm.h"

/*
 * Register read hook: force the write-protect bit on in PRESENT_STATE
 * (the real WP state is read via wp_gpio in tegra_sdhci_get_ro()).
 */
static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
{
	u32 val = readl(host->ioaddr + reg);

	if (unlikely(reg == SDHCI_PRESENT_STATE)) {
		/* Use wp_gpio here instead? */
		val |= SDHCI_WRITE_PROTECT;
	}

	return val;
}

/* Erratum: the hardware version register is invalid; report SDHCI 2.00. */
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
{
	if (unlikely(reg == SDHCI_HOST_VERSION))
		return SDHCI_SPEC_200;

	return readw(host->ioaddr + reg);
}

/*
 * Register write hook.  Spurious timeout and CRC errors are seen on
 * this controller, so their signalling is disabled; software timers
 * will eventually detect real failures.  Also works around an erratum
 * that requires block gap interrupt detection to track card-interrupt
 * enablement.
 */
static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);

	writel(val, host->ioaddr + reg);

	if (unlikely(reg == SDHCI_INT_ENABLE)) {
		/* Erratum: Must enable block gap interrupt detection */
		u8 gap = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);

		if (val & SDHCI_INT_CARD_INT)
			gap |= 0x8;
		else
			gap &= ~0x8;

		writeb(gap, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
	}
}

/*
 * Read the write-protect switch via the platform-provided GPIO.
 * Returns -1 when no WP GPIO is wired up.
 */
static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
	struct tegra_sdhci_platform_data *plat = pdev->dev.platform_data;

	if (!gpio_is_valid(plat->wp_gpio))
		return -1;

	return gpio_get_value(plat->wp_gpio);
}

/* Card-detect GPIO edge: let the core re-scan via its card tasklet. */
static irqreturn_t carddetect_irq(int irq, void *data)
{
	struct sdhci_host *sdhost = (struct sdhci_host *)data;

	tasklet_schedule(&sdhost->card_tasklet);
	return IRQ_HANDLED;
};

/*
 * Bus-width hook: select 8-bit mode only when the platform says the
 * slot is wired for it; otherwise fall back to 4- or 1-bit.
 */
static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct tegra_sdhci_platform_data *plat = pdev->dev.platform_data;
	u32 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (bus_width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	return 0;
}

/*
 * Platform init: claim the power/card-detect/write-protect GPIOs
 * described by the platform data, hook the card-detect interrupt and
 * enable the controller clock.  Resources are released in reverse
 * order on failure.
 */
static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
				  struct sdhci_pltfm_data *pdata)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct tegra_sdhci_platform_data *plat = pdev->dev.platform_data;
	struct clk *clk;
	int err;

	if (plat == NULL) {
		dev_err(mmc_dev(host->mmc), "missing platform data\n");
		return -ENXIO;
	}

	if (gpio_is_valid(plat->power_gpio)) {
		err = gpio_request(plat->power_gpio, "sdhci_power");
		if (err) {
			dev_err(mmc_dev(host->mmc),
				"failed to allocate power gpio\n");
			goto err_out;
		}
		tegra_gpio_enable(plat->power_gpio);
		gpio_direction_output(plat->power_gpio, 1);
	}

	if (gpio_is_valid(plat->cd_gpio)) {
		err = gpio_request(plat->cd_gpio, "sdhci_cd");
		if (err) {
			dev_err(mmc_dev(host->mmc),
				"failed to allocate cd gpio\n");
			goto err_power;
		}
		tegra_gpio_enable(plat->cd_gpio);
		gpio_direction_input(plat->cd_gpio);

		/* Fire on both insert and removal edges */
		err = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
				  IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
				  mmc_hostname(host->mmc), host);
		if (err) {
			dev_err(mmc_dev(host->mmc), "request irq error\n");
			goto err_cd;
		}
	}

	if (gpio_is_valid(plat->wp_gpio)) {
		err = gpio_request(plat->wp_gpio, "sdhci_wp");
		if (err) {
			dev_err(mmc_dev(host->mmc),
				"failed to allocate wp gpio\n");
			goto err_cd;
		}
		tegra_gpio_enable(plat->wp_gpio);
		gpio_direction_input(plat->wp_gpio);
	}

	clk = clk_get(mmc_dev(host->mmc), NULL);
	if (IS_ERR(clk)) {
		dev_err(mmc_dev(host->mmc), "clk err\n");
		err = PTR_ERR(clk);
		goto err_wp;
	}
	clk_enable(clk);
	pltfm_host->clk = clk;

	if (plat->is_8bit)
		host->mmc->caps |= MMC_CAP_8_BIT_DATA;

	return 0;

err_wp:
	if (gpio_is_valid(plat->wp_gpio)) {
		tegra_gpio_disable(plat->wp_gpio);
		gpio_free(plat->wp_gpio);
	}

err_cd:
	if (gpio_is_valid(plat->cd_gpio)) {
		tegra_gpio_disable(plat->cd_gpio);
		gpio_free(plat->cd_gpio);
	}

err_power:
	if (gpio_is_valid(plat->power_gpio)) {
		tegra_gpio_disable(plat->power_gpio);
		gpio_free(plat->power_gpio);
	}

err_out:
	return err;
}

/* Undo tegra_sdhci_pltfm_init(): release GPIOs and the clock. */
static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct tegra_sdhci_platform_data *plat = pdev->dev.platform_data;

	if (gpio_is_valid(plat->wp_gpio)) {
		tegra_gpio_disable(plat->wp_gpio);
		gpio_free(plat->wp_gpio);
	}

	if (gpio_is_valid(plat->cd_gpio)) {
		tegra_gpio_disable(plat->cd_gpio);
		gpio_free(plat->cd_gpio);
	}

	if (gpio_is_valid(plat->power_gpio)) {
		tegra_gpio_disable(plat->power_gpio);
		gpio_free(plat->power_gpio);
	}

	clk_disable(pltfm_host->clk);
	clk_put(pltfm_host->clk);
}

static struct sdhci_ops tegra_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_l     = tegra_sdhci_readl,
	.read_w     = tegra_sdhci_readw,
	.write_l    = tegra_sdhci_writel,
	.platform_8bit_width = tegra_sdhci_8bit,
};

/* Referenced by the sdhci-pltfm glue; intentionally not static. */
struct sdhci_pltfm_data sdhci_tegra_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
	.ops  = &tegra_sdhci_ops,
	.init = tegra_sdhci_pltfm_init,
	.exit = tegra_sdhci_pltfm_exit,
};
gpl-2.0
smarkwell/asuswrt-merlin
release/src/router/samba36/lib/zlib/contrib/untgz/untgz.c
1638
16542
/* * untgz.c -- Display contents and extract files from a gzip'd TAR file * * written by Pedro A. Aranda Gutierrez <paag@tid.es> * adaptation to Unix by Jean-loup Gailly <jloup@gzip.org> * various fixes by Cosmin Truta <cosmint@cs.ubbcluj.ro> */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <errno.h> #include "zlib.h" #ifdef unix # include <unistd.h> #else # include <direct.h> # include <io.h> #endif #ifdef WIN32 #include <windows.h> # ifndef F_OK # define F_OK 0 # endif # define mkdir(dirname,mode) _mkdir(dirname) # ifdef _MSC_VER # define access(path,mode) _access(path,mode) # define chmod(path,mode) _chmod(path,mode) # define strdup(str) _strdup(str) # endif #else # include <utime.h> #endif /* values used in typeflag field */ #define REGTYPE '0' /* regular file */ #define AREGTYPE '\0' /* regular file */ #define LNKTYPE '1' /* link */ #define SYMTYPE '2' /* reserved */ #define CHRTYPE '3' /* character special */ #define BLKTYPE '4' /* block special */ #define DIRTYPE '5' /* directory */ #define FIFOTYPE '6' /* FIFO special */ #define CONTTYPE '7' /* reserved */ /* GNU tar extensions */ #define GNUTYPE_DUMPDIR 'D' /* file names from dumped directory */ #define GNUTYPE_LONGLINK 'K' /* long link name */ #define GNUTYPE_LONGNAME 'L' /* long file name */ #define GNUTYPE_MULTIVOL 'M' /* continuation of file from another volume */ #define GNUTYPE_NAMES 'N' /* file name that does not fit into main hdr */ #define GNUTYPE_SPARSE 'S' /* sparse file */ #define GNUTYPE_VOLHDR 'V' /* tape/volume header */ /* tar header */ #define BLOCKSIZE 512 #define SHORTNAMESIZE 100 struct tar_header { /* byte offset */ char name[100]; /* 0 */ char mode[8]; /* 100 */ char uid[8]; /* 108 */ char gid[8]; /* 116 */ char size[12]; /* 124 */ char mtime[12]; /* 136 */ char chksum[8]; /* 148 */ char typeflag; /* 156 */ char linkname[100]; /* 157 */ char magic[6]; /* 257 */ char version[2]; /* 263 */ char uname[32]; /* 265 */ char gname[32]; /* 297 */ char 
devmajor[8]; /* 329 */ char devminor[8]; /* 337 */ char prefix[155]; /* 345 */ /* 500 */ }; union tar_buffer { char buffer[BLOCKSIZE]; struct tar_header header; }; struct attr_item { struct attr_item *next; char *fname; int mode; time_t time; }; enum { TGZ_EXTRACT, TGZ_LIST, TGZ_INVALID }; char *TGZfname OF((const char *)); void TGZnotfound OF((const char *)); int getoct OF((char *, int)); char *strtime OF((time_t *)); int setfiletime OF((char *, time_t)); void push_attr OF((struct attr_item **, char *, int, time_t)); void restore_attr OF((struct attr_item **)); int ExprMatch OF((char *, char *)); int makedir OF((char *)); int matchname OF((int, int, char **, char *)); void error OF((const char *)); int tar OF((gzFile, int, int, int, char **)); void help OF((int)); int main OF((int, char **)); char *prog; const char *TGZsuffix[] = { "\0", ".tar", ".tar.gz", ".taz", ".tgz", NULL }; /* return the file name of the TGZ archive */ /* or NULL if it does not exist */ char *TGZfname (const char *arcname) { static char buffer[1024]; int origlen,i; strcpy(buffer,arcname); origlen = strlen(buffer); for (i=0; TGZsuffix[i]; i++) { strcpy(buffer+origlen,TGZsuffix[i]); if (access(buffer,F_OK) == 0) return buffer; } return NULL; } /* error message for the filename */ void TGZnotfound (const char *arcname) { int i; fprintf(stderr,"%s: Couldn't find ",prog); for (i=0;TGZsuffix[i];i++) fprintf(stderr,(TGZsuffix[i+1]) ? 
"%s%s, " : "or %s%s\n", arcname, TGZsuffix[i]); exit(1); } /* convert octal digits to int */ /* on error return -1 */ int getoct (char *p,int width) { int result = 0; char c; while (width--) { c = *p++; if (c == 0) break; if (c == ' ') continue; if (c < '0' || c > '7') return -1; result = result * 8 + (c - '0'); } return result; } /* convert time_t to string */ /* use the "YYYY/MM/DD hh:mm:ss" format */ char *strtime (time_t *t) { struct tm *local; static char result[32]; local = localtime(t); sprintf(result,"%4d/%02d/%02d %02d:%02d:%02d", local->tm_year+1900, local->tm_mon+1, local->tm_mday, local->tm_hour, local->tm_min, local->tm_sec); return result; } /* set file time */ int setfiletime (char *fname,time_t ftime) { #ifdef WIN32 static int isWinNT = -1; SYSTEMTIME st; FILETIME locft, modft; struct tm *loctm; HANDLE hFile; int result; loctm = localtime(&ftime); if (loctm == NULL) return -1; st.wYear = (WORD)loctm->tm_year + 1900; st.wMonth = (WORD)loctm->tm_mon + 1; st.wDayOfWeek = (WORD)loctm->tm_wday; st.wDay = (WORD)loctm->tm_mday; st.wHour = (WORD)loctm->tm_hour; st.wMinute = (WORD)loctm->tm_min; st.wSecond = (WORD)loctm->tm_sec; st.wMilliseconds = 0; if (!SystemTimeToFileTime(&st, &locft) || !LocalFileTimeToFileTime(&locft, &modft)) return -1; if (isWinNT < 0) isWinNT = (GetVersion() < 0x80000000) ? 1 : 0; hFile = CreateFile(fname, GENERIC_WRITE, 0, NULL, OPEN_EXISTING, (isWinNT ? FILE_FLAG_BACKUP_SEMANTICS : 0), NULL); if (hFile == INVALID_HANDLE_VALUE) return -1; result = SetFileTime(hFile, NULL, NULL, &modft) ? 
0 : -1; CloseHandle(hFile); return result; #else struct utimbuf settime; settime.actime = settime.modtime = ftime; return utime(fname,&settime); #endif } /* push file attributes */ void push_attr(struct attr_item **list,char *fname,int mode,time_t time) { struct attr_item *item; item = (struct attr_item *)malloc(sizeof(struct attr_item)); if (item == NULL) error("Out of memory"); item->fname = strdup(fname); item->mode = mode; item->time = time; item->next = *list; *list = item; } /* restore file attributes */ void restore_attr(struct attr_item **list) { struct attr_item *item, *prev; for (item = *list; item != NULL; ) { setfiletime(item->fname,item->time); chmod(item->fname,item->mode); prev = item; item = item->next; free(prev); } *list = NULL; } /* match regular expression */ #define ISSPECIAL(c) (((c) == '*') || ((c) == '/')) int ExprMatch (char *string,char *expr) { while (1) { if (ISSPECIAL(*expr)) { if (*expr == '/') { if (*string != '\\' && *string != '/') return 0; string ++; expr++; } else if (*expr == '*') { if (*expr ++ == 0) return 1; while (*++string != *expr) if (*string == 0) return 0; } } else { if (*string != *expr) return 0; if (*expr++ == 0) return 1; string++; } } } /* recursive mkdir */ /* abort on ENOENT; ignore other errors like "directory already exists" */ /* return 1 if OK */ /* 0 on error */ int makedir (char *newdir) { char *buffer = strdup(newdir); char *p; int len = strlen(buffer); if (len <= 0) { free(buffer); return 0; } if (buffer[len-1] == '/') { buffer[len-1] = '\0'; } if (mkdir(buffer, 0755) == 0) { free(buffer); return 1; } p = buffer+1; while (1) { char hold; while(*p && *p != '\\' && *p != '/') p++; hold = *p; *p = 0; if ((mkdir(buffer, 0755) == -1) && (errno == ENOENT)) { fprintf(stderr,"%s: Couldn't create directory %s\n",prog,buffer); free(buffer); return 0; } if (hold == 0) break; *p++ = hold; } free(buffer); return 1; } int matchname (int arg,int argc,char **argv,char *fname) { if (arg == argc) /* no arguments given 
(untgz tgzarchive) */ return 1; while (arg < argc) if (ExprMatch(fname,argv[arg++])) return 1; return 0; /* ignore this for the moment being */ } /* tar file list or extract */ int tar (gzFile in,int action,int arg,int argc,char **argv) { union tar_buffer buffer; int len; int err; int getheader = 1; int remaining = 0; FILE *outfile = NULL; char fname[BLOCKSIZE]; int tarmode; time_t tartime; struct attr_item *attributes = NULL; if (action == TGZ_LIST) printf(" date time size file\n" " ---------- -------- --------- -------------------------------------\n"); while (1) { len = gzread(in, &buffer, BLOCKSIZE); if (len < 0) error(gzerror(in, &err)); /* * Always expect complete blocks to process * the tar information. */ if (len != BLOCKSIZE) { action = TGZ_INVALID; /* force error exit */ remaining = 0; /* force I/O cleanup */ } /* * If we have to get a tar header */ if (getheader >= 1) { /* * if we met the end of the tar * or the end-of-tar block, * we are done */ if (len == 0 || buffer.header.name[0] == 0) break; tarmode = getoct(buffer.header.mode,8); tartime = (time_t)getoct(buffer.header.mtime,12); if (tarmode == -1 || tartime == (time_t)-1) { buffer.header.name[0] = 0; action = TGZ_INVALID; } if (getheader == 1) { strncpy(fname,buffer.header.name,SHORTNAMESIZE); if (fname[SHORTNAMESIZE-1] != 0) fname[SHORTNAMESIZE] = 0; } else { /* * The file name is longer than SHORTNAMESIZE */ if (strncmp(fname,buffer.header.name,SHORTNAMESIZE-1) != 0) error("bad long name"); getheader = 1; } /* * Act according to the type flag */ switch (buffer.header.typeflag) { case DIRTYPE: if (action == TGZ_LIST) printf(" %s <dir> %s\n",strtime(&tartime),fname); if (action == TGZ_EXTRACT) { makedir(fname); push_attr(&attributes,fname,tarmode,tartime); } break; case REGTYPE: case AREGTYPE: remaining = getoct(buffer.header.size,12); if (remaining == -1) { action = TGZ_INVALID; break; } if (action == TGZ_LIST) printf(" %s %9d %s\n",strtime(&tartime),remaining,fname); else if (action == 
TGZ_EXTRACT) { if (matchname(arg,argc,argv,fname)) { outfile = fopen(fname,"wb"); if (outfile == NULL) { /* try creating directory */ char *p = strrchr(fname, '/'); if (p != NULL) { *p = '\0'; makedir(fname); *p = '/'; outfile = fopen(fname,"wb"); } } if (outfile != NULL) printf("Extracting %s\n",fname); else fprintf(stderr, "%s: Couldn't create %s",prog,fname); } else outfile = NULL; } getheader = 0; break; case GNUTYPE_LONGLINK: case GNUTYPE_LONGNAME: remaining = getoct(buffer.header.size,12); if (remaining < 0 || remaining >= BLOCKSIZE) { action = TGZ_INVALID; break; } len = gzread(in, fname, BLOCKSIZE); if (len < 0) error(gzerror(in, &err)); if (fname[BLOCKSIZE-1] != 0 || (int)strlen(fname) > remaining) { action = TGZ_INVALID; break; } getheader = 2; break; default: if (action == TGZ_LIST) printf(" %s <---> %s\n",strtime(&tartime),fname); break; } } else { unsigned int bytes = (remaining > BLOCKSIZE) ? BLOCKSIZE : remaining; if (outfile != NULL) { if (fwrite(&buffer,sizeof(char),bytes,outfile) != bytes) { fprintf(stderr, "%s: Error writing %s -- skipping\n",prog,fname); fclose(outfile); outfile = NULL; remove(fname); } } remaining -= bytes; } if (remaining == 0) { getheader = 1; if (outfile != NULL) { fclose(outfile); outfile = NULL; if (action != TGZ_INVALID) push_attr(&attributes,fname,tarmode,tartime); } } /* * Abandon if errors are found */ if (action == TGZ_INVALID) { error("broken archive"); break; } } /* * Restore file modes and time stamps */ restore_attr(&attributes); if (gzclose(in) != Z_OK) error("failed gzclose"); return 0; } /* ============================================================ */ void help(int exitval) { printf("untgz version 0.2.1\n" " using zlib version %s\n\n", zlibVersion()); printf("Usage: untgz file.tgz extract all files\n" " untgz file.tgz fname ... 
extract selected files\n" " untgz -l file.tgz list archive contents\n" " untgz -h display this help\n"); exit(exitval); } void error(const char *msg) { fprintf(stderr, "%s: %s\n", prog, msg); exit(1); } /* ============================================================ */ #if defined(WIN32) && defined(__GNUC__) int _CRT_glob = 0; /* disable argument globbing in MinGW */ #endif int main(int argc,char **argv) { int action = TGZ_EXTRACT; int arg = 1; char *TGZfile; gzFile *f; prog = strrchr(argv[0],'\\'); if (prog == NULL) { prog = strrchr(argv[0],'/'); if (prog == NULL) { prog = strrchr(argv[0],':'); if (prog == NULL) prog = argv[0]; else prog++; } else prog++; } else prog++; if (argc == 1) help(0); if (strcmp(argv[arg],"-l") == 0) { action = TGZ_LIST; if (argc == ++arg) help(0); } else if (strcmp(argv[arg],"-h") == 0) { help(0); } if ((TGZfile = TGZfname(argv[arg])) == NULL) TGZnotfound(argv[arg]); ++arg; if ((action == TGZ_LIST) && (arg != argc)) help(1); /* * Process the TGZ file */ switch(action) { case TGZ_LIST: case TGZ_EXTRACT: f = gzopen(TGZfile,"rb"); if (f == NULL) { fprintf(stderr,"%s: Couldn't gzopen %s\n",prog,TGZfile); return 1; } exit(tar(f, action, arg, argc, argv)); break; default: error("Unknown option"); exit(1); } return 0; }
gpl-2.0
dineshram/linux-media-si4713USBDriver
fs/nfs/read.c
2150
17916
/* * linux/fs/nfs/read.c * * Block I/O for NFS * * Partial copy of Linus' read cache modifications to fs/nfs/file.c * modified for async RPC by okir@monad.swb.de */ #include <linux/time.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/sunrpc/clnt.h> #include <linux/nfs_fs.h> #include <linux/nfs_page.h> #include <linux/module.h> #include "nfs4_fs.h" #include "internal.h" #include "iostat.h" #include "fscache.h" #define NFSDBG_FACILITY NFSDBG_PAGECACHE static const struct nfs_pageio_ops nfs_pageio_read_ops; static const struct rpc_call_ops nfs_read_common_ops; static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops; static struct kmem_cache *nfs_rdata_cachep; struct nfs_read_header *nfs_readhdr_alloc(void) { struct nfs_read_header *rhdr; rhdr = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL); if (rhdr) { struct nfs_pgio_header *hdr = &rhdr->header; INIT_LIST_HEAD(&hdr->pages); INIT_LIST_HEAD(&hdr->rpc_list); spin_lock_init(&hdr->lock); atomic_set(&hdr->refcnt, 0); } return rhdr; } EXPORT_SYMBOL_GPL(nfs_readhdr_alloc); static struct nfs_read_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr, unsigned int pagecount) { struct nfs_read_data *data, *prealloc; prealloc = &container_of(hdr, struct nfs_read_header, header)->rpc_data; if (prealloc->header == NULL) data = prealloc; else data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) goto out; if (nfs_pgarray_set(&data->pages, pagecount)) { data->header = hdr; atomic_inc(&hdr->refcnt); } else { if (data != prealloc) kfree(data); data = NULL; } out: return data; } void nfs_readhdr_free(struct nfs_pgio_header *hdr) { struct nfs_read_header *rhdr = container_of(hdr, struct nfs_read_header, header); kmem_cache_free(nfs_rdata_cachep, rhdr); } EXPORT_SYMBOL_GPL(nfs_readhdr_free); void nfs_readdata_release(struct nfs_read_data *rdata) { struct nfs_pgio_header *hdr = 
rdata->header; struct nfs_read_header *read_header = container_of(hdr, struct nfs_read_header, header); put_nfs_open_context(rdata->args.context); if (rdata->pages.pagevec != rdata->pages.page_array) kfree(rdata->pages.pagevec); if (rdata == &read_header->rpc_data) { rdata->header = NULL; rdata = NULL; } if (atomic_dec_and_test(&hdr->refcnt)) hdr->completion_ops->completion(hdr); /* Note: we only free the rpc_task after callbacks are done. * See the comment in rpc_free_task() for why */ kfree(rdata); } EXPORT_SYMBOL_GPL(nfs_readdata_release); static int nfs_return_empty_page(struct page *page) { zero_user(page, 0, PAGE_CACHE_SIZE); SetPageUptodate(page); unlock_page(page); return 0; } void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode, const struct nfs_pgio_completion_ops *compl_ops) { nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, compl_ops, NFS_SERVER(inode)->rsize, 0); } EXPORT_SYMBOL_GPL(nfs_pageio_init_read); void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio) { pgio->pg_ops = &nfs_pageio_read_ops; pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize; } EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds); int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, struct page *page) { struct nfs_page *new; unsigned int len; struct nfs_pageio_descriptor pgio; len = nfs_page_length(page); if (len == 0) return nfs_return_empty_page(page); new = nfs_create_request(ctx, inode, page, 0, len); if (IS_ERR(new)) { unlock_page(page); return PTR_ERR(new); } if (len < PAGE_CACHE_SIZE) zero_user_segment(page, len, PAGE_CACHE_SIZE); NFS_PROTO(inode)->read_pageio_init(&pgio, inode, &nfs_async_read_completion_ops); nfs_pageio_add_request(&pgio, new); nfs_pageio_complete(&pgio); NFS_I(inode)->read_io += pgio.pg_bytes_written; return 0; } static void nfs_readpage_release(struct nfs_page *req) { struct inode *d_inode = req->wb_context->dentry->d_inode; if (PageUptodate(req->wb_page)) nfs_readpage_to_fscache(d_inode, 
req->wb_page, 0); unlock_page(req->wb_page); dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", req->wb_context->dentry->d_inode->i_sb->s_id, (long long)NFS_FILEID(req->wb_context->dentry->d_inode), req->wb_bytes, (long long)req_offset(req)); nfs_release_request(req); } /* Note io was page aligned */ static void nfs_read_completion(struct nfs_pgio_header *hdr) { unsigned long bytes = 0; if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) goto out; while (!list_empty(&hdr->pages)) { struct nfs_page *req = nfs_list_entry(hdr->pages.next); struct page *page = req->wb_page; if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) { if (bytes > hdr->good_bytes) zero_user(page, 0, PAGE_SIZE); else if (hdr->good_bytes - bytes < PAGE_SIZE) zero_user_segment(page, hdr->good_bytes & ~PAGE_MASK, PAGE_SIZE); } bytes += req->wb_bytes; if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { if (bytes <= hdr->good_bytes) SetPageUptodate(page); } else SetPageUptodate(page); nfs_list_remove_request(req); nfs_readpage_release(req); } out: hdr->release(hdr); } int nfs_initiate_read(struct rpc_clnt *clnt, struct nfs_read_data *data, const struct rpc_call_ops *call_ops, int flags) { struct inode *inode = data->header->inode; int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0; struct rpc_task *task; struct rpc_message msg = { .rpc_argp = &data->args, .rpc_resp = &data->res, .rpc_cred = data->header->cred, }; struct rpc_task_setup task_setup_data = { .task = &data->task, .rpc_client = clnt, .rpc_message = &msg, .callback_ops = call_ops, .callback_data = data, .workqueue = nfsiod_workqueue, .flags = RPC_TASK_ASYNC | swap_flags | flags, }; /* Set up the initial task struct. 
*/ NFS_PROTO(inode)->read_setup(data, &msg); dprintk("NFS: %5u initiated read call (req %s/%lld, %u bytes @ " "offset %llu)\n", data->task.tk_pid, inode->i_sb->s_id, (long long)NFS_FILEID(inode), data->args.count, (unsigned long long)data->args.offset); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); rpc_put_task(task); return 0; } EXPORT_SYMBOL_GPL(nfs_initiate_read); /* * Set up the NFS read request struct */ static void nfs_read_rpcsetup(struct nfs_read_data *data, unsigned int count, unsigned int offset) { struct nfs_page *req = data->header->req; data->args.fh = NFS_FH(data->header->inode); data->args.offset = req_offset(req) + offset; data->args.pgbase = req->wb_pgbase + offset; data->args.pages = data->pages.pagevec; data->args.count = count; data->args.context = get_nfs_open_context(req->wb_context); data->args.lock_context = req->wb_lock_context; data->res.fattr = &data->fattr; data->res.count = count; data->res.eof = 0; nfs_fattr_init(&data->fattr); } static int nfs_do_read(struct nfs_read_data *data, const struct rpc_call_ops *call_ops) { struct inode *inode = data->header->inode; return nfs_initiate_read(NFS_CLIENT(inode), data, call_ops, 0); } static int nfs_do_multiple_reads(struct list_head *head, const struct rpc_call_ops *call_ops) { struct nfs_read_data *data; int ret = 0; while (!list_empty(head)) { int ret2; data = list_first_entry(head, struct nfs_read_data, list); list_del_init(&data->list); ret2 = nfs_do_read(data, call_ops); if (ret == 0) ret = ret2; } return ret; } static void nfs_async_read_error(struct list_head *head) { struct nfs_page *req; while (!list_empty(head)) { req = nfs_list_entry(head->next); nfs_list_remove_request(req); nfs_readpage_release(req); } } static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = { .error_cleanup = nfs_async_read_error, .completion = nfs_read_completion, }; static void nfs_pagein_error(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) 
{ set_bit(NFS_IOHDR_REDO, &hdr->flags); while (!list_empty(&hdr->rpc_list)) { struct nfs_read_data *data = list_first_entry(&hdr->rpc_list, struct nfs_read_data, list); list_del(&data->list); nfs_readdata_release(data); } desc->pg_completion_ops->error_cleanup(&desc->pg_list); } /* * Generate multiple requests to fill a single page. * * We optimize to reduce the number of read operations on the wire. If we * detect that we're reading a page, or an area of a page, that is past the * end of file, we do not generate NFS read operations but just clear the * parts of the page that would have come back zero from the server anyway. * * We rely on the cached value of i_size to make this determination; another * client can fill pages on the server past our cached end-of-file, but we * won't see the new data until our attribute cache is updated. This is more * or less conventional NFS client behavior. */ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { struct nfs_page *req = hdr->req; struct page *page = req->wb_page; struct nfs_read_data *data; size_t rsize = desc->pg_bsize, nbytes; unsigned int offset; offset = 0; nbytes = desc->pg_count; do { size_t len = min(nbytes,rsize); data = nfs_readdata_alloc(hdr, 1); if (!data) { nfs_pagein_error(desc, hdr); return -ENOMEM; } data->pages.pagevec[0] = page; nfs_read_rpcsetup(data, len, offset); list_add(&data->list, &hdr->rpc_list); nbytes -= len; offset += len; } while (nbytes != 0); nfs_list_remove_request(req); nfs_list_add_request(req, &hdr->pages); desc->pg_rpc_callops = &nfs_read_common_ops; return 0; } static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { struct nfs_page *req; struct page **pages; struct nfs_read_data *data; struct list_head *head = &desc->pg_list; data = nfs_readdata_alloc(hdr, nfs_page_array_len(desc->pg_base, desc->pg_count)); if (!data) { nfs_pagein_error(desc, hdr); return -ENOMEM; } pages = data->pages.pagevec; while 
(!list_empty(head)) { req = nfs_list_entry(head->next); nfs_list_remove_request(req); nfs_list_add_request(req, &hdr->pages); *pages++ = req->wb_page; } nfs_read_rpcsetup(data, desc->pg_count, 0); list_add(&data->list, &hdr->rpc_list); desc->pg_rpc_callops = &nfs_read_common_ops; return 0; } int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { if (desc->pg_bsize < PAGE_CACHE_SIZE) return nfs_pagein_multi(desc, hdr); return nfs_pagein_one(desc, hdr); } EXPORT_SYMBOL_GPL(nfs_generic_pagein); static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc) { struct nfs_read_header *rhdr; struct nfs_pgio_header *hdr; int ret; rhdr = nfs_readhdr_alloc(); if (!rhdr) { desc->pg_completion_ops->error_cleanup(&desc->pg_list); return -ENOMEM; } hdr = &rhdr->header; nfs_pgheader_init(desc, hdr, nfs_readhdr_free); atomic_inc(&hdr->refcnt); ret = nfs_generic_pagein(desc, hdr); if (ret == 0) ret = nfs_do_multiple_reads(&hdr->rpc_list, desc->pg_rpc_callops); if (atomic_dec_and_test(&hdr->refcnt)) hdr->completion_ops->completion(hdr); return ret; } static const struct nfs_pageio_ops nfs_pageio_read_ops = { .pg_test = nfs_generic_pg_test, .pg_doio = nfs_generic_pg_readpages, }; /* * This is the callback from RPC telling us whether a reply was * received or some error occurred (timeout or socket shutdown). 
*/ int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data) { struct inode *inode = data->header->inode; int status; dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid, task->tk_status); status = NFS_PROTO(inode)->read_done(task, data); if (status != 0) return status; nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count); if (task->tk_status == -ESTALE) { set_bit(NFS_INO_STALE, &NFS_I(inode)->flags); nfs_mark_for_revalidate(inode); } return 0; } static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data) { struct nfs_readargs *argp = &data->args; struct nfs_readres *resp = &data->res; /* This is a short read! */ nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD); /* Has the server at least made some progress? */ if (resp->count == 0) { nfs_set_pgio_error(data->header, -EIO, argp->offset); return; } /* Yes, so retry the read at the end of the data */ data->mds_offset += resp->count; argp->offset += resp->count; argp->pgbase += resp->count; argp->count -= resp->count; rpc_restart_call_prepare(task); } static void nfs_readpage_result_common(struct rpc_task *task, void *calldata) { struct nfs_read_data *data = calldata; struct nfs_pgio_header *hdr = data->header; /* Note the only returns of nfs_readpage_result are 0 and -EAGAIN */ if (nfs_readpage_result(task, data) != 0) return; if (task->tk_status < 0) nfs_set_pgio_error(hdr, task->tk_status, data->args.offset); else if (data->res.eof) { loff_t bound; bound = data->args.offset + data->res.count; spin_lock(&hdr->lock); if (bound < hdr->io_start + hdr->good_bytes) { set_bit(NFS_IOHDR_EOF, &hdr->flags); clear_bit(NFS_IOHDR_ERROR, &hdr->flags); hdr->good_bytes = bound - hdr->io_start; } spin_unlock(&hdr->lock); } else if (data->res.count != data->args.count) nfs_readpage_retry(task, data); } static void nfs_readpage_release_common(void *calldata) { nfs_readdata_release(calldata); } void nfs_read_prepare(struct rpc_task *task, void *calldata) { struct 
nfs_read_data *data = calldata; NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data); if (unlikely(test_bit(NFS_CONTEXT_BAD, &data->args.context->flags))) rpc_exit(task, -EIO); } static const struct rpc_call_ops nfs_read_common_ops = { .rpc_call_prepare = nfs_read_prepare, .rpc_call_done = nfs_readpage_result_common, .rpc_release = nfs_readpage_release_common, }; /* * Read a page over NFS. * We read the page synchronously in the following case: * - The error flag is set for this page. This happens only when a * previous async read operation failed. */ int nfs_readpage(struct file *file, struct page *page) { struct nfs_open_context *ctx; struct inode *inode = page_file_mapping(page)->host; int error; dprintk("NFS: nfs_readpage (%p %ld@%lu)\n", page, PAGE_CACHE_SIZE, page_file_index(page)); nfs_inc_stats(inode, NFSIOS_VFSREADPAGE); nfs_add_stats(inode, NFSIOS_READPAGES, 1); /* * Try to flush any pending writes to the file.. * * NOTE! Because we own the page lock, there cannot * be any new pending writes generated at this point * for this page (other pages can be written to). 
*/ error = nfs_wb_page(inode, page); if (error) goto out_unlock; if (PageUptodate(page)) goto out_unlock; error = -ESTALE; if (NFS_STALE(inode)) goto out_unlock; if (file == NULL) { error = -EBADF; ctx = nfs_find_open_context(inode, NULL, FMODE_READ); if (ctx == NULL) goto out_unlock; } else ctx = get_nfs_open_context(nfs_file_open_context(file)); if (!IS_SYNC(inode)) { error = nfs_readpage_from_fscache(ctx, inode, page); if (error == 0) goto out; } error = nfs_readpage_async(ctx, inode, page); out: put_nfs_open_context(ctx); return error; out_unlock: unlock_page(page); return error; } struct nfs_readdesc { struct nfs_pageio_descriptor *pgio; struct nfs_open_context *ctx; }; static int readpage_async_filler(void *data, struct page *page) { struct nfs_readdesc *desc = (struct nfs_readdesc *)data; struct inode *inode = page_file_mapping(page)->host; struct nfs_page *new; unsigned int len; int error; len = nfs_page_length(page); if (len == 0) return nfs_return_empty_page(page); new = nfs_create_request(desc->ctx, inode, page, 0, len); if (IS_ERR(new)) goto out_error; if (len < PAGE_CACHE_SIZE) zero_user_segment(page, len, PAGE_CACHE_SIZE); if (!nfs_pageio_add_request(desc->pgio, new)) { error = desc->pgio->pg_error; goto out_unlock; } return 0; out_error: error = PTR_ERR(new); out_unlock: unlock_page(page); return error; } int nfs_readpages(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { struct nfs_pageio_descriptor pgio; struct nfs_readdesc desc = { .pgio = &pgio, }; struct inode *inode = mapping->host; unsigned long npages; int ret = -ESTALE; dprintk("NFS: nfs_readpages (%s/%Ld %d)\n", inode->i_sb->s_id, (long long)NFS_FILEID(inode), nr_pages); nfs_inc_stats(inode, NFSIOS_VFSREADPAGES); if (NFS_STALE(inode)) goto out; if (filp == NULL) { desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ); if (desc.ctx == NULL) return -EBADF; } else desc.ctx = get_nfs_open_context(nfs_file_open_context(filp)); /* attempt to read 
as many of the pages as possible from the cache * - this returns -ENOBUFS immediately if the cookie is negative */ ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping, pages, &nr_pages); if (ret == 0) goto read_complete; /* all pages were read */ NFS_PROTO(inode)->read_pageio_init(&pgio, inode, &nfs_async_read_completion_ops); ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); nfs_pageio_complete(&pgio); NFS_I(inode)->read_io += pgio.pg_bytes_written; npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; nfs_add_stats(inode, NFSIOS_READPAGES, npages); read_complete: put_nfs_open_context(desc.ctx); out: return ret; } int __init nfs_init_readpagecache(void) { nfs_rdata_cachep = kmem_cache_create("nfs_read_data", sizeof(struct nfs_read_header), 0, SLAB_HWCACHE_ALIGN, NULL); if (nfs_rdata_cachep == NULL) return -ENOMEM; return 0; } void nfs_destroy_readpagecache(void) { kmem_cache_destroy(nfs_rdata_cachep); }
gpl-2.0
gautamMalu/linux-samsung-arndale-xen
drivers/net/ethernet/chelsio/cxgb/subr.c
2150
31316
/***************************************************************************** * * * File: subr.c * * $Revision: 1.27 $ * * $Date: 2005/06/22 01:08:36 $ * * Description: * * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. * * part of the Chelsio 10Gb Ethernet Driver. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License, version 2, as * * published by the Free Software Foundation. * * * * You should have received a copy of the GNU General Public License along * * with this program; if not, see <http://www.gnu.org/licenses/>. * * * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * * * http://www.chelsio.com * * * * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * * All rights reserved. * * * * Maintainers: maintainers@chelsio.com * * * * Authors: Dimitrios Michailidis <dm@chelsio.com> * * Tina Yang <tainay@chelsio.com> * * Felix Marti <felix@chelsio.com> * * Scott Bardone <sbardone@chelsio.com> * * Kurt Ottaway <kottaway@chelsio.com> * * Frank DiMambro <frank@chelsio.com> * * * * History: * * * ****************************************************************************/ #include "common.h" #include "elmer0.h" #include "regs.h" #include "gmac.h" #include "cphy.h" #include "sge.h" #include "tp.h" #include "espi.h" /** * t1_wait_op_done - wait until an operation is completed * @adapter: the adapter performing the operation * @reg: the register to check for completion * @mask: a single-bit field within @reg that indicates completion * @polarity: the value of the field when the operation is completed * @attempts: number of check iterations * @delay: delay in usecs between iterations * * Wait until an operation is completed by checking a bit in a register * up to @attempts times. 
Returns %0 if the operation completes and %1 * otherwise. */ static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity, int attempts, int delay) { while (1) { u32 val = readl(adapter->regs + reg) & mask; if (!!val == polarity) return 0; if (--attempts == 0) return 1; if (delay) udelay(delay); } } #define TPI_ATTEMPTS 50 /* * Write a register over the TPI interface (unlocked and locked versions). */ int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) { int tpi_busy; writel(addr, adapter->regs + A_TPI_ADDR); writel(value, adapter->regs + A_TPI_WR_DATA); writel(F_TPIWR, adapter->regs + A_TPI_CSR); tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, TPI_ATTEMPTS, 3); if (tpi_busy) pr_alert("%s: TPI write to 0x%x failed\n", adapter->name, addr); return tpi_busy; } int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) { int ret; spin_lock(&adapter->tpi_lock); ret = __t1_tpi_write(adapter, addr, value); spin_unlock(&adapter->tpi_lock); return ret; } /* * Read a register over the TPI interface (unlocked and locked versions). */ int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) { int tpi_busy; writel(addr, adapter->regs + A_TPI_ADDR); writel(0, adapter->regs + A_TPI_CSR); tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, TPI_ATTEMPTS, 3); if (tpi_busy) pr_alert("%s: TPI read from 0x%x failed\n", adapter->name, addr); else *valp = readl(adapter->regs + A_TPI_RD_DATA); return tpi_busy; } int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) { int ret; spin_lock(&adapter->tpi_lock); ret = __t1_tpi_read(adapter, addr, valp); spin_unlock(&adapter->tpi_lock); return ret; } /* * Set a TPI parameter. */ static void t1_tpi_par(adapter_t *adapter, u32 value) { writel(V_TPIPAR(value), adapter->regs + A_TPI_PAR); } /* * Called when a port's link settings change to propagate the new values to the * associated PHY and MAC. After performing the common tasks it invokes an * OS-specific handler. 
*/ void t1_link_changed(adapter_t *adapter, int port_id) { int link_ok, speed, duplex, fc; struct cphy *phy = adapter->port[port_id].phy; struct link_config *lc = &adapter->port[port_id].link_config; phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); lc->speed = speed < 0 ? SPEED_INVALID : speed; lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex; if (!(lc->requested_fc & PAUSE_AUTONEG)) fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) { /* Set MAC speed, duplex, and flow control to match PHY. */ struct cmac *mac = adapter->port[port_id].mac; mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc); lc->fc = (unsigned char)fc; } t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc); } static int t1_pci_intr_handler(adapter_t *adapter) { u32 pcix_cause; pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause); if (pcix_cause) { pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, pcix_cause); t1_fatal_err(adapter); /* PCI errors are fatal */ } return 0; } #ifdef CONFIG_CHELSIO_T1_1G #include "fpga_defs.h" /* * PHY interrupt handler for FPGA boards. */ static int fpga_phy_intr_handler(adapter_t *adapter) { int p; u32 cause = readl(adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); for_each_port(adapter, p) if (cause & (1 << p)) { struct cphy *phy = adapter->port[p].phy; int phy_cause = phy->ops->interrupt_handler(phy); if (phy_cause & cphy_cause_link_change) t1_link_changed(adapter, p); } writel(cause, adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); return 0; } /* * Slow path interrupt handler for FPGAs. 
*/ static int fpga_slow_intr(adapter_t *adapter) { u32 cause = readl(adapter->regs + A_PL_CAUSE); cause &= ~F_PL_INTR_SGE_DATA; if (cause & F_PL_INTR_SGE_ERR) t1_sge_intr_error_handler(adapter->sge); if (cause & FPGA_PCIX_INTERRUPT_GMAC) fpga_phy_intr_handler(adapter); if (cause & FPGA_PCIX_INTERRUPT_TP) { /* * FPGA doesn't support MC4 interrupts and it requires * this odd layer of indirection for MC5. */ u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); /* Clear TP interrupt */ writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); } if (cause & FPGA_PCIX_INTERRUPT_PCIX) t1_pci_intr_handler(adapter); /* Clear the interrupts just processed. */ if (cause) writel(cause, adapter->regs + A_PL_CAUSE); return cause != 0; } #endif /* * Wait until Elmer's MI1 interface is ready for new operations. */ static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg) { int attempts = 100, busy; do { u32 val; __t1_tpi_read(adapter, mi1_reg, &val); busy = val & F_MI1_OP_BUSY; if (busy) udelay(10); } while (busy && --attempts); if (busy) pr_alert("%s: MDIO operation timed out\n", adapter->name); return busy; } /* * MI1 MDIO initialization. */ static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi) { u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1; u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) | V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv); if (!(bi->caps & SUPPORTED_10000baseT_Full)) val |= V_MI1_SOF(1); t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); } #if defined(CONFIG_CHELSIO_T1_1G) /* * Elmer MI1 MDIO read/write operations. 
*/ static int mi1_mdio_read(struct net_device *dev, int phy_addr, int mmd_addr, u16 reg_addr) { struct adapter *adapter = dev->ml_priv; u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr); unsigned int val; spin_lock(&adapter->tpi_lock); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ); mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val); spin_unlock(&adapter->tpi_lock); return val; } static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr, u16 reg_addr, u16 val) { struct adapter *adapter = dev->ml_priv; u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr); spin_lock(&adapter->tpi_lock); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_WRITE); mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); spin_unlock(&adapter->tpi_lock); return 0; } static const struct mdio_ops mi1_mdio_ops = { .init = mi1_mdio_init, .read = mi1_mdio_read, .write = mi1_mdio_write, .mode_support = MDIO_SUPPORTS_C22 }; #endif static int mi1_mdio_ext_read(struct net_device *dev, int phy_addr, int mmd_addr, u16 reg_addr) { struct adapter *adapter = dev->ml_priv; u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); unsigned int val; spin_lock(&adapter->tpi_lock); /* Write the address we want. */ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_ADDRESS); mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); /* Write the operation we want. */ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ); mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); /* Read the data. 
*/ __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val); spin_unlock(&adapter->tpi_lock); return val; } static int mi1_mdio_ext_write(struct net_device *dev, int phy_addr, int mmd_addr, u16 reg_addr, u16 val) { struct adapter *adapter = dev->ml_priv; u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); spin_lock(&adapter->tpi_lock); /* Write the address we want. */ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_ADDRESS); mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); /* Write the data. */ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE); mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); spin_unlock(&adapter->tpi_lock); return 0; } static const struct mdio_ops mi1_mdio_ext_ops = { .init = mi1_mdio_init, .read = mi1_mdio_ext_read, .write = mi1_mdio_ext_write, .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22 }; enum { CH_BRD_T110_1CU, CH_BRD_N110_1F, CH_BRD_N210_1F, CH_BRD_T210_1F, CH_BRD_T210_1CU, CH_BRD_N204_4CU, }; static const struct board_info t1_board[] = { { .board = CHBT_BOARD_CHT110, .port_number = 1, .caps = SUPPORTED_10000baseT_Full, .chip_term = CHBT_TERM_T1, .chip_mac = CHBT_MAC_PM3393, .chip_phy = CHBT_PHY_MY3126, .clock_core = 125000000, .clock_mc3 = 150000000, .clock_mc4 = 125000000, .espi_nports = 1, .clock_elmer0 = 44, .mdio_mdien = 1, .mdio_mdiinv = 1, .mdio_mdc = 1, .mdio_phybaseaddr = 1, .gmac = &t1_pm3393_ops, .gphy = &t1_my3126_ops, .mdio_ops = &mi1_mdio_ext_ops, .desc = "Chelsio T110 1x10GBase-CX4 TOE", }, { .board = CHBT_BOARD_N110, .port_number = 1, .caps = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE, .chip_term = CHBT_TERM_T1, .chip_mac = CHBT_MAC_PM3393, .chip_phy = CHBT_PHY_88X2010, .clock_core = 125000000, .espi_nports = 1, .clock_elmer0 = 44, .mdio_mdien = 0, .mdio_mdiinv = 0, .mdio_mdc = 1, 
.mdio_phybaseaddr = 0, .gmac = &t1_pm3393_ops, .gphy = &t1_mv88x201x_ops, .mdio_ops = &mi1_mdio_ext_ops, .desc = "Chelsio N110 1x10GBaseX NIC", }, { .board = CHBT_BOARD_N210, .port_number = 1, .caps = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE, .chip_term = CHBT_TERM_T2, .chip_mac = CHBT_MAC_PM3393, .chip_phy = CHBT_PHY_88X2010, .clock_core = 125000000, .espi_nports = 1, .clock_elmer0 = 44, .mdio_mdien = 0, .mdio_mdiinv = 0, .mdio_mdc = 1, .mdio_phybaseaddr = 0, .gmac = &t1_pm3393_ops, .gphy = &t1_mv88x201x_ops, .mdio_ops = &mi1_mdio_ext_ops, .desc = "Chelsio N210 1x10GBaseX NIC", }, { .board = CHBT_BOARD_CHT210, .port_number = 1, .caps = SUPPORTED_10000baseT_Full, .chip_term = CHBT_TERM_T2, .chip_mac = CHBT_MAC_PM3393, .chip_phy = CHBT_PHY_88X2010, .clock_core = 125000000, .clock_mc3 = 133000000, .clock_mc4 = 125000000, .espi_nports = 1, .clock_elmer0 = 44, .mdio_mdien = 0, .mdio_mdiinv = 0, .mdio_mdc = 1, .mdio_phybaseaddr = 0, .gmac = &t1_pm3393_ops, .gphy = &t1_mv88x201x_ops, .mdio_ops = &mi1_mdio_ext_ops, .desc = "Chelsio T210 1x10GBaseX TOE", }, { .board = CHBT_BOARD_CHT210, .port_number = 1, .caps = SUPPORTED_10000baseT_Full, .chip_term = CHBT_TERM_T2, .chip_mac = CHBT_MAC_PM3393, .chip_phy = CHBT_PHY_MY3126, .clock_core = 125000000, .clock_mc3 = 133000000, .clock_mc4 = 125000000, .espi_nports = 1, .clock_elmer0 = 44, .mdio_mdien = 1, .mdio_mdiinv = 1, .mdio_mdc = 1, .mdio_phybaseaddr = 1, .gmac = &t1_pm3393_ops, .gphy = &t1_my3126_ops, .mdio_ops = &mi1_mdio_ext_ops, .desc = "Chelsio T210 1x10GBase-CX4 TOE", }, #ifdef CONFIG_CHELSIO_T1_1G { .board = CHBT_BOARD_CHN204, .port_number = 4, .caps = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_PAUSE | SUPPORTED_TP, .chip_term = CHBT_TERM_T2, .chip_mac = CHBT_MAC_VSC7321, .chip_phy = CHBT_PHY_88E1111, .clock_core = 100000000, .espi_nports = 4, .clock_elmer0 = 44, .mdio_mdien = 0, .mdio_mdiinv = 0, 
.mdio_mdc = 0, .mdio_phybaseaddr = 4, .gmac = &t1_vsc7326_ops, .gphy = &t1_mv88e1xxx_ops, .mdio_ops = &mi1_mdio_ops, .desc = "Chelsio N204 4x100/1000BaseT NIC", }, #endif }; const struct pci_device_id t1_pci_tbl[] = { CH_DEVICE(8, 0, CH_BRD_T110_1CU), CH_DEVICE(8, 1, CH_BRD_T110_1CU), CH_DEVICE(7, 0, CH_BRD_N110_1F), CH_DEVICE(10, 1, CH_BRD_N210_1F), CH_DEVICE(11, 1, CH_BRD_T210_1F), CH_DEVICE(14, 1, CH_BRD_T210_1CU), CH_DEVICE(16, 1, CH_BRD_N204_4CU), { 0 } }; MODULE_DEVICE_TABLE(pci, t1_pci_tbl); /* * Return the board_info structure with a given index. Out-of-range indices * return NULL. */ const struct board_info *t1_get_board_info(unsigned int board_id) { return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL; } struct chelsio_vpd_t { u32 format_version; u8 serial_number[16]; u8 mac_base_address[6]; u8 pad[2]; /* make multiple-of-4 size requirement explicit */ }; #define EEPROMSIZE (8 * 1024) #define EEPROM_MAX_POLL 4 /* * Read SEEPROM. A zero is written to the flag register when the address is * written to the Control register. The hardware device will set the flag to a * one when 4B have been transferred to the Data register. 
*/ int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data) { int i = EEPROM_MAX_POLL; u16 val; u32 v; if (addr >= EEPROMSIZE || (addr & 3)) return -EINVAL; pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr); do { udelay(50); pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val); } while (!(val & F_VPD_OP_FLAG) && --i); if (!(val & F_VPD_OP_FLAG)) { pr_err("%s: reading EEPROM address 0x%x failed\n", adapter->name, addr); return -EIO; } pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, &v); *data = cpu_to_le32(v); return 0; } static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd) { int addr, ret = 0; for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32)) ret = t1_seeprom_read(adapter, addr, (__le32 *)((u8 *)vpd + addr)); return ret; } /* * Read a port's MAC address from the VPD ROM. */ static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[]) { struct chelsio_vpd_t vpd; if (t1_eeprom_vpd_get(adapter, &vpd)) return 1; memcpy(mac_addr, vpd.mac_base_address, 5); mac_addr[5] = vpd.mac_base_address[5] + index; return 0; } /* * Set up the MAC/PHY according to the requested link settings. * * If the PHY can auto-negotiate first decide what to advertise, then * enable/disable auto-negotiation as desired and reset. * * If the PHY does not auto-negotiate we just reset it. * * If auto-negotiation is off set the MAC to the proper speed/duplex/FC, * otherwise do it later based on the outcome of auto-negotiation. 
*/ int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) { unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); if (lc->supported & SUPPORTED_Autoneg) { lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE); if (fc) { if (fc == ((PAUSE_RX | PAUSE_TX) & (mac->adapter->params.nports < 2))) lc->advertising |= ADVERTISED_PAUSE; else { lc->advertising |= ADVERTISED_ASYM_PAUSE; if (fc == PAUSE_RX) lc->advertising |= ADVERTISED_PAUSE; } } phy->ops->advertise(phy, lc->advertising); if (lc->autoneg == AUTONEG_DISABLE) { lc->speed = lc->requested_speed; lc->duplex = lc->requested_duplex; lc->fc = (unsigned char)fc; mac->ops->set_speed_duplex_fc(mac, lc->speed, lc->duplex, fc); /* Also disables autoneg */ phy->state = PHY_AUTONEG_RDY; phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex); phy->ops->reset(phy, 0); } else { phy->state = PHY_AUTONEG_EN; phy->ops->autoneg_enable(phy); /* also resets PHY */ } } else { phy->state = PHY_AUTONEG_RDY; mac->ops->set_speed_duplex_fc(mac, -1, -1, fc); lc->fc = (unsigned char)fc; phy->ops->reset(phy, 0); } return 0; } /* * External interrupt handler for boards using elmer0. 
*/ int t1_elmer0_ext_intr_handler(adapter_t *adapter) { struct cphy *phy; int phy_cause; u32 cause; t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause); switch (board_info(adapter)->board) { #ifdef CONFIG_CHELSIO_T1_1G case CHBT_BOARD_CHT204: case CHBT_BOARD_CHT204E: case CHBT_BOARD_CHN204: case CHBT_BOARD_CHT204V: { int i, port_bit; for_each_port(adapter, i) { port_bit = i + 1; if (!(cause & (1 << port_bit))) continue; phy = adapter->port[i].phy; phy_cause = phy->ops->interrupt_handler(phy); if (phy_cause & cphy_cause_link_change) t1_link_changed(adapter, i); } break; } case CHBT_BOARD_CHT101: if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */ phy = adapter->port[0].phy; phy_cause = phy->ops->interrupt_handler(phy); if (phy_cause & cphy_cause_link_change) t1_link_changed(adapter, 0); } break; case CHBT_BOARD_7500: { int p; /* * Elmer0's interrupt cause isn't useful here because there is * only one bit that can be set for all 4 ports. This means * we are forced to check every PHY's interrupt status * register to see who initiated the interrupt. 
*/ for_each_port(adapter, p) { phy = adapter->port[p].phy; phy_cause = phy->ops->interrupt_handler(phy); if (phy_cause & cphy_cause_link_change) t1_link_changed(adapter, p); } break; } #endif case CHBT_BOARD_CHT210: case CHBT_BOARD_N210: case CHBT_BOARD_N110: if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */ phy = adapter->port[0].phy; phy_cause = phy->ops->interrupt_handler(phy); if (phy_cause & cphy_cause_link_change) t1_link_changed(adapter, 0); } break; case CHBT_BOARD_8000: case CHBT_BOARD_CHT110: if (netif_msg_intr(adapter)) dev_dbg(&adapter->pdev->dev, "External interrupt cause 0x%x\n", cause); if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */ struct cmac *mac = adapter->port[0].mac; mac->ops->interrupt_handler(mac); } if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */ u32 mod_detect; t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); if (netif_msg_link(adapter)) dev_info(&adapter->pdev->dev, "XPAK %s\n", mod_detect ? "removed" : "inserted"); } break; } t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); return 0; } /* Enables all interrupts. */ void t1_interrupts_enable(adapter_t *adapter) { unsigned int i; adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP; t1_sge_intr_enable(adapter->sge); t1_tp_intr_enable(adapter->tp); if (adapter->espi) { adapter->slow_intr_mask |= F_PL_INTR_ESPI; t1_espi_intr_enable(adapter->espi); } /* Enable MAC/PHY interrupts for each port. */ for_each_port(adapter, i) { adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac); adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy); } /* Enable PCIX & external chip interrupts on ASIC boards. */ if (t1_is_asic(adapter)) { u32 pl_intr = readl(adapter->regs + A_PL_ENABLE); /* PCI-X interrupts */ pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0xffffffff); adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX; pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX; writel(pl_intr, adapter->regs + A_PL_ENABLE); } } /* Disables all interrupts. 
*/ void t1_interrupts_disable(adapter_t* adapter) { unsigned int i; t1_sge_intr_disable(adapter->sge); t1_tp_intr_disable(adapter->tp); if (adapter->espi) t1_espi_intr_disable(adapter->espi); /* Disable MAC/PHY interrupts for each port. */ for_each_port(adapter, i) { adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac); adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy); } /* Disable PCIX & external chip interrupts. */ if (t1_is_asic(adapter)) writel(0, adapter->regs + A_PL_ENABLE); /* PCI-X interrupts */ pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); adapter->slow_intr_mask = 0; } /* Clears all interrupts */ void t1_interrupts_clear(adapter_t* adapter) { unsigned int i; t1_sge_intr_clear(adapter->sge); t1_tp_intr_clear(adapter->tp); if (adapter->espi) t1_espi_intr_clear(adapter->espi); /* Clear MAC/PHY interrupts for each port. */ for_each_port(adapter, i) { adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac); adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy); } /* Enable interrupts for external devices. */ if (t1_is_asic(adapter)) { u32 pl_intr = readl(adapter->regs + A_PL_CAUSE); writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX, adapter->regs + A_PL_CAUSE); } /* PCI-X interrupts */ pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff); } /* * Slow path interrupt handler for ASICs. */ static int asic_slow_intr(adapter_t *adapter) { u32 cause = readl(adapter->regs + A_PL_CAUSE); cause &= adapter->slow_intr_mask; if (!cause) return 0; if (cause & F_PL_INTR_SGE_ERR) t1_sge_intr_error_handler(adapter->sge); if (cause & F_PL_INTR_TP) t1_tp_intr_handler(adapter->tp); if (cause & F_PL_INTR_ESPI) t1_espi_intr_handler(adapter->espi); if (cause & F_PL_INTR_PCIX) t1_pci_intr_handler(adapter); if (cause & F_PL_INTR_EXT) t1_elmer0_ext_intr(adapter); /* Clear the interrupts just processed. 
*/ writel(cause, adapter->regs + A_PL_CAUSE); readl(adapter->regs + A_PL_CAUSE); /* flush writes */ return 1; } int t1_slow_intr_handler(adapter_t *adapter) { #ifdef CONFIG_CHELSIO_T1_1G if (!t1_is_asic(adapter)) return fpga_slow_intr(adapter); #endif return asic_slow_intr(adapter); } /* Power sequencing is a work-around for Intel's XPAKs. */ static void power_sequence_xpak(adapter_t* adapter) { u32 mod_detect; u32 gpo; /* Check for XPAK */ t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); if (!(ELMER0_GP_BIT5 & mod_detect)) { /* XPAK is present */ t1_tpi_read(adapter, A_ELMER0_GPO, &gpo); gpo |= ELMER0_GP_BIT18; t1_tpi_write(adapter, A_ELMER0_GPO, gpo); } } int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, struct adapter_params *p) { p->chip_version = bi->chip_term; p->is_asic = (p->chip_version != CHBT_TERM_FPGA); if (p->chip_version == CHBT_TERM_T1 || p->chip_version == CHBT_TERM_T2 || p->chip_version == CHBT_TERM_FPGA) { u32 val = readl(adapter->regs + A_TP_PC_CONFIG); val = G_TP_PC_REV(val); if (val == 2) p->chip_revision = TERM_T1B; else if (val == 3) p->chip_revision = TERM_T2; else return -1; } else return -1; return 0; } /* * Enable board components other than the Chelsio chip, such as external MAC * and PHY. */ static int board_init(adapter_t *adapter, const struct board_info *bi) { switch (bi->board) { case CHBT_BOARD_8000: case CHBT_BOARD_N110: case CHBT_BOARD_N210: case CHBT_BOARD_CHT210: t1_tpi_par(adapter, 0xf); t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); break; case CHBT_BOARD_CHT110: t1_tpi_par(adapter, 0xf); t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800); /* TBD XXX Might not need. This fixes a problem * described in the Intel SR XPAK errata. 
*/ power_sequence_xpak(adapter); break; #ifdef CONFIG_CHELSIO_T1_1G case CHBT_BOARD_CHT204E: /* add config space write here */ case CHBT_BOARD_CHT204: case CHBT_BOARD_CHT204V: case CHBT_BOARD_CHN204: t1_tpi_par(adapter, 0xf); t1_tpi_write(adapter, A_ELMER0_GPO, 0x804); break; case CHBT_BOARD_CHT101: case CHBT_BOARD_7500: t1_tpi_par(adapter, 0xf); t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804); break; #endif } return 0; } /* * Initialize and configure the Terminator HW modules. Note that external * MAC and PHYs are initialized separately. */ int t1_init_hw_modules(adapter_t *adapter) { int err = -EIO; const struct board_info *bi = board_info(adapter); if (!bi->clock_mc4) { u32 val = readl(adapter->regs + A_MC4_CFG); writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG); writel(F_M_BUS_ENABLE | F_TCAM_RESET, adapter->regs + A_MC5_CONFIG); } if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, bi->espi_nports)) goto out_err; if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core)) goto out_err; err = t1_sge_configure(adapter->sge, &adapter->params.sge); if (err) goto out_err; err = 0; out_err: return err; } /* * Determine a card's PCI mode. */ static void get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p) { static const unsigned short speed_map[] = { 33, 66, 100, 133 }; u32 pci_mode; pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode); p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)]; p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32; p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0; } /* * Release the structures holding the SW per-Terminator-HW-module state. 
*/ void t1_free_sw_modules(adapter_t *adapter) { unsigned int i; for_each_port(adapter, i) { struct cmac *mac = adapter->port[i].mac; struct cphy *phy = adapter->port[i].phy; if (mac) mac->ops->destroy(mac); if (phy) phy->ops->destroy(phy); } if (adapter->sge) t1_sge_destroy(adapter->sge); if (adapter->tp) t1_tp_destroy(adapter->tp); if (adapter->espi) t1_espi_destroy(adapter->espi); } static void init_link_config(struct link_config *lc, const struct board_info *bi) { lc->supported = bi->caps; lc->requested_speed = lc->speed = SPEED_INVALID; lc->requested_duplex = lc->duplex = DUPLEX_INVALID; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; if (lc->supported & SUPPORTED_Autoneg) { lc->advertising = lc->supported; lc->autoneg = AUTONEG_ENABLE; lc->requested_fc |= PAUSE_AUTONEG; } else { lc->advertising = 0; lc->autoneg = AUTONEG_DISABLE; } } /* * Allocate and initialize the data structures that hold the SW state of * the Terminator HW modules. */ int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi) { unsigned int i; adapter->params.brd_info = bi; adapter->params.nports = bi->port_number; adapter->params.stats_update_period = bi->gmac->stats_update_period; adapter->sge = t1_sge_create(adapter, &adapter->params.sge); if (!adapter->sge) { pr_err("%s: SGE initialization failed\n", adapter->name); goto error; } if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) { pr_err("%s: ESPI initialization failed\n", adapter->name); goto error; } adapter->tp = t1_tp_create(adapter, &adapter->params.tp); if (!adapter->tp) { pr_err("%s: TP initialization failed\n", adapter->name); goto error; } board_init(adapter, bi); bi->mdio_ops->init(adapter, bi); if (bi->gphy->reset) bi->gphy->reset(adapter); if (bi->gmac->reset) bi->gmac->reset(adapter); for_each_port(adapter, i) { u8 hw_addr[6]; struct cmac *mac; int phy_addr = bi->mdio_phybaseaddr + i; adapter->port[i].phy = bi->gphy->create(adapter->port[i].dev, phy_addr, bi->mdio_ops); if 
(!adapter->port[i].phy) { pr_err("%s: PHY %d initialization failed\n", adapter->name, i); goto error; } adapter->port[i].mac = mac = bi->gmac->create(adapter, i); if (!mac) { pr_err("%s: MAC %d initialization failed\n", adapter->name, i); goto error; } /* * Get the port's MAC addresses either from the EEPROM if one * exists or the one hardcoded in the MAC. */ if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY) mac->ops->macaddress_get(mac, hw_addr); else if (vpd_macaddress_get(adapter, i, hw_addr)) { pr_err("%s: could not read MAC address from VPD ROM\n", adapter->port[i].dev->name); goto error; } memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN); init_link_config(&adapter->port[i].link_config, bi); } get_pci_mode(adapter, &adapter->params.pci); t1_interrupts_clear(adapter); return 0; error: t1_free_sw_modules(adapter); return -1; }
gpl-2.0
segment-routing/openwrt
tools/perf/util/intlist.c
2406
3029
/* * Based on intlist.c by: * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com> * * Licensed under the GPLv2. */ #include <errno.h> #include <stdlib.h> #include <linux/compiler.h> #include "intlist.h" static struct rb_node *intlist__node_new(struct rblist *rblist __maybe_unused, const void *entry) { int i = (int)((long)entry); struct rb_node *rc = NULL; struct int_node *node = malloc(sizeof(*node)); if (node != NULL) { node->i = i; node->priv = NULL; rc = &node->rb_node; } return rc; } static void int_node__delete(struct int_node *ilist) { free(ilist); } static void intlist__node_delete(struct rblist *rblist __maybe_unused, struct rb_node *rb_node) { struct int_node *node = container_of(rb_node, struct int_node, rb_node); int_node__delete(node); } static int intlist__node_cmp(struct rb_node *rb_node, const void *entry) { int i = (int)((long)entry); struct int_node *node = container_of(rb_node, struct int_node, rb_node); return node->i - i; } int intlist__add(struct intlist *ilist, int i) { return rblist__add_node(&ilist->rblist, (void *)((long)i)); } void intlist__remove(struct intlist *ilist, struct int_node *node) { rblist__remove_node(&ilist->rblist, &node->rb_node); } static struct int_node *__intlist__findnew(struct intlist *ilist, int i, bool create) { struct int_node *node = NULL; struct rb_node *rb_node; if (ilist == NULL) return NULL; if (create) rb_node = rblist__findnew(&ilist->rblist, (void *)((long)i)); else rb_node = rblist__find(&ilist->rblist, (void *)((long)i)); if (rb_node) node = container_of(rb_node, struct int_node, rb_node); return node; } struct int_node *intlist__find(struct intlist *ilist, int i) { return __intlist__findnew(ilist, i, false); } struct int_node *intlist__findnew(struct intlist *ilist, int i) { return __intlist__findnew(ilist, i, true); } static int intlist__parse_list(struct intlist *ilist, const char *s) { char *sep; int err; do { long value = strtol(s, &sep, 10); err = -EINVAL; if (*sep != ',' && *sep != '\0') break; err 
= intlist__add(ilist, value); if (err) break; s = sep + 1; } while (*sep != '\0'); return err; } struct intlist *intlist__new(const char *slist) { struct intlist *ilist = malloc(sizeof(*ilist)); if (ilist != NULL) { rblist__init(&ilist->rblist); ilist->rblist.node_cmp = intlist__node_cmp; ilist->rblist.node_new = intlist__node_new; ilist->rblist.node_delete = intlist__node_delete; if (slist && intlist__parse_list(ilist, slist)) goto out_delete; } return ilist; out_delete: intlist__delete(ilist); return NULL; } void intlist__delete(struct intlist *ilist) { if (ilist != NULL) rblist__delete(&ilist->rblist); } struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx) { struct int_node *node = NULL; struct rb_node *rb_node; rb_node = rblist__entry(&ilist->rblist, idx); if (rb_node) node = container_of(rb_node, struct int_node, rb_node); return node; }
gpl-2.0
DmitryADP/diff_qc750
kernel/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2662
7275
/* * Copyright (c) 2010 Red Hat Inc. * Author : Dave Airlie <airlied@redhat.com> * * Licensed under GPLv2 * * ATPX support for both Intel/ATI */ #include <linux/vga_switcheroo.h> #include <linux/slab.h> #include <acpi/acpi.h> #include <acpi/acpi_bus.h> #include <linux/pci.h> #define ATPX_VERSION 0 #define ATPX_GPU_PWR 2 #define ATPX_MUX_SELECT 3 #define ATPX_I2C_MUX_SELECT 4 #define ATPX_SWITCH_START 5 #define ATPX_SWITCH_END 6 #define ATPX_INTEGRATED 0 #define ATPX_DISCRETE 1 #define ATPX_MUX_IGD 0 #define ATPX_MUX_DISCRETE 1 static struct radeon_atpx_priv { bool atpx_detected; /* handle for device - and atpx */ acpi_handle dhandle; acpi_handle atpx_handle; acpi_handle atrm_handle; } radeon_atpx_priv; /* retrieve the ROM in 4k blocks */ static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, int offset, int len) { acpi_status status; union acpi_object atrm_arg_elements[2], *obj; struct acpi_object_list atrm_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; atrm_arg.count = 2; atrm_arg.pointer = &atrm_arg_elements[0]; atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; atrm_arg_elements[0].integer.value = offset; atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; atrm_arg_elements[1].integer.value = len; status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); if (ACPI_FAILURE(status)) { printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); return -ENODEV; } obj = (union acpi_object *)buffer.pointer; memcpy(bios+offset, obj->buffer.pointer, len); kfree(buffer.pointer); return len; } bool radeon_atrm_supported(struct pci_dev *pdev) { /* get the discrete ROM only via ATRM */ if (!radeon_atpx_priv.atpx_detected) return false; if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) return false; return true; } int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len) { return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len); } static int radeon_atpx_get_version(acpi_handle handle) { 
acpi_status status; union acpi_object atpx_arg_elements[2], *obj; struct acpi_object_list atpx_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; atpx_arg.count = 2; atpx_arg.pointer = &atpx_arg_elements[0]; atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; atpx_arg_elements[0].integer.value = ATPX_VERSION; atpx_arg_elements[1].type = ACPI_TYPE_INTEGER; atpx_arg_elements[1].integer.value = ATPX_VERSION; status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); if (ACPI_FAILURE(status)) { printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status)); return -ENOSYS; } obj = (union acpi_object *)buffer.pointer; if (obj && (obj->type == ACPI_TYPE_BUFFER)) printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2)); kfree(buffer.pointer); return 0; } static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value) { acpi_status status; union acpi_object atpx_arg_elements[2]; struct acpi_object_list atpx_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; uint8_t buf[4] = {0}; if (!handle) return -EINVAL; atpx_arg.count = 2; atpx_arg.pointer = &atpx_arg_elements[0]; atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; atpx_arg_elements[0].integer.value = cmd_id; buf[2] = value & 0xff; buf[3] = (value >> 8) & 0xff; atpx_arg_elements[1].type = ACPI_TYPE_BUFFER; atpx_arg_elements[1].buffer.length = 4; atpx_arg_elements[1].buffer.pointer = buf; status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); if (ACPI_FAILURE(status)) { printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status)); return -ENOSYS; } kfree(buffer.pointer); return 0; } static int radeon_atpx_set_discrete_state(acpi_handle handle, int state) { return radeon_atpx_execute(handle, ATPX_GPU_PWR, state); } static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id) { return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id); } static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id) 
{ return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id); } static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id) { return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id); } static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id) { return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id); } static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) { int gpu_id; if (id == VGA_SWITCHEROO_IGD) gpu_id = ATPX_INTEGRATED; else gpu_id = ATPX_DISCRETE; radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id); radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id); radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id); radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id); return 0; } static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, enum vga_switcheroo_state state) { /* on w500 ACPI can't change intel gpu state */ if (id == VGA_SWITCHEROO_IGD) return 0; radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state); return 0; } static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) { acpi_handle dhandle, atpx_handle, atrm_handle; acpi_status status; dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); if (ACPI_FAILURE(status)) return false; status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); if (ACPI_FAILURE(status)) return false; radeon_atpx_priv.dhandle = dhandle; radeon_atpx_priv.atpx_handle = atpx_handle; radeon_atpx_priv.atrm_handle = atrm_handle; return true; } static int radeon_atpx_init(void) { /* set up the ATPX handle */ radeon_atpx_get_version(radeon_atpx_priv.atpx_handle); return 0; } static int radeon_atpx_get_client_id(struct pci_dev *pdev) { if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) return VGA_SWITCHEROO_IGD; else return VGA_SWITCHEROO_DIS; } static struct vga_switcheroo_handler radeon_atpx_handler = { .switchto = radeon_atpx_switchto, .power_state 
= radeon_atpx_power_state, .init = radeon_atpx_init, .get_client_id = radeon_atpx_get_client_id, }; static bool radeon_atpx_detect(void) { char acpi_method_name[255] = { 0 }; struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; struct pci_dev *pdev = NULL; bool has_atpx = false; int vga_count = 0; while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); } if (has_atpx && vga_count == 2) { acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", acpi_method_name); radeon_atpx_priv.atpx_detected = true; return true; } return false; } void radeon_register_atpx_handler(void) { bool r; /* detect if we have any ATPX + 2 VGA in the system */ r = radeon_atpx_detect(); if (!r) return; vga_switcheroo_register_handler(&radeon_atpx_handler); } void radeon_unregister_atpx_handler(void) { vga_switcheroo_unregister_handler(); }
gpl-2.0
bsmitty83/SpecialKang
arch/arm/lib/uaccess_with_memcpy.c
2918
5405
/* * linux/arch/arm/lib/uaccess_with_memcpy.c * * Written by: Lennert Buytenhek and Nicolas Pitre * Copyright (C) 2009 Marvell Semiconductor * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/ctype.h> #include <linux/uaccess.h> #include <linux/rwsem.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/hardirq.h> /* for in_atomic() */ #include <linux/gfp.h> #include <asm/current.h> #include <asm/page.h> static int pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp) { unsigned long addr = (unsigned long)_addr; pgd_t *pgd; pmd_t *pmd; pte_t *pte; pud_t *pud; spinlock_t *ptl; pgd = pgd_offset(current->mm, addr); if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd))) return 0; pud = pud_offset(pgd, addr); if (unlikely(pud_none(*pud) || pud_bad(*pud))) return 0; pmd = pmd_offset(pud, addr); if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd))) return 0; pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl); if (unlikely(!pte_present(*pte) || !pte_young(*pte) || !pte_write(*pte) || !pte_dirty(*pte))) { pte_unmap_unlock(pte, ptl); return 0; } *ptep = pte; *ptlp = ptl; return 1; } static unsigned long noinline __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) { int atomic; if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { memcpy((void *)to, from, n); return 0; } /* the mmap semaphore is taken only if not in an atomic context */ atomic = in_atomic(); if (!atomic) down_read(&current->mm->mmap_sem); while (n) { pte_t *pte; spinlock_t *ptl; int tocopy; while (!pin_page_for_write(to, &pte, &ptl)) { if (!atomic) up_read(&current->mm->mmap_sem); if (__put_user(0, (char __user *)to)) goto out; if (!atomic) down_read(&current->mm->mmap_sem); } tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1; if (tocopy > n) tocopy = n; memcpy((void *)to, from, tocopy); to += 
tocopy; from += tocopy; n -= tocopy; pte_unmap_unlock(pte, ptl); } if (!atomic) up_read(&current->mm->mmap_sem); out: return n; } unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { /* * This test is stubbed out of the main function above to keep * the overhead for small copies low by avoiding a large * register dump on the stack just to reload them right away. * With frame pointer disabled, tail call optimization kicks in * as well making this test almost invisible. */ if (n < 64) return __copy_to_user_std(to, from, n); return __copy_to_user_memcpy(to, from, n); } static unsigned long noinline __clear_user_memset(void __user *addr, unsigned long n) { if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { memset((void *)addr, 0, n); return 0; } down_read(&current->mm->mmap_sem); while (n) { pte_t *pte; spinlock_t *ptl; int tocopy; while (!pin_page_for_write(addr, &pte, &ptl)) { up_read(&current->mm->mmap_sem); if (__put_user(0, (char __user *)addr)) goto out; down_read(&current->mm->mmap_sem); } tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1; if (tocopy > n) tocopy = n; memset((void *)addr, 0, tocopy); addr += tocopy; n -= tocopy; pte_unmap_unlock(pte, ptl); } up_read(&current->mm->mmap_sem); out: return n; } unsigned long __clear_user(void __user *addr, unsigned long n) { /* See rational for this in __copy_to_user() above. */ if (n < 64) return __clear_user_std(addr, n); return __clear_user_memset(addr, n); } #if 0 /* * This code is disabled by default, but kept around in case the chosen * thresholds need to be revalidated. Some overhead (small but still) * would be implied by a runtime determined variable threshold, and * so far the measurement on concerned targets didn't show a worthwhile * variation. * * Note that a fairly precise sched_clock() implementation is needed * for results to make some sense. 
*/ #include <linux/vmalloc.h> static int __init test_size_treshold(void) { struct page *src_page, *dst_page; void *user_ptr, *kernel_ptr; unsigned long long t0, t1, t2; int size, ret; ret = -ENOMEM; src_page = alloc_page(GFP_KERNEL); if (!src_page) goto no_src; dst_page = alloc_page(GFP_KERNEL); if (!dst_page) goto no_dst; kernel_ptr = page_address(src_page); user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010)); if (!user_ptr) goto no_vmap; /* warm up the src page dcache */ ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE); for (size = PAGE_SIZE; size >= 4; size /= 2) { t0 = sched_clock(); ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size); t1 = sched_clock(); ret |= __copy_to_user_std(user_ptr, kernel_ptr, size); t2 = sched_clock(); printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1); } for (size = PAGE_SIZE; size >= 4; size /= 2) { t0 = sched_clock(); ret |= __clear_user_memset(user_ptr, size); t1 = sched_clock(); ret |= __clear_user_std(user_ptr, size); t2 = sched_clock(); printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1); } if (ret) ret = -EFAULT; vunmap(user_ptr); no_vmap: put_page(dst_page); no_dst: put_page(src_page); no_src: return ret; } subsys_initcall(test_size_treshold); #endif
gpl-2.0
hurrian/kernel_samsung_trelte
drivers/pinctrl/pinctrl-imx28.c
2918
11279
/* * Copyright 2012 Freescale Semiconductor, Inc. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/init.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> #include "pinctrl-mxs.h" enum imx28_pin_enum { GPMI_D00 = PINID(0, 0), GPMI_D01 = PINID(0, 1), GPMI_D02 = PINID(0, 2), GPMI_D03 = PINID(0, 3), GPMI_D04 = PINID(0, 4), GPMI_D05 = PINID(0, 5), GPMI_D06 = PINID(0, 6), GPMI_D07 = PINID(0, 7), GPMI_CE0N = PINID(0, 16), GPMI_CE1N = PINID(0, 17), GPMI_CE2N = PINID(0, 18), GPMI_CE3N = PINID(0, 19), GPMI_RDY0 = PINID(0, 20), GPMI_RDY1 = PINID(0, 21), GPMI_RDY2 = PINID(0, 22), GPMI_RDY3 = PINID(0, 23), GPMI_RDN = PINID(0, 24), GPMI_WRN = PINID(0, 25), GPMI_ALE = PINID(0, 26), GPMI_CLE = PINID(0, 27), GPMI_RESETN = PINID(0, 28), LCD_D00 = PINID(1, 0), LCD_D01 = PINID(1, 1), LCD_D02 = PINID(1, 2), LCD_D03 = PINID(1, 3), LCD_D04 = PINID(1, 4), LCD_D05 = PINID(1, 5), LCD_D06 = PINID(1, 6), LCD_D07 = PINID(1, 7), LCD_D08 = PINID(1, 8), LCD_D09 = PINID(1, 9), LCD_D10 = PINID(1, 10), LCD_D11 = PINID(1, 11), LCD_D12 = PINID(1, 12), LCD_D13 = PINID(1, 13), LCD_D14 = PINID(1, 14), LCD_D15 = PINID(1, 15), LCD_D16 = PINID(1, 16), LCD_D17 = PINID(1, 17), LCD_D18 = PINID(1, 18), LCD_D19 = PINID(1, 19), LCD_D20 = PINID(1, 20), LCD_D21 = PINID(1, 21), LCD_D22 = PINID(1, 22), LCD_D23 = PINID(1, 23), LCD_RD_E = PINID(1, 24), LCD_WR_RWN = PINID(1, 25), LCD_RS = PINID(1, 26), LCD_CS = PINID(1, 27), LCD_VSYNC = PINID(1, 28), LCD_HSYNC = PINID(1, 29), LCD_DOTCLK = PINID(1, 30), LCD_ENABLE = PINID(1, 31), SSP0_DATA0 = PINID(2, 0), SSP0_DATA1 = PINID(2, 1), SSP0_DATA2 = PINID(2, 2), SSP0_DATA3 = PINID(2, 3), SSP0_DATA4 = PINID(2, 4), SSP0_DATA5 = PINID(2, 5), SSP0_DATA6 = PINID(2, 6), SSP0_DATA7 = PINID(2, 
7), SSP0_CMD = PINID(2, 8), SSP0_DETECT = PINID(2, 9), SSP0_SCK = PINID(2, 10), SSP1_SCK = PINID(2, 12), SSP1_CMD = PINID(2, 13), SSP1_DATA0 = PINID(2, 14), SSP1_DATA3 = PINID(2, 15), SSP2_SCK = PINID(2, 16), SSP2_MOSI = PINID(2, 17), SSP2_MISO = PINID(2, 18), SSP2_SS0 = PINID(2, 19), SSP2_SS1 = PINID(2, 20), SSP2_SS2 = PINID(2, 21), SSP3_SCK = PINID(2, 24), SSP3_MOSI = PINID(2, 25), SSP3_MISO = PINID(2, 26), SSP3_SS0 = PINID(2, 27), AUART0_RX = PINID(3, 0), AUART0_TX = PINID(3, 1), AUART0_CTS = PINID(3, 2), AUART0_RTS = PINID(3, 3), AUART1_RX = PINID(3, 4), AUART1_TX = PINID(3, 5), AUART1_CTS = PINID(3, 6), AUART1_RTS = PINID(3, 7), AUART2_RX = PINID(3, 8), AUART2_TX = PINID(3, 9), AUART2_CTS = PINID(3, 10), AUART2_RTS = PINID(3, 11), AUART3_RX = PINID(3, 12), AUART3_TX = PINID(3, 13), AUART3_CTS = PINID(3, 14), AUART3_RTS = PINID(3, 15), PWM0 = PINID(3, 16), PWM1 = PINID(3, 17), PWM2 = PINID(3, 18), SAIF0_MCLK = PINID(3, 20), SAIF0_LRCLK = PINID(3, 21), SAIF0_BITCLK = PINID(3, 22), SAIF0_SDATA0 = PINID(3, 23), I2C0_SCL = PINID(3, 24), I2C0_SDA = PINID(3, 25), SAIF1_SDATA0 = PINID(3, 26), SPDIF = PINID(3, 27), PWM3 = PINID(3, 28), PWM4 = PINID(3, 29), LCD_RESET = PINID(3, 30), ENET0_MDC = PINID(4, 0), ENET0_MDIO = PINID(4, 1), ENET0_RX_EN = PINID(4, 2), ENET0_RXD0 = PINID(4, 3), ENET0_RXD1 = PINID(4, 4), ENET0_TX_CLK = PINID(4, 5), ENET0_TX_EN = PINID(4, 6), ENET0_TXD0 = PINID(4, 7), ENET0_TXD1 = PINID(4, 8), ENET0_RXD2 = PINID(4, 9), ENET0_RXD3 = PINID(4, 10), ENET0_TXD2 = PINID(4, 11), ENET0_TXD3 = PINID(4, 12), ENET0_RX_CLK = PINID(4, 13), ENET0_COL = PINID(4, 14), ENET0_CRS = PINID(4, 15), ENET_CLK = PINID(4, 16), JTAG_RTCK = PINID(4, 20), EMI_D00 = PINID(5, 0), EMI_D01 = PINID(5, 1), EMI_D02 = PINID(5, 2), EMI_D03 = PINID(5, 3), EMI_D04 = PINID(5, 4), EMI_D05 = PINID(5, 5), EMI_D06 = PINID(5, 6), EMI_D07 = PINID(5, 7), EMI_D08 = PINID(5, 8), EMI_D09 = PINID(5, 9), EMI_D10 = PINID(5, 10), EMI_D11 = PINID(5, 11), EMI_D12 = PINID(5, 12), EMI_D13 = PINID(5, 13), 
EMI_D14 = PINID(5, 14), EMI_D15 = PINID(5, 15), EMI_ODT0 = PINID(5, 16), EMI_DQM0 = PINID(5, 17), EMI_ODT1 = PINID(5, 18), EMI_DQM1 = PINID(5, 19), EMI_DDR_OPEN_FB = PINID(5, 20), EMI_CLK = PINID(5, 21), EMI_DQS0 = PINID(5, 22), EMI_DQS1 = PINID(5, 23), EMI_DDR_OPEN = PINID(5, 26), EMI_A00 = PINID(6, 0), EMI_A01 = PINID(6, 1), EMI_A02 = PINID(6, 2), EMI_A03 = PINID(6, 3), EMI_A04 = PINID(6, 4), EMI_A05 = PINID(6, 5), EMI_A06 = PINID(6, 6), EMI_A07 = PINID(6, 7), EMI_A08 = PINID(6, 8), EMI_A09 = PINID(6, 9), EMI_A10 = PINID(6, 10), EMI_A11 = PINID(6, 11), EMI_A12 = PINID(6, 12), EMI_A13 = PINID(6, 13), EMI_A14 = PINID(6, 14), EMI_BA0 = PINID(6, 16), EMI_BA1 = PINID(6, 17), EMI_BA2 = PINID(6, 18), EMI_CASN = PINID(6, 19), EMI_RASN = PINID(6, 20), EMI_WEN = PINID(6, 21), EMI_CE0N = PINID(6, 22), EMI_CE1N = PINID(6, 23), EMI_CKE = PINID(6, 24), }; static const struct pinctrl_pin_desc imx28_pins[] = { MXS_PINCTRL_PIN(GPMI_D00), MXS_PINCTRL_PIN(GPMI_D01), MXS_PINCTRL_PIN(GPMI_D02), MXS_PINCTRL_PIN(GPMI_D03), MXS_PINCTRL_PIN(GPMI_D04), MXS_PINCTRL_PIN(GPMI_D05), MXS_PINCTRL_PIN(GPMI_D06), MXS_PINCTRL_PIN(GPMI_D07), MXS_PINCTRL_PIN(GPMI_CE0N), MXS_PINCTRL_PIN(GPMI_CE1N), MXS_PINCTRL_PIN(GPMI_CE2N), MXS_PINCTRL_PIN(GPMI_CE3N), MXS_PINCTRL_PIN(GPMI_RDY0), MXS_PINCTRL_PIN(GPMI_RDY1), MXS_PINCTRL_PIN(GPMI_RDY2), MXS_PINCTRL_PIN(GPMI_RDY3), MXS_PINCTRL_PIN(GPMI_RDN), MXS_PINCTRL_PIN(GPMI_WRN), MXS_PINCTRL_PIN(GPMI_ALE), MXS_PINCTRL_PIN(GPMI_CLE), MXS_PINCTRL_PIN(GPMI_RESETN), MXS_PINCTRL_PIN(LCD_D00), MXS_PINCTRL_PIN(LCD_D01), MXS_PINCTRL_PIN(LCD_D02), MXS_PINCTRL_PIN(LCD_D03), MXS_PINCTRL_PIN(LCD_D04), MXS_PINCTRL_PIN(LCD_D05), MXS_PINCTRL_PIN(LCD_D06), MXS_PINCTRL_PIN(LCD_D07), MXS_PINCTRL_PIN(LCD_D08), MXS_PINCTRL_PIN(LCD_D09), MXS_PINCTRL_PIN(LCD_D10), MXS_PINCTRL_PIN(LCD_D11), MXS_PINCTRL_PIN(LCD_D12), MXS_PINCTRL_PIN(LCD_D13), MXS_PINCTRL_PIN(LCD_D14), MXS_PINCTRL_PIN(LCD_D15), MXS_PINCTRL_PIN(LCD_D16), MXS_PINCTRL_PIN(LCD_D17), MXS_PINCTRL_PIN(LCD_D18), 
MXS_PINCTRL_PIN(LCD_D19), MXS_PINCTRL_PIN(LCD_D20), MXS_PINCTRL_PIN(LCD_D21), MXS_PINCTRL_PIN(LCD_D22), MXS_PINCTRL_PIN(LCD_D23), MXS_PINCTRL_PIN(LCD_RD_E), MXS_PINCTRL_PIN(LCD_WR_RWN), MXS_PINCTRL_PIN(LCD_RS), MXS_PINCTRL_PIN(LCD_CS), MXS_PINCTRL_PIN(LCD_VSYNC), MXS_PINCTRL_PIN(LCD_HSYNC), MXS_PINCTRL_PIN(LCD_DOTCLK), MXS_PINCTRL_PIN(LCD_ENABLE), MXS_PINCTRL_PIN(SSP0_DATA0), MXS_PINCTRL_PIN(SSP0_DATA1), MXS_PINCTRL_PIN(SSP0_DATA2), MXS_PINCTRL_PIN(SSP0_DATA3), MXS_PINCTRL_PIN(SSP0_DATA4), MXS_PINCTRL_PIN(SSP0_DATA5), MXS_PINCTRL_PIN(SSP0_DATA6), MXS_PINCTRL_PIN(SSP0_DATA7), MXS_PINCTRL_PIN(SSP0_CMD), MXS_PINCTRL_PIN(SSP0_DETECT), MXS_PINCTRL_PIN(SSP0_SCK), MXS_PINCTRL_PIN(SSP1_SCK), MXS_PINCTRL_PIN(SSP1_CMD), MXS_PINCTRL_PIN(SSP1_DATA0), MXS_PINCTRL_PIN(SSP1_DATA3), MXS_PINCTRL_PIN(SSP2_SCK), MXS_PINCTRL_PIN(SSP2_MOSI), MXS_PINCTRL_PIN(SSP2_MISO), MXS_PINCTRL_PIN(SSP2_SS0), MXS_PINCTRL_PIN(SSP2_SS1), MXS_PINCTRL_PIN(SSP2_SS2), MXS_PINCTRL_PIN(SSP3_SCK), MXS_PINCTRL_PIN(SSP3_MOSI), MXS_PINCTRL_PIN(SSP3_MISO), MXS_PINCTRL_PIN(SSP3_SS0), MXS_PINCTRL_PIN(AUART0_RX), MXS_PINCTRL_PIN(AUART0_TX), MXS_PINCTRL_PIN(AUART0_CTS), MXS_PINCTRL_PIN(AUART0_RTS), MXS_PINCTRL_PIN(AUART1_RX), MXS_PINCTRL_PIN(AUART1_TX), MXS_PINCTRL_PIN(AUART1_CTS), MXS_PINCTRL_PIN(AUART1_RTS), MXS_PINCTRL_PIN(AUART2_RX), MXS_PINCTRL_PIN(AUART2_TX), MXS_PINCTRL_PIN(AUART2_CTS), MXS_PINCTRL_PIN(AUART2_RTS), MXS_PINCTRL_PIN(AUART3_RX), MXS_PINCTRL_PIN(AUART3_TX), MXS_PINCTRL_PIN(AUART3_CTS), MXS_PINCTRL_PIN(AUART3_RTS), MXS_PINCTRL_PIN(PWM0), MXS_PINCTRL_PIN(PWM1), MXS_PINCTRL_PIN(PWM2), MXS_PINCTRL_PIN(SAIF0_MCLK), MXS_PINCTRL_PIN(SAIF0_LRCLK), MXS_PINCTRL_PIN(SAIF0_BITCLK), MXS_PINCTRL_PIN(SAIF0_SDATA0), MXS_PINCTRL_PIN(I2C0_SCL), MXS_PINCTRL_PIN(I2C0_SDA), MXS_PINCTRL_PIN(SAIF1_SDATA0), MXS_PINCTRL_PIN(SPDIF), MXS_PINCTRL_PIN(PWM3), MXS_PINCTRL_PIN(PWM4), MXS_PINCTRL_PIN(LCD_RESET), MXS_PINCTRL_PIN(ENET0_MDC), MXS_PINCTRL_PIN(ENET0_MDIO), MXS_PINCTRL_PIN(ENET0_RX_EN), MXS_PINCTRL_PIN(ENET0_RXD0), 
MXS_PINCTRL_PIN(ENET0_RXD1), MXS_PINCTRL_PIN(ENET0_TX_CLK), MXS_PINCTRL_PIN(ENET0_TX_EN), MXS_PINCTRL_PIN(ENET0_TXD0), MXS_PINCTRL_PIN(ENET0_TXD1), MXS_PINCTRL_PIN(ENET0_RXD2), MXS_PINCTRL_PIN(ENET0_RXD3), MXS_PINCTRL_PIN(ENET0_TXD2), MXS_PINCTRL_PIN(ENET0_TXD3), MXS_PINCTRL_PIN(ENET0_RX_CLK), MXS_PINCTRL_PIN(ENET0_COL), MXS_PINCTRL_PIN(ENET0_CRS), MXS_PINCTRL_PIN(ENET_CLK), MXS_PINCTRL_PIN(JTAG_RTCK), MXS_PINCTRL_PIN(EMI_D00), MXS_PINCTRL_PIN(EMI_D01), MXS_PINCTRL_PIN(EMI_D02), MXS_PINCTRL_PIN(EMI_D03), MXS_PINCTRL_PIN(EMI_D04), MXS_PINCTRL_PIN(EMI_D05), MXS_PINCTRL_PIN(EMI_D06), MXS_PINCTRL_PIN(EMI_D07), MXS_PINCTRL_PIN(EMI_D08), MXS_PINCTRL_PIN(EMI_D09), MXS_PINCTRL_PIN(EMI_D10), MXS_PINCTRL_PIN(EMI_D11), MXS_PINCTRL_PIN(EMI_D12), MXS_PINCTRL_PIN(EMI_D13), MXS_PINCTRL_PIN(EMI_D14), MXS_PINCTRL_PIN(EMI_D15), MXS_PINCTRL_PIN(EMI_ODT0), MXS_PINCTRL_PIN(EMI_DQM0), MXS_PINCTRL_PIN(EMI_ODT1), MXS_PINCTRL_PIN(EMI_DQM1), MXS_PINCTRL_PIN(EMI_DDR_OPEN_FB), MXS_PINCTRL_PIN(EMI_CLK), MXS_PINCTRL_PIN(EMI_DQS0), MXS_PINCTRL_PIN(EMI_DQS1), MXS_PINCTRL_PIN(EMI_DDR_OPEN), MXS_PINCTRL_PIN(EMI_A00), MXS_PINCTRL_PIN(EMI_A01), MXS_PINCTRL_PIN(EMI_A02), MXS_PINCTRL_PIN(EMI_A03), MXS_PINCTRL_PIN(EMI_A04), MXS_PINCTRL_PIN(EMI_A05), MXS_PINCTRL_PIN(EMI_A06), MXS_PINCTRL_PIN(EMI_A07), MXS_PINCTRL_PIN(EMI_A08), MXS_PINCTRL_PIN(EMI_A09), MXS_PINCTRL_PIN(EMI_A10), MXS_PINCTRL_PIN(EMI_A11), MXS_PINCTRL_PIN(EMI_A12), MXS_PINCTRL_PIN(EMI_A13), MXS_PINCTRL_PIN(EMI_A14), MXS_PINCTRL_PIN(EMI_BA0), MXS_PINCTRL_PIN(EMI_BA1), MXS_PINCTRL_PIN(EMI_BA2), MXS_PINCTRL_PIN(EMI_CASN), MXS_PINCTRL_PIN(EMI_RASN), MXS_PINCTRL_PIN(EMI_WEN), MXS_PINCTRL_PIN(EMI_CE0N), MXS_PINCTRL_PIN(EMI_CE1N), MXS_PINCTRL_PIN(EMI_CKE), }; static struct mxs_regs imx28_regs = { .muxsel = 0x100, .drive = 0x300, .pull = 0x600, }; static struct mxs_pinctrl_soc_data imx28_pinctrl_data = { .regs = &imx28_regs, .pins = imx28_pins, .npins = ARRAY_SIZE(imx28_pins), }; static int imx28_pinctrl_probe(struct platform_device *pdev) { return 
mxs_pinctrl_probe(pdev, &imx28_pinctrl_data); } static struct of_device_id imx28_pinctrl_of_match[] = { { .compatible = "fsl,imx28-pinctrl", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx28_pinctrl_of_match); static struct platform_driver imx28_pinctrl_driver = { .driver = { .name = "imx28-pinctrl", .owner = THIS_MODULE, .of_match_table = imx28_pinctrl_of_match, }, .probe = imx28_pinctrl_probe, .remove = mxs_pinctrl_remove, }; static int __init imx28_pinctrl_init(void) { return platform_driver_register(&imx28_pinctrl_driver); } postcore_initcall(imx28_pinctrl_init); static void __exit imx28_pinctrl_exit(void) { platform_driver_unregister(&imx28_pinctrl_driver); } module_exit(imx28_pinctrl_exit); MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); MODULE_DESCRIPTION("Freescale i.MX28 pinctrl driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
AKKP/lge-kernel-star
arch/x86/kernel/cpu/mcheck/mce-apei.c
2918
4303
/*
 * Bridge between MCE and APEI
 *
 * On some machine, corrected memory errors are reported via APEI
 * generic hardware error source (GHES) instead of corrected Machine
 * Check. These corrected memory errors can be reported to user space
 * through /dev/mcelog via faking a corrected Machine Check, so that
 * the error memory page can be offlined by /sbin/mcelog if the error
 * count for one page is beyond the threshold.
 *
 * For fatal MCE, save MCE record into persistent storage via ERST, so
 * that the MCE record can be logged after reboot via ERST.
 *
 * Copyright 2010 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/cper.h>
#include <acpi/apei.h>
#include <asm/mce.h>

#include "mce-internal.h"

/*
 * Report an APEI/GHES-signalled corrected memory error by logging a
 * synthesized corrected Machine Check record, so that the normal
 * /dev/mcelog path sees it.  Uncorrected errors are ignored here
 * (they are handled elsewhere, per the file header above).
 */
void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
{
	struct mce m;

	/* Only corrected MC is reported */
	if (!corrected)
		return;

	mce_setup(&m);
	m.bank = 1;
	/*
	 * Fake a memory read corrected error with unknown channel.
	 * NOTE(review): 0x9f is an MCA compound error code for a memory
	 * read - confirm against the Intel SDM MCA error-code tables.
	 */
	m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
	m.addr = mem_err->physical_addr;
	mce_log(&m);
	mce_notify_irq();
}
EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);

/*
 * UUIDs used to tag our records in ERST: CPER_CREATOR_MCE marks the
 * record creator (checked on read to skip foreign records), and
 * CPER_SECTION_TYPE_MCE marks the single section we write.
 */
#define CPER_CREATOR_MCE						\
	UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c,	\
		0x64, 0x90, 0xb8, 0x9d)
#define CPER_SECTION_TYPE_MCE						\
	UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96,	\
		0x04, 0x4a, 0x38, 0xfc)

/*
 * CPER specification (in UEFI specification 2.3 appendix N) requires
 * byte-packed.
 */
struct cper_mce_record {
	struct cper_record_header hdr;		/* CPER record header */
	struct cper_section_descriptor sec_hdr;	/* single section descriptor */
	struct mce mce;				/* raw MCE payload */
} __packed;

/*
 * Wrap @m in a CPER record (header + one section descriptor + raw
 * struct mce payload) and save it to persistent storage via ERST.
 * Returns the result of erst_write() (0 on success per kernel
 * convention - verify against the ERST API).
 */
int apei_write_mce(struct mce *m)
{
	struct cper_mce_record rcd;

	memset(&rcd, 0, sizeof(rcd));
	memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
	rcd.hdr.revision = CPER_RECORD_REV;
	rcd.hdr.signature_end = CPER_SIG_END;
	rcd.hdr.section_count = 1;
	rcd.hdr.error_severity = CPER_SEV_FATAL;
	/* timestamp, platform_id, partition_id are all invalid */
	rcd.hdr.validation_bits = 0;
	rcd.hdr.record_length = sizeof(rcd);
	rcd.hdr.creator_id = CPER_CREATOR_MCE;
	rcd.hdr.notification_type = CPER_NOTIFY_MCE;
	rcd.hdr.record_id = cper_next_record_id();
	rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;

	/* Section payload sits immediately after the two headers */
	rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd;
	rcd.sec_hdr.section_length = sizeof(rcd.mce);
	rcd.sec_hdr.revision = CPER_SEC_REV;
	/* fru_id and fru_text is invalid */
	rcd.sec_hdr.validation_bits = 0;
	rcd.sec_hdr.flags = CPER_SEC_PRIMARY;
	rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
	rcd.sec_hdr.section_severity = CPER_SEV_FATAL;

	memcpy(&rcd.mce, m, sizeof(*m));

	return erst_write(&rcd.hdr);
}

/*
 * Read the next MCE record from ERST into @m, storing its id in
 * *record_id.  Walks the ERST record-id iterator, silently skipping
 * records that were concurrently cleared (-ENOENT) and records not
 * created by us (wrong size or creator_id).
 *
 * Returns sizeof(*m) on success, 0 when no more records exist
 * (*record_id is then APEI_ERST_INVALID_RECORD_ID), or a negative
 * error code.
 *
 * NOTE(review): rc is declared int while erst_read() and this
 * function's return type are ssize_t - confirm no truncation is
 * possible for the record sizes involved.
 */
ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
	struct cper_mce_record rcd;
	int rc, pos;

	rc = erst_get_record_id_begin(&pos);
	if (rc)
		return rc;
retry:
	rc = erst_get_record_id_next(&pos, record_id);
	if (rc)
		goto out;
	/* no more record */
	if (*record_id == APEI_ERST_INVALID_RECORD_ID)
		goto out;
	rc = erst_read(*record_id, &rcd.hdr, sizeof(rcd));
	/* someone else has cleared the record, try next one */
	if (rc == -ENOENT)
		goto retry;
	else if (rc < 0)
		goto out;
	/* try to skip other type records in storage */
	else if (rc != sizeof(rcd) ||
		 uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE))
		goto retry;
	memcpy(m, &rcd.mce, sizeof(*m));
	rc = sizeof(*m);
out:
	/* Always release the record-id iterator taken above */
	erst_get_record_id_end();
	return rc;
}

/* Check whether there is record in ERST */
int apei_check_mce(void)
{
	return erst_get_record_count();
}

/* Remove the record identified by @record_id from persistent storage */
int apei_clear_mce(u64 record_id)
{
	return erst_clear(record_id);
}
gpl-2.0
knone1/android_kernel_asus_moorefield
drivers/net/sungem_phy.c
2918
29308
/* * PHY drivers for the sungem ethernet driver. * * This file could be shared with other drivers. * * (c) 2002-2007, Benjamin Herrenscmidt (benh@kernel.crashing.org) * * TODO: * - Add support for PHYs that provide an IRQ line * - Eventually moved the entire polling state machine in * there (out of the eth driver), so that it can easily be * skipped on PHYs that implement it in hardware. * - On LXT971 & BCM5201, Apple uses some chip specific regs * to read the link status. Figure out why and if it makes * sense to do the same (magic aneg ?) * - Apple has some additional power management code for some * Broadcom PHYs that they "hide" from the OpenSource version * of darwin, still need to reverse engineer that */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/delay.h> #ifdef CONFIG_PPC_PMAC #include <asm/prom.h> #endif #include <linux/sungem_phy.h> /* Link modes of the BCM5400 PHY */ static const int phy_BCM5400_link_table[8][3] = { { 0, 0, 0 }, /* No link */ { 0, 0, 0 }, /* 10BT Half Duplex */ { 1, 0, 0 }, /* 10BT Full Duplex */ { 0, 1, 0 }, /* 100BT Half Duplex */ { 0, 1, 0 }, /* 100BT Half Duplex */ { 1, 1, 0 }, /* 100BT Full Duplex*/ { 1, 0, 1 }, /* 1000BT */ { 1, 0, 1 }, /* 1000BT */ }; static inline int __phy_read(struct mii_phy* phy, int id, int reg) { return phy->mdio_read(phy->dev, id, reg); } static inline void __phy_write(struct mii_phy* phy, int id, int reg, int val) { phy->mdio_write(phy->dev, id, reg, val); } static inline int phy_read(struct mii_phy* phy, int reg) { return phy->mdio_read(phy->dev, phy->mii_id, reg); } static inline void phy_write(struct mii_phy* phy, int reg, int val) { phy->mdio_write(phy->dev, phy->mii_id, reg, val); } static int reset_one_mii_phy(struct mii_phy* phy, int phy_id) { u16 val; int limit = 10000; val = __phy_read(phy, phy_id, MII_BMCR); val &= ~(BMCR_ISOLATE | 
BMCR_PDOWN); val |= BMCR_RESET; __phy_write(phy, phy_id, MII_BMCR, val); udelay(100); while (--limit) { val = __phy_read(phy, phy_id, MII_BMCR); if ((val & BMCR_RESET) == 0) break; udelay(10); } if ((val & BMCR_ISOLATE) && limit > 0) __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); return limit <= 0; } static int bcm5201_init(struct mii_phy* phy) { u16 data; data = phy_read(phy, MII_BCM5201_MULTIPHY); data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE; phy_write(phy, MII_BCM5201_MULTIPHY, data); phy_write(phy, MII_BCM5201_INTERRUPT, 0); return 0; } static int bcm5201_suspend(struct mii_phy* phy) { phy_write(phy, MII_BCM5201_INTERRUPT, 0); phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE); return 0; } static int bcm5221_init(struct mii_phy* phy) { u16 data; data = phy_read(phy, MII_BCM5221_TEST); phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, data | MII_BCM5221_SHDOW_AUX_STAT2_APD); data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data | MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR); data = phy_read(phy, MII_BCM5221_TEST); phy_write(phy, MII_BCM5221_TEST, data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); return 0; } static int bcm5221_suspend(struct mii_phy* phy) { u16 data; data = phy_read(phy, MII_BCM5221_TEST); phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data | MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE); return 0; } static int bcm5241_init(struct mii_phy* phy) { u16 data; data = phy_read(phy, MII_BCM5221_TEST); phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, data | MII_BCM5221_SHDOW_AUX_STAT2_APD); data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); 
phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); data = phy_read(phy, MII_BCM5221_TEST); phy_write(phy, MII_BCM5221_TEST, data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); return 0; } static int bcm5241_suspend(struct mii_phy* phy) { u16 data; data = phy_read(phy, MII_BCM5221_TEST); phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); return 0; } static int bcm5400_init(struct mii_phy* phy) { u16 data; /* Configure for gigabit full duplex */ data = phy_read(phy, MII_BCM5400_AUXCONTROL); data |= MII_BCM5400_AUXCONTROL_PWR10BASET; phy_write(phy, MII_BCM5400_AUXCONTROL, data); data = phy_read(phy, MII_BCM5400_GB_CONTROL); data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; phy_write(phy, MII_BCM5400_GB_CONTROL, data); udelay(100); /* Reset and configure cascaded 10/100 PHY */ (void)reset_one_mii_phy(phy, 0x1f); data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); data |= MII_BCM5201_MULTIPHY_SERIALMODE; __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); data = phy_read(phy, MII_BCM5400_AUXCONTROL); data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET; phy_write(phy, MII_BCM5400_AUXCONTROL, data); return 0; } static int bcm5400_suspend(struct mii_phy* phy) { #if 0 /* Commented out in Darwin... someone has those dawn docs ? */ phy_write(phy, MII_BMCR, BMCR_PDOWN); #endif return 0; } static int bcm5401_init(struct mii_phy* phy) { u16 data; int rev; rev = phy_read(phy, MII_PHYSID2) & 0x000f; if (rev == 0 || rev == 3) { /* Some revisions of 5401 appear to need this * initialisation sequence to disable, according * to OF, "tap power management" * * WARNING ! OF and Darwin don't agree on the * register addresses. OF seem to interpret the * register numbers below as decimal * * Note: This should (and does) match tg3_init_5401phy_dsp * in the tg3.c driver. 
-DaveM */ phy_write(phy, 0x18, 0x0c20); phy_write(phy, 0x17, 0x0012); phy_write(phy, 0x15, 0x1804); phy_write(phy, 0x17, 0x0013); phy_write(phy, 0x15, 0x1204); phy_write(phy, 0x17, 0x8006); phy_write(phy, 0x15, 0x0132); phy_write(phy, 0x17, 0x8006); phy_write(phy, 0x15, 0x0232); phy_write(phy, 0x17, 0x201f); phy_write(phy, 0x15, 0x0a20); } /* Configure for gigabit full duplex */ data = phy_read(phy, MII_BCM5400_GB_CONTROL); data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; phy_write(phy, MII_BCM5400_GB_CONTROL, data); udelay(10); /* Reset and configure cascaded 10/100 PHY */ (void)reset_one_mii_phy(phy, 0x1f); data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); data |= MII_BCM5201_MULTIPHY_SERIALMODE; __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); return 0; } static int bcm5401_suspend(struct mii_phy* phy) { #if 0 /* Commented out in Darwin... someone has those dawn docs ? */ phy_write(phy, MII_BMCR, BMCR_PDOWN); #endif return 0; } static int bcm5411_init(struct mii_phy* phy) { u16 data; /* Here's some more Apple black magic to setup * some voltage stuffs. 
*/ phy_write(phy, 0x1c, 0x8c23); phy_write(phy, 0x1c, 0x8ca3); phy_write(phy, 0x1c, 0x8c23); /* Here, Apple seems to want to reset it, do * it as well */ phy_write(phy, MII_BMCR, BMCR_RESET); phy_write(phy, MII_BMCR, 0x1340); data = phy_read(phy, MII_BCM5400_GB_CONTROL); data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; phy_write(phy, MII_BCM5400_GB_CONTROL, data); udelay(10); /* Reset and configure cascaded 10/100 PHY */ (void)reset_one_mii_phy(phy, 0x1f); return 0; } static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) { u16 ctl, adv; phy->autoneg = 1; phy->speed = SPEED_10; phy->duplex = DUPLEX_HALF; phy->pause = 0; phy->advertising = advertise; /* Setup standard advertise */ adv = phy_read(phy, MII_ADVERTISE); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; if (advertise & ADVERTISED_10baseT_Full) adv |= ADVERTISE_10FULL; if (advertise & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; if (advertise & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; phy_write(phy, MII_ADVERTISE, adv); /* Start/Restart aneg */ ctl = phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); phy_write(phy, MII_BMCR, ctl); return 0; } static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) { u16 ctl; phy->autoneg = 0; phy->speed = speed; phy->duplex = fd; phy->pause = 0; ctl = phy_read(phy, MII_BMCR); ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE); /* First reset the PHY */ phy_write(phy, MII_BMCR, ctl | BMCR_RESET); /* Select speed & duplex */ switch(speed) { case SPEED_10: break; case SPEED_100: ctl |= BMCR_SPEED100; break; case SPEED_1000: default: return -EINVAL; } if (fd == DUPLEX_FULL) ctl |= BMCR_FULLDPLX; phy_write(phy, MII_BMCR, ctl); return 0; } static int genmii_poll_link(struct mii_phy *phy) { u16 status; (void)phy_read(phy, MII_BMSR); status = phy_read(phy, MII_BMSR); if ((status & BMSR_LSTATUS) == 0) return 0; if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE)) 
return 0; return 1; } static int genmii_read_link(struct mii_phy *phy) { u16 lpa; if (phy->autoneg) { lpa = phy_read(phy, MII_LPA); if (lpa & (LPA_10FULL | LPA_100FULL)) phy->duplex = DUPLEX_FULL; else phy->duplex = DUPLEX_HALF; if (lpa & (LPA_100FULL | LPA_100HALF)) phy->speed = SPEED_100; else phy->speed = SPEED_10; phy->pause = 0; } /* On non-aneg, we assume what we put in BMCR is the speed, * though magic-aneg shouldn't prevent this case from occurring */ return 0; } static int generic_suspend(struct mii_phy* phy) { phy_write(phy, MII_BMCR, BMCR_PDOWN); return 0; } static int bcm5421_init(struct mii_phy* phy) { u16 data; unsigned int id; id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)); /* Revision 0 of 5421 needs some fixups */ if (id == 0x002060e0) { /* This is borrowed from MacOS */ phy_write(phy, 0x18, 0x1007); data = phy_read(phy, 0x18); phy_write(phy, 0x18, data | 0x0400); phy_write(phy, 0x18, 0x0007); data = phy_read(phy, 0x18); phy_write(phy, 0x18, data | 0x0800); phy_write(phy, 0x17, 0x000a); data = phy_read(phy, 0x15); phy_write(phy, 0x15, data | 0x0200); } /* Pick up some init code from OF for K2 version */ if ((id & 0xfffffff0) == 0x002062e0) { phy_write(phy, 4, 0x01e1); phy_write(phy, 9, 0x0300); } /* Check if we can enable automatic low power */ #ifdef CONFIG_PPC_PMAC if (phy->platform_data) { struct device_node *np = of_get_parent(phy->platform_data); int can_low_power = 1; if (np == NULL || of_get_property(np, "no-autolowpower", NULL)) can_low_power = 0; if (can_low_power) { /* Enable automatic low-power */ phy_write(phy, 0x1c, 0x9002); phy_write(phy, 0x1c, 0xa821); phy_write(phy, 0x1c, 0x941d); } } #endif /* CONFIG_PPC_PMAC */ return 0; } static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) { u16 ctl, adv; phy->autoneg = 1; phy->speed = SPEED_10; phy->duplex = DUPLEX_HALF; phy->pause = 0; phy->advertising = advertise; /* Setup standard advertise */ adv = phy_read(phy, MII_ADVERTISE); adv &= ~(ADVERTISE_ALL | 
ADVERTISE_100BASE4); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; if (advertise & ADVERTISED_10baseT_Full) adv |= ADVERTISE_10FULL; if (advertise & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; if (advertise & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; if (advertise & ADVERTISED_Pause) adv |= ADVERTISE_PAUSE_CAP; if (advertise & ADVERTISED_Asym_Pause) adv |= ADVERTISE_PAUSE_ASYM; phy_write(phy, MII_ADVERTISE, adv); /* Setup 1000BT advertise */ adv = phy_read(phy, MII_1000BASETCONTROL); adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP|MII_1000BASETCONTROL_HALFDUPLEXCAP); if (advertise & SUPPORTED_1000baseT_Half) adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; if (advertise & SUPPORTED_1000baseT_Full) adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; phy_write(phy, MII_1000BASETCONTROL, adv); /* Start/Restart aneg */ ctl = phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); phy_write(phy, MII_BMCR, ctl); return 0; } static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd) { u16 ctl; phy->autoneg = 0; phy->speed = speed; phy->duplex = fd; phy->pause = 0; ctl = phy_read(phy, MII_BMCR); ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE); /* First reset the PHY */ phy_write(phy, MII_BMCR, ctl | BMCR_RESET); /* Select speed & duplex */ switch(speed) { case SPEED_10: break; case SPEED_100: ctl |= BMCR_SPEED100; break; case SPEED_1000: ctl |= BMCR_SPD2; } if (fd == DUPLEX_FULL) ctl |= BMCR_FULLDPLX; // XXX Should we set the sungem to GII now on 1000BT ? phy_write(phy, MII_BMCR, ctl); return 0; } static int bcm54xx_read_link(struct mii_phy *phy) { int link_mode; u16 val; if (phy->autoneg) { val = phy_read(phy, MII_BCM5400_AUXSTATUS); link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >> MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT); phy->duplex = phy_BCM5400_link_table[link_mode][0] ? DUPLEX_FULL : DUPLEX_HALF; phy->speed = phy_BCM5400_link_table[link_mode][2] ? SPEED_1000 : (phy_BCM5400_link_table[link_mode][1] ? 
SPEED_100 : SPEED_10); val = phy_read(phy, MII_LPA); phy->pause = (phy->duplex == DUPLEX_FULL) && ((val & LPA_PAUSE) != 0); } /* On non-aneg, we assume what we put in BMCR is the speed, * though magic-aneg shouldn't prevent this case from occurring */ return 0; } static int marvell88e1111_init(struct mii_phy* phy) { u16 rev; /* magic init sequence for rev 0 */ rev = phy_read(phy, MII_PHYSID2) & 0x000f; if (rev == 0) { phy_write(phy, 0x1d, 0x000a); phy_write(phy, 0x1e, 0x0821); phy_write(phy, 0x1d, 0x0006); phy_write(phy, 0x1e, 0x8600); phy_write(phy, 0x1d, 0x000b); phy_write(phy, 0x1e, 0x0100); phy_write(phy, 0x1d, 0x0004); phy_write(phy, 0x1e, 0x4850); } return 0; } #define BCM5421_MODE_MASK (1 << 5) static int bcm5421_poll_link(struct mii_phy* phy) { u32 phy_reg; int mode; /* find out in what mode we are */ phy_write(phy, MII_NCONFIG, 0x1000); phy_reg = phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5421_MODE_MASK) >> 5; if ( mode == BCM54XX_COPPER) return genmii_poll_link(phy); /* try to find out whether we have a link */ phy_write(phy, MII_NCONFIG, 0x2000); phy_reg = phy_read(phy, MII_NCONFIG); if (phy_reg & 0x0020) return 0; else return 1; } static int bcm5421_read_link(struct mii_phy* phy) { u32 phy_reg; int mode; /* find out in what mode we are */ phy_write(phy, MII_NCONFIG, 0x1000); phy_reg = phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5421_MODE_MASK ) >> 5; if ( mode == BCM54XX_COPPER) return bcm54xx_read_link(phy); phy->speed = SPEED_1000; /* find out whether we are running half- or full duplex */ phy_write(phy, MII_NCONFIG, 0x2000); phy_reg = phy_read(phy, MII_NCONFIG); if ( (phy_reg & 0x0080) >> 7) phy->duplex |= DUPLEX_HALF; else phy->duplex |= DUPLEX_FULL; return 0; } static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg) { /* enable fiber mode */ phy_write(phy, MII_NCONFIG, 0x9020); /* LEDs active in both modes, autosense prio = fiber */ phy_write(phy, MII_NCONFIG, 0x945f); if (!autoneg) { /* switch off fibre autoneg */ 
phy_write(phy, MII_NCONFIG, 0xfc01); phy_write(phy, 0x0b, 0x0004); } phy->autoneg = autoneg; return 0; } #define BCM5461_FIBER_LINK (1 << 2) #define BCM5461_MODE_MASK (3 << 1) static int bcm5461_poll_link(struct mii_phy* phy) { u32 phy_reg; int mode; /* find out in what mode we are */ phy_write(phy, MII_NCONFIG, 0x7c00); phy_reg = phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5461_MODE_MASK ) >> 1; if ( mode == BCM54XX_COPPER) return genmii_poll_link(phy); /* find out whether we have a link */ phy_write(phy, MII_NCONFIG, 0x7000); phy_reg = phy_read(phy, MII_NCONFIG); if (phy_reg & BCM5461_FIBER_LINK) return 1; else return 0; } #define BCM5461_FIBER_DUPLEX (1 << 3) static int bcm5461_read_link(struct mii_phy* phy) { u32 phy_reg; int mode; /* find out in what mode we are */ phy_write(phy, MII_NCONFIG, 0x7c00); phy_reg = phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5461_MODE_MASK ) >> 1; if ( mode == BCM54XX_COPPER) { return bcm54xx_read_link(phy); } phy->speed = SPEED_1000; /* find out whether we are running half- or full duplex */ phy_write(phy, MII_NCONFIG, 0x7000); phy_reg = phy_read(phy, MII_NCONFIG); if (phy_reg & BCM5461_FIBER_DUPLEX) phy->duplex |= DUPLEX_FULL; else phy->duplex |= DUPLEX_HALF; return 0; } static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg) { /* select fiber mode, enable 1000 base-X registers */ phy_write(phy, MII_NCONFIG, 0xfc0b); if (autoneg) { /* enable fiber with no autonegotiation */ phy_write(phy, MII_ADVERTISE, 0x01e0); phy_write(phy, MII_BMCR, 0x1140); } else { /* enable fiber with autonegotiation */ phy_write(phy, MII_BMCR, 0x0140); } phy->autoneg = autoneg; return 0; } static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) { u16 ctl, adv; phy->autoneg = 1; phy->speed = SPEED_10; phy->duplex = DUPLEX_HALF; phy->pause = 0; phy->advertising = advertise; /* Setup standard advertise */ adv = phy_read(phy, MII_ADVERTISE); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (advertise & ADVERTISED_10baseT_Half) 
adv |= ADVERTISE_10HALF; if (advertise & ADVERTISED_10baseT_Full) adv |= ADVERTISE_10FULL; if (advertise & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; if (advertise & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; if (advertise & ADVERTISED_Pause) adv |= ADVERTISE_PAUSE_CAP; if (advertise & ADVERTISED_Asym_Pause) adv |= ADVERTISE_PAUSE_ASYM; phy_write(phy, MII_ADVERTISE, adv); /* Setup 1000BT advertise & enable crossover detect * XXX How do we advertise 1000BT ? Darwin source is * confusing here, they read from specific control and * write to control... Someone has specs for those * beasts ? */ adv = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); adv |= MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX; adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP | MII_1000BASETCONTROL_HALFDUPLEXCAP); if (advertise & SUPPORTED_1000baseT_Half) adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; if (advertise & SUPPORTED_1000baseT_Full) adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; phy_write(phy, MII_1000BASETCONTROL, adv); /* Start/Restart aneg */ ctl = phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); phy_write(phy, MII_BMCR, ctl); return 0; } static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd) { u16 ctl, ctl2; phy->autoneg = 0; phy->speed = speed; phy->duplex = fd; phy->pause = 0; ctl = phy_read(phy, MII_BMCR); ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE); ctl |= BMCR_RESET; /* Select speed & duplex */ switch(speed) { case SPEED_10: break; case SPEED_100: ctl |= BMCR_SPEED100; break; /* I'm not sure about the one below, again, Darwin source is * quite confusing and I lack chip specs */ case SPEED_1000: ctl |= BMCR_SPD2; } if (fd == DUPLEX_FULL) ctl |= BMCR_FULLDPLX; /* Disable crossover. 
Again, the way Apple does it is strange, * though I don't assume they are wrong ;) */ ctl2 = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); ctl2 &= ~(MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX | MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX | MII_1000BASETCONTROL_FULLDUPLEXCAP | MII_1000BASETCONTROL_HALFDUPLEXCAP); if (speed == SPEED_1000) ctl2 |= (fd == DUPLEX_FULL) ? MII_1000BASETCONTROL_FULLDUPLEXCAP : MII_1000BASETCONTROL_HALFDUPLEXCAP; phy_write(phy, MII_1000BASETCONTROL, ctl2); // XXX Should we set the sungem to GII now on 1000BT ? phy_write(phy, MII_BMCR, ctl); return 0; } static int marvell_read_link(struct mii_phy *phy) { u16 status, pmask; if (phy->autoneg) { status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS); if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0) return -EAGAIN; if (status & MII_M1011_PHY_SPEC_STATUS_1000) phy->speed = SPEED_1000; else if (status & MII_M1011_PHY_SPEC_STATUS_100) phy->speed = SPEED_100; else phy->speed = SPEED_10; if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX) phy->duplex = DUPLEX_FULL; else phy->duplex = DUPLEX_HALF; pmask = MII_M1011_PHY_SPEC_STATUS_TX_PAUSE | MII_M1011_PHY_SPEC_STATUS_RX_PAUSE; phy->pause = (status & pmask) == pmask; } /* On non-aneg, we assume what we put in BMCR is the speed, * though magic-aneg shouldn't prevent this case from occurring */ return 0; } #define MII_BASIC_FEATURES \ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII | \ SUPPORTED_Pause) /* On gigabit capable PHYs, we advertise Pause support but not asym pause * support for now as I'm not sure it's supported and Darwin doesn't do * it neither. --BenH. 
*/ #define MII_GBIT_FEATURES \ (MII_BASIC_FEATURES | \ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full) /* Broadcom BCM 5201 */ static struct mii_phy_ops bcm5201_phy_ops = { .init = bcm5201_init, .suspend = bcm5201_suspend, .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link, }; static struct mii_phy_def bcm5201_phy_def = { .phy_id = 0x00406210, .phy_id_mask = 0xfffffff0, .name = "BCM5201", .features = MII_BASIC_FEATURES, .magic_aneg = 1, .ops = &bcm5201_phy_ops }; /* Broadcom BCM 5221 */ static struct mii_phy_ops bcm5221_phy_ops = { .suspend = bcm5221_suspend, .init = bcm5221_init, .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link, }; static struct mii_phy_def bcm5221_phy_def = { .phy_id = 0x004061e0, .phy_id_mask = 0xfffffff0, .name = "BCM5221", .features = MII_BASIC_FEATURES, .magic_aneg = 1, .ops = &bcm5221_phy_ops }; /* Broadcom BCM 5241 */ static struct mii_phy_ops bcm5241_phy_ops = { .suspend = bcm5241_suspend, .init = bcm5241_init, .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link, }; static struct mii_phy_def bcm5241_phy_def = { .phy_id = 0x0143bc30, .phy_id_mask = 0xfffffff0, .name = "BCM5241", .features = MII_BASIC_FEATURES, .magic_aneg = 1, .ops = &bcm5241_phy_ops }; /* Broadcom BCM 5400 */ static struct mii_phy_ops bcm5400_phy_ops = { .init = bcm5400_init, .suspend = bcm5400_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = genmii_poll_link, .read_link = bcm54xx_read_link, }; static struct mii_phy_def bcm5400_phy_def = { .phy_id = 0x00206040, .phy_id_mask = 0xfffffff0, .name = "BCM5400", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5400_phy_ops }; /* Broadcom BCM 5401 */ static struct mii_phy_ops bcm5401_phy_ops = { .init = bcm5401_init, .suspend = 
bcm5401_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = genmii_poll_link, .read_link = bcm54xx_read_link, }; static struct mii_phy_def bcm5401_phy_def = { .phy_id = 0x00206050, .phy_id_mask = 0xfffffff0, .name = "BCM5401", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5401_phy_ops }; /* Broadcom BCM 5411 */ static struct mii_phy_ops bcm5411_phy_ops = { .init = bcm5411_init, .suspend = generic_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = genmii_poll_link, .read_link = bcm54xx_read_link, }; static struct mii_phy_def bcm5411_phy_def = { .phy_id = 0x00206070, .phy_id_mask = 0xfffffff0, .name = "BCM5411", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5411_phy_ops }; /* Broadcom BCM 5421 */ static struct mii_phy_ops bcm5421_phy_ops = { .init = bcm5421_init, .suspend = generic_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = bcm5421_poll_link, .read_link = bcm5421_read_link, .enable_fiber = bcm5421_enable_fiber, }; static struct mii_phy_def bcm5421_phy_def = { .phy_id = 0x002060e0, .phy_id_mask = 0xfffffff0, .name = "BCM5421", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5421_phy_ops }; /* Broadcom BCM 5421 built-in K2 */ static struct mii_phy_ops bcm5421k2_phy_ops = { .init = bcm5421_init, .suspend = generic_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = genmii_poll_link, .read_link = bcm54xx_read_link, }; static struct mii_phy_def bcm5421k2_phy_def = { .phy_id = 0x002062e0, .phy_id_mask = 0xfffffff0, .name = "BCM5421-K2", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5421k2_phy_ops }; static struct mii_phy_ops bcm5461_phy_ops = { .init = bcm5421_init, .suspend = generic_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = bcm5461_poll_link, .read_link = bcm5461_read_link, .enable_fiber = 
bcm5461_enable_fiber, }; static struct mii_phy_def bcm5461_phy_def = { .phy_id = 0x002060c0, .phy_id_mask = 0xfffffff0, .name = "BCM5461", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5461_phy_ops }; /* Broadcom BCM 5462 built-in Vesta */ static struct mii_phy_ops bcm5462V_phy_ops = { .init = bcm5421_init, .suspend = generic_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = genmii_poll_link, .read_link = bcm54xx_read_link, }; static struct mii_phy_def bcm5462V_phy_def = { .phy_id = 0x002060d0, .phy_id_mask = 0xfffffff0, .name = "BCM5462-Vesta", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5462V_phy_ops }; /* Marvell 88E1101 amd 88E1111 */ static struct mii_phy_ops marvell88e1101_phy_ops = { .suspend = generic_suspend, .setup_aneg = marvell_setup_aneg, .setup_forced = marvell_setup_forced, .poll_link = genmii_poll_link, .read_link = marvell_read_link }; static struct mii_phy_ops marvell88e1111_phy_ops = { .init = marvell88e1111_init, .suspend = generic_suspend, .setup_aneg = marvell_setup_aneg, .setup_forced = marvell_setup_forced, .poll_link = genmii_poll_link, .read_link = marvell_read_link }; /* two revs in darwin for the 88e1101 ... I could use a datasheet * to get the proper names... 
*/ static struct mii_phy_def marvell88e1101v1_phy_def = { .phy_id = 0x01410c20, .phy_id_mask = 0xfffffff0, .name = "Marvell 88E1101v1", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &marvell88e1101_phy_ops }; static struct mii_phy_def marvell88e1101v2_phy_def = { .phy_id = 0x01410c60, .phy_id_mask = 0xfffffff0, .name = "Marvell 88E1101v2", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &marvell88e1101_phy_ops }; static struct mii_phy_def marvell88e1111_phy_def = { .phy_id = 0x01410cc0, .phy_id_mask = 0xfffffff0, .name = "Marvell 88E1111", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &marvell88e1111_phy_ops }; /* Generic implementation for most 10/100 PHYs */ static struct mii_phy_ops generic_phy_ops = { .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link }; static struct mii_phy_def genmii_phy_def = { .phy_id = 0x00000000, .phy_id_mask = 0x00000000, .name = "Generic MII", .features = MII_BASIC_FEATURES, .magic_aneg = 0, .ops = &generic_phy_ops }; static struct mii_phy_def* mii_phy_table[] = { &bcm5201_phy_def, &bcm5221_phy_def, &bcm5241_phy_def, &bcm5400_phy_def, &bcm5401_phy_def, &bcm5411_phy_def, &bcm5421_phy_def, &bcm5421k2_phy_def, &bcm5461_phy_def, &bcm5462V_phy_def, &marvell88e1101v1_phy_def, &marvell88e1101v2_phy_def, &marvell88e1111_phy_def, &genmii_phy_def, NULL }; int sungem_phy_probe(struct mii_phy *phy, int mii_id) { int rc; u32 id; struct mii_phy_def* def; int i; /* We do not reset the mii_phy structure as the driver * may re-probe the PHY regulary */ phy->mii_id = mii_id; /* Take PHY out of isloate mode and reset it. 
*/ rc = reset_one_mii_phy(phy, mii_id); if (rc) goto fail; /* Read ID and find matching entry */ id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)); printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n", id, mii_id); for (i=0; (def = mii_phy_table[i]) != NULL; i++) if ((id & def->phy_id_mask) == def->phy_id) break; /* Should never be NULL (we have a generic entry), but... */ if (def == NULL) goto fail; phy->def = def; return 0; fail: phy->speed = 0; phy->duplex = 0; phy->pause = 0; phy->advertising = 0; return -ENODEV; } EXPORT_SYMBOL(sungem_phy_probe); MODULE_LICENSE("GPL");
gpl-2.0
JianguoWEI/linux-efq-final
drivers/char/sonypi.c
3174
42248
/* * Sony Programmable I/O Control Device driver for VAIO * * Copyright (C) 2007 Mattia Dongili <malattia@linux.it> * * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net> * * Copyright (C) 2005 Narayanan R S <nars@kadamba.org> * * Copyright (C) 2001-2002 Alcôve <www.alcove.com> * * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au> * * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp> * * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp> * * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com> * * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ #include <linux/module.h> #include <linux/sched.h> #include <linux/input.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/miscdevice.h> #include <linux/poll.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/acpi.h> #include <linux/dmi.h> #include <linux/err.h> #include <linux/kfifo.h> #include <linux/platform_device.h> #include <linux/gfp.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/system.h> #include <linux/sonypi.h> #define SONYPI_DRIVER_VERSION "1.26" MODULE_AUTHOR("Stelian Pop <stelian@popies.net>"); MODULE_DESCRIPTION("Sony Programmable I/O Control Device driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(SONYPI_DRIVER_VERSION); static int minor = -1; module_param(minor, int, 0); MODULE_PARM_DESC(minor, "minor number of the misc device, default is -1 (automatic)"); static int verbose; /* = 0 */ module_param(verbose, int, 0644); MODULE_PARM_DESC(verbose, "be verbose, default is 0 (no)"); static int fnkeyinit; /* = 0 */ module_param(fnkeyinit, int, 0444); MODULE_PARM_DESC(fnkeyinit, "set this if your Fn keys do not generate any event"); static int camera; /* = 0 */ module_param(camera, int, 0444); MODULE_PARM_DESC(camera, "set this if you have a MotionEye camera (PictureBook series)"); static int compat; /* = 0 */ module_param(compat, int, 0444); MODULE_PARM_DESC(compat, "set this if you want to enable backward compatibility mode"); static unsigned long mask = 0xffffffff; module_param(mask, ulong, 0644); MODULE_PARM_DESC(mask, "set this to the mask of event you want to enable (see doc)"); static int useinput = 1; module_param(useinput, int, 0444); MODULE_PARM_DESC(useinput, "set this if you would like sonypi to feed events to the input subsystem"); static int check_ioport = 1; module_param(check_ioport, int, 0444); MODULE_PARM_DESC(check_ioport, "set this to 0 if you think the automatic ioport check for sony-laptop is wrong"); #define SONYPI_DEVICE_MODEL_TYPE1 1 #define 
SONYPI_DEVICE_MODEL_TYPE2 2 #define SONYPI_DEVICE_MODEL_TYPE3 3 /* type1 models use those */ #define SONYPI_IRQ_PORT 0x8034 #define SONYPI_IRQ_SHIFT 22 #define SONYPI_TYPE1_BASE 0x50 #define SONYPI_G10A (SONYPI_TYPE1_BASE+0x14) #define SONYPI_TYPE1_REGION_SIZE 0x08 #define SONYPI_TYPE1_EVTYPE_OFFSET 0x04 /* type2 series specifics */ #define SONYPI_SIRQ 0x9b #define SONYPI_SLOB 0x9c #define SONYPI_SHIB 0x9d #define SONYPI_TYPE2_REGION_SIZE 0x20 #define SONYPI_TYPE2_EVTYPE_OFFSET 0x12 /* type3 series specifics */ #define SONYPI_TYPE3_BASE 0x40 #define SONYPI_TYPE3_GID2 (SONYPI_TYPE3_BASE+0x48) /* 16 bits */ #define SONYPI_TYPE3_MISC (SONYPI_TYPE3_BASE+0x6d) /* 8 bits */ #define SONYPI_TYPE3_REGION_SIZE 0x20 #define SONYPI_TYPE3_EVTYPE_OFFSET 0x12 /* battery / brightness addresses */ #define SONYPI_BAT_FLAGS 0x81 #define SONYPI_LCD_LIGHT 0x96 #define SONYPI_BAT1_PCTRM 0xa0 #define SONYPI_BAT1_LEFT 0xa2 #define SONYPI_BAT1_MAXRT 0xa4 #define SONYPI_BAT2_PCTRM 0xa8 #define SONYPI_BAT2_LEFT 0xaa #define SONYPI_BAT2_MAXRT 0xac #define SONYPI_BAT1_MAXTK 0xb0 #define SONYPI_BAT1_FULL 0xb2 #define SONYPI_BAT2_MAXTK 0xb8 #define SONYPI_BAT2_FULL 0xba /* FAN0 information (reverse engineered from ACPI tables) */ #define SONYPI_FAN0_STATUS 0x93 #define SONYPI_TEMP_STATUS 0xC1 /* ioports used for brightness and type2 events */ #define SONYPI_DATA_IOPORT 0x62 #define SONYPI_CST_IOPORT 0x66 /* The set of possible ioports */ struct sonypi_ioport_list { u16 port1; u16 port2; }; static struct sonypi_ioport_list sonypi_type1_ioport_list[] = { { 0x10c0, 0x10c4 }, /* looks like the default on C1Vx */ { 0x1080, 0x1084 }, { 0x1090, 0x1094 }, { 0x10a0, 0x10a4 }, { 0x10b0, 0x10b4 }, { 0x0, 0x0 } }; static struct sonypi_ioport_list sonypi_type2_ioport_list[] = { { 0x1080, 0x1084 }, { 0x10a0, 0x10a4 }, { 0x10c0, 0x10c4 }, { 0x10e0, 0x10e4 }, { 0x0, 0x0 } }; /* same as in type 2 models */ static struct sonypi_ioport_list *sonypi_type3_ioport_list = sonypi_type2_ioport_list; /* The set of 
possible interrupts */ struct sonypi_irq_list { u16 irq; u16 bits; }; static struct sonypi_irq_list sonypi_type1_irq_list[] = { { 11, 0x2 }, /* IRQ 11, GO22=0,GO23=1 in AML */ { 10, 0x1 }, /* IRQ 10, GO22=1,GO23=0 in AML */ { 5, 0x0 }, /* IRQ 5, GO22=0,GO23=0 in AML */ { 0, 0x3 } /* no IRQ, GO22=1,GO23=1 in AML */ }; static struct sonypi_irq_list sonypi_type2_irq_list[] = { { 11, 0x80 }, /* IRQ 11, 0x80 in SIRQ in AML */ { 10, 0x40 }, /* IRQ 10, 0x40 in SIRQ in AML */ { 9, 0x20 }, /* IRQ 9, 0x20 in SIRQ in AML */ { 6, 0x10 }, /* IRQ 6, 0x10 in SIRQ in AML */ { 0, 0x00 } /* no IRQ, 0x00 in SIRQ in AML */ }; /* same as in type2 models */ static struct sonypi_irq_list *sonypi_type3_irq_list = sonypi_type2_irq_list; #define SONYPI_CAMERA_BRIGHTNESS 0 #define SONYPI_CAMERA_CONTRAST 1 #define SONYPI_CAMERA_HUE 2 #define SONYPI_CAMERA_COLOR 3 #define SONYPI_CAMERA_SHARPNESS 4 #define SONYPI_CAMERA_PICTURE 5 #define SONYPI_CAMERA_EXPOSURE_MASK 0xC #define SONYPI_CAMERA_WHITE_BALANCE_MASK 0x3 #define SONYPI_CAMERA_PICTURE_MODE_MASK 0x30 #define SONYPI_CAMERA_MUTE_MASK 0x40 /* the rest don't need a loop until not 0xff */ #define SONYPI_CAMERA_AGC 6 #define SONYPI_CAMERA_AGC_MASK 0x30 #define SONYPI_CAMERA_SHUTTER_MASK 0x7 #define SONYPI_CAMERA_SHUTDOWN_REQUEST 7 #define SONYPI_CAMERA_CONTROL 0x10 #define SONYPI_CAMERA_STATUS 7 #define SONYPI_CAMERA_STATUS_READY 0x2 #define SONYPI_CAMERA_STATUS_POSITION 0x4 #define SONYPI_DIRECTION_BACKWARDS 0x4 #define SONYPI_CAMERA_REVISION 8 #define SONYPI_CAMERA_ROMVERSION 9 /* Event masks */ #define SONYPI_JOGGER_MASK 0x00000001 #define SONYPI_CAPTURE_MASK 0x00000002 #define SONYPI_FNKEY_MASK 0x00000004 #define SONYPI_BLUETOOTH_MASK 0x00000008 #define SONYPI_PKEY_MASK 0x00000010 #define SONYPI_BACK_MASK 0x00000020 #define SONYPI_HELP_MASK 0x00000040 #define SONYPI_LID_MASK 0x00000080 #define SONYPI_ZOOM_MASK 0x00000100 #define SONYPI_THUMBPHRASE_MASK 0x00000200 #define SONYPI_MEYE_MASK 0x00000400 #define SONYPI_MEMORYSTICK_MASK 
0x00000800 #define SONYPI_BATTERY_MASK 0x00001000 #define SONYPI_WIRELESS_MASK 0x00002000 struct sonypi_event { u8 data; u8 event; }; /* The set of possible button release events */ static struct sonypi_event sonypi_releaseev[] = { { 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0, 0 } }; /* The set of possible jogger events */ static struct sonypi_event sonypi_joggerev[] = { { 0x1f, SONYPI_EVENT_JOGDIAL_UP }, { 0x01, SONYPI_EVENT_JOGDIAL_DOWN }, { 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED }, { 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED }, { 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP }, { 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN }, { 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED }, { 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED }, { 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP }, { 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN }, { 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED }, { 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED }, { 0x40, SONYPI_EVENT_JOGDIAL_PRESSED }, { 0, 0 } }; /* The set of possible capture button events */ static struct sonypi_event sonypi_captureev[] = { { 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED }, { 0x07, SONYPI_EVENT_CAPTURE_PRESSED }, { 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED }, { 0, 0 } }; /* The set of possible fnkeys events */ static struct sonypi_event sonypi_fnkeyev[] = { { 0x10, SONYPI_EVENT_FNKEY_ESC }, { 0x11, SONYPI_EVENT_FNKEY_F1 }, { 0x12, SONYPI_EVENT_FNKEY_F2 }, { 0x13, SONYPI_EVENT_FNKEY_F3 }, { 0x14, SONYPI_EVENT_FNKEY_F4 }, { 0x15, SONYPI_EVENT_FNKEY_F5 }, { 0x16, SONYPI_EVENT_FNKEY_F6 }, { 0x17, SONYPI_EVENT_FNKEY_F7 }, { 0x18, SONYPI_EVENT_FNKEY_F8 }, { 0x19, SONYPI_EVENT_FNKEY_F9 }, { 0x1a, SONYPI_EVENT_FNKEY_F10 }, { 0x1b, SONYPI_EVENT_FNKEY_F11 }, { 0x1c, SONYPI_EVENT_FNKEY_F12 }, { 0x1f, SONYPI_EVENT_FNKEY_RELEASED }, { 0x21, SONYPI_EVENT_FNKEY_1 }, { 0x22, SONYPI_EVENT_FNKEY_2 }, { 0x31, SONYPI_EVENT_FNKEY_D }, { 0x32, SONYPI_EVENT_FNKEY_E }, { 0x33, SONYPI_EVENT_FNKEY_F }, { 0x34, SONYPI_EVENT_FNKEY_S }, { 0x35, SONYPI_EVENT_FNKEY_B }, { 0x36, 
SONYPI_EVENT_FNKEY_ONLY }, { 0, 0 } }; /* The set of possible program key events */ static struct sonypi_event sonypi_pkeyev[] = { { 0x01, SONYPI_EVENT_PKEY_P1 }, { 0x02, SONYPI_EVENT_PKEY_P2 }, { 0x04, SONYPI_EVENT_PKEY_P3 }, { 0x5c, SONYPI_EVENT_PKEY_P1 }, { 0, 0 } }; /* The set of possible bluetooth events */ static struct sonypi_event sonypi_blueev[] = { { 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED }, { 0x59, SONYPI_EVENT_BLUETOOTH_ON }, { 0x5a, SONYPI_EVENT_BLUETOOTH_OFF }, { 0, 0 } }; /* The set of possible wireless events */ static struct sonypi_event sonypi_wlessev[] = { { 0x59, SONYPI_EVENT_WIRELESS_ON }, { 0x5a, SONYPI_EVENT_WIRELESS_OFF }, { 0, 0 } }; /* The set of possible back button events */ static struct sonypi_event sonypi_backev[] = { { 0x20, SONYPI_EVENT_BACK_PRESSED }, { 0, 0 } }; /* The set of possible help button events */ static struct sonypi_event sonypi_helpev[] = { { 0x3b, SONYPI_EVENT_HELP_PRESSED }, { 0, 0 } }; /* The set of possible lid events */ static struct sonypi_event sonypi_lidev[] = { { 0x51, SONYPI_EVENT_LID_CLOSED }, { 0x50, SONYPI_EVENT_LID_OPENED }, { 0, 0 } }; /* The set of possible zoom events */ static struct sonypi_event sonypi_zoomev[] = { { 0x39, SONYPI_EVENT_ZOOM_PRESSED }, { 0, 0 } }; /* The set of possible thumbphrase events */ static struct sonypi_event sonypi_thumbphraseev[] = { { 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED }, { 0, 0 } }; /* The set of possible motioneye camera events */ static struct sonypi_event sonypi_meyeev[] = { { 0x00, SONYPI_EVENT_MEYE_FACE }, { 0x01, SONYPI_EVENT_MEYE_OPPOSITE }, { 0, 0 } }; /* The set of possible memorystick events */ static struct sonypi_event sonypi_memorystickev[] = { { 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT }, { 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT }, { 0, 0 } }; /* The set of possible battery events */ static struct sonypi_event sonypi_batteryev[] = { { 0x20, SONYPI_EVENT_BATTERY_INSERT }, { 0x30, SONYPI_EVENT_BATTERY_REMOVE }, { 0, 0 } }; static struct sonypi_eventtypes { int 
model; u8 data; unsigned long mask; struct sonypi_event * events; } sonypi_eventtypes[] = { { SONYPI_DEVICE_MODEL_TYPE1, 0, 0xffffffff, sonypi_releaseev }, { SONYPI_DEVICE_MODEL_TYPE1, 0x70, SONYPI_MEYE_MASK, sonypi_meyeev }, { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_LID_MASK, sonypi_lidev }, { SONYPI_DEVICE_MODEL_TYPE1, 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev }, { SONYPI_DEVICE_MODEL_TYPE1, 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev }, { SONYPI_DEVICE_MODEL_TYPE1, 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev }, { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev }, { SONYPI_DEVICE_MODEL_TYPE2, 0, 0xffffffff, sonypi_releaseev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, { SONYPI_DEVICE_MODEL_TYPE3, 0, 0xffffffff, sonypi_releaseev }, { SONYPI_DEVICE_MODEL_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, { 
SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev }, { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, { SONYPI_DEVICE_MODEL_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, { 0 } }; #define SONYPI_BUF_SIZE 128 /* Correspondance table between sonypi events and input layer events */ static struct { int sonypiev; int inputev; } sonypi_inputkeys[] = { { SONYPI_EVENT_CAPTURE_PRESSED, KEY_CAMERA }, { SONYPI_EVENT_FNKEY_ONLY, KEY_FN }, { SONYPI_EVENT_FNKEY_ESC, KEY_FN_ESC }, { SONYPI_EVENT_FNKEY_F1, KEY_FN_F1 }, { SONYPI_EVENT_FNKEY_F2, KEY_FN_F2 }, { SONYPI_EVENT_FNKEY_F3, KEY_FN_F3 }, { SONYPI_EVENT_FNKEY_F4, KEY_FN_F4 }, { SONYPI_EVENT_FNKEY_F5, KEY_FN_F5 }, { SONYPI_EVENT_FNKEY_F6, KEY_FN_F6 }, { SONYPI_EVENT_FNKEY_F7, KEY_FN_F7 }, { SONYPI_EVENT_FNKEY_F8, KEY_FN_F8 }, { SONYPI_EVENT_FNKEY_F9, KEY_FN_F9 }, { SONYPI_EVENT_FNKEY_F10, KEY_FN_F10 }, { SONYPI_EVENT_FNKEY_F11, KEY_FN_F11 }, { SONYPI_EVENT_FNKEY_F12, KEY_FN_F12 }, { SONYPI_EVENT_FNKEY_1, KEY_FN_1 }, { SONYPI_EVENT_FNKEY_2, KEY_FN_2 }, { SONYPI_EVENT_FNKEY_D, KEY_FN_D }, { SONYPI_EVENT_FNKEY_E, KEY_FN_E }, { SONYPI_EVENT_FNKEY_F, KEY_FN_F }, { SONYPI_EVENT_FNKEY_S, KEY_FN_S }, { SONYPI_EVENT_FNKEY_B, KEY_FN_B }, { SONYPI_EVENT_BLUETOOTH_PRESSED, KEY_BLUE }, { SONYPI_EVENT_BLUETOOTH_ON, KEY_BLUE }, { SONYPI_EVENT_PKEY_P1, KEY_PROG1 }, { SONYPI_EVENT_PKEY_P2, KEY_PROG2 }, { SONYPI_EVENT_PKEY_P3, KEY_PROG3 }, { SONYPI_EVENT_BACK_PRESSED, KEY_BACK }, { SONYPI_EVENT_HELP_PRESSED, KEY_HELP }, { SONYPI_EVENT_ZOOM_PRESSED, KEY_ZOOM }, { SONYPI_EVENT_THUMBPHRASE_PRESSED, BTN_THUMB }, { 0, 0 }, }; struct sonypi_keypress { struct input_dev *dev; int key; }; static struct sonypi_device { struct pci_dev *dev; u16 irq; u16 bits; u16 ioport1; u16 ioport2; u16 region_size; u16 evtype_offset; int camera_power; int bluetooth_power; struct mutex lock; struct kfifo fifo; spinlock_t fifo_lock; 
wait_queue_head_t fifo_proc_list; struct fasync_struct *fifo_async; int open_count; int model; struct input_dev *input_jog_dev; struct input_dev *input_key_dev; struct work_struct input_work; struct kfifo input_fifo; spinlock_t input_fifo_lock; } sonypi_device; #define ITERATIONS_LONG 10000 #define ITERATIONS_SHORT 10 #define wait_on_command(quiet, command, iterations) { \ unsigned int n = iterations; \ while (--n && (command)) \ udelay(1); \ if (!n && (verbose || !quiet)) \ printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __func__, __LINE__); \ } #ifdef CONFIG_ACPI #define SONYPI_ACPI_ACTIVE (!acpi_disabled) #else #define SONYPI_ACPI_ACTIVE 0 #endif /* CONFIG_ACPI */ #ifdef CONFIG_ACPI static struct acpi_device *sonypi_acpi_device; static int acpi_driver_registered; #endif static int sonypi_ec_write(u8 addr, u8 value) { #ifdef CONFIG_ACPI if (SONYPI_ACPI_ACTIVE) return ec_write(addr, value); #endif wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG); outb_p(0x81, SONYPI_CST_IOPORT); wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); outb_p(addr, SONYPI_DATA_IOPORT); wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); outb_p(value, SONYPI_DATA_IOPORT); wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); return 0; } static int sonypi_ec_read(u8 addr, u8 *value) { #ifdef CONFIG_ACPI if (SONYPI_ACPI_ACTIVE) return ec_read(addr, value); #endif wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG); outb_p(0x80, SONYPI_CST_IOPORT); wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); outb_p(addr, SONYPI_DATA_IOPORT); wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); *value = inb_p(SONYPI_DATA_IOPORT); return 0; } static int ec_read16(u8 addr, u16 *value) { u8 val_lb, val_hb; if (sonypi_ec_read(addr, &val_lb)) return -1; if (sonypi_ec_read(addr + 1, &val_hb)) return -1; *value = val_lb | (val_hb << 8); return 0; } /* Initializes the device - 
this comes from the AML code in the ACPI bios */ static void sonypi_type1_srs(void) { u32 v; pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); v = (v & 0xFFFF0000) | ((u32) sonypi_device.ioport1); pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); v = (v & 0xFFF0FFFF) | (((u32) sonypi_device.ioport1 ^ sonypi_device.ioport2) << 16); pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); v = inl(SONYPI_IRQ_PORT); v &= ~(((u32) 0x3) << SONYPI_IRQ_SHIFT); v |= (((u32) sonypi_device.bits) << SONYPI_IRQ_SHIFT); outl(v, SONYPI_IRQ_PORT); pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); v = (v & 0xFF1FFFFF) | 0x00C00000; pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); } static void sonypi_type2_srs(void) { if (sonypi_ec_write(SONYPI_SHIB, (sonypi_device.ioport1 & 0xFF00) >> 8)) printk(KERN_WARNING "ec_write failed\n"); if (sonypi_ec_write(SONYPI_SLOB, sonypi_device.ioport1 & 0x00FF)) printk(KERN_WARNING "ec_write failed\n"); if (sonypi_ec_write(SONYPI_SIRQ, sonypi_device.bits)) printk(KERN_WARNING "ec_write failed\n"); udelay(10); } static void sonypi_type3_srs(void) { u16 v16; u8 v8; /* This model type uses the same initialiazation of * the embedded controller as the type2 models. */ sonypi_type2_srs(); /* Initialization of PCI config space of the LPC interface bridge. 
*/ v16 = (sonypi_device.ioport1 & 0xFFF0) | 0x01; pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, v16); pci_read_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, &v8); v8 = (v8 & 0xCF) | 0x10; pci_write_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, v8); } /* Disables the device - this comes from the AML code in the ACPI bios */ static void sonypi_type1_dis(void) { u32 v; pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); v = v & 0xFF3FFFFF; pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); v = inl(SONYPI_IRQ_PORT); v |= (0x3 << SONYPI_IRQ_SHIFT); outl(v, SONYPI_IRQ_PORT); } static void sonypi_type2_dis(void) { if (sonypi_ec_write(SONYPI_SHIB, 0)) printk(KERN_WARNING "ec_write failed\n"); if (sonypi_ec_write(SONYPI_SLOB, 0)) printk(KERN_WARNING "ec_write failed\n"); if (sonypi_ec_write(SONYPI_SIRQ, 0)) printk(KERN_WARNING "ec_write failed\n"); } static void sonypi_type3_dis(void) { sonypi_type2_dis(); udelay(10); pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, 0); } static u8 sonypi_call1(u8 dev) { u8 v1, v2; wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); outb(dev, sonypi_device.ioport2); v1 = inb_p(sonypi_device.ioport2); v2 = inb_p(sonypi_device.ioport1); return v2; } static u8 sonypi_call2(u8 dev, u8 fn) { u8 v1; wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); outb(dev, sonypi_device.ioport2); wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); outb(fn, sonypi_device.ioport1); v1 = inb_p(sonypi_device.ioport1); return v1; } static u8 sonypi_call3(u8 dev, u8 fn, u8 v) { u8 v1; wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); outb(dev, sonypi_device.ioport2); wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); outb(fn, sonypi_device.ioport1); wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); outb(v, sonypi_device.ioport1); v1 = inb_p(sonypi_device.ioport1); return v1; } #if 0 /* Get brightness, 
hue etc. Unreliable... */ static u8 sonypi_read(u8 fn) { u8 v1, v2; int n = 100; while (n--) { v1 = sonypi_call2(0x8f, fn); v2 = sonypi_call2(0x8f, fn); if (v1 == v2 && v1 != 0xff) return v1; } return 0xff; } #endif /* Set brightness, hue etc */ static void sonypi_set(u8 fn, u8 v) { wait_on_command(0, sonypi_call3(0x90, fn, v), ITERATIONS_SHORT); } /* Tests if the camera is ready */ static int sonypi_camera_ready(void) { u8 v; v = sonypi_call2(0x8f, SONYPI_CAMERA_STATUS); return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY)); } /* Turns the camera off */ static void sonypi_camera_off(void) { sonypi_set(SONYPI_CAMERA_PICTURE, SONYPI_CAMERA_MUTE_MASK); if (!sonypi_device.camera_power) return; sonypi_call2(0x91, 0); sonypi_device.camera_power = 0; } /* Turns the camera on */ static void sonypi_camera_on(void) { int i, j; if (sonypi_device.camera_power) return; for (j = 5; j > 0; j--) { while (sonypi_call2(0x91, 0x1)) msleep(10); sonypi_call1(0x93); for (i = 400; i > 0; i--) { if (sonypi_camera_ready()) break; msleep(10); } if (i) break; } if (j == 0) { printk(KERN_WARNING "sonypi: failed to power on camera\n"); return; } sonypi_set(0x10, 0x5a); sonypi_device.camera_power = 1; } /* sets the bluetooth subsystem power state */ static void sonypi_setbluetoothpower(u8 state) { state = !!state; if (sonypi_device.bluetooth_power == state) return; sonypi_call2(0x96, state); sonypi_call1(0x82); sonypi_device.bluetooth_power = state; } static void input_keyrelease(struct work_struct *work) { struct sonypi_keypress kp; while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp, sizeof(kp), &sonypi_device.input_fifo_lock) == sizeof(kp)) { msleep(10); input_report_key(kp.dev, kp.key, 0); input_sync(kp.dev); } } static void sonypi_report_input_event(u8 event) { struct input_dev *jog_dev = sonypi_device.input_jog_dev; struct input_dev *key_dev = sonypi_device.input_key_dev; struct sonypi_keypress kp = { NULL }; int i; switch (event) { case SONYPI_EVENT_JOGDIAL_UP: case 
SONYPI_EVENT_JOGDIAL_UP_PRESSED: input_report_rel(jog_dev, REL_WHEEL, 1); input_sync(jog_dev); break; case SONYPI_EVENT_JOGDIAL_DOWN: case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED: input_report_rel(jog_dev, REL_WHEEL, -1); input_sync(jog_dev); break; case SONYPI_EVENT_JOGDIAL_PRESSED: kp.key = BTN_MIDDLE; kp.dev = jog_dev; break; case SONYPI_EVENT_FNKEY_RELEASED: /* Nothing, not all VAIOs generate this event */ break; default: for (i = 0; sonypi_inputkeys[i].sonypiev; i++) if (event == sonypi_inputkeys[i].sonypiev) { kp.dev = key_dev; kp.key = sonypi_inputkeys[i].inputev; break; } break; } if (kp.dev) { input_report_key(kp.dev, kp.key, 1); input_sync(kp.dev); kfifo_in_locked(&sonypi_device.input_fifo, (unsigned char *)&kp, sizeof(kp), &sonypi_device.input_fifo_lock); schedule_work(&sonypi_device.input_work); } } /* Interrupt handler: some event is available */ static irqreturn_t sonypi_irq(int irq, void *dev_id) { u8 v1, v2, event = 0; int i, j; v1 = inb_p(sonypi_device.ioport1); v2 = inb_p(sonypi_device.ioport1 + sonypi_device.evtype_offset); for (i = 0; sonypi_eventtypes[i].model; i++) { if (sonypi_device.model != sonypi_eventtypes[i].model) continue; if ((v2 & sonypi_eventtypes[i].data) != sonypi_eventtypes[i].data) continue; if (!(mask & sonypi_eventtypes[i].mask)) continue; for (j = 0; sonypi_eventtypes[i].events[j].event; j++) { if (v1 == sonypi_eventtypes[i].events[j].data) { event = sonypi_eventtypes[i].events[j].event; goto found; } } } if (verbose) printk(KERN_WARNING "sonypi: unknown event port1=0x%02x,port2=0x%02x\n", v1, v2); /* We need to return IRQ_HANDLED here because there *are* * events belonging to the sonypi device we don't know about, * but we still don't want those to pollute the logs... 
*/ return IRQ_HANDLED; found: if (verbose > 1) printk(KERN_INFO "sonypi: event port1=0x%02x,port2=0x%02x\n", v1, v2); if (useinput) sonypi_report_input_event(event); #ifdef CONFIG_ACPI if (sonypi_acpi_device) acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event); #endif kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event, sizeof(event), &sonypi_device.fifo_lock); kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN); wake_up_interruptible(&sonypi_device.fifo_proc_list); return IRQ_HANDLED; } static int sonypi_misc_fasync(int fd, struct file *filp, int on) { return fasync_helper(fd, filp, on, &sonypi_device.fifo_async); } static int sonypi_misc_release(struct inode *inode, struct file *file) { mutex_lock(&sonypi_device.lock); sonypi_device.open_count--; mutex_unlock(&sonypi_device.lock); return 0; } static int sonypi_misc_open(struct inode *inode, struct file *file) { mutex_lock(&sonypi_device.lock); /* Flush input queue on first open */ if (!sonypi_device.open_count) kfifo_reset(&sonypi_device.fifo); sonypi_device.open_count++; mutex_unlock(&sonypi_device.lock); return 0; } static ssize_t sonypi_misc_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { ssize_t ret; unsigned char c; if ((kfifo_len(&sonypi_device.fifo) == 0) && (file->f_flags & O_NONBLOCK)) return -EAGAIN; ret = wait_event_interruptible(sonypi_device.fifo_proc_list, kfifo_len(&sonypi_device.fifo) != 0); if (ret) return ret; while (ret < count && (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c), &sonypi_device.fifo_lock) == sizeof(c))) { if (put_user(c, buf++)) return -EFAULT; ret++; } if (ret > 0) { struct inode *inode = file->f_path.dentry->d_inode; inode->i_atime = current_fs_time(inode->i_sb); } return ret; } static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) { poll_wait(file, &sonypi_device.fifo_proc_list, wait); if (kfifo_len(&sonypi_device.fifo)) return POLLIN | POLLRDNORM; return 0; } static long sonypi_misc_ioctl(struct file *fp, 
unsigned int cmd, unsigned long arg) { long ret = 0; void __user *argp = (void __user *)arg; u8 val8; u16 val16; mutex_lock(&sonypi_device.lock); switch (cmd) { case SONYPI_IOCGBRT: if (sonypi_ec_read(SONYPI_LCD_LIGHT, &val8)) { ret = -EIO; break; } if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCSBRT: if (copy_from_user(&val8, argp, sizeof(val8))) { ret = -EFAULT; break; } if (sonypi_ec_write(SONYPI_LCD_LIGHT, val8)) ret = -EIO; break; case SONYPI_IOCGBAT1CAP: if (ec_read16(SONYPI_BAT1_FULL, &val16)) { ret = -EIO; break; } if (copy_to_user(argp, &val16, sizeof(val16))) ret = -EFAULT; break; case SONYPI_IOCGBAT1REM: if (ec_read16(SONYPI_BAT1_LEFT, &val16)) { ret = -EIO; break; } if (copy_to_user(argp, &val16, sizeof(val16))) ret = -EFAULT; break; case SONYPI_IOCGBAT2CAP: if (ec_read16(SONYPI_BAT2_FULL, &val16)) { ret = -EIO; break; } if (copy_to_user(argp, &val16, sizeof(val16))) ret = -EFAULT; break; case SONYPI_IOCGBAT2REM: if (ec_read16(SONYPI_BAT2_LEFT, &val16)) { ret = -EIO; break; } if (copy_to_user(argp, &val16, sizeof(val16))) ret = -EFAULT; break; case SONYPI_IOCGBATFLAGS: if (sonypi_ec_read(SONYPI_BAT_FLAGS, &val8)) { ret = -EIO; break; } val8 &= 0x07; if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCGBLUE: val8 = sonypi_device.bluetooth_power; if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCSBLUE: if (copy_from_user(&val8, argp, sizeof(val8))) { ret = -EFAULT; break; } sonypi_setbluetoothpower(val8); break; /* FAN Controls */ case SONYPI_IOCGFAN: if (sonypi_ec_read(SONYPI_FAN0_STATUS, &val8)) { ret = -EIO; break; } if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCSFAN: if (copy_from_user(&val8, argp, sizeof(val8))) { ret = -EFAULT; break; } if (sonypi_ec_write(SONYPI_FAN0_STATUS, val8)) ret = -EIO; break; /* GET Temperature (useful under APM) */ case SONYPI_IOCGTEMP: if (sonypi_ec_read(SONYPI_TEMP_STATUS, &val8)) { ret = 
-EIO; break; } if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; default: ret = -EINVAL; } mutex_unlock(&sonypi_device.lock); return ret; } static const struct file_operations sonypi_misc_fops = { .owner = THIS_MODULE, .read = sonypi_misc_read, .poll = sonypi_misc_poll, .open = sonypi_misc_open, .release = sonypi_misc_release, .fasync = sonypi_misc_fasync, .unlocked_ioctl = sonypi_misc_ioctl, .llseek = no_llseek, }; static struct miscdevice sonypi_misc_device = { .minor = MISC_DYNAMIC_MINOR, .name = "sonypi", .fops = &sonypi_misc_fops, }; static void sonypi_enable(unsigned int camera_on) { switch (sonypi_device.model) { case SONYPI_DEVICE_MODEL_TYPE1: sonypi_type1_srs(); break; case SONYPI_DEVICE_MODEL_TYPE2: sonypi_type2_srs(); break; case SONYPI_DEVICE_MODEL_TYPE3: sonypi_type3_srs(); break; } sonypi_call1(0x82); sonypi_call2(0x81, 0xff); sonypi_call1(compat ? 0x92 : 0x82); /* Enable ACPI mode to get Fn key events */ if (!SONYPI_ACPI_ACTIVE && fnkeyinit) outb(0xf0, 0xb2); if (camera && camera_on) sonypi_camera_on(); } static int sonypi_disable(void) { sonypi_call2(0x81, 0); /* make sure we don't get any more events */ if (camera) sonypi_camera_off(); /* disable ACPI mode */ if (!SONYPI_ACPI_ACTIVE && fnkeyinit) outb(0xf1, 0xb2); switch (sonypi_device.model) { case SONYPI_DEVICE_MODEL_TYPE1: sonypi_type1_dis(); break; case SONYPI_DEVICE_MODEL_TYPE2: sonypi_type2_dis(); break; case SONYPI_DEVICE_MODEL_TYPE3: sonypi_type3_dis(); break; } return 0; } #ifdef CONFIG_ACPI static int sonypi_acpi_add(struct acpi_device *device) { sonypi_acpi_device = device; strcpy(acpi_device_name(device), "Sony laptop hotkeys"); strcpy(acpi_device_class(device), "sony/hotkey"); return 0; } static int sonypi_acpi_remove(struct acpi_device *device, int type) { sonypi_acpi_device = NULL; return 0; } static const struct acpi_device_id sonypi_device_ids[] = { {"SNY6001", 0}, {"", 0}, }; static struct acpi_driver sonypi_acpi_driver = { .name = "sonypi", .class = "hkey", .ids 
= sonypi_device_ids, .ops = { .add = sonypi_acpi_add, .remove = sonypi_acpi_remove, }, }; #endif static int __devinit sonypi_create_input_devices(struct platform_device *pdev) { struct input_dev *jog_dev; struct input_dev *key_dev; int i; int error; sonypi_device.input_jog_dev = jog_dev = input_allocate_device(); if (!jog_dev) return -ENOMEM; jog_dev->name = "Sony Vaio Jogdial"; jog_dev->id.bustype = BUS_ISA; jog_dev->id.vendor = PCI_VENDOR_ID_SONY; jog_dev->dev.parent = &pdev->dev; jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE); jog_dev->relbit[0] = BIT_MASK(REL_WHEEL); sonypi_device.input_key_dev = key_dev = input_allocate_device(); if (!key_dev) { error = -ENOMEM; goto err_free_jogdev; } key_dev->name = "Sony Vaio Keys"; key_dev->id.bustype = BUS_ISA; key_dev->id.vendor = PCI_VENDOR_ID_SONY; key_dev->dev.parent = &pdev->dev; /* Initialize the Input Drivers: special keys */ key_dev->evbit[0] = BIT_MASK(EV_KEY); for (i = 0; sonypi_inputkeys[i].sonypiev; i++) if (sonypi_inputkeys[i].inputev) set_bit(sonypi_inputkeys[i].inputev, key_dev->keybit); error = input_register_device(jog_dev); if (error) goto err_free_keydev; error = input_register_device(key_dev); if (error) goto err_unregister_jogdev; return 0; err_unregister_jogdev: input_unregister_device(jog_dev); /* Set to NULL so we don't free it again below */ jog_dev = NULL; err_free_keydev: input_free_device(key_dev); sonypi_device.input_key_dev = NULL; err_free_jogdev: input_free_device(jog_dev); sonypi_device.input_jog_dev = NULL; return error; } static int __devinit sonypi_setup_ioports(struct sonypi_device *dev, const struct sonypi_ioport_list *ioport_list) { /* try to detect if sony-laptop is being used and thus * has already requested one of the known ioports. * As in the deprecated check_region this is racy has we have * multiple ioports available and one of them can be requested * between this check and the subsequent request. 
Anyway, as an * attempt to be some more user-friendly as we currently are, * this is enough. */ const struct sonypi_ioport_list *check = ioport_list; while (check_ioport && check->port1) { if (!request_region(check->port1, sonypi_device.region_size, "Sony Programmable I/O Device Check")) { printk(KERN_ERR "sonypi: ioport 0x%.4x busy, using sony-laptop? " "if not use check_ioport=0\n", check->port1); return -EBUSY; } release_region(check->port1, sonypi_device.region_size); check++; } while (ioport_list->port1) { if (request_region(ioport_list->port1, sonypi_device.region_size, "Sony Programmable I/O Device")) { dev->ioport1 = ioport_list->port1; dev->ioport2 = ioport_list->port2; return 0; } ioport_list++; } return -EBUSY; } static int __devinit sonypi_setup_irq(struct sonypi_device *dev, const struct sonypi_irq_list *irq_list) { while (irq_list->irq) { if (!request_irq(irq_list->irq, sonypi_irq, IRQF_SHARED, "sonypi", sonypi_irq)) { dev->irq = irq_list->irq; dev->bits = irq_list->bits; return 0; } irq_list++; } return -EBUSY; } static void __devinit sonypi_display_info(void) { printk(KERN_INFO "sonypi: detected type%d model, " "verbose = %d, fnkeyinit = %s, camera = %s, " "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n", sonypi_device.model, verbose, fnkeyinit ? "on" : "off", camera ? "on" : "off", compat ? "on" : "off", mask, useinput ? "on" : "off", SONYPI_ACPI_ACTIVE ? 
"on" : "off"); printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n", sonypi_device.irq, sonypi_device.ioport1, sonypi_device.ioport2); if (minor == -1) printk(KERN_INFO "sonypi: device allocated minor is %d\n", sonypi_misc_device.minor); } static int __devinit sonypi_probe(struct platform_device *dev) { const struct sonypi_ioport_list *ioport_list; const struct sonypi_irq_list *irq_list; struct pci_dev *pcidev; int error; printk(KERN_WARNING "sonypi: please try the sony-laptop module instead " "and report failures, see also " "http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n"); spin_lock_init(&sonypi_device.fifo_lock); error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL); if (error) { printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); return error; } init_waitqueue_head(&sonypi_device.fifo_proc_list); mutex_init(&sonypi_device.lock); sonypi_device.bluetooth_power = -1; if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL))) sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1; else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, NULL))) sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3; else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, NULL))) sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3; else sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2; if (pcidev && pci_enable_device(pcidev)) { printk(KERN_ERR "sonypi: pci_enable_device failed\n"); error = -EIO; goto err_put_pcidev; } sonypi_device.dev = pcidev; if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) { ioport_list = sonypi_type1_ioport_list; sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE; sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET; irq_list = sonypi_type1_irq_list; } else if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) { ioport_list = sonypi_type2_ioport_list; sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE; sonypi_device.evtype_offset = 
SONYPI_TYPE2_EVTYPE_OFFSET; irq_list = sonypi_type2_irq_list; } else { ioport_list = sonypi_type3_ioport_list; sonypi_device.region_size = SONYPI_TYPE3_REGION_SIZE; sonypi_device.evtype_offset = SONYPI_TYPE3_EVTYPE_OFFSET; irq_list = sonypi_type3_irq_list; } error = sonypi_setup_ioports(&sonypi_device, ioport_list); if (error) { printk(KERN_ERR "sonypi: failed to request ioports\n"); goto err_disable_pcidev; } error = sonypi_setup_irq(&sonypi_device, irq_list); if (error) { printk(KERN_ERR "sonypi: request_irq failed\n"); goto err_free_ioports; } if (minor != -1) sonypi_misc_device.minor = minor; error = misc_register(&sonypi_misc_device); if (error) { printk(KERN_ERR "sonypi: misc_register failed\n"); goto err_free_irq; } sonypi_display_info(); if (useinput) { error = sonypi_create_input_devices(dev); if (error) { printk(KERN_ERR "sonypi: failed to create input devices\n"); goto err_miscdev_unregister; } spin_lock_init(&sonypi_device.input_fifo_lock); error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE, GFP_KERNEL); if (error) { printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); goto err_inpdev_unregister; } INIT_WORK(&sonypi_device.input_work, input_keyrelease); } sonypi_enable(0); return 0; err_inpdev_unregister: input_unregister_device(sonypi_device.input_key_dev); input_unregister_device(sonypi_device.input_jog_dev); err_miscdev_unregister: misc_deregister(&sonypi_misc_device); err_free_irq: free_irq(sonypi_device.irq, sonypi_irq); err_free_ioports: release_region(sonypi_device.ioport1, sonypi_device.region_size); err_disable_pcidev: if (pcidev) pci_disable_device(pcidev); err_put_pcidev: pci_dev_put(pcidev); kfifo_free(&sonypi_device.fifo); return error; } static int __devexit sonypi_remove(struct platform_device *dev) { sonypi_disable(); synchronize_irq(sonypi_device.irq); flush_work_sync(&sonypi_device.input_work); if (useinput) { input_unregister_device(sonypi_device.input_key_dev); input_unregister_device(sonypi_device.input_jog_dev); 
kfifo_free(&sonypi_device.input_fifo); } misc_deregister(&sonypi_misc_device); free_irq(sonypi_device.irq, sonypi_irq); release_region(sonypi_device.ioport1, sonypi_device.region_size); if (sonypi_device.dev) { pci_disable_device(sonypi_device.dev); pci_dev_put(sonypi_device.dev); } kfifo_free(&sonypi_device.fifo); return 0; } #ifdef CONFIG_PM static int old_camera_power; static int sonypi_suspend(struct platform_device *dev, pm_message_t state) { old_camera_power = sonypi_device.camera_power; sonypi_disable(); return 0; } static int sonypi_resume(struct platform_device *dev) { sonypi_enable(old_camera_power); return 0; } #else #define sonypi_suspend NULL #define sonypi_resume NULL #endif static void sonypi_shutdown(struct platform_device *dev) { sonypi_disable(); } static struct platform_driver sonypi_driver = { .driver = { .name = "sonypi", .owner = THIS_MODULE, }, .probe = sonypi_probe, .remove = __devexit_p(sonypi_remove), .shutdown = sonypi_shutdown, .suspend = sonypi_suspend, .resume = sonypi_resume, }; static struct platform_device *sonypi_platform_device; static struct dmi_system_id __initdata sonypi_dmi_table[] = { { .ident = "Sony Vaio", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"), }, }, { .ident = "Sony Vaio", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"), }, }, { } }; static int __init sonypi_init(void) { int error; printk(KERN_INFO "sonypi: Sony Programmable I/O Controller Driver v%s.\n", SONYPI_DRIVER_VERSION); if (!dmi_check_system(sonypi_dmi_table)) return -ENODEV; error = platform_driver_register(&sonypi_driver); if (error) return error; sonypi_platform_device = platform_device_alloc("sonypi", -1); if (!sonypi_platform_device) { error = -ENOMEM; goto err_driver_unregister; } error = platform_device_add(sonypi_platform_device); if (error) goto err_free_device; #ifdef CONFIG_ACPI if (acpi_bus_register_driver(&sonypi_acpi_driver) >= 0) 
acpi_driver_registered = 1; #endif return 0; err_free_device: platform_device_put(sonypi_platform_device); err_driver_unregister: platform_driver_unregister(&sonypi_driver); return error; } static void __exit sonypi_exit(void) { #ifdef CONFIG_ACPI if (acpi_driver_registered) acpi_bus_unregister_driver(&sonypi_acpi_driver); #endif platform_device_unregister(sonypi_platform_device); platform_driver_unregister(&sonypi_driver); printk(KERN_INFO "sonypi: removed.\n"); } module_init(sonypi_init); module_exit(sonypi_exit);
gpl-2.0
yank555-lu/Hammerhead-3.4-lollipop
kernel/tracepoint.c
3686
20207
/* * Copyright (C) 2008 Mathieu Desnoyers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/mutex.h> #include <linux/types.h> #include <linux/jhash.h> #include <linux/list.h> #include <linux/rcupdate.h> #include <linux/tracepoint.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/static_key.h> extern struct tracepoint * const __start___tracepoints_ptrs[]; extern struct tracepoint * const __stop___tracepoints_ptrs[]; /* Set to 1 to enable tracepoint debug output */ static const int tracepoint_debug; /* * Tracepoints mutex protects the builtin and module tracepoints and the hash * table, as well as the local module list. */ static DEFINE_MUTEX(tracepoints_mutex); #ifdef CONFIG_MODULES /* Local list of struct module */ static LIST_HEAD(tracepoint_module_list); #endif /* CONFIG_MODULES */ /* * Tracepoint hash table, containing the active tracepoints. * Protected by tracepoints_mutex. */ #define TRACEPOINT_HASH_BITS 6 #define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS) static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; /* * Note about RCU : * It is used to delay the free of multiple probes array until a quiescent * state is reached. * Tracepoint entries modifications are protected by the tracepoints_mutex. 
*/ struct tracepoint_entry { struct hlist_node hlist; struct tracepoint_func *funcs; int refcount; /* Number of times armed. 0 if disarmed. */ char name[0]; }; struct tp_probes { union { struct rcu_head rcu; struct list_head list; } u; struct tracepoint_func probes[0]; }; static inline void *allocate_probes(int count) { struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func) + sizeof(struct tp_probes), GFP_KERNEL); return p == NULL ? NULL : p->probes; } static void rcu_free_old_probes(struct rcu_head *head) { kfree(container_of(head, struct tp_probes, u.rcu)); } static inline void release_probes(struct tracepoint_func *old) { if (old) { struct tp_probes *tp_probes = container_of(old, struct tp_probes, probes[0]); call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes); } } static void debug_print_probes(struct tracepoint_entry *entry) { int i; if (!tracepoint_debug || !entry->funcs) return; for (i = 0; entry->funcs[i].func; i++) printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func); } static struct tracepoint_func * tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe, void *data) { int nr_probes = 0; struct tracepoint_func *old, *new; WARN_ON(!probe); debug_print_probes(entry); old = entry->funcs; if (old) { /* (N -> N+1), (N != 0, 1) probes */ for (nr_probes = 0; old[nr_probes].func; nr_probes++) if (old[nr_probes].func == probe && old[nr_probes].data == data) return ERR_PTR(-EEXIST); } /* + 2 : one for new probe, one for NULL func */ new = allocate_probes(nr_probes + 2); if (new == NULL) return ERR_PTR(-ENOMEM); if (old) memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); new[nr_probes].func = probe; new[nr_probes].data = data; new[nr_probes + 1].func = NULL; entry->refcount = nr_probes + 1; entry->funcs = new; debug_print_probes(entry); return old; } static void * tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe, void *data) { int nr_probes = 0, nr_del = 0, i; struct tracepoint_func *old, 
*new; old = entry->funcs; if (!old) return ERR_PTR(-ENOENT); debug_print_probes(entry); /* (N -> M), (N > 1, M >= 0) probes */ for (nr_probes = 0; old[nr_probes].func; nr_probes++) { if (!probe || (old[nr_probes].func == probe && old[nr_probes].data == data)) nr_del++; } if (nr_probes - nr_del == 0) { /* N -> 0, (N > 1) */ entry->funcs = NULL; entry->refcount = 0; debug_print_probes(entry); return old; } else { int j = 0; /* N -> M, (N > 1, M > 0) */ /* + 1 for NULL */ new = allocate_probes(nr_probes - nr_del + 1); if (new == NULL) return ERR_PTR(-ENOMEM); for (i = 0; old[i].func; i++) if (probe && (old[i].func != probe || old[i].data != data)) new[j++] = old[i]; new[nr_probes - nr_del].func = NULL; entry->refcount = nr_probes - nr_del; entry->funcs = new; } debug_print_probes(entry); return old; } /* * Get tracepoint if the tracepoint is present in the tracepoint hash table. * Must be called with tracepoints_mutex held. * Returns NULL if not present. */ static struct tracepoint_entry *get_tracepoint(const char *name) { struct hlist_head *head; struct hlist_node *node; struct tracepoint_entry *e; u32 hash = jhash(name, strlen(name), 0); head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; hlist_for_each_entry(e, node, head, hlist) { if (!strcmp(name, e->name)) return e; } return NULL; } /* * Add the tracepoint to the tracepoint hash table. Must be called with * tracepoints_mutex held. */ static struct tracepoint_entry *add_tracepoint(const char *name) { struct hlist_head *head; struct hlist_node *node; struct tracepoint_entry *e; size_t name_len = strlen(name) + 1; u32 hash = jhash(name, name_len-1, 0); head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; hlist_for_each_entry(e, node, head, hlist) { if (!strcmp(name, e->name)) { printk(KERN_NOTICE "tracepoint %s busy\n", name); return ERR_PTR(-EEXIST); /* Already there */ } } /* * Using kmalloc here to allocate a variable length element. Could * cause some memory fragmentation if overused. 
*/ e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL); if (!e) return ERR_PTR(-ENOMEM); memcpy(&e->name[0], name, name_len); e->funcs = NULL; e->refcount = 0; hlist_add_head(&e->hlist, head); return e; } /* * Remove the tracepoint from the tracepoint hash table. Must be called with * mutex_lock held. */ static inline void remove_tracepoint(struct tracepoint_entry *e) { hlist_del(&e->hlist); kfree(e); } /* * Sets the probe callback corresponding to one tracepoint. */ static void set_tracepoint(struct tracepoint_entry **entry, struct tracepoint *elem, int active) { WARN_ON(strcmp((*entry)->name, elem->name) != 0); if (elem->regfunc && !static_key_enabled(&elem->key) && active) elem->regfunc(); else if (elem->unregfunc && static_key_enabled(&elem->key) && !active) elem->unregfunc(); /* * rcu_assign_pointer has a smp_wmb() which makes sure that the new * probe callbacks array is consistent before setting a pointer to it. * This array is referenced by __DO_TRACE from * include/linux/tracepoints.h. A matching smp_read_barrier_depends() * is used. */ rcu_assign_pointer(elem->funcs, (*entry)->funcs); if (active && !static_key_enabled(&elem->key)) static_key_slow_inc(&elem->key); else if (!active && static_key_enabled(&elem->key)) static_key_slow_dec(&elem->key); } /* * Disable a tracepoint and its probe callback. * Note: only waiting an RCU period after setting elem->call to the empty * function insures that the original callback is not used anymore. This insured * by preempt_disable around the call site. */ static void disable_tracepoint(struct tracepoint *elem) { if (elem->unregfunc && static_key_enabled(&elem->key)) elem->unregfunc(); if (static_key_enabled(&elem->key)) static_key_slow_dec(&elem->key); rcu_assign_pointer(elem->funcs, NULL); } /** * tracepoint_update_probe_range - Update a probe range * @begin: beginning of the range * @end: end of the range * * Updates the probe callback corresponding to a range of tracepoints. 
* Called with tracepoints_mutex held. */ static void tracepoint_update_probe_range(struct tracepoint * const *begin, struct tracepoint * const *end) { struct tracepoint * const *iter; struct tracepoint_entry *mark_entry; if (!begin) return; for (iter = begin; iter < end; iter++) { mark_entry = get_tracepoint((*iter)->name); if (mark_entry) { set_tracepoint(&mark_entry, *iter, !!mark_entry->refcount); } else { disable_tracepoint(*iter); } } } #ifdef CONFIG_MODULES void module_update_tracepoints(void) { struct tp_module *tp_mod; list_for_each_entry(tp_mod, &tracepoint_module_list, list) tracepoint_update_probe_range(tp_mod->tracepoints_ptrs, tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints); } #else /* CONFIG_MODULES */ void module_update_tracepoints(void) { } #endif /* CONFIG_MODULES */ /* * Update probes, removing the faulty probes. * Called with tracepoints_mutex held. */ static void tracepoint_update_probes(void) { /* Core kernel tracepoints */ tracepoint_update_probe_range(__start___tracepoints_ptrs, __stop___tracepoints_ptrs); /* tracepoints in modules. */ module_update_tracepoints(); } static struct tracepoint_func * tracepoint_add_probe(const char *name, void *probe, void *data) { struct tracepoint_entry *entry; struct tracepoint_func *old; entry = get_tracepoint(name); if (!entry) { entry = add_tracepoint(name); if (IS_ERR(entry)) return (struct tracepoint_func *)entry; } old = tracepoint_entry_add_probe(entry, probe, data); if (IS_ERR(old) && !entry->refcount) remove_tracepoint(entry); return old; } /** * tracepoint_probe_register - Connect a probe to a tracepoint * @name: tracepoint name * @probe: probe handler * * Returns 0 if ok, error value on error. * The probe address must at least be aligned on the architecture pointer size. 
*/ int tracepoint_probe_register(const char *name, void *probe, void *data) { struct tracepoint_func *old; mutex_lock(&tracepoints_mutex); old = tracepoint_add_probe(name, probe, data); if (IS_ERR(old)) { mutex_unlock(&tracepoints_mutex); return PTR_ERR(old); } tracepoint_update_probes(); /* may update entry */ mutex_unlock(&tracepoints_mutex); release_probes(old); return 0; } EXPORT_SYMBOL_GPL(tracepoint_probe_register); static struct tracepoint_func * tracepoint_remove_probe(const char *name, void *probe, void *data) { struct tracepoint_entry *entry; struct tracepoint_func *old; entry = get_tracepoint(name); if (!entry) return ERR_PTR(-ENOENT); old = tracepoint_entry_remove_probe(entry, probe, data); if (IS_ERR(old)) return old; if (!entry->refcount) remove_tracepoint(entry); return old; } /** * tracepoint_probe_unregister - Disconnect a probe from a tracepoint * @name: tracepoint name * @probe: probe function pointer * * We do not need to call a synchronize_sched to make sure the probes have * finished running before doing a module unload, because the module unload * itself uses stop_machine(), which insures that every preempt disabled section * have finished. 
*/ int tracepoint_probe_unregister(const char *name, void *probe, void *data) { struct tracepoint_func *old; mutex_lock(&tracepoints_mutex); old = tracepoint_remove_probe(name, probe, data); if (IS_ERR(old)) { mutex_unlock(&tracepoints_mutex); return PTR_ERR(old); } tracepoint_update_probes(); /* may update entry */ mutex_unlock(&tracepoints_mutex); release_probes(old); return 0; } EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); static LIST_HEAD(old_probes); static int need_update; static void tracepoint_add_old_probes(void *old) { need_update = 1; if (old) { struct tp_probes *tp_probes = container_of(old, struct tp_probes, probes[0]); list_add(&tp_probes->u.list, &old_probes); } } /** * tracepoint_probe_register_noupdate - register a probe but not connect * @name: tracepoint name * @probe: probe handler * * caller must call tracepoint_probe_update_all() */ int tracepoint_probe_register_noupdate(const char *name, void *probe, void *data) { struct tracepoint_func *old; mutex_lock(&tracepoints_mutex); old = tracepoint_add_probe(name, probe, data); if (IS_ERR(old)) { mutex_unlock(&tracepoints_mutex); return PTR_ERR(old); } tracepoint_add_old_probes(old); mutex_unlock(&tracepoints_mutex); return 0; } EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate); /** * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect * @name: tracepoint name * @probe: probe function pointer * * caller must call tracepoint_probe_update_all() */ int tracepoint_probe_unregister_noupdate(const char *name, void *probe, void *data) { struct tracepoint_func *old; mutex_lock(&tracepoints_mutex); old = tracepoint_remove_probe(name, probe, data); if (IS_ERR(old)) { mutex_unlock(&tracepoints_mutex); return PTR_ERR(old); } tracepoint_add_old_probes(old); mutex_unlock(&tracepoints_mutex); return 0; } EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate); /** * tracepoint_probe_update_all - update tracepoints */ void tracepoint_probe_update_all(void) { LIST_HEAD(release_probes); 
struct tp_probes *pos, *next; mutex_lock(&tracepoints_mutex); if (!need_update) { mutex_unlock(&tracepoints_mutex); return; } if (!list_empty(&old_probes)) list_replace_init(&old_probes, &release_probes); need_update = 0; tracepoint_update_probes(); mutex_unlock(&tracepoints_mutex); list_for_each_entry_safe(pos, next, &release_probes, u.list) { list_del(&pos->u.list); call_rcu_sched(&pos->u.rcu, rcu_free_old_probes); } } EXPORT_SYMBOL_GPL(tracepoint_probe_update_all); /** * tracepoint_get_iter_range - Get a next tracepoint iterator given a range. * @tracepoint: current tracepoints (in), next tracepoint (out) * @begin: beginning of the range * @end: end of the range * * Returns whether a next tracepoint has been found (1) or not (0). * Will return the first tracepoint in the range if the input tracepoint is * NULL. */ static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint, struct tracepoint * const *begin, struct tracepoint * const *end) { if (!*tracepoint && begin != end) { *tracepoint = begin; return 1; } if (*tracepoint >= begin && *tracepoint < end) return 1; return 0; } #ifdef CONFIG_MODULES static void tracepoint_get_iter(struct tracepoint_iter *iter) { int found = 0; struct tp_module *iter_mod; /* Core kernel tracepoints */ if (!iter->module) { found = tracepoint_get_iter_range(&iter->tracepoint, __start___tracepoints_ptrs, __stop___tracepoints_ptrs); if (found) goto end; } /* Tracepoints in modules */ mutex_lock(&tracepoints_mutex); list_for_each_entry(iter_mod, &tracepoint_module_list, list) { /* * Sorted module list */ if (iter_mod < iter->module) continue; else if (iter_mod > iter->module) iter->tracepoint = NULL; found = tracepoint_get_iter_range(&iter->tracepoint, iter_mod->tracepoints_ptrs, iter_mod->tracepoints_ptrs + iter_mod->num_tracepoints); if (found) { iter->module = iter_mod; break; } } mutex_unlock(&tracepoints_mutex); end: if (!found) tracepoint_iter_reset(iter); } #else /* CONFIG_MODULES */ static void 
tracepoint_get_iter(struct tracepoint_iter *iter) { int found = 0; /* Core kernel tracepoints */ found = tracepoint_get_iter_range(&iter->tracepoint, __start___tracepoints_ptrs, __stop___tracepoints_ptrs); if (!found) tracepoint_iter_reset(iter); } #endif /* CONFIG_MODULES */ void tracepoint_iter_start(struct tracepoint_iter *iter) { tracepoint_get_iter(iter); } EXPORT_SYMBOL_GPL(tracepoint_iter_start); void tracepoint_iter_next(struct tracepoint_iter *iter) { iter->tracepoint++; /* * iter->tracepoint may be invalid because we blindly incremented it. * Make sure it is valid by marshalling on the tracepoints, getting the * tracepoints from following modules if necessary. */ tracepoint_get_iter(iter); } EXPORT_SYMBOL_GPL(tracepoint_iter_next); void tracepoint_iter_stop(struct tracepoint_iter *iter) { } EXPORT_SYMBOL_GPL(tracepoint_iter_stop); void tracepoint_iter_reset(struct tracepoint_iter *iter) { #ifdef CONFIG_MODULES iter->module = NULL; #endif /* CONFIG_MODULES */ iter->tracepoint = NULL; } EXPORT_SYMBOL_GPL(tracepoint_iter_reset); #ifdef CONFIG_MODULES static int tracepoint_module_coming(struct module *mod) { struct tp_module *tp_mod, *iter; int ret = 0; /* * We skip modules that taint the kernel, especially those with different * module headers (for forced load), to make sure we don't cause a crash. * Staging and out-of-tree GPL modules are fine. */ if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP))) return 0; mutex_lock(&tracepoints_mutex); tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); if (!tp_mod) { ret = -ENOMEM; goto end; } tp_mod->num_tracepoints = mod->num_tracepoints; tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs; /* * tracepoint_module_list is kept sorted by struct module pointer * address for iteration on tracepoints from a seq_file that can release * the mutex between calls. 
*/ list_for_each_entry_reverse(iter, &tracepoint_module_list, list) { BUG_ON(iter == tp_mod); /* Should never be in the list twice */ if (iter < tp_mod) { /* We belong to the location right after iter. */ list_add(&tp_mod->list, &iter->list); goto module_added; } } /* We belong to the beginning of the list */ list_add(&tp_mod->list, &tracepoint_module_list); module_added: tracepoint_update_probe_range(mod->tracepoints_ptrs, mod->tracepoints_ptrs + mod->num_tracepoints); end: mutex_unlock(&tracepoints_mutex); return ret; } static int tracepoint_module_going(struct module *mod) { struct tp_module *pos; mutex_lock(&tracepoints_mutex); tracepoint_update_probe_range(mod->tracepoints_ptrs, mod->tracepoints_ptrs + mod->num_tracepoints); list_for_each_entry(pos, &tracepoint_module_list, list) { if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) { list_del(&pos->list); kfree(pos); break; } } /* * In the case of modules that were tainted at "coming", we'll simply * walk through the list without finding it. We cannot use the "tainted" * flag on "going", in case a module taints the kernel only after being * loaded. 
*/ mutex_unlock(&tracepoints_mutex); return 0; } int tracepoint_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; int ret = 0; switch (val) { case MODULE_STATE_COMING: ret = tracepoint_module_coming(mod); break; case MODULE_STATE_LIVE: break; case MODULE_STATE_GOING: ret = tracepoint_module_going(mod); break; } return ret; } struct notifier_block tracepoint_module_nb = { .notifier_call = tracepoint_module_notify, .priority = 0, }; static int init_tracepoints(void) { return register_module_notifier(&tracepoint_module_nb); } __initcall(init_tracepoints); #endif /* CONFIG_MODULES */ #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ static int sys_tracepoint_refcount; void syscall_regfunc(void) { unsigned long flags; struct task_struct *g, *t; if (!sys_tracepoint_refcount) { read_lock_irqsave(&tasklist_lock, flags); do_each_thread(g, t) { /* Skip kernel threads. */ if (t->mm) set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT); } while_each_thread(g, t); read_unlock_irqrestore(&tasklist_lock, flags); } sys_tracepoint_refcount++; } void syscall_unregfunc(void) { unsigned long flags; struct task_struct *g, *t; sys_tracepoint_refcount--; if (!sys_tracepoint_refcount) { read_lock_irqsave(&tasklist_lock, flags); do_each_thread(g, t) { clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT); } while_each_thread(g, t); read_unlock_irqrestore(&tasklist_lock, flags); } } #endif
gpl-2.0
lookflying/linux-kernel
drivers/isdn/hisax/callc.c
4710
48145
/* $Id: callc.c,v 2.59.2.4 2004/02/11 13:21:32 keil Exp $ * * Author Karsten Keil * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * For changes and modifications please read * Documentation/isdn/HiSax.cert * * based on the teles driver from Jan den Ouden * * Thanks to Jan den Ouden * Fritz Elfert * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include "hisax.h" #include <linux/isdn/capicmd.h> const char *lli_revision = "$Revision: 2.59.2.4 $"; extern struct IsdnCard cards[]; static int init_b_st(struct Channel *chanp, int incoming); static void release_b_st(struct Channel *chanp); static struct Fsm callcfsm; static int chancount; /* experimental REJECT after ALERTING for CALLBACK to beat the 4s delay */ #define ALERT_REJECT 0 /* Value to delay the sending of the first B-channel packet after CONNECT * here is no value given by ITU, but experience shows that 300 ms will * work on many networks, if you or your other side is behind local exchanges * a greater value may be recommented. If the delay is to short the first paket * will be lost and autodetect on many comercial routers goes wrong ! * You can adjust this value on runtime with * hisaxctrl <id> 2 <value> * value is in milliseconds */ #define DEFAULT_B_DELAY 300 /* Flags for remembering action done in lli */ #define FLG_START_B 0 /* * Find card with given driverId */ static inline struct IsdnCardState * hisax_findcard(int driverid) { int i; for (i = 0; i < nrcards; i++) if (cards[i].cs) if (cards[i].cs->myid == driverid) return (cards[i].cs); return (struct IsdnCardState *) 0; } static __printf(3, 4) void link_debug(struct Channel *chanp, int direction, char *fmt, ...) { va_list args; char tmp[16]; va_start(args, fmt); sprintf(tmp, "Ch%d %s ", chanp->chan, direction ? 
"LL->HL" : "HL->LL"); VHiSax_putstatus(chanp->cs, tmp, fmt, args); va_end(args); } enum { ST_NULL, /* 0 inactive */ ST_OUT_DIAL, /* 1 outgoing, SETUP send; awaiting confirm */ ST_IN_WAIT_LL, /* 2 incoming call received; wait for LL confirm */ ST_IN_ALERT_SENT, /* 3 incoming call received; ALERT send */ ST_IN_WAIT_CONN_ACK, /* 4 incoming CONNECT send; awaiting CONN_ACK */ ST_WAIT_BCONN, /* 5 CONNECT/CONN_ACK received, awaiting b-channel prot. estbl. */ ST_ACTIVE, /* 6 active, b channel prot. established */ ST_WAIT_BRELEASE, /* 7 call clear. (initiator), awaiting b channel prot. rel. */ ST_WAIT_BREL_DISC, /* 8 call clear. (receiver), DISCONNECT req. received */ ST_WAIT_DCOMMAND, /* 9 call clear. (receiver), awaiting DCHANNEL message */ ST_WAIT_DRELEASE, /* 10 DISCONNECT sent, awaiting RELEASE */ ST_WAIT_D_REL_CNF, /* 11 RELEASE sent, awaiting RELEASE confirm */ ST_IN_PROCEED_SEND, /* 12 incoming call, proceeding send */ }; #define STATE_COUNT (ST_IN_PROCEED_SEND + 1) static char *strState[] = { "ST_NULL", "ST_OUT_DIAL", "ST_IN_WAIT_LL", "ST_IN_ALERT_SENT", "ST_IN_WAIT_CONN_ACK", "ST_WAIT_BCONN", "ST_ACTIVE", "ST_WAIT_BRELEASE", "ST_WAIT_BREL_DISC", "ST_WAIT_DCOMMAND", "ST_WAIT_DRELEASE", "ST_WAIT_D_REL_CNF", "ST_IN_PROCEED_SEND", }; enum { EV_DIAL, /* 0 */ EV_SETUP_CNF, /* 1 */ EV_ACCEPTB, /* 2 */ EV_DISCONNECT_IND, /* 3 */ EV_RELEASE, /* 4 */ EV_LEASED, /* 5 */ EV_LEASED_REL, /* 6 */ EV_SETUP_IND, /* 7 */ EV_ACCEPTD, /* 8 */ EV_SETUP_CMPL_IND, /* 9 */ EV_BC_EST, /* 10 */ EV_WRITEBUF, /* 11 */ EV_HANGUP, /* 12 */ EV_BC_REL, /* 13 */ EV_CINF, /* 14 */ EV_SUSPEND, /* 15 */ EV_RESUME, /* 16 */ EV_NOSETUP_RSP, /* 17 */ EV_SETUP_ERR, /* 18 */ EV_CONNECT_ERR, /* 19 */ EV_PROCEED, /* 20 */ EV_ALERT, /* 21 */ EV_REDIR, /* 22 */ }; #define EVENT_COUNT (EV_REDIR + 1) static char *strEvent[] = { "EV_DIAL", "EV_SETUP_CNF", "EV_ACCEPTB", "EV_DISCONNECT_IND", "EV_RELEASE", "EV_LEASED", "EV_LEASED_REL", "EV_SETUP_IND", "EV_ACCEPTD", "EV_SETUP_CMPL_IND", "EV_BC_EST", "EV_WRITEBUF", 
"EV_HANGUP", "EV_BC_REL", "EV_CINF", "EV_SUSPEND", "EV_RESUME", "EV_NOSETUP_RSP", "EV_SETUP_ERR", "EV_CONNECT_ERR", "EV_PROCEED", "EV_ALERT", "EV_REDIR", }; static inline void HL_LL(struct Channel *chanp, int command) { isdn_ctrl ic; ic.driver = chanp->cs->myid; ic.command = command; ic.arg = chanp->chan; chanp->cs->iif.statcallb(&ic); } static inline void lli_deliver_cause(struct Channel *chanp) { isdn_ctrl ic; if (!chanp->proc) return; if (chanp->proc->para.cause == NO_CAUSE) return; ic.driver = chanp->cs->myid; ic.command = ISDN_STAT_CAUSE; ic.arg = chanp->chan; if (chanp->cs->protocol == ISDN_PTYPE_EURO) sprintf(ic.parm.num, "E%02X%02X", chanp->proc->para.loc & 0x7f, chanp->proc->para.cause & 0x7f); else sprintf(ic.parm.num, "%02X%02X", chanp->proc->para.loc & 0x7f, chanp->proc->para.cause & 0x7f); chanp->cs->iif.statcallb(&ic); } static inline void lli_close(struct FsmInst *fi) { struct Channel *chanp = fi->userdata; FsmChangeState(fi, ST_NULL); chanp->Flags = 0; chanp->cs->cardmsg(chanp->cs, MDL_INFO_REL, (void *) (long)chanp->chan); } static void lli_leased_in(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; isdn_ctrl ic; int ret; if (!chanp->leased) return; chanp->cs->cardmsg(chanp->cs, MDL_INFO_SETUP, (void *) (long)chanp->chan); FsmChangeState(fi, ST_IN_WAIT_LL); if (chanp->debug & 1) link_debug(chanp, 0, "STAT_ICALL_LEASED"); ic.driver = chanp->cs->myid; ic.command = ((chanp->chan < 2) ? 
ISDN_STAT_ICALL : ISDN_STAT_ICALLW); ic.arg = chanp->chan; ic.parm.setup.si1 = 7; ic.parm.setup.si2 = 0; ic.parm.setup.plan = 0; ic.parm.setup.screen = 0; sprintf(ic.parm.setup.eazmsn, "%d", chanp->chan + 1); sprintf(ic.parm.setup.phone, "LEASED%d", chanp->cs->myid); ret = chanp->cs->iif.statcallb(&ic); if (chanp->debug & 1) link_debug(chanp, 1, "statcallb ret=%d", ret); if (!ret) { chanp->cs->cardmsg(chanp->cs, MDL_INFO_REL, (void *) (long)chanp->chan); FsmChangeState(fi, ST_NULL); } } /* * Dial out */ static void lli_init_bchan_out(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; FsmChangeState(fi, ST_WAIT_BCONN); if (chanp->debug & 1) link_debug(chanp, 0, "STAT_DCONN"); HL_LL(chanp, ISDN_STAT_DCONN); init_b_st(chanp, 0); chanp->b_st->lli.l4l3(chanp->b_st, DL_ESTABLISH | REQUEST, NULL); } static void lli_prep_dialout(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; FsmDelTimer(&chanp->drel_timer, 60); FsmDelTimer(&chanp->dial_timer, 73); chanp->l2_active_protocol = chanp->l2_protocol; chanp->incoming = 0; chanp->cs->cardmsg(chanp->cs, MDL_INFO_SETUP, (void *) (long)chanp->chan); if (chanp->leased) { lli_init_bchan_out(fi, event, arg); } else { FsmChangeState(fi, ST_OUT_DIAL); chanp->d_st->lli.l4l3(chanp->d_st, CC_SETUP | REQUEST, chanp); } } static void lli_resume(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; FsmDelTimer(&chanp->drel_timer, 60); FsmDelTimer(&chanp->dial_timer, 73); chanp->l2_active_protocol = chanp->l2_protocol; chanp->incoming = 0; chanp->cs->cardmsg(chanp->cs, MDL_INFO_SETUP, (void *) (long)chanp->chan); if (chanp->leased) { lli_init_bchan_out(fi, event, arg); } else { FsmChangeState(fi, ST_OUT_DIAL); chanp->d_st->lli.l4l3(chanp->d_st, CC_RESUME | REQUEST, chanp); } } static void lli_go_active(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; isdn_ctrl ic; FsmChangeState(fi, ST_ACTIVE); chanp->data_open = !0; if 
(chanp->bcs->conmsg) strcpy(ic.parm.num, chanp->bcs->conmsg); else ic.parm.num[0] = 0; if (chanp->debug & 1) link_debug(chanp, 0, "STAT_BCONN %s", ic.parm.num); ic.driver = chanp->cs->myid; ic.command = ISDN_STAT_BCONN; ic.arg = chanp->chan; chanp->cs->iif.statcallb(&ic); chanp->cs->cardmsg(chanp->cs, MDL_INFO_CONN, (void *) (long)chanp->chan); } /* * RESUME */ /* incoming call */ static void lli_deliver_call(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; isdn_ctrl ic; int ret; chanp->cs->cardmsg(chanp->cs, MDL_INFO_SETUP, (void *) (long)chanp->chan); /* * Report incoming calls only once to linklevel, use CallFlags * which is set to 3 with each broadcast message in isdnl1.c * and resetted if a interface answered the STAT_ICALL. */ if (1) { /* for only one TEI */ FsmChangeState(fi, ST_IN_WAIT_LL); if (chanp->debug & 1) link_debug(chanp, 0, (chanp->chan < 2) ? "STAT_ICALL" : "STAT_ICALLW"); ic.driver = chanp->cs->myid; ic.command = ((chanp->chan < 2) ? ISDN_STAT_ICALL : ISDN_STAT_ICALLW); ic.arg = chanp->chan; /* * No need to return "unknown" for calls without OAD, * cause that's handled in linklevel now (replaced by '0') */ memcpy(&ic.parm.setup, &chanp->proc->para.setup, sizeof(setup_parm)); ret = chanp->cs->iif.statcallb(&ic); if (chanp->debug & 1) link_debug(chanp, 1, "statcallb ret=%d", ret); switch (ret) { case 1: /* OK, someone likes this call */ FsmDelTimer(&chanp->drel_timer, 61); FsmChangeState(fi, ST_IN_ALERT_SENT); chanp->d_st->lli.l4l3(chanp->d_st, CC_ALERTING | REQUEST, chanp->proc); break; case 5: /* direct redirect */ case 4: /* Proceeding desired */ FsmDelTimer(&chanp->drel_timer, 61); FsmChangeState(fi, ST_IN_PROCEED_SEND); chanp->d_st->lli.l4l3(chanp->d_st, CC_PROCEED_SEND | REQUEST, chanp->proc); if (ret == 5) { memcpy(&chanp->setup, &ic.parm.setup, sizeof(setup_parm)); chanp->d_st->lli.l4l3(chanp->d_st, CC_REDIR | REQUEST, chanp->proc); } break; case 2: /* Rejecting Call */ break; case 3: /* incomplete number */ 
FsmDelTimer(&chanp->drel_timer, 61); chanp->d_st->lli.l4l3(chanp->d_st, CC_MORE_INFO | REQUEST, chanp->proc); break; case 0: /* OK, nobody likes this call */ default: /* statcallb problems */ chanp->d_st->lli.l4l3(chanp->d_st, CC_IGNORE | REQUEST, chanp->proc); chanp->cs->cardmsg(chanp->cs, MDL_INFO_REL, (void *) (long)chanp->chan); FsmChangeState(fi, ST_NULL); break; } } else { chanp->d_st->lli.l4l3(chanp->d_st, CC_IGNORE | REQUEST, chanp->proc); chanp->cs->cardmsg(chanp->cs, MDL_INFO_REL, (void *) (long)chanp->chan); } } static void lli_send_dconnect(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; FsmChangeState(fi, ST_IN_WAIT_CONN_ACK); chanp->d_st->lli.l4l3(chanp->d_st, CC_SETUP | RESPONSE, chanp->proc); } static void lli_send_alert(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; FsmChangeState(fi, ST_IN_ALERT_SENT); chanp->d_st->lli.l4l3(chanp->d_st, CC_ALERTING | REQUEST, chanp->proc); } static void lli_send_redir(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; chanp->d_st->lli.l4l3(chanp->d_st, CC_REDIR | REQUEST, chanp->proc); } static void lli_init_bchan_in(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; FsmChangeState(fi, ST_WAIT_BCONN); if (chanp->debug & 1) link_debug(chanp, 0, "STAT_DCONN"); HL_LL(chanp, ISDN_STAT_DCONN); chanp->l2_active_protocol = chanp->l2_protocol; chanp->incoming = !0; init_b_st(chanp, !0); chanp->b_st->lli.l4l3(chanp->b_st, DL_ESTABLISH | REQUEST, NULL); } static void lli_setup_rsp(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->leased) { lli_init_bchan_in(fi, event, arg); } else { FsmChangeState(fi, ST_IN_WAIT_CONN_ACK); #ifdef WANT_ALERT chanp->d_st->lli.l4l3(chanp->d_st, CC_ALERTING | REQUEST, chanp->proc); #endif chanp->d_st->lli.l4l3(chanp->d_st, CC_SETUP | RESPONSE, chanp->proc); } } /* Call suspend */ static void lli_suspend(struct FsmInst *fi, 
int event, void *arg) { struct Channel *chanp = fi->userdata; chanp->d_st->lli.l4l3(chanp->d_st, CC_SUSPEND | REQUEST, chanp->proc); } /* Call clearing */ static void lli_leased_hup(struct FsmInst *fi, struct Channel *chanp) { isdn_ctrl ic; ic.driver = chanp->cs->myid; ic.command = ISDN_STAT_CAUSE; ic.arg = chanp->chan; sprintf(ic.parm.num, "L0010"); chanp->cs->iif.statcallb(&ic); if (chanp->debug & 1) link_debug(chanp, 0, "STAT_DHUP"); HL_LL(chanp, ISDN_STAT_DHUP); lli_close(fi); } static void lli_disconnect_req(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->leased) { lli_leased_hup(fi, chanp); } else { FsmChangeState(fi, ST_WAIT_DRELEASE); if (chanp->proc) chanp->proc->para.cause = 0x10; /* Normal Call Clearing */ chanp->d_st->lli.l4l3(chanp->d_st, CC_DISCONNECT | REQUEST, chanp->proc); } } static void lli_disconnect_reject(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->leased) { lli_leased_hup(fi, chanp); } else { FsmChangeState(fi, ST_WAIT_DRELEASE); if (chanp->proc) chanp->proc->para.cause = 0x15; /* Call Rejected */ chanp->d_st->lli.l4l3(chanp->d_st, CC_DISCONNECT | REQUEST, chanp->proc); } } static void lli_dhup_close(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->leased) { lli_leased_hup(fi, chanp); } else { if (chanp->debug & 1) link_debug(chanp, 0, "STAT_DHUP"); lli_deliver_cause(chanp); HL_LL(chanp, ISDN_STAT_DHUP); lli_close(fi); } } static void lli_reject_req(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->leased) { lli_leased_hup(fi, chanp); return; } #ifndef ALERT_REJECT if (chanp->proc) chanp->proc->para.cause = 0x15; /* Call Rejected */ chanp->d_st->lli.l4l3(chanp->d_st, CC_REJECT | REQUEST, chanp->proc); lli_dhup_close(fi, event, arg); #else FsmRestartTimer(&chanp->drel_timer, 40, EV_HANGUP, NULL, 63); FsmChangeState(fi, ST_IN_ALERT_SENT); 
chanp->d_st->lli.l4l3(chanp->d_st, CC_ALERTING | REQUEST, chanp->proc); #endif } static void lli_disconn_bchan(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; chanp->data_open = 0; FsmChangeState(fi, ST_WAIT_BRELEASE); chanp->b_st->lli.l4l3(chanp->b_st, DL_RELEASE | REQUEST, NULL); } static void lli_start_disc(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->leased) { lli_leased_hup(fi, chanp); } else { lli_disconnect_req(fi, event, arg); } } static void lli_rel_b_disc(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; release_b_st(chanp); lli_start_disc(fi, event, arg); } static void lli_bhup_disc(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->debug & 1) link_debug(chanp, 0, "STAT_BHUP"); HL_LL(chanp, ISDN_STAT_BHUP); lli_rel_b_disc(fi, event, arg); } static void lli_bhup_rel_b(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; FsmChangeState(fi, ST_WAIT_DCOMMAND); chanp->data_open = 0; if (chanp->debug & 1) link_debug(chanp, 0, "STAT_BHUP"); HL_LL(chanp, ISDN_STAT_BHUP); release_b_st(chanp); } static void lli_release_bchan(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; chanp->data_open = 0; FsmChangeState(fi, ST_WAIT_BREL_DISC); chanp->b_st->lli.l4l3(chanp->b_st, DL_RELEASE | REQUEST, NULL); } static void lli_rel_b_dhup(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; release_b_st(chanp); lli_dhup_close(fi, event, arg); } static void lli_bhup_dhup(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->debug & 1) link_debug(chanp, 0, "STAT_BHUP"); HL_LL(chanp, ISDN_STAT_BHUP); lli_rel_b_dhup(fi, event, arg); } static void lli_abort(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; chanp->data_open = 0; chanp->b_st->lli.l4l3(chanp->b_st, 
DL_RELEASE | REQUEST, NULL); lli_bhup_dhup(fi, event, arg); } static void lli_release_req(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->leased) { lli_leased_hup(fi, chanp); } else { FsmChangeState(fi, ST_WAIT_D_REL_CNF); chanp->d_st->lli.l4l3(chanp->d_st, CC_RELEASE | REQUEST, chanp->proc); } } static void lli_rel_b_release_req(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; release_b_st(chanp); lli_release_req(fi, event, arg); } static void lli_bhup_release_req(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->debug & 1) link_debug(chanp, 0, "STAT_BHUP"); HL_LL(chanp, ISDN_STAT_BHUP); lli_rel_b_release_req(fi, event, arg); } /* processing charge info */ static void lli_charge_info(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; isdn_ctrl ic; ic.driver = chanp->cs->myid; ic.command = ISDN_STAT_CINF; ic.arg = chanp->chan; sprintf(ic.parm.num, "%d", chanp->proc->para.chargeinfo); chanp->cs->iif.statcallb(&ic); } /* error procedures */ static void lli_dchan_not_ready(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->debug & 1) link_debug(chanp, 0, "STAT_DHUP"); HL_LL(chanp, ISDN_STAT_DHUP); } static void lli_no_setup_rsp(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->debug & 1) link_debug(chanp, 0, "STAT_DHUP"); HL_LL(chanp, ISDN_STAT_DHUP); lli_close(fi); } static void lli_error(struct FsmInst *fi, int event, void *arg) { FsmChangeState(fi, ST_WAIT_DRELEASE); } static void lli_failure_l(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; isdn_ctrl ic; FsmChangeState(fi, ST_NULL); ic.driver = chanp->cs->myid; ic.command = ISDN_STAT_CAUSE; ic.arg = chanp->chan; sprintf(ic.parm.num, "L%02X%02X", 0, 0x2f); chanp->cs->iif.statcallb(&ic); HL_LL(chanp, ISDN_STAT_DHUP); chanp->Flags = 0; 
chanp->cs->cardmsg(chanp->cs, MDL_INFO_REL, (void *) (long)chanp->chan); } static void lli_rel_b_fail(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; release_b_st(chanp); lli_failure_l(fi, event, arg); } static void lli_bhup_fail(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; if (chanp->debug & 1) link_debug(chanp, 0, "STAT_BHUP"); HL_LL(chanp, ISDN_STAT_BHUP); lli_rel_b_fail(fi, event, arg); } static void lli_failure_a(struct FsmInst *fi, int event, void *arg) { struct Channel *chanp = fi->userdata; chanp->data_open = 0; chanp->b_st->lli.l4l3(chanp->b_st, DL_RELEASE | REQUEST, NULL); lli_bhup_fail(fi, event, arg); } /* *INDENT-OFF* */ static struct FsmNode fnlist[] __initdata = { {ST_NULL, EV_DIAL, lli_prep_dialout}, {ST_NULL, EV_RESUME, lli_resume}, {ST_NULL, EV_SETUP_IND, lli_deliver_call}, {ST_NULL, EV_LEASED, lli_leased_in}, {ST_OUT_DIAL, EV_SETUP_CNF, lli_init_bchan_out}, {ST_OUT_DIAL, EV_HANGUP, lli_disconnect_req}, {ST_OUT_DIAL, EV_DISCONNECT_IND, lli_release_req}, {ST_OUT_DIAL, EV_RELEASE, lli_dhup_close}, {ST_OUT_DIAL, EV_NOSETUP_RSP, lli_no_setup_rsp}, {ST_OUT_DIAL, EV_SETUP_ERR, lli_error}, {ST_IN_WAIT_LL, EV_LEASED_REL, lli_failure_l}, {ST_IN_WAIT_LL, EV_ACCEPTD, lli_setup_rsp}, {ST_IN_WAIT_LL, EV_HANGUP, lli_reject_req}, {ST_IN_WAIT_LL, EV_DISCONNECT_IND, lli_release_req}, {ST_IN_WAIT_LL, EV_RELEASE, lli_dhup_close}, {ST_IN_WAIT_LL, EV_SETUP_IND, lli_deliver_call}, {ST_IN_WAIT_LL, EV_SETUP_ERR, lli_error}, {ST_IN_ALERT_SENT, EV_SETUP_CMPL_IND, lli_init_bchan_in}, {ST_IN_ALERT_SENT, EV_ACCEPTD, lli_send_dconnect}, {ST_IN_ALERT_SENT, EV_HANGUP, lli_disconnect_reject}, {ST_IN_ALERT_SENT, EV_DISCONNECT_IND, lli_release_req}, {ST_IN_ALERT_SENT, EV_RELEASE, lli_dhup_close}, {ST_IN_ALERT_SENT, EV_REDIR, lli_send_redir}, {ST_IN_PROCEED_SEND, EV_REDIR, lli_send_redir}, {ST_IN_PROCEED_SEND, EV_ALERT, lli_send_alert}, {ST_IN_PROCEED_SEND, EV_ACCEPTD, lli_send_dconnect}, {ST_IN_PROCEED_SEND, 
EV_HANGUP, lli_disconnect_reject}, {ST_IN_PROCEED_SEND, EV_DISCONNECT_IND, lli_dhup_close}, {ST_IN_ALERT_SENT, EV_RELEASE, lli_dhup_close}, {ST_IN_WAIT_CONN_ACK, EV_SETUP_CMPL_IND, lli_init_bchan_in}, {ST_IN_WAIT_CONN_ACK, EV_HANGUP, lli_disconnect_req}, {ST_IN_WAIT_CONN_ACK, EV_DISCONNECT_IND, lli_release_req}, {ST_IN_WAIT_CONN_ACK, EV_RELEASE, lli_dhup_close}, {ST_IN_WAIT_CONN_ACK, EV_CONNECT_ERR, lli_error}, {ST_WAIT_BCONN, EV_BC_EST, lli_go_active}, {ST_WAIT_BCONN, EV_BC_REL, lli_rel_b_disc}, {ST_WAIT_BCONN, EV_HANGUP, lli_rel_b_disc}, {ST_WAIT_BCONN, EV_DISCONNECT_IND, lli_rel_b_release_req}, {ST_WAIT_BCONN, EV_RELEASE, lli_rel_b_dhup}, {ST_WAIT_BCONN, EV_LEASED_REL, lli_rel_b_fail}, {ST_WAIT_BCONN, EV_CINF, lli_charge_info}, {ST_ACTIVE, EV_CINF, lli_charge_info}, {ST_ACTIVE, EV_BC_REL, lli_bhup_rel_b}, {ST_ACTIVE, EV_SUSPEND, lli_suspend}, {ST_ACTIVE, EV_HANGUP, lli_disconn_bchan}, {ST_ACTIVE, EV_DISCONNECT_IND, lli_release_bchan}, {ST_ACTIVE, EV_RELEASE, lli_abort}, {ST_ACTIVE, EV_LEASED_REL, lli_failure_a}, {ST_WAIT_BRELEASE, EV_BC_REL, lli_bhup_disc}, {ST_WAIT_BRELEASE, EV_DISCONNECT_IND, lli_bhup_release_req}, {ST_WAIT_BRELEASE, EV_RELEASE, lli_bhup_dhup}, {ST_WAIT_BRELEASE, EV_LEASED_REL, lli_bhup_fail}, {ST_WAIT_BREL_DISC, EV_BC_REL, lli_bhup_release_req}, {ST_WAIT_BREL_DISC, EV_RELEASE, lli_bhup_dhup}, {ST_WAIT_DCOMMAND, EV_HANGUP, lli_start_disc}, {ST_WAIT_DCOMMAND, EV_DISCONNECT_IND, lli_release_req}, {ST_WAIT_DCOMMAND, EV_RELEASE, lli_dhup_close}, {ST_WAIT_DCOMMAND, EV_LEASED_REL, lli_failure_l}, {ST_WAIT_DRELEASE, EV_RELEASE, lli_dhup_close}, {ST_WAIT_DRELEASE, EV_DIAL, lli_dchan_not_ready}, /* ETS 300-104 16.1 */ {ST_WAIT_D_REL_CNF, EV_RELEASE, lli_dhup_close}, {ST_WAIT_D_REL_CNF, EV_DIAL, lli_dchan_not_ready}, }; /* *INDENT-ON* */ int __init CallcNew(void) { callcfsm.state_count = STATE_COUNT; callcfsm.event_count = EVENT_COUNT; callcfsm.strEvent = strEvent; callcfsm.strState = strState; return FsmNew(&callcfsm, fnlist, ARRAY_SIZE(fnlist)); } 
void CallcFree(void) { FsmFree(&callcfsm); } static void release_b_st(struct Channel *chanp) { struct PStack *st = chanp->b_st; if (test_and_clear_bit(FLG_START_B, &chanp->Flags)) { chanp->bcs->BC_Close(chanp->bcs); switch (chanp->l2_active_protocol) { case (ISDN_PROTO_L2_X75I): releasestack_isdnl2(st); break; case (ISDN_PROTO_L2_HDLC): case (ISDN_PROTO_L2_HDLC_56K): case (ISDN_PROTO_L2_TRANS): case (ISDN_PROTO_L2_MODEM): case (ISDN_PROTO_L2_FAX): releasestack_transl2(st); break; } } } static struct Channel *selectfreechannel(struct PStack *st, int bch) { struct IsdnCardState *cs = st->l1.hardware; struct Channel *chanp = st->lli.userdata; int i; if (test_bit(FLG_TWO_DCHAN, &cs->HW_Flags)) i = 1; else i = 0; if (!bch) { i = 2; /* virtual channel */ chanp += 2; } while (i < ((bch) ? cs->chanlimit : (2 + MAX_WAITING_CALLS))) { if (chanp->fi.state == ST_NULL) return (chanp); chanp++; i++; } if (bch) /* number of channels is limited */ { i = 2; /* virtual channel */ chanp = st->lli.userdata; chanp += i; while (i < (2 + MAX_WAITING_CALLS)) { if (chanp->fi.state == ST_NULL) return (chanp); chanp++; i++; } } return (NULL); } static void stat_redir_result(struct IsdnCardState *cs, int chan, ulong result) { isdn_ctrl ic; ic.driver = cs->myid; ic.command = ISDN_STAT_REDIR; ic.arg = chan; ic.parm.num[0] = result; cs->iif.statcallb(&ic); } /* stat_redir_result */ static void dchan_l3l4(struct PStack *st, int pr, void *arg) { struct l3_process *pc = arg; struct IsdnCardState *cs = st->l1.hardware; struct Channel *chanp; if (!pc) return; if (pr == (CC_SETUP | INDICATION)) { if (!(chanp = selectfreechannel(pc->st, pc->para.bchannel))) { pc->para.cause = 0x11; /* User busy */ pc->st->lli.l4l3(pc->st, CC_REJECT | REQUEST, pc); } else { chanp->proc = pc; pc->chan = chanp; FsmEvent(&chanp->fi, EV_SETUP_IND, NULL); } return; } if (!(chanp = pc->chan)) return; switch (pr) { case (CC_MORE_INFO | INDICATION): FsmEvent(&chanp->fi, EV_SETUP_IND, NULL); break; case (CC_DISCONNECT | 
INDICATION): FsmEvent(&chanp->fi, EV_DISCONNECT_IND, NULL); break; case (CC_RELEASE | CONFIRM): FsmEvent(&chanp->fi, EV_RELEASE, NULL); break; case (CC_SUSPEND | CONFIRM): FsmEvent(&chanp->fi, EV_RELEASE, NULL); break; case (CC_RESUME | CONFIRM): FsmEvent(&chanp->fi, EV_SETUP_CNF, NULL); break; case (CC_RESUME_ERR): FsmEvent(&chanp->fi, EV_RELEASE, NULL); break; case (CC_RELEASE | INDICATION): FsmEvent(&chanp->fi, EV_RELEASE, NULL); break; case (CC_SETUP_COMPL | INDICATION): FsmEvent(&chanp->fi, EV_SETUP_CMPL_IND, NULL); break; case (CC_SETUP | CONFIRM): FsmEvent(&chanp->fi, EV_SETUP_CNF, NULL); break; case (CC_CHARGE | INDICATION): FsmEvent(&chanp->fi, EV_CINF, NULL); break; case (CC_NOSETUP_RSP): FsmEvent(&chanp->fi, EV_NOSETUP_RSP, NULL); break; case (CC_SETUP_ERR): FsmEvent(&chanp->fi, EV_SETUP_ERR, NULL); break; case (CC_CONNECT_ERR): FsmEvent(&chanp->fi, EV_CONNECT_ERR, NULL); break; case (CC_RELEASE_ERR): FsmEvent(&chanp->fi, EV_RELEASE, NULL); break; case (CC_PROCEED_SEND | INDICATION): case (CC_PROCEEDING | INDICATION): case (CC_ALERTING | INDICATION): case (CC_PROGRESS | INDICATION): case (CC_NOTIFY | INDICATION): break; case (CC_REDIR | INDICATION): stat_redir_result(cs, chanp->chan, pc->redir_result); break; default: if (chanp->debug & 0x800) { HiSax_putstatus(chanp->cs, "Ch", "%d L3->L4 unknown primitiv %#x", chanp->chan, pr); } } } static void dummy_pstack(struct PStack *st, int pr, void *arg) { printk(KERN_WARNING"call to dummy_pstack pr=%04x arg %lx\n", pr, (long)arg); } static int init_PStack(struct PStack **stp) { *stp = kmalloc(sizeof(struct PStack), GFP_ATOMIC); if (!*stp) return -ENOMEM; (*stp)->next = NULL; (*stp)->l1.l1l2 = dummy_pstack; (*stp)->l1.l1hw = dummy_pstack; (*stp)->l1.l1tei = dummy_pstack; (*stp)->l2.l2tei = dummy_pstack; (*stp)->l2.l2l1 = dummy_pstack; (*stp)->l2.l2l3 = dummy_pstack; (*stp)->l3.l3l2 = dummy_pstack; (*stp)->l3.l3ml3 = dummy_pstack; (*stp)->l3.l3l4 = dummy_pstack; (*stp)->lli.l4l3 = dummy_pstack; (*stp)->ma.layer = 
dummy_pstack; return 0; } static int init_d_st(struct Channel *chanp) { struct PStack *st; struct IsdnCardState *cs = chanp->cs; char tmp[16]; int err; err = init_PStack(&chanp->d_st); if (err) return err; st = chanp->d_st; st->next = NULL; HiSax_addlist(cs, st); setstack_HiSax(st, cs); st->l2.sap = 0; st->l2.tei = -1; st->l2.flag = 0; test_and_set_bit(FLG_MOD128, &st->l2.flag); test_and_set_bit(FLG_LAPD, &st->l2.flag); test_and_set_bit(FLG_ORIG, &st->l2.flag); st->l2.maxlen = MAX_DFRAME_LEN; st->l2.window = 1; st->l2.T200 = 1000; /* 1000 milliseconds */ st->l2.N200 = 3; /* try 3 times */ st->l2.T203 = 10000; /* 10000 milliseconds */ if (test_bit(FLG_TWO_DCHAN, &cs->HW_Flags)) sprintf(tmp, "DCh%d Q.921 ", chanp->chan); else sprintf(tmp, "DCh Q.921 "); setstack_isdnl2(st, tmp); setstack_l3dc(st, chanp); st->lli.userdata = chanp; st->l3.l3l4 = dchan_l3l4; return 0; } static __printf(2, 3) void callc_debug(struct FsmInst *fi, char *fmt, ...) { va_list args; struct Channel *chanp = fi->userdata; char tmp[16]; va_start(args, fmt); sprintf(tmp, "Ch%d callc ", chanp->chan); VHiSax_putstatus(chanp->cs, tmp, fmt, args); va_end(args); } static int init_chan(int chan, struct IsdnCardState *csta) { struct Channel *chanp = csta->channel + chan; int err; chanp->cs = csta; chanp->bcs = csta->bcs + chan; chanp->chan = chan; chanp->incoming = 0; chanp->debug = 0; chanp->Flags = 0; chanp->leased = 0; err = init_PStack(&chanp->b_st); if (err) return err; chanp->b_st->l1.delay = DEFAULT_B_DELAY; chanp->fi.fsm = &callcfsm; chanp->fi.state = ST_NULL; chanp->fi.debug = 0; chanp->fi.userdata = chanp; chanp->fi.printdebug = callc_debug; FsmInitTimer(&chanp->fi, &chanp->dial_timer); FsmInitTimer(&chanp->fi, &chanp->drel_timer); if (!chan || (test_bit(FLG_TWO_DCHAN, &csta->HW_Flags) && chan < 2)) { err = init_d_st(chanp); if (err) return err; } else { chanp->d_st = csta->channel->d_st; } chanp->data_open = 0; return 0; } int CallcNewChan(struct IsdnCardState *csta) { int i, err; chancount += 
2; err = init_chan(0, csta); if (err) return err; err = init_chan(1, csta); if (err) return err; printk(KERN_INFO "HiSax: 2 channels added\n"); for (i = 0; i < MAX_WAITING_CALLS; i++) { err = init_chan(i + 2, csta); if (err) return err; } printk(KERN_INFO "HiSax: MAX_WAITING_CALLS added\n"); if (test_bit(FLG_PTP, &csta->channel->d_st->l2.flag)) { printk(KERN_INFO "LAYER2 WATCHING ESTABLISH\n"); csta->channel->d_st->lli.l4l3(csta->channel->d_st, DL_ESTABLISH | REQUEST, NULL); } return (0); } static void release_d_st(struct Channel *chanp) { struct PStack *st = chanp->d_st; if (!st) return; releasestack_isdnl2(st); releasestack_isdnl3(st); HiSax_rmlist(st->l1.hardware, st); kfree(st); chanp->d_st = NULL; } void CallcFreeChan(struct IsdnCardState *csta) { int i; for (i = 0; i < 2; i++) { FsmDelTimer(&csta->channel[i].drel_timer, 74); FsmDelTimer(&csta->channel[i].dial_timer, 75); if (i || test_bit(FLG_TWO_DCHAN, &csta->HW_Flags)) release_d_st(csta->channel + i); if (csta->channel[i].b_st) { release_b_st(csta->channel + i); kfree(csta->channel[i].b_st); csta->channel[i].b_st = NULL; } else printk(KERN_WARNING "CallcFreeChan b_st ch%d already freed\n", i); if (i || test_bit(FLG_TWO_DCHAN, &csta->HW_Flags)) { release_d_st(csta->channel + i); } else csta->channel[i].d_st = NULL; } } static void lldata_handler(struct PStack *st, int pr, void *arg) { struct Channel *chanp = (struct Channel *) st->lli.userdata; struct sk_buff *skb = arg; switch (pr) { case (DL_DATA | INDICATION): if (chanp->data_open) { if (chanp->debug & 0x800) link_debug(chanp, 0, "lldata: %d", skb->len); chanp->cs->iif.rcvcallb_skb(chanp->cs->myid, chanp->chan, skb); } else { link_debug(chanp, 0, "lldata: channel not open"); dev_kfree_skb(skb); } break; case (DL_ESTABLISH | INDICATION): case (DL_ESTABLISH | CONFIRM): FsmEvent(&chanp->fi, EV_BC_EST, NULL); break; case (DL_RELEASE | INDICATION): case (DL_RELEASE | CONFIRM): FsmEvent(&chanp->fi, EV_BC_REL, NULL); break; default: printk(KERN_WARNING 
"lldata_handler unknown primitive %#x\n", pr); break; } } static void lltrans_handler(struct PStack *st, int pr, void *arg) { struct Channel *chanp = (struct Channel *) st->lli.userdata; struct sk_buff *skb = arg; switch (pr) { case (PH_DATA | INDICATION): if (chanp->data_open) { if (chanp->debug & 0x800) link_debug(chanp, 0, "lltrans: %d", skb->len); chanp->cs->iif.rcvcallb_skb(chanp->cs->myid, chanp->chan, skb); } else { link_debug(chanp, 0, "lltrans: channel not open"); dev_kfree_skb(skb); } break; case (PH_ACTIVATE | INDICATION): case (PH_ACTIVATE | CONFIRM): FsmEvent(&chanp->fi, EV_BC_EST, NULL); break; case (PH_DEACTIVATE | INDICATION): case (PH_DEACTIVATE | CONFIRM): FsmEvent(&chanp->fi, EV_BC_REL, NULL); break; default: printk(KERN_WARNING "lltrans_handler unknown primitive %#x\n", pr); break; } } void lli_writewakeup(struct PStack *st, int len) { struct Channel *chanp = st->lli.userdata; isdn_ctrl ic; if (chanp->debug & 0x800) link_debug(chanp, 0, "llwakeup: %d", len); ic.driver = chanp->cs->myid; ic.command = ISDN_STAT_BSENT; ic.arg = chanp->chan; ic.parm.length = len; chanp->cs->iif.statcallb(&ic); } static int init_b_st(struct Channel *chanp, int incoming) { struct PStack *st = chanp->b_st; struct IsdnCardState *cs = chanp->cs; char tmp[16]; st->l1.hardware = cs; if (chanp->leased) st->l1.bc = chanp->chan & 1; else st->l1.bc = chanp->proc->para.bchannel - 1; switch (chanp->l2_active_protocol) { case (ISDN_PROTO_L2_X75I): case (ISDN_PROTO_L2_HDLC): st->l1.mode = L1_MODE_HDLC; break; case (ISDN_PROTO_L2_HDLC_56K): st->l1.mode = L1_MODE_HDLC_56K; break; case (ISDN_PROTO_L2_TRANS): st->l1.mode = L1_MODE_TRANS; break; case (ISDN_PROTO_L2_MODEM): st->l1.mode = L1_MODE_V32; break; case (ISDN_PROTO_L2_FAX): st->l1.mode = L1_MODE_FAX; break; } chanp->bcs->conmsg = NULL; if (chanp->bcs->BC_SetStack(st, chanp->bcs)) return (-1); st->l2.flag = 0; test_and_set_bit(FLG_LAPB, &st->l2.flag); st->l2.maxlen = MAX_DATA_SIZE; if (!incoming) test_and_set_bit(FLG_ORIG, 
&st->l2.flag); st->l2.T200 = 1000; /* 1000 milliseconds */ st->l2.window = 7; st->l2.N200 = 4; /* try 4 times */ st->l2.T203 = 5000; /* 5000 milliseconds */ st->l3.debug = 0; switch (chanp->l2_active_protocol) { case (ISDN_PROTO_L2_X75I): sprintf(tmp, "Ch%d X.75", chanp->chan); setstack_isdnl2(st, tmp); setstack_l3bc(st, chanp); st->l2.l2l3 = lldata_handler; st->lli.userdata = chanp; test_and_clear_bit(FLG_LLI_L1WAKEUP, &st->lli.flag); test_and_set_bit(FLG_LLI_L2WAKEUP, &st->lli.flag); st->l2.l2m.debug = chanp->debug & 16; st->l2.debug = chanp->debug & 64; break; case (ISDN_PROTO_L2_HDLC): case (ISDN_PROTO_L2_HDLC_56K): case (ISDN_PROTO_L2_TRANS): case (ISDN_PROTO_L2_MODEM): case (ISDN_PROTO_L2_FAX): st->l1.l1l2 = lltrans_handler; st->lli.userdata = chanp; test_and_set_bit(FLG_LLI_L1WAKEUP, &st->lli.flag); test_and_clear_bit(FLG_LLI_L2WAKEUP, &st->lli.flag); setstack_transl2(st); setstack_l3bc(st, chanp); break; } test_and_set_bit(FLG_START_B, &chanp->Flags); return (0); } static void leased_l4l3(struct PStack *st, int pr, void *arg) { struct Channel *chanp = (struct Channel *) st->lli.userdata; struct sk_buff *skb = arg; switch (pr) { case (DL_DATA | REQUEST): link_debug(chanp, 0, "leased line d-channel DATA"); dev_kfree_skb(skb); break; case (DL_ESTABLISH | REQUEST): st->l2.l2l1(st, PH_ACTIVATE | REQUEST, NULL); break; case (DL_RELEASE | REQUEST): break; default: printk(KERN_WARNING "transd_l4l3 unknown primitive %#x\n", pr); break; } } static void leased_l1l2(struct PStack *st, int pr, void *arg) { struct Channel *chanp = (struct Channel *) st->lli.userdata; struct sk_buff *skb = arg; int i, event = EV_LEASED_REL; switch (pr) { case (PH_DATA | INDICATION): link_debug(chanp, 0, "leased line d-channel DATA"); dev_kfree_skb(skb); break; case (PH_ACTIVATE | INDICATION): case (PH_ACTIVATE | CONFIRM): event = EV_LEASED; case (PH_DEACTIVATE | INDICATION): case (PH_DEACTIVATE | CONFIRM): if (test_bit(FLG_TWO_DCHAN, &chanp->cs->HW_Flags)) i = 1; else i = 0; while (i < 2) 
{ FsmEvent(&chanp->fi, event, NULL); chanp++; i++; } break; default: printk(KERN_WARNING "transd_l1l2 unknown primitive %#x\n", pr); break; } } static void distr_debug(struct IsdnCardState *csta, int debugflags) { int i; struct Channel *chanp = csta->channel; for (i = 0; i < (2 + MAX_WAITING_CALLS); i++) { chanp[i].debug = debugflags; chanp[i].fi.debug = debugflags & 2; chanp[i].d_st->l2.l2m.debug = debugflags & 8; chanp[i].b_st->l2.l2m.debug = debugflags & 0x10; chanp[i].d_st->l2.debug = debugflags & 0x20; chanp[i].b_st->l2.debug = debugflags & 0x40; chanp[i].d_st->l3.l3m.debug = debugflags & 0x80; chanp[i].b_st->l3.l3m.debug = debugflags & 0x100; chanp[i].b_st->ma.tei_m.debug = debugflags & 0x200; chanp[i].b_st->ma.debug = debugflags & 0x200; chanp[i].d_st->l1.l1m.debug = debugflags & 0x1000; chanp[i].b_st->l1.l1m.debug = debugflags & 0x2000; } if (debugflags & 4) csta->debug |= DEB_DLOG_HEX; else csta->debug &= ~DEB_DLOG_HEX; } static char tmpbuf[256]; static void capi_debug(struct Channel *chanp, capi_msg *cm) { char *t = tmpbuf; t += QuickHex(t, (u_char *)cm, (cm->Length > 50) ? 
50 : cm->Length); t--; *t = 0; HiSax_putstatus(chanp->cs, "Ch", "%d CAPIMSG %s", chanp->chan, tmpbuf); } static void lli_got_fac_req(struct Channel *chanp, capi_msg *cm) { if ((cm->para[0] != 3) || (cm->para[1] != 0)) return; if (cm->para[2] < 3) return; if (cm->para[4] != 0) return; switch (cm->para[3]) { case 4: /* Suspend */ strncpy(chanp->setup.phone, &cm->para[5], cm->para[5] + 1); FsmEvent(&chanp->fi, EV_SUSPEND, cm); break; case 5: /* Resume */ strncpy(chanp->setup.phone, &cm->para[5], cm->para[5] + 1); if (chanp->fi.state == ST_NULL) { FsmEvent(&chanp->fi, EV_RESUME, cm); } else { FsmDelTimer(&chanp->dial_timer, 72); FsmAddTimer(&chanp->dial_timer, 80, EV_RESUME, cm, 73); } break; } } static void lli_got_manufacturer(struct Channel *chanp, struct IsdnCardState *cs, capi_msg *cm) { if ((cs->typ == ISDN_CTYPE_ELSA) || (cs->typ == ISDN_CTYPE_ELSA_PNP) || (cs->typ == ISDN_CTYPE_ELSA_PCI)) { if (cs->hw.elsa.MFlag) { cs->cardmsg(cs, CARD_AUX_IND, cm->para); } } } /***************************************************************/ /* Limit the available number of channels for the current card */ /***************************************************************/ static int set_channel_limit(struct IsdnCardState *cs, int chanmax) { isdn_ctrl ic; int i, ii; if ((chanmax < 0) || (chanmax > 2)) return (-EINVAL); cs->chanlimit = 0; for (ii = 0; ii < 2; ii++) { ic.driver = cs->myid; ic.command = ISDN_STAT_DISCH; ic.arg = ii; if (ii >= chanmax) ic.parm.num[0] = 0; /* disabled */ else ic.parm.num[0] = 1; /* enabled */ i = cs->iif.statcallb(&ic); if (i) return (-EINVAL); if (ii < chanmax) cs->chanlimit++; } return (0); } /* set_channel_limit */ int HiSax_command(isdn_ctrl *ic) { struct IsdnCardState *csta = hisax_findcard(ic->driver); struct PStack *st; struct Channel *chanp; int i; u_int num; if (!csta) { printk(KERN_ERR "HiSax: if_command %d called with invalid driverId %d!\n", ic->command, ic->driver); return -ENODEV; } switch (ic->command) { case (ISDN_CMD_SETEAZ): chanp = 
csta->channel + ic->arg; break; case (ISDN_CMD_SETL2): chanp = csta->channel + (ic->arg & 0xff); if (chanp->debug & 1) link_debug(chanp, 1, "SETL2 card %d %ld", csta->cardnr + 1, ic->arg >> 8); chanp->l2_protocol = ic->arg >> 8; break; case (ISDN_CMD_SETL3): chanp = csta->channel + (ic->arg & 0xff); if (chanp->debug & 1) link_debug(chanp, 1, "SETL3 card %d %ld", csta->cardnr + 1, ic->arg >> 8); chanp->l3_protocol = ic->arg >> 8; break; case (ISDN_CMD_DIAL): chanp = csta->channel + (ic->arg & 0xff); if (chanp->debug & 1) link_debug(chanp, 1, "DIAL %s -> %s (%d,%d)", ic->parm.setup.eazmsn, ic->parm.setup.phone, ic->parm.setup.si1, ic->parm.setup.si2); memcpy(&chanp->setup, &ic->parm.setup, sizeof(setup_parm)); if (!strcmp(chanp->setup.eazmsn, "0")) chanp->setup.eazmsn[0] = '\0'; /* this solution is dirty and may be change, if * we make a callreference based callmanager */ if (chanp->fi.state == ST_NULL) { FsmEvent(&chanp->fi, EV_DIAL, NULL); } else { FsmDelTimer(&chanp->dial_timer, 70); FsmAddTimer(&chanp->dial_timer, 50, EV_DIAL, NULL, 71); } break; case (ISDN_CMD_ACCEPTB): chanp = csta->channel + ic->arg; if (chanp->debug & 1) link_debug(chanp, 1, "ACCEPTB"); FsmEvent(&chanp->fi, EV_ACCEPTB, NULL); break; case (ISDN_CMD_ACCEPTD): chanp = csta->channel + ic->arg; memcpy(&chanp->setup, &ic->parm.setup, sizeof(setup_parm)); if (chanp->debug & 1) link_debug(chanp, 1, "ACCEPTD"); FsmEvent(&chanp->fi, EV_ACCEPTD, NULL); break; case (ISDN_CMD_HANGUP): chanp = csta->channel + ic->arg; if (chanp->debug & 1) link_debug(chanp, 1, "HANGUP"); FsmEvent(&chanp->fi, EV_HANGUP, NULL); break; case (CAPI_PUT_MESSAGE): chanp = csta->channel + ic->arg; if (chanp->debug & 1) capi_debug(chanp, &ic->parm.cmsg); if (ic->parm.cmsg.Length < 8) break; switch (ic->parm.cmsg.Command) { case CAPI_FACILITY: if (ic->parm.cmsg.Subcommand == CAPI_REQ) lli_got_fac_req(chanp, &ic->parm.cmsg); break; case CAPI_MANUFACTURER: if (ic->parm.cmsg.Subcommand == CAPI_REQ) lli_got_manufacturer(chanp, csta, 
&ic->parm.cmsg); break; default: break; } break; case (ISDN_CMD_IOCTL): switch (ic->arg) { case (0): num = *(unsigned int *) ic->parm.num; HiSax_reportcard(csta->cardnr, num); break; case (1): num = *(unsigned int *) ic->parm.num; distr_debug(csta, num); printk(KERN_DEBUG "HiSax: debugging flags card %d set to %x\n", csta->cardnr + 1, num); HiSax_putstatus(csta, "debugging flags ", "card %d set to %x", csta->cardnr + 1, num); break; case (2): num = *(unsigned int *) ic->parm.num; csta->channel[0].b_st->l1.delay = num; csta->channel[1].b_st->l1.delay = num; HiSax_putstatus(csta, "delay ", "card %d set to %d ms", csta->cardnr + 1, num); printk(KERN_DEBUG "HiSax: delay card %d set to %d ms\n", csta->cardnr + 1, num); break; case (5): /* set card in leased mode */ num = *(unsigned int *) ic->parm.num; if ((num < 1) || (num > 2)) { HiSax_putstatus(csta, "Set LEASED ", "wrong channel %d", num); printk(KERN_WARNING "HiSax: Set LEASED wrong channel %d\n", num); } else { num--; chanp = csta->channel + num; chanp->leased = 1; HiSax_putstatus(csta, "Card", "%d channel %d set leased mode\n", csta->cardnr + 1, num + 1); chanp->d_st->l1.l1l2 = leased_l1l2; chanp->d_st->lli.l4l3 = leased_l4l3; chanp->d_st->lli.l4l3(chanp->d_st, DL_ESTABLISH | REQUEST, NULL); } break; case (6): /* set B-channel test loop */ num = *(unsigned int *) ic->parm.num; if (csta->stlist) csta->stlist->l2.l2l1(csta->stlist, PH_TESTLOOP | REQUEST, (void *) (long)num); break; case (7): /* set card in PTP mode */ num = *(unsigned int *) ic->parm.num; if (test_bit(FLG_TWO_DCHAN, &csta->HW_Flags)) { printk(KERN_ERR "HiSax PTP mode only with one TEI possible\n"); } else if (num) { test_and_set_bit(FLG_PTP, &csta->channel[0].d_st->l2.flag); test_and_set_bit(FLG_FIXED_TEI, &csta->channel[0].d_st->l2.flag); csta->channel[0].d_st->l2.tei = 0; HiSax_putstatus(csta, "set card ", "in PTP mode"); printk(KERN_DEBUG "HiSax: set card in PTP mode\n"); printk(KERN_INFO "LAYER2 WATCHING ESTABLISH\n"); 
csta->channel[0].d_st->lli.l4l3(csta->channel[0].d_st, DL_ESTABLISH | REQUEST, NULL); } else { test_and_clear_bit(FLG_PTP, &csta->channel[0].d_st->l2.flag); test_and_clear_bit(FLG_FIXED_TEI, &csta->channel[0].d_st->l2.flag); HiSax_putstatus(csta, "set card ", "in PTMP mode"); printk(KERN_DEBUG "HiSax: set card in PTMP mode\n"); } break; case (8): /* set card in FIXED TEI mode */ num = *(unsigned int *)ic->parm.num; chanp = csta->channel + (num & 1); num = num >> 1; if (num == 127) { test_and_clear_bit(FLG_FIXED_TEI, &chanp->d_st->l2.flag); chanp->d_st->l2.tei = -1; HiSax_putstatus(csta, "set card ", "in VAR TEI mode"); printk(KERN_DEBUG "HiSax: set card in VAR TEI mode\n"); } else { test_and_set_bit(FLG_FIXED_TEI, &chanp->d_st->l2.flag); chanp->d_st->l2.tei = num; HiSax_putstatus(csta, "set card ", "in FIXED TEI (%d) mode", num); printk(KERN_DEBUG "HiSax: set card in FIXED TEI (%d) mode\n", num); } chanp->d_st->lli.l4l3(chanp->d_st, DL_ESTABLISH | REQUEST, NULL); break; case (11): num = csta->debug & DEB_DLOG_HEX; csta->debug = *(unsigned int *) ic->parm.num; csta->debug |= num; HiSax_putstatus(cards[0].cs, "l1 debugging ", "flags card %d set to %x", csta->cardnr + 1, csta->debug); printk(KERN_DEBUG "HiSax: l1 debugging flags card %d set to %x\n", csta->cardnr + 1, csta->debug); break; case (13): csta->channel[0].d_st->l3.debug = *(unsigned int *) ic->parm.num; csta->channel[1].d_st->l3.debug = *(unsigned int *) ic->parm.num; HiSax_putstatus(cards[0].cs, "l3 debugging ", "flags card %d set to %x\n", csta->cardnr + 1, *(unsigned int *) ic->parm.num); printk(KERN_DEBUG "HiSax: l3 debugging flags card %d set to %x\n", csta->cardnr + 1, *(unsigned int *) ic->parm.num); break; case (10): i = *(unsigned int *) ic->parm.num; return (set_channel_limit(csta, i)); default: if (csta->auxcmd) return (csta->auxcmd(csta, ic)); printk(KERN_DEBUG "HiSax: invalid ioctl %d\n", (int) ic->arg); return (-EINVAL); } break; case (ISDN_CMD_PROCEED): chanp = csta->channel + ic->arg; if 
(chanp->debug & 1) link_debug(chanp, 1, "PROCEED"); FsmEvent(&chanp->fi, EV_PROCEED, NULL); break; case (ISDN_CMD_ALERT): chanp = csta->channel + ic->arg; if (chanp->debug & 1) link_debug(chanp, 1, "ALERT"); FsmEvent(&chanp->fi, EV_ALERT, NULL); break; case (ISDN_CMD_REDIR): chanp = csta->channel + ic->arg; if (chanp->debug & 1) link_debug(chanp, 1, "REDIR"); memcpy(&chanp->setup, &ic->parm.setup, sizeof(setup_parm)); FsmEvent(&chanp->fi, EV_REDIR, NULL); break; /* protocol specific io commands */ case (ISDN_CMD_PROT_IO): for (st = csta->stlist; st; st = st->next) if (st->protocol == (ic->arg & 0xFF)) return (st->lli.l4l3_proto(st, ic)); return (-EINVAL); break; default: if (csta->auxcmd) return (csta->auxcmd(csta, ic)); return (-EINVAL); } return (0); } int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb) { struct IsdnCardState *csta = hisax_findcard(id); struct Channel *chanp; struct PStack *st; int len = skb->len; struct sk_buff *nskb; if (!csta) { printk(KERN_ERR "HiSax: if_sendbuf called with invalid driverId!\n"); return -ENODEV; } chanp = csta->channel + chan; st = chanp->b_st; if (!chanp->data_open) { link_debug(chanp, 1, "writebuf: channel not open"); return -EIO; } if (len > MAX_DATA_SIZE) { link_debug(chanp, 1, "writebuf: packet too large (%d bytes)", len); printk(KERN_WARNING "HiSax_writebuf: packet too large (%d bytes) !\n", len); return -EINVAL; } if (len) { if ((len + chanp->bcs->tx_cnt) > MAX_DATA_MEM) { /* Must return 0 here, since this is not an error * but a temporary lack of resources. 
*/ if (chanp->debug & 0x800) link_debug(chanp, 1, "writebuf: no buffers for %d bytes", len); return 0; } else if (chanp->debug & 0x800) link_debug(chanp, 1, "writebuf %d/%d/%d", len, chanp->bcs->tx_cnt, MAX_DATA_MEM); nskb = skb_clone(skb, GFP_ATOMIC); if (nskb) { nskb->truesize = nskb->len; if (!ack) nskb->pkt_type = PACKET_NOACK; if (chanp->l2_active_protocol == ISDN_PROTO_L2_X75I) st->l3.l3l2(st, DL_DATA | REQUEST, nskb); else { chanp->bcs->tx_cnt += len; st->l2.l2l1(st, PH_DATA | REQUEST, nskb); } dev_kfree_skb(skb); } else len = 0; } return (len); }
gpl-2.0
Hashcode/android_kernel_samsung_hlte
fs/9p/acl.c
5478
8932
/* * Copyright IBM Corporation, 2010 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2.1 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include <linux/module.h> #include <linux/fs.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/posix_acl_xattr.h> #include "xattr.h" #include "acl.h" #include "v9fs.h" #include "v9fs_vfs.h" static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name) { ssize_t size; void *value = NULL; struct posix_acl *acl = NULL; size = v9fs_fid_xattr_get(fid, name, NULL, 0); if (size > 0) { value = kzalloc(size, GFP_NOFS); if (!value) return ERR_PTR(-ENOMEM); size = v9fs_fid_xattr_get(fid, name, value, size); if (size > 0) { acl = posix_acl_from_xattr(value, size); if (IS_ERR(acl)) goto err_out; } } else if (size == -ENODATA || size == 0 || size == -ENOSYS || size == -EOPNOTSUPP) { acl = NULL; } else acl = ERR_PTR(-EIO); err_out: kfree(value); return acl; } int v9fs_get_acl(struct inode *inode, struct p9_fid *fid) { int retval = 0; struct posix_acl *pacl, *dacl; struct v9fs_session_info *v9ses; v9ses = v9fs_inode2v9ses(inode); if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) || ((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) { set_cached_acl(inode, ACL_TYPE_DEFAULT, NULL); set_cached_acl(inode, ACL_TYPE_ACCESS, NULL); return 0; } /* get the default/access acl values and cache them */ dacl = __v9fs_get_acl(fid, POSIX_ACL_XATTR_DEFAULT); pacl = __v9fs_get_acl(fid, POSIX_ACL_XATTR_ACCESS); if (!IS_ERR(dacl) && !IS_ERR(pacl)) { set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl); set_cached_acl(inode, 
ACL_TYPE_ACCESS, pacl); } else retval = -EIO; if (!IS_ERR(dacl)) posix_acl_release(dacl); if (!IS_ERR(pacl)) posix_acl_release(pacl); return retval; } static struct posix_acl *v9fs_get_cached_acl(struct inode *inode, int type) { struct posix_acl *acl; /* * 9p Always cache the acl value when * instantiating the inode (v9fs_inode_from_fid) */ acl = get_cached_acl(inode, type); BUG_ON(acl == ACL_NOT_CACHED); return acl; } struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type) { struct v9fs_session_info *v9ses; v9ses = v9fs_inode2v9ses(inode); if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) || ((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) { /* * On access = client and acl = on mode get the acl * values from the server */ return NULL; } return v9fs_get_cached_acl(inode, type); } static int v9fs_set_acl(struct dentry *dentry, int type, struct posix_acl *acl) { int retval; char *name; size_t size; void *buffer; struct inode *inode = dentry->d_inode; set_cached_acl(inode, type, acl); if (!acl) return 0; /* Set a setxattr request to server */ size = posix_acl_xattr_size(acl->a_count); buffer = kmalloc(size, GFP_KERNEL); if (!buffer) return -ENOMEM; retval = posix_acl_to_xattr(acl, buffer, size); if (retval < 0) goto err_free_out; switch (type) { case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; break; case ACL_TYPE_DEFAULT: name = POSIX_ACL_XATTR_DEFAULT; break; default: BUG(); } retval = v9fs_xattr_set(dentry, name, buffer, size, 0); err_free_out: kfree(buffer); return retval; } int v9fs_acl_chmod(struct dentry *dentry) { int retval = 0; struct posix_acl *acl; struct inode *inode = dentry->d_inode; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; acl = v9fs_get_cached_acl(inode, ACL_TYPE_ACCESS); if (acl) { retval = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); if (retval) return retval; retval = v9fs_set_acl(dentry, ACL_TYPE_ACCESS, acl); posix_acl_release(acl); } return retval; } int v9fs_set_create_acl(struct dentry *dentry, struct 
posix_acl **dpacl, struct posix_acl **pacl) { if (dentry) { v9fs_set_acl(dentry, ACL_TYPE_DEFAULT, *dpacl); v9fs_set_acl(dentry, ACL_TYPE_ACCESS, *pacl); } posix_acl_release(*dpacl); posix_acl_release(*pacl); *dpacl = *pacl = NULL; return 0; } int v9fs_acl_mode(struct inode *dir, umode_t *modep, struct posix_acl **dpacl, struct posix_acl **pacl) { int retval = 0; umode_t mode = *modep; struct posix_acl *acl = NULL; if (!S_ISLNK(mode)) { acl = v9fs_get_cached_acl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(acl)) return PTR_ERR(acl); if (!acl) mode &= ~current_umask(); } if (acl) { if (S_ISDIR(mode)) *dpacl = posix_acl_dup(acl); retval = posix_acl_create(&acl, GFP_NOFS, &mode); if (retval < 0) return retval; if (retval > 0) *pacl = acl; else posix_acl_release(acl); } *modep = mode; return 0; } static int v9fs_remote_get_acl(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { char *full_name; switch (type) { case ACL_TYPE_ACCESS: full_name = POSIX_ACL_XATTR_ACCESS; break; case ACL_TYPE_DEFAULT: full_name = POSIX_ACL_XATTR_DEFAULT; break; default: BUG(); } return v9fs_xattr_get(dentry, full_name, buffer, size); } static int v9fs_xattr_get_acl(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { struct v9fs_session_info *v9ses; struct posix_acl *acl; int error; if (strcmp(name, "") != 0) return -EINVAL; v9ses = v9fs_dentry2v9ses(dentry); /* * We allow set/get/list of acl when access=client is not specified */ if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) return v9fs_remote_get_acl(dentry, name, buffer, size, type); acl = v9fs_get_cached_acl(dentry->d_inode, type); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl == NULL) return -ENODATA; error = posix_acl_to_xattr(acl, buffer, size); posix_acl_release(acl); return error; } static int v9fs_remote_set_acl(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { char *full_name; switch (type) { case ACL_TYPE_ACCESS: full_name = 
POSIX_ACL_XATTR_ACCESS; break; case ACL_TYPE_DEFAULT: full_name = POSIX_ACL_XATTR_DEFAULT; break; default: BUG(); } return v9fs_xattr_set(dentry, full_name, value, size, flags); } static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { int retval; struct posix_acl *acl; struct v9fs_session_info *v9ses; struct inode *inode = dentry->d_inode; if (strcmp(name, "") != 0) return -EINVAL; v9ses = v9fs_dentry2v9ses(dentry); /* * set the attribute on the remote. Without even looking at the * xattr value. We leave it to the server to validate */ if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) return v9fs_remote_set_acl(dentry, name, value, size, flags, type); if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; if (!inode_owner_or_capable(inode)) return -EPERM; if (value) { /* update the cached acl value */ acl = posix_acl_from_xattr(value, size); if (IS_ERR(acl)) return PTR_ERR(acl); else if (acl) { retval = posix_acl_valid(acl); if (retval) goto err_out; } } else acl = NULL; switch (type) { case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; if (acl) { umode_t mode = inode->i_mode; retval = posix_acl_equiv_mode(acl, &mode); if (retval < 0) goto err_out; else { struct iattr iattr; if (retval == 0) { /* * ACL can be represented * by the mode bits. So don't * update ACL. */ acl = NULL; value = NULL; size = 0; } /* Updte the mode bits */ iattr.ia_mode = ((mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO)); iattr.ia_valid = ATTR_MODE; /* FIXME should we update ctime ? * What is the following setxattr update the * mode ? */ v9fs_vfs_setattr_dotl(dentry, &iattr); } } break; case ACL_TYPE_DEFAULT: name = POSIX_ACL_XATTR_DEFAULT; if (!S_ISDIR(inode->i_mode)) { retval = acl ? 
-EINVAL : 0; goto err_out; } break; default: BUG(); } retval = v9fs_xattr_set(dentry, name, value, size, flags); if (!retval) set_cached_acl(inode, type, acl); err_out: posix_acl_release(acl); return retval; } const struct xattr_handler v9fs_xattr_acl_access_handler = { .prefix = POSIX_ACL_XATTR_ACCESS, .flags = ACL_TYPE_ACCESS, .get = v9fs_xattr_get_acl, .set = v9fs_xattr_set_acl, }; const struct xattr_handler v9fs_xattr_acl_default_handler = { .prefix = POSIX_ACL_XATTR_DEFAULT, .flags = ACL_TYPE_DEFAULT, .get = v9fs_xattr_get_acl, .set = v9fs_xattr_set_acl, };
gpl-2.0
GalaxyTab4/android_kernel_samsung_millet
arch/mips/cavium-octeon/executive/octeon-model.c
7782
9566
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2010 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ #include <asm/octeon/octeon.h> /** * Given the chip processor ID from COP0, this function returns a * string representing the chip model number. The string is of the * form CNXXXXpX.X-FREQ-SUFFIX. * - XXXX = The chip model number * - X.X = Chip pass number * - FREQ = Current frequency in Mhz * - SUFFIX = NSP, EXP, SCP, SSP, or CP * * @chip_id: Chip ID * * Returns Model string */ const char *octeon_model_get_string(uint32_t chip_id) { static char buffer[32]; return octeon_model_get_string_buffer(chip_id, buffer); } /* * Version of octeon_model_get_string() that takes buffer as argument, * as running early in u-boot static/global variables don't work when * running from flash. 
*/ const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer) { const char *family; const char *core_model; char pass[4]; int clock_mhz; const char *suffix; union cvmx_l2d_fus3 fus3; int num_cores; union cvmx_mio_fus_dat2 fus_dat2; union cvmx_mio_fus_dat3 fus_dat3; char fuse_model[10]; uint32_t fuse_data = 0; fus3.u64 = 0; if (!OCTEON_IS_MODEL(OCTEON_CN6XXX)) fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3); fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2); fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3); num_cores = cvmx_pop(cvmx_read_csr(CVMX_CIU_FUSE)); /* Make sure the non existent devices look disabled */ switch ((chip_id >> 8) & 0xff) { case 6: /* CN50XX */ case 2: /* CN30XX */ fus_dat3.s.nodfa_dte = 1; fus_dat3.s.nozip = 1; break; case 4: /* CN57XX or CN56XX */ fus_dat3.s.nodfa_dte = 1; break; default: break; } /* Make a guess at the suffix */ /* NSP = everything */ /* EXP = No crypto */ /* SCP = No DFA, No zip */ /* CP = No DFA, No crypto, No zip */ if (fus_dat3.s.nodfa_dte) { if (fus_dat2.s.nocrypto) suffix = "CP"; else suffix = "SCP"; } else if (fus_dat2.s.nocrypto) suffix = "EXP"; else suffix = "NSP"; /* * Assume pass number is encoded using <5:3><2:0>. Exceptions * will be fixed later. */ sprintf(pass, "%d.%d", (int)((chip_id >> 3) & 7) + 1, (int)chip_id & 7); /* * Use the number of cores to determine the last 2 digits of * the model number. There are some exceptions that are fixed * later. 
*/ switch (num_cores) { case 32: core_model = "80"; break; case 24: core_model = "70"; break; case 16: core_model = "60"; break; case 15: core_model = "58"; break; case 14: core_model = "55"; break; case 13: core_model = "52"; break; case 12: core_model = "50"; break; case 11: core_model = "48"; break; case 10: core_model = "45"; break; case 9: core_model = "42"; break; case 8: core_model = "40"; break; case 7: core_model = "38"; break; case 6: core_model = "34"; break; case 5: core_model = "32"; break; case 4: core_model = "30"; break; case 3: core_model = "25"; break; case 2: core_model = "20"; break; case 1: core_model = "10"; break; default: core_model = "XX"; break; } /* Now figure out the family, the first two digits */ switch ((chip_id >> 8) & 0xff) { case 0: /* CN38XX, CN37XX or CN36XX */ if (fus3.cn38xx.crip_512k) { /* * For some unknown reason, the 16 core one is * called 37 instead of 36. */ if (num_cores >= 16) family = "37"; else family = "36"; } else family = "38"; /* * This series of chips didn't follow the standard * pass numbering. */ switch (chip_id & 0xf) { case 0: strcpy(pass, "1.X"); break; case 1: strcpy(pass, "2.X"); break; case 3: strcpy(pass, "3.X"); break; default: strcpy(pass, "X.X"); break; } break; case 1: /* CN31XX or CN3020 */ if ((chip_id & 0x10) || fus3.cn31xx.crip_128k) family = "30"; else family = "31"; /* * This series of chips didn't follow the standard * pass numbering. */ switch (chip_id & 0xf) { case 0: strcpy(pass, "1.0"); break; case 2: strcpy(pass, "1.1"); break; default: strcpy(pass, "X.X"); break; } break; case 2: /* CN3010 or CN3005 */ family = "30"; /* A chip with half cache is an 05 */ if (fus3.cn30xx.crip_64k) core_model = "05"; /* * This series of chips didn't follow the standard * pass numbering. */ switch (chip_id & 0xf) { case 0: strcpy(pass, "1.0"); break; case 2: strcpy(pass, "1.1"); break; default: strcpy(pass, "X.X"); break; } break; case 3: /* CN58XX */ family = "58"; /* Special case. 
4 core, half cache (CP with half cache) */ if ((num_cores == 4) && fus3.cn58xx.crip_1024k && !strncmp(suffix, "CP", 2)) core_model = "29"; /* Pass 1 uses different encodings for pass numbers */ if ((chip_id & 0xFF) < 0x8) { switch (chip_id & 0x3) { case 0: strcpy(pass, "1.0"); break; case 1: strcpy(pass, "1.1"); break; case 3: strcpy(pass, "1.2"); break; default: strcpy(pass, "1.X"); break; } } break; case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */ if (fus_dat2.cn56xx.raid_en) { if (fus3.cn56xx.crip_1024k) family = "55"; else family = "57"; if (fus_dat2.cn56xx.nocrypto) suffix = "SP"; else suffix = "SSP"; } else { if (fus_dat2.cn56xx.nocrypto) suffix = "CP"; else { suffix = "NSP"; if (fus_dat3.s.nozip) suffix = "SCP"; if (fus_dat3.s.bar2_en) suffix = "NSPB2"; } if (fus3.cn56xx.crip_1024k) family = "54"; else family = "56"; } break; case 6: /* CN50XX */ family = "50"; break; case 7: /* CN52XX */ if (fus3.cn52xx.crip_256k) family = "51"; else family = "52"; break; case 0x93: /* CN61XX */ family = "61"; if (fus_dat2.cn61xx.nocrypto && fus_dat2.cn61xx.dorm_crypto) suffix = "AP"; if (fus_dat2.cn61xx.nocrypto) suffix = "CP"; else if (fus_dat2.cn61xx.dorm_crypto) suffix = "DAP"; else if (fus_dat3.cn61xx.nozip) suffix = "SCP"; break; case 0x90: /* CN63XX */ family = "63"; if (fus_dat3.s.l2c_crip == 2) family = "62"; if (num_cores == 6) /* Other core counts match generic */ core_model = "35"; if (fus_dat2.cn63xx.nocrypto) suffix = "CP"; else if (fus_dat2.cn63xx.dorm_crypto) suffix = "DAP"; else if (fus_dat3.cn63xx.nozip) suffix = "SCP"; else suffix = "AAP"; break; case 0x92: /* CN66XX */ family = "66"; if (num_cores == 6) /* Other core counts match generic */ core_model = "35"; if (fus_dat2.cn66xx.nocrypto && fus_dat2.cn66xx.dorm_crypto) suffix = "AP"; if (fus_dat2.cn66xx.nocrypto) suffix = "CP"; else if (fus_dat2.cn66xx.dorm_crypto) suffix = "DAP"; else if (fus_dat3.cn66xx.nozip) suffix = "SCP"; else suffix = "AAP"; break; case 0x91: /* CN68XX */ family = "68"; if 
(fus_dat2.cn68xx.nocrypto && fus_dat3.cn68xx.nozip) suffix = "CP"; else if (fus_dat2.cn68xx.dorm_crypto) suffix = "DAP"; else if (fus_dat3.cn68xx.nozip) suffix = "SCP"; else if (fus_dat2.cn68xx.nocrypto) suffix = "SP"; else suffix = "AAP"; break; default: family = "XX"; core_model = "XX"; strcpy(pass, "X.X"); suffix = "XXX"; break; } clock_mhz = octeon_get_clock_rate() / 1000000; if (family[0] != '3') { int fuse_base = 384 / 8; if (family[0] == '6') fuse_base = 832 / 8; /* Check for model in fuses, overrides normal decode */ /* This is _not_ valid for Octeon CN3XXX models */ fuse_data |= cvmx_fuse_read_byte(fuse_base + 3); fuse_data = fuse_data << 8; fuse_data |= cvmx_fuse_read_byte(fuse_base + 2); fuse_data = fuse_data << 8; fuse_data |= cvmx_fuse_read_byte(fuse_base + 1); fuse_data = fuse_data << 8; fuse_data |= cvmx_fuse_read_byte(fuse_base); if (fuse_data & 0x7ffff) { int model = fuse_data & 0x3fff; int suffix = (fuse_data >> 14) & 0x1f; if (suffix && model) { /* Have both number and suffix in fuses, so both */ sprintf(fuse_model, "%d%c", model, 'A' + suffix - 1); core_model = ""; family = fuse_model; } else if (suffix && !model) { /* Only have suffix, so add suffix to 'normal' model number */ sprintf(fuse_model, "%s%c", core_model, 'A' + suffix - 1); core_model = fuse_model; } else { /* Don't have suffix, so just use model from fuses */ sprintf(fuse_model, "%d", model); core_model = ""; family = fuse_model; } } } sprintf(buffer, "CN%s%sp%s-%d-%s", family, core_model, pass, clock_mhz, suffix); return buffer; }
gpl-2.0
genesi/linux-testing
drivers/net/wimax/i2400m/driver.c
8038
30355
/* * Intel Wireless WiMAX Connection 2400m * Generic probe/disconnect, reset and message passing * * * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * See i2400m.h for driver documentation. This contains helpers for * the driver model glue [_setup()/_release()], handling device resets * [_dev_reset_handle()], and the backends for the WiMAX stack ops * reset [_op_reset()] and message from user [_op_msg_from_user()]. 
* * ROADMAP: * * i2400m_op_msg_from_user() * i2400m_msg_to_dev() * wimax_msg_to_user_send() * * i2400m_op_reset() * i240m->bus_reset() * * i2400m_dev_reset_handle() * __i2400m_dev_reset_handle() * __i2400m_dev_stop() * __i2400m_dev_start() * * i2400m_setup() * i2400m->bus_setup() * i2400m_bootrom_init() * register_netdev() * wimax_dev_add() * i2400m_dev_start() * __i2400m_dev_start() * i2400m_dev_bootstrap() * i2400m_tx_setup() * i2400m->bus_dev_start() * i2400m_firmware_check() * i2400m_check_mac_addr() * * i2400m_release() * i2400m_dev_stop() * __i2400m_dev_stop() * i2400m_dev_shutdown() * i2400m->bus_dev_stop() * i2400m_tx_release() * i2400m->bus_release() * wimax_dev_rm() * unregister_netdev() */ #include "i2400m.h" #include <linux/etherdevice.h> #include <linux/wimax/i2400m.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/suspend.h> #include <linux/slab.h> #define D_SUBMODULE driver #include "debug-levels.h" static char i2400m_debug_params[128]; module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params), 0644); MODULE_PARM_DESC(debug, "String of space-separated NAME:VALUE pairs, where NAMEs " "are the different debug submodules and VALUE are the " "initial debug value to set."); static char i2400m_barkers_params[128]; module_param_string(barkers, i2400m_barkers_params, sizeof(i2400m_barkers_params), 0644); MODULE_PARM_DESC(barkers, "String of comma-separated 32-bit values; each is " "recognized as the value the device sends as a reboot " "signal; values are appended to a list--setting one value " "as zero cleans the existing list and starts a new one."); /* * WiMAX stack operation: relay a message from user space * * @wimax_dev: device descriptor * @pipe_name: named pipe the message is for * @msg_buf: pointer to the message bytes * @msg_len: length of the buffer * @genl_info: passed by the generic netlink layer * * The WiMAX stack will call this function when a message was received * from user space. 
* * For the i2400m, this is an L3L4 message, as specified in * include/linux/wimax/i2400m.h, and thus prefixed with a 'struct * i2400m_l3l4_hdr'. Driver (and device) expect the messages to be * coded in Little Endian. * * This function just verifies that the header declaration and the * payload are consistent and then deals with it, either forwarding it * to the device or procesing it locally. * * In the i2400m, messages are basically commands that will carry an * ack, so we use i2400m_msg_to_dev() and then deliver the ack back to * user space. The rx.c code might intercept the response and use it * to update the driver's state, but then it will pass it on so it can * be relayed back to user space. * * Note that asynchronous events from the device are processed and * sent to user space in rx.c. */ static int i2400m_op_msg_from_user(struct wimax_dev *wimax_dev, const char *pipe_name, const void *msg_buf, size_t msg_len, const struct genl_info *genl_info) { int result; struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev); struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; d_fnstart(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p " "msg_len %zu genl_info %p)\n", wimax_dev, i2400m, msg_buf, msg_len, genl_info); ack_skb = i2400m_msg_to_dev(i2400m, msg_buf, msg_len); result = PTR_ERR(ack_skb); if (IS_ERR(ack_skb)) goto error_msg_to_dev; result = wimax_msg_send(&i2400m->wimax_dev, ack_skb); error_msg_to_dev: d_fnend(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p msg_len %zu " "genl_info %p) = %d\n", wimax_dev, i2400m, msg_buf, msg_len, genl_info, result); return result; } /* * Context to wait for a reset to finalize */ struct i2400m_reset_ctx { struct completion completion; int result; }; /* * WiMAX stack operation: reset a device * * @wimax_dev: device descriptor * * See the documentation for wimax_reset() and wimax_dev->op_reset for * the requirements of this function. The WiMAX stack guarantees * serialization on calls to this function. 
* * Do a warm reset on the device; if it fails, resort to a cold reset * and return -ENODEV. On successful warm reset, we need to block * until it is complete. * * The bus-driver implementation of reset takes care of falling back * to cold reset if warm fails. */ static int i2400m_op_reset(struct wimax_dev *wimax_dev) { int result; struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev); struct device *dev = i2400m_dev(i2400m); struct i2400m_reset_ctx ctx = { .completion = COMPLETION_INITIALIZER_ONSTACK(ctx.completion), .result = 0, }; d_fnstart(4, dev, "(wimax_dev %p)\n", wimax_dev); mutex_lock(&i2400m->init_mutex); i2400m->reset_ctx = &ctx; mutex_unlock(&i2400m->init_mutex); result = i2400m_reset(i2400m, I2400M_RT_WARM); if (result < 0) goto out; result = wait_for_completion_timeout(&ctx.completion, 4*HZ); if (result == 0) result = -ETIMEDOUT; else if (result > 0) result = ctx.result; /* if result < 0, pass it on */ mutex_lock(&i2400m->init_mutex); i2400m->reset_ctx = NULL; mutex_unlock(&i2400m->init_mutex); out: d_fnend(4, dev, "(wimax_dev %p) = %d\n", wimax_dev, result); return result; } /* * Check the MAC address we got from boot mode is ok * * @i2400m: device descriptor * * Returns: 0 if ok, < 0 errno code on error. 
*/ static int i2400m_check_mac_addr(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *skb; const struct i2400m_tlv_detailed_device_info *ddi; struct net_device *net_dev = i2400m->wimax_dev.net_dev; const unsigned char zeromac[ETH_ALEN] = { 0 }; d_fnstart(3, dev, "(i2400m %p)\n", i2400m); skb = i2400m_get_device_info(i2400m); if (IS_ERR(skb)) { result = PTR_ERR(skb); dev_err(dev, "Cannot verify MAC address, error reading: %d\n", result); goto error; } /* Extract MAC address */ ddi = (void *) skb->data; BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address)); d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n", ddi->mac_address); if (!memcmp(net_dev->perm_addr, ddi->mac_address, sizeof(ddi->mac_address))) goto ok; dev_warn(dev, "warning: device reports a different MAC address " "to that of boot mode's\n"); dev_warn(dev, "device reports %pM\n", ddi->mac_address); dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr); if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac))) dev_err(dev, "device reports an invalid MAC address, " "not updating\n"); else { dev_warn(dev, "updating MAC address\n"); net_dev->addr_len = ETH_ALEN; memcpy(net_dev->perm_addr, ddi->mac_address, ETH_ALEN); memcpy(net_dev->dev_addr, ddi->mac_address, ETH_ALEN); } ok: result = 0; kfree_skb(skb); error: d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); return result; } /** * __i2400m_dev_start - Bring up driver communication with the device * * @i2400m: device descriptor * @flags: boot mode flags * * Returns: 0 if ok, < 0 errno code on error. * * Uploads firmware and brings up all the resources needed to be able * to communicate with the device. * * The workqueue has to be setup early, at least before RX handling * (it's only real user for now) so it can process reports as they * arrive. We also want to destroy it if we retry, to make sure it is * flushed...easier like this. 
* * TX needs to be setup before the bus-specific code (otherwise on * shutdown, the bus-tx code could try to access it). */ static int __i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri flags) { int result; struct wimax_dev *wimax_dev = &i2400m->wimax_dev; struct net_device *net_dev = wimax_dev->net_dev; struct device *dev = i2400m_dev(i2400m); int times = i2400m->bus_bm_retries; d_fnstart(3, dev, "(i2400m %p)\n", i2400m); retry: result = i2400m_dev_bootstrap(i2400m, flags); if (result < 0) { dev_err(dev, "cannot bootstrap device: %d\n", result); goto error_bootstrap; } result = i2400m_tx_setup(i2400m); if (result < 0) goto error_tx_setup; result = i2400m_rx_setup(i2400m); if (result < 0) goto error_rx_setup; i2400m->work_queue = create_singlethread_workqueue(wimax_dev->name); if (i2400m->work_queue == NULL) { result = -ENOMEM; dev_err(dev, "cannot create workqueue\n"); goto error_create_workqueue; } if (i2400m->bus_dev_start) { result = i2400m->bus_dev_start(i2400m); if (result < 0) goto error_bus_dev_start; } i2400m->ready = 1; wmb(); /* see i2400m->ready's documentation */ /* process pending reports from the device */ queue_work(i2400m->work_queue, &i2400m->rx_report_ws); result = i2400m_firmware_check(i2400m); /* fw versions ok? */ if (result < 0) goto error_fw_check; /* At this point is ok to send commands to the device */ result = i2400m_check_mac_addr(i2400m); if (result < 0) goto error_check_mac_addr; result = i2400m_dev_initialize(i2400m); if (result < 0) goto error_dev_initialize; /* We don't want any additional unwanted error recovery triggered * from any other context so if anything went wrong before we come * here, let's keep i2400m->error_recovery untouched and leave it to * dev_reset_handle(). See dev_reset_handle(). */ atomic_dec(&i2400m->error_recovery); /* Every thing works so far, ok, now we are ready to * take error recovery if it's required. 
*/ /* At this point, reports will come for the device and set it * to the right state if it is different than UNINITIALIZED */ d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n", net_dev, i2400m, result); return result; error_dev_initialize: error_check_mac_addr: error_fw_check: i2400m->ready = 0; wmb(); /* see i2400m->ready's documentation */ flush_workqueue(i2400m->work_queue); if (i2400m->bus_dev_stop) i2400m->bus_dev_stop(i2400m); error_bus_dev_start: destroy_workqueue(i2400m->work_queue); error_create_workqueue: i2400m_rx_release(i2400m); error_rx_setup: i2400m_tx_release(i2400m); error_tx_setup: error_bootstrap: if (result == -EL3RST && times-- > 0) { flags = I2400M_BRI_SOFT|I2400M_BRI_MAC_REINIT; goto retry; } d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n", net_dev, i2400m, result); return result; } static int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags) { int result = 0; mutex_lock(&i2400m->init_mutex); /* Well, start the device */ if (i2400m->updown == 0) { result = __i2400m_dev_start(i2400m, bm_flags); if (result >= 0) { i2400m->updown = 1; i2400m->alive = 1; wmb();/* see i2400m->updown and i2400m->alive's doc */ } } mutex_unlock(&i2400m->init_mutex); return result; } /** * i2400m_dev_stop - Tear down driver communication with the device * * @i2400m: device descriptor * * Returns: 0 if ok, < 0 errno code on error. * * Releases all the resources allocated to communicate with the * device. Note we cannot destroy the workqueue earlier as until RX is * fully destroyed, it could still try to schedule jobs. 
*/ static void __i2400m_dev_stop(struct i2400m *i2400m) { struct wimax_dev *wimax_dev = &i2400m->wimax_dev; struct device *dev = i2400m_dev(i2400m); d_fnstart(3, dev, "(i2400m %p)\n", i2400m); wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING); i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST); complete(&i2400m->msg_completion); i2400m_net_wake_stop(i2400m); i2400m_dev_shutdown(i2400m); /* * Make sure no report hooks are running *before* we stop the * communication infrastructure with the device. */ i2400m->ready = 0; /* nobody can queue work anymore */ wmb(); /* see i2400m->ready's documentation */ flush_workqueue(i2400m->work_queue); if (i2400m->bus_dev_stop) i2400m->bus_dev_stop(i2400m); destroy_workqueue(i2400m->work_queue); i2400m_rx_release(i2400m); i2400m_tx_release(i2400m); wimax_state_change(wimax_dev, WIMAX_ST_DOWN); d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m); } /* * Watch out -- we only need to stop if there is a need for it. The * device could have reset itself and failed to come up again (see * _i2400m_dev_reset_handle()). */ static void i2400m_dev_stop(struct i2400m *i2400m) { mutex_lock(&i2400m->init_mutex); if (i2400m->updown) { __i2400m_dev_stop(i2400m); i2400m->updown = 0; i2400m->alive = 0; wmb(); /* see i2400m->updown and i2400m->alive's doc */ } mutex_unlock(&i2400m->init_mutex); } /* * Listen to PM events to cache the firmware before suspend/hibernation * * When the device comes out of suspend, it might go into reset and * firmware has to be uploaded again. At resume, most of the times, we * can't load firmware images from disk, so we need to cache it. * * i2400m_fw_cache() will allocate a kobject and attach the firmware * to it; that way we don't have to worry too much about the fw loader * hitting a race condition. * * Note: modus operandi stolen from the Orinoco driver; thx. 
*/ static int i2400m_pm_notifier(struct notifier_block *notifier, unsigned long pm_event, void *unused) { struct i2400m *i2400m = container_of(notifier, struct i2400m, pm_notifier); struct device *dev = i2400m_dev(i2400m); d_fnstart(3, dev, "(i2400m %p pm_event %lx)\n", i2400m, pm_event); switch (pm_event) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: i2400m_fw_cache(i2400m); break; case PM_POST_RESTORE: /* Restore from hibernation failed. We need to clean * up in exactly the same way, so fall through. */ case PM_POST_HIBERNATION: case PM_POST_SUSPEND: i2400m_fw_uncache(i2400m); break; case PM_RESTORE_PREPARE: default: break; } d_fnend(3, dev, "(i2400m %p pm_event %lx) = void\n", i2400m, pm_event); return NOTIFY_DONE; } /* * pre-reset is called before a device is going on reset * * This has to be followed by a call to i2400m_post_reset(), otherwise * bad things might happen. */ int i2400m_pre_reset(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); d_fnstart(3, dev, "(i2400m %p)\n", i2400m); d_printf(1, dev, "pre-reset shut down\n"); result = 0; mutex_lock(&i2400m->init_mutex); if (i2400m->updown) { netif_tx_disable(i2400m->wimax_dev.net_dev); __i2400m_dev_stop(i2400m); result = 0; /* down't set updown to zero -- this way * post_reset can restore properly */ } mutex_unlock(&i2400m->init_mutex); if (i2400m->bus_release) i2400m->bus_release(i2400m); d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); return result; } EXPORT_SYMBOL_GPL(i2400m_pre_reset); /* * Restore device state after a reset * * Do the work needed after a device reset to bring it up to the same * state as it was before the reset. 
* * NOTE: this requires i2400m->init_mutex taken */ int i2400m_post_reset(struct i2400m *i2400m) { int result = 0; struct device *dev = i2400m_dev(i2400m); d_fnstart(3, dev, "(i2400m %p)\n", i2400m); d_printf(1, dev, "post-reset start\n"); if (i2400m->bus_setup) { result = i2400m->bus_setup(i2400m); if (result < 0) { dev_err(dev, "bus-specific setup failed: %d\n", result); goto error_bus_setup; } } mutex_lock(&i2400m->init_mutex); if (i2400m->updown) { result = __i2400m_dev_start( i2400m, I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT); if (result < 0) goto error_dev_start; } mutex_unlock(&i2400m->init_mutex); d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); return result; error_dev_start: if (i2400m->bus_release) i2400m->bus_release(i2400m); /* even if the device was up, it could not be recovered, so we * mark it as down. */ i2400m->updown = 0; wmb(); /* see i2400m->updown's documentation */ mutex_unlock(&i2400m->init_mutex); error_bus_setup: d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); return result; } EXPORT_SYMBOL_GPL(i2400m_post_reset); /* * The device has rebooted; fix up the device and the driver * * Tear down the driver communication with the device, reload the * firmware and reinitialize the communication with the device. * * If someone calls a reset when the device's firmware is down, in * theory we won't see it because we are not listening. However, just * in case, leave the code to handle it. * * If there is a reset context, use it; this means someone is waiting * for us to tell him when the reset operation is complete and the * device is ready to rock again. * * NOTE: if we are in the process of bringing up or down the * communication with the device [running i2400m_dev_start() or * _stop()], don't do anything, let it fail and handle it. * * This function is ran always in a thread context * * This function gets passed, as payload to i2400m_work() a 'const * char *' ptr with a "reason" why the reset happened (for messages). 
*/ static void __i2400m_dev_reset_handle(struct work_struct *ws) { struct i2400m *i2400m = container_of(ws, struct i2400m, reset_ws); const char *reason = i2400m->reset_reason; struct device *dev = i2400m_dev(i2400m); struct i2400m_reset_ctx *ctx = i2400m->reset_ctx; int result; d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason); i2400m->boot_mode = 1; wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */ result = 0; if (mutex_trylock(&i2400m->init_mutex) == 0) { /* We are still in i2400m_dev_start() [let it fail] or * i2400m_dev_stop() [we are shutting down anyway, so * ignore it] or we are resetting somewhere else. */ dev_err(dev, "device rebooted somewhere else?\n"); i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST); complete(&i2400m->msg_completion); goto out; } dev_err(dev, "%s: reinitializing driver\n", reason); rmb(); if (i2400m->updown) { __i2400m_dev_stop(i2400m); i2400m->updown = 0; wmb(); /* see i2400m->updown's documentation */ } if (i2400m->alive) { result = __i2400m_dev_start(i2400m, I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT); if (result < 0) { dev_err(dev, "%s: cannot start the device: %d\n", reason, result); result = -EUCLEAN; if (atomic_read(&i2400m->bus_reset_retries) >= I2400M_BUS_RESET_RETRIES) { result = -ENODEV; dev_err(dev, "tried too many times to " "reset the device, giving up\n"); } } } if (i2400m->reset_ctx) { ctx->result = result; complete(&ctx->completion); } mutex_unlock(&i2400m->init_mutex); if (result == -EUCLEAN) { /* * We come here because the reset during operational mode * wasn't successfully done and need to proceed to a bus * reset. For the dev_reset_handle() to be able to handle * the reset event later properly, we restore boot_mode back * to the state before previous reset. 
ie: just like we are * issuing the bus reset for the first time */ i2400m->boot_mode = 0; wmb(); atomic_inc(&i2400m->bus_reset_retries); /* ops, need to clean up [w/ init_mutex not held] */ result = i2400m_reset(i2400m, I2400M_RT_BUS); if (result >= 0) result = -ENODEV; } else { rmb(); if (i2400m->alive) { /* great, we expect the device state up and * dev_start() actually brings the device state up */ i2400m->updown = 1; wmb(); atomic_set(&i2400m->bus_reset_retries, 0); } } out: d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n", ws, i2400m, reason); } /** * i2400m_dev_reset_handle - Handle a device's reset in a thread context * * Schedule a device reset handling out on a thread context, so it * is safe to call from atomic context. We can't use the i2400m's * queue as we are going to destroy it and reinitialize it as part of * the driver bringup/bringup process. * * See __i2400m_dev_reset_handle() for details; that takes care of * reinitializing the driver to handle the reset, calling into the * bus-specific functions ops as needed. */ int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason) { i2400m->reset_reason = reason; return schedule_work(&i2400m->reset_ws); } EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle); /* * The actual work of error recovery. * * The current implementation of error recovery is to trigger a bus reset. */ static void __i2400m_error_recovery(struct work_struct *ws) { struct i2400m *i2400m = container_of(ws, struct i2400m, recovery_ws); i2400m_reset(i2400m, I2400M_RT_BUS); } /* * Schedule a work struct for error recovery. * * The intention of error recovery is to bring back the device to some * known state whenever TX sees -110 (-ETIMEOUT) on copying the data to * the device. The TX failure could mean a device bus stuck, so the current * error recovery implementation is to trigger a bus reset to the device * and hopefully it can bring back the device. 
* * The actual work of error recovery has to be in a thread context because * it is kicked off in the TX thread (i2400ms->tx_workqueue) which is to be * destroyed by the error recovery mechanism (currently a bus reset). * * Also, there may be already a queue of TX works that all hit * the -ETIMEOUT error condition because the device is stuck already. * Since bus reset is used as the error recovery mechanism and we don't * want consecutive bus resets simply because the multiple TX works * in the queue all hit the same device erratum, the flag "error_recovery" * is introduced for preventing unwanted consecutive bus resets. * * Error recovery shall only be invoked again if previous one was completed. * The flag error_recovery is set when error recovery mechanism is scheduled, * and is checked when we need to schedule another error recovery. If it is * in place already, then we shouldn't schedule another one. */ void i2400m_error_recovery(struct i2400m *i2400m) { if (atomic_add_return(1, &i2400m->error_recovery) == 1) schedule_work(&i2400m->recovery_ws); else atomic_dec(&i2400m->error_recovery); } EXPORT_SYMBOL_GPL(i2400m_error_recovery); /* * Alloc the command and ack buffers for boot mode * * Get the buffers needed to deal with boot mode messages. These * buffers need to be allocated before the sdio receive irq is setup. */ static int i2400m_bm_buf_alloc(struct i2400m *i2400m) { int result; result = -ENOMEM; i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL); if (i2400m->bm_cmd_buf == NULL) goto error_bm_cmd_kzalloc; i2400m->bm_ack_buf = kzalloc(I2400M_BM_ACK_BUF_SIZE, GFP_KERNEL); if (i2400m->bm_ack_buf == NULL) goto error_bm_ack_buf_kzalloc; return 0; error_bm_ack_buf_kzalloc: kfree(i2400m->bm_cmd_buf); error_bm_cmd_kzalloc: return result; } /* * Free boot mode command and ack buffers. 
*/ static void i2400m_bm_buf_free(struct i2400m *i2400m) { kfree(i2400m->bm_ack_buf); kfree(i2400m->bm_cmd_buf); } /** * i2400m_init - Initialize a 'struct i2400m' from all zeroes * * This is a bus-generic API call. */ void i2400m_init(struct i2400m *i2400m) { wimax_dev_init(&i2400m->wimax_dev); i2400m->boot_mode = 1; i2400m->rx_reorder = 1; init_waitqueue_head(&i2400m->state_wq); spin_lock_init(&i2400m->tx_lock); i2400m->tx_pl_min = UINT_MAX; i2400m->tx_size_min = UINT_MAX; spin_lock_init(&i2400m->rx_lock); i2400m->rx_pl_min = UINT_MAX; i2400m->rx_size_min = UINT_MAX; INIT_LIST_HEAD(&i2400m->rx_reports); INIT_WORK(&i2400m->rx_report_ws, i2400m_report_hook_work); mutex_init(&i2400m->msg_mutex); init_completion(&i2400m->msg_completion); mutex_init(&i2400m->init_mutex); /* wake_tx_ws is initialized in i2400m_tx_setup() */ INIT_WORK(&i2400m->reset_ws, __i2400m_dev_reset_handle); INIT_WORK(&i2400m->recovery_ws, __i2400m_error_recovery); atomic_set(&i2400m->bus_reset_retries, 0); i2400m->alive = 0; /* initialize error_recovery to 1 for denoting we * are not yet ready to take any error recovery */ atomic_set(&i2400m->error_recovery, 1); } EXPORT_SYMBOL_GPL(i2400m_init); int i2400m_reset(struct i2400m *i2400m, enum i2400m_reset_type rt) { struct net_device *net_dev = i2400m->wimax_dev.net_dev; /* * Make sure we stop TXs and down the carrier before * resetting; this is needed to avoid things like * i2400m_wake_tx() scheduling stuff in parallel. */ if (net_dev->reg_state == NETREG_REGISTERED) { netif_tx_disable(net_dev); netif_carrier_off(net_dev); } return i2400m->bus_reset(i2400m, rt); } EXPORT_SYMBOL_GPL(i2400m_reset); /** * i2400m_setup - bus-generic setup function for the i2400m device * * @i2400m: device descriptor (bus-specific parts have been initialized) * * Returns: 0 if ok, < 0 errno code on error. 
* * Sets up basic device comunication infrastructure, boots the ROM to * read the MAC address, registers with the WiMAX and network stacks * and then brings up the device. */ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags) { int result = -ENODEV; struct device *dev = i2400m_dev(i2400m); struct wimax_dev *wimax_dev = &i2400m->wimax_dev; struct net_device *net_dev = i2400m->wimax_dev.net_dev; d_fnstart(3, dev, "(i2400m %p)\n", i2400m); snprintf(wimax_dev->name, sizeof(wimax_dev->name), "i2400m-%s:%s", dev->bus->name, dev_name(dev)); result = i2400m_bm_buf_alloc(i2400m); if (result < 0) { dev_err(dev, "cannot allocate bootmode scratch buffers\n"); goto error_bm_buf_alloc; } if (i2400m->bus_setup) { result = i2400m->bus_setup(i2400m); if (result < 0) { dev_err(dev, "bus-specific setup failed: %d\n", result); goto error_bus_setup; } } result = i2400m_bootrom_init(i2400m, bm_flags); if (result < 0) { dev_err(dev, "read mac addr: bootrom init " "failed: %d\n", result); goto error_bootrom_init; } result = i2400m_read_mac_addr(i2400m); if (result < 0) goto error_read_mac_addr; random_ether_addr(i2400m->src_mac_addr); i2400m->pm_notifier.notifier_call = i2400m_pm_notifier; register_pm_notifier(&i2400m->pm_notifier); result = register_netdev(net_dev); /* Okey dokey, bring it up */ if (result < 0) { dev_err(dev, "cannot register i2400m network device: %d\n", result); goto error_register_netdev; } netif_carrier_off(net_dev); i2400m->wimax_dev.op_msg_from_user = i2400m_op_msg_from_user; i2400m->wimax_dev.op_rfkill_sw_toggle = i2400m_op_rfkill_sw_toggle; i2400m->wimax_dev.op_reset = i2400m_op_reset; result = wimax_dev_add(&i2400m->wimax_dev, net_dev); if (result < 0) goto error_wimax_dev_add; /* Now setup all that requires a registered net and wimax device. 
*/ result = sysfs_create_group(&net_dev->dev.kobj, &i2400m_dev_attr_group); if (result < 0) { dev_err(dev, "cannot setup i2400m's sysfs: %d\n", result); goto error_sysfs_setup; } result = i2400m_debugfs_add(i2400m); if (result < 0) { dev_err(dev, "cannot setup i2400m's debugfs: %d\n", result); goto error_debugfs_setup; } result = i2400m_dev_start(i2400m, bm_flags); if (result < 0) goto error_dev_start; d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); return result; error_dev_start: i2400m_debugfs_rm(i2400m); error_debugfs_setup: sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj, &i2400m_dev_attr_group); error_sysfs_setup: wimax_dev_rm(&i2400m->wimax_dev); error_wimax_dev_add: unregister_netdev(net_dev); error_register_netdev: unregister_pm_notifier(&i2400m->pm_notifier); error_read_mac_addr: error_bootrom_init: if (i2400m->bus_release) i2400m->bus_release(i2400m); error_bus_setup: i2400m_bm_buf_free(i2400m); error_bm_buf_alloc: d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); return result; } EXPORT_SYMBOL_GPL(i2400m_setup); /** * i2400m_release - release the bus-generic driver resources * * Sends a disconnect message and undoes any setup done by i2400m_setup() */ void i2400m_release(struct i2400m *i2400m) { struct device *dev = i2400m_dev(i2400m); d_fnstart(3, dev, "(i2400m %p)\n", i2400m); netif_stop_queue(i2400m->wimax_dev.net_dev); i2400m_dev_stop(i2400m); cancel_work_sync(&i2400m->reset_ws); cancel_work_sync(&i2400m->recovery_ws); i2400m_debugfs_rm(i2400m); sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj, &i2400m_dev_attr_group); wimax_dev_rm(&i2400m->wimax_dev); unregister_netdev(i2400m->wimax_dev.net_dev); unregister_pm_notifier(&i2400m->pm_notifier); if (i2400m->bus_release) i2400m->bus_release(i2400m); i2400m_bm_buf_free(i2400m); d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); } EXPORT_SYMBOL_GPL(i2400m_release); /* * Debug levels control; see debug.h */ struct d_level D_LEVEL[] = { D_SUBMODULE_DEFINE(control), 
D_SUBMODULE_DEFINE(driver), D_SUBMODULE_DEFINE(debugfs), D_SUBMODULE_DEFINE(fw), D_SUBMODULE_DEFINE(netdev), D_SUBMODULE_DEFINE(rfkill), D_SUBMODULE_DEFINE(rx), D_SUBMODULE_DEFINE(sysfs), D_SUBMODULE_DEFINE(tx), }; size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); static int __init i2400m_driver_init(void) { d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400m_debug_params, "i2400m.debug"); return i2400m_barker_db_init(i2400m_barkers_params); } module_init(i2400m_driver_init); static void __exit i2400m_driver_exit(void) { i2400m_barker_db_exit(); } module_exit(i2400m_driver_exit); MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>"); MODULE_DESCRIPTION("Intel 2400M WiMAX networking bus-generic driver"); MODULE_LICENSE("GPL");
gpl-2.0
XileForce/Linaro-LSK
arch/x86/kernel/audit_64.c
13158
1870
#include <linux/init.h> #include <linux/types.h> #include <linux/audit.h> #include <asm/unistd.h> static unsigned dir_class[] = { #include <asm-generic/audit_dir_write.h> ~0U }; static unsigned read_class[] = { #include <asm-generic/audit_read.h> ~0U }; static unsigned write_class[] = { #include <asm-generic/audit_write.h> ~0U }; static unsigned chattr_class[] = { #include <asm-generic/audit_change_attr.h> ~0U }; static unsigned signal_class[] = { #include <asm-generic/audit_signal.h> ~0U }; int audit_classify_arch(int arch) { #ifdef CONFIG_IA32_EMULATION if (arch == AUDIT_ARCH_I386) return 1; #endif return 0; } int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_IA32_EMULATION extern int ia32_classify_syscall(unsigned); if (abi == AUDIT_ARCH_I386) return ia32_classify_syscall(syscall); #endif switch(syscall) { case __NR_open: return 2; case __NR_openat: return 3; case __NR_execve: return 5; default: return 0; } } static int __init audit_classes_init(void) { #ifdef CONFIG_IA32_EMULATION extern __u32 ia32_dir_class[]; extern __u32 ia32_write_class[]; extern __u32 ia32_read_class[]; extern __u32 ia32_chattr_class[]; extern __u32 ia32_signal_class[]; audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class); audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class); audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class); audit_register_class(AUDIT_CLASS_SIGNAL_32, ia32_signal_class); #endif audit_register_class(AUDIT_CLASS_WRITE, write_class); audit_register_class(AUDIT_CLASS_READ, read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); return 0; } __initcall(audit_classes_init);
gpl-2.0
Eagles2F/Telegram
TMessagesProj/jni/boringssl/crypto/ec/ec_key.c
103
13349
/* Originally written by Bodo Moeller for the OpenSSL project. * ==================================================================== * Copyright (c) 1998-2005 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@openssl.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.openssl.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ /* ==================================================================== * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. * * Portions of the attached software ("Contribution") are developed by * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project. * * The Contribution is licensed pursuant to the OpenSSL open source * license provided above. * * The elliptic curve binary polynomial software is originally written by * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems * Laboratories. 
*/ #include <openssl/ec_key.h> #include <string.h> #include <openssl/ec.h> #include <openssl/engine.h> #include <openssl/err.h> #include <openssl/ex_data.h> #include <openssl/mem.h> #include <openssl/thread.h> #include "internal.h" #include "../internal.h" static CRYPTO_EX_DATA_CLASS g_ex_data_class = CRYPTO_EX_DATA_CLASS_INIT; EC_KEY *EC_KEY_new(void) { return EC_KEY_new_method(NULL); } EC_KEY *EC_KEY_new_method(const ENGINE *engine) { EC_KEY *ret = (EC_KEY *)OPENSSL_malloc(sizeof(EC_KEY)); if (ret == NULL) { OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); return NULL; } memset(ret, 0, sizeof(EC_KEY)); if (engine) { ret->ecdsa_meth = ENGINE_get_ECDSA_method(engine); } if (ret->ecdsa_meth) { METHOD_ref(ret->ecdsa_meth); } ret->version = 1; ret->conv_form = POINT_CONVERSION_UNCOMPRESSED; ret->references = 1; if (!CRYPTO_new_ex_data(&g_ex_data_class, ret, &ret->ex_data)) { goto err1; } if (ret->ecdsa_meth && ret->ecdsa_meth->init && !ret->ecdsa_meth->init(ret)) { goto err2; } return ret; err2: CRYPTO_free_ex_data(&g_ex_data_class, ret, &ret->ex_data); err1: if (ret->ecdsa_meth) { METHOD_unref(ret->ecdsa_meth); } OPENSSL_free(ret); return NULL; } EC_KEY *EC_KEY_new_by_curve_name(int nid) { EC_KEY *ret = EC_KEY_new(); if (ret == NULL) { OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); return NULL; } ret->group = EC_GROUP_new_by_curve_name(nid); if (ret->group == NULL) { EC_KEY_free(ret); return NULL; } return ret; } void EC_KEY_free(EC_KEY *r) { if (r == NULL) { return; } if (!CRYPTO_refcount_dec_and_test_zero(&r->references)) { return; } if (r->ecdsa_meth) { if (r->ecdsa_meth->finish) { r->ecdsa_meth->finish(r); } METHOD_unref(r->ecdsa_meth); } EC_GROUP_free(r->group); EC_POINT_free(r->pub_key); BN_clear_free(r->priv_key); CRYPTO_free_ex_data(&g_ex_data_class, r, &r->ex_data); OPENSSL_cleanse((void *)r, sizeof(EC_KEY)); OPENSSL_free(r); } EC_KEY *EC_KEY_copy(EC_KEY *dest, const EC_KEY *src) { if (dest == NULL || src == NULL) { OPENSSL_PUT_ERROR(EC, 
ERR_R_PASSED_NULL_PARAMETER); return NULL; } /* Copy the parameters. */ if (src->group) { /* TODO(fork): duplicating the group seems wasteful. */ EC_GROUP_free(dest->group); dest->group = EC_GROUP_dup(src->group); if (dest->group == NULL) { return NULL; } } /* Copy the public key. */ if (src->pub_key && src->group) { EC_POINT_free(dest->pub_key); dest->pub_key = EC_POINT_dup(src->pub_key, src->group); if (dest->pub_key == NULL) { return NULL; } } /* copy the private key */ if (src->priv_key) { if (dest->priv_key == NULL) { dest->priv_key = BN_new(); if (dest->priv_key == NULL) { return NULL; } } if (!BN_copy(dest->priv_key, src->priv_key)) { return NULL; } } /* copy method/extra data */ if (src->ecdsa_meth) { METHOD_unref(dest->ecdsa_meth); dest->ecdsa_meth = src->ecdsa_meth; METHOD_ref(dest->ecdsa_meth); } CRYPTO_free_ex_data(&g_ex_data_class, dest, &dest->ex_data); if (!CRYPTO_dup_ex_data(&g_ex_data_class, &dest->ex_data, &src->ex_data)) { return NULL; } /* copy the rest */ dest->enc_flag = src->enc_flag; dest->conv_form = src->conv_form; dest->version = src->version; dest->flags = src->flags; return dest; } EC_KEY *EC_KEY_dup(const EC_KEY *ec_key) { EC_KEY *ret = EC_KEY_new(); if (ret == NULL) { return NULL; } if (EC_KEY_copy(ret, ec_key) == NULL) { EC_KEY_free(ret); return NULL; } return ret; } int EC_KEY_up_ref(EC_KEY *r) { CRYPTO_refcount_inc(&r->references); return 1; } int EC_KEY_is_opaque(const EC_KEY *key) { return key->ecdsa_meth && (key->ecdsa_meth->flags & ECDSA_FLAG_OPAQUE); } const EC_GROUP *EC_KEY_get0_group(const EC_KEY *key) { return key->group; } int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group) { EC_GROUP_free(key->group); /* TODO(fork): duplicating the group seems wasteful but see * |EC_KEY_set_conv_form|. */ key->group = EC_GROUP_dup(group); return (key->group == NULL) ? 
0 : 1; } const BIGNUM *EC_KEY_get0_private_key(const EC_KEY *key) { return key->priv_key; } int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *priv_key) { BN_clear_free(key->priv_key); key->priv_key = BN_dup(priv_key); return (key->priv_key == NULL) ? 0 : 1; } const EC_POINT *EC_KEY_get0_public_key(const EC_KEY *key) { return key->pub_key; } int EC_KEY_set_public_key(EC_KEY *key, const EC_POINT *pub_key) { EC_POINT_free(key->pub_key); key->pub_key = EC_POINT_dup(pub_key, key->group); return (key->pub_key == NULL) ? 0 : 1; } unsigned int EC_KEY_get_enc_flags(const EC_KEY *key) { return key->enc_flag; } void EC_KEY_set_enc_flags(EC_KEY *key, unsigned int flags) { key->enc_flag = flags; } point_conversion_form_t EC_KEY_get_conv_form(const EC_KEY *key) { return key->conv_form; } void EC_KEY_set_conv_form(EC_KEY *key, point_conversion_form_t cform) { key->conv_form = cform; } int EC_KEY_precompute_mult(EC_KEY *key, BN_CTX *ctx) { if (key->group == NULL) { return 0; } return EC_GROUP_precompute_mult(key->group, ctx); } int EC_KEY_check_key(const EC_KEY *eckey) { int ok = 0; BN_CTX *ctx = NULL; const BIGNUM *order = NULL; EC_POINT *point = NULL; if (!eckey || !eckey->group || !eckey->pub_key) { OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); return 0; } if (EC_POINT_is_at_infinity(eckey->group, eckey->pub_key)) { OPENSSL_PUT_ERROR(EC, EC_R_POINT_AT_INFINITY); goto err; } ctx = BN_CTX_new(); point = EC_POINT_new(eckey->group); if (ctx == NULL || point == NULL) { goto err; } /* testing whether the pub_key is on the elliptic curve */ if (!EC_POINT_is_on_curve(eckey->group, eckey->pub_key, ctx)) { OPENSSL_PUT_ERROR(EC, EC_R_POINT_IS_NOT_ON_CURVE); goto err; } /* testing whether pub_key * order is the point at infinity */ /* TODO(fork): can this be skipped if the cofactor is one or if we're about * to check the private key, below? 
*/ order = &eckey->group->order; if (BN_is_zero(order)) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_GROUP_ORDER); goto err; } if (!EC_POINT_mul(eckey->group, point, NULL, eckey->pub_key, order, ctx)) { OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB); goto err; } if (!EC_POINT_is_at_infinity(eckey->group, point)) { OPENSSL_PUT_ERROR(EC, EC_R_WRONG_ORDER); goto err; } /* in case the priv_key is present : * check if generator * priv_key == pub_key */ if (eckey->priv_key) { if (BN_cmp(eckey->priv_key, order) >= 0) { OPENSSL_PUT_ERROR(EC, EC_R_WRONG_ORDER); goto err; } if (!EC_POINT_mul(eckey->group, point, eckey->priv_key, NULL, NULL, ctx)) { OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB); goto err; } if (EC_POINT_cmp(eckey->group, point, eckey->pub_key, ctx) != 0) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_PRIVATE_KEY); goto err; } } ok = 1; err: BN_CTX_free(ctx); EC_POINT_free(point); return ok; } int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x, BIGNUM *y) { BN_CTX *ctx = NULL; BIGNUM *tx, *ty; EC_POINT *point = NULL; int ok = 0; if (!key || !key->group || !x || !y) { OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); return 0; } ctx = BN_CTX_new(); point = EC_POINT_new(key->group); if (ctx == NULL || point == NULL) { goto err; } tx = BN_CTX_get(ctx); ty = BN_CTX_get(ctx); if (!EC_POINT_set_affine_coordinates_GFp(key->group, point, x, y, ctx) || !EC_POINT_get_affine_coordinates_GFp(key->group, point, tx, ty, ctx)) { goto err; } /* Check if retrieved coordinates match originals: if not values * are out of range. 
*/ if (BN_cmp(x, tx) || BN_cmp(y, ty)) { OPENSSL_PUT_ERROR(EC, EC_R_COORDINATES_OUT_OF_RANGE); goto err; } if (!EC_KEY_set_public_key(key, point)) { goto err; } if (EC_KEY_check_key(key) == 0) { goto err; } ok = 1; err: BN_CTX_free(ctx); EC_POINT_free(point); return ok; } int EC_KEY_generate_key(EC_KEY *eckey) { int ok = 0; BN_CTX *ctx = NULL; BIGNUM *priv_key = NULL, *order = NULL; EC_POINT *pub_key = NULL; if (!eckey || !eckey->group) { OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); return 0; } order = BN_new(); ctx = BN_CTX_new(); if (order == NULL || ctx == NULL) { goto err; } if (eckey->priv_key == NULL) { priv_key = BN_new(); if (priv_key == NULL) { goto err; } } else { priv_key = eckey->priv_key; } if (!EC_GROUP_get_order(eckey->group, order, ctx)) { goto err; } do { if (!BN_rand_range(priv_key, order)) { goto err; } } while (BN_is_zero(priv_key)); if (eckey->pub_key == NULL) { pub_key = EC_POINT_new(eckey->group); if (pub_key == NULL) { goto err; } } else { pub_key = eckey->pub_key; } if (!EC_POINT_mul(eckey->group, pub_key, priv_key, NULL, NULL, ctx)) { goto err; } eckey->priv_key = priv_key; eckey->pub_key = pub_key; ok = 1; err: BN_free(order); if (eckey->pub_key == NULL) { EC_POINT_free(pub_key); } if (eckey->priv_key == NULL) { BN_free(priv_key); } BN_CTX_free(ctx); return ok; } int EC_KEY_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func, CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func) { int index; if (!CRYPTO_get_ex_new_index(&g_ex_data_class, &index, argl, argp, new_func, dup_func, free_func)) { return -1; } return index; } int EC_KEY_set_ex_data(EC_KEY *d, int idx, void *arg) { return CRYPTO_set_ex_data(&d->ex_data, idx, arg); } void *EC_KEY_get_ex_data(const EC_KEY *d, int idx) { return CRYPTO_get_ex_data(&d->ex_data, idx); } void EC_KEY_set_asn1_flag(EC_KEY *key, int flag) {}
gpl-2.0
ttylinux/Telegram
TMessagesProj/jni/boringssl/crypto/bio/connect.c
103
14627
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. 
If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ #include <openssl/bio.h> #include <assert.h> #include <errno.h> #include <stdio.h> #include <string.h> #if !defined(OPENSSL_WINDOWS) #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <unistd.h> #else #pragma warning(push, 3) #include <winsock2.h> #include <ws2tcpip.h> #pragma warning(pop) #endif #include <openssl/buf.h> #include <openssl/err.h> #include <openssl/mem.h> #include "internal.h" enum { BIO_CONN_S_BEFORE, BIO_CONN_S_BLOCKED_CONNECT, BIO_CONN_S_OK, }; typedef struct bio_connect_st { int state; char *param_hostname; char *param_port; int nbio; uint8_t ip[4]; unsigned short port; struct sockaddr_storage them; socklen_t them_length; /* the file descriptor is kept in bio->num in order to match the socket * BIO. 
*/ /* info_callback is called when the connection is initially made * callback(BIO,state,ret); The callback should return 'ret', state is for * compatibility with the SSL info_callback. */ int (*info_callback)(const BIO *bio, int state, int ret); } BIO_CONNECT; #if !defined(OPENSSL_WINDOWS) static int closesocket(int sock) { return close(sock); } #endif /* maybe_copy_ipv4_address sets |*ipv4| to the IPv4 address from |ss| (in * big-endian order), if |ss| contains an IPv4 socket address. */ static void maybe_copy_ipv4_address(uint8_t *ipv4, const struct sockaddr_storage *ss) { const struct sockaddr_in *sin; if (ss->ss_family != AF_INET) { return; } sin = (const struct sockaddr_in*) ss; memcpy(ipv4, &sin->sin_addr, 4); } static int conn_state(BIO *bio, BIO_CONNECT *c) { int ret = -1, i; char *p, *q; int (*cb)(const BIO *, int, int) = NULL; if (c->info_callback != NULL) { cb = c->info_callback; } for (;;) { switch (c->state) { case BIO_CONN_S_BEFORE: p = c->param_hostname; if (p == NULL) { OPENSSL_PUT_ERROR(BIO, BIO_R_NO_HOSTNAME_SPECIFIED); goto exit_loop; } for (; *p != 0; p++) { if (*p == ':' || *p == '/') { break; } } i = *p; if (i == ':' || i == '/') { *(p++) = 0; if (i == ':') { for (q = p; *q; q++) { if (*q == '/') { *q = 0; break; } } OPENSSL_free(c->param_port); c->param_port = BUF_strdup(p); } } if (c->param_port == NULL) { OPENSSL_PUT_ERROR(BIO, BIO_R_NO_PORT_SPECIFIED); ERR_add_error_data(2, "host=", c->param_hostname); goto exit_loop; } if (!bio_ip_and_port_to_socket_and_addr( &bio->num, &c->them, &c->them_length, c->param_hostname, c->param_port)) { OPENSSL_PUT_ERROR(BIO, BIO_R_UNABLE_TO_CREATE_SOCKET); ERR_add_error_data(4, "host=", c->param_hostname, ":", c->param_port); goto exit_loop; } memset(c->ip, 0, 4); maybe_copy_ipv4_address(c->ip, &c->them); if (c->nbio) { if (!bio_socket_nbio(bio->num, 1)) { OPENSSL_PUT_ERROR(BIO, BIO_R_ERROR_SETTING_NBIO); ERR_add_error_data(4, "host=", c->param_hostname, ":", c->param_port); goto exit_loop; } } i = 1; ret = 
setsockopt(bio->num, SOL_SOCKET, SO_KEEPALIVE, (char *)&i, sizeof(i)); if (ret < 0) { OPENSSL_PUT_SYSTEM_ERROR(setsockopt); OPENSSL_PUT_ERROR(BIO, BIO_R_KEEPALIVE); ERR_add_error_data(4, "host=", c->param_hostname, ":", c->param_port); goto exit_loop; } BIO_clear_retry_flags(bio); ret = connect(bio->num, (struct sockaddr*) &c->them, c->them_length); if (ret < 0) { if (bio_fd_should_retry(ret)) { BIO_set_flags(bio, (BIO_FLAGS_IO_SPECIAL | BIO_FLAGS_SHOULD_RETRY)); c->state = BIO_CONN_S_BLOCKED_CONNECT; bio->retry_reason = BIO_RR_CONNECT; } else { OPENSSL_PUT_SYSTEM_ERROR(connect); OPENSSL_PUT_ERROR(BIO, BIO_R_CONNECT_ERROR); ERR_add_error_data(4, "host=", c->param_hostname, ":", c->param_port); } goto exit_loop; } else { c->state = BIO_CONN_S_OK; } break; case BIO_CONN_S_BLOCKED_CONNECT: i = bio_sock_error(bio->num); if (i) { if (bio_fd_should_retry(ret)) { BIO_set_flags(bio, (BIO_FLAGS_IO_SPECIAL | BIO_FLAGS_SHOULD_RETRY)); c->state = BIO_CONN_S_BLOCKED_CONNECT; bio->retry_reason = BIO_RR_CONNECT; ret = -1; } else { BIO_clear_retry_flags(bio); OPENSSL_PUT_SYSTEM_ERROR(connect); OPENSSL_PUT_ERROR(BIO, BIO_R_NBIO_CONNECT_ERROR); ERR_add_error_data(4, "host=", c->param_hostname, ":", c->param_port); ret = 0; } goto exit_loop; } else { c->state = BIO_CONN_S_OK; } break; case BIO_CONN_S_OK: ret = 1; goto exit_loop; default: assert(0); goto exit_loop; } if (cb != NULL) { ret = cb((BIO *)bio, c->state, ret); if (ret == 0) { goto end; } } } exit_loop: if (cb != NULL) { ret = cb((BIO *)bio, c->state, ret); } end: return ret; } static BIO_CONNECT *BIO_CONNECT_new(void) { BIO_CONNECT *ret = OPENSSL_malloc(sizeof(BIO_CONNECT)); if (ret == NULL) { return NULL; } memset(ret, 0, sizeof(BIO_CONNECT)); ret->state = BIO_CONN_S_BEFORE; return ret; } static void BIO_CONNECT_free(BIO_CONNECT *c) { if (c == NULL) { return; } OPENSSL_free(c->param_hostname); OPENSSL_free(c->param_port); OPENSSL_free(c); } static int conn_new(BIO *bio) { bio->init = 0; bio->num = -1; bio->flags = 0; 
bio->ptr = (char *)BIO_CONNECT_new(); return bio->ptr != NULL; } static void conn_close_socket(BIO *bio) { BIO_CONNECT *c = (BIO_CONNECT *) bio->ptr; if (bio->num == -1) { return; } /* Only do a shutdown if things were established */ if (c->state == BIO_CONN_S_OK) { shutdown(bio->num, 2); } closesocket(bio->num); bio->num = -1; } static int conn_free(BIO *bio) { if (bio == NULL) { return 0; } if (bio->shutdown) { conn_close_socket(bio); } BIO_CONNECT_free((BIO_CONNECT*) bio->ptr); return 1; } static int conn_read(BIO *bio, char *out, int out_len) { int ret = 0; BIO_CONNECT *data; data = (BIO_CONNECT *)bio->ptr; if (data->state != BIO_CONN_S_OK) { ret = conn_state(bio, data); if (ret <= 0) { return ret; } } bio_clear_socket_error(); ret = recv(bio->num, out, out_len, 0); BIO_clear_retry_flags(bio); if (ret <= 0) { if (bio_fd_should_retry(ret)) { BIO_set_retry_read(bio); } } return ret; } static int conn_write(BIO *bio, const char *in, int in_len) { int ret; BIO_CONNECT *data; data = (BIO_CONNECT *)bio->ptr; if (data->state != BIO_CONN_S_OK) { ret = conn_state(bio, data); if (ret <= 0) { return ret; } } bio_clear_socket_error(); ret = send(bio->num, in, in_len, 0); BIO_clear_retry_flags(bio); if (ret <= 0) { if (bio_fd_should_retry(ret)) { BIO_set_retry_write(bio); } } return ret; } static long conn_ctrl(BIO *bio, int cmd, long num, void *ptr) { int *ip; const char **pptr; long ret = 1; BIO_CONNECT *data; data = (BIO_CONNECT *)bio->ptr; switch (cmd) { case BIO_CTRL_RESET: ret = 0; data->state = BIO_CONN_S_BEFORE; conn_close_socket(bio); bio->flags = 0; break; case BIO_C_DO_STATE_MACHINE: /* use this one to start the connection */ if (data->state != BIO_CONN_S_OK) { ret = (long)conn_state(bio, data); } else { ret = 1; } break; case BIO_C_GET_CONNECT: /* TODO(fork): can this be removed? (Or maybe this whole file). 
*/ if (ptr != NULL) { pptr = (const char **)ptr; if (num == 0) { *pptr = data->param_hostname; } else if (num == 1) { *pptr = data->param_port; } else if (num == 2) { *pptr = (char *) &data->ip[0]; } else if (num == 3) { *((int *)ptr) = data->port; } if (!bio->init) { *pptr = "not initialized"; } ret = 1; } break; case BIO_C_SET_CONNECT: if (ptr != NULL) { bio->init = 1; if (num == 0) { OPENSSL_free(data->param_hostname); data->param_hostname = BUF_strdup(ptr); if (data->param_hostname == NULL) { ret = 0; } } else if (num == 1) { OPENSSL_free(data->param_port); data->param_port = BUF_strdup(ptr); if (data->param_port == NULL) { ret = 0; } } else { ret = 0; } } break; case BIO_C_SET_NBIO: data->nbio = (int)num; break; case BIO_C_GET_FD: if (bio->init) { ip = (int *)ptr; if (ip != NULL) { *ip = bio->num; } ret = 1; } else { ret = 0; } break; case BIO_CTRL_GET_CLOSE: ret = bio->shutdown; break; case BIO_CTRL_SET_CLOSE: bio->shutdown = (int)num; break; case BIO_CTRL_PENDING: case BIO_CTRL_WPENDING: ret = 0; break; case BIO_CTRL_FLUSH: break; case BIO_CTRL_SET_CALLBACK: { #if 0 /* FIXME: Should this be used? 
-- Richard Levitte */ OPENSSL_PUT_ERROR(BIO, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); ret = -1; #else ret = 0; #endif } break; case BIO_CTRL_GET_CALLBACK: { int (**fptr)(const BIO *bio, int state, int xret); fptr = (int (**)(const BIO *bio, int state, int xret))ptr; *fptr = data->info_callback; } break; default: ret = 0; break; } return (ret); } static long conn_callback_ctrl(BIO *bio, int cmd, bio_info_cb fp) { long ret = 1; BIO_CONNECT *data; data = (BIO_CONNECT *)bio->ptr; switch (cmd) { case BIO_CTRL_SET_CALLBACK: { data->info_callback = (int (*)(const struct bio_st *, int, int))fp; } break; default: ret = 0; break; } return ret; } static int conn_puts(BIO *bp, const char *str) { return conn_write(bp, str, strlen(str)); } BIO *BIO_new_connect(const char *hostname) { BIO *ret; ret = BIO_new(BIO_s_connect()); if (ret == NULL) { return NULL; } if (!BIO_set_conn_hostname(ret, hostname)) { BIO_free(ret); return NULL; } return ret; } static const BIO_METHOD methods_connectp = { BIO_TYPE_CONNECT, "socket connect", conn_write, conn_read, conn_puts, NULL /* connect_gets, */, conn_ctrl, conn_new, conn_free, conn_callback_ctrl, }; const BIO_METHOD *BIO_s_connect(void) { return &methods_connectp; } int BIO_set_conn_hostname(BIO *bio, const char *name) { return BIO_ctrl(bio, BIO_C_SET_CONNECT, 0, (void*) name); } int BIO_set_conn_port(BIO *bio, const char *port_str) { return BIO_ctrl(bio, BIO_C_SET_CONNECT, 1, (void*) port_str); } int BIO_set_nbio(BIO *bio, int on) { return BIO_ctrl(bio, BIO_C_SET_NBIO, on, NULL); }
gpl-2.0
linuxvom/linux
drivers/pinctrl/pinctrl-tegra.c
359
18218
/* * Driver for the NVIDIA Tegra pinmux * * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved. * * Derived from code: * Copyright (C) 2010 Google, Inc. * Copyright (C) 2010 NVIDIA Corporation * Copyright (C) 2009-2011 ST-Ericsson AB * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pinctrl/machine.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/pinctrl/pinconf.h> #include <linux/slab.h> #include "core.h" #include "pinctrl-tegra.h" #include "pinctrl-utils.h" struct tegra_pmx { struct device *dev; struct pinctrl_dev *pctl; const struct tegra_pinctrl_soc_data *soc; const char **group_pins; int nbanks; void __iomem **regs; }; static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg) { return readl(pmx->regs[bank] + reg); } static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg) { writel(val, pmx->regs[bank] + reg); } static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); return pmx->soc->ngroups; } static const char *tegra_pinctrl_get_group_name(struct pinctrl_dev *pctldev, unsigned group) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); return pmx->soc->groups[group].name; } static int tegra_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, unsigned group, const unsigned **pins, unsigned *num_pins) { struct tegra_pmx *pmx = 
pinctrl_dev_get_drvdata(pctldev); *pins = pmx->soc->groups[group].pins; *num_pins = pmx->soc->groups[group].npins; return 0; } #ifdef CONFIG_DEBUG_FS static void tegra_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset) { seq_printf(s, " %s", dev_name(pctldev->dev)); } #endif static const struct cfg_param { const char *property; enum tegra_pinconf_param param; } cfg_params[] = { {"nvidia,pull", TEGRA_PINCONF_PARAM_PULL}, {"nvidia,tristate", TEGRA_PINCONF_PARAM_TRISTATE}, {"nvidia,enable-input", TEGRA_PINCONF_PARAM_ENABLE_INPUT}, {"nvidia,open-drain", TEGRA_PINCONF_PARAM_OPEN_DRAIN}, {"nvidia,lock", TEGRA_PINCONF_PARAM_LOCK}, {"nvidia,io-reset", TEGRA_PINCONF_PARAM_IORESET}, {"nvidia,rcv-sel", TEGRA_PINCONF_PARAM_RCV_SEL}, {"nvidia,io-hv", TEGRA_PINCONF_PARAM_RCV_SEL}, {"nvidia,high-speed-mode", TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE}, {"nvidia,schmitt", TEGRA_PINCONF_PARAM_SCHMITT}, {"nvidia,low-power-mode", TEGRA_PINCONF_PARAM_LOW_POWER_MODE}, {"nvidia,pull-down-strength", TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH}, {"nvidia,pull-up-strength", TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH}, {"nvidia,slew-rate-falling", TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING}, {"nvidia,slew-rate-rising", TEGRA_PINCONF_PARAM_SLEW_RATE_RISING}, {"nvidia,drive-type", TEGRA_PINCONF_PARAM_DRIVE_TYPE}, }; static int tegra_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, struct device_node *np, struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps) { struct device *dev = pctldev->dev; int ret, i; const char *function; u32 val; unsigned long config; unsigned long *configs = NULL; unsigned num_configs = 0; unsigned reserve; struct property *prop; const char *group; ret = of_property_read_string(np, "nvidia,function", &function); if (ret < 0) { /* EINVAL=missing, which is fine since it's optional */ if (ret != -EINVAL) dev_err(dev, "could not parse property nvidia,function\n"); function = NULL; } for (i = 0; i < ARRAY_SIZE(cfg_params); i++) { ret = 
of_property_read_u32(np, cfg_params[i].property, &val); if (!ret) { config = TEGRA_PINCONF_PACK(cfg_params[i].param, val); ret = pinctrl_utils_add_config(pctldev, &configs, &num_configs, config); if (ret < 0) goto exit; /* EINVAL=missing, which is fine since it's optional */ } else if (ret != -EINVAL) { dev_err(dev, "could not parse property %s\n", cfg_params[i].property); } } reserve = 0; if (function != NULL) reserve++; if (num_configs) reserve++; ret = of_property_count_strings(np, "nvidia,pins"); if (ret < 0) { dev_err(dev, "could not parse property nvidia,pins\n"); goto exit; } reserve *= ret; ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps, reserve); if (ret < 0) goto exit; of_property_for_each_string(np, "nvidia,pins", prop, group) { if (function) { ret = pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps, group, function); if (ret < 0) goto exit; } if (num_configs) { ret = pinctrl_utils_add_map_configs(pctldev, map, reserved_maps, num_maps, group, configs, num_configs, PIN_MAP_TYPE_CONFIGS_GROUP); if (ret < 0) goto exit; } } ret = 0; exit: kfree(configs); return ret; } static int tegra_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, unsigned *num_maps) { unsigned reserved_maps; struct device_node *np; int ret; reserved_maps = 0; *map = NULL; *num_maps = 0; for_each_child_of_node(np_config, np) { ret = tegra_pinctrl_dt_subnode_to_map(pctldev, np, map, &reserved_maps, num_maps); if (ret < 0) { pinctrl_utils_dt_free_map(pctldev, *map, *num_maps); return ret; } } return 0; } static const struct pinctrl_ops tegra_pinctrl_ops = { .get_groups_count = tegra_pinctrl_get_groups_count, .get_group_name = tegra_pinctrl_get_group_name, .get_group_pins = tegra_pinctrl_get_group_pins, #ifdef CONFIG_DEBUG_FS .pin_dbg_show = tegra_pinctrl_pin_dbg_show, #endif .dt_node_to_map = tegra_pinctrl_dt_node_to_map, .dt_free_map = pinctrl_utils_dt_free_map, }; static int 
tegra_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); return pmx->soc->nfunctions; } static const char *tegra_pinctrl_get_func_name(struct pinctrl_dev *pctldev, unsigned function) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); return pmx->soc->functions[function].name; } static int tegra_pinctrl_get_func_groups(struct pinctrl_dev *pctldev, unsigned function, const char * const **groups, unsigned * const num_groups) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); *groups = pmx->soc->functions[function].groups; *num_groups = pmx->soc->functions[function].ngroups; return 0; } static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned function, unsigned group) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); const struct tegra_pingroup *g; int i; u32 val; g = &pmx->soc->groups[group]; if (WARN_ON(g->mux_reg < 0)) return -EINVAL; for (i = 0; i < ARRAY_SIZE(g->funcs); i++) { if (g->funcs[i] == function) break; } if (WARN_ON(i == ARRAY_SIZE(g->funcs))) return -EINVAL; val = pmx_readl(pmx, g->mux_bank, g->mux_reg); val &= ~(0x3 << g->mux_bit); val |= i << g->mux_bit; pmx_writel(pmx, val, g->mux_bank, g->mux_reg); return 0; } static const struct pinmux_ops tegra_pinmux_ops = { .get_functions_count = tegra_pinctrl_get_funcs_count, .get_function_name = tegra_pinctrl_get_func_name, .get_function_groups = tegra_pinctrl_get_func_groups, .set_mux = tegra_pinctrl_set_mux, }; static int tegra_pinconf_reg(struct tegra_pmx *pmx, const struct tegra_pingroup *g, enum tegra_pinconf_param param, bool report_err, s8 *bank, s16 *reg, s8 *bit, s8 *width) { switch (param) { case TEGRA_PINCONF_PARAM_PULL: *bank = g->pupd_bank; *reg = g->pupd_reg; *bit = g->pupd_bit; *width = 2; break; case TEGRA_PINCONF_PARAM_TRISTATE: *bank = g->tri_bank; *reg = g->tri_reg; *bit = g->tri_bit; *width = 1; break; case TEGRA_PINCONF_PARAM_ENABLE_INPUT: *bank = g->mux_bank; *reg = g->mux_reg; *bit = 
g->einput_bit; *width = 1; break; case TEGRA_PINCONF_PARAM_OPEN_DRAIN: *bank = g->mux_bank; *reg = g->mux_reg; *bit = g->odrain_bit; *width = 1; break; case TEGRA_PINCONF_PARAM_LOCK: *bank = g->mux_bank; *reg = g->mux_reg; *bit = g->lock_bit; *width = 1; break; case TEGRA_PINCONF_PARAM_IORESET: *bank = g->mux_bank; *reg = g->mux_reg; *bit = g->ioreset_bit; *width = 1; break; case TEGRA_PINCONF_PARAM_RCV_SEL: *bank = g->mux_bank; *reg = g->mux_reg; *bit = g->rcv_sel_bit; *width = 1; break; case TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE: if (pmx->soc->hsm_in_mux) { *bank = g->mux_bank; *reg = g->mux_reg; } else { *bank = g->drv_bank; *reg = g->drv_reg; } *bit = g->hsm_bit; *width = 1; break; case TEGRA_PINCONF_PARAM_SCHMITT: if (pmx->soc->schmitt_in_mux) { *bank = g->mux_bank; *reg = g->mux_reg; } else { *bank = g->drv_bank; *reg = g->drv_reg; } *bit = g->schmitt_bit; *width = 1; break; case TEGRA_PINCONF_PARAM_LOW_POWER_MODE: *bank = g->drv_bank; *reg = g->drv_reg; *bit = g->lpmd_bit; *width = 2; break; case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH: *bank = g->drv_bank; *reg = g->drv_reg; *bit = g->drvdn_bit; *width = g->drvdn_width; break; case TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH: *bank = g->drv_bank; *reg = g->drv_reg; *bit = g->drvup_bit; *width = g->drvup_width; break; case TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING: *bank = g->drv_bank; *reg = g->drv_reg; *bit = g->slwf_bit; *width = g->slwf_width; break; case TEGRA_PINCONF_PARAM_SLEW_RATE_RISING: *bank = g->drv_bank; *reg = g->drv_reg; *bit = g->slwr_bit; *width = g->slwr_width; break; case TEGRA_PINCONF_PARAM_DRIVE_TYPE: if (pmx->soc->drvtype_in_mux) { *bank = g->mux_bank; *reg = g->mux_reg; } else { *bank = g->drv_bank; *reg = g->drv_reg; } *bit = g->drvtype_bit; *width = 2; break; default: dev_err(pmx->dev, "Invalid config param %04x\n", param); return -ENOTSUPP; } if (*reg < 0 || *bit > 31) { if (report_err) { const char *prop = "unknown"; int i; for (i = 0; i < ARRAY_SIZE(cfg_params); i++) { if (cfg_params[i].param 
== param) { prop = cfg_params[i].property; break; } } dev_err(pmx->dev, "Config param %04x (%s) not supported on group %s\n", param, prop, g->name); } return -ENOTSUPP; } return 0; } static int tegra_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *config) { dev_err(pctldev->dev, "pin_config_get op not supported\n"); return -ENOTSUPP; } static int tegra_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *configs, unsigned num_configs) { dev_err(pctldev->dev, "pin_config_set op not supported\n"); return -ENOTSUPP; } static int tegra_pinconf_group_get(struct pinctrl_dev *pctldev, unsigned group, unsigned long *config) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); enum tegra_pinconf_param param = TEGRA_PINCONF_UNPACK_PARAM(*config); u16 arg; const struct tegra_pingroup *g; int ret; s8 bank, bit, width; s16 reg; u32 val, mask; g = &pmx->soc->groups[group]; ret = tegra_pinconf_reg(pmx, g, param, true, &bank, &reg, &bit, &width); if (ret < 0) return ret; val = pmx_readl(pmx, bank, reg); mask = (1 << width) - 1; arg = (val >> bit) & mask; *config = TEGRA_PINCONF_PACK(param, arg); return 0; } static int tegra_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group, unsigned long *configs, unsigned num_configs) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); enum tegra_pinconf_param param; u16 arg; const struct tegra_pingroup *g; int ret, i; s8 bank, bit, width; s16 reg; u32 val, mask; g = &pmx->soc->groups[group]; for (i = 0; i < num_configs; i++) { param = TEGRA_PINCONF_UNPACK_PARAM(configs[i]); arg = TEGRA_PINCONF_UNPACK_ARG(configs[i]); ret = tegra_pinconf_reg(pmx, g, param, true, &bank, &reg, &bit, &width); if (ret < 0) return ret; val = pmx_readl(pmx, bank, reg); /* LOCK can't be cleared */ if (param == TEGRA_PINCONF_PARAM_LOCK) { if ((val & BIT(bit)) && !arg) { dev_err(pctldev->dev, "LOCK bit cannot be cleared\n"); return -EINVAL; } } /* Special-case Boolean values; allow any non-zero as true */ 
if (width == 1) arg = !!arg; /* Range-check user-supplied value */ mask = (1 << width) - 1; if (arg & ~mask) { dev_err(pctldev->dev, "config %lx: %x too big for %d bit register\n", configs[i], arg, width); return -EINVAL; } /* Update register */ val &= ~(mask << bit); val |= arg << bit; pmx_writel(pmx, val, bank, reg); } /* for each config */ return 0; } #ifdef CONFIG_DEBUG_FS static void tegra_pinconf_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset) { } static const char *strip_prefix(const char *s) { const char *comma = strchr(s, ','); if (!comma) return s; return comma + 1; } static void tegra_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned group) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); const struct tegra_pingroup *g; int i, ret; s8 bank, bit, width; s16 reg; u32 val; g = &pmx->soc->groups[group]; for (i = 0; i < ARRAY_SIZE(cfg_params); i++) { ret = tegra_pinconf_reg(pmx, g, cfg_params[i].param, false, &bank, &reg, &bit, &width); if (ret < 0) continue; val = pmx_readl(pmx, bank, reg); val >>= bit; val &= (1 << width) - 1; seq_printf(s, "\n\t%s=%u", strip_prefix(cfg_params[i].property), val); } } static void tegra_pinconf_config_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned long config) { enum tegra_pinconf_param param = TEGRA_PINCONF_UNPACK_PARAM(config); u16 arg = TEGRA_PINCONF_UNPACK_ARG(config); const char *pname = "unknown"; int i; for (i = 0; i < ARRAY_SIZE(cfg_params); i++) { if (cfg_params[i].param == param) { pname = cfg_params[i].property; break; } } seq_printf(s, "%s=%d", strip_prefix(pname), arg); } #endif static const struct pinconf_ops tegra_pinconf_ops = { .pin_config_get = tegra_pinconf_get, .pin_config_set = tegra_pinconf_set, .pin_config_group_get = tegra_pinconf_group_get, .pin_config_group_set = tegra_pinconf_group_set, #ifdef CONFIG_DEBUG_FS .pin_config_dbg_show = tegra_pinconf_dbg_show, .pin_config_group_dbg_show = 
tegra_pinconf_group_dbg_show, .pin_config_config_dbg_show = tegra_pinconf_config_dbg_show, #endif }; static struct pinctrl_gpio_range tegra_pinctrl_gpio_range = { .name = "Tegra GPIOs", .id = 0, .base = 0, }; static struct pinctrl_desc tegra_pinctrl_desc = { .pctlops = &tegra_pinctrl_ops, .pmxops = &tegra_pinmux_ops, .confops = &tegra_pinconf_ops, .owner = THIS_MODULE, }; static bool gpio_node_has_range(void) { struct device_node *np; bool has_prop = false; np = of_find_compatible_node(NULL, NULL, "nvidia,tegra30-gpio"); if (!np) return has_prop; has_prop = of_find_property(np, "gpio-ranges", NULL); of_node_put(np); return has_prop; } int tegra_pinctrl_probe(struct platform_device *pdev, const struct tegra_pinctrl_soc_data *soc_data) { struct tegra_pmx *pmx; struct resource *res; int i; const char **group_pins; int fn, gn, gfn; pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL); if (!pmx) { dev_err(&pdev->dev, "Can't alloc tegra_pmx\n"); return -ENOMEM; } pmx->dev = &pdev->dev; pmx->soc = soc_data; /* * Each mux group will appear in 4 functions' list of groups. * This over-allocates slightly, since not all groups are mux groups. 
*/ pmx->group_pins = devm_kzalloc(&pdev->dev, soc_data->ngroups * 4 * sizeof(*pmx->group_pins), GFP_KERNEL); if (!pmx->group_pins) return -ENOMEM; group_pins = pmx->group_pins; for (fn = 0; fn < soc_data->nfunctions; fn++) { struct tegra_function *func = &soc_data->functions[fn]; func->groups = group_pins; for (gn = 0; gn < soc_data->ngroups; gn++) { const struct tegra_pingroup *g = &soc_data->groups[gn]; if (g->mux_reg == -1) continue; for (gfn = 0; gfn < 4; gfn++) if (g->funcs[gfn] == fn) break; if (gfn == 4) continue; BUG_ON(group_pins - pmx->group_pins >= soc_data->ngroups * 4); *group_pins++ = g->name; func->ngroups++; } } tegra_pinctrl_gpio_range.npins = pmx->soc->ngpios; tegra_pinctrl_desc.name = dev_name(&pdev->dev); tegra_pinctrl_desc.pins = pmx->soc->pins; tegra_pinctrl_desc.npins = pmx->soc->npins; for (i = 0; ; i++) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!res) break; } pmx->nbanks = i; pmx->regs = devm_kzalloc(&pdev->dev, pmx->nbanks * sizeof(*pmx->regs), GFP_KERNEL); if (!pmx->regs) { dev_err(&pdev->dev, "Can't alloc regs pointer\n"); return -ENOMEM; } for (i = 0; i < pmx->nbanks; i++) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); pmx->regs[i] = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pmx->regs[i])) return PTR_ERR(pmx->regs[i]); } pmx->pctl = pinctrl_register(&tegra_pinctrl_desc, &pdev->dev, pmx); if (IS_ERR(pmx->pctl)) { dev_err(&pdev->dev, "Couldn't register pinctrl driver\n"); return PTR_ERR(pmx->pctl); } if (!gpio_node_has_range()) pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range); platform_set_drvdata(pdev, pmx); dev_dbg(&pdev->dev, "Probed Tegra pinctrl driver\n"); return 0; } EXPORT_SYMBOL_GPL(tegra_pinctrl_probe); int tegra_pinctrl_remove(struct platform_device *pdev) { struct tegra_pmx *pmx = platform_get_drvdata(pdev); pinctrl_unregister(pmx->pctl); return 0; } EXPORT_SYMBOL_GPL(tegra_pinctrl_remove);
gpl-2.0
jrior001/android_kernel_samsung_d2
fs/file.c
359
12059
/* * linux/fs/file.c * * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes * * Manage the dynamic fd arrays in the process files_struct. */ #include <linux/export.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/time.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/bitops.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/workqueue.h> struct fdtable_defer { spinlock_t lock; struct work_struct wq; struct fdtable *next; }; int sysctl_nr_open __read_mostly = 1024*1024; int sysctl_nr_open_min = BITS_PER_LONG; int sysctl_nr_open_max = 1024 * 1024; /* raised later */ /* * We use this list to defer free fdtables that have vmalloced * sets/arrays. By keeping a per-cpu list, we avoid having to embed * the work_struct in fdtable itself which avoids a 64 byte (i386) increase in * this per-task structure. */ static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list); static void *alloc_fdmem(size_t size) { /* * Very large allocations can stress page reclaim, so fall back to * vmalloc() if the allocation size will be considered "large" by the VM. */ if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY); if (data != NULL) return data; } return vmalloc(size); } static void free_fdmem(void *ptr) { is_vmalloc_addr(ptr) ? 
vfree(ptr) : kfree(ptr); } static void __free_fdtable(struct fdtable *fdt) { free_fdmem(fdt->fd); free_fdmem(fdt->open_fds); kfree(fdt); } static void free_fdtable_work(struct work_struct *work) { struct fdtable_defer *f = container_of(work, struct fdtable_defer, wq); struct fdtable *fdt; spin_lock_bh(&f->lock); fdt = f->next; f->next = NULL; spin_unlock_bh(&f->lock); while(fdt) { struct fdtable *next = fdt->next; __free_fdtable(fdt); fdt = next; } } void free_fdtable_rcu(struct rcu_head *rcu) { struct fdtable *fdt = container_of(rcu, struct fdtable, rcu); struct fdtable_defer *fddef; BUG_ON(!fdt); if (fdt->max_fds <= NR_OPEN_DEFAULT) { /* * This fdtable is embedded in the files structure and that * structure itself is getting destroyed. */ kmem_cache_free(files_cachep, container_of(fdt, struct files_struct, fdtab)); return; } if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) { kfree(fdt->fd); kfree(fdt->open_fds); kfree(fdt); } else { fddef = &get_cpu_var(fdtable_defer_list); spin_lock(&fddef->lock); fdt->next = fddef->next; fddef->next = fdt; /* vmallocs are handled from the workqueue context */ schedule_work(&fddef->wq); spin_unlock(&fddef->lock); put_cpu_var(fdtable_defer_list); } } /* * Expand the fdset in the files_struct. Called with the files spinlock * held for write. 
*/ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) { unsigned int cpy, set; BUG_ON(nfdt->max_fds < ofdt->max_fds); cpy = ofdt->max_fds * sizeof(struct file *); set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *); memcpy(nfdt->fd, ofdt->fd, cpy); memset((char *)(nfdt->fd) + cpy, 0, set); cpy = ofdt->max_fds / BITS_PER_BYTE; set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE; memcpy(nfdt->open_fds, ofdt->open_fds, cpy); memset((char *)(nfdt->open_fds) + cpy, 0, set); memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy); memset((char *)(nfdt->close_on_exec) + cpy, 0, set); } static struct fdtable * alloc_fdtable(unsigned int nr) { struct fdtable *fdt; void *data; /* * Figure out how many fds we actually want to support in this fdtable. * Allocation steps are keyed to the size of the fdarray, since it * grows far faster than any of the other dynamic data. We try to fit * the fdarray into comfortable page-tuned chunks: starting at 1024B * and growing in powers of two from there on. */ nr /= (1024 / sizeof(struct file *)); nr = roundup_pow_of_two(nr + 1); nr *= (1024 / sizeof(struct file *)); /* * Note that this can drive nr *below* what we had passed if sysctl_nr_open * had been set lower between the check in expand_files() and here. Deal * with that in caller, it's cheaper that way. * * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise * bitmaps handling below becomes unpleasant, to put it mildly... 
*/ if (unlikely(nr > sysctl_nr_open)) nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1; fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL); if (!fdt) goto out; fdt->max_fds = nr; data = alloc_fdmem(nr * sizeof(struct file *)); if (!data) goto out_fdt; fdt->fd = data; data = alloc_fdmem(max_t(size_t, 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES)); if (!data) goto out_arr; fdt->open_fds = data; data += nr / BITS_PER_BYTE; fdt->close_on_exec = data; fdt->next = NULL; return fdt; out_arr: free_fdmem(fdt->fd); out_fdt: kfree(fdt); out: return NULL; } /* * Expand the file descriptor table. * This function will allocate a new fdtable and both fd array and fdset, of * the given size. * Return <0 error code on error; 1 on successful completion. * The files->file_lock should be held on entry, and will be held on exit. */ static int expand_fdtable(struct files_struct *files, int nr) __releases(files->file_lock) __acquires(files->file_lock) { struct fdtable *new_fdt, *cur_fdt; spin_unlock(&files->file_lock); new_fdt = alloc_fdtable(nr); spin_lock(&files->file_lock); if (!new_fdt) return -ENOMEM; /* * extremely unlikely race - sysctl_nr_open decreased between the check in * caller and alloc_fdtable(). Cheaper to catch it here... */ if (unlikely(new_fdt->max_fds <= nr)) { __free_fdtable(new_fdt); return -EMFILE; } /* * Check again since another task may have expanded the fd table while * we dropped the lock */ cur_fdt = files_fdtable(files); if (nr >= cur_fdt->max_fds) { /* Continue as planned */ copy_fdtable(new_fdt, cur_fdt); rcu_assign_pointer(files->fdt, new_fdt); if (cur_fdt->max_fds > NR_OPEN_DEFAULT) free_fdtable(cur_fdt); } else { /* Somebody else expanded, so undo our attempt */ __free_fdtable(new_fdt); } return 1; } /* * Expand files. * This function will expand the file structures, if the requested size exceeds * the current capacity and there is room for expansion. 
* Return <0 error code on error; 0 when nothing done; 1 when files were * expanded and execution may have blocked. * The files->file_lock should be held on entry, and will be held on exit. */ int expand_files(struct files_struct *files, int nr) { struct fdtable *fdt; fdt = files_fdtable(files); /* * N.B. For clone tasks sharing a files structure, this test * will limit the total number of files that can be opened. */ if (nr >= rlimit(RLIMIT_NOFILE)) return -EMFILE; /* Do we need to expand? */ if (nr < fdt->max_fds) return 0; /* Can we expand? */ if (nr >= sysctl_nr_open) return -EMFILE; /* All good, so we try */ return expand_fdtable(files, nr); } static int count_open_files(struct fdtable *fdt) { int size = fdt->max_fds; int i; /* Find the last open fd */ for (i = size / BITS_PER_LONG; i > 0; ) { if (fdt->open_fds[--i]) break; } i = (i + 1) * BITS_PER_LONG; return i; } /* * Allocate a new files structure and copy contents from the * passed in files structure. * errorp will be valid only when the returned files_struct is NULL. */ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) { struct files_struct *newf; struct file **old_fds, **new_fds; int open_files, size, i; struct fdtable *old_fdt, *new_fdt; *errorp = -ENOMEM; newf = kmem_cache_alloc(files_cachep, GFP_KERNEL); if (!newf) goto out; atomic_set(&newf->count, 1); spin_lock_init(&newf->file_lock); newf->next_fd = 0; new_fdt = &newf->fdtab; new_fdt->max_fds = NR_OPEN_DEFAULT; new_fdt->close_on_exec = newf->close_on_exec_init; new_fdt->open_fds = newf->open_fds_init; new_fdt->fd = &newf->fd_array[0]; new_fdt->next = NULL; spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); open_files = count_open_files(old_fdt); /* * Check whether we need to allocate a larger fd array and fd set. 
*/ while (unlikely(open_files > new_fdt->max_fds)) { spin_unlock(&oldf->file_lock); if (new_fdt != &newf->fdtab) __free_fdtable(new_fdt); new_fdt = alloc_fdtable(open_files - 1); if (!new_fdt) { *errorp = -ENOMEM; goto out_release; } /* beyond sysctl_nr_open; nothing to do */ if (unlikely(new_fdt->max_fds < open_files)) { __free_fdtable(new_fdt); *errorp = -EMFILE; goto out_release; } /* * Reacquire the oldf lock and a pointer to its fd table * who knows it may have a new bigger fd table. We need * the latest pointer. */ spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); open_files = count_open_files(old_fdt); } old_fds = old_fdt->fd; new_fds = new_fdt->fd; memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8); memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8); for (i = open_files; i != 0; i--) { struct file *f = *old_fds++; if (f) { get_file(f); } else { /* * The fd may be claimed in the fd bitmap but not yet * instantiated in the files array if a sibling thread * is partway through open(). So make sure that this * fd is available to the new process. 
*/ __clear_open_fd(open_files - i, new_fdt); } rcu_assign_pointer(*new_fds++, f); } spin_unlock(&oldf->file_lock); /* compute the remainder to be cleared */ size = (new_fdt->max_fds - open_files) * sizeof(struct file *); /* This is long word aligned thus could use a optimized version */ memset(new_fds, 0, size); if (new_fdt->max_fds > open_files) { int left = (new_fdt->max_fds - open_files) / 8; int start = open_files / BITS_PER_LONG; memset(&new_fdt->open_fds[start], 0, left); memset(&new_fdt->close_on_exec[start], 0, left); } rcu_assign_pointer(newf->fdt, new_fdt); return newf; out_release: kmem_cache_free(files_cachep, newf); out: return NULL; } static void __devinit fdtable_defer_list_init(int cpu) { struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); spin_lock_init(&fddef->lock); INIT_WORK(&fddef->wq, free_fdtable_work); fddef->next = NULL; } void __init files_defer_init(void) { int i; for_each_possible_cpu(i) fdtable_defer_list_init(i); sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG; } struct files_struct init_files = { .count = ATOMIC_INIT(1), .fdt = &init_files.fdtab, .fdtab = { .max_fds = NR_OPEN_DEFAULT, .fd = &init_files.fd_array[0], .close_on_exec = init_files.close_on_exec_init, .open_fds = init_files.open_fds_init, }, .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), }; /* * allocate a file descriptor, mark it busy. */ int alloc_fd(unsigned start, unsigned flags) { struct files_struct *files = current->files; unsigned int fd; int error; struct fdtable *fdt; spin_lock(&files->file_lock); repeat: fdt = files_fdtable(files); fd = start; if (fd < files->next_fd) fd = files->next_fd; if (fd < fdt->max_fds) fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd); error = expand_files(files, fd); if (error < 0) goto out; /* * If we needed to expand the fs array we * might have blocked - try again. 
*/ if (error) goto repeat; if (start <= files->next_fd) files->next_fd = fd + 1; __set_open_fd(fd, fdt); if (flags & O_CLOEXEC) __set_close_on_exec(fd, fdt); else __clear_close_on_exec(fd, fdt); error = fd; #if 1 /* Sanity check */ if (rcu_dereference_raw(fdt->fd[fd]) != NULL) { printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd); rcu_assign_pointer(fdt->fd[fd], NULL); } #endif out: spin_unlock(&files->file_lock); return error; } int get_unused_fd(void) { return alloc_fd(0, 0); } EXPORT_SYMBOL(get_unused_fd);
gpl-2.0
StNick/android_kernel_samsung_lt03lte
arch/arm/mach-msm/perf_debug.c
359
2472
/* Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/types.h> #include <linux/uaccess.h> #include <linux/debugfs.h> /* * Subsequent patches should add an entry to end of this string. * Format is incrementing sequence number followed by text of * patch commit title with newline. * Note trailing ';' is on its own line to simplify addition of * future strings. */ static char *descriptions = "0 msm: perf: add debug patch logging framework\n" "1 Perf: Restore counter after powercollapse for generic ARM PMU's\n" "2 Perf: Toggle PMU IRQ when CPU's are hotplugged\n" "3 Perf: Correct irq for CPU hotplug detection\n" "4 Perf: Check perf activity on correct CPU\n" "5 Perf: Add DT support for L1 and L2 PMU\n" "6 Perf: Add cortex A5 device tree support\n" "7 Perf: Add L1 counters to tracepoints\n" "8 Perf: Add cortex A7 perf support\n" "9 ARM: dts: msm: add perf-events support for msm8226\n" "10 Perf: Fix counts across power collapse\n" "11 ARM: dts: msm: add perf-events support for msm8x10, msm8x12\n" "12 Perf: Make per-process counters configurable\n" ; static ssize_t desc_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) { return simple_read_from_buffer(buf, count, pos, descriptions, strlen(descriptions)); } static const struct file_operations perf_debug_desc_fops = { .read = desc_read, }; static int msm_perf_debugfs_init(void) { int ret = 0; struct dentry *dir; struct dentry *file; dir = debugfs_create_dir("msm-perf-patches", NULL); if (IS_ERR_OR_NULL(dir)) { pr_err("failed to create 
msm-perf-patches dir in debugfs\n"); ret = PTR_ERR(dir); goto init_exit; } file = debugfs_create_file("descriptions", 0444, dir, NULL, &perf_debug_desc_fops); if (IS_ERR_OR_NULL(file)) { debugfs_remove(dir); pr_err("failed to create descriptions file for msm-perf-patches\n"); ret = PTR_ERR(file); goto init_exit; } init_exit: return ret; } late_initcall(msm_perf_debugfs_init);
gpl-2.0
antibyte/hannspad-kernel-antibyte
drivers/usb/host.old/uhci-debug.c
615
15204
/* * UHCI-specific debugging code. Invaluable when something * goes wrong, but don't get in my face. * * Kernel visible pointers are surrounded in []s and bus * visible pointers are surrounded in ()s * * (C) Copyright 1999 Linus Torvalds * (C) Copyright 1999-2001 Johannes Erdfelt */ #include <linux/kernel.h> #include <linux/debugfs.h> #include <linux/smp_lock.h> #include <asm/io.h> #include "uhci-hcd.h" #define uhci_debug_operations (* (const struct file_operations *) NULL) static struct dentry *uhci_debugfs_root; #ifdef DEBUG /* Handle REALLY large printks so we don't overflow buffers */ static void lprintk(char *buf) { char *p; /* Just write one line at a time */ while (buf) { p = strchr(buf, '\n'); if (p) *p = 0; printk(KERN_DEBUG "%s\n", buf); buf = p; if (buf) buf++; } } static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space) { char *out = buf; char *spid; u32 status, token; /* Try to make sure there's enough memory */ if (len < 160) return 0; status = td_status(td); out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td, le32_to_cpu(td->link)); out += sprintf(out, "e%d %s%s%s%s%s%s%s%s%s%sLength=%x ", ((status >> 27) & 3), (status & TD_CTRL_SPD) ? "SPD " : "", (status & TD_CTRL_LS) ? "LS " : "", (status & TD_CTRL_IOC) ? "IOC " : "", (status & TD_CTRL_ACTIVE) ? "Active " : "", (status & TD_CTRL_STALLED) ? "Stalled " : "", (status & TD_CTRL_DBUFERR) ? "DataBufErr " : "", (status & TD_CTRL_BABBLE) ? "Babble " : "", (status & TD_CTRL_NAK) ? "NAK " : "", (status & TD_CTRL_CRCTIMEO) ? "CRC/Timeo " : "", (status & TD_CTRL_BITSTUFF) ? 
"BitStuff " : "", status & 0x7ff); token = td_token(td); switch (uhci_packetid(token)) { case USB_PID_SETUP: spid = "SETUP"; break; case USB_PID_OUT: spid = "OUT"; break; case USB_PID_IN: spid = "IN"; break; default: spid = "?"; break; } out += sprintf(out, "MaxLen=%x DT%d EndPt=%x Dev=%x, PID=%x(%s) ", token >> 21, ((token >> 19) & 1), (token >> 15) & 15, (token >> 8) & 127, (token & 0xff), spid); out += sprintf(out, "(buf=%08x)\n", le32_to_cpu(td->buffer)); return out - buf; } static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space) { char *out = buf; struct uhci_td *td; int i, nactive, ninactive; char *ptype; if (len < 200) return 0; out += sprintf(out, "urb_priv [%p] ", urbp); out += sprintf(out, "urb [%p] ", urbp->urb); out += sprintf(out, "qh [%p] ", urbp->qh); out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe)); out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe), (usb_pipein(urbp->urb->pipe) ? "IN" : "OUT")); switch (usb_pipetype(urbp->urb->pipe)) { case PIPE_ISOCHRONOUS: ptype = "ISO"; break; case PIPE_INTERRUPT: ptype = "INT"; break; case PIPE_BULK: ptype = "BLK"; break; default: case PIPE_CONTROL: ptype = "CTL"; break; } out += sprintf(out, "%s%s", ptype, (urbp->fsbr ? " FSBR" : "")); out += sprintf(out, " Actlen=%d%s", urbp->urb->actual_length, (urbp->qh->type == USB_ENDPOINT_XFER_CONTROL ? 
"-8" : "")); if (urbp->urb->unlinked) out += sprintf(out, " Unlinked=%d", urbp->urb->unlinked); out += sprintf(out, "\n"); i = nactive = ninactive = 0; list_for_each_entry(td, &urbp->td_list, list) { if (urbp->qh->type != USB_ENDPOINT_XFER_ISOC && (++i <= 10 || debug > 2)) { out += sprintf(out, "%*s%d: ", space + 2, "", i); out += uhci_show_td(td, out, len - (out - buf), 0); } else { if (td_status(td) & TD_CTRL_ACTIVE) ++nactive; else ++ninactive; } } if (nactive + ninactive > 0) out += sprintf(out, "%*s[skipped %d inactive and %d active " "TDs]\n", space, "", ninactive, nactive); return out - buf; } static int uhci_show_qh(struct uhci_hcd *uhci, struct uhci_qh *qh, char *buf, int len, int space) { char *out = buf; int i, nurbs; __le32 element = qh_element(qh); char *qtype; /* Try to make sure there's enough memory */ if (len < 80 * 7) return 0; switch (qh->type) { case USB_ENDPOINT_XFER_ISOC: qtype = "ISO"; break; case USB_ENDPOINT_XFER_INT: qtype = "INT"; break; case USB_ENDPOINT_XFER_BULK: qtype = "BLK"; break; case USB_ENDPOINT_XFER_CONTROL: qtype = "CTL"; break; default: qtype = "Skel" ; break; } out += sprintf(out, "%*s[%p] %s QH link (%08x) element (%08x)\n", space, "", qh, qtype, le32_to_cpu(qh->link), le32_to_cpu(element)); if (qh->type == USB_ENDPOINT_XFER_ISOC) out += sprintf(out, "%*s period %d phase %d load %d us, " "frame %x desc [%p]\n", space, "", qh->period, qh->phase, qh->load, qh->iso_frame, qh->iso_packet_desc); else if (qh->type == USB_ENDPOINT_XFER_INT) out += sprintf(out, "%*s period %d phase %d load %d us\n", space, "", qh->period, qh->phase, qh->load); if (element & UHCI_PTR_QH) out += sprintf(out, "%*s Element points to QH (bug?)\n", space, ""); if (element & UHCI_PTR_DEPTH) out += sprintf(out, "%*s Depth traverse\n", space, ""); if (element & cpu_to_le32(8)) out += sprintf(out, "%*s Bit 3 set (bug?)\n", space, ""); if (!(element & ~(UHCI_PTR_QH | UHCI_PTR_DEPTH))) out += sprintf(out, "%*s Element is NULL (bug?)\n", space, ""); if 
(list_empty(&qh->queue)) { out += sprintf(out, "%*s queue is empty\n", space, ""); if (qh == uhci->skel_async_qh) out += uhci_show_td(uhci->term_td, out, len - (out - buf), 0); } else { struct urb_priv *urbp = list_entry(qh->queue.next, struct urb_priv, node); struct uhci_td *td = list_entry(urbp->td_list.next, struct uhci_td, list); if (element != LINK_TO_TD(td)) out += sprintf(out, "%*s Element != First TD\n", space, ""); i = nurbs = 0; list_for_each_entry(urbp, &qh->queue, node) { if (++i <= 10) out += uhci_show_urbp(urbp, out, len - (out - buf), space + 2); else ++nurbs; } if (nurbs > 0) out += sprintf(out, "%*s Skipped %d URBs\n", space, "", nurbs); } if (qh->dummy_td) { out += sprintf(out, "%*s Dummy TD\n", space, ""); out += uhci_show_td(qh->dummy_td, out, len - (out - buf), 0); } return out - buf; } static int uhci_show_sc(int port, unsigned short status, char *buf, int len) { char *out = buf; /* Try to make sure there's enough memory */ if (len < 160) return 0; out += sprintf(out, " stat%d = %04x %s%s%s%s%s%s%s%s%s%s\n", port, status, (status & USBPORTSC_SUSP) ? " Suspend" : "", (status & USBPORTSC_OCC) ? " OverCurrentChange" : "", (status & USBPORTSC_OC) ? " OverCurrent" : "", (status & USBPORTSC_PR) ? " Reset" : "", (status & USBPORTSC_LSDA) ? " LowSpeed" : "", (status & USBPORTSC_RD) ? " ResumeDetect" : "", (status & USBPORTSC_PEC) ? " EnableChange" : "", (status & USBPORTSC_PE) ? " Enabled" : "", (status & USBPORTSC_CSC) ? " ConnectChange" : "", (status & USBPORTSC_CCS) ? 
" Connected" : ""); return out - buf; } static int uhci_show_root_hub_state(struct uhci_hcd *uhci, char *buf, int len) { char *out = buf; char *rh_state; /* Try to make sure there's enough memory */ if (len < 60) return 0; switch (uhci->rh_state) { case UHCI_RH_RESET: rh_state = "reset"; break; case UHCI_RH_SUSPENDED: rh_state = "suspended"; break; case UHCI_RH_AUTO_STOPPED: rh_state = "auto-stopped"; break; case UHCI_RH_RESUMING: rh_state = "resuming"; break; case UHCI_RH_SUSPENDING: rh_state = "suspending"; break; case UHCI_RH_RUNNING: rh_state = "running"; break; case UHCI_RH_RUNNING_NODEVS: rh_state = "running, no devs"; break; default: rh_state = "?"; break; } out += sprintf(out, "Root-hub state: %s FSBR: %d\n", rh_state, uhci->fsbr_is_on); return out - buf; } static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len) { char *out = buf; unsigned long io_addr = uhci->io_addr; unsigned short usbcmd, usbstat, usbint, usbfrnum; unsigned int flbaseadd; unsigned char sof; unsigned short portsc1, portsc2; /* Try to make sure there's enough memory */ if (len < 80 * 9) return 0; usbcmd = inw(io_addr + 0); usbstat = inw(io_addr + 2); usbint = inw(io_addr + 4); usbfrnum = inw(io_addr + 6); flbaseadd = inl(io_addr + 8); sof = inb(io_addr + 12); portsc1 = inw(io_addr + 16); portsc2 = inw(io_addr + 18); out += sprintf(out, " usbcmd = %04x %s%s%s%s%s%s%s%s\n", usbcmd, (usbcmd & USBCMD_MAXP) ? "Maxp64 " : "Maxp32 ", (usbcmd & USBCMD_CF) ? "CF " : "", (usbcmd & USBCMD_SWDBG) ? "SWDBG " : "", (usbcmd & USBCMD_FGR) ? "FGR " : "", (usbcmd & USBCMD_EGSM) ? "EGSM " : "", (usbcmd & USBCMD_GRESET) ? "GRESET " : "", (usbcmd & USBCMD_HCRESET) ? "HCRESET " : "", (usbcmd & USBCMD_RS) ? "RS " : ""); out += sprintf(out, " usbstat = %04x %s%s%s%s%s%s\n", usbstat, (usbstat & USBSTS_HCH) ? "HCHalted " : "", (usbstat & USBSTS_HCPE) ? "HostControllerProcessError " : "", (usbstat & USBSTS_HSE) ? "HostSystemError " : "", (usbstat & USBSTS_RD) ? 
"ResumeDetect " : "", (usbstat & USBSTS_ERROR) ? "USBError " : "", (usbstat & USBSTS_USBINT) ? "USBINT " : ""); out += sprintf(out, " usbint = %04x\n", usbint); out += sprintf(out, " usbfrnum = (%d)%03x\n", (usbfrnum >> 10) & 1, 0xfff & (4*(unsigned int)usbfrnum)); out += sprintf(out, " flbaseadd = %08x\n", flbaseadd); out += sprintf(out, " sof = %02x\n", sof); out += uhci_show_sc(1, portsc1, out, len - (out - buf)); out += uhci_show_sc(2, portsc2, out, len - (out - buf)); out += sprintf(out, "Most recent frame: %x (%d) " "Last ISO frame: %x (%d)\n", uhci->frame_number, uhci->frame_number & 1023, uhci->last_iso_frame, uhci->last_iso_frame & 1023); return out - buf; } static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len) { char *out = buf; int i, j; struct uhci_qh *qh; struct uhci_td *td; struct list_head *tmp, *head; int nframes, nerrs; __le32 link; __le32 fsbr_link; static const char * const qh_names[] = { "unlink", "iso", "int128", "int64", "int32", "int16", "int8", "int4", "int2", "async", "term" }; out += uhci_show_root_hub_state(uhci, out, len - (out - buf)); out += sprintf(out, "HC status\n"); out += uhci_show_status(uhci, out, len - (out - buf)); out += sprintf(out, "Periodic load table\n"); for (i = 0; i < MAX_PHASE; ++i) { out += sprintf(out, "\t%d", uhci->load[i]); if (i % 8 == 7) *out++ = '\n'; } out += sprintf(out, "Total: %d, #INT: %d, #ISO: %d\n", uhci->total_load, uhci_to_hcd(uhci)->self.bandwidth_int_reqs, uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs); if (debug <= 1) return out - buf; out += sprintf(out, "Frame List\n"); nframes = 10; nerrs = 0; for (i = 0; i < UHCI_NUMFRAMES; ++i) { __le32 qh_dma; j = 0; td = uhci->frame_cpu[i]; link = uhci->frame[i]; if (!td) goto check_link; if (nframes > 0) { out += sprintf(out, "- Frame %d -> (%08x)\n", i, le32_to_cpu(link)); j = 1; } head = &td->fl_list; tmp = head; do { td = list_entry(tmp, struct uhci_td, fl_list); tmp = tmp->next; if (link != LINK_TO_TD(td)) { if (nframes > 0) out += 
sprintf(out, " link does " "not match list entry!\n"); else ++nerrs; } if (nframes > 0) out += uhci_show_td(td, out, len - (out - buf), 4); link = td->link; } while (tmp != head); check_link: qh_dma = uhci_frame_skel_link(uhci, i); if (link != qh_dma) { if (nframes > 0) { if (!j) { out += sprintf(out, "- Frame %d -> (%08x)\n", i, le32_to_cpu(link)); j = 1; } out += sprintf(out, " link does not match " "QH (%08x)!\n", le32_to_cpu(qh_dma)); } else ++nerrs; } nframes -= j; } if (nerrs > 0) out += sprintf(out, "Skipped %d bad links\n", nerrs); out += sprintf(out, "Skeleton QHs\n"); fsbr_link = 0; for (i = 0; i < UHCI_NUM_SKELQH; ++i) { int cnt = 0; qh = uhci->skelqh[i]; out += sprintf(out, "- skel_%s_qh\n", qh_names[i]); \ out += uhci_show_qh(uhci, qh, out, len - (out - buf), 4); /* Last QH is the Terminating QH, it's different */ if (i == SKEL_TERM) { if (qh_element(qh) != LINK_TO_TD(uhci->term_td)) out += sprintf(out, " skel_term_qh element is not set to term_td!\n"); link = fsbr_link; if (!link) link = LINK_TO_QH(uhci->skel_term_qh); goto check_qh_link; } head = &qh->node; tmp = head->next; while (tmp != head) { qh = list_entry(tmp, struct uhci_qh, node); tmp = tmp->next; if (++cnt <= 10) out += uhci_show_qh(uhci, qh, out, len - (out - buf), 4); if (!fsbr_link && qh->skel >= SKEL_FSBR) fsbr_link = LINK_TO_QH(qh); } if ((cnt -= 10) > 0) out += sprintf(out, " Skipped %d QHs\n", cnt); link = UHCI_PTR_TERM; if (i <= SKEL_ISO) ; else if (i < SKEL_ASYNC) link = LINK_TO_QH(uhci->skel_async_qh); else if (!uhci->fsbr_is_on) ; else link = LINK_TO_QH(uhci->skel_term_qh); check_qh_link: if (qh->link != link) out += sprintf(out, " last QH not linked to next skeleton!\n"); } return out - buf; } #ifdef CONFIG_DEBUG_FS #define MAX_OUTPUT (64 * 1024) struct uhci_debug { int size; char *data; }; static int uhci_debug_open(struct inode *inode, struct file *file) { struct uhci_hcd *uhci = inode->i_private; struct uhci_debug *up; int ret = -ENOMEM; unsigned long flags; lock_kernel(); up 
= kmalloc(sizeof(*up), GFP_KERNEL); if (!up) goto out; up->data = kmalloc(MAX_OUTPUT, GFP_KERNEL); if (!up->data) { kfree(up); goto out; } up->size = 0; spin_lock_irqsave(&uhci->lock, flags); if (uhci->is_initialized) up->size = uhci_sprint_schedule(uhci, up->data, MAX_OUTPUT); spin_unlock_irqrestore(&uhci->lock, flags); file->private_data = up; ret = 0; out: unlock_kernel(); return ret; } static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence) { struct uhci_debug *up; loff_t new = -1; lock_kernel(); up = file->private_data; switch (whence) { case 0: new = off; break; case 1: new = file->f_pos + off; break; } if (new < 0 || new > up->size) { unlock_kernel(); return -EINVAL; } unlock_kernel(); return (file->f_pos = new); } static ssize_t uhci_debug_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct uhci_debug *up = file->private_data; return simple_read_from_buffer(buf, nbytes, ppos, up->data, up->size); } static int uhci_debug_release(struct inode *inode, struct file *file) { struct uhci_debug *up = file->private_data; kfree(up->data); kfree(up); return 0; } #undef uhci_debug_operations static const struct file_operations uhci_debug_operations = { .owner = THIS_MODULE, .open = uhci_debug_open, .llseek = uhci_debug_lseek, .read = uhci_debug_read, .release = uhci_debug_release, }; #endif /* CONFIG_DEBUG_FS */ #else /* DEBUG */ static inline void lprintk(char *buf) {} static inline int uhci_show_qh(struct uhci_hcd *uhci, struct uhci_qh *qh, char *buf, int len, int space) { return 0; } static inline int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len) { return 0; } #endif
gpl-2.0
schqiushui/kernel_lollipop_sense_a52
arch/ia64/kernel/salinfo.c
1895
19772
/* * salinfo.c * * Creates entries in /proc/sal for various system features. * * Copyright (c) 2003, 2006 Silicon Graphics, Inc. All rights reserved. * Copyright (c) 2003 Hewlett-Packard Co * Bjorn Helgaas <bjorn.helgaas@hp.com> * * 10/30/2001 jbarnes@sgi.com copied much of Stephane's palinfo * code to create this file * Oct 23 2003 kaos@sgi.com * Replace IPI with set_cpus_allowed() to read a record from the required cpu. * Redesign salinfo log processing to separate interrupt and user space * contexts. * Cache the record across multi-block reads from user space. * Support > 64 cpus. * Delete module_exit and MOD_INC/DEC_COUNT, salinfo cannot be a module. * * Jan 28 2004 kaos@sgi.com * Periodically check for outstanding MCA or INIT records. * * Dec 5 2004 kaos@sgi.com * Standardize which records are cleared automatically. * * Aug 18 2005 kaos@sgi.com * mca.c may not pass a buffer, a NULL buffer just indicates that a new * record is available in SAL. * Replace some NR_CPUS by cpus_online, for hotplug cpu. * * Jan 5 2006 kaos@sgi.com * Handle hotplug cpus coming online. * Handle hotplug cpus going offline while they still have outstanding records. * Use the cpu_* macros consistently. * Replace the counting semaphore with a mutex and a test if the cpumask is non-empty. * Modify the locking to make the test for "work to do" an atomic operation. 
*/ #include <linux/capability.h> #include <linux/cpu.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/timer.h> #include <linux/vmalloc.h> #include <linux/semaphore.h> #include <asm/sal.h> #include <asm/uaccess.h> MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>"); MODULE_DESCRIPTION("/proc interface to IA-64 SAL features"); MODULE_LICENSE("GPL"); static const struct file_operations proc_salinfo_fops; typedef struct { const char *name; /* name of the proc entry */ unsigned long feature; /* feature bit */ struct proc_dir_entry *entry; /* registered entry (removal) */ } salinfo_entry_t; /* * List {name,feature} pairs for every entry in /proc/sal/<feature> * that this module exports */ static const salinfo_entry_t salinfo_entries[]={ { "bus_lock", IA64_SAL_PLATFORM_FEATURE_BUS_LOCK, }, { "irq_redirection", IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT, }, { "ipi_redirection", IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT, }, { "itc_drift", IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT, }, }; #define NR_SALINFO_ENTRIES ARRAY_SIZE(salinfo_entries) static char *salinfo_log_name[] = { "mca", "init", "cmc", "cpe", }; static struct proc_dir_entry *salinfo_proc_entries[ ARRAY_SIZE(salinfo_entries) + /* /proc/sal/bus_lock */ ARRAY_SIZE(salinfo_log_name) + /* /proc/sal/{mca,...} */ (2 * ARRAY_SIZE(salinfo_log_name)) + /* /proc/sal/mca/{event,data} */ 1]; /* /proc/sal */ /* Some records we get ourselves, some are accessed as saved data in buffers * that are owned by mca.c. */ struct salinfo_data_saved { u8* buffer; u64 size; u64 id; int cpu; }; /* State transitions. Actions are :- * Write "read <cpunum>" to the data file. * Write "clear <cpunum>" to the data file. * Write "oemdata <cpunum> <offset> to the data file. * Read from the data file. * Close the data file. * * Start state is NO_DATA. * * NO_DATA * write "read <cpunum>" -> NO_DATA or LOG_RECORD. * write "clear <cpunum>" -> NO_DATA or LOG_RECORD. 
* write "oemdata <cpunum> <offset> -> return -EINVAL. * read data -> return EOF. * close -> unchanged. Free record areas. * * LOG_RECORD * write "read <cpunum>" -> NO_DATA or LOG_RECORD. * write "clear <cpunum>" -> NO_DATA or LOG_RECORD. * write "oemdata <cpunum> <offset> -> format the oem data, goto OEMDATA. * read data -> return the INIT/MCA/CMC/CPE record. * close -> unchanged. Keep record areas. * * OEMDATA * write "read <cpunum>" -> NO_DATA or LOG_RECORD. * write "clear <cpunum>" -> NO_DATA or LOG_RECORD. * write "oemdata <cpunum> <offset> -> format the oem data, goto OEMDATA. * read data -> return the formatted oemdata. * close -> unchanged. Keep record areas. * * Closing the data file does not change the state. This allows shell scripts * to manipulate salinfo data, each shell redirection opens the file, does one * action then closes it again. The record areas are only freed at close when * the state is NO_DATA. */ enum salinfo_state { STATE_NO_DATA, STATE_LOG_RECORD, STATE_OEMDATA, }; struct salinfo_data { cpumask_t cpu_event; /* which cpus have outstanding events */ struct semaphore mutex; u8 *log_buffer; u64 log_size; u8 *oemdata; /* decoded oem data */ u64 oemdata_size; int open; /* single-open to prevent races */ u8 type; u8 saved_num; /* using a saved record? */ enum salinfo_state state :8; /* processing state */ u8 padding; int cpu_check; /* next CPU to check */ struct salinfo_data_saved data_saved[5];/* save last 5 records from mca.c, must be < 255 */ }; static struct salinfo_data salinfo_data[ARRAY_SIZE(salinfo_log_name)]; static DEFINE_SPINLOCK(data_lock); static DEFINE_SPINLOCK(data_saved_lock); /** salinfo_platform_oemdata - optional callback to decode oemdata from an error * record. * @sect_header: pointer to the start of the section to decode. * @oemdata: returns vmalloc area containing the decoded output. * @oemdata_size: returns length of decoded output (strlen). 
* * Description: If user space asks for oem data to be decoded by the kernel * and/or prom and the platform has set salinfo_platform_oemdata to the address * of a platform specific routine then call that routine. salinfo_platform_oemdata * vmalloc's and formats its output area, returning the address of the text * and its strlen. Returns 0 for success, -ve for error. The callback is * invoked on the cpu that generated the error record. */ int (*salinfo_platform_oemdata)(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size); struct salinfo_platform_oemdata_parms { const u8 *efi_guid; u8 **oemdata; u64 *oemdata_size; int ret; }; /* Kick the mutex that tells user space that there is work to do. Instead of * trying to track the state of the mutex across multiple cpus, in user * context, interrupt context, non-maskable interrupt context and hotplug cpu, * it is far easier just to grab the mutex if it is free then release it. * * This routine must be called with data_saved_lock held, to make the down/up * operation atomic. */ static void salinfo_work_to_do(struct salinfo_data *data) { (void)(down_trylock(&data->mutex) ?: 0); up(&data->mutex); } static void salinfo_platform_oemdata_cpu(void *context) { struct salinfo_platform_oemdata_parms *parms = context; parms->ret = salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size); } static void shift1_data_saved (struct salinfo_data *data, int shift) { memcpy(data->data_saved+shift, data->data_saved+shift+1, (ARRAY_SIZE(data->data_saved) - (shift+1)) * sizeof(data->data_saved[0])); memset(data->data_saved + ARRAY_SIZE(data->data_saved) - 1, 0, sizeof(data->data_saved[0])); } /* This routine is invoked in interrupt context. Note: mca.c enables * interrupts before calling this code for CMC/CPE. MCA and INIT events are * not irq safe, do not call any routines that use spinlocks, they may deadlock. 
* MCA and INIT records are recorded, a timer event will look for any * outstanding events and wake up the user space code. * * The buffer passed from mca.c points to the output from ia64_log_get. This is * a persistent buffer but its contents can change between the interrupt and * when user space processes the record. Save the record id to identify * changes. If the buffer is NULL then just update the bitmap. */ void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe) { struct salinfo_data *data = salinfo_data + type; struct salinfo_data_saved *data_saved; unsigned long flags = 0; int i; int saved_size = ARRAY_SIZE(data->data_saved); BUG_ON(type >= ARRAY_SIZE(salinfo_log_name)); if (irqsafe) spin_lock_irqsave(&data_saved_lock, flags); if (buffer) { for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { if (!data_saved->buffer) break; } if (i == saved_size) { if (!data->saved_num) { shift1_data_saved(data, 0); data_saved = data->data_saved + saved_size - 1; } else data_saved = NULL; } if (data_saved) { data_saved->cpu = smp_processor_id(); data_saved->id = ((sal_log_record_header_t *)buffer)->id; data_saved->size = size; data_saved->buffer = buffer; } } cpu_set(smp_processor_id(), data->cpu_event); if (irqsafe) { salinfo_work_to_do(data); spin_unlock_irqrestore(&data_saved_lock, flags); } } /* Check for outstanding MCA/INIT records every minute (arbitrary) */ #define SALINFO_TIMER_DELAY (60*HZ) static struct timer_list salinfo_timer; extern void ia64_mlogbuf_dump(void); static void salinfo_timeout_check(struct salinfo_data *data) { unsigned long flags; if (!data->open) return; if (!cpus_empty(data->cpu_event)) { spin_lock_irqsave(&data_saved_lock, flags); salinfo_work_to_do(data); spin_unlock_irqrestore(&data_saved_lock, flags); } } static void salinfo_timeout (unsigned long arg) { ia64_mlogbuf_dump(); salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA); salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_INIT); 
salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY; add_timer(&salinfo_timer); } static int salinfo_event_open(struct inode *inode, struct file *file) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; return 0; } static ssize_t salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct salinfo_data *data = PDE_DATA(file_inode(file)); char cmd[32]; size_t size; int i, n, cpu = -1; retry: if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) { if (file->f_flags & O_NONBLOCK) return -EAGAIN; if (down_interruptible(&data->mutex)) return -EINTR; } n = data->cpu_check; for (i = 0; i < nr_cpu_ids; i++) { if (cpu_isset(n, data->cpu_event)) { if (!cpu_online(n)) { cpu_clear(n, data->cpu_event); continue; } cpu = n; break; } if (++n == nr_cpu_ids) n = 0; } if (cpu == -1) goto retry; ia64_mlogbuf_dump(); /* for next read, start checking at next CPU */ data->cpu_check = cpu; if (++data->cpu_check == nr_cpu_ids) data->cpu_check = 0; snprintf(cmd, sizeof(cmd), "read %d\n", cpu); size = strlen(cmd); if (size > count) size = count; if (copy_to_user(buffer, cmd, size)) return -EFAULT; return size; } static const struct file_operations salinfo_event_fops = { .open = salinfo_event_open, .read = salinfo_event_read, .llseek = noop_llseek, }; static int salinfo_log_open(struct inode *inode, struct file *file) { struct salinfo_data *data = PDE_DATA(inode); if (!capable(CAP_SYS_ADMIN)) return -EPERM; spin_lock(&data_lock); if (data->open) { spin_unlock(&data_lock); return -EBUSY; } data->open = 1; spin_unlock(&data_lock); if (data->state == STATE_NO_DATA && !(data->log_buffer = vmalloc(ia64_sal_get_state_info_size(data->type)))) { data->open = 0; return -ENOMEM; } return 0; } static int salinfo_log_release(struct inode *inode, struct file *file) { struct salinfo_data *data = PDE_DATA(inode); if (data->state == STATE_NO_DATA) { vfree(data->log_buffer); vfree(data->oemdata); data->log_buffer = NULL; data->oemdata = NULL; } spin_lock(&data_lock); 
data->open = 0; spin_unlock(&data_lock); return 0; } static void call_on_cpu(int cpu, void (*fn)(void *), void *arg) { cpumask_t save_cpus_allowed = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); (*fn)(arg); set_cpus_allowed_ptr(current, &save_cpus_allowed); } static void salinfo_log_read_cpu(void *context) { struct salinfo_data *data = context; sal_log_record_header_t *rh; data->log_size = ia64_sal_get_state_info(data->type, (u64 *) data->log_buffer); rh = (sal_log_record_header_t *)(data->log_buffer); /* Clear corrected errors as they are read from SAL */ if (rh->severity == sal_log_severity_corrected) ia64_sal_clear_state_info(data->type); } static void salinfo_log_new_read(int cpu, struct salinfo_data *data) { struct salinfo_data_saved *data_saved; unsigned long flags; int i; int saved_size = ARRAY_SIZE(data->data_saved); data->saved_num = 0; spin_lock_irqsave(&data_saved_lock, flags); retry: for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { if (data_saved->buffer && data_saved->cpu == cpu) { sal_log_record_header_t *rh = (sal_log_record_header_t *)(data_saved->buffer); data->log_size = data_saved->size; memcpy(data->log_buffer, rh, data->log_size); barrier(); /* id check must not be moved */ if (rh->id == data_saved->id) { data->saved_num = i+1; break; } /* saved record changed by mca.c since interrupt, discard it */ shift1_data_saved(data, i); goto retry; } } spin_unlock_irqrestore(&data_saved_lock, flags); if (!data->saved_num) call_on_cpu(cpu, salinfo_log_read_cpu, data); if (!data->log_size) { data->state = STATE_NO_DATA; cpu_clear(cpu, data->cpu_event); } else { data->state = STATE_LOG_RECORD; } } static ssize_t salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct salinfo_data *data = PDE_DATA(file_inode(file)); u8 *buf; u64 bufsize; if (data->state == STATE_LOG_RECORD) { buf = data->log_buffer; bufsize = data->log_size; } else if (data->state == STATE_OEMDATA) 
{ buf = data->oemdata; bufsize = data->oemdata_size; } else { buf = NULL; bufsize = 0; } return simple_read_from_buffer(buffer, count, ppos, buf, bufsize); } static void salinfo_log_clear_cpu(void *context) { struct salinfo_data *data = context; ia64_sal_clear_state_info(data->type); } static int salinfo_log_clear(struct salinfo_data *data, int cpu) { sal_log_record_header_t *rh; unsigned long flags; spin_lock_irqsave(&data_saved_lock, flags); data->state = STATE_NO_DATA; if (!cpu_isset(cpu, data->cpu_event)) { spin_unlock_irqrestore(&data_saved_lock, flags); return 0; } cpu_clear(cpu, data->cpu_event); if (data->saved_num) { shift1_data_saved(data, data->saved_num - 1); data->saved_num = 0; } spin_unlock_irqrestore(&data_saved_lock, flags); rh = (sal_log_record_header_t *)(data->log_buffer); /* Corrected errors have already been cleared from SAL */ if (rh->severity != sal_log_severity_corrected) call_on_cpu(cpu, salinfo_log_clear_cpu, data); /* clearing a record may make a new record visible */ salinfo_log_new_read(cpu, data); if (data->state == STATE_LOG_RECORD) { spin_lock_irqsave(&data_saved_lock, flags); cpu_set(cpu, data->cpu_event); salinfo_work_to_do(data); spin_unlock_irqrestore(&data_saved_lock, flags); } return 0; } static ssize_t salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct salinfo_data *data = PDE_DATA(file_inode(file)); char cmd[32]; size_t size; u32 offset; int cpu; size = sizeof(cmd); if (count < size) size = count; if (copy_from_user(cmd, buffer, size)) return -EFAULT; if (sscanf(cmd, "read %d", &cpu) == 1) { salinfo_log_new_read(cpu, data); } else if (sscanf(cmd, "clear %d", &cpu) == 1) { int ret; if ((ret = salinfo_log_clear(data, cpu))) count = ret; } else if (sscanf(cmd, "oemdata %d %d", &cpu, &offset) == 2) { if (data->state != STATE_LOG_RECORD && data->state != STATE_OEMDATA) return -EINVAL; if (offset > data->log_size - sizeof(efi_guid_t)) return -EINVAL; data->state = STATE_OEMDATA; if 
(salinfo_platform_oemdata) { struct salinfo_platform_oemdata_parms parms = { .efi_guid = data->log_buffer + offset, .oemdata = &data->oemdata, .oemdata_size = &data->oemdata_size }; call_on_cpu(cpu, salinfo_platform_oemdata_cpu, &parms); if (parms.ret) count = parms.ret; } else data->oemdata_size = 0; } else return -EINVAL; return count; } static const struct file_operations salinfo_data_fops = { .open = salinfo_log_open, .release = salinfo_log_release, .read = salinfo_log_read, .write = salinfo_log_write, .llseek = default_llseek, }; static int __cpuinit salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { unsigned int i, cpu = (unsigned long)hcpu; unsigned long flags; struct salinfo_data *data; switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: spin_lock_irqsave(&data_saved_lock, flags); for (i = 0, data = salinfo_data; i < ARRAY_SIZE(salinfo_data); ++i, ++data) { cpu_set(cpu, data->cpu_event); salinfo_work_to_do(data); } spin_unlock_irqrestore(&data_saved_lock, flags); break; case CPU_DEAD: case CPU_DEAD_FROZEN: spin_lock_irqsave(&data_saved_lock, flags); for (i = 0, data = salinfo_data; i < ARRAY_SIZE(salinfo_data); ++i, ++data) { struct salinfo_data_saved *data_saved; int j; for (j = ARRAY_SIZE(data->data_saved) - 1, data_saved = data->data_saved + j; j >= 0; --j, --data_saved) { if (data_saved->buffer && data_saved->cpu == cpu) { shift1_data_saved(data, j); } } cpu_clear(cpu, data->cpu_event); } spin_unlock_irqrestore(&data_saved_lock, flags); break; } return NOTIFY_OK; } static struct notifier_block salinfo_cpu_notifier __cpuinitdata = { .notifier_call = salinfo_cpu_callback, .priority = 0, }; static int __init salinfo_init(void) { struct proc_dir_entry *salinfo_dir; /* /proc/sal dir entry */ struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */ struct proc_dir_entry *dir, *entry; struct salinfo_data *data; int i, j; salinfo_dir = proc_mkdir("sal", NULL); if (!salinfo_dir) return 0; for 
(i=0; i < NR_SALINFO_ENTRIES; i++) { /* pass the feature bit in question as misc data */ *sdir++ = proc_create_data(salinfo_entries[i].name, 0, salinfo_dir, &proc_salinfo_fops, (void *)salinfo_entries[i].feature); } for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { data = salinfo_data + i; data->type = i; sema_init(&data->mutex, 1); dir = proc_mkdir(salinfo_log_name[i], salinfo_dir); if (!dir) continue; entry = proc_create_data("event", S_IRUSR, dir, &salinfo_event_fops, data); if (!entry) continue; *sdir++ = entry; entry = proc_create_data("data", S_IRUSR | S_IWUSR, dir, &salinfo_data_fops, data); if (!entry) continue; *sdir++ = entry; /* we missed any events before now */ for_each_online_cpu(j) cpu_set(j, data->cpu_event); *sdir++ = dir; } *sdir++ = salinfo_dir; init_timer(&salinfo_timer); salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY; salinfo_timer.function = &salinfo_timeout; add_timer(&salinfo_timer); register_hotcpu_notifier(&salinfo_cpu_notifier); return 0; } /* * 'data' contains an integer that corresponds to the feature we're * testing */ static int proc_salinfo_show(struct seq_file *m, void *v) { unsigned long data = (unsigned long)v; seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n"); return 0; } static int proc_salinfo_open(struct inode *inode, struct file *file) { return single_open(file, proc_salinfo_show, PDE_DATA(inode)); } static const struct file_operations proc_salinfo_fops = { .open = proc_salinfo_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; module_init(salinfo_init);
gpl-2.0
krizhanovsky/linux-3.10.10-sync_sockets
drivers/memory/tegra20-mc.c
2151
6021
/* * Tegra20 Memory Controller * * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/ratelimit.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/io.h> #define DRV_NAME "tegra20-mc" #define MC_INTSTATUS 0x0 #define MC_INTMASK 0x4 #define MC_INT_ERR_SHIFT 6 #define MC_INT_ERR_MASK (0x1f << MC_INT_ERR_SHIFT) #define MC_INT_DECERR_EMEM BIT(MC_INT_ERR_SHIFT) #define MC_INT_INVALID_GART_PAGE BIT(MC_INT_ERR_SHIFT + 1) #define MC_INT_SECURITY_VIOLATION BIT(MC_INT_ERR_SHIFT + 2) #define MC_INT_ARBITRATION_EMEM BIT(MC_INT_ERR_SHIFT + 3) #define MC_GART_ERROR_REQ 0x30 #define MC_DECERR_EMEM_OTHERS_STATUS 0x58 #define MC_SECURITY_VIOLATION_STATUS 0x74 #define SECURITY_VIOLATION_TYPE BIT(30) /* 0=TRUSTZONE, 1=CARVEOUT */ #define MC_CLIENT_ID_MASK 0x3f #define NUM_MC_REG_BANKS 2 struct tegra20_mc { void __iomem *regs[NUM_MC_REG_BANKS]; struct device *dev; }; static inline u32 mc_readl(struct tegra20_mc *mc, u32 offs) { u32 val = 0; if (offs < 0x24) val = readl(mc->regs[0] + offs); else if (offs < 0x400) val = readl(mc->regs[1] + offs - 0x3c); return val; } static inline void mc_writel(struct tegra20_mc *mc, u32 val, u32 offs) { if (offs < 0x24) writel(val, mc->regs[0] + offs); else if (offs < 0x400) 
writel(val, mc->regs[1] + offs - 0x3c); } static const char * const tegra20_mc_client[] = { "cbr_display0a", "cbr_display0ab", "cbr_display0b", "cbr_display0bb", "cbr_display0c", "cbr_display0cb", "cbr_display1b", "cbr_display1bb", "cbr_eppup", "cbr_g2pr", "cbr_g2sr", "cbr_mpeunifbr", "cbr_viruv", "csr_avpcarm7r", "csr_displayhc", "csr_displayhcb", "csr_fdcdrd", "csr_g2dr", "csr_host1xdmar", "csr_host1xr", "csr_idxsrd", "csr_mpcorer", "csr_mpe_ipred", "csr_mpeamemrd", "csr_mpecsrd", "csr_ppcsahbdmar", "csr_ppcsahbslvr", "csr_texsrd", "csr_vdebsevr", "csr_vdember", "csr_vdemcer", "csr_vdetper", "cbw_eppu", "cbw_eppv", "cbw_eppy", "cbw_mpeunifbw", "cbw_viwsb", "cbw_viwu", "cbw_viwv", "cbw_viwy", "ccw_g2dw", "csw_avpcarm7w", "csw_fdcdwr", "csw_host1xw", "csw_ispw", "csw_mpcorew", "csw_mpecswr", "csw_ppcsahbdmaw", "csw_ppcsahbslvw", "csw_vdebsevw", "csw_vdembew", "csw_vdetpmw", }; static void tegra20_mc_decode(struct tegra20_mc *mc, int n) { u32 addr, req; const char *client = "Unknown"; int idx, cid; const struct reg_info { u32 offset; u32 write_bit; /* 0=READ, 1=WRITE */ int cid_shift; char *message; } reg[] = { { .offset = MC_DECERR_EMEM_OTHERS_STATUS, .write_bit = 31, .message = "MC_DECERR", }, { .offset = MC_GART_ERROR_REQ, .cid_shift = 1, .message = "MC_GART_ERR", }, { .offset = MC_SECURITY_VIOLATION_STATUS, .write_bit = 31, .message = "MC_SECURITY_ERR", }, }; idx = n - MC_INT_ERR_SHIFT; if ((idx < 0) || (idx >= ARRAY_SIZE(reg))) { dev_err_ratelimited(mc->dev, "Unknown interrupt status %08lx\n", BIT(n)); return; } req = mc_readl(mc, reg[idx].offset); cid = (req >> reg[idx].cid_shift) & MC_CLIENT_ID_MASK; if (cid < ARRAY_SIZE(tegra20_mc_client)) client = tegra20_mc_client[cid]; addr = mc_readl(mc, reg[idx].offset + sizeof(u32)); dev_err_ratelimited(mc->dev, "%s (0x%08x): 0x%08x %s (%s %s)\n", reg[idx].message, req, addr, client, (req & BIT(reg[idx].write_bit)) ? "write" : "read", (reg[idx].offset == MC_SECURITY_VIOLATION_STATUS) ? 
((req & SECURITY_VIOLATION_TYPE) ? "carveout" : "trustzone") : ""); } static const struct of_device_id tegra20_mc_of_match[] = { { .compatible = "nvidia,tegra20-mc", }, {}, }; static irqreturn_t tegra20_mc_isr(int irq, void *data) { u32 stat, mask, bit; struct tegra20_mc *mc = data; stat = mc_readl(mc, MC_INTSTATUS); mask = mc_readl(mc, MC_INTMASK); mask &= stat; if (!mask) return IRQ_NONE; while ((bit = ffs(mask)) != 0) tegra20_mc_decode(mc, bit - 1); mc_writel(mc, stat, MC_INTSTATUS); return IRQ_HANDLED; } static int tegra20_mc_probe(struct platform_device *pdev) { struct resource *irq; struct tegra20_mc *mc; int i, err; u32 intmask; mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); if (!mc) return -ENOMEM; mc->dev = &pdev->dev; for (i = 0; i < ARRAY_SIZE(mc->regs); i++) { struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!res) return -ENODEV; mc->regs[i] = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(mc->regs[i])) return PTR_ERR(mc->regs[i]); } irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq) return -ENODEV; err = devm_request_irq(&pdev->dev, irq->start, tegra20_mc_isr, IRQF_SHARED, dev_name(&pdev->dev), mc); if (err) return -ENODEV; platform_set_drvdata(pdev, mc); intmask = MC_INT_INVALID_GART_PAGE | MC_INT_DECERR_EMEM | MC_INT_SECURITY_VIOLATION; mc_writel(mc, intmask, MC_INTMASK); return 0; } static struct platform_driver tegra20_mc_driver = { .probe = tegra20_mc_probe, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .of_match_table = tegra20_mc_of_match, }, }; module_platform_driver(tegra20_mc_driver); MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>"); MODULE_DESCRIPTION("Tegra20 MC driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
ausdim/SGS3-JB-U8
sound/core/info.c
2663
23514
/* * Information interface for ALSA driver * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/string.h> #include <sound/core.h> #include <sound/minors.h> #include <sound/info.h> #include <sound/version.h> #include <linux/proc_fs.h> #include <linux/mutex.h> #include <stdarg.h> /* * */ #ifdef CONFIG_PROC_FS int snd_info_check_reserved_words(const char *str) { static char *reserved[] = { "version", "meminfo", "memdebug", "detect", "devices", "oss", "cards", "timers", "synth", "pcm", "seq", NULL }; char **xstr = reserved; while (*xstr) { if (!strcmp(*xstr, str)) return 0; xstr++; } if (!strncmp(str, "card", 4)) return 0; return 1; } static DEFINE_MUTEX(info_mutex); struct snd_info_private_data { struct snd_info_buffer *rbuffer; struct snd_info_buffer *wbuffer; struct snd_info_entry *entry; void *file_private_data; }; static int snd_info_version_init(void); static int snd_info_version_done(void); static void snd_info_disconnect(struct snd_info_entry *entry); /* resize the proc r/w buffer */ static int resize_info_buffer(struct snd_info_buffer *buffer, unsigned int nsize) { char *nbuf; nsize = PAGE_ALIGN(nsize); nbuf = krealloc(buffer->buffer, nsize, GFP_KERNEL); if (! 
nbuf) return -ENOMEM; buffer->buffer = nbuf; buffer->len = nsize; return 0; } /** * snd_iprintf - printf on the procfs buffer * @buffer: the procfs buffer * @fmt: the printf format * * Outputs the string on the procfs buffer just like printf(). * * Returns the size of output string. */ int snd_iprintf(struct snd_info_buffer *buffer, const char *fmt, ...) { va_list args; int len, res; int err = 0; might_sleep(); if (buffer->stop || buffer->error) return 0; len = buffer->len - buffer->size; va_start(args, fmt); for (;;) { va_list ap; va_copy(ap, args); res = vsnprintf(buffer->buffer + buffer->curr, len, fmt, ap); va_end(ap); if (res < len) break; err = resize_info_buffer(buffer, buffer->len + PAGE_SIZE); if (err < 0) break; len = buffer->len - buffer->size; } va_end(args); if (err < 0) return err; buffer->curr += res; buffer->size += res; return res; } EXPORT_SYMBOL(snd_iprintf); /* */ static struct proc_dir_entry *snd_proc_root; struct snd_info_entry *snd_seq_root; EXPORT_SYMBOL(snd_seq_root); #ifdef CONFIG_SND_OSSEMUL struct snd_info_entry *snd_oss_root; #endif static void snd_remove_proc_entry(struct proc_dir_entry *parent, struct proc_dir_entry *de) { if (de) remove_proc_entry(de->name, parent); } static loff_t snd_info_entry_llseek(struct file *file, loff_t offset, int orig) { struct snd_info_private_data *data; struct snd_info_entry *entry; loff_t ret = -EINVAL, size; data = file->private_data; entry = data->entry; mutex_lock(&entry->access); if (entry->content == SNDRV_INFO_CONTENT_DATA && entry->c.ops->llseek) { offset = entry->c.ops->llseek(entry, data->file_private_data, file, offset, orig); goto out; } if (entry->content == SNDRV_INFO_CONTENT_DATA) size = entry->size; else size = 0; switch (orig) { case SEEK_SET: break; case SEEK_CUR: offset += file->f_pos; break; case SEEK_END: if (!size) goto out; offset += size; break; default: goto out; } if (offset < 0) goto out; if (size && offset > size) offset = size; file->f_pos = offset; ret = offset; out: 
mutex_unlock(&entry->access); return ret; } static ssize_t snd_info_entry_read(struct file *file, char __user *buffer, size_t count, loff_t * offset) { struct snd_info_private_data *data; struct snd_info_entry *entry; struct snd_info_buffer *buf; size_t size = 0; loff_t pos; data = file->private_data; if (snd_BUG_ON(!data)) return -ENXIO; pos = *offset; if (pos < 0 || (long) pos != pos || (ssize_t) count < 0) return -EIO; if ((unsigned long) pos + (unsigned long) count < (unsigned long) pos) return -EIO; entry = data->entry; switch (entry->content) { case SNDRV_INFO_CONTENT_TEXT: buf = data->rbuffer; if (buf == NULL) return -EIO; if (pos >= buf->size) return 0; size = buf->size - pos; size = min(count, size); if (copy_to_user(buffer, buf->buffer + pos, size)) return -EFAULT; break; case SNDRV_INFO_CONTENT_DATA: if (pos >= entry->size) return 0; if (entry->c.ops->read) { size = entry->size - pos; size = min(count, size); size = entry->c.ops->read(entry, data->file_private_data, file, buffer, size, pos); } break; } if ((ssize_t) size > 0) *offset = pos + size; return size; } static ssize_t snd_info_entry_write(struct file *file, const char __user *buffer, size_t count, loff_t * offset) { struct snd_info_private_data *data; struct snd_info_entry *entry; struct snd_info_buffer *buf; ssize_t size = 0; loff_t pos; data = file->private_data; if (snd_BUG_ON(!data)) return -ENXIO; entry = data->entry; pos = *offset; if (pos < 0 || (long) pos != pos || (ssize_t) count < 0) return -EIO; if ((unsigned long) pos + (unsigned long) count < (unsigned long) pos) return -EIO; switch (entry->content) { case SNDRV_INFO_CONTENT_TEXT: buf = data->wbuffer; if (buf == NULL) return -EIO; mutex_lock(&entry->access); if (pos + count >= buf->len) { if (resize_info_buffer(buf, pos + count)) { mutex_unlock(&entry->access); return -ENOMEM; } } if (copy_from_user(buf->buffer + pos, buffer, count)) { mutex_unlock(&entry->access); return -EFAULT; } buf->size = pos + count; 
mutex_unlock(&entry->access); size = count; break; case SNDRV_INFO_CONTENT_DATA: if (entry->c.ops->write && count > 0) { size_t maxsize = entry->size - pos; count = min(count, maxsize); size = entry->c.ops->write(entry, data->file_private_data, file, buffer, count, pos); } break; } if ((ssize_t) size > 0) *offset = pos + size; return size; } static int snd_info_entry_open(struct inode *inode, struct file *file) { struct snd_info_entry *entry; struct snd_info_private_data *data; struct snd_info_buffer *buffer; struct proc_dir_entry *p; int mode, err; mutex_lock(&info_mutex); p = PDE(inode); entry = p == NULL ? NULL : (struct snd_info_entry *)p->data; if (entry == NULL || ! entry->p) { mutex_unlock(&info_mutex); return -ENODEV; } if (!try_module_get(entry->module)) { err = -EFAULT; goto __error1; } mode = file->f_flags & O_ACCMODE; if (mode == O_RDONLY || mode == O_RDWR) { if ((entry->content == SNDRV_INFO_CONTENT_DATA && entry->c.ops->read == NULL)) { err = -ENODEV; goto __error; } } if (mode == O_WRONLY || mode == O_RDWR) { if ((entry->content == SNDRV_INFO_CONTENT_DATA && entry->c.ops->write == NULL)) { err = -ENODEV; goto __error; } } data = kzalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) { err = -ENOMEM; goto __error; } data->entry = entry; switch (entry->content) { case SNDRV_INFO_CONTENT_TEXT: if (mode == O_RDONLY || mode == O_RDWR) { buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (buffer == NULL) goto __nomem; data->rbuffer = buffer; buffer->len = PAGE_SIZE; buffer->buffer = kmalloc(buffer->len, GFP_KERNEL); if (buffer->buffer == NULL) goto __nomem; } if (mode == O_WRONLY || mode == O_RDWR) { buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (buffer == NULL) goto __nomem; data->wbuffer = buffer; buffer->len = PAGE_SIZE; buffer->buffer = kmalloc(buffer->len, GFP_KERNEL); if (buffer->buffer == NULL) goto __nomem; } break; case SNDRV_INFO_CONTENT_DATA: /* data */ if (entry->c.ops->open) { if ((err = entry->c.ops->open(entry, mode, 
&data->file_private_data)) < 0) { kfree(data); goto __error; } } break; } file->private_data = data; mutex_unlock(&info_mutex); if (entry->content == SNDRV_INFO_CONTENT_TEXT && (mode == O_RDONLY || mode == O_RDWR)) { if (entry->c.text.read) { mutex_lock(&entry->access); entry->c.text.read(entry, data->rbuffer); mutex_unlock(&entry->access); } } return 0; __nomem: if (data->rbuffer) { kfree(data->rbuffer->buffer); kfree(data->rbuffer); } if (data->wbuffer) { kfree(data->wbuffer->buffer); kfree(data->wbuffer); } kfree(data); err = -ENOMEM; __error: module_put(entry->module); __error1: mutex_unlock(&info_mutex); return err; } static int snd_info_entry_release(struct inode *inode, struct file *file) { struct snd_info_entry *entry; struct snd_info_private_data *data; int mode; mode = file->f_flags & O_ACCMODE; data = file->private_data; entry = data->entry; switch (entry->content) { case SNDRV_INFO_CONTENT_TEXT: if (data->rbuffer) { kfree(data->rbuffer->buffer); kfree(data->rbuffer); } if (data->wbuffer) { if (entry->c.text.write) { entry->c.text.write(entry, data->wbuffer); if (data->wbuffer->error) { snd_printk(KERN_WARNING "data write error to %s (%i)\n", entry->name, data->wbuffer->error); } } kfree(data->wbuffer->buffer); kfree(data->wbuffer); } break; case SNDRV_INFO_CONTENT_DATA: if (entry->c.ops->release) entry->c.ops->release(entry, mode, data->file_private_data); break; } module_put(entry->module); kfree(data); return 0; } static unsigned int snd_info_entry_poll(struct file *file, poll_table * wait) { struct snd_info_private_data *data; struct snd_info_entry *entry; unsigned int mask; data = file->private_data; if (data == NULL) return 0; entry = data->entry; mask = 0; switch (entry->content) { case SNDRV_INFO_CONTENT_DATA: if (entry->c.ops->poll) return entry->c.ops->poll(entry, data->file_private_data, file, wait); if (entry->c.ops->read) mask |= POLLIN | POLLRDNORM; if (entry->c.ops->write) mask |= POLLOUT | POLLWRNORM; break; } return mask; } static long 
snd_info_entry_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_info_private_data *data; struct snd_info_entry *entry; data = file->private_data; if (data == NULL) return 0; entry = data->entry; switch (entry->content) { case SNDRV_INFO_CONTENT_DATA: if (entry->c.ops->ioctl) return entry->c.ops->ioctl(entry, data->file_private_data, file, cmd, arg); break; } return -ENOTTY; } static int snd_info_entry_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file->f_path.dentry->d_inode; struct snd_info_private_data *data; struct snd_info_entry *entry; data = file->private_data; if (data == NULL) return 0; entry = data->entry; switch (entry->content) { case SNDRV_INFO_CONTENT_DATA: if (entry->c.ops->mmap) return entry->c.ops->mmap(entry, data->file_private_data, inode, file, vma); break; } return -ENXIO; } static const struct file_operations snd_info_entry_operations = { .owner = THIS_MODULE, .llseek = snd_info_entry_llseek, .read = snd_info_entry_read, .write = snd_info_entry_write, .poll = snd_info_entry_poll, .unlocked_ioctl = snd_info_entry_ioctl, .mmap = snd_info_entry_mmap, .open = snd_info_entry_open, .release = snd_info_entry_release, }; int __init snd_info_init(void) { struct proc_dir_entry *p; p = create_proc_entry("asound", S_IFDIR | S_IRUGO | S_IXUGO, NULL); if (p == NULL) return -ENOMEM; snd_proc_root = p; #ifdef CONFIG_SND_OSSEMUL { struct snd_info_entry *entry; if ((entry = snd_info_create_module_entry(THIS_MODULE, "oss", NULL)) == NULL) return -ENOMEM; entry->mode = S_IFDIR | S_IRUGO | S_IXUGO; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); return -ENOMEM; } snd_oss_root = entry; } #endif #if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE) { struct snd_info_entry *entry; if ((entry = snd_info_create_module_entry(THIS_MODULE, "seq", NULL)) == NULL) return -ENOMEM; entry->mode = S_IFDIR | S_IRUGO | S_IXUGO; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); 
return -ENOMEM; } snd_seq_root = entry; } #endif snd_info_version_init(); snd_minor_info_init(); snd_minor_info_oss_init(); snd_card_info_init(); return 0; } int __exit snd_info_done(void) { snd_card_info_done(); snd_minor_info_oss_done(); snd_minor_info_done(); snd_info_version_done(); if (snd_proc_root) { #if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE) snd_info_free_entry(snd_seq_root); #endif #ifdef CONFIG_SND_OSSEMUL snd_info_free_entry(snd_oss_root); #endif snd_remove_proc_entry(NULL, snd_proc_root); } return 0; } /* */ /* * create a card proc file * called from init.c */ int snd_info_card_create(struct snd_card *card) { char str[8]; struct snd_info_entry *entry; if (snd_BUG_ON(!card)) return -ENXIO; sprintf(str, "card%i", card->number); if ((entry = snd_info_create_module_entry(card->module, str, NULL)) == NULL) return -ENOMEM; entry->mode = S_IFDIR | S_IRUGO | S_IXUGO; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); return -ENOMEM; } card->proc_root = entry; return 0; } /* * register the card proc file * called from init.c */ int snd_info_card_register(struct snd_card *card) { struct proc_dir_entry *p; if (snd_BUG_ON(!card)) return -ENXIO; if (!strcmp(card->id, card->proc_root->name)) return 0; p = proc_symlink(card->id, snd_proc_root, card->proc_root->name); if (p == NULL) return -ENOMEM; card->proc_root_link = p; return 0; } /* * called on card->id change */ void snd_info_card_id_change(struct snd_card *card) { mutex_lock(&info_mutex); if (card->proc_root_link) { snd_remove_proc_entry(snd_proc_root, card->proc_root_link); card->proc_root_link = NULL; } if (strcmp(card->id, card->proc_root->name)) card->proc_root_link = proc_symlink(card->id, snd_proc_root, card->proc_root->name); mutex_unlock(&info_mutex); } /* * de-register the card proc file * called from init.c */ void snd_info_card_disconnect(struct snd_card *card) { if (!card) return; mutex_lock(&info_mutex); if (card->proc_root_link) { 
snd_remove_proc_entry(snd_proc_root, card->proc_root_link); card->proc_root_link = NULL; } if (card->proc_root) snd_info_disconnect(card->proc_root); mutex_unlock(&info_mutex); } /* * release the card proc file resources * called from init.c */ int snd_info_card_free(struct snd_card *card) { if (!card) return 0; snd_info_free_entry(card->proc_root); card->proc_root = NULL; return 0; } /** * snd_info_get_line - read one line from the procfs buffer * @buffer: the procfs buffer * @line: the buffer to store * @len: the max. buffer size - 1 * * Reads one line from the buffer and stores the string. * * Returns zero if successful, or 1 if error or EOF. */ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len) { int c = -1; if (len <= 0 || buffer->stop || buffer->error) return 1; while (--len > 0) { c = buffer->buffer[buffer->curr++]; if (c == '\n') { if (buffer->curr >= buffer->size) buffer->stop = 1; break; } *line++ = c; if (buffer->curr >= buffer->size) { buffer->stop = 1; break; } } while (c != '\n' && !buffer->stop) { c = buffer->buffer[buffer->curr++]; if (buffer->curr >= buffer->size) buffer->stop = 1; } *line = '\0'; return 0; } EXPORT_SYMBOL(snd_info_get_line); /** * snd_info_get_str - parse a string token * @dest: the buffer to store the string token * @src: the original string * @len: the max. length of token - 1 * * Parses the original string and copy a token to the given * string buffer. * * Returns the updated pointer of the original string so that * it can be used for the next call. 
*/ const char *snd_info_get_str(char *dest, const char *src, int len) { int c; while (*src == ' ' || *src == '\t') src++; if (*src == '"' || *src == '\'') { c = *src++; while (--len > 0 && *src && *src != c) { *dest++ = *src++; } if (*src == c) src++; } else { while (--len > 0 && *src && *src != ' ' && *src != '\t') { *dest++ = *src++; } } *dest = 0; while (*src == ' ' || *src == '\t') src++; return src; } EXPORT_SYMBOL(snd_info_get_str); /** * snd_info_create_entry - create an info entry * @name: the proc file name * * Creates an info entry with the given file name and initializes as * the default state. * * Usually called from other functions such as * snd_info_create_card_entry(). * * Returns the pointer of the new instance, or NULL on failure. */ static struct snd_info_entry *snd_info_create_entry(const char *name) { struct snd_info_entry *entry; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (entry == NULL) return NULL; entry->name = kstrdup(name, GFP_KERNEL); if (entry->name == NULL) { kfree(entry); return NULL; } entry->mode = S_IFREG | S_IRUGO; entry->content = SNDRV_INFO_CONTENT_TEXT; mutex_init(&entry->access); INIT_LIST_HEAD(&entry->children); INIT_LIST_HEAD(&entry->list); return entry; } /** * snd_info_create_module_entry - create an info entry for the given module * @module: the module pointer * @name: the file name * @parent: the parent directory * * Creates a new info entry and assigns it to the given module. * * Returns the pointer of the new instance, or NULL on failure. 
*/ struct snd_info_entry *snd_info_create_module_entry(struct module * module, const char *name, struct snd_info_entry *parent) { struct snd_info_entry *entry = snd_info_create_entry(name); if (entry) { entry->module = module; entry->parent = parent; } return entry; } EXPORT_SYMBOL(snd_info_create_module_entry); /** * snd_info_create_card_entry - create an info entry for the given card * @card: the card instance * @name: the file name * @parent: the parent directory * * Creates a new info entry and assigns it to the given card. * * Returns the pointer of the new instance, or NULL on failure. */ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card, const char *name, struct snd_info_entry * parent) { struct snd_info_entry *entry = snd_info_create_entry(name); if (entry) { entry->module = card->module; entry->card = card; entry->parent = parent; } return entry; } EXPORT_SYMBOL(snd_info_create_card_entry); static void snd_info_disconnect(struct snd_info_entry *entry) { struct list_head *p, *n; struct proc_dir_entry *root; list_for_each_safe(p, n, &entry->children) { snd_info_disconnect(list_entry(p, struct snd_info_entry, list)); } if (! entry->p) return; list_del_init(&entry->list); root = entry->parent == NULL ? snd_proc_root : entry->parent->p; snd_BUG_ON(!root); snd_remove_proc_entry(root, entry->p); entry->p = NULL; } static int snd_info_dev_free_entry(struct snd_device *device) { struct snd_info_entry *entry = device->device_data; snd_info_free_entry(entry); return 0; } static int snd_info_dev_register_entry(struct snd_device *device) { struct snd_info_entry *entry = device->device_data; return snd_info_register(entry); } /** * snd_card_proc_new - create an info entry for the given card * @card: the card instance * @name: the file name * @entryp: the pointer to store the new info entry * * Creates a new info entry and assigns it to the given card. 
* Unlike snd_info_create_card_entry(), this function registers the * info entry as an ALSA device component, so that it can be * unregistered/released without explicit call. * Also, you don't have to register this entry via snd_info_register(), * since this will be registered by snd_card_register() automatically. * * The parent is assumed as card->proc_root. * * For releasing this entry, use snd_device_free() instead of * snd_info_free_entry(). * * Returns zero if successful, or a negative error code on failure. */ int snd_card_proc_new(struct snd_card *card, const char *name, struct snd_info_entry **entryp) { static struct snd_device_ops ops = { .dev_free = snd_info_dev_free_entry, .dev_register = snd_info_dev_register_entry, /* disconnect is done via snd_info_card_disconnect() */ }; struct snd_info_entry *entry; int err; entry = snd_info_create_card_entry(card, name, card->proc_root); if (! entry) return -ENOMEM; if ((err = snd_device_new(card, SNDRV_DEV_INFO, entry, &ops)) < 0) { snd_info_free_entry(entry); return err; } if (entryp) *entryp = entry; return 0; } EXPORT_SYMBOL(snd_card_proc_new); /** * snd_info_free_entry - release the info entry * @entry: the info entry * * Releases the info entry. Don't call this after registered. */ void snd_info_free_entry(struct snd_info_entry * entry) { if (entry == NULL) return; if (entry->p) { mutex_lock(&info_mutex); snd_info_disconnect(entry); mutex_unlock(&info_mutex); } kfree(entry->name); if (entry->private_free) entry->private_free(entry); kfree(entry); } EXPORT_SYMBOL(snd_info_free_entry); /** * snd_info_register - register the info entry * @entry: the info entry * * Registers the proc info entry. * * Returns zero if successful, or a negative error code on failure. */ int snd_info_register(struct snd_info_entry * entry) { struct proc_dir_entry *root, *p = NULL; if (snd_BUG_ON(!entry)) return -ENXIO; root = entry->parent == NULL ? 
snd_proc_root : entry->parent->p; mutex_lock(&info_mutex); p = create_proc_entry(entry->name, entry->mode, root); if (!p) { mutex_unlock(&info_mutex); return -ENOMEM; } if (!S_ISDIR(entry->mode)) p->proc_fops = &snd_info_entry_operations; p->size = entry->size; p->data = entry; entry->p = p; if (entry->parent) list_add_tail(&entry->list, &entry->parent->children); mutex_unlock(&info_mutex); return 0; } EXPORT_SYMBOL(snd_info_register); /* */ static struct snd_info_entry *snd_info_version_entry; static void snd_info_version_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { snd_iprintf(buffer, "Advanced Linux Sound Architecture Driver Version " CONFIG_SND_VERSION CONFIG_SND_DATE ".\n" ); } static int __init snd_info_version_init(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, "version", NULL); if (entry == NULL) return -ENOMEM; entry->c.text.read = snd_info_version_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); return -ENOMEM; } snd_info_version_entry = entry; return 0; } static int __exit snd_info_version_done(void) { snd_info_free_entry(snd_info_version_entry); return 0; } #endif /* CONFIG_PROC_FS */
gpl-2.0
glewarne/Note2Core_v3_kernel_N710x
arch/alpha/kernel/irq.c
2919
3315
/* * linux/arch/alpha/kernel/irq.c * * Copyright (C) 1995 Linus Torvalds * * This file contains the code used by various IRQ handling routines: * asking for different IRQ's should be done through these routines * instead of just grabbing them. Thus setups with different IRQ numbers * shouldn't result in any weird surprises, and installing new handlers * should be easier. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel_stat.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/interrupt.h> #include <linux/random.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/profile.h> #include <linux/bitops.h> #include <asm/system.h> #include <asm/io.h> #include <asm/uaccess.h> volatile unsigned long irq_err_count; DEFINE_PER_CPU(unsigned long, irq_pmi_count); void ack_bad_irq(unsigned int irq) { irq_err_count++; printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq); } #ifdef CONFIG_SMP static char irq_user_affinity[NR_IRQS]; int irq_select_affinity(unsigned int irq) { struct irq_data *data = irq_get_irq_data(irq); struct irq_chip *chip; static int last_cpu; int cpu = last_cpu + 1; if (!data) return 1; chip = irq_data_get_irq_chip(data); if (!chip->irq_set_affinity || irq_user_affinity[irq]) return 1; while (!cpu_possible(cpu) || !cpumask_test_cpu(cpu, irq_default_affinity)) cpu = (cpu < (NR_CPUS-1) ? 
cpu + 1 : 0); last_cpu = cpu; cpumask_copy(data->affinity, cpumask_of(cpu)); chip->irq_set_affinity(data, cpumask_of(cpu), false); return 0; } #endif /* CONFIG_SMP */ int arch_show_interrupts(struct seq_file *p, int prec) { int j; #ifdef CONFIG_SMP seq_puts(p, "IPI: "); for_each_online_cpu(j) seq_printf(p, "%10lu ", cpu_data[j].ipi_count); seq_putc(p, '\n'); #endif seq_puts(p, "PMI: "); for_each_online_cpu(j) seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); seq_puts(p, " Performance Monitoring\n"); seq_printf(p, "ERR: %10lu\n", irq_err_count); return 0; } /* * handle_irq handles all normal device IRQ's (the special * SMP cross-CPU interrupts have their own specific * handlers). */ #define MAX_ILLEGAL_IRQS 16 void handle_irq(int irq) { /* * We ack quickly, we don't want the irq controller * thinking we're snobs just because some other CPU has * disabled global interrupts (we have already done the * INT_ACK cycles, it's too late to try to pretend to the * controller that we aren't taking the interrupt). * * 0 return value means that this irq is already being * handled by some other CPU. (or is disabled) */ static unsigned int illegal_count=0; struct irq_desc *desc = irq_to_desc(irq); if (!desc || ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS)) { irq_err_count++; illegal_count++; printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n", irq); return; } /* * From here we must proceed with IPL_MAX. Note that we do not * explicitly enable interrupts afterwards - some MILO PALcode * (namely LX164 one) seems to have severe problems with RTI * at IPL 0. */ local_irq_disable(); irq_enter(); generic_handle_irq_desc(irq, desc); irq_exit(); }
gpl-2.0
muftiarfan/DWI_xm
net/netfilter/nf_conntrack_proto.c
4455
10448
/* L3/L4 protocol support for nf_conntrack. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/netfilter.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/vmalloc.h> #include <linux/stddef.h> #include <linux/err.h> #include <linux/percpu.h> #include <linux/notifier.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_l3proto.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_core.h> static struct nf_conntrack_l4proto __rcu **nf_ct_protos[PF_MAX] __read_mostly; struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX] __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_l3protos); static DEFINE_MUTEX(nf_ct_proto_mutex); #ifdef CONFIG_SYSCTL static int nf_ct_register_sysctl(struct ctl_table_header **header, struct ctl_path *path, struct ctl_table *table, unsigned int *users) { if (*header == NULL) { *header = register_sysctl_paths(path, table); if (*header == NULL) return -ENOMEM; } if (users != NULL) (*users)++; return 0; } static void nf_ct_unregister_sysctl(struct ctl_table_header **header, struct ctl_table *table, unsigned int *users) { if (users != NULL && --*users > 0) return; unregister_sysctl_table(*header); *header = NULL; } #endif struct nf_conntrack_l4proto * __nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto) { if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL)) return &nf_conntrack_l4proto_generic; return rcu_dereference(nf_ct_protos[l3proto][l4proto]); } EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find); /* this is guaranteed to 
always return a valid protocol helper, since * it falls back to generic_protocol */ struct nf_conntrack_l3proto * nf_ct_l3proto_find_get(u_int16_t l3proto) { struct nf_conntrack_l3proto *p; rcu_read_lock(); p = __nf_ct_l3proto_find(l3proto); if (!try_module_get(p->me)) p = &nf_conntrack_l3proto_generic; rcu_read_unlock(); return p; } EXPORT_SYMBOL_GPL(nf_ct_l3proto_find_get); void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p) { module_put(p->me); } EXPORT_SYMBOL_GPL(nf_ct_l3proto_put); int nf_ct_l3proto_try_module_get(unsigned short l3proto) { int ret; struct nf_conntrack_l3proto *p; retry: p = nf_ct_l3proto_find_get(l3proto); if (p == &nf_conntrack_l3proto_generic) { ret = request_module("nf_conntrack-%d", l3proto); if (!ret) goto retry; return -EPROTOTYPE; } return 0; } EXPORT_SYMBOL_GPL(nf_ct_l3proto_try_module_get); void nf_ct_l3proto_module_put(unsigned short l3proto) { struct nf_conntrack_l3proto *p; /* rcu_read_lock not necessary since the caller holds a reference, but * taken anyways to avoid lockdep warnings in __nf_ct_l3proto_find() */ rcu_read_lock(); p = __nf_ct_l3proto_find(l3proto); module_put(p->me); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put); struct nf_conntrack_l4proto * nf_ct_l4proto_find_get(u_int16_t l3num, u_int8_t l4num) { struct nf_conntrack_l4proto *p; rcu_read_lock(); p = __nf_ct_l4proto_find(l3num, l4num); if (!try_module_get(p->me)) p = &nf_conntrack_l4proto_generic; rcu_read_unlock(); return p; } EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get); void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p) { module_put(p->me); } EXPORT_SYMBOL_GPL(nf_ct_l4proto_put); static int kill_l3proto(struct nf_conn *i, void *data) { return nf_ct_l3num(i) == ((struct nf_conntrack_l3proto *)data)->l3proto; } static int kill_l4proto(struct nf_conn *i, void *data) { struct nf_conntrack_l4proto *l4proto; l4proto = (struct nf_conntrack_l4proto *)data; return nf_ct_protonum(i) == l4proto->l4proto && nf_ct_l3num(i) == l4proto->l3proto; } static 
int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto) { int err = 0; #ifdef CONFIG_SYSCTL if (l3proto->ctl_table != NULL) { err = nf_ct_register_sysctl(&l3proto->ctl_table_header, l3proto->ctl_table_path, l3proto->ctl_table, NULL); } #endif return err; } static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto) { #ifdef CONFIG_SYSCTL if (l3proto->ctl_table_header != NULL) nf_ct_unregister_sysctl(&l3proto->ctl_table_header, l3proto->ctl_table, NULL); #endif } int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto) { int ret = 0; struct nf_conntrack_l3proto *old; if (proto->l3proto >= AF_MAX) return -EBUSY; if (proto->tuple_to_nlattr && !proto->nlattr_tuple_size) return -EINVAL; mutex_lock(&nf_ct_proto_mutex); old = rcu_dereference_protected(nf_ct_l3protos[proto->l3proto], lockdep_is_held(&nf_ct_proto_mutex)); if (old != &nf_conntrack_l3proto_generic) { ret = -EBUSY; goto out_unlock; } ret = nf_ct_l3proto_register_sysctl(proto); if (ret < 0) goto out_unlock; if (proto->nlattr_tuple_size) proto->nla_size = 3 * proto->nlattr_tuple_size(); rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto); out_unlock: mutex_unlock(&nf_ct_proto_mutex); return ret; } EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register); void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto) { struct net *net; BUG_ON(proto->l3proto >= AF_MAX); mutex_lock(&nf_ct_proto_mutex); BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto], lockdep_is_held(&nf_ct_proto_mutex) ) != proto); rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], &nf_conntrack_l3proto_generic); nf_ct_l3proto_unregister_sysctl(proto); mutex_unlock(&nf_ct_proto_mutex); synchronize_rcu(); /* Remove all contrack entries for this protocol */ rtnl_lock(); for_each_net(net) nf_ct_iterate_cleanup(net, kill_l3proto, proto); rtnl_unlock(); } EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister); static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto 
*l4proto) { int err = 0; #ifdef CONFIG_SYSCTL if (l4proto->ctl_table != NULL) { err = nf_ct_register_sysctl(l4proto->ctl_table_header, nf_net_netfilter_sysctl_path, l4proto->ctl_table, l4proto->ctl_table_users); if (err < 0) goto out; } #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT if (l4proto->ctl_compat_table != NULL) { err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header, nf_net_ipv4_netfilter_sysctl_path, l4proto->ctl_compat_table, NULL); if (err == 0) goto out; nf_ct_unregister_sysctl(l4proto->ctl_table_header, l4proto->ctl_table, l4proto->ctl_table_users); } #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ out: #endif /* CONFIG_SYSCTL */ return err; } static void nf_ct_l4proto_unregister_sysctl(struct nf_conntrack_l4proto *l4proto) { #ifdef CONFIG_SYSCTL if (l4proto->ctl_table_header != NULL && *l4proto->ctl_table_header != NULL) nf_ct_unregister_sysctl(l4proto->ctl_table_header, l4proto->ctl_table, l4proto->ctl_table_users); #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT if (l4proto->ctl_compat_table_header != NULL) nf_ct_unregister_sysctl(&l4proto->ctl_compat_table_header, l4proto->ctl_compat_table, NULL); #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ #endif /* CONFIG_SYSCTL */ } /* FIXME: Allow NULL functions and sub in pointers to generic for them. --RR */ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto) { int ret = 0; if (l4proto->l3proto >= PF_MAX) return -EBUSY; if ((l4proto->to_nlattr && !l4proto->nlattr_size) || (l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size)) return -EINVAL; mutex_lock(&nf_ct_proto_mutex); if (!nf_ct_protos[l4proto->l3proto]) { /* l3proto may be loaded latter. 
*/ struct nf_conntrack_l4proto __rcu **proto_array; int i; proto_array = kmalloc(MAX_NF_CT_PROTO * sizeof(struct nf_conntrack_l4proto *), GFP_KERNEL); if (proto_array == NULL) { ret = -ENOMEM; goto out_unlock; } for (i = 0; i < MAX_NF_CT_PROTO; i++) RCU_INIT_POINTER(proto_array[i], &nf_conntrack_l4proto_generic); /* Before making proto_array visible to lockless readers, * we must make sure its content is committed to memory. */ smp_wmb(); nf_ct_protos[l4proto->l3proto] = proto_array; } else if (rcu_dereference_protected( nf_ct_protos[l4proto->l3proto][l4proto->l4proto], lockdep_is_held(&nf_ct_proto_mutex) ) != &nf_conntrack_l4proto_generic) { ret = -EBUSY; goto out_unlock; } ret = nf_ct_l4proto_register_sysctl(l4proto); if (ret < 0) goto out_unlock; l4proto->nla_size = 0; if (l4proto->nlattr_size) l4proto->nla_size += l4proto->nlattr_size(); if (l4proto->nlattr_tuple_size) l4proto->nla_size += 3 * l4proto->nlattr_tuple_size(); rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], l4proto); out_unlock: mutex_unlock(&nf_ct_proto_mutex); return ret; } EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register); void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto) { struct net *net; BUG_ON(l4proto->l3proto >= PF_MAX); mutex_lock(&nf_ct_proto_mutex); BUG_ON(rcu_dereference_protected( nf_ct_protos[l4proto->l3proto][l4proto->l4proto], lockdep_is_held(&nf_ct_proto_mutex) ) != l4proto); rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], &nf_conntrack_l4proto_generic); nf_ct_l4proto_unregister_sysctl(l4proto); mutex_unlock(&nf_ct_proto_mutex); synchronize_rcu(); /* Remove all contrack entries for this protocol */ rtnl_lock(); for_each_net(net) nf_ct_iterate_cleanup(net, kill_l4proto, l4proto); rtnl_unlock(); } EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); int nf_conntrack_proto_init(void) { unsigned int i; int err; err = nf_ct_l4proto_register_sysctl(&nf_conntrack_l4proto_generic); if (err < 0) return err; for (i = 0; i < 
AF_MAX; i++) rcu_assign_pointer(nf_ct_l3protos[i], &nf_conntrack_l3proto_generic); return 0; } void nf_conntrack_proto_fini(void) { unsigned int i; nf_ct_l4proto_unregister_sysctl(&nf_conntrack_l4proto_generic); /* free l3proto protocol tables */ for (i = 0; i < PF_MAX; i++) kfree(nf_ct_protos[i]); }
gpl-2.0
leehz/android_kernel_samsung_ms013g
arch/powerpc/sysdev/ppc4xx_msi.c
4455
7106
/* * Adding PCI-E MSI support for PPC4XX SoCs. * * Copyright (c) 2010, Applied Micro Circuits Corporation * Authors: Tirumala R Marri <tmarri@apm.com> * Feng Kan <fkan@apm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <linux/irq.h> #include <linux/bootmem.h> #include <linux/pci.h> #include <linux/msi.h> #include <linux/of_platform.h> #include <linux/interrupt.h> #include <linux/export.h> #include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <boot/dcr.h> #include <asm/dcr-regs.h> #include <asm/msi_bitmap.h> #define PEIH_TERMADH 0x00 #define PEIH_TERMADL 0x08 #define PEIH_MSIED 0x10 #define PEIH_MSIMK 0x18 #define PEIH_MSIASS 0x20 #define PEIH_FLUSH0 0x30 #define PEIH_FLUSH1 0x38 #define PEIH_CNTRST 0x48 #define NR_MSI_IRQS 4 struct ppc4xx_msi { u32 msi_addr_lo; u32 msi_addr_hi; void __iomem *msi_regs; int msi_virqs[NR_MSI_IRQS]; struct msi_bitmap bitmap; struct device_node *msi_dev; }; static struct ppc4xx_msi ppc4xx_msi; static int ppc4xx_msi_init_allocator(struct platform_device *dev, struct ppc4xx_msi *msi_data) { int err; err = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS, dev->dev.of_node); if (err) return err; err = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap); if (err < 0) { msi_bitmap_free(&msi_data->bitmap); return err; } return 0; } static int 
ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { int int_no = -ENOMEM; unsigned int virq; struct msi_msg msg; struct msi_desc *entry; struct ppc4xx_msi *msi_data = &ppc4xx_msi; list_for_each_entry(entry, &dev->msi_list, list) { int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); if (int_no >= 0) break; if (int_no < 0) { pr_debug("%s: fail allocating msi interrupt\n", __func__); } virq = irq_of_parse_and_map(msi_data->msi_dev, int_no); if (virq == NO_IRQ) { dev_err(&dev->dev, "%s: fail mapping irq\n", __func__); msi_bitmap_free_hwirqs(&msi_data->bitmap, int_no, 1); return -ENOSPC; } dev_dbg(&dev->dev, "%s: virq = %d\n", __func__, virq); /* Setup msi address space */ msg.address_hi = msi_data->msi_addr_hi; msg.address_lo = msi_data->msi_addr_lo; irq_set_msi_desc(virq, entry); msg.data = int_no; write_msi_msg(virq, &msg); } return 0; } void ppc4xx_teardown_msi_irqs(struct pci_dev *dev) { struct msi_desc *entry; struct ppc4xx_msi *msi_data = &ppc4xx_msi; dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n"); list_for_each_entry(entry, &dev->msi_list, list) { if (entry->irq == NO_IRQ) continue; irq_set_msi_desc(entry->irq, NULL); msi_bitmap_free_hwirqs(&msi_data->bitmap, virq_to_hw(entry->irq), 1); irq_dispose_mapping(entry->irq); } } static int ppc4xx_msi_check_device(struct pci_dev *pdev, int nvec, int type) { dev_dbg(&pdev->dev, "PCIE-MSI:%s called. 
vec %x type %d\n", __func__, nvec, type); if (type == PCI_CAP_ID_MSIX) pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n"); return 0; } static int ppc4xx_setup_pcieh_hw(struct platform_device *dev, struct resource res, struct ppc4xx_msi *msi) { const u32 *msi_data; const u32 *msi_mask; const u32 *sdr_addr; dma_addr_t msi_phys; void *msi_virt; sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL); if (!sdr_addr) return -1; SDR0_WRITE(sdr_addr, (u64)res.start >> 32); /*HIGH addr */ SDR0_WRITE(sdr_addr + 1, res.start & 0xFFFFFFFF); /* Low addr */ msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi"); if (msi->msi_dev) return -ENODEV; msi->msi_regs = of_iomap(msi->msi_dev, 0); if (!msi->msi_regs) { dev_err(&dev->dev, "of_iomap problem failed\n"); return -ENOMEM; } dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n", (u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs)); msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL); msi->msi_addr_hi = 0x0; msi->msi_addr_lo = (u32) msi_phys; dev_dbg(&dev->dev, "PCIE-MSI: msi address 0x%x\n", msi->msi_addr_lo); /* Progam the Interrupt handler Termination addr registers */ out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi); out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo); msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL); if (!msi_data) return -1; msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL); if (!msi_mask) return -1; /* Program MSI Expected data and Mask bits */ out_be32(msi->msi_regs + PEIH_MSIED, *msi_data); out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask); return 0; } static int ppc4xx_of_msi_remove(struct platform_device *dev) { struct ppc4xx_msi *msi = dev->dev.platform_data; int i; int virq; for (i = 0; i < NR_MSI_IRQS; i++) { virq = msi->msi_virqs[i]; if (virq != NO_IRQ) irq_dispose_mapping(virq); } if (msi->bitmap.bitmap) msi_bitmap_free(&msi->bitmap); iounmap(msi->msi_regs); of_node_put(msi->msi_dev); kfree(msi); return 0; } 
static int __devinit ppc4xx_msi_probe(struct platform_device *dev) { struct ppc4xx_msi *msi; struct resource res; int err = 0; msi = &ppc4xx_msi;/*keep the msi data for further use*/ dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n"); msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL); if (!msi) { dev_err(&dev->dev, "No memory for MSI structure\n"); return -ENOMEM; } dev->dev.platform_data = msi; /* Get MSI ranges */ err = of_address_to_resource(dev->dev.of_node, 0, &res); if (err) { dev_err(&dev->dev, "%s resource error!\n", dev->dev.of_node->full_name); goto error_out; } if (ppc4xx_setup_pcieh_hw(dev, res, msi)) goto error_out; err = ppc4xx_msi_init_allocator(dev, msi); if (err) { dev_err(&dev->dev, "Error allocating MSI bitmap\n"); goto error_out; } ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs; ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs; ppc_md.msi_check_device = ppc4xx_msi_check_device; return err; error_out: ppc4xx_of_msi_remove(dev); return err; } static const struct of_device_id ppc4xx_msi_ids[] = { { .compatible = "amcc,ppc4xx-msi", }, {} }; static struct platform_driver ppc4xx_msi_driver = { .probe = ppc4xx_msi_probe, .remove = ppc4xx_of_msi_remove, .driver = { .name = "ppc4xx-msi", .owner = THIS_MODULE, .of_match_table = ppc4xx_msi_ids, }, }; static __init int ppc4xx_msi_init(void) { return platform_driver_register(&ppc4xx_msi_driver); } subsys_initcall(ppc4xx_msi_init);
gpl-2.0
PatrikKT/android_kernel_htc_a31ul
arch/x86/kernel/acpi/cstate.c
7271
5734
/* * Copyright (C) 2005 Intel Corporation * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> * - Added _PDC for SMP C-states on Intel CPUs */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/cpu.h> #include <linux/sched.h> #include <acpi/processor.h> #include <asm/acpi.h> #include <asm/mwait.h> #include <asm/special_insns.h> /* * Initialize bm_flags based on the CPU cache properties * On SMP it depends on cache configuration * - When cache is not shared among all CPUs, we flush cache * before entering C3. * - When cache is shared among all CPUs, we use bm_check * mechanism as in UP case * * This routine is called only after all the CPUs are online */ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, unsigned int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); flags->bm_check = 0; if (num_online_cpus() == 1) flags->bm_check = 1; else if (c->x86_vendor == X86_VENDOR_INTEL) { /* * Today all MP CPUs that support C3 share cache. * And caches should not be flushed by software while * entering C3 type state. */ flags->bm_check = 1; } /* * On all recent Intel platforms, ARB_DISABLE is a nop. 
* So, set bm_control to zero to indicate that ARB_DISABLE * is not required while entering C3 type state on * P4, Core and beyond CPUs */ if (c->x86_vendor == X86_VENDOR_INTEL && (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f))) flags->bm_control = 0; } EXPORT_SYMBOL(acpi_processor_power_init_bm_check); /* The code below handles cstate entry with monitor-mwait pair on Intel*/ struct cstate_entry { struct { unsigned int eax; unsigned int ecx; } states[ACPI_PROCESSOR_MAX_POWER]; }; static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; #define NATIVE_CSTATE_BEYOND_HALT (2) static long acpi_processor_ffh_cstate_probe_cpu(void *_cx) { struct acpi_processor_cx *cx = _cx; long retval; unsigned int eax, ebx, ecx, edx; unsigned int edx_part; unsigned int cstate_type; /* C-state type and not ACPI C-state type */ unsigned int num_cstate_subtype; cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); /* Check whether this particular cx_type (in CST) is supported or not */ cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE); num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK; retval = 0; if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) { retval = -1; goto out; } /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) { retval = -1; goto out; } if (!mwait_supported[cstate_type]) { mwait_supported[cstate_type] = 1; printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d " "state\n", cx->type); } snprintf(cx->desc, ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x", cx->address); out: return retval; } int acpi_processor_ffh_cstate_probe(unsigned int cpu, struct acpi_processor_cx *cx, struct acpi_power_register *reg) { struct cstate_entry *percpu_entry; struct cpuinfo_x86 *c = &cpu_data(cpu); long retval; if 
(!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF) return -1; if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT) return -1; percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); percpu_entry->states[cx->index].eax = 0; percpu_entry->states[cx->index].ecx = 0; /* Make sure we are running on right CPU */ retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx); if (retval == 0) { /* Use the hint in CST */ percpu_entry->states[cx->index].eax = cx->address; percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; } /* * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared, * then we should skip checking BM_STS for this C-state. * ref: "Intel Processor Vendor-Specific ACPI Interface Specification" */ if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2)) cx->bm_sts_skip = 1; return retval; } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); /* * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, * which can obviate IPI to trigger checking of need_resched. * We execute MONITOR against need_resched and enter optimized wait state * through MWAIT. Whenever someone changes need_resched, we would be woken * up from MWAIT (without an IPI). * * New with Core Duo processors, MWAIT can take some hints based on CPU * capability. 
*/ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) { if (!need_resched()) { if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)&current_thread_info()->flags); __monitor((void *)&current_thread_info()->flags, 0, 0); smp_mb(); if (!need_resched()) __mwait(ax, cx); } } void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) { unsigned int cpu = smp_processor_id(); struct cstate_entry *percpu_entry; percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); mwait_idle_with_hints(percpu_entry->states[cx->index].eax, percpu_entry->states[cx->index].ecx); } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter); static int __init ffh_cstate_init(void) { struct cpuinfo_x86 *c = &boot_cpu_data; if (c->x86_vendor != X86_VENDOR_INTEL) return -1; cpu_cstate_entry = alloc_percpu(struct cstate_entry); return 0; } static void __exit ffh_cstate_exit(void) { free_percpu(cpu_cstate_entry); cpu_cstate_entry = NULL; } arch_initcall(ffh_cstate_init); __exitcall(ffh_cstate_exit);
gpl-2.0
windxixi/android_kernel_htc_msm8660
drivers/ata/pata_hpt37x.c
8039
26312
/* * Libata driver for the highpoint 37x and 30x UDMA66 ATA controllers. * * This driver is heavily based upon: * * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003 * * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> * Portions Copyright (C) 2001 Sun Microsystems, Inc. * Portions Copyright (C) 2003 Red Hat Inc * Portions Copyright (C) 2005-2010 MontaVista Software, Inc. * * TODO * Look into engine reset on timeout errors. Should not be required. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_hpt37x" #define DRV_VERSION "0.6.23" struct hpt_clock { u8 xfer_speed; u32 timing; }; struct hpt_chip { const char *name; unsigned int base; struct hpt_clock const *clocks[4]; }; /* key for bus clock timings * bit * 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA. * cycles = value + 1 * 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA. * cycles = value + 1 * 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file * register access. * 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file * register access. * 18:20 udma_cycle_time. Clock cycles for UDMA xfer. * 21 CLK frequency for UDMA: 0=ATA clock, 1=dual ATA clock. * 22:24 pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer. * 25:27 cmd_pre_high_time. Time to initialize 1st PIO cycle for task file * register access. * 28 UDMA enable. * 29 DMA enable. * 30 PIO_MST enable. If set, the chip is in bus master mode during * PIO xfer. * 31 FIFO enable. Only for PIO. */ static struct hpt_clock hpt37x_timings_33[] = { { XFER_UDMA_6, 0x12446231 }, /* 0x12646231 ?? 
*/ { XFER_UDMA_5, 0x12446231 }, { XFER_UDMA_4, 0x12446231 }, { XFER_UDMA_3, 0x126c6231 }, { XFER_UDMA_2, 0x12486231 }, { XFER_UDMA_1, 0x124c6233 }, { XFER_UDMA_0, 0x12506297 }, { XFER_MW_DMA_2, 0x22406c31 }, { XFER_MW_DMA_1, 0x22406c33 }, { XFER_MW_DMA_0, 0x22406c97 }, { XFER_PIO_4, 0x06414e31 }, { XFER_PIO_3, 0x06414e42 }, { XFER_PIO_2, 0x06414e53 }, { XFER_PIO_1, 0x06814e93 }, { XFER_PIO_0, 0x06814ea7 } }; static struct hpt_clock hpt37x_timings_50[] = { { XFER_UDMA_6, 0x12848242 }, { XFER_UDMA_5, 0x12848242 }, { XFER_UDMA_4, 0x12ac8242 }, { XFER_UDMA_3, 0x128c8242 }, { XFER_UDMA_2, 0x120c8242 }, { XFER_UDMA_1, 0x12148254 }, { XFER_UDMA_0, 0x121882ea }, { XFER_MW_DMA_2, 0x22808242 }, { XFER_MW_DMA_1, 0x22808254 }, { XFER_MW_DMA_0, 0x228082ea }, { XFER_PIO_4, 0x0a81f442 }, { XFER_PIO_3, 0x0a81f443 }, { XFER_PIO_2, 0x0a81f454 }, { XFER_PIO_1, 0x0ac1f465 }, { XFER_PIO_0, 0x0ac1f48a } }; static struct hpt_clock hpt37x_timings_66[] = { { XFER_UDMA_6, 0x1c869c62 }, { XFER_UDMA_5, 0x1cae9c62 }, /* 0x1c8a9c62 */ { XFER_UDMA_4, 0x1c8a9c62 }, { XFER_UDMA_3, 0x1c8e9c62 }, { XFER_UDMA_2, 0x1c929c62 }, { XFER_UDMA_1, 0x1c9a9c62 }, { XFER_UDMA_0, 0x1c829c62 }, { XFER_MW_DMA_2, 0x2c829c62 }, { XFER_MW_DMA_1, 0x2c829c66 }, { XFER_MW_DMA_0, 0x2c829d2e }, { XFER_PIO_4, 0x0c829c62 }, { XFER_PIO_3, 0x0c829c84 }, { XFER_PIO_2, 0x0c829ca6 }, { XFER_PIO_1, 0x0d029d26 }, { XFER_PIO_0, 0x0d029d5e } }; static const struct hpt_chip hpt370 = { "HPT370", 48, { hpt37x_timings_33, NULL, NULL, NULL } }; static const struct hpt_chip hpt370a = { "HPT370A", 48, { hpt37x_timings_33, NULL, hpt37x_timings_50, NULL } }; static const struct hpt_chip hpt372 = { "HPT372", 55, { hpt37x_timings_33, NULL, hpt37x_timings_50, hpt37x_timings_66 } }; static const struct hpt_chip hpt302 = { "HPT302", 66, { hpt37x_timings_33, NULL, hpt37x_timings_50, hpt37x_timings_66 } }; static const struct hpt_chip hpt371 = { "HPT371", 66, { hpt37x_timings_33, NULL, hpt37x_timings_50, hpt37x_timings_66 } }; static const struct 
hpt_chip hpt372a = { "HPT372A", 66, { hpt37x_timings_33, NULL, hpt37x_timings_50, hpt37x_timings_66 } }; static const struct hpt_chip hpt374 = { "HPT374", 48, { hpt37x_timings_33, NULL, NULL, NULL } }; /** * hpt37x_find_mode - reset the hpt37x bus * @ap: ATA port * @speed: transfer mode * * Return the 32bit register programming information for this channel * that matches the speed provided. */ static u32 hpt37x_find_mode(struct ata_port *ap, int speed) { struct hpt_clock *clocks = ap->host->private_data; while (clocks->xfer_speed) { if (clocks->xfer_speed == speed) return clocks->timing; clocks++; } BUG(); return 0xffffffffU; /* silence compiler warning */ } static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char * const list[]) { unsigned char model_num[ATA_ID_PROD_LEN + 1]; int i = 0; ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); while (list[i] != NULL) { if (!strcmp(list[i], model_num)) { pr_warn("%s is not supported for %s\n", modestr, list[i]); return 1; } i++; } return 0; } static const char * const bad_ata33[] = { "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2", "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2", "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4", "Maxtor 90510D4", "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2", "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4", "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2", NULL }; static const char * const bad_ata100_5[] = { "IBM-DTLA-307075", "IBM-DTLA-307060", "IBM-DTLA-307045", "IBM-DTLA-307030", "IBM-DTLA-307020", "IBM-DTLA-307015", "IBM-DTLA-305040", "IBM-DTLA-305030", 
"IBM-DTLA-305020", "IC35L010AVER07-0", "IC35L020AVER07-0", "IC35L030AVER07-0", "IC35L040AVER07-0", "IC35L060AVER07-0", "WDC AC310200R", NULL }; /** * hpt370_filter - mode selection filter * @adev: ATA device * * Block UDMA on devices that cause trouble with this controller. */ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask) { if (adev->class == ATA_DEV_ATA) { if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) mask &= ~ATA_MASK_UDMA; if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) mask &= ~(0xE0 << ATA_SHIFT_UDMA); } return mask; } /** * hpt370a_filter - mode selection filter * @adev: ATA device * * Block UDMA on devices that cause trouble with this controller. */ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask) { if (adev->class == ATA_DEV_ATA) { if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) mask &= ~(0xE0 << ATA_SHIFT_UDMA); } return mask; } /** * hpt372_filter - mode selection filter * @adev: ATA device * @mask: mode mask * * The Marvell bridge chips used on the HighPoint SATA cards do not seem * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes... 
*/ static unsigned long hpt372_filter(struct ata_device *adev, unsigned long mask) { if (ata_id_is_sata(adev->id)) mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA); return mask; } /** * hpt37x_cable_detect - Detect the cable type * @ap: ATA port to detect on * * Return the cable type attached to this port */ static int hpt37x_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 scr2, ata66; pci_read_config_byte(pdev, 0x5B, &scr2); pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01); udelay(10); /* debounce */ /* Cable register now active */ pci_read_config_byte(pdev, 0x5A, &ata66); /* Restore state */ pci_write_config_byte(pdev, 0x5B, scr2); if (ata66 & (2 >> ap->port_no)) return ATA_CBL_PATA40; else return ATA_CBL_PATA80; } /** * hpt374_fn1_cable_detect - Detect the cable type * @ap: ATA port to detect on * * Return the cable type attached to this port */ static int hpt374_fn1_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); unsigned int mcrbase = 0x50 + 4 * ap->port_no; u16 mcr3; u8 ata66; /* Do the extra channel work */ pci_read_config_word(pdev, mcrbase + 2, &mcr3); /* Set bit 15 of 0x52 to enable TCBLID as input */ pci_write_config_word(pdev, mcrbase + 2, mcr3 | 0x8000); pci_read_config_byte(pdev, 0x5A, &ata66); /* Reset TCBLID/FCBLID to output */ pci_write_config_word(pdev, mcrbase + 2, mcr3); if (ata66 & (2 >> ap->port_no)) return ATA_CBL_PATA40; else return ATA_CBL_PATA80; } /** * hpt37x_pre_reset - reset the hpt37x bus * @link: ATA link to reset * @deadline: deadline jiffies for the operation * * Perform the initial reset handling for the HPT37x. 
*/ static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits hpt37x_enable_bits[] = { { 0x50, 1, 0x04, 0x04 }, { 0x54, 1, 0x04, 0x04 } }; if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no])) return -ENOENT; /* Reset the state machine */ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(100); return ata_sff_prereset(link, deadline); } static void hpt370_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 addr1, addr2; u32 reg, timing, mask; u8 fast; addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); addr2 = 0x51 + 4 * ap->port_no; /* Fast interrupt prediction disable, hold off interrupt disable */ pci_read_config_byte(pdev, addr2, &fast); fast &= ~0x02; fast |= 0x01; pci_write_config_byte(pdev, addr2, fast); /* Determine timing mask and find matching mode entry */ if (mode < XFER_MW_DMA_0) mask = 0xcfc3ffff; else if (mode < XFER_UDMA_0) mask = 0x31c001ff; else mask = 0x303c0000; timing = hpt37x_find_mode(ap, mode); pci_read_config_dword(pdev, addr1, &reg); reg = (reg & ~mask) | (timing & mask); pci_write_config_dword(pdev, addr1, reg); } /** * hpt370_set_piomode - PIO setup * @ap: ATA interface * @adev: device on the interface * * Perform PIO mode setup. */ static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev) { hpt370_set_mode(ap, adev, adev->pio_mode); } /** * hpt370_set_dmamode - DMA timing setup * @ap: ATA interface * @adev: Device being configured * * Set up the channel for MWDMA or UDMA modes. */ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev) { hpt370_set_mode(ap, adev, adev->dma_mode); } /** * hpt370_bmdma_end - DMA engine stop * @qc: ATA command * * Work around the HPT370 DMA engine. 
*/ static void hpt370_bmdma_stop(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); void __iomem *bmdma = ap->ioaddr.bmdma_addr; u8 dma_stat = ioread8(bmdma + ATA_DMA_STATUS); u8 dma_cmd; if (dma_stat & ATA_DMA_ACTIVE) { udelay(20); dma_stat = ioread8(bmdma + ATA_DMA_STATUS); } if (dma_stat & ATA_DMA_ACTIVE) { /* Clear the engine */ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(10); /* Stop DMA */ dma_cmd = ioread8(bmdma + ATA_DMA_CMD); iowrite8(dma_cmd & ~ATA_DMA_START, bmdma + ATA_DMA_CMD); /* Clear Error */ dma_stat = ioread8(bmdma + ATA_DMA_STATUS); iowrite8(dma_stat | ATA_DMA_INTR | ATA_DMA_ERR, bmdma + ATA_DMA_STATUS); /* Clear the engine */ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(10); } ata_bmdma_stop(qc); } static void hpt372_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 addr1, addr2; u32 reg, timing, mask; u8 fast; addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); addr2 = 0x51 + 4 * ap->port_no; /* Fast interrupt prediction disable, hold off interrupt disable */ pci_read_config_byte(pdev, addr2, &fast); fast &= ~0x07; pci_write_config_byte(pdev, addr2, fast); /* Determine timing mask and find matching mode entry */ if (mode < XFER_MW_DMA_0) mask = 0xcfc3ffff; else if (mode < XFER_UDMA_0) mask = 0x31c001ff; else mask = 0x303c0000; timing = hpt37x_find_mode(ap, mode); pci_read_config_dword(pdev, addr1, &reg); reg = (reg & ~mask) | (timing & mask); pci_write_config_dword(pdev, addr1, reg); } /** * hpt372_set_piomode - PIO setup * @ap: ATA interface * @adev: device on the interface * * Perform PIO mode setup. */ static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev) { hpt372_set_mode(ap, adev, adev->pio_mode); } /** * hpt372_set_dmamode - DMA timing setup * @ap: ATA interface * @adev: Device being configured * * Set up the channel for MWDMA or UDMA modes. 
*/ static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev) { hpt372_set_mode(ap, adev, adev->dma_mode); } /** * hpt37x_bmdma_end - DMA engine stop * @qc: ATA command * * Clean up after the HPT372 and later DMA engine */ static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); int mscreg = 0x50 + 4 * ap->port_no; u8 bwsr_stat, msc_stat; pci_read_config_byte(pdev, 0x6A, &bwsr_stat); pci_read_config_byte(pdev, mscreg, &msc_stat); if (bwsr_stat & (1 << ap->port_no)) pci_write_config_byte(pdev, mscreg, msc_stat | 0x30); ata_bmdma_stop(qc); } static struct scsi_host_template hpt37x_sht = { ATA_BMDMA_SHT(DRV_NAME), }; /* * Configuration for HPT370 */ static struct ata_port_operations hpt370_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_stop = hpt370_bmdma_stop, .mode_filter = hpt370_filter, .cable_detect = hpt37x_cable_detect, .set_piomode = hpt370_set_piomode, .set_dmamode = hpt370_set_dmamode, .prereset = hpt37x_pre_reset, }; /* * Configuration for HPT370A. Close to 370 but less filters */ static struct ata_port_operations hpt370a_port_ops = { .inherits = &hpt370_port_ops, .mode_filter = hpt370a_filter, }; /* * Configuration for HPT371 and HPT302. Slightly different PIO and DMA * mode setting functionality. */ static struct ata_port_operations hpt302_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_stop = hpt37x_bmdma_stop, .cable_detect = hpt37x_cable_detect, .set_piomode = hpt372_set_piomode, .set_dmamode = hpt372_set_dmamode, .prereset = hpt37x_pre_reset, }; /* * Configuration for HPT372. Mode setting works like 371 and 302 * but we have a mode filter. */ static struct ata_port_operations hpt372_port_ops = { .inherits = &hpt302_port_ops, .mode_filter = hpt372_filter, }; /* * Configuration for HPT374. Mode setting and filtering works like 372 * but we have a different cable detection procedure for function 1. 
*/ static struct ata_port_operations hpt374_fn1_port_ops = { .inherits = &hpt372_port_ops, .cable_detect = hpt374_fn1_cable_detect, }; /** * hpt37x_clock_slot - Turn timing to PC clock entry * @freq: Reported frequency timing * @base: Base timing * * Turn the timing data intoa clock slot (0 for 33, 1 for 40, 2 for 50 * and 3 for 66Mhz) */ static int hpt37x_clock_slot(unsigned int freq, unsigned int base) { unsigned int f = (base * freq) / 192; /* Mhz */ if (f < 40) return 0; /* 33Mhz slot */ if (f < 45) return 1; /* 40Mhz slot */ if (f < 55) return 2; /* 50Mhz slot */ return 3; /* 60Mhz slot */ } /** * hpt37x_calibrate_dpll - Calibrate the DPLL loop * @dev: PCI device * * Perform a calibration cycle on the HPT37x DPLL. Returns 1 if this * succeeds */ static int hpt37x_calibrate_dpll(struct pci_dev *dev) { u8 reg5b; u32 reg5c; int tries; for (tries = 0; tries < 0x5000; tries++) { udelay(50); pci_read_config_byte(dev, 0x5b, &reg5b); if (reg5b & 0x80) { /* See if it stays set */ for (tries = 0; tries < 0x1000; tries++) { pci_read_config_byte(dev, 0x5b, &reg5b); /* Failed ? */ if ((reg5b & 0x80) == 0) return 0; } /* Turn off tuning, we have the DPLL set */ pci_read_config_dword(dev, 0x5c, &reg5c); pci_write_config_dword(dev, 0x5c, reg5c & ~0x100); return 1; } } /* Never went stable */ return 0; } static u32 hpt374_read_freq(struct pci_dev *pdev) { u32 freq; unsigned long io_base = pci_resource_start(pdev, 4); if (PCI_FUNC(pdev->devfn) & 1) { struct pci_dev *pdev_0; pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1); /* Someone hot plugged the controller on us ? */ if (pdev_0 == NULL) return 0; io_base = pci_resource_start(pdev_0, 4); freq = inl(io_base + 0x90); pci_dev_put(pdev_0); } else freq = inl(io_base + 0x90); return freq; } /** * hpt37x_init_one - Initialise an HPT37X/302 * @dev: PCI device * @id: Entry in match table * * Initialise an HPT37x device. There are some interesting complications * here. Firstly the chip may report 366 and be one of several variants. 
* Secondly all the timings depend on the clock for the chip which we must * detect and look up * * This is the known chip mappings. It may be missing a couple of later * releases. * * Chip version PCI Rev Notes * HPT366 4 (HPT366) 0 Other driver * HPT366 4 (HPT366) 1 Other driver * HPT368 4 (HPT366) 2 Other driver * HPT370 4 (HPT366) 3 UDMA100 * HPT370A 4 (HPT366) 4 UDMA100 * HPT372 4 (HPT366) 5 UDMA133 (1) * HPT372N 4 (HPT366) 6 Other driver * HPT372A 5 (HPT372) 1 UDMA133 (1) * HPT372N 5 (HPT372) 2 Other driver * HPT302 6 (HPT302) 1 UDMA133 * HPT302N 6 (HPT302) 2 Other driver * HPT371 7 (HPT371) * UDMA133 * HPT374 8 (HPT374) * UDMA133 4 channel * HPT372N 9 (HPT372N) * Other driver * * (1) UDMA133 support depends on the bus clock */ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) { /* HPT370 - UDMA100 */ static const struct ata_port_info info_hpt370 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &hpt370_port_ops }; /* HPT370A - UDMA100 */ static const struct ata_port_info info_hpt370a = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &hpt370a_port_ops }; /* HPT370 - UDMA66 */ static const struct ata_port_info info_hpt370_33 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, .port_ops = &hpt370_port_ops }; /* HPT370A - UDMA66 */ static const struct ata_port_info info_hpt370a_33 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, .port_ops = &hpt370a_port_ops }; /* HPT372 - UDMA133 */ static const struct ata_port_info info_hpt372 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &hpt372_port_ops }; /* HPT371, 302 - UDMA133 */ static const struct ata_port_info info_hpt302 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, 
.mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &hpt302_port_ops }; /* HPT374 - UDMA100, function 1 uses different cable_detect method */ static const struct ata_port_info info_hpt374_fn0 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &hpt372_port_ops }; static const struct ata_port_info info_hpt374_fn1 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &hpt374_fn1_port_ops }; static const int MHz[4] = { 33, 40, 50, 66 }; void *private_data = NULL; const struct ata_port_info *ppi[] = { NULL, NULL }; u8 rev = dev->revision; u8 irqmask; u8 mcr1; u32 freq; int prefer_dpll = 1; unsigned long iobase = pci_resource_start(dev, 4); const struct hpt_chip *chip_table; int clock_slot; int rc; rc = pcim_enable_device(dev); if (rc) return rc; switch (dev->device) { case PCI_DEVICE_ID_TTI_HPT366: /* May be a later chip in disguise. Check */ /* Older chips are in the HPT366 driver. Ignore them */ if (rev < 3) return -ENODEV; /* N series chips have their own driver. 
Ignore */ if (rev == 6) return -ENODEV; switch (rev) { case 3: ppi[0] = &info_hpt370; chip_table = &hpt370; prefer_dpll = 0; break; case 4: ppi[0] = &info_hpt370a; chip_table = &hpt370a; prefer_dpll = 0; break; case 5: ppi[0] = &info_hpt372; chip_table = &hpt372; break; default: pr_err("Unknown HPT366 subtype, please report (%d)\n", rev); return -ENODEV; } break; case PCI_DEVICE_ID_TTI_HPT372: /* 372N if rev >= 2 */ if (rev >= 2) return -ENODEV; ppi[0] = &info_hpt372; chip_table = &hpt372a; break; case PCI_DEVICE_ID_TTI_HPT302: /* 302N if rev > 1 */ if (rev > 1) return -ENODEV; ppi[0] = &info_hpt302; /* Check this */ chip_table = &hpt302; break; case PCI_DEVICE_ID_TTI_HPT371: if (rev > 1) return -ENODEV; ppi[0] = &info_hpt302; chip_table = &hpt371; /* * Single channel device, master is not present but the BIOS * (or us for non x86) must mark it absent */ pci_read_config_byte(dev, 0x50, &mcr1); mcr1 &= ~0x04; pci_write_config_byte(dev, 0x50, mcr1); break; case PCI_DEVICE_ID_TTI_HPT374: chip_table = &hpt374; if (!(PCI_FUNC(dev->devfn) & 1)) *ppi = &info_hpt374_fn0; else *ppi = &info_hpt374_fn1; break; default: pr_err("PCI table is bogus, please report (%d)\n", dev->device); return -ENODEV; } /* Ok so this is a chip we support */ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78); pci_write_config_byte(dev, PCI_MIN_GNT, 0x08); pci_write_config_byte(dev, PCI_MAX_LAT, 0x08); pci_read_config_byte(dev, 0x5A, &irqmask); irqmask &= ~0x10; pci_write_config_byte(dev, 0x5a, irqmask); /* * default to pci clock. make sure MA15/16 are set to output * to prevent drives having problems with 40-pin cables. Needed * for some drives such as IBM-DTLA which will not enter ready * state on reset when PDIAG is a input. */ pci_write_config_byte(dev, 0x5b, 0x23); /* * HighPoint does this for HPT372A. * NOTE: This register is only writeable via I/O space. 
*/ if (chip_table == &hpt372a) outb(0x0e, iobase + 0x9c); /* * Some devices do not let this value be accessed via PCI space * according to the old driver. In addition we must use the value * from FN 0 on the HPT374. */ if (chip_table == &hpt374) { freq = hpt374_read_freq(dev); if (freq == 0) return -ENODEV; } else freq = inl(iobase + 0x90); if ((freq >> 12) != 0xABCDE) { int i; u8 sr; u32 total = 0; pr_warn("BIOS has not set timing clocks\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { pci_read_config_byte(dev, 0x78, &sr); total += sr & 0x1FF; udelay(15); } freq = total / 128; } freq &= 0x1FF; /* * Turn the frequency check into a band and then find a timing * table to match it. */ clock_slot = hpt37x_clock_slot(freq, chip_table->base); if (chip_table->clocks[clock_slot] == NULL || prefer_dpll) { /* * We need to try PLL mode instead * * For non UDMA133 capable devices we should * use a 50MHz DPLL by choice */ unsigned int f_low, f_high; int dpll, adjust; /* Compute DPLL */ dpll = (ppi[0]->udma_mask & 0xC0) ? 3 : 2; f_low = (MHz[clock_slot] * 48) / MHz[dpll]; f_high = f_low + 2; if (clock_slot > 1) f_high += 2; /* Select the DPLL clock. */ pci_write_config_byte(dev, 0x5b, 0x21); pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100); for (adjust = 0; adjust < 8; adjust++) { if (hpt37x_calibrate_dpll(dev)) break; /* * See if it'll settle at a fractionally * different clock */ if (adjust & 1) f_low -= adjust >> 1; else f_high += adjust >> 1; pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100); } if (adjust == 8) { pr_err("DPLL did not stabilize!\n"); return -ENODEV; } if (dpll == 3) private_data = (void *)hpt37x_timings_66; else private_data = (void *)hpt37x_timings_50; pr_info("bus clock %dMHz, using %dMHz DPLL\n", MHz[clock_slot], MHz[dpll]); } else { private_data = (void *)chip_table->clocks[clock_slot]; /* * Perform a final fixup. 
Note that we will have used the * DPLL on the HPT372 which means we don't have to worry * about lack of UDMA133 support on lower clocks */ if (clock_slot < 2 && ppi[0] == &info_hpt370) ppi[0] = &info_hpt370_33; if (clock_slot < 2 && ppi[0] == &info_hpt370a) ppi[0] = &info_hpt370a_33; pr_info("%s using %dMHz bus clock\n", chip_table->name, MHz[clock_slot]); } /* Now kick off ATA set up */ return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0); } static const struct pci_device_id hpt37x[] = { { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), }, { }, }; static struct pci_driver hpt37x_pci_driver = { .name = DRV_NAME, .id_table = hpt37x, .probe = hpt37x_init_one, .remove = ata_pci_remove_one }; static int __init hpt37x_init(void) { return pci_register_driver(&hpt37x_pci_driver); } static void __exit hpt37x_exit(void) { pci_unregister_driver(&hpt37x_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, hpt37x); MODULE_VERSION(DRV_VERSION); module_init(hpt37x_init); module_exit(hpt37x_exit);
gpl-2.0
dkthompson/bricked-pyramid-3.0_own
drivers/media/dvb/siano/smsir.c
8295
3182
/**************************************************************** Siano Mobile Silicon, Inc. MDTV receiver kernel modules. Copyright (C) 2006-2009, Uri Shkolnik Copyright (c) 2010 - Mauro Carvalho Chehab - Ported the driver to use rc-core - IR raw event decoding is now done at rc-core - Code almost re-written This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ****************************************************************/ #include <linux/types.h> #include <linux/input.h> #include "smscoreapi.h" #include "smsir.h" #include "sms-cards.h" #define MODULE_NAME "smsmdtv" void sms_ir_event(struct smscore_device_t *coredev, const char *buf, int len) { int i; const s32 *samples = (const void *)buf; for (i = 0; i < len >> 2; i++) { DEFINE_IR_RAW_EVENT(ev); ev.duration = abs(samples[i]) * 1000; /* Convert to ns */ ev.pulse = (samples[i] > 0) ? 
false : true; ir_raw_event_store(coredev->ir.dev, &ev); } ir_raw_event_handle(coredev->ir.dev); } int sms_ir_init(struct smscore_device_t *coredev) { int err; int board_id = smscore_get_board_id(coredev); struct rc_dev *dev; sms_log("Allocating rc device"); dev = rc_allocate_device(); if (!dev) { sms_err("Not enough memory"); return -ENOMEM; } coredev->ir.controller = 0; /* Todo: vega/nova SPI number */ coredev->ir.timeout = IR_DEFAULT_TIMEOUT; sms_log("IR port %d, timeout %d ms", coredev->ir.controller, coredev->ir.timeout); snprintf(coredev->ir.name, sizeof(coredev->ir.name), "SMS IR (%s)", sms_get_board(board_id)->name); strlcpy(coredev->ir.phys, coredev->devpath, sizeof(coredev->ir.phys)); strlcat(coredev->ir.phys, "/ir0", sizeof(coredev->ir.phys)); dev->input_name = coredev->ir.name; dev->input_phys = coredev->ir.phys; dev->dev.parent = coredev->device; #if 0 /* TODO: properly initialize the parameters bellow */ dev->input_id.bustype = BUS_USB; dev->input_id.version = 1; dev->input_id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor); dev->input_id.product = le16_to_cpu(dev->udev->descriptor.idProduct); #endif dev->priv = coredev; dev->driver_type = RC_DRIVER_IR_RAW; dev->allowed_protos = RC_TYPE_ALL; dev->map_name = sms_get_board(board_id)->rc_codes; dev->driver_name = MODULE_NAME; sms_log("Input device (IR) %s is set for key events", dev->input_name); err = rc_register_device(dev); if (err < 0) { sms_err("Failed to register device"); rc_free_device(dev); return err; } coredev->ir.dev = dev; return 0; } void sms_ir_exit(struct smscore_device_t *coredev) { if (coredev->ir.dev) rc_unregister_device(coredev->ir.dev); sms_log(""); }
gpl-2.0
kelvinbui31/android_mediatek_muse72
drivers/pcmcia/pxa2xx_cm_x255.c
9831
3080
/* * linux/drivers/pcmcia/pxa/pxa_cm_x255.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Compulab Ltd., 2003, 2007, 2008 * Mike Rapoport <mike@compulab.co.il> * */ #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/export.h> #include "soc_common.h" #define GPIO_PCMCIA_SKTSEL (54) #define GPIO_PCMCIA_S0_CD_VALID (16) #define GPIO_PCMCIA_S1_CD_VALID (17) #define GPIO_PCMCIA_S0_RDYINT (6) #define GPIO_PCMCIA_S1_RDYINT (8) #define GPIO_PCMCIA_RESET (9) static int cmx255_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { int ret = gpio_request(GPIO_PCMCIA_RESET, "PCCard reset"); if (ret) return ret; gpio_direction_output(GPIO_PCMCIA_RESET, 0); if (skt->nr == 0) { skt->stat[SOC_STAT_CD].gpio = GPIO_PCMCIA_S0_CD_VALID; skt->stat[SOC_STAT_CD].name = "PCMCIA0 CD"; skt->stat[SOC_STAT_RDY].gpio = GPIO_PCMCIA_S0_RDYINT; skt->stat[SOC_STAT_RDY].name = "PCMCIA0 RDY"; } else { skt->stat[SOC_STAT_CD].gpio = GPIO_PCMCIA_S1_CD_VALID; skt->stat[SOC_STAT_CD].name = "PCMCIA1 CD"; skt->stat[SOC_STAT_RDY].gpio = GPIO_PCMCIA_S1_RDYINT; skt->stat[SOC_STAT_RDY].name = "PCMCIA1 RDY"; } return 0; } static void cmx255_pcmcia_shutdown(struct soc_pcmcia_socket *skt) { gpio_free(GPIO_PCMCIA_RESET); } static void cmx255_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { state->vs_3v = 0; state->vs_Xv = 0; } static int cmx255_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { switch (skt->nr) { case 0: if (state->flags & SS_RESET) { gpio_set_value(GPIO_PCMCIA_SKTSEL, 0); udelay(1); gpio_set_value(GPIO_PCMCIA_RESET, 1); udelay(10); gpio_set_value(GPIO_PCMCIA_RESET, 0); } break; case 1: if (state->flags & SS_RESET) { gpio_set_value(GPIO_PCMCIA_SKTSEL, 1); udelay(1); gpio_set_value(GPIO_PCMCIA_RESET, 1); udelay(10); 
gpio_set_value(GPIO_PCMCIA_RESET, 0); } break; } return 0; } static struct pcmcia_low_level cmx255_pcmcia_ops __initdata = { .owner = THIS_MODULE, .hw_init = cmx255_pcmcia_hw_init, .hw_shutdown = cmx255_pcmcia_shutdown, .socket_state = cmx255_pcmcia_socket_state, .configure_socket = cmx255_pcmcia_configure_socket, .nr = 1, }; static struct platform_device *cmx255_pcmcia_device; int __init cmx255_pcmcia_init(void) { int ret; cmx255_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!cmx255_pcmcia_device) return -ENOMEM; ret = platform_device_add_data(cmx255_pcmcia_device, &cmx255_pcmcia_ops, sizeof(cmx255_pcmcia_ops)); if (ret == 0) { printk(KERN_INFO "Registering cm-x255 PCMCIA interface.\n"); ret = platform_device_add(cmx255_pcmcia_device); } if (ret) platform_device_put(cmx255_pcmcia_device); return ret; } void __exit cmx255_pcmcia_exit(void) { platform_device_unregister(cmx255_pcmcia_device); }
gpl-2.0
AOSParadox/android_kernel_oneplus_onyx
drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
10855
4327
/* * Copyright (C) 2006-2008 PA Semi, Inc * * Ethtool hooks for the PA Semi PWRficient onchip 1G/10G Ethernet MACs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> #include <linux/inet_lro.h> #include <asm/pasemi_dma.h> #include "pasemi_mac.h" static struct { const char str[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { { "rx-drops" }, { "rx-bytes" }, { "rx-packets" }, { "rx-broadcast-packets" }, { "rx-multicast-packets" }, { "rx-crc-errors" }, { "rx-undersize-errors" }, { "rx-oversize-errors" }, { "rx-short-fragment-errors" }, { "rx-jabber-errors" }, { "rx-64-byte-packets" }, { "rx-65-127-byte-packets" }, { "rx-128-255-byte-packets" }, { "rx-256-511-byte-packets" }, { "rx-512-1023-byte-packets" }, { "rx-1024-1518-byte-packets" }, { "rx-pause-frames" }, { "tx-bytes" }, { "tx-packets" }, { "tx-broadcast-packets" }, { "tx-multicast-packets" }, { "tx-collisions" }, { "tx-late-collisions" }, { "tx-excessive-collisions" }, { "tx-crc-errors" }, { "tx-undersize-errors" }, { "tx-oversize-errors" }, { "tx-64-byte-packets" }, { "tx-65-127-byte-packets" }, { "tx-128-255-byte-packets" }, { "tx-256-511-byte-packets" }, { "tx-512-1023-byte-packets" }, { "tx-1024-1518-byte-packets" }, }; static int pasemi_mac_ethtool_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { struct pasemi_mac *mac = 
netdev_priv(netdev); struct phy_device *phydev = mac->phydev; if (!phydev) return -EOPNOTSUPP; return phy_ethtool_gset(phydev, cmd); } static int pasemi_mac_ethtool_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { struct pasemi_mac *mac = netdev_priv(netdev); struct phy_device *phydev = mac->phydev; if (!phydev) return -EOPNOTSUPP; return phy_ethtool_sset(phydev, cmd); } static u32 pasemi_mac_ethtool_get_msglevel(struct net_device *netdev) { struct pasemi_mac *mac = netdev_priv(netdev); return mac->msg_enable; } static void pasemi_mac_ethtool_set_msglevel(struct net_device *netdev, u32 level) { struct pasemi_mac *mac = netdev_priv(netdev); mac->msg_enable = level; } static void pasemi_mac_ethtool_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ering) { struct pasemi_mac *mac = netdev_priv(netdev); ering->tx_max_pending = TX_RING_SIZE/2; ering->tx_pending = RING_USED(mac->tx)/2; ering->rx_max_pending = RX_RING_SIZE/4; ering->rx_pending = RING_USED(mac->rx)/4; } static int pasemi_mac_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(ethtool_stats_keys); default: return -EOPNOTSUPP; } } static void pasemi_mac_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct pasemi_mac *mac = netdev_priv(netdev); int i; data[0] = pasemi_read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)) >> PAS_DMA_RXINT_RCMDSTA_DROPS_S; for (i = 0; i < 32; i++) data[1+i] = pasemi_read_mac_reg(mac->dma_if, PAS_MAC_RMON(i)); } static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys)); } const struct ethtool_ops pasemi_mac_ethtool_ops = { .get_settings = pasemi_mac_ethtool_get_settings, .set_settings = pasemi_mac_ethtool_set_settings, .get_msglevel = pasemi_mac_ethtool_get_msglevel, .set_msglevel = pasemi_mac_ethtool_set_msglevel, .get_link = ethtool_op_get_link, .get_ringparam = 
pasemi_mac_ethtool_get_ringparam, .get_strings = pasemi_mac_get_strings, .get_sset_count = pasemi_mac_get_sset_count, .get_ethtool_stats = pasemi_mac_get_ethtool_stats, };
gpl-2.0
Ironjim41/angler_kernel
drivers/infiniband/hw/cxgb3/iwch.c
13671
7675
/* * Copyright (c) 2006 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <rdma/ib_verbs.h> #include "cxgb3_offload.h" #include "iwch_provider.h" #include "iwch_user.h" #include "iwch.h" #include "iwch_cm.h" #define DRV_VERSION "1.1" MODULE_AUTHOR("Boyd Faulkner, Steve Wise"); MODULE_DESCRIPTION("Chelsio T3 RDMA Driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); static void open_rnic_dev(struct t3cdev *); static void close_rnic_dev(struct t3cdev *); static void iwch_event_handler(struct t3cdev *, u32, u32); struct cxgb3_client t3c_client = { .name = "iw_cxgb3", .add = open_rnic_dev, .remove = close_rnic_dev, .handlers = t3c_handlers, .redirect = iwch_ep_redirect, .event_handler = iwch_event_handler }; static LIST_HEAD(dev_list); static DEFINE_MUTEX(dev_mutex); static int disable_qp_db(int id, void *p, void *data) { struct iwch_qp *qhp = p; cxio_disable_wq_db(&qhp->wq); return 0; } static int enable_qp_db(int id, void *p, void *data) { struct iwch_qp *qhp = p; if (data) ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid); cxio_enable_wq_db(&qhp->wq); return 0; } static void disable_dbs(struct iwch_dev *rnicp) { spin_lock_irq(&rnicp->lock); idr_for_each(&rnicp->qpidr, disable_qp_db, NULL); spin_unlock_irq(&rnicp->lock); } static void enable_dbs(struct iwch_dev *rnicp, int ring_db) { spin_lock_irq(&rnicp->lock); idr_for_each(&rnicp->qpidr, enable_qp_db, (void *)(unsigned long)ring_db); spin_unlock_irq(&rnicp->lock); } static void iwch_db_drop_task(struct work_struct *work) { struct iwch_dev *rnicp = container_of(work, struct iwch_dev, db_drop_task.work); enable_dbs(rnicp, 1); } static void rnic_init(struct iwch_dev *rnicp) { PDBG("%s iwch_dev %p\n", __func__, rnicp); idr_init(&rnicp->cqidr); idr_init(&rnicp->qpidr); idr_init(&rnicp->mmidr); spin_lock_init(&rnicp->lock); INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task); rnicp->attr.max_qps = T3_MAX_NUM_QP - 32; rnicp->attr.max_wrs = T3_MAX_QP_DEPTH; rnicp->attr.max_sge_per_wr = 
T3_MAX_SGE; rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE; rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1; rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH; rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev); rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE; rnicp->attr.max_pds = T3_MAX_NUM_PD - 1; rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK; rnicp->attr.max_mr_size = T3_MAX_MR_SIZE; rnicp->attr.can_resize_wq = 0; rnicp->attr.max_rdma_reads_per_qp = 8; rnicp->attr.max_rdma_read_resources = rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps; rnicp->attr.max_rdma_read_qp_depth = 8; /* IRD */ rnicp->attr.max_rdma_read_depth = rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps; rnicp->attr.rq_overflow_handled = 0; rnicp->attr.can_modify_ird = 0; rnicp->attr.can_modify_ord = 0; rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1; rnicp->attr.stag0_value = 1; rnicp->attr.zbva_support = 1; rnicp->attr.local_invalidate_fence = 1; rnicp->attr.cq_overflow_detection = 1; return; } static void open_rnic_dev(struct t3cdev *tdev) { struct iwch_dev *rnicp; PDBG("%s t3cdev %p\n", __func__, tdev); printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n", DRV_VERSION); rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp)); if (!rnicp) { printk(KERN_ERR MOD "Cannot allocate ib device\n"); return; } rnicp->rdev.ulp = rnicp; rnicp->rdev.t3cdev_p = tdev; mutex_lock(&dev_mutex); if (cxio_rdev_open(&rnicp->rdev)) { mutex_unlock(&dev_mutex); printk(KERN_ERR MOD "Unable to open CXIO rdev\n"); ib_dealloc_device(&rnicp->ibdev); return; } rnic_init(rnicp); list_add_tail(&rnicp->entry, &dev_list); mutex_unlock(&dev_mutex); if (iwch_register_device(rnicp)) { printk(KERN_ERR MOD "Unable to register device\n"); close_rnic_dev(tdev); } printk(KERN_INFO MOD "Initialized device %s\n", pci_name(rnicp->rdev.rnic_info.pdev)); return; } static void close_rnic_dev(struct t3cdev *tdev) { struct iwch_dev *dev, *tmp; PDBG("%s t3cdev %p\n", __func__, tdev); 
mutex_lock(&dev_mutex); list_for_each_entry_safe(dev, tmp, &dev_list, entry) { if (dev->rdev.t3cdev_p == tdev) { dev->rdev.flags = CXIO_ERROR_FATAL; synchronize_net(); cancel_delayed_work_sync(&dev->db_drop_task); list_del(&dev->entry); iwch_unregister_device(dev); cxio_rdev_close(&dev->rdev); idr_destroy(&dev->cqidr); idr_destroy(&dev->qpidr); idr_destroy(&dev->mmidr); ib_dealloc_device(&dev->ibdev); break; } } mutex_unlock(&dev_mutex); } static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id) { struct cxio_rdev *rdev = tdev->ulp; struct iwch_dev *rnicp; struct ib_event event; u32 portnum = port_id + 1; int dispatch = 0; if (!rdev) return; rnicp = rdev_to_iwch_dev(rdev); switch (evt) { case OFFLOAD_STATUS_DOWN: { rdev->flags = CXIO_ERROR_FATAL; synchronize_net(); event.event = IB_EVENT_DEVICE_FATAL; dispatch = 1; break; } case OFFLOAD_PORT_DOWN: { event.event = IB_EVENT_PORT_ERR; dispatch = 1; break; } case OFFLOAD_PORT_UP: { event.event = IB_EVENT_PORT_ACTIVE; dispatch = 1; break; } case OFFLOAD_DB_FULL: { disable_dbs(rnicp); break; } case OFFLOAD_DB_EMPTY: { enable_dbs(rnicp, 1); break; } case OFFLOAD_DB_DROP: { unsigned long delay = 1000; unsigned short r; disable_dbs(rnicp); get_random_bytes(&r, 2); delay += r & 1023; /* * delay is between 1000-2023 usecs. */ schedule_delayed_work(&rnicp->db_drop_task, usecs_to_jiffies(delay)); break; } } if (dispatch) { event.device = &rnicp->ibdev; event.element.port_num = portnum; ib_dispatch_event(&event); } return; } static int __init iwch_init_module(void) { int err; err = cxio_hal_init(); if (err) return err; err = iwch_cm_init(); if (err) return err; cxio_register_ev_cb(iwch_ev_dispatch); cxgb3_register_client(&t3c_client); return 0; } static void __exit iwch_exit_module(void) { cxgb3_unregister_client(&t3c_client); cxio_unregister_ev_cb(iwch_ev_dispatch); iwch_cm_term(); cxio_hal_exit(); } module_init(iwch_init_module); module_exit(iwch_exit_module);
gpl-2.0
hoelzl/argos2
simulator/libs/FreeImage/Source/LibJPEG/jdinput.c
104
25699
/* * jdinput.c * * Copyright (C) 1991-1997, Thomas G. Lane. * Modified 2002-2009 by Guido Vollbeding. * This file is part of the Independent JPEG Group's software. * For conditions of distribution and use, see the accompanying README file. * * This file contains input control logic for the JPEG decompressor. * These routines are concerned with controlling the decompressor's input * processing (marker reading and coefficient decoding). The actual input * reading is done in jdmarker.c, jdhuff.c, and jdarith.c. */ #define JPEG_INTERNALS #include "jinclude.h" #include "jpeglib.h" /* Private state */ typedef struct { struct jpeg_input_controller pub; /* public fields */ int inheaders; /* Nonzero until first SOS is reached */ } my_input_controller; typedef my_input_controller * my_inputctl_ptr; /* Forward declarations */ METHODDEF(int) consume_markers JPP((j_decompress_ptr cinfo)); /* * Routines to calculate various quantities related to the size of the image. */ /* * Compute output image dimensions and related values. * NOTE: this is exported for possible use by application. * Hence it mustn't do anything that can't be done twice. */ GLOBAL(void) jpeg_core_output_dimensions (j_decompress_ptr cinfo) /* Do computations that are needed before master selection phase. * This function is used for transcoding and full decompression. */ { #ifdef IDCT_SCALING_SUPPORTED int ci; jpeg_component_info *compptr; /* Compute actual output image dimensions and DCT scaling choices. 
*/ if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom) { /* Provide 1/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 1; cinfo->min_DCT_v_scaled_size = 1; } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 2) { /* Provide 2/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 2L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 2L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 2; cinfo->min_DCT_v_scaled_size = 2; } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 3) { /* Provide 3/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 3L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 3L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 3; cinfo->min_DCT_v_scaled_size = 3; } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 4) { /* Provide 4/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 4L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 4L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 4; cinfo->min_DCT_v_scaled_size = 4; } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 5) { /* Provide 5/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 5L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 5L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 5; cinfo->min_DCT_v_scaled_size = 5; } else if (cinfo->scale_num 
* cinfo->block_size <= cinfo->scale_denom * 6) { /* Provide 6/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 6L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 6L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 6; cinfo->min_DCT_v_scaled_size = 6; } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 7) { /* Provide 7/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 7L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 7L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 7; cinfo->min_DCT_v_scaled_size = 7; } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 8) { /* Provide 8/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 8L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 8L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 8; cinfo->min_DCT_v_scaled_size = 8; } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 9) { /* Provide 9/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 9L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 9L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 9; cinfo->min_DCT_v_scaled_size = 9; } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 10) { /* Provide 10/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 10L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 10L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 10; cinfo->min_DCT_v_scaled_size = 10; } else if (cinfo->scale_num * 
cinfo->block_size <= cinfo->scale_denom * 11) { /* Provide 11/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 11L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 11L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 11; cinfo->min_DCT_v_scaled_size = 11; } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 12) { /* Provide 12/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 12L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 12L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 12; cinfo->min_DCT_v_scaled_size = 12; } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 13) { /* Provide 13/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 13L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 13L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 13; cinfo->min_DCT_v_scaled_size = 13; } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 14) { /* Provide 14/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 14L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 14L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 14; cinfo->min_DCT_v_scaled_size = 14; } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 15) { /* Provide 15/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 15L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 15L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 15; cinfo->min_DCT_v_scaled_size = 15; } else { 
/* Provide 16/block_size scaling */ cinfo->output_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * 16L, (long) cinfo->block_size); cinfo->output_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * 16L, (long) cinfo->block_size); cinfo->min_DCT_h_scaled_size = 16; cinfo->min_DCT_v_scaled_size = 16; } /* Recompute dimensions of components */ for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { compptr->DCT_h_scaled_size = cinfo->min_DCT_h_scaled_size; compptr->DCT_v_scaled_size = cinfo->min_DCT_v_scaled_size; } #else /* !IDCT_SCALING_SUPPORTED */ /* Hardwire it to "no scaling" */ cinfo->output_width = cinfo->image_width; cinfo->output_height = cinfo->image_height; /* jdinput.c has already initialized DCT_scaled_size, * and has computed unscaled downsampled_width and downsampled_height. */ #endif /* IDCT_SCALING_SUPPORTED */ } LOCAL(void) initial_setup (j_decompress_ptr cinfo) /* Called once, when first SOS marker is reached */ { int ci; jpeg_component_info *compptr; /* Make sure image isn't bigger than I can handle */ if ((long) cinfo->image_height > (long) JPEG_MAX_DIMENSION || (long) cinfo->image_width > (long) JPEG_MAX_DIMENSION) ERREXIT1(cinfo, JERR_IMAGE_TOO_BIG, (unsigned int) JPEG_MAX_DIMENSION); /* For now, precision must match compiled-in value... 
*/ if (cinfo->data_precision != BITS_IN_JSAMPLE) ERREXIT1(cinfo, JERR_BAD_PRECISION, cinfo->data_precision); /* Check that number of components won't exceed internal array sizes */ if (cinfo->num_components > MAX_COMPONENTS) ERREXIT2(cinfo, JERR_COMPONENT_COUNT, cinfo->num_components, MAX_COMPONENTS); /* Compute maximum sampling factors; check factor validity */ cinfo->max_h_samp_factor = 1; cinfo->max_v_samp_factor = 1; for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { if (compptr->h_samp_factor<=0 || compptr->h_samp_factor>MAX_SAMP_FACTOR || compptr->v_samp_factor<=0 || compptr->v_samp_factor>MAX_SAMP_FACTOR) ERREXIT(cinfo, JERR_BAD_SAMPLING); cinfo->max_h_samp_factor = MAX(cinfo->max_h_samp_factor, compptr->h_samp_factor); cinfo->max_v_samp_factor = MAX(cinfo->max_v_samp_factor, compptr->v_samp_factor); } /* Derive block_size, natural_order, and lim_Se */ if (cinfo->is_baseline || (cinfo->progressive_mode && cinfo->comps_in_scan)) { /* no pseudo SOS marker */ cinfo->block_size = DCTSIZE; cinfo->natural_order = jpeg_natural_order; cinfo->lim_Se = DCTSIZE2-1; } else switch (cinfo->Se) { case (1*1-1): cinfo->block_size = 1; cinfo->natural_order = jpeg_natural_order; /* not needed */ cinfo->lim_Se = cinfo->Se; break; case (2*2-1): cinfo->block_size = 2; cinfo->natural_order = jpeg_natural_order2; cinfo->lim_Se = cinfo->Se; break; case (3*3-1): cinfo->block_size = 3; cinfo->natural_order = jpeg_natural_order3; cinfo->lim_Se = cinfo->Se; break; case (4*4-1): cinfo->block_size = 4; cinfo->natural_order = jpeg_natural_order4; cinfo->lim_Se = cinfo->Se; break; case (5*5-1): cinfo->block_size = 5; cinfo->natural_order = jpeg_natural_order5; cinfo->lim_Se = cinfo->Se; break; case (6*6-1): cinfo->block_size = 6; cinfo->natural_order = jpeg_natural_order6; cinfo->lim_Se = cinfo->Se; break; case (7*7-1): cinfo->block_size = 7; cinfo->natural_order = jpeg_natural_order7; cinfo->lim_Se = cinfo->Se; break; case (8*8-1): cinfo->block_size = 
8; cinfo->natural_order = jpeg_natural_order; cinfo->lim_Se = DCTSIZE2-1; break; case (9*9-1): cinfo->block_size = 9; cinfo->natural_order = jpeg_natural_order; cinfo->lim_Se = DCTSIZE2-1; break; case (10*10-1): cinfo->block_size = 10; cinfo->natural_order = jpeg_natural_order; cinfo->lim_Se = DCTSIZE2-1; break; case (11*11-1): cinfo->block_size = 11; cinfo->natural_order = jpeg_natural_order; cinfo->lim_Se = DCTSIZE2-1; break; case (12*12-1): cinfo->block_size = 12; cinfo->natural_order = jpeg_natural_order; cinfo->lim_Se = DCTSIZE2-1; break; case (13*13-1): cinfo->block_size = 13; cinfo->natural_order = jpeg_natural_order; cinfo->lim_Se = DCTSIZE2-1; break; case (14*14-1): cinfo->block_size = 14; cinfo->natural_order = jpeg_natural_order; cinfo->lim_Se = DCTSIZE2-1; break; case (15*15-1): cinfo->block_size = 15; cinfo->natural_order = jpeg_natural_order; cinfo->lim_Se = DCTSIZE2-1; break; case (16*16-1): cinfo->block_size = 16; cinfo->natural_order = jpeg_natural_order; cinfo->lim_Se = DCTSIZE2-1; break; default: ERREXIT4(cinfo, JERR_BAD_PROGRESSION, cinfo->Ss, cinfo->Se, cinfo->Ah, cinfo->Al); break; } /* We initialize DCT_scaled_size and min_DCT_scaled_size to block_size. * In the full decompressor, * this will be overridden by jpeg_calc_output_dimensions in jdmaster.c; * but in the transcoder, * jpeg_calc_output_dimensions is not used, so we must do it here. 
*/ cinfo->min_DCT_h_scaled_size = cinfo->block_size; cinfo->min_DCT_v_scaled_size = cinfo->block_size; /* Compute dimensions of components */ for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { compptr->DCT_h_scaled_size = cinfo->block_size; compptr->DCT_v_scaled_size = cinfo->block_size; /* Size in DCT blocks */ compptr->width_in_blocks = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * (long) compptr->h_samp_factor, (long) (cinfo->max_h_samp_factor * cinfo->block_size)); compptr->height_in_blocks = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * (long) compptr->v_samp_factor, (long) (cinfo->max_v_samp_factor * cinfo->block_size)); /* downsampled_width and downsampled_height will also be overridden by * jdmaster.c if we are doing full decompression. The transcoder library * doesn't use these values, but the calling application might. */ /* Size in samples */ compptr->downsampled_width = (JDIMENSION) jdiv_round_up((long) cinfo->image_width * (long) compptr->h_samp_factor, (long) cinfo->max_h_samp_factor); compptr->downsampled_height = (JDIMENSION) jdiv_round_up((long) cinfo->image_height * (long) compptr->v_samp_factor, (long) cinfo->max_v_samp_factor); /* Mark component needed, until color conversion says otherwise */ compptr->component_needed = TRUE; /* Mark no quantization table yet saved for component */ compptr->quant_table = NULL; } /* Compute number of fully interleaved MCU rows. 
*/ cinfo->total_iMCU_rows = (JDIMENSION) jdiv_round_up((long) cinfo->image_height, (long) (cinfo->max_v_samp_factor * cinfo->block_size)); /* Decide whether file contains multiple scans */ if (cinfo->comps_in_scan < cinfo->num_components || cinfo->progressive_mode) cinfo->inputctl->has_multiple_scans = TRUE; else cinfo->inputctl->has_multiple_scans = FALSE; } LOCAL(void) per_scan_setup (j_decompress_ptr cinfo) /* Do computations that are needed before processing a JPEG scan */ /* cinfo->comps_in_scan and cinfo->cur_comp_info[] were set from SOS marker */ { int ci, mcublks, tmp; jpeg_component_info *compptr; if (cinfo->comps_in_scan == 1) { /* Noninterleaved (single-component) scan */ compptr = cinfo->cur_comp_info[0]; /* Overall image size in MCUs */ cinfo->MCUs_per_row = compptr->width_in_blocks; cinfo->MCU_rows_in_scan = compptr->height_in_blocks; /* For noninterleaved scan, always one block per MCU */ compptr->MCU_width = 1; compptr->MCU_height = 1; compptr->MCU_blocks = 1; compptr->MCU_sample_width = compptr->DCT_h_scaled_size; compptr->last_col_width = 1; /* For noninterleaved scans, it is convenient to define last_row_height * as the number of block rows present in the last iMCU row. 
*/ tmp = (int) (compptr->height_in_blocks % compptr->v_samp_factor); if (tmp == 0) tmp = compptr->v_samp_factor; compptr->last_row_height = tmp; /* Prepare array describing MCU composition */ cinfo->blocks_in_MCU = 1; cinfo->MCU_membership[0] = 0; } else { /* Interleaved (multi-component) scan */ if (cinfo->comps_in_scan <= 0 || cinfo->comps_in_scan > MAX_COMPS_IN_SCAN) ERREXIT2(cinfo, JERR_COMPONENT_COUNT, cinfo->comps_in_scan, MAX_COMPS_IN_SCAN); /* Overall image size in MCUs */ cinfo->MCUs_per_row = (JDIMENSION) jdiv_round_up((long) cinfo->image_width, (long) (cinfo->max_h_samp_factor * cinfo->block_size)); cinfo->MCU_rows_in_scan = (JDIMENSION) jdiv_round_up((long) cinfo->image_height, (long) (cinfo->max_v_samp_factor * cinfo->block_size)); cinfo->blocks_in_MCU = 0; for (ci = 0; ci < cinfo->comps_in_scan; ci++) { compptr = cinfo->cur_comp_info[ci]; /* Sampling factors give # of blocks of component in each MCU */ compptr->MCU_width = compptr->h_samp_factor; compptr->MCU_height = compptr->v_samp_factor; compptr->MCU_blocks = compptr->MCU_width * compptr->MCU_height; compptr->MCU_sample_width = compptr->MCU_width * compptr->DCT_h_scaled_size; /* Figure number of non-dummy blocks in last MCU column & row */ tmp = (int) (compptr->width_in_blocks % compptr->MCU_width); if (tmp == 0) tmp = compptr->MCU_width; compptr->last_col_width = tmp; tmp = (int) (compptr->height_in_blocks % compptr->MCU_height); if (tmp == 0) tmp = compptr->MCU_height; compptr->last_row_height = tmp; /* Prepare array describing MCU composition */ mcublks = compptr->MCU_blocks; if (cinfo->blocks_in_MCU + mcublks > D_MAX_BLOCKS_IN_MCU) ERREXIT(cinfo, JERR_BAD_MCU_SIZE); while (mcublks-- > 0) { cinfo->MCU_membership[cinfo->blocks_in_MCU++] = ci; } } } } /* * Save away a copy of the Q-table referenced by each component present * in the current scan, unless already saved during a prior scan. 
* * In a multiple-scan JPEG file, the encoder could assign different components * the same Q-table slot number, but change table definitions between scans * so that each component uses a different Q-table. (The IJG encoder is not * currently capable of doing this, but other encoders might.) Since we want * to be able to dequantize all the components at the end of the file, this * means that we have to save away the table actually used for each component. * We do this by copying the table at the start of the first scan containing * the component. * The JPEG spec prohibits the encoder from changing the contents of a Q-table * slot between scans of a component using that slot. If the encoder does so * anyway, this decoder will simply use the Q-table values that were current * at the start of the first scan for the component. * * The decompressor output side looks only at the saved quant tables, * not at the current Q-table slots. */ LOCAL(void) latch_quant_tables (j_decompress_ptr cinfo) { int ci, qtblno; jpeg_component_info *compptr; JQUANT_TBL * qtbl; for (ci = 0; ci < cinfo->comps_in_scan; ci++) { compptr = cinfo->cur_comp_info[ci]; /* No work if we already saved Q-table for this component */ if (compptr->quant_table != NULL) continue; /* Make sure specified quantization table is present */ qtblno = compptr->quant_tbl_no; if (qtblno < 0 || qtblno >= NUM_QUANT_TBLS || cinfo->quant_tbl_ptrs[qtblno] == NULL) ERREXIT1(cinfo, JERR_NO_QUANT_TABLE, qtblno); /* OK, save away the quantization table */ qtbl = (JQUANT_TBL *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, SIZEOF(JQUANT_TBL)); MEMCOPY(qtbl, cinfo->quant_tbl_ptrs[qtblno], SIZEOF(JQUANT_TBL)); compptr->quant_table = qtbl; } } /* * Initialize the input modules to read a scan of compressed data. * The first call to this is done by jdmaster.c after initializing * the entire decompressor (during jpeg_start_decompress). * Subsequent calls come from consume_markers, below. 
*/ METHODDEF(void) start_input_pass (j_decompress_ptr cinfo) { per_scan_setup(cinfo); latch_quant_tables(cinfo); (*cinfo->entropy->start_pass) (cinfo); (*cinfo->coef->start_input_pass) (cinfo); cinfo->inputctl->consume_input = cinfo->coef->consume_data; } /* * Finish up after inputting a compressed-data scan. * This is called by the coefficient controller after it's read all * the expected data of the scan. */ METHODDEF(void) finish_input_pass (j_decompress_ptr cinfo) { cinfo->inputctl->consume_input = consume_markers; } /* * Read JPEG markers before, between, or after compressed-data scans. * Change state as necessary when a new scan is reached. * Return value is JPEG_SUSPENDED, JPEG_REACHED_SOS, or JPEG_REACHED_EOI. * * The consume_input method pointer points either here or to the * coefficient controller's consume_data routine, depending on whether * we are reading a compressed data segment or inter-segment markers. * * Note: This function should NOT return a pseudo SOS marker (with zero * component number) to the caller. A pseudo marker received by * read_markers is processed and then skipped for other markers. */ METHODDEF(int) consume_markers (j_decompress_ptr cinfo) { my_inputctl_ptr inputctl = (my_inputctl_ptr) cinfo->inputctl; int val; if (inputctl->pub.eoi_reached) /* After hitting EOI, read no further */ return JPEG_REACHED_EOI; for (;;) { /* Loop to pass pseudo SOS marker */ val = (*cinfo->marker->read_markers) (cinfo); switch (val) { case JPEG_REACHED_SOS: /* Found SOS */ if (inputctl->inheaders) { /* 1st SOS */ if (inputctl->inheaders == 1) initial_setup(cinfo); if (cinfo->comps_in_scan == 0) { /* pseudo SOS marker */ inputctl->inheaders = 2; break; } inputctl->inheaders = 0; /* Note: start_input_pass must be called by jdmaster.c * before any more input can be consumed. jdapimin.c is * responsible for enforcing this sequencing. */ } else { /* 2nd or later SOS marker */ if (! 
inputctl->pub.has_multiple_scans) ERREXIT(cinfo, JERR_EOI_EXPECTED); /* Oops, I wasn't expecting this! */ if (cinfo->comps_in_scan == 0) /* unexpected pseudo SOS marker */ break; start_input_pass(cinfo); } return val; case JPEG_REACHED_EOI: /* Found EOI */ inputctl->pub.eoi_reached = TRUE; if (inputctl->inheaders) { /* Tables-only datastream, apparently */ if (cinfo->marker->saw_SOF) ERREXIT(cinfo, JERR_SOF_NO_SOS); } else { /* Prevent infinite loop in coef ctlr's decompress_data routine * if user set output_scan_number larger than number of scans. */ if (cinfo->output_scan_number > cinfo->input_scan_number) cinfo->output_scan_number = cinfo->input_scan_number; } return val; case JPEG_SUSPENDED: return val; default: return val; } } } /* * Reset state to begin a fresh datastream. */ METHODDEF(void) reset_input_controller (j_decompress_ptr cinfo) { my_inputctl_ptr inputctl = (my_inputctl_ptr) cinfo->inputctl; inputctl->pub.consume_input = consume_markers; inputctl->pub.has_multiple_scans = FALSE; /* "unknown" would be better */ inputctl->pub.eoi_reached = FALSE; inputctl->inheaders = 1; /* Reset other modules */ (*cinfo->err->reset_error_mgr) ((j_common_ptr) cinfo); (*cinfo->marker->reset_marker_reader) (cinfo); /* Reset progression state -- would be cleaner if entropy decoder did this */ cinfo->coef_bits = NULL; } /* * Initialize the input controller module. * This is called only once, when the decompression object is created. 
*/ GLOBAL(void) jinit_input_controller (j_decompress_ptr cinfo) { my_inputctl_ptr inputctl; /* Create subobject in permanent pool */ inputctl = (my_inputctl_ptr) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, SIZEOF(my_input_controller)); cinfo->inputctl = (struct jpeg_input_controller *) inputctl; /* Initialize method pointers */ inputctl->pub.consume_input = consume_markers; inputctl->pub.reset_input_controller = reset_input_controller; inputctl->pub.start_input_pass = start_input_pass; inputctl->pub.finish_input_pass = finish_input_pass; /* Initialize state: can't use reset_input_controller since we don't * want to try to reset other modules yet. */ inputctl->pub.has_multiple_scans = FALSE; /* "unknown" would be better */ inputctl->pub.eoi_reached = FALSE; inputctl->inheaders = 1; }
gpl-2.0
suse110/linux-1
arch/x86/xen/spinlock.c
360
8484
/* * Split spinlock implementation out into its own file, so it can be * compiled in a FTRACE-compatible way. */ #include <linux/kernel_stat.h> #include <linux/spinlock.h> #include <linux/debugfs.h> #include <linux/log2.h> #include <linux/gfp.h> #include <linux/slab.h> #include <asm/paravirt.h> #include <xen/interface/xen.h> #include <xen/events.h> #include "xen-ops.h" #include "debugfs.h" enum xen_contention_stat { TAKEN_SLOW, TAKEN_SLOW_PICKUP, TAKEN_SLOW_SPURIOUS, RELEASED_SLOW, RELEASED_SLOW_KICKED, NR_CONTENTION_STATS }; #ifdef CONFIG_XEN_DEBUG_FS #define HISTO_BUCKETS 30 static struct xen_spinlock_stats { u32 contention_stats[NR_CONTENTION_STATS]; u32 histo_spin_blocked[HISTO_BUCKETS+1]; u64 time_blocked; } spinlock_stats; static u8 zero_stats; static inline void check_zero(void) { u8 ret; u8 old = READ_ONCE(zero_stats); if (unlikely(old)) { ret = cmpxchg(&zero_stats, old, 0); /* This ensures only one fellow resets the stat */ if (ret == old) memset(&spinlock_stats, 0, sizeof(spinlock_stats)); } } static inline void add_stats(enum xen_contention_stat var, u32 val) { check_zero(); spinlock_stats.contention_stats[var] += val; } static inline u64 spin_time_start(void) { return xen_clocksource_read(); } static void __spin_time_accum(u64 delta, u32 *array) { unsigned index = ilog2(delta); check_zero(); if (index < HISTO_BUCKETS) array[index]++; else array[HISTO_BUCKETS]++; } static inline void spin_time_accum_blocked(u64 start) { u32 delta = xen_clocksource_read() - start; __spin_time_accum(delta, spinlock_stats.histo_spin_blocked); spinlock_stats.time_blocked += delta; } #else /* !CONFIG_XEN_DEBUG_FS */ static inline void add_stats(enum xen_contention_stat var, u32 val) { } static inline u64 spin_time_start(void) { return 0; } static inline void spin_time_accum_blocked(u64 start) { } #endif /* CONFIG_XEN_DEBUG_FS */ struct xen_lock_waiting { struct arch_spinlock *lock; __ticket_t want; }; static DEFINE_PER_CPU(int, lock_kicker_irq) = -1; static 
DEFINE_PER_CPU(char *, irq_name); static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting); static cpumask_t waiting_cpus; static bool xen_pvspin = true; __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) { int irq = __this_cpu_read(lock_kicker_irq); struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting); int cpu = smp_processor_id(); u64 start; __ticket_t head; unsigned long flags; /* If kicker interrupts not initialized yet, just spin */ if (irq == -1) return; start = spin_time_start(); /* * Make sure an interrupt handler can't upset things in a * partially setup state. */ local_irq_save(flags); /* * We don't really care if we're overwriting some other * (lock,want) pair, as that would mean that we're currently * in an interrupt context, and the outer context had * interrupts enabled. That has already kicked the VCPU out * of xen_poll_irq(), so it will just return spuriously and * retry with newly setup (lock,want). * * The ordering protocol on this is that the "lock" pointer * may only be set non-NULL if the "want" ticket is correct. * If we're updating "want", we must first clear "lock". */ w->lock = NULL; smp_wmb(); w->want = want; smp_wmb(); w->lock = lock; /* This uses set_bit, which atomic and therefore a barrier */ cpumask_set_cpu(cpu, &waiting_cpus); add_stats(TAKEN_SLOW, 1); /* clear pending */ xen_clear_irq_pending(irq); /* Only check lock once pending cleared */ barrier(); /* * Mark entry to slowpath before doing the pickup test to make * sure we don't deadlock with an unlocker. 
*/ __ticket_enter_slowpath(lock); /* make sure enter_slowpath, which is atomic does not cross the read */ smp_mb__after_atomic(); /* * check again make sure it didn't become free while * we weren't looking */ head = READ_ONCE(lock->tickets.head); if (__tickets_equal(head, want)) { add_stats(TAKEN_SLOW_PICKUP, 1); goto out; } /* Allow interrupts while blocked */ local_irq_restore(flags); /* * If an interrupt happens here, it will leave the wakeup irq * pending, which will cause xen_poll_irq() to return * immediately. */ /* Block until irq becomes pending (or perhaps a spurious wakeup) */ xen_poll_irq(irq); add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq)); local_irq_save(flags); kstat_incr_irq_this_cpu(irq); out: cpumask_clear_cpu(cpu, &waiting_cpus); w->lock = NULL; local_irq_restore(flags); spin_time_accum_blocked(start); } PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning); static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next) { int cpu; add_stats(RELEASED_SLOW, 1); for_each_cpu(cpu, &waiting_cpus) { const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu); /* Make sure we read lock before want */ if (READ_ONCE(w->lock) == lock && READ_ONCE(w->want) == next) { add_stats(RELEASED_SLOW_KICKED, 1); xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); break; } } } static irqreturn_t dummy_handler(int irq, void *dev_id) { BUG(); return IRQ_HANDLED; } void xen_init_lock_cpu(int cpu) { int irq; char *name; if (!xen_pvspin) return; WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", cpu, per_cpu(lock_kicker_irq, cpu)); name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, cpu, dummy_handler, IRQF_PERCPU|IRQF_NOBALANCING, name, NULL); if (irq >= 0) { disable_irq(irq); /* make sure it's never delivered */ per_cpu(lock_kicker_irq, cpu) = irq; per_cpu(irq_name, cpu) = name; } printk("cpu %d spinlock event irq %d\n", cpu, irq); } void xen_uninit_lock_cpu(int cpu) { if 
(!xen_pvspin) return; unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); per_cpu(lock_kicker_irq, cpu) = -1; kfree(per_cpu(irq_name, cpu)); per_cpu(irq_name, cpu) = NULL; } /* * Our init of PV spinlocks is split in two init functions due to us * using paravirt patching and jump labels patching and having to do * all of this before SMP code is invoked. * * The paravirt patching needs to be done _before_ the alternative asm code * is started, otherwise we would not patch the core kernel code. */ void __init xen_init_spinlocks(void) { if (!xen_pvspin) { printk(KERN_DEBUG "xen: PV spinlocks disabled\n"); return; } printk(KERN_DEBUG "xen: PV spinlocks enabled\n"); pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning); pv_lock_ops.unlock_kick = xen_unlock_kick; } /* * While the jump_label init code needs to happend _after_ the jump labels are * enabled and before SMP is started. Hence we use pre-SMP initcall level * init. We cannot do it in xen_init_spinlocks as that is done before * jump labels are activated. 
*/ static __init int xen_init_spinlocks_jump(void) { if (!xen_pvspin) return 0; if (!xen_domain()) return 0; static_key_slow_inc(&paravirt_ticketlocks_enabled); return 0; } early_initcall(xen_init_spinlocks_jump); static __init int xen_parse_nopvspin(char *arg) { xen_pvspin = false; return 0; } early_param("xen_nopvspin", xen_parse_nopvspin); #ifdef CONFIG_XEN_DEBUG_FS static struct dentry *d_spin_debug; static int __init xen_spinlock_debugfs(void) { struct dentry *d_xen = xen_init_debugfs(); if (d_xen == NULL) return -ENOMEM; if (!xen_pvspin) return 0; d_spin_debug = debugfs_create_dir("spinlocks", d_xen); debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats); debugfs_create_u32("taken_slow", 0444, d_spin_debug, &spinlock_stats.contention_stats[TAKEN_SLOW]); debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug, &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]); debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug, &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]); debugfs_create_u32("released_slow", 0444, d_spin_debug, &spinlock_stats.contention_stats[RELEASED_SLOW]); debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug, &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]); debugfs_create_u64("time_blocked", 0444, d_spin_debug, &spinlock_stats.time_blocked); debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug, spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1); return 0; } fs_initcall(xen_spinlock_debugfs); #endif /* CONFIG_XEN_DEBUG_FS */
gpl-2.0
GAXUSXX/GalaxyS7edge_G935F_Kernel
drivers/firmware/efi/runtime-map.c
360
4638
/* * linux/drivers/efi/runtime-map.c * Copyright (C) 2013 Red Hat, Inc., Dave Young <dyoung@redhat.com> * * This file is released under the GPLv2. */ #include <linux/string.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/efi.h> #include <linux/slab.h> #include <asm/setup.h> static void *efi_runtime_map; static int nr_efi_runtime_map; static u32 efi_memdesc_size; struct efi_runtime_map_entry { efi_memory_desc_t md; struct kobject kobj; /* kobject for each entry */ }; static struct efi_runtime_map_entry **map_entries; struct map_attribute { struct attribute attr; ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf); }; static inline struct map_attribute *to_map_attr(struct attribute *attr) { return container_of(attr, struct map_attribute, attr); } static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf) { return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type); } #define EFI_RUNTIME_FIELD(var) entry->md.var #define EFI_RUNTIME_U64_ATTR_SHOW(name) \ static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \ { \ return snprintf(buf, PAGE_SIZE, "0x%llx\n", EFI_RUNTIME_FIELD(name)); \ } EFI_RUNTIME_U64_ATTR_SHOW(phys_addr); EFI_RUNTIME_U64_ATTR_SHOW(virt_addr); EFI_RUNTIME_U64_ATTR_SHOW(num_pages); EFI_RUNTIME_U64_ATTR_SHOW(attribute); static inline struct efi_runtime_map_entry *to_map_entry(struct kobject *kobj) { return container_of(kobj, struct efi_runtime_map_entry, kobj); } static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct efi_runtime_map_entry *entry = to_map_entry(kobj); struct map_attribute *map_attr = to_map_attr(attr); return map_attr->show(entry, buf); } static struct map_attribute map_type_attr = __ATTR_RO(type); static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr); static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr); static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages); 
static struct map_attribute map_attribute_attr = __ATTR_RO(attribute); /* * These are default attributes that are added for every memmap entry. */ static struct attribute *def_attrs[] = { &map_type_attr.attr, &map_phys_addr_attr.attr, &map_virt_addr_attr.attr, &map_num_pages_attr.attr, &map_attribute_attr.attr, NULL }; static const struct sysfs_ops map_attr_ops = { .show = map_attr_show, }; static void map_release(struct kobject *kobj) { struct efi_runtime_map_entry *entry; entry = to_map_entry(kobj); kfree(entry); } static struct kobj_type __refdata map_ktype = { .sysfs_ops = &map_attr_ops, .default_attrs = def_attrs, .release = map_release, }; static struct kset *map_kset; static struct efi_runtime_map_entry * add_sysfs_runtime_map_entry(struct kobject *kobj, int nr) { int ret; struct efi_runtime_map_entry *entry; if (!map_kset) { map_kset = kset_create_and_add("runtime-map", NULL, kobj); if (!map_kset) return ERR_PTR(-ENOMEM); } entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { kset_unregister(map_kset); return entry; } memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size, sizeof(efi_memory_desc_t)); kobject_init(&entry->kobj, &map_ktype); entry->kobj.kset = map_kset; ret = kobject_add(&entry->kobj, NULL, "%d", nr); if (ret) { kobject_put(&entry->kobj); kset_unregister(map_kset); return ERR_PTR(ret); } return entry; } int efi_get_runtime_map_size(void) { return nr_efi_runtime_map * efi_memdesc_size; } int efi_get_runtime_map_desc_size(void) { return efi_memdesc_size; } int efi_runtime_map_copy(void *buf, size_t bufsz) { size_t sz = efi_get_runtime_map_size(); if (sz > bufsz) sz = bufsz; memcpy(buf, efi_runtime_map, sz); return 0; } void efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) { efi_runtime_map = map; nr_efi_runtime_map = nr_entries; efi_memdesc_size = desc_size; } int __init efi_runtime_map_init(struct kobject *efi_kobj) { int i, j, ret = 0; struct efi_runtime_map_entry *entry; if (!efi_runtime_map) return 0; map_entries = 
kzalloc(nr_efi_runtime_map * sizeof(entry), GFP_KERNEL); if (!map_entries) { ret = -ENOMEM; goto out; } for (i = 0; i < nr_efi_runtime_map; i++) { entry = add_sysfs_runtime_map_entry(efi_kobj, i); if (IS_ERR(entry)) { ret = PTR_ERR(entry); goto out_add_entry; } *(map_entries + i) = entry; } return 0; out_add_entry: for (j = i - 1; j >= 0; j--) { entry = *(map_entries + j); kobject_put(&entry->kobj); } if (map_kset) kset_unregister(map_kset); out: return ret; }
gpl-2.0
ChameleonOS/android_kernel_amazon_bowser-common
sound/soc/kirkwood/kirkwood-dma.c
872
11231
/* * kirkwood-dma.c * * (c) 2010 Arnaud Patard <apatard@mandriva.com> * (c) 2010 Arnaud Patard <arnaud.patard@rtp-net.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/mbus.h> #include <sound/soc.h> #include "kirkwood.h" #define KIRKWOOD_RATES \ (SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000) #define KIRKWOOD_FORMATS \ (SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S32_LE) struct kirkwood_dma_priv { struct snd_pcm_substream *play_stream; struct snd_pcm_substream *rec_stream; struct kirkwood_dma_data *data; }; static struct snd_pcm_hardware kirkwood_dma_snd_hw = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE), .formats = KIRKWOOD_FORMATS, .rates = KIRKWOOD_RATES, .rate_min = 44100, .rate_max = 96000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = KIRKWOOD_SND_MAX_PERIOD_BYTES * KIRKWOOD_SND_MAX_PERIODS, .period_bytes_min = KIRKWOOD_SND_MIN_PERIOD_BYTES, .period_bytes_max = KIRKWOOD_SND_MAX_PERIOD_BYTES, .periods_min = KIRKWOOD_SND_MIN_PERIODS, .periods_max = KIRKWOOD_SND_MAX_PERIODS, .fifo_size = 0, }; static u64 kirkwood_dma_dmamask = 0xFFFFFFFFUL; static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id) { struct kirkwood_dma_priv *prdata = dev_id; struct kirkwood_dma_data *priv = prdata->data; unsigned long mask, status, cause; mask = readl(priv->io + KIRKWOOD_INT_MASK); status = readl(priv->io + KIRKWOOD_INT_CAUSE) & mask; cause = readl(priv->io + KIRKWOOD_ERR_CAUSE); if (unlikely(cause)) { 
printk(KERN_WARNING "%s: got err interrupt 0x%lx\n", __func__, cause); writel(cause, priv->io + KIRKWOOD_ERR_CAUSE); return IRQ_HANDLED; } /* we've enabled only bytes interrupts ... */ if (status & ~(KIRKWOOD_INT_CAUSE_PLAY_BYTES | \ KIRKWOOD_INT_CAUSE_REC_BYTES)) { printk(KERN_WARNING "%s: unexpected interrupt %lx\n", __func__, status); return IRQ_NONE; } /* ack int */ writel(status, priv->io + KIRKWOOD_INT_CAUSE); if (status & KIRKWOOD_INT_CAUSE_PLAY_BYTES) snd_pcm_period_elapsed(prdata->play_stream); if (status & KIRKWOOD_INT_CAUSE_REC_BYTES) snd_pcm_period_elapsed(prdata->rec_stream); return IRQ_HANDLED; } static void kirkwood_dma_conf_mbus_windows(void __iomem *base, int win, unsigned long dma, struct mbus_dram_target_info *dram) { int i; /* First disable and clear windows */ writel(0, base + KIRKWOOD_AUDIO_WIN_CTRL_REG(win)); writel(0, base + KIRKWOOD_AUDIO_WIN_BASE_REG(win)); /* try to find matching cs for current dma address */ for (i = 0; i < dram->num_cs; i++) { struct mbus_dram_window *cs = dram->cs + i; if ((cs->base & 0xffff0000) < (dma & 0xffff0000)) { writel(cs->base & 0xffff0000, base + KIRKWOOD_AUDIO_WIN_BASE_REG(win)); writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, base + KIRKWOOD_AUDIO_WIN_CTRL_REG(win)); } } } static int kirkwood_dma_open(struct snd_pcm_substream *substream) { int err; struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; struct snd_soc_platform *platform = soc_runtime->platform; struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai; struct kirkwood_dma_data *priv; struct kirkwood_dma_priv *prdata = snd_soc_platform_get_drvdata(platform); unsigned long addr; priv = snd_soc_dai_get_dma_data(cpu_dai, substream); snd_soc_set_runtime_hwparams(substream, &kirkwood_dma_snd_hw); /* Ensure that all constraints linked to dma burst are fulfilled */ err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 
priv->burst * 2, KIRKWOOD_AUDIO_BUF_MAX-1); if (err < 0) return err; err = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, priv->burst); if (err < 0) return err; err = snd_pcm_hw_constraint_step(substream->runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, priv->burst); if (err < 0) return err; if (prdata == NULL) { prdata = kzalloc(sizeof(struct kirkwood_dma_priv), GFP_KERNEL); if (prdata == NULL) return -ENOMEM; prdata->data = priv; err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED, "kirkwood-i2s", prdata); if (err) { kfree(prdata); return -EBUSY; } snd_soc_platform_set_drvdata(platform, prdata); /* * Enable Error interrupts. We're only ack'ing them but * it's useful for diagnostics */ writel((unsigned long)-1, priv->io + KIRKWOOD_ERR_MASK); } addr = virt_to_phys(substream->dma_buffer.area); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { prdata->play_stream = substream; kirkwood_dma_conf_mbus_windows(priv->io, KIRKWOOD_PLAYBACK_WIN, addr, priv->dram); } else { prdata->rec_stream = substream; kirkwood_dma_conf_mbus_windows(priv->io, KIRKWOOD_RECORD_WIN, addr, priv->dram); } return 0; } static int kirkwood_dma_close(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai; struct snd_soc_platform *platform = soc_runtime->platform; struct kirkwood_dma_priv *prdata = snd_soc_platform_get_drvdata(platform); struct kirkwood_dma_data *priv; priv = snd_soc_dai_get_dma_data(cpu_dai, substream); if (!prdata || !priv) return 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) prdata->play_stream = NULL; else prdata->rec_stream = NULL; if (!prdata->play_stream && !prdata->rec_stream) { writel(0, priv->io + KIRKWOOD_ERR_MASK); free_irq(priv->irq, prdata); kfree(prdata); snd_soc_platform_set_drvdata(platform, NULL); } return 0; } static int kirkwood_dma_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct 
snd_pcm_runtime *runtime = substream->runtime; snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); runtime->dma_bytes = params_buffer_bytes(params); return 0; } static int kirkwood_dma_hw_free(struct snd_pcm_substream *substream) { snd_pcm_set_runtime_buffer(substream, NULL); return 0; } static int kirkwood_dma_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai; struct kirkwood_dma_data *priv; unsigned long size, count; priv = snd_soc_dai_get_dma_data(cpu_dai, substream); /* compute buffer size in term of "words" as requested in specs */ size = frames_to_bytes(runtime, runtime->buffer_size); size = (size>>2)-1; count = snd_pcm_lib_period_bytes(substream); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { writel(count, priv->io + KIRKWOOD_PLAY_BYTE_INT_COUNT); writel(runtime->dma_addr, priv->io + KIRKWOOD_PLAY_BUF_ADDR); writel(size, priv->io + KIRKWOOD_PLAY_BUF_SIZE); } else { writel(count, priv->io + KIRKWOOD_REC_BYTE_INT_COUNT); writel(runtime->dma_addr, priv->io + KIRKWOOD_REC_BUF_ADDR); writel(size, priv->io + KIRKWOOD_REC_BUF_SIZE); } return 0; } static snd_pcm_uframes_t kirkwood_dma_pointer(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai; struct kirkwood_dma_data *priv; snd_pcm_uframes_t count; priv = snd_soc_dai_get_dma_data(cpu_dai, substream); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) count = bytes_to_frames(substream->runtime, readl(priv->io + KIRKWOOD_PLAY_BYTE_COUNT)); else count = bytes_to_frames(substream->runtime, readl(priv->io + KIRKWOOD_REC_BYTE_COUNT)); return count; } struct snd_pcm_ops kirkwood_dma_ops = { .open = kirkwood_dma_open, .close = kirkwood_dma_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = kirkwood_dma_hw_params, .hw_free = 
kirkwood_dma_hw_free, .prepare = kirkwood_dma_prepare, .pointer = kirkwood_dma_pointer, }; static int kirkwood_dma_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) { struct snd_pcm_substream *substream = pcm->streams[stream].substream; struct snd_dma_buffer *buf = &substream->dma_buffer; size_t size = kirkwood_dma_snd_hw.buffer_bytes_max; buf->dev.type = SNDRV_DMA_TYPE_DEV; buf->dev.dev = pcm->card->dev; buf->area = dma_alloc_coherent(pcm->card->dev, size, &buf->addr, GFP_KERNEL); if (!buf->area) return -ENOMEM; buf->bytes = size; buf->private_data = NULL; return 0; } static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd) { struct snd_card *card = rtd->card->snd_card; struct snd_soc_dai *dai = rtd->cpu_dai; struct snd_pcm *pcm = rtd->pcm; int ret; if (!card->dev->dma_mask) card->dev->dma_mask = &kirkwood_dma_dmamask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = 0xffffffff; if (dai->driver->playback.channels_min) { ret = kirkwood_dma_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK); if (ret) return ret; } if (dai->driver->capture.channels_min) { ret = kirkwood_dma_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_CAPTURE); if (ret) return ret; } return 0; } static void kirkwood_dma_free_dma_buffers(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; struct snd_dma_buffer *buf; int stream; for (stream = 0; stream < 2; stream++) { substream = pcm->streams[stream].substream; if (!substream) continue; buf = &substream->dma_buffer; if (!buf->area) continue; dma_free_coherent(pcm->card->dev, buf->bytes, buf->area, buf->addr); buf->area = NULL; } } static struct snd_soc_platform_driver kirkwood_soc_platform = { .ops = &kirkwood_dma_ops, .pcm_new = kirkwood_dma_new, .pcm_free = kirkwood_dma_free_dma_buffers, }; static int __devinit kirkwood_soc_platform_probe(struct platform_device *pdev) { return snd_soc_register_platform(&pdev->dev, &kirkwood_soc_platform); } static int __devexit kirkwood_soc_platform_remove(struct 
platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static struct platform_driver kirkwood_pcm_driver = { .driver = { .name = "kirkwood-pcm-audio", .owner = THIS_MODULE, }, .probe = kirkwood_soc_platform_probe, .remove = __devexit_p(kirkwood_soc_platform_remove), }; static int __init kirkwood_pcm_init(void) { return platform_driver_register(&kirkwood_pcm_driver); } module_init(kirkwood_pcm_init); static void __exit kirkwood_pcm_exit(void) { platform_driver_unregister(&kirkwood_pcm_driver); } module_exit(kirkwood_pcm_exit); MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>"); MODULE_DESCRIPTION("Marvell Kirkwood Audio DMA module"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:kirkwood-pcm-audio");
gpl-2.0
abhishekr700/Nemesis_Kernel
arch/arm/mach-davinci/devices-da8xx.c
872
25022
/* * DA8XX/OMAP L1XX platform device data * * Copyright (c) 2007-2009, MontaVista Software, Inc. <source@mvista.com> * Derived from code that was: * Copyright (C) 2006 Komal Shah <komal_shah802003@yahoo.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-contiguous.h> #include <linux/serial_8250.h> #include <linux/ahci_platform.h> #include <linux/clk.h> #include <linux/reboot.h> #include <mach/cputype.h> #include <mach/common.h> #include <mach/time.h> #include <mach/da8xx.h> #include <mach/cpuidle.h> #include <mach/sram.h> #include "clock.h" #include "asp.h" #define DA8XX_TPCC_BASE 0x01c00000 #define DA8XX_TPTC0_BASE 0x01c08000 #define DA8XX_TPTC1_BASE 0x01c08400 #define DA8XX_WDOG_BASE 0x01c21000 /* DA8XX_TIMER64P1_BASE */ #define DA8XX_I2C0_BASE 0x01c22000 #define DA8XX_RTC_BASE 0x01c23000 #define DA8XX_PRUSS_MEM_BASE 0x01c30000 #define DA8XX_MMCSD0_BASE 0x01c40000 #define DA8XX_SPI0_BASE 0x01c41000 #define DA830_SPI1_BASE 0x01e12000 #define DA8XX_LCD_CNTRL_BASE 0x01e13000 #define DA850_SATA_BASE 0x01e18000 #define DA850_MMCSD1_BASE 0x01e1b000 #define DA8XX_EMAC_CPPI_PORT_BASE 0x01e20000 #define DA8XX_EMAC_CPGMACSS_BASE 0x01e22000 #define DA8XX_EMAC_CPGMAC_BASE 0x01e23000 #define DA8XX_EMAC_MDIO_BASE 0x01e24000 #define DA8XX_I2C1_BASE 0x01e28000 #define DA850_TPCC1_BASE 0x01e30000 #define DA850_TPTC2_BASE 0x01e38000 #define DA850_SPI1_BASE 0x01f0e000 #define DA8XX_DDR2_CTL_BASE 0xb0000000 #define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000 #define DA8XX_EMAC_MOD_REG_OFFSET 0x2000 #define DA8XX_EMAC_RAM_OFFSET 0x0000 #define DA8XX_EMAC_CTRL_RAM_SIZE SZ_8K #define DA8XX_DMA_SPI0_RX EDMA_CTLR_CHAN(0, 14) #define DA8XX_DMA_SPI0_TX EDMA_CTLR_CHAN(0, 15) #define DA8XX_DMA_MMCSD0_RX EDMA_CTLR_CHAN(0, 
16) #define DA8XX_DMA_MMCSD0_TX EDMA_CTLR_CHAN(0, 17) #define DA8XX_DMA_SPI1_RX EDMA_CTLR_CHAN(0, 18) #define DA8XX_DMA_SPI1_TX EDMA_CTLR_CHAN(0, 19) #define DA850_DMA_MMCSD1_RX EDMA_CTLR_CHAN(1, 28) #define DA850_DMA_MMCSD1_TX EDMA_CTLR_CHAN(1, 29) void __iomem *da8xx_syscfg0_base; void __iomem *da8xx_syscfg1_base; static struct plat_serial8250_port da8xx_serial_pdata[] = { { .mapbase = DA8XX_UART0_BASE, .irq = IRQ_DA8XX_UARTINT0, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, .regshift = 2, }, { .mapbase = DA8XX_UART1_BASE, .irq = IRQ_DA8XX_UARTINT1, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, .regshift = 2, }, { .mapbase = DA8XX_UART2_BASE, .irq = IRQ_DA8XX_UARTINT2, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, .regshift = 2, }, { .flags = 0, }, }; struct platform_device da8xx_serial_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = da8xx_serial_pdata, }, }; static const s8 da8xx_queue_tc_mapping[][2] = { /* {event queue no, TC no} */ {0, 0}, {1, 1}, {-1, -1} }; static const s8 da8xx_queue_priority_mapping[][2] = { /* {event queue no, Priority} */ {0, 3}, {1, 7}, {-1, -1} }; static const s8 da850_queue_tc_mapping[][2] = { /* {event queue no, TC no} */ {0, 0}, {-1, -1} }; static const s8 da850_queue_priority_mapping[][2] = { /* {event queue no, Priority} */ {0, 3}, {-1, -1} }; static struct edma_soc_info da830_edma_cc0_info = { .n_channel = 32, .n_region = 4, .n_slot = 128, .n_tc = 2, .n_cc = 1, .queue_tc_mapping = da8xx_queue_tc_mapping, .queue_priority_mapping = da8xx_queue_priority_mapping, .default_queue = EVENTQ_1, }; static struct edma_soc_info *da830_edma_info[EDMA_MAX_CC] = { &da830_edma_cc0_info, }; static struct edma_soc_info da850_edma_cc_info[] = { { .n_channel = 32, .n_region = 4, .n_slot = 128, .n_tc = 2, .n_cc = 1, .queue_tc_mapping = da8xx_queue_tc_mapping, .queue_priority_mapping = da8xx_queue_priority_mapping, 
.default_queue = EVENTQ_1, }, { .n_channel = 32, .n_region = 4, .n_slot = 128, .n_tc = 1, .n_cc = 1, .queue_tc_mapping = da850_queue_tc_mapping, .queue_priority_mapping = da850_queue_priority_mapping, .default_queue = EVENTQ_0, }, }; static struct edma_soc_info *da850_edma_info[EDMA_MAX_CC] = { &da850_edma_cc_info[0], &da850_edma_cc_info[1], }; static struct resource da830_edma_resources[] = { { .name = "edma_cc0", .start = DA8XX_TPCC_BASE, .end = DA8XX_TPCC_BASE + SZ_32K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc0", .start = DA8XX_TPTC0_BASE, .end = DA8XX_TPTC0_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc1", .start = DA8XX_TPTC1_BASE, .end = DA8XX_TPTC1_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma0", .start = IRQ_DA8XX_CCINT0, .flags = IORESOURCE_IRQ, }, { .name = "edma0_err", .start = IRQ_DA8XX_CCERRINT, .flags = IORESOURCE_IRQ, }, }; static struct resource da850_edma_resources[] = { { .name = "edma_cc0", .start = DA8XX_TPCC_BASE, .end = DA8XX_TPCC_BASE + SZ_32K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc0", .start = DA8XX_TPTC0_BASE, .end = DA8XX_TPTC0_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc1", .start = DA8XX_TPTC1_BASE, .end = DA8XX_TPTC1_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_cc1", .start = DA850_TPCC1_BASE, .end = DA850_TPCC1_BASE + SZ_32K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc2", .start = DA850_TPTC2_BASE, .end = DA850_TPTC2_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma0", .start = IRQ_DA8XX_CCINT0, .flags = IORESOURCE_IRQ, }, { .name = "edma0_err", .start = IRQ_DA8XX_CCERRINT, .flags = IORESOURCE_IRQ, }, { .name = "edma1", .start = IRQ_DA850_CCINT1, .flags = IORESOURCE_IRQ, }, { .name = "edma1_err", .start = IRQ_DA850_CCERRINT1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device da830_edma_device = { .name = "edma", .id = -1, .dev = { .platform_data = da830_edma_info, }, .num_resources = 
ARRAY_SIZE(da830_edma_resources), .resource = da830_edma_resources, }; static struct platform_device da850_edma_device = { .name = "edma", .id = -1, .dev = { .platform_data = da850_edma_info, }, .num_resources = ARRAY_SIZE(da850_edma_resources), .resource = da850_edma_resources, }; int __init da830_register_edma(struct edma_rsv_info *rsv) { da830_edma_cc0_info.rsv = rsv; return platform_device_register(&da830_edma_device); } int __init da850_register_edma(struct edma_rsv_info *rsv[2]) { if (rsv) { da850_edma_cc_info[0].rsv = rsv[0]; da850_edma_cc_info[1].rsv = rsv[1]; } return platform_device_register(&da850_edma_device); } static struct resource da8xx_i2c_resources0[] = { { .start = DA8XX_I2C0_BASE, .end = DA8XX_I2C0_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_DA8XX_I2CINT0, .end = IRQ_DA8XX_I2CINT0, .flags = IORESOURCE_IRQ, }, }; static struct platform_device da8xx_i2c_device0 = { .name = "i2c_davinci", .id = 1, .num_resources = ARRAY_SIZE(da8xx_i2c_resources0), .resource = da8xx_i2c_resources0, }; static struct resource da8xx_i2c_resources1[] = { { .start = DA8XX_I2C1_BASE, .end = DA8XX_I2C1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_DA8XX_I2CINT1, .end = IRQ_DA8XX_I2CINT1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device da8xx_i2c_device1 = { .name = "i2c_davinci", .id = 2, .num_resources = ARRAY_SIZE(da8xx_i2c_resources1), .resource = da8xx_i2c_resources1, }; int __init da8xx_register_i2c(int instance, struct davinci_i2c_platform_data *pdata) { struct platform_device *pdev; if (instance == 0) pdev = &da8xx_i2c_device0; else if (instance == 1) pdev = &da8xx_i2c_device1; else return -EINVAL; pdev->dev.platform_data = pdata; return platform_device_register(pdev); } static struct resource da8xx_watchdog_resources[] = { { .start = DA8XX_WDOG_BASE, .end = DA8XX_WDOG_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device da8xx_wdt_device = { .name = "watchdog", .id = -1, .num_resources = 
ARRAY_SIZE(da8xx_watchdog_resources), .resource = da8xx_watchdog_resources, }; void da8xx_restart(enum reboot_mode mode, const char *cmd) { struct device *dev; dev = bus_find_device_by_name(&platform_bus_type, NULL, "watchdog"); if (!dev) { pr_err("%s: failed to find watchdog device\n", __func__); return; } davinci_watchdog_reset(to_platform_device(dev)); } int __init da8xx_register_watchdog(void) { return platform_device_register(&da8xx_wdt_device); } static struct resource da8xx_emac_resources[] = { { .start = DA8XX_EMAC_CPPI_PORT_BASE, .end = DA8XX_EMAC_CPPI_PORT_BASE + SZ_16K - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_DA8XX_C0_RX_THRESH_PULSE, .end = IRQ_DA8XX_C0_RX_THRESH_PULSE, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_C0_RX_PULSE, .end = IRQ_DA8XX_C0_RX_PULSE, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_C0_TX_PULSE, .end = IRQ_DA8XX_C0_TX_PULSE, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_C0_MISC_PULSE, .end = IRQ_DA8XX_C0_MISC_PULSE, .flags = IORESOURCE_IRQ, }, }; struct emac_platform_data da8xx_emac_pdata = { .ctrl_reg_offset = DA8XX_EMAC_CTRL_REG_OFFSET, .ctrl_mod_reg_offset = DA8XX_EMAC_MOD_REG_OFFSET, .ctrl_ram_offset = DA8XX_EMAC_RAM_OFFSET, .ctrl_ram_size = DA8XX_EMAC_CTRL_RAM_SIZE, .version = EMAC_VERSION_2, }; static struct platform_device da8xx_emac_device = { .name = "davinci_emac", .id = 1, .dev = { .platform_data = &da8xx_emac_pdata, }, .num_resources = ARRAY_SIZE(da8xx_emac_resources), .resource = da8xx_emac_resources, }; static struct resource da8xx_mdio_resources[] = { { .start = DA8XX_EMAC_MDIO_BASE, .end = DA8XX_EMAC_MDIO_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device da8xx_mdio_device = { .name = "davinci_mdio", .id = 0, .num_resources = ARRAY_SIZE(da8xx_mdio_resources), .resource = da8xx_mdio_resources, }; int __init da8xx_register_emac(void) { int ret; ret = platform_device_register(&da8xx_mdio_device); if (ret < 0) return ret; ret = platform_device_register(&da8xx_emac_device); if 
(ret < 0) return ret; ret = clk_add_alias(NULL, dev_name(&da8xx_mdio_device.dev), NULL, &da8xx_emac_device.dev); return ret; } static struct resource da830_mcasp1_resources[] = { { .name = "mcasp1", .start = DAVINCI_DA830_MCASP1_REG_BASE, .end = DAVINCI_DA830_MCASP1_REG_BASE + (SZ_1K * 12) - 1, .flags = IORESOURCE_MEM, }, /* TX event */ { .start = DAVINCI_DA830_DMA_MCASP1_AXEVT, .end = DAVINCI_DA830_DMA_MCASP1_AXEVT, .flags = IORESOURCE_DMA, }, /* RX event */ { .start = DAVINCI_DA830_DMA_MCASP1_AREVT, .end = DAVINCI_DA830_DMA_MCASP1_AREVT, .flags = IORESOURCE_DMA, }, }; static struct platform_device da830_mcasp1_device = { .name = "davinci-mcasp", .id = 1, .num_resources = ARRAY_SIZE(da830_mcasp1_resources), .resource = da830_mcasp1_resources, }; static struct resource da850_mcasp_resources[] = { { .name = "mcasp", .start = DAVINCI_DA8XX_MCASP0_REG_BASE, .end = DAVINCI_DA8XX_MCASP0_REG_BASE + (SZ_1K * 12) - 1, .flags = IORESOURCE_MEM, }, /* TX event */ { .start = DAVINCI_DA8XX_DMA_MCASP0_AXEVT, .end = DAVINCI_DA8XX_DMA_MCASP0_AXEVT, .flags = IORESOURCE_DMA, }, /* RX event */ { .start = DAVINCI_DA8XX_DMA_MCASP0_AREVT, .end = DAVINCI_DA8XX_DMA_MCASP0_AREVT, .flags = IORESOURCE_DMA, }, }; static struct platform_device da850_mcasp_device = { .name = "davinci-mcasp", .id = 0, .num_resources = ARRAY_SIZE(da850_mcasp_resources), .resource = da850_mcasp_resources, }; void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata) { /* DA830/OMAP-L137 has 3 instances of McASP */ if (cpu_is_davinci_da830() && id == 1) { da830_mcasp1_device.dev.platform_data = pdata; platform_device_register(&da830_mcasp1_device); } else if (cpu_is_davinci_da850()) { da850_mcasp_device.dev.platform_data = pdata; platform_device_register(&da850_mcasp_device); } } static struct resource da8xx_pruss_resources[] = { { .start = DA8XX_PRUSS_MEM_BASE, .end = DA8XX_PRUSS_MEM_BASE + 0xFFFF, .flags = IORESOURCE_MEM, }, { .start = IRQ_DA8XX_EVTOUT0, .end = IRQ_DA8XX_EVTOUT0, .flags = 
IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_EVTOUT1, .end = IRQ_DA8XX_EVTOUT1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_EVTOUT2, .end = IRQ_DA8XX_EVTOUT2, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_EVTOUT3, .end = IRQ_DA8XX_EVTOUT3, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_EVTOUT4, .end = IRQ_DA8XX_EVTOUT4, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_EVTOUT5, .end = IRQ_DA8XX_EVTOUT5, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_EVTOUT6, .end = IRQ_DA8XX_EVTOUT6, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_EVTOUT7, .end = IRQ_DA8XX_EVTOUT7, .flags = IORESOURCE_IRQ, }, }; static struct uio_pruss_pdata da8xx_uio_pruss_pdata = { .pintc_base = 0x4000, }; static struct platform_device da8xx_uio_pruss_dev = { .name = "pruss_uio", .id = -1, .num_resources = ARRAY_SIZE(da8xx_pruss_resources), .resource = da8xx_pruss_resources, .dev = { .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &da8xx_uio_pruss_pdata, } }; int __init da8xx_register_uio_pruss(void) { da8xx_uio_pruss_pdata.sram_pool = sram_get_gen_pool(); return platform_device_register(&da8xx_uio_pruss_dev); } static struct lcd_ctrl_config lcd_cfg = { .panel_shade = COLOR_ACTIVE, .bpp = 16, }; struct da8xx_lcdc_platform_data sharp_lcd035q3dg01_pdata = { .manu_name = "sharp", .controller_data = &lcd_cfg, .type = "Sharp_LCD035Q3DG01", }; struct da8xx_lcdc_platform_data sharp_lk043t1dg01_pdata = { .manu_name = "sharp", .controller_data = &lcd_cfg, .type = "Sharp_LK043T1DG01", }; static struct resource da8xx_lcdc_resources[] = { [0] = { /* registers */ .start = DA8XX_LCD_CNTRL_BASE, .end = DA8XX_LCD_CNTRL_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { /* interrupt */ .start = IRQ_DA8XX_LCDINT, .end = IRQ_DA8XX_LCDINT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device da8xx_lcdc_device = { .name = "da8xx_lcdc", .id = 0, .num_resources = ARRAY_SIZE(da8xx_lcdc_resources), .resource = da8xx_lcdc_resources, }; int __init da8xx_register_lcdc(struct 
da8xx_lcdc_platform_data *pdata) { da8xx_lcdc_device.dev.platform_data = pdata; return platform_device_register(&da8xx_lcdc_device); } static struct resource da8xx_mmcsd0_resources[] = { { /* registers */ .start = DA8XX_MMCSD0_BASE, .end = DA8XX_MMCSD0_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { /* interrupt */ .start = IRQ_DA8XX_MMCSDINT0, .end = IRQ_DA8XX_MMCSDINT0, .flags = IORESOURCE_IRQ, }, { /* DMA RX */ .start = DA8XX_DMA_MMCSD0_RX, .end = DA8XX_DMA_MMCSD0_RX, .flags = IORESOURCE_DMA, }, { /* DMA TX */ .start = DA8XX_DMA_MMCSD0_TX, .end = DA8XX_DMA_MMCSD0_TX, .flags = IORESOURCE_DMA, }, }; static struct platform_device da8xx_mmcsd0_device = { .name = "da830-mmc", .id = 0, .num_resources = ARRAY_SIZE(da8xx_mmcsd0_resources), .resource = da8xx_mmcsd0_resources, }; int __init da8xx_register_mmcsd0(struct davinci_mmc_config *config) { da8xx_mmcsd0_device.dev.platform_data = config; return platform_device_register(&da8xx_mmcsd0_device); } #ifdef CONFIG_ARCH_DAVINCI_DA850 static struct resource da850_mmcsd1_resources[] = { { /* registers */ .start = DA850_MMCSD1_BASE, .end = DA850_MMCSD1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { /* interrupt */ .start = IRQ_DA850_MMCSDINT0_1, .end = IRQ_DA850_MMCSDINT0_1, .flags = IORESOURCE_IRQ, }, { /* DMA RX */ .start = DA850_DMA_MMCSD1_RX, .end = DA850_DMA_MMCSD1_RX, .flags = IORESOURCE_DMA, }, { /* DMA TX */ .start = DA850_DMA_MMCSD1_TX, .end = DA850_DMA_MMCSD1_TX, .flags = IORESOURCE_DMA, }, }; static struct platform_device da850_mmcsd1_device = { .name = "da830-mmc", .id = 1, .num_resources = ARRAY_SIZE(da850_mmcsd1_resources), .resource = da850_mmcsd1_resources, }; int __init da850_register_mmcsd1(struct davinci_mmc_config *config) { da850_mmcsd1_device.dev.platform_data = config; return platform_device_register(&da850_mmcsd1_device); } #endif static struct resource da8xx_rproc_resources[] = { { /* DSP boot address */ .start = DA8XX_SYSCFG0_BASE + DA8XX_HOST1CFG_REG, .end = DA8XX_SYSCFG0_BASE + DA8XX_HOST1CFG_REG 
+ 3, .flags = IORESOURCE_MEM, }, { /* DSP interrupt registers */ .start = DA8XX_SYSCFG0_BASE + DA8XX_CHIPSIG_REG, .end = DA8XX_SYSCFG0_BASE + DA8XX_CHIPSIG_REG + 7, .flags = IORESOURCE_MEM, }, { /* dsp irq */ .start = IRQ_DA8XX_CHIPINT0, .end = IRQ_DA8XX_CHIPINT0, .flags = IORESOURCE_IRQ, }, }; static struct platform_device da8xx_dsp = { .name = "davinci-rproc", .dev = { .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(da8xx_rproc_resources), .resource = da8xx_rproc_resources, }; #if IS_ENABLED(CONFIG_DA8XX_REMOTEPROC) static phys_addr_t rproc_base __initdata; static unsigned long rproc_size __initdata; static int __init early_rproc_mem(char *p) { char *endp; if (p == NULL) return 0; rproc_size = memparse(p, &endp); if (*endp == '@') rproc_base = memparse(endp + 1, NULL); return 0; } early_param("rproc_mem", early_rproc_mem); void __init da8xx_rproc_reserve_cma(void) { int ret; if (!rproc_base || !rproc_size) { pr_err("%s: 'rproc_mem=nn@address' badly specified\n" " 'nn' and 'address' must both be non-zero\n", __func__); return; } pr_info("%s: reserving 0x%lx @ 0x%lx...\n", __func__, rproc_size, (unsigned long)rproc_base); ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0); if (ret) pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret); } #else void __init da8xx_rproc_reserve_cma(void) { } #endif int __init da8xx_register_rproc(void) { int ret; ret = platform_device_register(&da8xx_dsp); if (ret) pr_err("%s: can't register DSP device: %d\n", __func__, ret); return ret; }; static struct resource da8xx_rtc_resources[] = { { .start = DA8XX_RTC_BASE, .end = DA8XX_RTC_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { /* timer irq */ .start = IRQ_DA8XX_RTC, .end = IRQ_DA8XX_RTC, .flags = IORESOURCE_IRQ, }, { /* alarm irq */ .start = IRQ_DA8XX_RTC, .end = IRQ_DA8XX_RTC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device da8xx_rtc_device = { .name = "da830-rtc", .id = -1, .num_resources = 
ARRAY_SIZE(da8xx_rtc_resources), .resource = da8xx_rtc_resources, }; int da8xx_register_rtc(void) { int ret; ret = platform_device_register(&da8xx_rtc_device); if (!ret) /* Atleast on DA850, RTC is a wakeup source */ device_init_wakeup(&da8xx_rtc_device.dev, true); return ret; } static void __iomem *da8xx_ddr2_ctlr_base; void __iomem * __init da8xx_get_mem_ctlr(void) { if (da8xx_ddr2_ctlr_base) return da8xx_ddr2_ctlr_base; da8xx_ddr2_ctlr_base = ioremap(DA8XX_DDR2_CTL_BASE, SZ_32K); if (!da8xx_ddr2_ctlr_base) pr_warn("%s: Unable to map DDR2 controller", __func__); return da8xx_ddr2_ctlr_base; } static struct resource da8xx_cpuidle_resources[] = { { .start = DA8XX_DDR2_CTL_BASE, .end = DA8XX_DDR2_CTL_BASE + SZ_32K - 1, .flags = IORESOURCE_MEM, }, }; /* DA8XX devices support DDR2 power down */ static struct davinci_cpuidle_config da8xx_cpuidle_pdata = { .ddr2_pdown = 1, }; static struct platform_device da8xx_cpuidle_device = { .name = "cpuidle-davinci", .num_resources = ARRAY_SIZE(da8xx_cpuidle_resources), .resource = da8xx_cpuidle_resources, .dev = { .platform_data = &da8xx_cpuidle_pdata, }, }; int __init da8xx_register_cpuidle(void) { da8xx_cpuidle_pdata.ddr2_ctlr_base = da8xx_get_mem_ctlr(); return platform_device_register(&da8xx_cpuidle_device); } static struct resource da8xx_spi0_resources[] = { [0] = { .start = DA8XX_SPI0_BASE, .end = DA8XX_SPI0_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_DA8XX_SPINT0, .end = IRQ_DA8XX_SPINT0, .flags = IORESOURCE_IRQ, }, [2] = { .start = DA8XX_DMA_SPI0_RX, .end = DA8XX_DMA_SPI0_RX, .flags = IORESOURCE_DMA, }, [3] = { .start = DA8XX_DMA_SPI0_TX, .end = DA8XX_DMA_SPI0_TX, .flags = IORESOURCE_DMA, }, }; static struct resource da8xx_spi1_resources[] = { [0] = { .start = DA830_SPI1_BASE, .end = DA830_SPI1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_DA8XX_SPINT1, .end = IRQ_DA8XX_SPINT1, .flags = IORESOURCE_IRQ, }, [2] = { .start = DA8XX_DMA_SPI1_RX, .end = DA8XX_DMA_SPI1_RX, .flags = 
IORESOURCE_DMA, }, [3] = { .start = DA8XX_DMA_SPI1_TX, .end = DA8XX_DMA_SPI1_TX, .flags = IORESOURCE_DMA, }, }; static struct davinci_spi_platform_data da8xx_spi_pdata[] = { [0] = { .version = SPI_VERSION_2, .intr_line = 1, .dma_event_q = EVENTQ_0, }, [1] = { .version = SPI_VERSION_2, .intr_line = 1, .dma_event_q = EVENTQ_0, }, }; static struct platform_device da8xx_spi_device[] = { [0] = { .name = "spi_davinci", .id = 0, .num_resources = ARRAY_SIZE(da8xx_spi0_resources), .resource = da8xx_spi0_resources, .dev = { .platform_data = &da8xx_spi_pdata[0], }, }, [1] = { .name = "spi_davinci", .id = 1, .num_resources = ARRAY_SIZE(da8xx_spi1_resources), .resource = da8xx_spi1_resources, .dev = { .platform_data = &da8xx_spi_pdata[1], }, }, }; int __init da8xx_register_spi_bus(int instance, unsigned num_chipselect) { if (instance < 0 || instance > 1) return -EINVAL; da8xx_spi_pdata[instance].num_chipselect = num_chipselect; if (instance == 1 && cpu_is_davinci_da850()) { da8xx_spi1_resources[0].start = DA850_SPI1_BASE; da8xx_spi1_resources[0].end = DA850_SPI1_BASE + SZ_4K - 1; } return platform_device_register(&da8xx_spi_device[instance]); } #ifdef CONFIG_ARCH_DAVINCI_DA850 static struct resource da850_sata_resources[] = { { .start = DA850_SATA_BASE, .end = DA850_SATA_BASE + 0x1fff, .flags = IORESOURCE_MEM, }, { .start = IRQ_DA850_SATAINT, .flags = IORESOURCE_IRQ, }, }; /* SATA PHY Control Register offset from AHCI base */ #define SATA_P0PHYCR_REG 0x178 #define SATA_PHY_MPY(x) ((x) << 0) #define SATA_PHY_LOS(x) ((x) << 6) #define SATA_PHY_RXCDR(x) ((x) << 10) #define SATA_PHY_RXEQ(x) ((x) << 13) #define SATA_PHY_TXSWING(x) ((x) << 19) #define SATA_PHY_ENPLL(x) ((x) << 31) static struct clk *da850_sata_clk; static unsigned long da850_sata_refclkpn; /* Supported DA850 SATA crystal frequencies */ #define KHZ_TO_HZ(freq) ((freq) * 1000) static unsigned long da850_sata_xtal[] = { KHZ_TO_HZ(300000), KHZ_TO_HZ(250000), 0, /* Reserved */ KHZ_TO_HZ(187500), KHZ_TO_HZ(150000), 
KHZ_TO_HZ(125000), KHZ_TO_HZ(120000), KHZ_TO_HZ(100000), KHZ_TO_HZ(75000), KHZ_TO_HZ(60000), }; static int da850_sata_init(struct device *dev, void __iomem *addr) { int i, ret; unsigned int val; da850_sata_clk = clk_get(dev, NULL); if (IS_ERR(da850_sata_clk)) return PTR_ERR(da850_sata_clk); ret = clk_prepare_enable(da850_sata_clk); if (ret) goto err0; /* Enable SATA clock receiver */ val = __raw_readl(DA8XX_SYSCFG1_VIRT(DA8XX_PWRDN_REG)); val &= ~BIT(0); __raw_writel(val, DA8XX_SYSCFG1_VIRT(DA8XX_PWRDN_REG)); /* Get the multiplier needed for 1.5GHz PLL output */ for (i = 0; i < ARRAY_SIZE(da850_sata_xtal); i++) if (da850_sata_xtal[i] == da850_sata_refclkpn) break; if (i == ARRAY_SIZE(da850_sata_xtal)) { ret = -EINVAL; goto err1; } val = SATA_PHY_MPY(i + 1) | SATA_PHY_LOS(1) | SATA_PHY_RXCDR(4) | SATA_PHY_RXEQ(1) | SATA_PHY_TXSWING(3) | SATA_PHY_ENPLL(1); __raw_writel(val, addr + SATA_P0PHYCR_REG); return 0; err1: clk_disable_unprepare(da850_sata_clk); err0: clk_put(da850_sata_clk); return ret; } static void da850_sata_exit(struct device *dev) { clk_disable_unprepare(da850_sata_clk); clk_put(da850_sata_clk); } static struct ahci_platform_data da850_sata_pdata = { .init = da850_sata_init, .exit = da850_sata_exit, }; static u64 da850_sata_dmamask = DMA_BIT_MASK(32); static struct platform_device da850_sata_device = { .name = "ahci", .id = -1, .dev = { .platform_data = &da850_sata_pdata, .dma_mask = &da850_sata_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(da850_sata_resources), .resource = da850_sata_resources, }; int __init da850_register_sata(unsigned long refclkpn) { da850_sata_refclkpn = refclkpn; if (!da850_sata_refclkpn) return -EINVAL; return platform_device_register(&da850_sata_device); } #endif
gpl-2.0
TeamGlide/LiteKernel
sound/soc/kirkwood/kirkwood-dma.c
872
11231
/*
 * kirkwood-dma.c
 *
 * (c) 2010 Arnaud Patard <apatard@mandriva.com>
 * (c) 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/mbus.h>
#include <sound/soc.h>
#include "kirkwood.h"

/* Sample rates and formats advertised to the PCM core. */
#define KIRKWOOD_RATES \
	(SNDRV_PCM_RATE_44100 | \
	 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000)
#define KIRKWOOD_FORMATS \
	(SNDRV_PCM_FMTBIT_S16_LE | \
	 SNDRV_PCM_FMTBIT_S24_LE | \
	 SNDRV_PCM_FMTBIT_S32_LE)

/*
 * Per-platform DMA state, allocated lazily on first open (see
 * kirkwood_dma_open) and freed when the last substream closes.
 * @play_stream / @rec_stream: currently open substreams, used by the
 *                             IRQ handler to report period elapse.
 * @data: controller description handed over by the CPU DAI
 *        (snd_soc_dai_get_dma_data).
 */
struct kirkwood_dma_priv {
	struct snd_pcm_substream *play_stream;
	struct snd_pcm_substream *rec_stream;
	struct kirkwood_dma_data *data;
};

/* Hardware capabilities reported to userspace via hw_params. */
static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_PAUSE),
	.formats = KIRKWOOD_FORMATS,
	.rates = KIRKWOOD_RATES,
	.rate_min = 44100,
	.rate_max = 96000,
	.channels_min = 1,
	.channels_max = 2,
	.buffer_bytes_max = KIRKWOOD_SND_MAX_PERIOD_BYTES * KIRKWOOD_SND_MAX_PERIODS,
	.period_bytes_min = KIRKWOOD_SND_MIN_PERIOD_BYTES,
	.period_bytes_max = KIRKWOOD_SND_MAX_PERIOD_BYTES,
	.periods_min = KIRKWOOD_SND_MIN_PERIODS,
	.periods_max = KIRKWOOD_SND_MAX_PERIODS,
	.fifo_size = 0,
};

/* Fallback 32-bit DMA mask installed in kirkwood_dma_new() when the card
 * device has none. */
static u64 kirkwood_dma_dmamask = 0xFFFFFFFFUL;

/*
 * Shared interrupt handler for playback and capture.
 *
 * Error causes are only acknowledged (and logged); only the "bytes"
 * interrupts are expected otherwise — anything else is reported as
 * IRQ_NONE so the kernel can detect a misrouted shared IRQ.
 */
static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
{
	struct kirkwood_dma_priv *prdata = dev_id;
	struct kirkwood_dma_data *priv = prdata->data;
	unsigned long mask, status, cause;

	mask = readl(priv->io + KIRKWOOD_INT_MASK);
	status = readl(priv->io + KIRKWOOD_INT_CAUSE) & mask;
	cause = readl(priv->io + KIRKWOOD_ERR_CAUSE);
	if (unlikely(cause)) {
		printk(KERN_WARNING "%s: got err interrupt 0x%lx\n",
			__func__, cause);
		/* ack the error and bail out — errors are diagnostic only */
		writel(cause, priv->io + KIRKWOOD_ERR_CAUSE);
		return IRQ_HANDLED;
	}

	/* we've enabled only bytes interrupts ... */
	if (status & ~(KIRKWOOD_INT_CAUSE_PLAY_BYTES |
		       KIRKWOOD_INT_CAUSE_REC_BYTES)) {
		printk(KERN_WARNING "%s: unexpected interrupt %lx\n",
			__func__, status);
		return IRQ_NONE;
	}

	/* ack int */
	writel(status, priv->io + KIRKWOOD_INT_CAUSE);

	if (status & KIRKWOOD_INT_CAUSE_PLAY_BYTES)
		snd_pcm_period_elapsed(prdata->play_stream);
	if (status & KIRKWOOD_INT_CAUSE_REC_BYTES)
		snd_pcm_period_elapsed(prdata->rec_stream);

	return IRQ_HANDLED;
}

/*
 * Program the audio unit's mbus address-decoding window @win so the
 * controller can reach the DRAM chip-select that contains @dma.
 *
 * NOTE(review): the match test `(cs->base & 0xffff0000) < (dma & 0xffff0000)`
 * compares with '<' rather than checking that @dma falls inside the CS
 * range, and the loop does not break on a match, so the last CS whose base
 * is below @dma wins — presumably correct for the boards this shipped on,
 * but worth confirming against the mbus layout before reuse.
 */
static void kirkwood_dma_conf_mbus_windows(void __iomem *base, int win,
					   unsigned long dma,
					   struct mbus_dram_target_info *dram)
{
	int i;

	/* First disable and clear windows */
	writel(0, base + KIRKWOOD_AUDIO_WIN_CTRL_REG(win));
	writel(0, base + KIRKWOOD_AUDIO_WIN_BASE_REG(win));

	/* try to find matching cs for current dma address */
	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;
		if ((cs->base & 0xffff0000) < (dma & 0xffff0000)) {
			writel(cs->base & 0xffff0000,
				base + KIRKWOOD_AUDIO_WIN_BASE_REG(win));
			/* size | attr | target id | enable bit */
			writel(((cs->size - 1) & 0xffff0000) |
				(cs->mbus_attr << 8) |
				(dram->mbus_dram_target_id << 4) | 1,
				base + KIRKWOOD_AUDIO_WIN_CTRL_REG(win));
		}
	}
}

/*
 * PCM .open: apply burst-size constraints, lazily allocate the shared
 * kirkwood_dma_priv (first opener also requests the shared IRQ and
 * unmasks error interrupts), then point the right mbus window at the
 * preallocated DMA buffer.
 */
static int kirkwood_dma_open(struct snd_pcm_substream *substream)
{
	int err;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct snd_soc_platform *platform = soc_runtime->platform;
	struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
	struct kirkwood_dma_data *priv;
	struct kirkwood_dma_priv *prdata = snd_soc_platform_get_drvdata(platform);
	unsigned long addr;

	priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
	snd_soc_set_runtime_hwparams(substream, &kirkwood_dma_snd_hw);

	/* Ensure that all constraints linked to dma burst are fulfilled */
	err = snd_pcm_hw_constraint_minmax(runtime,
			SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
			priv->burst * 2,
			KIRKWOOD_AUDIO_BUF_MAX-1);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_step(runtime, 0,
			SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
			priv->burst);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_step(substream->runtime, 0,
			 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
			 priv->burst);
	if (err < 0)
		return err;

	/* first opener sets up the shared per-device state and the IRQ */
	if (prdata == NULL) {
		prdata = kzalloc(sizeof(struct kirkwood_dma_priv), GFP_KERNEL);
		if (prdata == NULL)
			return -ENOMEM;

		prdata->data = priv;

		err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED,
				  "kirkwood-i2s", prdata);
		if (err) {
			kfree(prdata);
			return -EBUSY;
		}

		snd_soc_platform_set_drvdata(platform, prdata);

		/*
		 * Enable Error interrupts. We're only ack'ing them but
		 * it's useful for diagnostics
		 */
		writel((unsigned long)-1, priv->io + KIRKWOOD_ERR_MASK);
	}

	/* buffer was dma_alloc_coherent()'d, so virt_to_phys is valid here */
	addr = virt_to_phys(substream->dma_buffer.area);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		prdata->play_stream = substream;
		kirkwood_dma_conf_mbus_windows(priv->io,
			KIRKWOOD_PLAYBACK_WIN, addr, priv->dram);
	} else {
		prdata->rec_stream = substream;
		kirkwood_dma_conf_mbus_windows(priv->io,
			KIRKWOOD_RECORD_WIN, addr, priv->dram);
	}

	return 0;
}

/*
 * PCM .close: detach this substream; when neither direction is open
 * any more, mask error interrupts, release the IRQ and free the shared
 * state allocated in kirkwood_dma_open().
 */
static int kirkwood_dma_close(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
	struct snd_soc_platform *platform = soc_runtime->platform;
	struct kirkwood_dma_priv *prdata = snd_soc_platform_get_drvdata(platform);
	struct kirkwood_dma_data *priv;

	priv = snd_soc_dai_get_dma_data(cpu_dai, substream);

	if (!prdata || !priv)
		return 0;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		prdata->play_stream = NULL;
	else
		prdata->rec_stream = NULL;

	if (!prdata->play_stream && !prdata->rec_stream) {
		writel(0, priv->io + KIRKWOOD_ERR_MASK);
		free_irq(priv->irq, prdata);
		kfree(prdata);
		snd_soc_platform_set_drvdata(platform, NULL);
	}

	return 0;
}

/* PCM .hw_params: hand the preallocated DMA buffer to the runtime. */
static int kirkwood_dma_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	runtime->dma_bytes = params_buffer_bytes(params);

	return 0;
}

/* PCM .hw_free: detach the runtime buffer (memory itself is freed in
 * kirkwood_dma_free_dma_buffers()). */
static int kirkwood_dma_hw_free(struct snd_pcm_substream *substream)
{
	snd_pcm_set_runtime_buffer(substream, NULL);
	return 0;
}

/*
 * PCM .prepare: program buffer address, buffer size and period byte
 * count into the playback or record register set.
 */
static int kirkwood_dma_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
	struct kirkwood_dma_data *priv;
	unsigned long size, count;

	priv = snd_soc_dai_get_dma_data(cpu_dai, substream);

	/* compute buffer size in term of "words" as requested in specs */
	size = frames_to_bytes(runtime, runtime->buffer_size);
	size = (size>>2)-1;	/* bytes -> 32-bit words, minus-one encoding */
	count = snd_pcm_lib_period_bytes(substream);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		writel(count, priv->io + KIRKWOOD_PLAY_BYTE_INT_COUNT);
		writel(runtime->dma_addr, priv->io + KIRKWOOD_PLAY_BUF_ADDR);
		writel(size, priv->io + KIRKWOOD_PLAY_BUF_SIZE);
	} else {
		writel(count, priv->io + KIRKWOOD_REC_BYTE_INT_COUNT);
		writel(runtime->dma_addr, priv->io + KIRKWOOD_REC_BUF_ADDR);
		writel(size, priv->io + KIRKWOOD_REC_BUF_SIZE);
	}

	return 0;
}

/* PCM .pointer: current hardware position, read from the byte counter
 * of the active direction and converted to frames. */
static snd_pcm_uframes_t kirkwood_dma_pointer(struct snd_pcm_substream
						*substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
	struct kirkwood_dma_data *priv;
	snd_pcm_uframes_t count;

	priv = snd_soc_dai_get_dma_data(cpu_dai, substream);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		count = bytes_to_frames(substream->runtime,
			readl(priv->io + KIRKWOOD_PLAY_BYTE_COUNT));
	else
		count = bytes_to_frames(substream->runtime,
			readl(priv->io + KIRKWOOD_REC_BYTE_COUNT));

	return count;
}

struct snd_pcm_ops kirkwood_dma_ops = {
	.open =		kirkwood_dma_open,
	.close =	kirkwood_dma_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	kirkwood_dma_hw_params,
	.hw_free =	kirkwood_dma_hw_free,
	.prepare =	kirkwood_dma_prepare,
	.pointer =	kirkwood_dma_pointer,
};

/* Preallocate one coherent DMA buffer, sized for the worst case
 * (max periods * max period bytes), for the given stream direction. */
static int kirkwood_dma_preallocate_dma_buffer(struct snd_pcm *pcm,
		int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	size_t size = kirkwood_dma_snd_hw.buffer_bytes_max;

	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = pcm->card->dev;
	buf->area = dma_alloc_coherent(pcm->card->dev, size,
			&buf->addr, GFP_KERNEL);
	if (!buf->area)
		return -ENOMEM;
	buf->bytes = size;
	buf->private_data = NULL;

	return 0;
}

/*
 * Platform .pcm_new: install default 32-bit DMA masks if the card has
 * none, then preallocate buffers for whichever directions the DAI
 * supports. Buffers are released in kirkwood_dma_free_dma_buffers().
 */
static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_soc_dai *dai = rtd->cpu_dai;
	struct snd_pcm *pcm = rtd->pcm;
	int ret;

	if (!card->dev->dma_mask)
		card->dev->dma_mask = &kirkwood_dma_dmamask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = 0xffffffff;

	if (dai->driver->playback.channels_min) {
		ret = kirkwood_dma_preallocate_dma_buffer(pcm,
				SNDRV_PCM_STREAM_PLAYBACK);
		if (ret)
			return ret;
	}

	if (dai->driver->capture.channels_min) {
		ret = kirkwood_dma_preallocate_dma_buffer(pcm,
				SNDRV_PCM_STREAM_CAPTURE);
		if (ret)
			return ret;
	}

	return 0;
}

/* Platform .pcm_free: release the coherent buffers from
 * kirkwood_dma_preallocate_dma_buffer() for both directions. */
static void kirkwood_dma_free_dma_buffers(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;
	int stream;

	for (stream = 0; stream < 2; stream++) {
		substream = pcm->streams[stream].substream;
		if (!substream)
			continue;
		buf = &substream->dma_buffer;
		if (!buf->area)
			continue;
		dma_free_coherent(pcm->card->dev, buf->bytes,
				buf->area, buf->addr);
		buf->area = NULL;
	}
}

static struct snd_soc_platform_driver kirkwood_soc_platform = {
	.ops		= &kirkwood_dma_ops,
	.pcm_new	= kirkwood_dma_new,
	.pcm_free	= kirkwood_dma_free_dma_buffers,
};

static int __devinit kirkwood_soc_platform_probe(struct platform_device *pdev)
{
	return snd_soc_register_platform(&pdev->dev, &kirkwood_soc_platform);
}

static int __devexit kirkwood_soc_platform_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}

static struct platform_driver kirkwood_pcm_driver = {
	.driver = {
			.name = "kirkwood-pcm-audio",
			.owner = THIS_MODULE,
	},

	.probe = kirkwood_soc_platform_probe,
	.remove = __devexit_p(kirkwood_soc_platform_remove),
};

static int __init kirkwood_pcm_init(void)
{
	return platform_driver_register(&kirkwood_pcm_driver);
}
module_init(kirkwood_pcm_init);

static void __exit kirkwood_pcm_exit(void)
{
	platform_driver_unregister(&kirkwood_pcm_driver);
}
module_exit(kirkwood_pcm_exit);

MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
MODULE_DESCRIPTION("Marvell Kirkwood Audio DMA module");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:kirkwood-pcm-audio");
gpl-2.0
chaosmaster/android_kernel_amazon_ford
drivers/net/bonding/bond_3ad.c
1640
82347
/* * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/skbuff.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/if_bonding.h> #include <linux/pkt_sched.h> #include <net/net_namespace.h> #include "bonding.h" #include "bond_3ad.h" // General definitions #define AD_SHORT_TIMEOUT 1 #define AD_LONG_TIMEOUT 0 #define AD_STANDBY 0x2 #define AD_MAX_TX_IN_SECOND 3 #define AD_COLLECTOR_MAX_DELAY 0 // Timer definitions(43.4.4 in the 802.3ad standard) #define AD_FAST_PERIODIC_TIME 1 #define AD_SLOW_PERIODIC_TIME 30 #define AD_SHORT_TIMEOUT_TIME (3*AD_FAST_PERIODIC_TIME) #define AD_LONG_TIMEOUT_TIME (3*AD_SLOW_PERIODIC_TIME) #define AD_CHURN_DETECTION_TIME 60 #define AD_AGGREGATE_WAIT_TIME 2 // Port state definitions(43.4.2.2 in the 802.3ad standard) #define AD_STATE_LACP_ACTIVITY 0x1 #define AD_STATE_LACP_TIMEOUT 0x2 #define AD_STATE_AGGREGATION 0x4 #define AD_STATE_SYNCHRONIZATION 0x8 #define AD_STATE_COLLECTING 0x10 #define AD_STATE_DISTRIBUTING 0x20 #define AD_STATE_DEFAULTED 0x40 #define 
AD_STATE_EXPIRED 0x80 // Port Variables definitions used by the State Machines(43.4.7 in the 802.3ad standard) #define AD_PORT_BEGIN 0x1 #define AD_PORT_LACP_ENABLED 0x2 #define AD_PORT_ACTOR_CHURN 0x4 #define AD_PORT_PARTNER_CHURN 0x8 #define AD_PORT_READY 0x10 #define AD_PORT_READY_N 0x20 #define AD_PORT_MATCHED 0x40 #define AD_PORT_STANDBY 0x80 #define AD_PORT_SELECTED 0x100 #define AD_PORT_MOVED 0x200 // Port Key definitions // key is determined according to the link speed, duplex and // user key(which is yet not supported) // ------------------------------------------------------------ // Port key : | User key | Speed |Duplex| // ------------------------------------------------------------ // 16 6 1 0 #define AD_DUPLEX_KEY_BITS 0x1 #define AD_SPEED_KEY_BITS 0x3E #define AD_USER_KEY_BITS 0xFFC0 //dalloun #define AD_LINK_SPEED_BITMASK_1MBPS 0x1 #define AD_LINK_SPEED_BITMASK_10MBPS 0x2 #define AD_LINK_SPEED_BITMASK_100MBPS 0x4 #define AD_LINK_SPEED_BITMASK_1000MBPS 0x8 #define AD_LINK_SPEED_BITMASK_10000MBPS 0x10 //endalloun // compare MAC addresses #define MAC_ADDRESS_COMPARE(A, B) memcmp(A, B, ETH_ALEN) static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } }; static u16 ad_ticks_per_sec; static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR; // ================= main 802.3ad protocol functions ================== static int ad_lacpdu_send(struct port *port); static int ad_marker_send(struct port *port, struct bond_marker *marker); static void ad_mux_machine(struct port *port); static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port); static void ad_tx_machine(struct port *port); static void ad_periodic_machine(struct port *port); static void ad_port_selection_logic(struct port *port); static void ad_agg_selection_logic(struct aggregator *aggregator); static void ad_clear_agg(struct aggregator *aggregator); static void ad_initialize_agg(struct aggregator 
*aggregator); static void ad_initialize_port(struct port *port, int lacp_fast); static void ad_enable_collecting_distributing(struct port *port); static void ad_disable_collecting_distributing(struct port *port); static void ad_marker_info_received(struct bond_marker *marker_info, struct port *port); static void ad_marker_response_received(struct bond_marker *marker, struct port *port); ///////////////////////////////////////////////////////////////////////////////// // ================= api to bonding and kernel code ================== ///////////////////////////////////////////////////////////////////////////////// /** * __get_bond_by_port - get the port's bonding struct * @port: the port we're looking at * * Return @port's bonding struct, or %NULL if it can't be found. */ static inline struct bonding *__get_bond_by_port(struct port *port) { if (port->slave == NULL) return NULL; return bond_get_bond_by_slave(port->slave); } /** * __get_first_port - get the first port in the bond * @bond: the bond we're looking at * * Return the port of the first slave in @bond, or %NULL if it can't be found. */ static inline struct port *__get_first_port(struct bonding *bond) { if (bond->slave_cnt == 0) return NULL; return &(SLAVE_AD_INFO(bond->first_slave).port); } /** * __get_next_port - get the next port in the bond * @port: the port we're looking at * * Return the port of the slave that is next in line of @port's slave in the * bond, or %NULL if it can't be found. */ static inline struct port *__get_next_port(struct port *port) { struct bonding *bond = __get_bond_by_port(port); struct slave *slave = port->slave; // If there's no bond for this port, or this is the last slave if ((bond == NULL) || (slave->next == bond->first_slave)) return NULL; return &(SLAVE_AD_INFO(slave->next).port); } /** * __get_first_agg - get the first aggregator in the bond * @bond: the bond we're looking at * * Return the aggregator of the first slave in @bond, or %NULL if it can't be * found. 
*/ static inline struct aggregator *__get_first_agg(struct port *port) { struct bonding *bond = __get_bond_by_port(port); // If there's no bond for this port, or bond has no slaves if ((bond == NULL) || (bond->slave_cnt == 0)) return NULL; return &(SLAVE_AD_INFO(bond->first_slave).aggregator); } /** * __get_next_agg - get the next aggregator in the bond * @aggregator: the aggregator we're looking at * * Return the aggregator of the slave that is next in line of @aggregator's * slave in the bond, or %NULL if it can't be found. */ static inline struct aggregator *__get_next_agg(struct aggregator *aggregator) { struct slave *slave = aggregator->slave; struct bonding *bond = bond_get_bond_by_slave(slave); // If there's no bond for this aggregator, or this is the last slave if ((bond == NULL) || (slave->next == bond->first_slave)) return NULL; return &(SLAVE_AD_INFO(slave->next).aggregator); } /* * __agg_has_partner * * Return nonzero if aggregator has a partner (denoted by a non-zero ether * address for the partner). Return 0 if not. */ static inline int __agg_has_partner(struct aggregator *agg) { return !is_zero_ether_addr(agg->partner_system.mac_addr_value); } /** * __disable_port - disable the port's slave * @port: the port we're looking at * */ static inline void __disable_port(struct port *port) { bond_set_slave_inactive_flags(port->slave); } /** * __enable_port - enable the port's slave, if it's up * @port: the port we're looking at * */ static inline void __enable_port(struct port *port) { struct slave *slave = port->slave; if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev)) bond_set_slave_active_flags(slave); } /** * __port_is_enabled - check if the port's slave is in active state * @port: the port we're looking at * */ static inline int __port_is_enabled(struct port *port) { return bond_is_active_slave(port->slave); } /** * __get_agg_selection_mode - get the aggregator selection mode * @port: the port we're looking at * * Get the aggregator selection mode. 
Can be %STABLE, %BANDWIDTH or %COUNT. */ static inline u32 __get_agg_selection_mode(struct port *port) { struct bonding *bond = __get_bond_by_port(port); if (bond == NULL) return BOND_AD_STABLE; return bond->params.ad_select; } /** * __check_agg_selection_timer - check if the selection timer has expired * @port: the port we're looking at * */ static inline int __check_agg_selection_timer(struct port *port) { struct bonding *bond = __get_bond_by_port(port); if (bond == NULL) return 0; return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0; } /** * __get_state_machine_lock - lock the port's state machines * @port: the port we're looking at * */ static inline void __get_state_machine_lock(struct port *port) { spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); } /** * __release_state_machine_lock - unlock the port's state machines * @port: the port we're looking at * */ static inline void __release_state_machine_lock(struct port *port) { spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); } /** * __get_link_speed - get a port's speed * @port: the port we're looking at * * Return @port's speed in 802.3ad bitmask format. i.e. one of: * 0, * %AD_LINK_SPEED_BITMASK_10MBPS, * %AD_LINK_SPEED_BITMASK_100MBPS, * %AD_LINK_SPEED_BITMASK_1000MBPS, * %AD_LINK_SPEED_BITMASK_10000MBPS */ static u16 __get_link_speed(struct port *port) { struct slave *slave = port->slave; u16 speed; /* this if covers only a special case: when the configuration starts with * link down, it sets the speed to 0. 
* This is done in spite of the fact that the e100 driver reports 0 to be * compatible with MVT in the future.*/ if (slave->link != BOND_LINK_UP) speed = 0; else { switch (slave->speed) { case SPEED_10: speed = AD_LINK_SPEED_BITMASK_10MBPS; break; case SPEED_100: speed = AD_LINK_SPEED_BITMASK_100MBPS; break; case SPEED_1000: speed = AD_LINK_SPEED_BITMASK_1000MBPS; break; case SPEED_10000: speed = AD_LINK_SPEED_BITMASK_10000MBPS; break; default: speed = 0; // unknown speed value from ethtool. shouldn't happen break; } } pr_debug("Port %d Received link speed %d update from adapter\n", port->actor_port_number, speed); return speed; } /** * __get_duplex - get a port's duplex * @port: the port we're looking at * * Return @port's duplex in 802.3ad bitmask format. i.e.: * 0x01 if in full duplex * 0x00 otherwise */ static u8 __get_duplex(struct port *port) { struct slave *slave = port->slave; u8 retval; // handling a special case: when the configuration starts with // link down, it sets the duplex to 0. if (slave->link != BOND_LINK_UP) retval = 0x0; else { switch (slave->duplex) { case DUPLEX_FULL: retval = 0x1; pr_debug("Port %d Received status full duplex update from adapter\n", port->actor_port_number); break; case DUPLEX_HALF: default: retval = 0x0; pr_debug("Port %d Received status NOT full duplex update from adapter\n", port->actor_port_number); break; } } return retval; } /** * __initialize_port_locks - initialize a port's STATE machine spinlock * @port: the slave of the port we're looking at * */ static inline void __initialize_port_locks(struct slave *slave) { // make sure it isn't called twice spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock)); } //conversions /** * __ad_timer_to_ticks - convert a given timer type to AD module ticks * @timer_type: which timer to operate * @par: timer parameter. see below * * If @timer_type is %current_while_timer, @par indicates long/short timer. 
* If @timer_type is %periodic_timer, @par is one of %FAST_PERIODIC_TIME, * %SLOW_PERIODIC_TIME. */ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par) { u16 retval = 0; /* to silence the compiler */ switch (timer_type) { case AD_CURRENT_WHILE_TIMER: // for rx machine usage if (par) retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec); // short timeout else retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec); // long timeout break; case AD_ACTOR_CHURN_TIMER: // for local churn machine retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec); break; case AD_PERIODIC_TIMER: // for periodic machine retval = (par*ad_ticks_per_sec); // long timeout break; case AD_PARTNER_CHURN_TIMER: // for remote churn machine retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec); break; case AD_WAIT_WHILE_TIMER: // for selection machine retval = (AD_AGGREGATE_WAIT_TIME*ad_ticks_per_sec); break; } return retval; } ///////////////////////////////////////////////////////////////////////////////// // ================= ad_rx_machine helper functions ================== ///////////////////////////////////////////////////////////////////////////////// /** * __choose_matched - update a port's matched variable from a received lacpdu * @lacpdu: the lacpdu we've received * @port: the port we're looking at * * Update the value of the matched variable, using parameter values from a * newly received lacpdu. Parameter values for the partner carried in the * received PDU are compared with the corresponding operational parameter * values for the actor. Matched is set to TRUE if all of these parameters * match and the PDU parameter partner_state.aggregation has the same value as * actor_oper_port_state.aggregation and lacp will actively maintain the link * in the aggregation. Matched is also set to TRUE if the value of * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates * an individual link and lacp will actively maintain the link. Otherwise, * matched is set to FALSE. 
LACP is considered to be actively maintaining the * link if either the PDU's actor_state.lacp_activity variable is TRUE or both * the actor's actor_oper_port_state.lacp_activity and the PDU's * partner_state.lacp_activity variables are TRUE. * * Note: the AD_PORT_MATCHED "variable" is not specified by 802.3ad; it is * used here to implement the language from 802.3ad 43.4.9 that requires * recordPDU to "match" the LACPDU parameters to the stored values. */ static void __choose_matched(struct lacpdu *lacpdu, struct port *port) { // check if all parameters are alike if (((ntohs(lacpdu->partner_port) == port->actor_port_number) && (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) && !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) && (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) && (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) && ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) || // or this is individual link(aggregation == FALSE) ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0) ) { // update the state machine Matched variable port->sm_vars |= AD_PORT_MATCHED; } else { port->sm_vars &= ~AD_PORT_MATCHED; } } /** * __record_pdu - record parameters from a received lacpdu * @lacpdu: the lacpdu we've received * @port: the port we're looking at * * Record the parameter values for the Actor carried in a received lacpdu as * the current partner operational parameter values and sets * actor_oper_port_state.defaulted to FALSE. 
*/ static void __record_pdu(struct lacpdu *lacpdu, struct port *port) { if (lacpdu && port) { struct port_params *partner = &port->partner_oper; __choose_matched(lacpdu, port); // record the new parameter values for the partner operational partner->port_number = ntohs(lacpdu->actor_port); partner->port_priority = ntohs(lacpdu->actor_port_priority); partner->system = lacpdu->actor_system; partner->system_priority = ntohs(lacpdu->actor_system_priority); partner->key = ntohs(lacpdu->actor_key); partner->port_state = lacpdu->actor_state; // set actor_oper_port_state.defaulted to FALSE port->actor_oper_port_state &= ~AD_STATE_DEFAULTED; // set the partner sync. to on if the partner is sync. and the port is matched if ((port->sm_vars & AD_PORT_MATCHED) && (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) partner->port_state |= AD_STATE_SYNCHRONIZATION; else partner->port_state &= ~AD_STATE_SYNCHRONIZATION; } } /** * __record_default - record default parameters * @port: the port we're looking at * * This function records the default parameter values for the partner carried * in the Partner Admin parameters as the current partner operational parameter * values and sets actor_oper_port_state.defaulted to TRUE. */ static void __record_default(struct port *port) { if (port) { // record the partner admin parameters memcpy(&port->partner_oper, &port->partner_admin, sizeof(struct port_params)); // set actor_oper_port_state.defaulted to true port->actor_oper_port_state |= AD_STATE_DEFAULTED; } } /** * __update_selected - update a port's Selected variable from a received lacpdu * @lacpdu: the lacpdu we've received * @port: the port we're looking at * * Update the value of the selected variable, using parameter values from a * newly received lacpdu. The parameter values for the Actor carried in the * received PDU are compared with the corresponding operational parameter * values for the ports partner. 
If one or more of the comparisons shows that * the value(s) received in the PDU differ from the current operational values, * then selected is set to FALSE and actor_oper_port_state.synchronization is * set to out_of_sync. Otherwise, selected remains unchanged. */ static void __update_selected(struct lacpdu *lacpdu, struct port *port) { if (lacpdu && port) { const struct port_params *partner = &port->partner_oper; // check if any parameter is different if (ntohs(lacpdu->actor_port) != partner->port_number || ntohs(lacpdu->actor_port_priority) != partner->port_priority || MAC_ADDRESS_COMPARE(&lacpdu->actor_system, &partner->system) || ntohs(lacpdu->actor_system_priority) != partner->system_priority || ntohs(lacpdu->actor_key) != partner->key || (lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) { // update the state machine Selected variable port->sm_vars &= ~AD_PORT_SELECTED; } } } /** * __update_default_selected - update a port's Selected variable from Partner * @port: the port we're looking at * * This function updates the value of the selected variable, using the partner * administrative parameter values. The administrative values are compared with * the corresponding operational parameter values for the partner. If one or * more of the comparisons shows that the administrative value(s) differ from * the current operational values, then Selected is set to FALSE and * actor_oper_port_state.synchronization is set to OUT_OF_SYNC. Otherwise, * Selected remains unchanged. 
*/ static void __update_default_selected(struct port *port) { if (port) { const struct port_params *admin = &port->partner_admin; const struct port_params *oper = &port->partner_oper; // check if any parameter is different if (admin->port_number != oper->port_number || admin->port_priority != oper->port_priority || MAC_ADDRESS_COMPARE(&admin->system, &oper->system) || admin->system_priority != oper->system_priority || admin->key != oper->key || (admin->port_state & AD_STATE_AGGREGATION) != (oper->port_state & AD_STATE_AGGREGATION)) { // update the state machine Selected variable port->sm_vars &= ~AD_PORT_SELECTED; } } } /** * __update_ntt - update a port's ntt variable from a received lacpdu * @lacpdu: the lacpdu we've received * @port: the port we're looking at * * Updates the value of the ntt variable, using parameter values from a newly * received lacpdu. The parameter values for the partner carried in the * received PDU are compared with the corresponding operational parameter * values for the Actor. If one or more of the comparisons shows that the * value(s) received in the PDU differ from the current operational values, * then ntt is set to TRUE. Otherwise, ntt remains unchanged. 
*/ static void __update_ntt(struct lacpdu *lacpdu, struct port *port) { // validate lacpdu and port if (lacpdu && port) { // check if any parameter is different if ((ntohs(lacpdu->partner_port) != port->actor_port_number) || (ntohs(lacpdu->partner_port_priority) != port->actor_port_priority) || MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) || (ntohs(lacpdu->partner_system_priority) != port->actor_system_priority) || (ntohs(lacpdu->partner_key) != port->actor_oper_port_key) || ((lacpdu->partner_state & AD_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY)) || ((lacpdu->partner_state & AD_STATE_LACP_TIMEOUT) != (port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT)) || ((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) || ((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION)) ) { port->ntt = true; } } } /** * __attach_bond_to_agg * @port: the port we're looking at * * Handle the attaching of the port's control parser/multiplexer and the * aggregator. This function does nothing since the parser/multiplexer of the * receive and the parser/multiplexer of the aggregator are already combined. */ static void __attach_bond_to_agg(struct port *port) { port = NULL; /* just to satisfy the compiler */ // This function does nothing since the parser/multiplexer of the receive // and the parser/multiplexer of the aggregator are already combined } /** * __detach_bond_from_agg * @port: the port we're looking at * * Handle the detaching of the port's control parser/multiplexer from the * aggregator. This function does nothing since the parser/multiplexer of the * receive and the parser/multiplexer of the aggregator are already combined. 
*/ static void __detach_bond_from_agg(struct port *port) { port = NULL; /* just to satisfy the compiler */ // This function does nothing since the parser/multiplexer of the receive // and the parser/multiplexer of the aggregator are already combined } /** * __agg_ports_are_ready - check if all ports in an aggregator are ready * @aggregator: the aggregator we're looking at * */ static int __agg_ports_are_ready(struct aggregator *aggregator) { struct port *port; int retval = 1; if (aggregator) { // scan all ports in this aggregator to verfy if they are all ready for (port = aggregator->lag_ports; port; port = port->next_port_in_aggregator) { if (!(port->sm_vars & AD_PORT_READY_N)) { retval = 0; break; } } } return retval; } /** * __set_agg_ports_ready - set value of Ready bit in all ports of an aggregator * @aggregator: the aggregator we're looking at * @val: Should the ports' ready bit be set on or off * */ static void __set_agg_ports_ready(struct aggregator *aggregator, int val) { struct port *port; for (port = aggregator->lag_ports; port; port = port->next_port_in_aggregator) { if (val) port->sm_vars |= AD_PORT_READY; else port->sm_vars &= ~AD_PORT_READY; } } /** * __get_agg_bandwidth - get the total bandwidth of an aggregator * @aggregator: the aggregator we're looking at * */ static u32 __get_agg_bandwidth(struct aggregator *aggregator) { u32 bandwidth = 0; if (aggregator->num_of_ports) { switch (__get_link_speed(aggregator->lag_ports)) { case AD_LINK_SPEED_BITMASK_1MBPS: bandwidth = aggregator->num_of_ports; break; case AD_LINK_SPEED_BITMASK_10MBPS: bandwidth = aggregator->num_of_ports * 10; break; case AD_LINK_SPEED_BITMASK_100MBPS: bandwidth = aggregator->num_of_ports * 100; break; case AD_LINK_SPEED_BITMASK_1000MBPS: bandwidth = aggregator->num_of_ports * 1000; break; case AD_LINK_SPEED_BITMASK_10000MBPS: bandwidth = aggregator->num_of_ports * 10000; break; default: bandwidth = 0; /*to silence the compiler ....*/ } } return bandwidth; } /** * 
__get_active_agg - get the current active aggregator * @aggregator: the aggregator we're looking at * */ static struct aggregator *__get_active_agg(struct aggregator *aggregator) { struct aggregator *retval = NULL; for (; aggregator; aggregator = __get_next_agg(aggregator)) { if (aggregator->is_active) { retval = aggregator; break; } } return retval; } /** * __update_lacpdu_from_port - update a port's lacpdu fields * @port: the port we're looking at * */ static inline void __update_lacpdu_from_port(struct port *port) { struct lacpdu *lacpdu = &port->lacpdu; const struct port_params *partner = &port->partner_oper; /* update current actual Actor parameters */ /* lacpdu->subtype initialized * lacpdu->version_number initialized * lacpdu->tlv_type_actor_info initialized * lacpdu->actor_information_length initialized */ lacpdu->actor_system_priority = htons(port->actor_system_priority); lacpdu->actor_system = port->actor_system; lacpdu->actor_key = htons(port->actor_oper_port_key); lacpdu->actor_port_priority = htons(port->actor_port_priority); lacpdu->actor_port = htons(port->actor_port_number); lacpdu->actor_state = port->actor_oper_port_state; /* lacpdu->reserved_3_1 initialized * lacpdu->tlv_type_partner_info initialized * lacpdu->partner_information_length initialized */ lacpdu->partner_system_priority = htons(partner->system_priority); lacpdu->partner_system = partner->system; lacpdu->partner_key = htons(partner->key); lacpdu->partner_port_priority = htons(partner->port_priority); lacpdu->partner_port = htons(partner->port_number); lacpdu->partner_state = partner->port_state; /* lacpdu->reserved_3_2 initialized * lacpdu->tlv_type_collector_info initialized * lacpdu->collector_information_length initialized * collector_max_delay initialized * reserved_12[12] initialized * tlv_type_terminator initialized * terminator_length initialized * reserved_50[50] initialized */ } ////////////////////////////////////////////////////////////////////////////////////// // 
================= main 802.3ad protocol code ====================================== ////////////////////////////////////////////////////////////////////////////////////// /** * ad_lacpdu_send - send out a lacpdu packet on a given port * @port: the port we're looking at * * Returns: 0 on success * < 0 on error */ static int ad_lacpdu_send(struct port *port) { struct slave *slave = port->slave; struct sk_buff *skb; struct lacpdu_header *lacpdu_header; int length = sizeof(struct lacpdu_header); skb = dev_alloc_skb(length); if (!skb) return -ENOMEM; skb->dev = slave->dev; skb_reset_mac_header(skb); skb->network_header = skb->mac_header + ETH_HLEN; skb->protocol = PKT_TYPE_LACPDU; skb->priority = TC_PRIO_CONTROL; lacpdu_header = (struct lacpdu_header *)skb_put(skb, length); memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); /* Note: source address is set to be the member's PERMANENT address, because we use it to identify loopback lacpdus in receive. */ memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU; lacpdu_header->lacpdu = port->lacpdu; // struct copy dev_queue_xmit(skb); return 0; } /** * ad_marker_send - send marker information/response on a given port * @port: the port we're looking at * @marker: marker data to send * * Returns: 0 on success * < 0 on error */ static int ad_marker_send(struct port *port, struct bond_marker *marker) { struct slave *slave = port->slave; struct sk_buff *skb; struct bond_marker_header *marker_header; int length = sizeof(struct bond_marker_header); skb = dev_alloc_skb(length + 16); if (!skb) return -ENOMEM; skb_reserve(skb, 16); skb->dev = slave->dev; skb_reset_mac_header(skb); skb->network_header = skb->mac_header + ETH_HLEN; skb->protocol = PKT_TYPE_LACPDU; marker_header = (struct bond_marker_header *)skb_put(skb, length); memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); /* Note: source address is set to be the member's PERMANENT address, because 
we use it to identify loopback MARKERs in receive. */
	memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
	marker_header->hdr.h_proto = PKT_TYPE_LACPDU;

	marker_header->marker = *marker; // struct copy

	dev_queue_xmit(skb);

	return 0;
}

/**
 * ad_mux_machine - handle a port's mux state machine
 * @port: the port we're looking at
 *
 * Runs one tick of the 802.3ad mux state machine (DETACHED -> WAITING ->
 * ATTACHED -> COLLECTING_DISTRIBUTING) and, on a state change, performs the
 * entry actions for the new state (attach/detach, enable/disable of
 * collecting/distributing, ntt request).
 */
static void ad_mux_machine(struct port *port)
{
	mux_states_t last_state;

	// keep current State Machine state to compare later if it was changed
	last_state = port->sm_mux_state;

	if (port->sm_vars & AD_PORT_BEGIN) {
		port->sm_mux_state = AD_MUX_DETACHED;	// next state
	} else {
		switch (port->sm_mux_state) {
		case AD_MUX_DETACHED:
			if ((port->sm_vars & AD_PORT_SELECTED)
			    || (port->sm_vars & AD_PORT_STANDBY))
				/* if SELECTED or STANDBY */
				port->sm_mux_state = AD_MUX_WAITING;	// next state
			break;
		case AD_MUX_WAITING:
			// if SELECTED == FALSE return to DETACH state
			if (!(port->sm_vars & AD_PORT_SELECTED)) {	// if UNSELECTED
				port->sm_vars &= ~AD_PORT_READY_N;
				// in order to withhold the Selection Logic to check all ports READY_N value
				// every callback cycle to update ready variable, we check READY_N and update READY here
				__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
				port->sm_mux_state = AD_MUX_DETACHED;	// next state
				break;
			}

			// check if the wait_while_timer expired
			if (port->sm_mux_timer_counter
			    && !(--port->sm_mux_timer_counter))
				port->sm_vars |= AD_PORT_READY_N;

			// in order to withhold the selection logic to check all ports READY_N value
			// every callback cycle to update ready variable, we check READY_N and update READY here
			__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));

			// if the wait_while_timer expired, and the port is in READY state, move to ATTACHED state
			if ((port->sm_vars & AD_PORT_READY)
			    && !port->sm_mux_timer_counter)
				port->sm_mux_state = AD_MUX_ATTACHED;	// next state
			break;
		case AD_MUX_ATTACHED:
			// check also if agg_select_timer expired (so enabling the port
			// only takes place after this timer)
			if ((port->sm_vars & AD_PORT_SELECTED) && (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) && !__check_agg_selection_timer(port)) {
				port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;	// next state
			} else if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) {	// if UNSELECTED or STANDBY
				port->sm_vars &= ~AD_PORT_READY_N;
				// in order to withhold the selection logic to check all ports READY_N value
				// every callback cycle to update ready variable, we check READY_N and update READY here
				__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
				port->sm_mux_state = AD_MUX_DETACHED;	// next state
			}
			break;
		case AD_MUX_COLLECTING_DISTRIBUTING:
			if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY) ||
			    !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION)
			   ) {
				port->sm_mux_state = AD_MUX_ATTACHED;	// next state
			} else {
				// if port state hasn't changed make
				// sure that a collecting distributing
				// port in an active aggregator is enabled
				if (port->aggregator &&
				    port->aggregator->is_active &&
				    !__port_is_enabled(port)) {
					__enable_port(port);
				}
			}
			break;
		default:	//to silence the compiler
			break;
		}
	}

	// check if the state machine was changed
	if (port->sm_mux_state != last_state) {
		pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n",
			 port->actor_port_number, last_state,
			 port->sm_mux_state);
		// perform the entry actions of the newly entered state
		switch (port->sm_mux_state) {
		case AD_MUX_DETACHED:
			__detach_bond_from_agg(port);
			port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
			ad_disable_collecting_distributing(port);
			port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
			port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
			port->ntt = true;
			break;
		case AD_MUX_WAITING:
			port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0);
			break;
		case AD_MUX_ATTACHED:
			__attach_bond_to_agg(port);
			port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
			port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
			port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
			ad_disable_collecting_distributing(port);
			port->ntt = true;
			break;
		case AD_MUX_COLLECTING_DISTRIBUTING:
			port->actor_oper_port_state |= AD_STATE_COLLECTING;
			port->actor_oper_port_state |= AD_STATE_DISTRIBUTING;
			ad_enable_collecting_distributing(port);
			port->ntt = true;
			break;
		default:	//to silence the compiler
			break;
		}
	}
}

/**
 * ad_rx_machine - handle a port's rx State Machine
 * @lacpdu: the lacpdu we've received
 * @port: the port we're looking at
 *
 * If lacpdu arrived, stop previous timer (if exists) and set the next state as
 * CURRENT. If timer expired set the state machine in the proper state.
 * In other cases, this function checks if we need to switch to other state.
 */
static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
{
	rx_states_t last_state;

	// keep current State Machine state to compare later if it was changed
	last_state = port->sm_rx_state;

	// check if state machine should change state
	// first, check if port was reinitialized
	if (port->sm_vars & AD_PORT_BEGIN)
		/* next state */
		port->sm_rx_state = AD_RX_INITIALIZE;
	// check if port is not enabled
	else if (!(port->sm_vars & AD_PORT_BEGIN)
		 && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED))
		/* next state */
		port->sm_rx_state = AD_RX_PORT_DISABLED;
	// check if new lacpdu arrived
	else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || (port->sm_rx_state == AD_RX_DEFAULTED) || (port->sm_rx_state == AD_RX_CURRENT))) {
		port->sm_rx_timer_counter = 0;	// zero timer
		port->sm_rx_state = AD_RX_CURRENT;
	} else {
		// if timer is on, and if it is expired
		if (port->sm_rx_timer_counter && !(--port->sm_rx_timer_counter)) {
			switch (port->sm_rx_state) {
			case AD_RX_EXPIRED:
				port->sm_rx_state = AD_RX_DEFAULTED;	// next state
				break;
			case AD_RX_CURRENT:
				port->sm_rx_state = AD_RX_EXPIRED;	// next state
				break;
			default:	//to silence the compiler
				break;
			}
		} else {
			// if no lacpdu arrived and no timer is on
			switch (port->sm_rx_state) {
			case AD_RX_PORT_DISABLED:
				if (port->sm_vars & AD_PORT_MOVED)
					port->sm_rx_state = AD_RX_INITIALIZE;	// next state
				else if (port->is_enabled && (port->sm_vars & AD_PORT_LACP_ENABLED))
					port->sm_rx_state = AD_RX_EXPIRED;	// next state
				else if (port->is_enabled && ((port->sm_vars & AD_PORT_LACP_ENABLED) == 0))
					port->sm_rx_state = AD_RX_LACP_DISABLED;	// next state
				break;
			default:	//to silence the compiler
				break;
			}
		}
	}

	// check if the State machine was changed or new lacpdu arrived
	if ((port->sm_rx_state != last_state) || (lacpdu)) {
		pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n",
			 port->actor_port_number, last_state,
			 port->sm_rx_state);
		switch (port->sm_rx_state) {
		case AD_RX_INITIALIZE:
			// a half-duplex port cannot run LACP
			if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
				port->sm_vars &= ~AD_PORT_LACP_ENABLED;
			else
				port->sm_vars |= AD_PORT_LACP_ENABLED;
			port->sm_vars &= ~AD_PORT_SELECTED;
			__record_default(port);
			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
			port->sm_vars &= ~AD_PORT_MOVED;
			port->sm_rx_state = AD_RX_PORT_DISABLED;	// next state

			/*- Fall Through -*/

		case AD_RX_PORT_DISABLED:
			port->sm_vars &= ~AD_PORT_MATCHED;
			break;
		case AD_RX_LACP_DISABLED:
			port->sm_vars &= ~AD_PORT_SELECTED;
			__record_default(port);
			port->partner_oper.port_state &= ~AD_STATE_AGGREGATION;
			port->sm_vars |= AD_PORT_MATCHED;
			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
			break;
		case AD_RX_EXPIRED:
			// Reset of the Synchronization flag (Standard 43.4.12).
			// This reset causes the port to be disabled in the
			// COLLECTING_DISTRIBUTING state of the mux machine in
			// case of EXPIRED, even if LINK_DOWN didn't arrive for
			// the port.
			port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION;
			port->sm_vars &= ~AD_PORT_MATCHED;
			port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY;
			port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
			port->actor_oper_port_state |= AD_STATE_EXPIRED;
			break;
		case AD_RX_DEFAULTED:
			__update_default_selected(port);
			__record_default(port);
			port->sm_vars |= AD_PORT_MATCHED;
			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
			break;
		case AD_RX_CURRENT:
			// detect loopback situation
			if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
				// INFO_RECEIVED_LOOPBACK_FRAMES
				pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
				       "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
				       port->slave->bond->dev->name, port->slave->dev->name);
				return;
			}
			__update_selected(lacpdu, port);
			__update_ntt(lacpdu, port);
			__record_pdu(lacpdu, port);
			port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
			break;
		default:	//to silence the compiler
			break;
		}
	}
}

/**
 * ad_tx_machine - handle a port's tx state machine
 * @port: the port we're looking at
 *
 * Rate-limited transmit: sends a LACPDU only when ntt is set, LACP is
 * enabled on the port, and the per-port tx timer has expired.
 */
static void ad_tx_machine(struct port *port)
{
	// check if tx timer expired, to verify that we do not send more than 3 packets per second
	if (port->sm_tx_timer_counter && !(--port->sm_tx_timer_counter)) {
		// check if there is something to send
		if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) {
			__update_lacpdu_from_port(port);

			if (ad_lacpdu_send(port) >= 0) {
				pr_debug("Sent LACPDU on port %d\n",
					 port->actor_port_number);

				/* mark ntt as false, so it will not be sent
				   again until demanded */
				port->ntt = false;
			}
		}
		// restart tx timer (to verify that we will not exceed AD_MAX_TX_IN_SECOND)
		port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
	}
}

/**
 * ad_periodic_machine - handle a port's
periodic state machine
 * @port: the port we're looking at
 *
 * Turn ntt flag on periodically to perform periodic transmission of lacpdu's.
 */
static void ad_periodic_machine(struct port *port)
{
	periodic_states_t last_state;

	// keep current state machine state to compare later if it was changed
	last_state = port->sm_periodic_state;

	// check if port was reinitialized
	if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
	    (!(port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & AD_STATE_LACP_ACTIVITY))
	   ) {
		port->sm_periodic_state = AD_NO_PERIODIC;	// next state
	}
	// check if state machine should change state
	else if (port->sm_periodic_timer_counter) {
		// check if periodic state machine expired
		if (!(--port->sm_periodic_timer_counter)) {
			// if expired then do tx
			port->sm_periodic_state = AD_PERIODIC_TX;	// next state
		} else {
			// If not expired, check if there is some new timeout
			// parameter from the partner state
			switch (port->sm_periodic_state) {
			case AD_FAST_PERIODIC:
				if (!(port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT))
					port->sm_periodic_state = AD_SLOW_PERIODIC;	// next state
				break;
			case AD_SLOW_PERIODIC:
				if ((port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) {
					// stop current timer
					port->sm_periodic_timer_counter = 0;
					port->sm_periodic_state = AD_PERIODIC_TX;	// next state
				}
				break;
			default:	//to silence the compiler
				break;
			}
		}
	} else {
		// no timer running: move on from the transient states
		switch (port->sm_periodic_state) {
		case AD_NO_PERIODIC:
			port->sm_periodic_state = AD_FAST_PERIODIC;	// next state
			break;
		case AD_PERIODIC_TX:
			if (!(port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT))
				port->sm_periodic_state = AD_SLOW_PERIODIC;	// next state
			else
				port->sm_periodic_state = AD_FAST_PERIODIC;	// next state
			break;
		default:	//to silence the compiler
			break;
		}
	}

	// check if the state machine was changed
	if (port->sm_periodic_state != last_state) {
		pr_debug("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n",
			 port->actor_port_number, last_state,
			 port->sm_periodic_state);
		switch (port->sm_periodic_state) {
		case AD_NO_PERIODIC:
			port->sm_periodic_timer_counter = 0;	// zero timer
			break;
		case AD_FAST_PERIODIC:
			port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_FAST_PERIODIC_TIME))-1;	// decrement 1 tick we lost in the PERIODIC_TX cycle
			break;
		case AD_SLOW_PERIODIC:
			port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_SLOW_PERIODIC_TIME))-1;	// decrement 1 tick we lost in the PERIODIC_TX cycle
			break;
		case AD_PERIODIC_TX:
			port->ntt = true;
			break;
		default:	//to silence the compiler
			break;
		}
	}
}

/**
 * ad_port_selection_logic - select aggregation groups
 * @port: the port we're looking at
 *
 * Select aggregation groups, and assign each port for it's aggregetor. The
 * selection logic is called in the inititalization (after all the handshkes),
 * and after every lacpdu receive (if selected is off).
 */
static void ad_port_selection_logic(struct port *port)
{
	struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator;
	struct port *last_port = NULL, *curr_port;
	int found = 0;

	// if the port is already Selected, do nothing
	if (port->sm_vars & AD_PORT_SELECTED)
		return;

	// if the port is connected to other aggregator, detach it
	if (port->aggregator) {
		// detach the port from its former aggregator
		temp_aggregator = port->aggregator;
		for (curr_port = temp_aggregator->lag_ports; curr_port;
		     last_port = curr_port,
		     curr_port = curr_port->next_port_in_aggregator) {
			if (curr_port == port) {
				temp_aggregator->num_of_ports--;
				if (!last_port) {	// if it is the first port attached to the aggregator
					temp_aggregator->lag_ports = port->next_port_in_aggregator;
				} else {	// not the first port attached to the aggregator
					last_port->next_port_in_aggregator = port->next_port_in_aggregator;
				}

				// clear the port's relations to this aggregator
				port->aggregator = NULL;
				port->next_port_in_aggregator = NULL;
				port->actor_port_aggregator_identifier = 0;

				pr_debug("Port %d left LAG %d\n",
					 port->actor_port_number,
					 temp_aggregator->aggregator_identifier);
				// if the aggregator is empty, clear its parameters, and set it ready to be attached
				if (!temp_aggregator->lag_ports)
					ad_clear_agg(temp_aggregator);
				break;
			}
		}
		if (!curr_port) {
			// meaning: the port was related to an aggregator but was
			// not on the aggregator port list
			pr_warning("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
				   port->slave->bond->dev->name,
				   port->actor_port_number,
				   port->slave->dev->name,
				   port->aggregator->aggregator_identifier);
		}
	}
	// search on all aggregators for a suitable aggregator for this port
	for (aggregator = __get_first_agg(port); aggregator;
	     aggregator = __get_next_agg(aggregator)) {
		// keep a free aggregator for later use (if needed)
		if (!aggregator->lag_ports) {
			if (!free_aggregator)
				free_aggregator = aggregator;
			continue;
		}
		// check if current aggregator suits us
		if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && // if all parameters match AND
		     !MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(port->partner_oper.system)) &&
		     (aggregator->partner_system_priority == port->partner_oper.system_priority) &&
		     (aggregator->partner_oper_aggregator_key == port->partner_oper.key)
		    ) &&
		    ((MAC_ADDRESS_COMPARE(&(port->partner_oper.system), &(null_mac_addr)) && // partner answers
		      !aggregator->is_individual)  // but is not individual OR
		    )
		   ) {
			// attach to the founded aggregator
			port->aggregator = aggregator;
			port->actor_port_aggregator_identifier = port->aggregator->aggregator_identifier;
			port->next_port_in_aggregator = aggregator->lag_ports;
			port->aggregator->num_of_ports++;
			aggregator->lag_ports = port;
			pr_debug("Port %d joined LAG %d(existing LAG)\n",
				 port->actor_port_number,
				 port->aggregator->aggregator_identifier);

			// mark this port as selected
			port->sm_vars |= AD_PORT_SELECTED;
			found = 1;
			break;
		}
	}

	// the port couldn't find an aggregator - attach it to a new aggregator
	if (!found) {
		if (free_aggregator) {
			// assign port a new aggregator
			port->aggregator = free_aggregator;
			port->actor_port_aggregator_identifier = port->aggregator->aggregator_identifier;

			// update the new aggregator's parameters
			// if port was responsed from the end-user
			if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)
				/* if port is full duplex */
				port->aggregator->is_individual = false;
			else
				port->aggregator->is_individual = true;

			port->aggregator->actor_admin_aggregator_key = port->actor_admin_port_key;
			port->aggregator->actor_oper_aggregator_key = port->actor_oper_port_key;
			port->aggregator->partner_system = port->partner_oper.system;
			port->aggregator->partner_system_priority = port->partner_oper.system_priority;
			port->aggregator->partner_oper_aggregator_key = port->partner_oper.key;
			port->aggregator->receive_state = 1;
			port->aggregator->transmit_state = 1;
			port->aggregator->lag_ports = port;
			port->aggregator->num_of_ports++;

			// mark this port as selected
			port->sm_vars |= AD_PORT_SELECTED;

			pr_debug("Port %d joined LAG %d(new LAG)\n",
				 port->actor_port_number,
				 port->aggregator->aggregator_identifier);
		} else {
			pr_err("%s: Port %d (on %s) did not find a suitable aggregator\n",
			       port->slave->bond->dev->name,
			       port->actor_port_number, port->slave->dev->name);
		}
	}
	// if all aggregator's ports are READY_N == TRUE, set ready=TRUE in all aggregator's ports
	// else set ready=FALSE in all aggregator's ports
	__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));

	aggregator = __get_first_agg(port);
	ad_agg_selection_logic(aggregator);
}

/*
 * Decide if "agg" is a better choice for the new active aggregator that
 * the current best, according to the ad_select policy.
 */
static struct aggregator *ad_agg_selection_test(struct aggregator *best,
						struct aggregator *curr)
{
	/*
	 * 0. If no best, select current.
	 *
	 * 1. If the current agg is not individual, and the best is
	 *    individual, select current.
	 *
	 * 2. If current agg is individual and the best is not, keep best.
	 *
	 * 3. Therefore, current and best are both individual or both not
	 *    individual, so:
	 *
	 * 3a. If current agg partner replied, and best agg partner did not,
	 *     select current.
	 *
	 * 3b. If current agg partner did not reply and best agg partner
	 *     did reply, keep best.
	 *
	 * 4.  Therefore, current and best both have partner replies or
	 *     both do not, so perform selection policy:
	 *
	 * BOND_AD_COUNT: Select by count of ports.  If count is equal,
	 *     select by bandwidth.
	 *
	 * BOND_AD_STABLE, BOND_AD_BANDWIDTH: Select by bandwidth.
	 */
	if (!best)
		return curr;

	if (!curr->is_individual && best->is_individual)
		return curr;

	if (curr->is_individual && !best->is_individual)
		return best;

	if (__agg_has_partner(curr) && !__agg_has_partner(best))
		return curr;

	if (!__agg_has_partner(curr) && __agg_has_partner(best))
		return best;

	switch (__get_agg_selection_mode(curr->lag_ports)) {
	case BOND_AD_COUNT:
		if (curr->num_of_ports > best->num_of_ports)
			return curr;

		if (curr->num_of_ports < best->num_of_ports)
			return best;

		/*FALLTHROUGH*/
	case BOND_AD_STABLE:
	case BOND_AD_BANDWIDTH:
		if (__get_agg_bandwidth(curr) > __get_agg_bandwidth(best))
			return curr;

		break;

	default:
		pr_warning("%s: Impossible agg select mode %d\n",
			   curr->slave->bond->dev->name,
			   __get_agg_selection_mode(curr->lag_ports));
		break;
	}

	return best;
}

// an aggregator is "up" when its first port's device is running with carrier
static int agg_device_up(const struct aggregator *agg)
{
	struct port *port = agg->lag_ports;
	if (!port)
		return 0;
	return (netif_running(port->slave->dev) &&
		netif_carrier_ok(port->slave->dev));
}

/**
 * ad_agg_selection_logic - select an aggregation group for a team
 * @aggregator: the aggregator we're looking at
 *
 * It is assumed that only one aggregator may be selected for a team.
*
 * The logic of this function is to select the aggregator according to
 * the ad_select policy:
 *
 * BOND_AD_STABLE: select the aggregator with the most ports attached to
 * it, and to reselect the active aggregator only if the previous
 * aggregator has no more ports related to it.
 *
 * BOND_AD_BANDWIDTH: select the aggregator with the highest total
 * bandwidth, and reselect whenever a link state change takes place or the
 * set of slaves in the bond changes.
 *
 * BOND_AD_COUNT: select the aggregator with largest number of ports
 * (slaves), and reselect whenever a link state change takes place or the
 * set of slaves in the bond changes.
 *
 * FIXME: this function MUST be called with the first agg in the bond, or
 * __get_active_agg() won't work correctly. This function should be better
 * called with the bond itself, and retrieve the first agg from it.
 */
static void ad_agg_selection_logic(struct aggregator *agg)
{
	struct aggregator *best, *active, *origin;
	struct port *port;

	origin = agg;
	active = __get_active_agg(agg);
	// only consider the current active aggregator as a candidate when
	// its underlying device is actually up
	best = (active && agg_device_up(active)) ? active : NULL;

	do {
		agg->is_active = 0;

		if (agg->num_of_ports && agg_device_up(agg))
			best = ad_agg_selection_test(best, agg);

	} while ((agg = __get_next_agg(agg)));

	if (best &&
	    __get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
		/*
		 * For the STABLE policy, don't replace the old active
		 * aggregator if it's still active (it has an answering
		 * partner) or if both the best and active don't have an
		 * answering partner.
		 */
		if (active && active->lag_ports &&
		    active->lag_ports->is_enabled &&
		    (__agg_has_partner(active) ||
		     (!__agg_has_partner(active) && !__agg_has_partner(best)))) {
			if (!(!active->actor_oper_aggregator_key &&
			      best->actor_oper_aggregator_key)) {
				best = NULL;
				active->is_active = 1;
			}
		}
	}

	// keeping the current active aggregator is not a change
	if (best && (best == active)) {
		best = NULL;
		active->is_active = 1;
	}

	// if there is new best aggregator, activate it
	if (best) {
		pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
			 best->aggregator_identifier, best->num_of_ports,
			 best->actor_oper_aggregator_key,
			 best->partner_oper_aggregator_key,
			 best->is_individual, best->is_active);
		pr_debug("best ports %p slave %p %s\n",
			 best->lag_ports, best->slave,
			 best->slave ? best->slave->dev->name : "NULL");

		for (agg = __get_first_agg(best->lag_ports); agg;
		     agg = __get_next_agg(agg)) {

			pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
				 agg->aggregator_identifier, agg->num_of_ports,
				 agg->actor_oper_aggregator_key,
				 agg->partner_oper_aggregator_key,
				 agg->is_individual, agg->is_active);
		}

		// check if any partner replys
		if (best->is_individual) {
			pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
				   best->slave ? best->slave->bond->dev->name : "NULL");
		}

		best->is_active = 1;
		pr_debug("LAG %d chosen as the active LAG\n",
			 best->aggregator_identifier);
		pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
			 best->aggregator_identifier, best->num_of_ports,
			 best->actor_oper_aggregator_key,
			 best->partner_oper_aggregator_key,
			 best->is_individual, best->is_active);

		// disable the ports that were related to the former active_aggregator
		if (active) {
			for (port = active->lag_ports; port;
			     port = port->next_port_in_aggregator) {
				__disable_port(port);
			}
		}
	}

	/*
	 * if the selected aggregator is of join individuals
	 * (partner_system is NULL), enable their ports
	 */
	active = __get_active_agg(origin);

	if (active) {
		if (!__agg_has_partner(active)) {
			for (port = active->lag_ports; port;
			     port = port->next_port_in_aggregator) {
				__enable_port(port);
			}
		}
	}

	// refresh the bond's carrier state after a possible agg change
	if (origin->slave) {
		struct bonding *bond;

		bond = bond_get_bond_by_slave(origin->slave);
		if (bond)
			bond_3ad_set_carrier(bond);
	}
}

/**
 * ad_clear_agg - clear a given aggregator's parameters
 * @aggregator: the aggregator we're looking at
 *
 * Resets the aggregator to the "free" state so the selection logic may
 * attach new ports to it.
 */
static void ad_clear_agg(struct aggregator *aggregator)
{
	if (aggregator) {
		aggregator->is_individual = false;
		aggregator->actor_admin_aggregator_key = 0;
		aggregator->actor_oper_aggregator_key = 0;
		aggregator->partner_system = null_mac_addr;
		aggregator->partner_system_priority = 0;
		aggregator->partner_oper_aggregator_key = 0;
		aggregator->receive_state = 0;
		aggregator->transmit_state = 0;
		aggregator->lag_ports = NULL;
		aggregator->is_active = 0;
		aggregator->num_of_ports = 0;
		pr_debug("LAG %d was cleared\n",
			 aggregator->aggregator_identifier);
	}
}

/**
 * ad_initialize_agg - initialize a given aggregator's parameters
 * @aggregator: the aggregator we're looking at
 *
 * Full reinitialization: clears the selection-related fields and also the
 * aggregator's identity (MAC, identifier, slave).
 */
static void ad_initialize_agg(struct aggregator *aggregator)
{
	if (aggregator) {
		ad_clear_agg(aggregator);

		aggregator->aggregator_mac_address = null_mac_addr;
		aggregator->aggregator_identifier = 0;
		aggregator->slave = NULL;
	}
}

/**
 *
ad_initialize_port - initialize a given port's parameters * @aggregator: the aggregator we're looking at * @lacp_fast: boolean. whether fast periodic should be used * */ static void ad_initialize_port(struct port *port, int lacp_fast) { static const struct port_params tmpl = { .system_priority = 0xffff, .key = 1, .port_number = 1, .port_priority = 0xff, .port_state = 1, }; static const struct lacpdu lacpdu = { .subtype = 0x01, .version_number = 0x01, .tlv_type_actor_info = 0x01, .actor_information_length = 0x14, .tlv_type_partner_info = 0x02, .partner_information_length = 0x14, .tlv_type_collector_info = 0x03, .collector_information_length = 0x10, .collector_max_delay = htons(AD_COLLECTOR_MAX_DELAY), }; if (port) { port->actor_port_number = 1; port->actor_port_priority = 0xff; port->actor_system = null_mac_addr; port->actor_system_priority = 0xffff; port->actor_port_aggregator_identifier = 0; port->ntt = false; port->actor_admin_port_key = 1; port->actor_oper_port_key = 1; port->actor_admin_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY; port->actor_oper_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY; if (lacp_fast) port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT; memcpy(&port->partner_admin, &tmpl, sizeof(tmpl)); memcpy(&port->partner_oper, &tmpl, sizeof(tmpl)); port->is_enabled = true; // ****** private parameters ****** port->sm_vars = 0x3; port->sm_rx_state = 0; port->sm_rx_timer_counter = 0; port->sm_periodic_state = 0; port->sm_periodic_timer_counter = 0; port->sm_mux_state = 0; port->sm_mux_timer_counter = 0; port->sm_tx_state = 0; port->sm_tx_timer_counter = 0; port->slave = NULL; port->aggregator = NULL; port->next_port_in_aggregator = NULL; port->transaction_id = 0; memcpy(&port->lacpdu, &lacpdu, sizeof(lacpdu)); } } /** * ad_enable_collecting_distributing - enable a port's transmit/receive * @port: the port we're looking at * * Enable @port if it's in an active aggregator */ static void 
ad_enable_collecting_distributing(struct port *port) { if (port->aggregator->is_active) { pr_debug("Enabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier); __enable_port(port); } } /** * ad_disable_collecting_distributing - disable a port's transmit/receive * @port: the port we're looking at * */ static void ad_disable_collecting_distributing(struct port *port) { if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) { pr_debug("Disabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier); __disable_port(port); } } #if 0 /** * ad_marker_info_send - send a marker information frame * @port: the port we're looking at * * This function does nothing since we decided not to implement send and handle * response for marker PDU's, in this stage, but only to respond to marker * information. */ static void ad_marker_info_send(struct port *port) { struct bond_marker marker; u16 index; // fill the marker PDU with the appropriate values marker.subtype = 0x02; marker.version_number = 0x01; marker.tlv_type = AD_MARKER_INFORMATION_SUBTYPE; marker.marker_length = 0x16; // convert requester_port to Big Endian marker.requester_port = (((port->actor_port_number & 0xFF) << 8) |((u16)(port->actor_port_number & 0xFF00) >> 8)); marker.requester_system = port->actor_system; // convert requester_port(u32) to Big Endian marker.requester_transaction_id = (((++port->transaction_id & 0xFF) << 24) | ((port->transaction_id & 0xFF00) << 8) | ((port->transaction_id & 0xFF0000) >> 8) | ((port->transaction_id & 0xFF000000) >> 24)); marker.pad = 0; marker.tlv_type_terminator = 0x00; marker.terminator_length = 0x00; for (index = 0; index < 90; index++) marker.reserved_90[index] = 0; // send the marker information if (ad_marker_send(port, &marker) >= 0) { pr_debug("Sent Marker Information on port %d\n", port->actor_port_number); } } #endif /** * ad_marker_info_received - handle receive 
of a Marker information frame * @marker_info: Marker info received * @port: the port we're looking at * */ static void ad_marker_info_received(struct bond_marker *marker_info, struct port *port) { struct bond_marker marker; // copy the received marker data to the response marker //marker = *marker_info; memcpy(&marker, marker_info, sizeof(struct bond_marker)); // change the marker subtype to marker response marker.tlv_type = AD_MARKER_RESPONSE_SUBTYPE; // send the marker response if (ad_marker_send(port, &marker) >= 0) { pr_debug("Sent Marker Response on port %d\n", port->actor_port_number); } } /** * ad_marker_response_received - handle receive of a marker response frame * @marker: marker PDU received * @port: the port we're looking at * * This function does nothing since we decided not to implement send and handle * response for marker PDU's, in this stage, but only to respond to marker * information. */ static void ad_marker_response_received(struct bond_marker *marker, struct port *port) { marker = NULL; /* just to satisfy the compiler */ port = NULL; /* just to satisfy the compiler */ // DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW } ////////////////////////////////////////////////////////////////////////////////////// // ================= AD exported functions to the main bonding code ================== ////////////////////////////////////////////////////////////////////////////////////// // Check aggregators status in team every T seconds #define AD_AGGREGATOR_SELECTION_TIMER 8 /* * bond_3ad_initiate_agg_selection(struct bonding *bond) * * Set the aggregation selection timer, to initiate an agg selection in * the very near future. Called during first initialization, and during * any down to up transitions of the bond. 
*/ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) { BOND_AD_INFO(bond).agg_select_timer = timeout; } /** * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures * @bond: bonding struct to work on * @tick_resolution: tick duration (millisecond resolution) * * Can be called only after the mac address of the bond is set. */ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution) { // check that the bond is not initialized yet if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr), bond->dev->dev_addr)) { BOND_AD_INFO(bond).aggregator_identifier = 0; BOND_AD_INFO(bond).system.sys_priority = 0xFFFF; BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr); // initialize how many times this module is called in one second(should be about every 100ms) ad_ticks_per_sec = tick_resolution; bond_3ad_initiate_agg_selection(bond, AD_AGGREGATOR_SELECTION_TIMER * ad_ticks_per_sec); } } /** * bond_3ad_bind_slave - initialize a slave's port * @slave: slave struct to work on * * Returns: 0 on success * < 0 on error */ int bond_3ad_bind_slave(struct slave *slave) { struct bonding *bond = bond_get_bond_by_slave(slave); struct port *port; struct aggregator *aggregator; if (bond == NULL) { pr_err("%s: The slave %s is not attached to its bond\n", slave->bond->dev->name, slave->dev->name); return -1; } //check that the slave has not been initialized yet. 
if (SLAVE_AD_INFO(slave).port.slave != slave) { // port initialization port = &(SLAVE_AD_INFO(slave).port); ad_initialize_port(port, bond->params.lacp_fast); __initialize_port_locks(slave); port->slave = slave; port->actor_port_number = SLAVE_AD_INFO(slave).id; // key is determined according to the link speed, duplex and user key(which is yet not supported) // ------------------------------------------------------------ // Port key : | User key | Speed |Duplex| // ------------------------------------------------------------ // 16 6 1 0 port->actor_admin_port_key = 0; // initialize this parameter port->actor_admin_port_key |= __get_duplex(port); port->actor_admin_port_key |= (__get_link_speed(port) << 1); port->actor_oper_port_key = port->actor_admin_port_key; // if the port is not full duplex, then the port should be not lacp Enabled if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) port->sm_vars &= ~AD_PORT_LACP_ENABLED; // actor system is the bond's system port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr; // tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second) port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND; port->aggregator = NULL; port->next_port_in_aggregator = NULL; __disable_port(port); // aggregator initialization aggregator = &(SLAVE_AD_INFO(slave).aggregator); ad_initialize_agg(aggregator); aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr); aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier; aggregator->slave = slave; aggregator->is_active = 0; aggregator->num_of_ports = 0; } return 0; } /** * bond_3ad_unbind_slave - deinitialize a slave's port * @slave: slave struct to work on * * Search for the aggregator that is related to this port, remove the * aggregator and assign another aggregator for other port related to it * (if any), and remove the port. 
*/
void bond_3ad_unbind_slave(struct slave *slave)
{
	struct port *port, *prev_port, *temp_port;
	struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
	int select_new_active_agg = 0;

	// find the aggregator related to this slave
	aggregator = &(SLAVE_AD_INFO(slave).aggregator);

	// find the port related to this slave
	port = &(SLAVE_AD_INFO(slave).port);

	// if slave is null, the whole port is not initialized
	if (!port->slave) {
		pr_warning("Warning: %s: Trying to unbind an uninitialized port on %s\n",
			   slave->bond->dev->name, slave->dev->name);
		return;
	}

	pr_debug("Unbinding Link Aggregation Group %d\n",
		 aggregator->aggregator_identifier);

	/* Tell the partner that this port is not suitable for aggregation */
	port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
	__update_lacpdu_from_port(port);
	ad_lacpdu_send(port);

	// check if this aggregator is occupied
	if (aggregator->lag_ports) {
		// check if there are other ports related to this aggregator except
		// the port related to this slave(thats ensure us that there is a
		// reason to search for new aggregator, and that we will find one
		if ((aggregator->lag_ports != port) ||
		    (aggregator->lag_ports->next_port_in_aggregator)) {
			// find new aggregator for the related port(s)
			// (an aggregator is reusable when it is empty, or when
			// its only member is the very port being removed)
			new_aggregator = __get_first_agg(port);
			for (; new_aggregator;
			     new_aggregator = __get_next_agg(new_aggregator)) {
				// if the new aggregator is empty, or it is connected to our port only
				if (!new_aggregator->lag_ports ||
				    ((new_aggregator->lag_ports == port) &&
				     !new_aggregator->lag_ports->next_port_in_aggregator))
					break;
			}
			// if new aggregator found, copy the aggregator's parameters
			// and connect the related lag_ports to the new aggregator
			if ((new_aggregator) &&
			    ((!new_aggregator->lag_ports) ||
			     ((new_aggregator->lag_ports == port) &&
			      !new_aggregator->lag_ports->next_port_in_aggregator))) {
				pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n",
					 aggregator->aggregator_identifier,
					 new_aggregator->aggregator_identifier);

				if ((new_aggregator->lag_ports == port) &&
				    new_aggregator->is_active) {
					pr_info("%s: Removing an active aggregator\n",
						aggregator->slave->bond->dev->name);
					// select new active aggregator
					select_new_active_agg = 1;
				}

				// migrate the whole 802.3ad state of the old
				// aggregator onto the replacement
				new_aggregator->is_individual = aggregator->is_individual;
				new_aggregator->actor_admin_aggregator_key = aggregator->actor_admin_aggregator_key;
				new_aggregator->actor_oper_aggregator_key = aggregator->actor_oper_aggregator_key;
				new_aggregator->partner_system = aggregator->partner_system;
				new_aggregator->partner_system_priority = aggregator->partner_system_priority;
				new_aggregator->partner_oper_aggregator_key = aggregator->partner_oper_aggregator_key;
				new_aggregator->receive_state = aggregator->receive_state;
				new_aggregator->transmit_state = aggregator->transmit_state;
				new_aggregator->lag_ports = aggregator->lag_ports;
				new_aggregator->is_active = aggregator->is_active;
				new_aggregator->num_of_ports = aggregator->num_of_ports;

				// update the information that is written on the ports about the aggregator
				for (temp_port = aggregator->lag_ports; temp_port;
				     temp_port = temp_port->next_port_in_aggregator) {
					temp_port->aggregator = new_aggregator;
					temp_port->actor_port_aggregator_identifier = new_aggregator->aggregator_identifier;
				}

				// clear the aggregator
				ad_clear_agg(aggregator);

				if (select_new_active_agg)
					ad_agg_selection_logic(__get_first_agg(port));
			} else {
				pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
					   slave->bond->dev->name);
			}
		} else {
			// in case that the only port related to this aggregator is the one we want to remove
			select_new_active_agg = aggregator->is_active;
			// clear the aggregator
			ad_clear_agg(aggregator);
			if (select_new_active_agg) {
				pr_info("%s: Removing an active aggregator\n",
					slave->bond->dev->name);
				// select new active aggregator
				ad_agg_selection_logic(__get_first_agg(port));
			}
		}
	}

	pr_debug("Unbinding port %d\n", port->actor_port_number);
	// find the aggregator that this port is connected to
	// (walk every aggregator and unlink the port from whichever
	// lag_ports list currently holds it)
	temp_aggregator = __get_first_agg(port);
	for (; temp_aggregator;
	     temp_aggregator = __get_next_agg(temp_aggregator)) {
		prev_port = NULL;
		// search the port in the aggregator's related ports
		for (temp_port = temp_aggregator->lag_ports; temp_port;
		     prev_port = temp_port,
		     temp_port = temp_port->next_port_in_aggregator) {
			if (temp_port == port) {
				// the aggregator found - detach the port from this aggregator
				if (prev_port)
					prev_port->next_port_in_aggregator = temp_port->next_port_in_aggregator;
				else
					temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
				temp_aggregator->num_of_ports--;
				if (temp_aggregator->num_of_ports == 0) {
					select_new_active_agg = temp_aggregator->is_active;
					// clear the aggregator
					ad_clear_agg(temp_aggregator);
					if (select_new_active_agg) {
						pr_info("%s: Removing an active aggregator\n",
							slave->bond->dev->name);
						// select new active aggregator
						ad_agg_selection_logic(__get_first_agg(port));
					}
				}
				break;
			}
		}
	}
	port->slave = NULL;
}

/**
 * bond_3ad_state_machine_handler - handle state machines timeout
 * @bond: bonding struct to work on
 *
 * The state machine handling concept in this module is to check every tick
 * which state machine should operate any function. The execution order is
 * round robin, so when we have an interaction between state machines, the
 * reply of one to each other might be delayed until next tick.
 *
 * This function also complete the initialization when the agg_select_timer
 * times out, and it selects an aggregator for the ports that are yet not
 * related to any aggregator, and selects the active aggregator for a bond.
*/ void bond_3ad_state_machine_handler(struct work_struct *work) { struct bonding *bond = container_of(work, struct bonding, ad_work.work); struct port *port; struct aggregator *aggregator; read_lock(&bond->lock); //check if there are any slaves if (bond->slave_cnt == 0) goto re_arm; // check if agg_select_timer timer after initialize is timed out if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) { // select the active aggregator for the bond if ((port = __get_first_port(bond))) { if (!port->slave) { pr_warning("%s: Warning: bond's first port is uninitialized\n", bond->dev->name); goto re_arm; } aggregator = __get_first_agg(port); ad_agg_selection_logic(aggregator); } bond_3ad_set_carrier(bond); } // for each port run the state machines for (port = __get_first_port(bond); port; port = __get_next_port(port)) { if (!port->slave) { pr_warning("%s: Warning: Found an uninitialized port\n", bond->dev->name); goto re_arm; } /* Lock around state machines to protect data accessed * by all (e.g., port->sm_vars). ad_rx_machine may run * concurrently due to incoming LACPDU. */ __get_state_machine_lock(port); ad_rx_machine(NULL, port); ad_periodic_machine(port); ad_port_selection_logic(port); ad_mux_machine(port); ad_tx_machine(port); // turn off the BEGIN bit, since we already handled it if (port->sm_vars & AD_PORT_BEGIN) port->sm_vars &= ~AD_PORT_BEGIN; __release_state_machine_lock(port); } re_arm: queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); read_unlock(&bond->lock); } /** * bond_3ad_rx_indication - handle a received frame * @lacpdu: received lacpdu * @slave: slave struct to work on * @length: length of the data received * * It is assumed that frames that were sent on this NIC don't returned as new * received frames (loopback). Since only the payload is given to this * function, it check for loopback. 
*/ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length) { struct port *port; int ret = RX_HANDLER_ANOTHER; if (length >= sizeof(struct lacpdu)) { port = &(SLAVE_AD_INFO(slave).port); if (!port->slave) { pr_warning("%s: Warning: port of slave %s is uninitialized\n", slave->dev->name, slave->bond->dev->name); return ret; } switch (lacpdu->subtype) { case AD_TYPE_LACPDU: ret = RX_HANDLER_CONSUMED; pr_debug("Received LACPDU on port %d\n", port->actor_port_number); /* Protect against concurrent state machines */ __get_state_machine_lock(port); ad_rx_machine(lacpdu, port); __release_state_machine_lock(port); break; case AD_TYPE_MARKER: ret = RX_HANDLER_CONSUMED; // No need to convert fields to Little Endian since we don't use the marker's fields. switch (((struct bond_marker *)lacpdu)->tlv_type) { case AD_MARKER_INFORMATION_SUBTYPE: pr_debug("Received Marker Information on port %d\n", port->actor_port_number); ad_marker_info_received((struct bond_marker *)lacpdu, port); break; case AD_MARKER_RESPONSE_SUBTYPE: pr_debug("Received Marker Response on port %d\n", port->actor_port_number); ad_marker_response_received((struct bond_marker *)lacpdu, port); break; default: pr_debug("Received an unknown Marker subtype on slot %d\n", port->actor_port_number); } } } return ret; } /** * bond_3ad_adapter_speed_changed - handle a slave's speed change indication * @slave: slave struct to work on * * Handle reselection of aggregator (if needed) for this port. 
*/ void bond_3ad_adapter_speed_changed(struct slave *slave) { struct port *port; port = &(SLAVE_AD_INFO(slave).port); // if slave is null, the whole port is not initialized if (!port->slave) { pr_warning("Warning: %s: speed changed for uninitialized port on %s\n", slave->bond->dev->name, slave->dev->name); return; } port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; port->actor_oper_port_key = port->actor_admin_port_key |= (__get_link_speed(port) << 1); pr_debug("Port %d changed speed\n", port->actor_port_number); // there is no need to reselect a new aggregator, just signal the // state machines to reinitialize port->sm_vars |= AD_PORT_BEGIN; } /** * bond_3ad_adapter_duplex_changed - handle a slave's duplex change indication * @slave: slave struct to work on * * Handle reselection of aggregator (if needed) for this port. */ void bond_3ad_adapter_duplex_changed(struct slave *slave) { struct port *port; port = &(SLAVE_AD_INFO(slave).port); // if slave is null, the whole port is not initialized if (!port->slave) { pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n", slave->bond->dev->name, slave->dev->name); return; } port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; port->actor_oper_port_key = port->actor_admin_port_key |= __get_duplex(port); pr_debug("Port %d changed duplex\n", port->actor_port_number); // there is no need to reselect a new aggregator, just signal the // state machines to reinitialize port->sm_vars |= AD_PORT_BEGIN; } /** * bond_3ad_handle_link_change - handle a slave's link status change indication * @slave: slave struct to work on * @status: whether the link is now up or down * * Handle reselection of aggregator (if needed) for this port. 
*/
void bond_3ad_handle_link_change(struct slave *slave, char link)
{
	struct port *port;

	port = &(SLAVE_AD_INFO(slave).port);

	// if slave is null, the whole port is not initialized
	if (!port->slave) {
		pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
			   slave->bond->dev->name, slave->dev->name);
		return;
	}

	// on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
	// on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
	if (link == BOND_LINK_UP) {
		port->is_enabled = true;
		port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
		port->actor_oper_port_key = port->actor_admin_port_key |= __get_duplex(port);
		port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
		port->actor_oper_port_key = port->actor_admin_port_key |= (__get_link_speed(port) << 1);
	} else {
		/* link has failed */
		port->is_enabled = false;
		port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
		port->actor_oper_port_key = (port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS);
	}
	//BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
	// there is no need to reselect a new aggregator, just signal the
	// state machines to reinitialize
	port->sm_vars |= AD_PORT_BEGIN;
}

/*
 * set link state for bonding master: if we have an active
 * aggregator, we're up, if not, we're down.  Presumes that we cannot
 * have an active aggregator if there are no slaves with link up.
 *
 * This behavior complies with IEEE 802.3 section 43.3.9.
 *
 * Called by bond_set_carrier(). Return zero if carrier state does not
 * change, nonzero if it does.
 */
int bond_3ad_set_carrier(struct bonding *bond)
{
	struct aggregator *active;

	/* NOTE(review): this dereferences bond->first_slave inside
	 * SLAVE_AD_INFO; callers appear to guarantee at least one slave
	 * exists — confirm before calling with an empty bond. */
	active = __get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator));
	if (active) {
		/* are enough slaves available to consider link up? */
		if (active->num_of_ports < bond->params.min_links) {
			if (netif_carrier_ok(bond->dev)) {
				netif_carrier_off(bond->dev);
				return 1;
			}
		} else if (!netif_carrier_ok(bond->dev)) {
			netif_carrier_on(bond->dev);
			return 1;
		}
		return 0;
	}

	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/**
 * __bond_3ad_get_active_agg_info - get information of the active aggregator
 * @bond: bonding struct to work on
 * @ad_info: ad_info struct to fill with the bond's info
 *
 * Returns:   0 on success
 *          < 0 on error
 */
int __bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
{
	struct aggregator *aggregator = NULL;
	struct port *port;

	// scan the ports for the first one attached to an active aggregator
	for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
		if (port->aggregator && port->aggregator->is_active) {
			aggregator = port->aggregator;
			break;
		}
	}

	if (aggregator) {
		ad_info->aggregator_id = aggregator->aggregator_identifier;
		ad_info->ports = aggregator->num_of_ports;
		ad_info->actor_key = aggregator->actor_oper_aggregator_key;
		ad_info->partner_key = aggregator->partner_oper_aggregator_key;
		memcpy(ad_info->partner_system,
		       aggregator->partner_system.mac_addr_value, ETH_ALEN);
		return 0;
	}

	return -1;
}

/* Wrapper used to hold bond->lock so no slave manipulation can occur */
int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
{
	int ret;

	read_lock(&bond->lock);
	ret = __bond_3ad_get_active_agg_info(bond, ad_info);
	read_unlock(&bond->lock);

	return ret;
}

/* Transmit hook for 802.3ad mode: hash the frame onto one slave of the
 * active aggregator, or drop it when no suitable slave exists. */
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
{
	struct slave *slave, *start_at;
	struct bonding *bond = netdev_priv(dev);
	int slave_agg_no;
	int slaves_in_agg;
	int agg_id;
	int i;
	struct ad_info ad_info;
	int res = 1;

	if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
		pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
			 dev->name);
		goto out;
	}

	slaves_in_agg = ad_info.ports;
	agg_id = ad_info.aggregator_id;

	if (slaves_in_agg == 0) {
		/*the aggregator is empty*/
		pr_debug("%s: Error: active aggregator is empty\n", dev->name);
		goto out;
	}

	// hash selects the Nth slave of the active aggregator
	slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);

	bond_for_each_slave(bond, slave, i) {
		struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;

		if (agg && (agg->aggregator_identifier == agg_id)) {
			slave_agg_no--;
			if (slave_agg_no < 0)
				break;
		}
	}

	if (slave_agg_no >= 0) {
		pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
		       dev->name, agg_id);
		goto out;
	}

	start_at = slave;

	// from the hashed slave onward, find the first usable member of
	// the active aggregator and transmit on it
	bond_for_each_slave_from(bond, slave, i, start_at) {
		int slave_agg_id = 0;
		struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;

		if (agg)
			slave_agg_id = agg->aggregator_identifier;

		if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) {
			res = bond_dev_queue_xmit(bond, skb, slave->dev);
			break;
		}
	}

out:
	if (res) {
		/* no suitable interface, frame not sent */
		kfree_skb(skb);
	}

	return NETDEV_TX_OK;
}

/* rx_handler hook: feed received LACPDU frames to the 802.3ad machinery */
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
			 struct slave *slave)
{
	int ret = RX_HANDLER_ANOTHER;
	struct lacpdu *lacpdu, _lacpdu;

	if (skb->protocol != PKT_TYPE_LACPDU)
		return ret;

	lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
	if (!lacpdu)
		return ret;

	read_lock(&bond->lock);
	ret = bond_3ad_rx_indication(lacpdu, slave, skb->len);
	read_unlock(&bond->lock);
	return ret;
}

/*
 * When modify lacp_rate parameter via sysfs,
 * update actor_oper_port_state of each port.
 *
 * Hold slave->state_machine_lock,
 * so we can modify port->actor_oper_port_state,
 * no matter bond is up or down.
*/ void bond_3ad_update_lacp_rate(struct bonding *bond) { int i; struct slave *slave; struct port *port = NULL; int lacp_fast; write_lock_bh(&bond->lock); lacp_fast = bond->params.lacp_fast; bond_for_each_slave(bond, slave, i) { port = &(SLAVE_AD_INFO(slave).port); if (port->slave == NULL) continue; __get_state_machine_lock(port); if (lacp_fast) port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT; else port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT; __release_state_machine_lock(port); } write_unlock_bh(&bond->lock); }
gpl-2.0
alianmohammad/linux-kernel-gem5
drivers/scsi/isci/port_config.c
2152
25706
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "host.h" #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) #define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (1000) enum SCIC_SDS_APC_ACTIVITY { SCIC_SDS_APC_SKIP_PHY, SCIC_SDS_APC_ADD_PHY, SCIC_SDS_APC_START_TIMER, SCIC_SDS_APC_ACTIVITY_MAX }; /* * ****************************************************************************** * General port configuration agent routines * ****************************************************************************** */ /** * * @address_one: A SAS Address to be compared. * @address_two: A SAS Address to be compared. 
* * Compare the two SAS Address and if SAS Address One is greater than SAS * Address Two then return > 0 else if SAS Address One is less than SAS Address * Two return < 0 Otherwise they are the same return 0 A signed value of x > 0 * > y where x is returned for Address One > Address Two y is returned for * Address One < Address Two 0 is returned ofr Address One = Address Two */ static s32 sci_sas_address_compare( struct sci_sas_address address_one, struct sci_sas_address address_two) { if (address_one.high > address_two.high) { return 1; } else if (address_one.high < address_two.high) { return -1; } else if (address_one.low > address_two.low) { return 1; } else if (address_one.low < address_two.low) { return -1; } /* The two SAS Address must be identical */ return 0; } /** * * @controller: The controller object used for the port search. * @phy: The phy object to match. * * This routine will find a matching port for the phy. This means that the * port and phy both have the same broadcast sas address and same received sas * address. The port address or the NULL if there is no matching * port. port address if the port can be found to match the phy. * NULL if there is no matching port for the phy. */ static struct isci_port *sci_port_configuration_agent_find_port( struct isci_host *ihost, struct isci_phy *iphy) { u8 i; struct sci_sas_address port_sas_address; struct sci_sas_address port_attached_device_address; struct sci_sas_address phy_sas_address; struct sci_sas_address phy_attached_device_address; /* * Since this phy can be a member of a wide port check to see if one or * more phys match the sent and received SAS address as this phy in which * case it should participate in the same port. 
*/ sci_phy_get_sas_address(iphy, &phy_sas_address); sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address); for (i = 0; i < ihost->logical_port_entries; i++) { struct isci_port *iport = &ihost->ports[i]; sci_port_get_sas_address(iport, &port_sas_address); sci_port_get_attached_sas_address(iport, &port_attached_device_address); if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 && sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0) return iport; } return NULL; } /** * * @controller: This is the controller object that contains the port agent * @port_agent: This is the port configruation agent for the controller. * * This routine will validate the port configuration is correct for the SCU * hardware. The SCU hardware allows for port configurations as follows. LP0 * -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3) LP1 -> (PE1) LP2 -> (PE2), (PE2, * PE3) LP3 -> (PE3) enum sci_status SCI_SUCCESS the port configuration is valid for * this port configuration agent. SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION * the port configuration is not valid for this port configuration agent. 
*/ static enum sci_status sci_port_configuration_agent_validate_ports( struct isci_host *ihost, struct sci_port_configuration_agent *port_agent) { struct sci_sas_address first_address; struct sci_sas_address second_address; /* * Sanity check the max ranges for all the phys the max index * is always equal to the port range index */ if (port_agent->phy_valid_port_range[0].max_index != 0 || port_agent->phy_valid_port_range[1].max_index != 1 || port_agent->phy_valid_port_range[2].max_index != 2 || port_agent->phy_valid_port_range[3].max_index != 3) return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; /* * This is a request to configure a single x4 port or at least attempt * to make all the phys into a single port */ if (port_agent->phy_valid_port_range[0].min_index == 0 && port_agent->phy_valid_port_range[1].min_index == 0 && port_agent->phy_valid_port_range[2].min_index == 0 && port_agent->phy_valid_port_range[3].min_index == 0) return SCI_SUCCESS; /* * This is a degenerate case where phy 1 and phy 2 are assigned * to the same port this is explicitly disallowed by the hardware * unless they are part of the same x4 port and this condition was * already checked above. */ if (port_agent->phy_valid_port_range[2].min_index == 1) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } /* * PE0 and PE3 can never have the same SAS Address unless they * are part of the same x4 wide port and we have already checked * for this condition. */ sci_phy_get_sas_address(&ihost->phys[0], &first_address); sci_phy_get_sas_address(&ihost->phys[3], &second_address); if (sci_sas_address_compare(first_address, second_address) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } /* * PE0 and PE1 are configured into a 2x1 ports make sure that the * SAS Address for PE0 and PE2 are different since they can not be * part of the same port. 
*/ if (port_agent->phy_valid_port_range[0].min_index == 0 && port_agent->phy_valid_port_range[1].min_index == 1) { sci_phy_get_sas_address(&ihost->phys[0], &first_address); sci_phy_get_sas_address(&ihost->phys[2], &second_address); if (sci_sas_address_compare(first_address, second_address) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } } /* * PE2 and PE3 are configured into a 2x1 ports make sure that the * SAS Address for PE1 and PE3 are different since they can not be * part of the same port. */ if (port_agent->phy_valid_port_range[2].min_index == 2 && port_agent->phy_valid_port_range[3].min_index == 3) { sci_phy_get_sas_address(&ihost->phys[1], &first_address); sci_phy_get_sas_address(&ihost->phys[3], &second_address); if (sci_sas_address_compare(first_address, second_address) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } } return SCI_SUCCESS; } /* * ****************************************************************************** * Manual port configuration agent routines * ****************************************************************************** */ /* verify all of the phys in the same port are using the same SAS address */ static enum sci_status sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, struct sci_port_configuration_agent *port_agent) { u32 phy_mask; u32 assigned_phy_mask; struct sci_sas_address sas_address; struct sci_sas_address phy_assigned_address; u8 port_index; u8 phy_index; assigned_phy_mask = 0; sas_address.high = 0; sas_address.low = 0; for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) { phy_mask = ihost->oem_parameters.ports[port_index].phy_mask; if (!phy_mask) continue; /* * Make sure that one or more of the phys were not already assinged to * a different port. 
*/ if ((phy_mask & ~assigned_phy_mask) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } /* Find the starting phy index for this round through the loop */ for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { if ((phy_mask & (1 << phy_index)) == 0) continue; sci_phy_get_sas_address(&ihost->phys[phy_index], &sas_address); /* * The phy_index can be used as the starting point for the * port range since the hardware starts all logical ports * the same as the PE index. */ port_agent->phy_valid_port_range[phy_index].min_index = port_index; port_agent->phy_valid_port_range[phy_index].max_index = phy_index; if (phy_index != port_index) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } break; } /* * See how many additional phys are being added to this logical port. * Note: We have not moved the current phy_index so we will actually * compare the startting phy with itself. * This is expected and required to add the phy to the port. */ while (phy_index < SCI_MAX_PHYS) { if ((phy_mask & (1 << phy_index)) == 0) continue; sci_phy_get_sas_address(&ihost->phys[phy_index], &phy_assigned_address); if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) { /* * The phy mask specified that this phy is part of the same port * as the starting phy and it is not so fail this configuration */ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } port_agent->phy_valid_port_range[phy_index].min_index = port_index; port_agent->phy_valid_port_range[phy_index].max_index = phy_index; sci_port_add_phy(&ihost->ports[port_index], &ihost->phys[phy_index]); assigned_phy_mask |= (1 << phy_index); phy_index++; } } return sci_port_configuration_agent_validate_ports(ihost, port_agent); } static void mpc_agent_timeout(unsigned long data) { u8 index; struct sci_timer *tmr = (struct sci_timer *)data; struct sci_port_configuration_agent *port_agent; struct isci_host *ihost; unsigned long flags; u16 configure_phy_mask; port_agent = container_of(tmr, typeof(*port_agent), 
timer); ihost = container_of(port_agent, typeof(*ihost), port_agent); spin_lock_irqsave(&ihost->scic_lock, flags); if (tmr->cancel) goto done; port_agent->timer_pending = false; /* Find the mask of phys that are reported read but as yet unconfigured into a port */ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask; for (index = 0; index < SCI_MAX_PHYS; index++) { struct isci_phy *iphy = &ihost->phys[index]; if (configure_phy_mask & (1 << index)) { port_agent->link_up_handler(ihost, port_agent, phy_get_non_dummy_port(iphy), iphy); } } done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } static void sci_mpc_agent_link_up(struct isci_host *ihost, struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { /* If the port is NULL then the phy was not assigned to a port. * This is because the phy was not given the same SAS Address as * the other PHYs in the port. */ if (!iport) return; port_agent->phy_ready_mask |= (1 << iphy->phy_index); sci_port_link_up(iport, iphy); if ((iport->active_phy_mask & (1 << iphy->phy_index))) port_agent->phy_configured_mask |= (1 << iphy->phy_index); } /** * * @controller: This is the controller object that receives the link down * notification. * @port: This is the port object associated with the phy. If the is no * associated port this is an NULL. The port is an invalid * handle only if the phy was never port of this port. This happens when * the phy is not broadcasting the same SAS address as the other phys in the * assigned port. * @phy: This is the phy object which has gone link down. * * This function handles the manual port configuration link down notifications. * Since all ports and phys are associated at initialization time we just turn * around and notifiy the port object of the link down event. If this PHY is * not associated with a port there is no action taken. Is it possible to get a * link down notification from a phy that has no assocoated port? 
*/ static void sci_mpc_agent_link_down( struct isci_host *ihost, struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { if (iport != NULL) { /* * If we can form a new port from the remainder of the phys * then we want to start the timer to allow the SCI User to * cleanup old devices and rediscover the port before * rebuilding the port with the phys that remain in the ready * state. */ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index); port_agent->phy_configured_mask &= ~(1 << iphy->phy_index); /* * Check to see if there are more phys waiting to be * configured into a port. If there are allow the SCI User * to tear down this port, if necessary, and then reconstruct * the port after the timeout. */ if ((port_agent->phy_configured_mask == 0x0000) && (port_agent->phy_ready_mask != 0x0000) && !port_agent->timer_pending) { port_agent->timer_pending = true; sci_mod_timer(&port_agent->timer, SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT); } sci_port_link_down(iport, iphy); } } /* verify phys are assigned a valid SAS address for automatic port * configuration mode. */ static enum sci_status sci_apc_agent_validate_phy_configuration(struct isci_host *ihost, struct sci_port_configuration_agent *port_agent) { u8 phy_index; u8 port_index; struct sci_sas_address sas_address; struct sci_sas_address phy_assigned_address; phy_index = 0; while (phy_index < SCI_MAX_PHYS) { port_index = phy_index; /* Get the assigned SAS Address for the first PHY on the controller. 
 */
		sci_phy_get_sas_address(&ihost->phys[phy_index],
					&sas_address);

		/* Group consecutive phys sharing this SAS address into one
		 * candidate port range; stop at the first mismatch. */
		while (++phy_index < SCI_MAX_PHYS) {
			sci_phy_get_sas_address(&ihost->phys[phy_index],
						&phy_assigned_address);

			/* Verify each of the SAS address are all the same for every PHY */
			if (sci_sas_address_compare(
				sas_address, phy_assigned_address) == 0) {
				/* Same address: this phy may join any port in
				 * [port_index, phy_index]. */
				port_agent->phy_valid_port_range[phy_index].min_index = port_index;
				port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
			} else {
				/* Different address: this phy starts its own
				 * range; restart the outer scan from here. */
				port_agent->phy_valid_port_range[phy_index].min_index = phy_index;
				port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
				break;
			}
		}
	}

	return sci_port_configuration_agent_validate_ports(ihost, port_agent);
}

/*
 * This routine will restart the automatic port configuration timeout
 * timer for the next time period.  This could be caused by either a link
 * down event or a link up event where we can not yet tell to which a phy
 * belongs.
 */
static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent,
				      u32 timeout)
{
	port_agent->timer_pending = true;
	sci_mod_timer(&port_agent->timer, timeout);
}

/* Decide what to do with a phy in automatic mode: add it to a port, skip
 * it, or start the timer and wait for a wider port to become possible.
 * @start_timer: false when called from the timeout handler, which forces a
 * pending SCIC_SDS_APC_START_TIMER decision into an immediate add. */
static void sci_apc_agent_configure_ports(struct isci_host *ihost,
					  struct sci_port_configuration_agent *port_agent,
					  struct isci_phy *iphy,
					  bool start_timer)
{
	u8 port_index;
	enum sci_status status;
	struct isci_port *iport;
	enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;

	iport = sci_port_configuration_agent_find_port(ihost, iphy);

	if (iport) {
		/* A port already matches this phy: add it if the assignment
		 * is valid, otherwise leave it alone. */
		if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
			apc_activity = SCIC_SDS_APC_ADD_PHY;
		else
			apc_activity = SCIC_SDS_APC_SKIP_PHY;
	} else {
		/*
		 * There is no matching Port for this PHY so lets search through the
		 * Ports and see if we can add the PHY to its own port or maybe start
		 * the timer and wait to see if a wider port can be made.
		 *
		 * Note the break when we reach the condition of the port id == phy id
		 */
		for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index;
		     port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index;
		     port_index++) {

			iport = &ihost->ports[port_index];

			/* First we must make sure that this PHY can be added to this Port. */
			if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
				/*
				 * Port contains a PHY with a greater PHY ID than the current
				 * PHY that has gone link up.  This phy can not be part of any
				 * port so skip it and move on.
				 */
				if (iport->active_phy_mask > (1 << iphy->phy_index)) {
					apc_activity = SCIC_SDS_APC_SKIP_PHY;
					break;
				}

				/*
				 * We have reached the end of our Port list and have not found
				 * any reason why we should not either add the PHY to the port
				 * or wait for more phys to become active.
				 */
				if (iport->physical_port_index == iphy->phy_index) {
					/*
					 * The Port either has no active PHYs.
					 * Consider that if the port had any active PHYs we would have
					 * or active PHYs with
					 * a lower PHY Id than this PHY.
					 */
					if (apc_activity != SCIC_SDS_APC_START_TIMER) {
						apc_activity = SCIC_SDS_APC_ADD_PHY;
					}

					break;
				}

				/*
				 * The current Port has no active PHYs and this PHY could be part
				 * of this Port.  Since we dont know as yet setup to start the
				 * timer and see if there is a better configuration.
				 */
				if (iport->active_phy_mask == 0) {
					apc_activity = SCIC_SDS_APC_START_TIMER;
				}
			} else if (iport->active_phy_mask != 0) {
				/*
				 * The Port has an active phy and the current Phy can not
				 * participate in this port so skip the PHY and see if
				 * there is a better configuration.
				 */
				apc_activity = SCIC_SDS_APC_SKIP_PHY;
			}
		}
	}

	/*
	 * Check to see if the start timer operations should instead map to an
	 * add phy operation.  This is caused because we have been waiting to
	 * add a phy to a port but could not because the automatic port
	 * configuration engine had a choice of possible ports for the phy.
* Since we have gone through a timeout we are going to restrict the * choice to the smallest possible port. */ if ( (start_timer == false) && (apc_activity == SCIC_SDS_APC_START_TIMER) ) { apc_activity = SCIC_SDS_APC_ADD_PHY; } switch (apc_activity) { case SCIC_SDS_APC_ADD_PHY: status = sci_port_add_phy(iport, iphy); if (status == SCI_SUCCESS) { port_agent->phy_configured_mask |= (1 << iphy->phy_index); } break; case SCIC_SDS_APC_START_TIMER: sci_apc_agent_start_timer(port_agent, SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); break; case SCIC_SDS_APC_SKIP_PHY: default: /* do nothing the PHY can not be made part of a port at this time. */ break; } } /** * sci_apc_agent_link_up - handle apc link up events * @scic: This is the controller object that receives the link up * notification. * @sci_port: This is the port object associated with the phy. If the is no * associated port this is an NULL. * @sci_phy: This is the phy object which has gone link up. * * This method handles the automatic port configuration for link up * notifications. Is it possible to get a link down notification from a phy * that has no assocoated port? */ static void sci_apc_agent_link_up(struct isci_host *ihost, struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { u8 phy_index = iphy->phy_index; if (!iport) { /* the phy is not the part of this port */ port_agent->phy_ready_mask |= 1 << phy_index; sci_apc_agent_start_timer(port_agent, SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); } else { /* the phy is already the part of the port */ port_agent->phy_ready_mask |= 1 << phy_index; sci_port_link_up(iport, iphy); } } /** * * @controller: This is the controller object that receives the link down * notification. * @iport: This is the port object associated with the phy. If the is no * associated port this is an NULL. * @iphy: This is the phy object which has gone link down. * * This method handles the automatic port configuration link down * notifications. 
not associated with a port there is no action taken. Is it * possible to get a link down notification from a phy that has no assocoated * port? */ static void sci_apc_agent_link_down( struct isci_host *ihost, struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { port_agent->phy_ready_mask &= ~(1 << iphy->phy_index); if (!iport) return; if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) { enum sci_status status; status = sci_port_remove_phy(iport, iphy); if (status == SCI_SUCCESS) port_agent->phy_configured_mask &= ~(1 << iphy->phy_index); } } /* configure the phys into ports when the timer fires */ static void apc_agent_timeout(unsigned long data) { u32 index; struct sci_timer *tmr = (struct sci_timer *)data; struct sci_port_configuration_agent *port_agent; struct isci_host *ihost; unsigned long flags; u16 configure_phy_mask; port_agent = container_of(tmr, typeof(*port_agent), timer); ihost = container_of(port_agent, typeof(*ihost), port_agent); spin_lock_irqsave(&ihost->scic_lock, flags); if (tmr->cancel) goto done; port_agent->timer_pending = false; configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask; if (!configure_phy_mask) goto done; for (index = 0; index < SCI_MAX_PHYS; index++) { if ((configure_phy_mask & (1 << index)) == 0) continue; sci_apc_agent_configure_ports(ihost, port_agent, &ihost->phys[index], false); } if (is_controller_start_complete(ihost)) sci_controller_transition_to_ready(ihost, SCI_SUCCESS); done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } /* * ****************************************************************************** * Public port configuration agent routines * ****************************************************************************** */ /** * * * This method will construct the port configuration agent for operation. This * call is universal for both manual port configuration and automatic port * configuration modes. 
*/ void sci_port_configuration_agent_construct( struct sci_port_configuration_agent *port_agent) { u32 index; port_agent->phy_configured_mask = 0x00; port_agent->phy_ready_mask = 0x00; port_agent->link_up_handler = NULL; port_agent->link_down_handler = NULL; port_agent->timer_pending = false; for (index = 0; index < SCI_MAX_PORTS; index++) { port_agent->phy_valid_port_range[index].min_index = 0; port_agent->phy_valid_port_range[index].max_index = 0; } } bool is_port_config_apc(struct isci_host *ihost) { return ihost->port_agent.link_up_handler == sci_apc_agent_link_up; } enum sci_status sci_port_configuration_agent_initialize( struct isci_host *ihost, struct sci_port_configuration_agent *port_agent) { enum sci_status status; enum sci_port_configuration_mode mode; mode = ihost->oem_parameters.controller.mode_type; if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { status = sci_mpc_agent_validate_phy_configuration( ihost, port_agent); port_agent->link_up_handler = sci_mpc_agent_link_up; port_agent->link_down_handler = sci_mpc_agent_link_down; sci_init_timer(&port_agent->timer, mpc_agent_timeout); } else { status = sci_apc_agent_validate_phy_configuration( ihost, port_agent); port_agent->link_up_handler = sci_apc_agent_link_up; port_agent->link_down_handler = sci_apc_agent_link_down; sci_init_timer(&port_agent->timer, apc_agent_timeout); } return status; }
gpl-2.0
onealtom/ricoboard_kernel
drivers/input/tablet/gtco.c
2408
27157
/* -*- linux-c -*- GTCO digitizer USB driver TO CHECK: Is pressure done right on report 5? Copyright (C) 2006 GTCO CalComp This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Permission to use, copy, modify, distribute, and sell this software and its documentation for any purpose is hereby granted without fee, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of GTCO-CalComp not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. GTCO-CalComp makes no representations about the suitability of this software for any purpose. It is provided "as is" without express or implied warranty. GTCO-CALCOMP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL GTCO-CALCOMP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTIONS, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. GTCO CalComp, Inc. 
7125 Riverwood Drive Columbia, MD 21046 Jeremy Roberson jroberson@gtcocalcomp.com Scott Hill shill@gtcocalcomp.com */ /*#define DEBUG*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/usb.h> #include <asm/uaccess.h> #include <asm/unaligned.h> #include <asm/byteorder.h> #include <linux/usb/input.h> /* Version with a Major number of 2 is for kernel inclusion only. */ #define GTCO_VERSION "2.00.0006" /* MACROS */ #define VENDOR_ID_GTCO 0x078C #define PID_400 0x400 #define PID_401 0x401 #define PID_1000 0x1000 #define PID_1001 0x1001 #define PID_1002 0x1002 /* Max size of a single report */ #define REPORT_MAX_SIZE 10 /* Bitmask whether pen is in range */ #define MASK_INRANGE 0x20 #define MASK_BUTTON 0x01F #define PATHLENGTH 64 /* DATA STRUCTURES */ /* Device table */ static const struct usb_device_id gtco_usbid_table[] = { { USB_DEVICE(VENDOR_ID_GTCO, PID_400) }, { USB_DEVICE(VENDOR_ID_GTCO, PID_401) }, { USB_DEVICE(VENDOR_ID_GTCO, PID_1000) }, { USB_DEVICE(VENDOR_ID_GTCO, PID_1001) }, { USB_DEVICE(VENDOR_ID_GTCO, PID_1002) }, { } }; MODULE_DEVICE_TABLE (usb, gtco_usbid_table); /* Structure to hold all of our device specific stuff */ struct gtco { struct input_dev *inputdevice; /* input device struct pointer */ struct usb_device *usbdev; /* the usb device for this device */ struct usb_interface *intf; /* the usb interface for this device */ struct urb *urbinfo; /* urb for incoming reports */ dma_addr_t buf_dma; /* dma addr of the data buffer*/ unsigned char * buffer; /* databuffer for reports */ char usbpath[PATHLENGTH]; int openCount; /* Information pulled from Report Descriptor */ u32 usage; u32 min_X; u32 max_X; u32 min_Y; u32 max_Y; s8 mintilt_X; s8 maxtilt_X; s8 mintilt_Y; s8 maxtilt_Y; u32 maxpressure; u32 minpressure; }; /* Code for parsing the HID REPORT DESCRIPTOR */ /* From HID1.11 spec */ struct hid_descriptor { struct usb_descriptor_header 
header; __le16 bcdHID; u8 bCountryCode; u8 bNumDescriptors; u8 bDescriptorType; __le16 wDescriptorLength; } __attribute__ ((packed)); #define HID_DESCRIPTOR_SIZE 9 #define HID_DEVICE_TYPE 33 #define REPORT_DEVICE_TYPE 34 #define PREF_TAG(x) ((x)>>4) #define PREF_TYPE(x) ((x>>2)&0x03) #define PREF_SIZE(x) ((x)&0x03) #define TYPE_MAIN 0 #define TYPE_GLOBAL 1 #define TYPE_LOCAL 2 #define TYPE_RESERVED 3 #define TAG_MAIN_INPUT 0x8 #define TAG_MAIN_OUTPUT 0x9 #define TAG_MAIN_FEATURE 0xB #define TAG_MAIN_COL_START 0xA #define TAG_MAIN_COL_END 0xC #define TAG_GLOB_USAGE 0 #define TAG_GLOB_LOG_MIN 1 #define TAG_GLOB_LOG_MAX 2 #define TAG_GLOB_PHYS_MIN 3 #define TAG_GLOB_PHYS_MAX 4 #define TAG_GLOB_UNIT_EXP 5 #define TAG_GLOB_UNIT 6 #define TAG_GLOB_REPORT_SZ 7 #define TAG_GLOB_REPORT_ID 8 #define TAG_GLOB_REPORT_CNT 9 #define TAG_GLOB_PUSH 10 #define TAG_GLOB_POP 11 #define TAG_GLOB_MAX 12 #define DIGITIZER_USAGE_TIP_PRESSURE 0x30 #define DIGITIZER_USAGE_TILT_X 0x3D #define DIGITIZER_USAGE_TILT_Y 0x3E /* * This is an abbreviated parser for the HID Report Descriptor. We * know what devices we are talking to, so this is by no means meant * to be generic. We can make some safe assumptions: * * - We know there are no LONG tags, all short * - We know that we have no MAIN Feature and MAIN Output items * - We know what the IRQ reports are supposed to look like. * * The main purpose of this is to use the HID report desc to figure * out the mins and maxs of the fields in the IRQ reports. The IRQ * reports for 400/401 change slightly if the max X is bigger than 64K. 
* */ static void parse_hid_report_descriptor(struct gtco *device, char * report, int length) { struct device *ddev = &device->intf->dev; int x, i = 0; /* Tag primitive vars */ __u8 prefix; __u8 size; __u8 tag; __u8 type; __u8 data = 0; __u16 data16 = 0; __u32 data32 = 0; /* For parsing logic */ int inputnum = 0; __u32 usage = 0; /* Global Values, indexed by TAG */ __u32 globalval[TAG_GLOB_MAX]; __u32 oldval[TAG_GLOB_MAX]; /* Debug stuff */ char maintype = 'x'; char globtype[12]; int indent = 0; char indentstr[10] = ""; dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n"); /* Walk this report and pull out the info we need */ while (i < length) { prefix = report[i]; /* Skip over prefix */ i++; /* Determine data size and save the data in the proper variable */ size = PREF_SIZE(prefix); switch (size) { case 1: data = report[i]; break; case 2: data16 = get_unaligned_le16(&report[i]); break; case 3: size = 4; data32 = get_unaligned_le32(&report[i]); break; } /* Skip size of data */ i += size; /* What we do depends on the tag type */ tag = PREF_TAG(prefix); type = PREF_TYPE(prefix); switch (type) { case TYPE_MAIN: strcpy(globtype, ""); switch (tag) { case TAG_MAIN_INPUT: /* * The INPUT MAIN tag signifies this is * information from a report. We need to * figure out what it is and store the * min/max values */ maintype = 'I'; if (data == 2) strcpy(globtype, "Variable"); else if (data == 3) strcpy(globtype, "Var|Const"); dev_dbg(ddev, "::::: Saving Report: %d input #%d Max: 0x%X(%d) Min:0x%X(%d) of %d bits\n", globalval[TAG_GLOB_REPORT_ID], inputnum, globalval[TAG_GLOB_LOG_MAX], globalval[TAG_GLOB_LOG_MAX], globalval[TAG_GLOB_LOG_MIN], globalval[TAG_GLOB_LOG_MIN], globalval[TAG_GLOB_REPORT_SZ] * globalval[TAG_GLOB_REPORT_CNT]); /* We can assume that the first two input items are always the X and Y coordinates. 
After that, we look for everything else by local usage value */ switch (inputnum) { case 0: /* X coord */ dev_dbg(ddev, "GER: X Usage: 0x%x\n", usage); if (device->max_X == 0) { device->max_X = globalval[TAG_GLOB_LOG_MAX]; device->min_X = globalval[TAG_GLOB_LOG_MIN]; } break; case 1: /* Y coord */ dev_dbg(ddev, "GER: Y Usage: 0x%x\n", usage); if (device->max_Y == 0) { device->max_Y = globalval[TAG_GLOB_LOG_MAX]; device->min_Y = globalval[TAG_GLOB_LOG_MIN]; } break; default: /* Tilt X */ if (usage == DIGITIZER_USAGE_TILT_X) { if (device->maxtilt_X == 0) { device->maxtilt_X = globalval[TAG_GLOB_LOG_MAX]; device->mintilt_X = globalval[TAG_GLOB_LOG_MIN]; } } /* Tilt Y */ if (usage == DIGITIZER_USAGE_TILT_Y) { if (device->maxtilt_Y == 0) { device->maxtilt_Y = globalval[TAG_GLOB_LOG_MAX]; device->mintilt_Y = globalval[TAG_GLOB_LOG_MIN]; } } /* Pressure */ if (usage == DIGITIZER_USAGE_TIP_PRESSURE) { if (device->maxpressure == 0) { device->maxpressure = globalval[TAG_GLOB_LOG_MAX]; device->minpressure = globalval[TAG_GLOB_LOG_MIN]; } } break; } inputnum++; break; case TAG_MAIN_OUTPUT: maintype = 'O'; break; case TAG_MAIN_FEATURE: maintype = 'F'; break; case TAG_MAIN_COL_START: maintype = 'S'; if (data == 0) { dev_dbg(ddev, "======>>>>>> Physical\n"); strcpy(globtype, "Physical"); } else dev_dbg(ddev, "======>>>>>>\n"); /* Indent the debug output */ indent++; for (x = 0; x < indent; x++) indentstr[x] = '-'; indentstr[x] = 0; /* Save global tags */ for (x = 0; x < TAG_GLOB_MAX; x++) oldval[x] = globalval[x]; break; case TAG_MAIN_COL_END: dev_dbg(ddev, "<<<<<<======\n"); maintype = 'E'; indent--; for (x = 0; x < indent; x++) indentstr[x] = '-'; indentstr[x] = 0; /* Copy global tags back */ for (x = 0; x < TAG_GLOB_MAX; x++) globalval[x] = oldval[x]; break; } switch (size) { case 1: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data); break; case 2: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, 
tag, maintype, size, globtype, data16); break; case 4: dev_dbg(ddev, "%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x\n", indentstr, tag, maintype, size, globtype, data32); break; } break; case TYPE_GLOBAL: switch (tag) { case TAG_GLOB_USAGE: /* * First time we hit the global usage tag, * it should tell us the type of device */ if (device->usage == 0) device->usage = data; strcpy(globtype, "USAGE"); break; case TAG_GLOB_LOG_MIN: strcpy(globtype, "LOG_MIN"); break; case TAG_GLOB_LOG_MAX: strcpy(globtype, "LOG_MAX"); break; case TAG_GLOB_PHYS_MIN: strcpy(globtype, "PHYS_MIN"); break; case TAG_GLOB_PHYS_MAX: strcpy(globtype, "PHYS_MAX"); break; case TAG_GLOB_UNIT_EXP: strcpy(globtype, "EXP"); break; case TAG_GLOB_UNIT: strcpy(globtype, "UNIT"); break; case TAG_GLOB_REPORT_SZ: strcpy(globtype, "REPORT_SZ"); break; case TAG_GLOB_REPORT_ID: strcpy(globtype, "REPORT_ID"); /* New report, restart numbering */ inputnum = 0; break; case TAG_GLOB_REPORT_CNT: strcpy(globtype, "REPORT_CNT"); break; case TAG_GLOB_PUSH: strcpy(globtype, "PUSH"); break; case TAG_GLOB_POP: strcpy(globtype, "POP"); break; } /* Check to make sure we have a good tag number so we don't overflow array */ if (tag < TAG_GLOB_MAX) { switch (size) { case 1: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n", indentstr, globtype, tag, size, data); globalval[tag] = data; break; case 2: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n", indentstr, globtype, tag, size, data16); globalval[tag] = data16; break; case 4: dev_dbg(ddev, "%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x\n", indentstr, globtype, tag, size, data32); globalval[tag] = data32; break; } } else { dev_dbg(ddev, "%sGLOBALTAG: ILLEGAL TAG:%d SIZE: %d\n", indentstr, tag, size); } break; case TYPE_LOCAL: switch (tag) { case TAG_GLOB_USAGE: strcpy(globtype, "USAGE"); /* Always 1 byte */ usage = data; break; case TAG_GLOB_LOG_MIN: strcpy(globtype, "MIN"); break; case TAG_GLOB_LOG_MAX: strcpy(globtype, "MAX"); break; default: strcpy(globtype, "UNKNOWN"); 
break; } switch (size) { case 1: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data); break; case 2: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data16); break; case 4: dev_dbg(ddev, "%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x\n", indentstr, tag, globtype, size, data32); break; } break; } } } /* INPUT DRIVER Routines */ /* * Called when opening the input device. This will submit the URB to * the usb system so we start getting reports */ static int gtco_input_open(struct input_dev *inputdev) { struct gtco *device = input_get_drvdata(inputdev); device->urbinfo->dev = device->usbdev; if (usb_submit_urb(device->urbinfo, GFP_KERNEL)) return -EIO; return 0; } /* * Called when closing the input device. This will unlink the URB */ static void gtco_input_close(struct input_dev *inputdev) { struct gtco *device = input_get_drvdata(inputdev); usb_kill_urb(device->urbinfo); } /* * Setup input device capabilities. Tell the input system what this * device is capable of generating. 
* * This information is based on what is read from the HID report and * placed in the struct gtco structure * */ static void gtco_setup_caps(struct input_dev *inputdev) { struct gtco *device = input_get_drvdata(inputdev); /* Which events */ inputdev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) | BIT_MASK(EV_MSC); /* Misc event menu block */ inputdev->mscbit[0] = BIT_MASK(MSC_SCAN) | BIT_MASK(MSC_SERIAL) | BIT_MASK(MSC_RAW); /* Absolute values based on HID report info */ input_set_abs_params(inputdev, ABS_X, device->min_X, device->max_X, 0, 0); input_set_abs_params(inputdev, ABS_Y, device->min_Y, device->max_Y, 0, 0); /* Proximity */ input_set_abs_params(inputdev, ABS_DISTANCE, 0, 1, 0, 0); /* Tilt & pressure */ input_set_abs_params(inputdev, ABS_TILT_X, device->mintilt_X, device->maxtilt_X, 0, 0); input_set_abs_params(inputdev, ABS_TILT_Y, device->mintilt_Y, device->maxtilt_Y, 0, 0); input_set_abs_params(inputdev, ABS_PRESSURE, device->minpressure, device->maxpressure, 0, 0); /* Transducer */ input_set_abs_params(inputdev, ABS_MISC, 0, 0xFF, 0, 0); } /* USB Routines */ /* * URB callback routine. Called when we get IRQ reports from the * digitizer. * * This bridges the USB and input device worlds. It generates events * on the input device based on the USB reports. */ static void gtco_urb_callback(struct urb *urbinfo) { struct gtco *device = urbinfo->context; struct input_dev *inputdev; int rc; u32 val = 0; s8 valsigned = 0; char le_buffer[2]; inputdev = device->inputdevice; /* Was callback OK? */ if (urbinfo->status == -ECONNRESET || urbinfo->status == -ENOENT || urbinfo->status == -ESHUTDOWN) { /* Shutdown is occurring. Return and don't queue up any more */ return; } if (urbinfo->status != 0) { /* * Some unknown error. Hopefully temporary. 
Just go and
		 * requeue an URB */
		goto resubmit;
	}

	/*
	 * Good URB, now process
	 */

	/* PID dependent when we interpret the report */
	if (inputdev->id.product == PID_1000 ||
	    inputdev->id.product == PID_1001 ||
	    inputdev->id.product == PID_1002) {

		/*
		 * Switch on the report ID
		 * Conveniently, the reports have more information, the higher
		 * the report number.  We can just fall through the case
		 * statements if we start with the highest number report
		 */
		switch (device->buffer[0]) {
		case 5:
			/* Pressure is 9 bits */
			val = ((u16)(device->buffer[8]) << 1);
			val |= (u16)(device->buffer[7] >> 7);
			/*
			 * FIX: report the assembled 9-bit value, not the raw
			 * upper byte.  The ABS_PRESSURE range advertised in
			 * gtco_setup_caps() comes from the 9-bit HID logical
			 * min/max, so reporting buffer[8] alone was both
			 * halved and missing the LSB.
			 */
			input_report_abs(inputdev, ABS_PRESSURE, val);

			/* Mask out the Y tilt value used for pressure */
			device->buffer[7] = (u8)((device->buffer[7]) & 0x7F);

			/* Fall thru */
		case 4:
			/* Tilt */

			/* Sign extend these 7 bit numbers.  */
			if (device->buffer[6] & 0x40)
				device->buffer[6] |= 0x80;

			if (device->buffer[7] & 0x40)
				device->buffer[7] |= 0x80;


			valsigned = (device->buffer[6]);
			input_report_abs(inputdev, ABS_TILT_X, (s32)valsigned);

			valsigned = (device->buffer[7]);
			input_report_abs(inputdev, ABS_TILT_Y, (s32)valsigned);

			/* Fall thru */
		case 2:
		case 3:
			/* Convert buttons, only 5 bits possible */
			val = (device->buffer[5]) & MASK_BUTTON;

			/* We don't apply any meaning to the bitmask,
			   just report */
			input_event(inputdev, EV_MSC, MSC_SERIAL, val);

			/* Fall thru */
		case 1:
			/* All reports have X and Y coords in the same place */
			val = get_unaligned_le16(&device->buffer[1]);
			input_report_abs(inputdev, ABS_X, val);

			val = get_unaligned_le16(&device->buffer[3]);
			input_report_abs(inputdev, ABS_Y, val);

			/* Ditto for proximity bit */
			val = device->buffer[5] & MASK_INRANGE ?
1 : 0;
			input_report_abs(inputdev, ABS_DISTANCE, val);

			/* Report 1 is an exception to how we handle buttons */
			/* Buttons are an index, not a bitmask */
			if (device->buffer[0] == 1) {

				/*
				 * Convert buttons, 5 bit index
				 * Report value of index set as one,
				 * the rest as 0
				 */
				val = device->buffer[5] & MASK_BUTTON;
				dev_dbg(&device->intf->dev,
					"======>>>>>>REPORT 1: val 0x%X(%d)\n",
					val, val);

				/*
				 * We don't apply any meaning to the button
				 * index, just report it
				 */
				input_event(inputdev, EV_MSC, MSC_SERIAL, val);
			}
			break;

		case 7:
			/* Menu blocks */
			input_event(inputdev, EV_MSC, MSC_SCAN,
				    device->buffer[1]);
			break;
		}
	}

	/* Other pid class */
	if (inputdev->id.product == PID_400 ||
	    inputdev->id.product == PID_401) {

		/* Report 2 */
		if (device->buffer[0] == 2) {
			/* Menu blocks */
			input_event(inputdev, EV_MSC, MSC_SCAN, device->buffer[1]);
		}

		/* Report 1 */
		if (device->buffer[0] == 1) {
			char buttonbyte;

			/* If X max > 64K, we steal a bit from the Y report:
			 * X is then 17 bits (buffer[1..2] plus bit 0 of
			 * buffer[3]) and Y is shifted down one bit. */
			if (device->max_X > 0x10000) {
				/* 17-bit X: low 16 bits little-endian, bit 16
				 * borrowed from buffer[3] bit 0 */
				val = (u16)(((u16)(device->buffer[2] << 8)) | (u8)device->buffer[1]);
				val |= (u32)(((u8)device->buffer[3] & 0x1) << 16);

				input_report_abs(inputdev, ABS_X, val);

				/* Re-pack Y into a little-endian pair after
				 * dropping the stolen bit */
				le_buffer[0]  = (u8)((u8)(device->buffer[3]) >> 1);
				le_buffer[0] |= (u8)((device->buffer[3] & 0x1) << 7);

				le_buffer[1]  = (u8)(device->buffer[4] >> 1);
				le_buffer[1] |= (u8)((device->buffer[5] & 0x1) << 7);

				val = get_unaligned_le16(le_buffer);
				input_report_abs(inputdev, ABS_Y, val);

				/*
				 * Shift the button byte right by one to
				 * make it look like the standard report
				 */
				buttonbyte = device->buffer[5] >> 1;
			} else {
				/* Standard layout: 16-bit X and Y */

				val = get_unaligned_le16(&device->buffer[1]);
				input_report_abs(inputdev, ABS_X, val);

				val = get_unaligned_le16(&device->buffer[3]);
				input_report_abs(inputdev, ABS_Y, val);

				buttonbyte = device->buffer[5];
			}

			/* BUTTONS and PROXIMITY */
			val = buttonbyte & MASK_INRANGE ?
1 : 0;
			input_report_abs(inputdev, ABS_DISTANCE, val);

			/* Convert buttons, only 4 bits possible */
			val = buttonbyte & 0x0F;
#ifdef USE_BUTTONS
			for (i = 0; i < 5; i++)
				input_report_key(inputdev, BTN_DIGI + i, val & (1 << i));
#else
			/* We don't apply any meaning to the bitmask, just report */
			input_event(inputdev, EV_MSC, MSC_SERIAL, val);
#endif

			/* TRANSDUCER */
			input_report_abs(inputdev, ABS_MISC, device->buffer[6]);
		}
	}

	/* Everybody gets report ID's */
	input_event(inputdev, EV_MSC, MSC_RAW,  device->buffer[0]);

	/* Sync it up */
	input_sync(inputdev);

resubmit:
	/* Re-queue the URB so we keep receiving reports; GFP_ATOMIC because
	 * this runs in interrupt (URB completion) context. */
	rc = usb_submit_urb(urbinfo, GFP_ATOMIC);
	if (rc != 0)
		dev_err(&device->intf->dev,
			"usb_submit_urb failed rc=0x%x\n", rc);
}

/*
 * The probe routine.  This is called when the kernel find the matching USB
 *   vendor/product.  We do the following:
 *
 *    - Allocate mem for a local structure to manage the device
 *    - Request a HID Report Descriptor from the device and parse it to
 *      find out the device parameters
 *    - Create an input device and assign it attributes
 *    - Allocate an URB so the device can talk to us when the input
 *      queue is open
 */
static int gtco_probe(struct usb_interface *usbinterface,
		      const struct usb_device_id *id)
{

	struct gtco *gtco;
	struct input_dev *input_dev;
	struct hid_descriptor *hid_desc;
	char *report;
	int result = 0, retry;
	int error;
	struct usb_endpoint_descriptor *endpoint;

	/* Allocate memory for device structure */
	gtco = kzalloc(sizeof(struct gtco), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!gtco || !input_dev) {
		dev_err(&usbinterface->dev, "No more memory\n");
		error = -ENOMEM;
		goto err_free_devs;
	}

	/* Set pointer to the input device */
	gtco->inputdevice = input_dev;

	/* Save interface information */
	gtco->usbdev = usb_get_dev(interface_to_usbdev(usbinterface));
	gtco->intf = usbinterface;

	/* Allocate some data for incoming reports */
	gtco->buffer = usb_alloc_coherent(gtco->usbdev, REPORT_MAX_SIZE,
					  GFP_KERNEL, &gtco->buf_dma);
	if (!gtco->buffer) {
		dev_err(&usbinterface->dev, "No 
more memory for us buffers\n"); error = -ENOMEM; goto err_free_devs; } /* Allocate URB for reports */ gtco->urbinfo = usb_alloc_urb(0, GFP_KERNEL); if (!gtco->urbinfo) { dev_err(&usbinterface->dev, "Failed to allocate URB\n"); error = -ENOMEM; goto err_free_buf; } /* * The endpoint is always altsetting 0, we know this since we know * this device only has one interrupt endpoint */ endpoint = &usbinterface->altsetting[0].endpoint[0].desc; /* Some debug */ dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting); dev_dbg(&usbinterface->dev, "num endpoints: %d\n", usbinterface->cur_altsetting->desc.bNumEndpoints); dev_dbg(&usbinterface->dev, "interface class: %d\n", usbinterface->cur_altsetting->desc.bInterfaceClass); dev_dbg(&usbinterface->dev, "endpoint: attribute:0x%x type:0x%x\n", endpoint->bmAttributes, endpoint->bDescriptorType); if (usb_endpoint_xfer_int(endpoint)) dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n"); dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen); /* * Find the HID descriptor so we can find out the size of the * HID report descriptor */ if (usb_get_extra_descriptor(usbinterface->cur_altsetting, HID_DEVICE_TYPE, &hid_desc) != 0){ dev_err(&usbinterface->dev, "Can't retrieve exta USB descriptor to get hid report descriptor length\n"); error = -EIO; goto err_free_urb; } dev_dbg(&usbinterface->dev, "Extra descriptor success: type:%d len:%d\n", hid_desc->bDescriptorType, hid_desc->wDescriptorLength); report = kzalloc(le16_to_cpu(hid_desc->wDescriptorLength), GFP_KERNEL); if (!report) { dev_err(&usbinterface->dev, "No more memory for report\n"); error = -ENOMEM; goto err_free_urb; } /* Couple of tries to get reply */ for (retry = 0; retry < 3; retry++) { result = usb_control_msg(gtco->usbdev, usb_rcvctrlpipe(gtco->usbdev, 0), USB_REQ_GET_DESCRIPTOR, USB_RECIP_INTERFACE | USB_DIR_IN, REPORT_DEVICE_TYPE << 8, 0, /* interface */ report, 
le16_to_cpu(hid_desc->wDescriptorLength), 5000); /* 5 secs */ dev_dbg(&usbinterface->dev, "usb_control_msg result: %d\n", result); if (result == le16_to_cpu(hid_desc->wDescriptorLength)) { parse_hid_report_descriptor(gtco, report, result); break; } } kfree(report); /* If we didn't get the report, fail */ if (result != le16_to_cpu(hid_desc->wDescriptorLength)) { dev_err(&usbinterface->dev, "Failed to get HID Report Descriptor of size: %d\n", hid_desc->wDescriptorLength); error = -EIO; goto err_free_urb; } /* Create a device file node */ usb_make_path(gtco->usbdev, gtco->usbpath, sizeof(gtco->usbpath)); strlcat(gtco->usbpath, "/input0", sizeof(gtco->usbpath)); /* Set Input device functions */ input_dev->open = gtco_input_open; input_dev->close = gtco_input_close; /* Set input device information */ input_dev->name = "GTCO_CalComp"; input_dev->phys = gtco->usbpath; input_set_drvdata(input_dev, gtco); /* Now set up all the input device capabilities */ gtco_setup_caps(input_dev); /* Set input device required ID information */ usb_to_input_id(gtco->usbdev, &input_dev->id); input_dev->dev.parent = &usbinterface->dev; /* Setup the URB, it will be posted later on open of input device */ endpoint = &usbinterface->altsetting[0].endpoint[0].desc; usb_fill_int_urb(gtco->urbinfo, gtco->usbdev, usb_rcvintpipe(gtco->usbdev, endpoint->bEndpointAddress), gtco->buffer, REPORT_MAX_SIZE, gtco_urb_callback, gtco, endpoint->bInterval); gtco->urbinfo->transfer_dma = gtco->buf_dma; gtco->urbinfo->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* Save gtco pointer in USB interface gtco */ usb_set_intfdata(usbinterface, gtco); /* All done, now register the input device */ error = input_register_device(input_dev); if (error) goto err_free_urb; return 0; err_free_urb: usb_free_urb(gtco->urbinfo); err_free_buf: usb_free_coherent(gtco->usbdev, REPORT_MAX_SIZE, gtco->buffer, gtco->buf_dma); err_free_devs: input_free_device(input_dev); kfree(gtco); return error; } /* * This function is a standard USB 
function called when the USB device * is disconnected. We will get rid of the URV, de-register the input * device, and free up allocated memory */ static void gtco_disconnect(struct usb_interface *interface) { /* Grab private device ptr */ struct gtco *gtco = usb_get_intfdata(interface); /* Now reverse all the registration stuff */ if (gtco) { input_unregister_device(gtco->inputdevice); usb_kill_urb(gtco->urbinfo); usb_free_urb(gtco->urbinfo); usb_free_coherent(gtco->usbdev, REPORT_MAX_SIZE, gtco->buffer, gtco->buf_dma); kfree(gtco); } dev_info(&interface->dev, "gtco driver disconnected\n"); } /* STANDARD MODULE LOAD ROUTINES */ static struct usb_driver gtco_driverinfo_table = { .name = "gtco", .id_table = gtco_usbid_table, .probe = gtco_probe, .disconnect = gtco_disconnect, }; module_usb_driver(gtco_driverinfo_table); MODULE_DESCRIPTION("GTCO digitizer USB driver"); MODULE_LICENSE("GPL");
gpl-2.0
qyx210an/kernel
arch/arm/mach-kirkwood/mpp.c
2664
1087
/* * arch/arm/mach-kirkwood/mpp.c * * MPP functions for Marvell Kirkwood SoCs * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mbus.h> #include <linux/io.h> #include <asm/gpio.h> #include <mach/hardware.h> #include <plat/mpp.h> #include "common.h" #include "mpp.h" static unsigned int __init kirkwood_variant(void) { u32 dev, rev; kirkwood_pcie_id(&dev, &rev); if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) || (dev == MV88F6282_DEV_ID)) return MPP_F6281_MASK; if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0) return MPP_F6192_MASK; if (dev == MV88F6180_DEV_ID) return MPP_F6180_MASK; printk(KERN_ERR "MPP setup: unknown kirkwood variant " "(dev %#x rev %#x)\n", dev, rev); return 0; } void __init kirkwood_mpp_conf(unsigned int *mpp_list) { orion_mpp_conf(mpp_list, kirkwood_variant(), MPP_MAX, DEV_BUS_VIRT_BASE); }
gpl-2.0
1N4148/kernel_smdk4412
drivers/net/wireless/atmel_cs.c
2920
10271
/*** -*- linux-c -*- ********************************************************** Driver for Atmel at76c502 at76c504 and at76c506 wireless cards. Copyright 2000-2001 ATMEL Corporation. Copyright 2003 Simon Kelley. This code was developed from version 2.1.1 of the Atmel drivers, released by Atmel corp. under the GPL in December 2002. It also includes code from the Linux aironet drivers (C) Benjamin Reed, and the Linux PCMCIA package, (C) David Hinds. For all queries about this code, please contact the current author, Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with Atmel wireless lan drivers; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ******************************************************************************/ #ifdef __IN_PCMCIA_PACKAGE__ #include <pcmcia/k_compat.h> #endif #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/netdevice.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include <pcmcia/ciscode.h> #include <asm/io.h> #include <asm/system.h> #include <linux/wireless.h> #include "atmel.h" /*====================================================================*/ MODULE_AUTHOR("Simon Kelley"); MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards"); /*====================================================================*/ static int atmel_config(struct pcmcia_device *link); static void atmel_release(struct pcmcia_device *link); static void atmel_detach(struct pcmcia_device *p_dev); typedef struct local_info_t { struct net_device *eth_dev; } local_info_t; static int atmel_probe(struct pcmcia_device *p_dev) { local_info_t *local; dev_dbg(&p_dev->dev, "atmel_attach()\n"); /* Allocate space for private device-specific data */ local = kzalloc(sizeof(local_info_t), GFP_KERNEL); if (!local) { printk(KERN_ERR "atmel_cs: no memory for new device\n"); return -ENOMEM; } p_dev->priv = local; return atmel_config(p_dev); } /* atmel_attach */ static void atmel_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "atmel_detach\n"); atmel_release(link); kfree(link->priv); } /* Call-back function to interrogate PCMCIA-specific information about the current existence of the card */ static 
int card_present(void *arg) { struct pcmcia_device *link = (struct pcmcia_device *)arg; if (pcmcia_dev_present(link)) return 1; return 0; } static int atmel_config_check(struct pcmcia_device *p_dev, void *priv_data) { if (p_dev->config_index == 0) return -EINVAL; return pcmcia_request_io(p_dev); } static int atmel_config(struct pcmcia_device *link) { local_info_t *dev; int ret; const struct pcmcia_device_id *did; dev = link->priv; did = dev_get_drvdata(&link->dev); dev_dbg(&link->dev, "atmel_config\n"); link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO | CONF_AUTO_SET_IO; if (pcmcia_loop_config(link, atmel_config_check, NULL)) goto failed; if (!link->irq) { dev_err(&link->dev, "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config."); goto failed; } ret = pcmcia_enable_device(link); if (ret) goto failed; ((local_info_t*)link->priv)->eth_dev = init_atmel_card(link->irq, link->resource[0]->start, did ? did->driver_info : ATMEL_FW_TYPE_NONE, &link->dev, card_present, link); if (!((local_info_t*)link->priv)->eth_dev) goto failed; return 0; failed: atmel_release(link); return -ENODEV; } static void atmel_release(struct pcmcia_device *link) { struct net_device *dev = ((local_info_t*)link->priv)->eth_dev; dev_dbg(&link->dev, "atmel_release\n"); if (dev) stop_atmel_card(dev); ((local_info_t*)link->priv)->eth_dev = NULL; pcmcia_disable_device(link); } static int atmel_suspend(struct pcmcia_device *link) { local_info_t *local = link->priv; netif_device_detach(local->eth_dev); return 0; } static int atmel_resume(struct pcmcia_device *link) { local_info_t *local = link->priv; atmel_open(local->eth_dev); netif_device_attach(local->eth_dev); return 0; } /*====================================================================*/ /* We use the driver_info field to store the correct firmware type for a card. 
*/ #define PCMCIA_DEVICE_MANF_CARD_INFO(manf, card, info) { \ .match_flags = PCMCIA_DEV_ID_MATCH_MANF_ID| \ PCMCIA_DEV_ID_MATCH_CARD_ID, \ .manf_id = (manf), \ .card_id = (card), \ .driver_info = (kernel_ulong_t)(info), } #define PCMCIA_DEVICE_PROD_ID12_INFO(v1, v2, vh1, vh2, info) { \ .match_flags = PCMCIA_DEV_ID_MATCH_PROD_ID1| \ PCMCIA_DEV_ID_MATCH_PROD_ID2, \ .prod_id = { (v1), (v2), NULL, NULL }, \ .prod_id_hash = { (vh1), (vh2), 0, 0 }, \ .driver_info = (kernel_ulong_t)(info), } static const struct pcmcia_device_id atmel_ids[] = { PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0620, ATMEL_FW_TYPE_502_3COM), PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0696, ATMEL_FW_TYPE_502_3COM), PCMCIA_DEVICE_MANF_CARD_INFO(0x01bf, 0x3302, ATMEL_FW_TYPE_502E), PCMCIA_DEVICE_MANF_CARD_INFO(0xd601, 0x0007, ATMEL_FW_TYPE_502), PCMCIA_DEVICE_PROD_ID12_INFO("11WAVE", "11WP611AL-E", 0x9eb2da1f, 0xc9a0d3f9, ATMEL_FW_TYPE_502E), PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR", 0xabda4164, 0x41b37e1f, ATMEL_FW_TYPE_502), PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR_D", 0xabda4164, 0x3675d704, ATMEL_FW_TYPE_502D), PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR_E", 0xabda4164, 0x4172e792, ATMEL_FW_TYPE_502E), PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504_R", 0xabda4164, 0x917f3d72, ATMEL_FW_TYPE_504_2958), PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504", 0xabda4164, 0x5040670a, ATMEL_FW_TYPE_504), PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504A", 0xabda4164, 0xe15ed87f, ATMEL_FW_TYPE_504A_2958), PCMCIA_DEVICE_PROD_ID12_INFO("BT", "Voyager 1020 Laptop Adapter", 0xae49b86a, 0x1e957cd5, ATMEL_FW_TYPE_502), PCMCIA_DEVICE_PROD_ID12_INFO("CNet", "CNWLC 11Mbps Wireless PC Card V-5", 0xbc477dde, 0x502fae6b, ATMEL_FW_TYPE_502E), PCMCIA_DEVICE_PROD_ID12_INFO("IEEE 802.11b", "Wireless LAN PC Card", 0x5b878724, 0x122f1df6, ATMEL_FW_TYPE_502), PCMCIA_DEVICE_PROD_ID12_INFO("IEEE 802.11b", "Wireless LAN Card S", 0x5b878724, 0x5fba533a, ATMEL_FW_TYPE_504_2958), PCMCIA_DEVICE_PROD_ID12_INFO("OEM", 
"11Mbps Wireless LAN PC Card V-3", 0xfea54c90, 0x1c5b0f68, ATMEL_FW_TYPE_502), PCMCIA_DEVICE_PROD_ID12_INFO("SMC", "2632W", 0xc4f8b18b, 0x30f38774, ATMEL_FW_TYPE_502D), PCMCIA_DEVICE_PROD_ID12_INFO("SMC", "2632W-V2", 0xc4f8b18b, 0x172d1377, ATMEL_FW_TYPE_502), PCMCIA_DEVICE_PROD_ID12_INFO("Wireless", "PC_CARD", 0xa407ecdd, 0x119f6314, ATMEL_FW_TYPE_502D), PCMCIA_DEVICE_PROD_ID12_INFO("WLAN", "802.11b PC CARD", 0x575c516c, 0xb1f6dbc4, ATMEL_FW_TYPE_502D), PCMCIA_DEVICE_PROD_ID12_INFO("LG", "LW2100N", 0xb474d43a, 0x6b1fec94, ATMEL_FW_TYPE_502E), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, atmel_ids); static struct pcmcia_driver atmel_driver = { .owner = THIS_MODULE, .name = "atmel_cs", .probe = atmel_probe, .remove = atmel_detach, .id_table = atmel_ids, .suspend = atmel_suspend, .resume = atmel_resume, }; static int __init atmel_cs_init(void) { return pcmcia_register_driver(&atmel_driver); } static void __exit atmel_cs_cleanup(void) { pcmcia_unregister_driver(&atmel_driver); } /* This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. In addition: Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ module_init(atmel_cs_init); module_exit(atmel_cs_cleanup);
gpl-2.0
johnnyslt/kernel_zte_v967s
arch/sh/boards/board-sh7757lcr.c
4456
15645
/* * Renesas R0P7757LC0012RL Support. * * Copyright (C) 2009 - 2010 Renesas Solutions Corp. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/irq.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/io.h> #include <linux/mmc/host.h> #include <linux/mmc/sh_mmcif.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/sh_eth.h> #include <linux/usb/renesas_usbhs.h> #include <cpu/sh7757.h> #include <asm/heartbeat.h> static struct resource heartbeat_resource = { .start = 0xffec005c, /* PUDR */ .end = 0xffec005c, .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, }; static unsigned char heartbeat_bit_pos[] = { 0, 1, 2, 3 }; static struct heartbeat_data heartbeat_data = { .bit_pos = heartbeat_bit_pos, .nr_bits = ARRAY_SIZE(heartbeat_bit_pos), .flags = HEARTBEAT_INVERTED, }; static struct platform_device heartbeat_device = { .name = "heartbeat", .id = -1, .dev = { .platform_data = &heartbeat_data, }, .num_resources = 1, .resource = &heartbeat_resource, }; /* Fast Ethernet */ #define GBECONT 0xffc10100 #define GBECONT_RMII1 BIT(17) #define GBECONT_RMII0 BIT(16) static void sh7757_eth_set_mdio_gate(void *addr) { if (((unsigned long)addr & 0x00000fff) < 0x0800) writel(readl(GBECONT) | GBECONT_RMII0, GBECONT); else writel(readl(GBECONT) | GBECONT_RMII1, GBECONT); } static struct resource sh_eth0_resources[] = { { .start = 0xfef00000, .end = 0xfef001ff, .flags = IORESOURCE_MEM, }, { .start = 84, .end = 84, .flags = IORESOURCE_IRQ, }, }; static struct sh_eth_plat_data sh7757_eth0_pdata = { .phy = 1, .edmac_endian = EDMAC_LITTLE_ENDIAN, .register_type = SH_ETH_REG_FAST_SH4, .set_mdio_gate = sh7757_eth_set_mdio_gate, }; static struct platform_device sh7757_eth0_device = { .name = "sh-eth", .resource = sh_eth0_resources, .id = 0, .num_resources 
= ARRAY_SIZE(sh_eth0_resources), .dev = { .platform_data = &sh7757_eth0_pdata, }, }; static struct resource sh_eth1_resources[] = { { .start = 0xfef00800, .end = 0xfef009ff, .flags = IORESOURCE_MEM, }, { .start = 84, .end = 84, .flags = IORESOURCE_IRQ, }, }; static struct sh_eth_plat_data sh7757_eth1_pdata = { .phy = 1, .edmac_endian = EDMAC_LITTLE_ENDIAN, .register_type = SH_ETH_REG_FAST_SH4, .set_mdio_gate = sh7757_eth_set_mdio_gate, }; static struct platform_device sh7757_eth1_device = { .name = "sh-eth", .resource = sh_eth1_resources, .id = 1, .num_resources = ARRAY_SIZE(sh_eth1_resources), .dev = { .platform_data = &sh7757_eth1_pdata, }, }; static void sh7757_eth_giga_set_mdio_gate(void *addr) { if (((unsigned long)addr & 0x00000fff) < 0x0800) { gpio_set_value(GPIO_PTT4, 1); writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT); } else { gpio_set_value(GPIO_PTT4, 0); writel(readl(GBECONT) & ~GBECONT_RMII1, GBECONT); } } static struct resource sh_eth_giga0_resources[] = { { .start = 0xfee00000, .end = 0xfee007ff, .flags = IORESOURCE_MEM, }, { /* TSU */ .start = 0xfee01800, .end = 0xfee01fff, .flags = IORESOURCE_MEM, }, { .start = 315, .end = 315, .flags = IORESOURCE_IRQ, }, }; static struct sh_eth_plat_data sh7757_eth_giga0_pdata = { .phy = 18, .edmac_endian = EDMAC_LITTLE_ENDIAN, .register_type = SH_ETH_REG_GIGABIT, .set_mdio_gate = sh7757_eth_giga_set_mdio_gate, .phy_interface = PHY_INTERFACE_MODE_RGMII_ID, }; static struct platform_device sh7757_eth_giga0_device = { .name = "sh-eth", .resource = sh_eth_giga0_resources, .id = 2, .num_resources = ARRAY_SIZE(sh_eth_giga0_resources), .dev = { .platform_data = &sh7757_eth_giga0_pdata, }, }; static struct resource sh_eth_giga1_resources[] = { { .start = 0xfee00800, .end = 0xfee00fff, .flags = IORESOURCE_MEM, }, { /* TSU */ .start = 0xfee01800, .end = 0xfee01fff, .flags = IORESOURCE_MEM, }, { .start = 316, .end = 316, .flags = IORESOURCE_IRQ, }, }; static struct sh_eth_plat_data sh7757_eth_giga1_pdata = { .phy = 19, 
.edmac_endian = EDMAC_LITTLE_ENDIAN, .register_type = SH_ETH_REG_GIGABIT, .set_mdio_gate = sh7757_eth_giga_set_mdio_gate, .phy_interface = PHY_INTERFACE_MODE_RGMII_ID, }; static struct platform_device sh7757_eth_giga1_device = { .name = "sh-eth", .resource = sh_eth_giga1_resources, .id = 3, .num_resources = ARRAY_SIZE(sh_eth_giga1_resources), .dev = { .platform_data = &sh7757_eth_giga1_pdata, }, }; /* SH_MMCIF */ static struct resource sh_mmcif_resources[] = { [0] = { .start = 0xffcb0000, .end = 0xffcb00ff, .flags = IORESOURCE_MEM, }, [1] = { .start = 211, .flags = IORESOURCE_IRQ, }, [2] = { .start = 212, .flags = IORESOURCE_IRQ, }, }; static struct sh_mmcif_plat_data sh_mmcif_plat = { .sup_pclk = 0x0f, .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, .ocr = MMC_VDD_32_33 | MMC_VDD_33_34, .slave_id_tx = SHDMA_SLAVE_MMCIF_TX, .slave_id_rx = SHDMA_SLAVE_MMCIF_RX, }; static struct platform_device sh_mmcif_device = { .name = "sh_mmcif", .id = 0, .dev = { .platform_data = &sh_mmcif_plat, }, .num_resources = ARRAY_SIZE(sh_mmcif_resources), .resource = sh_mmcif_resources, }; /* SDHI0 */ static struct sh_mobile_sdhi_info sdhi_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI_RX, .tmio_caps = MMC_CAP_SD_HIGHSPEED, }; static struct resource sdhi_resources[] = { [0] = { .start = 0xffe50000, .end = 0xffe501ff, .flags = IORESOURCE_MEM, }, [1] = { .start = 20, .flags = IORESOURCE_IRQ, }, }; static struct platform_device sdhi_device = { .name = "sh_mobile_sdhi", .num_resources = ARRAY_SIZE(sdhi_resources), .resource = sdhi_resources, .id = 0, .dev = { .platform_data = &sdhi_info, }, }; static int usbhs0_get_id(struct platform_device *pdev) { return USBHS_GADGET; } static struct renesas_usbhs_platform_info usb0_data = { .platform_callback = { .get_id = usbhs0_get_id, }, .driver_param = { .buswait_bwait = 5, } }; static struct resource usb0_resources[] = { [0] = { .start = 0xfe450000, .end = 0xfe4501ff, .flags = IORESOURCE_MEM, }, 
[1] = { .start = 50, .end = 50, .flags = IORESOURCE_IRQ, }, }; static struct platform_device usb0_device = { .name = "renesas_usbhs", .id = 0, .dev = { .platform_data = &usb0_data, }, .num_resources = ARRAY_SIZE(usb0_resources), .resource = usb0_resources, }; static struct platform_device *sh7757lcr_devices[] __initdata = { &heartbeat_device, &sh7757_eth0_device, &sh7757_eth1_device, &sh7757_eth_giga0_device, &sh7757_eth_giga1_device, &sh_mmcif_device, &sdhi_device, &usb0_device, }; static struct flash_platform_data spi_flash_data = { .name = "m25p80", .type = "m25px64", }; static struct spi_board_info spi_board_info[] = { { .modalias = "m25p80", .max_speed_hz = 25000000, .bus_num = 0, .chip_select = 1, .platform_data = &spi_flash_data, }, }; static int __init sh7757lcr_devices_setup(void) { /* RGMII (PTA) */ gpio_request(GPIO_FN_ET0_MDC, NULL); gpio_request(GPIO_FN_ET0_MDIO, NULL); gpio_request(GPIO_FN_ET1_MDC, NULL); gpio_request(GPIO_FN_ET1_MDIO, NULL); /* ONFI (PTB, PTZ) */ gpio_request(GPIO_FN_ON_NRE, NULL); gpio_request(GPIO_FN_ON_NWE, NULL); gpio_request(GPIO_FN_ON_NWP, NULL); gpio_request(GPIO_FN_ON_NCE0, NULL); gpio_request(GPIO_FN_ON_R_B0, NULL); gpio_request(GPIO_FN_ON_ALE, NULL); gpio_request(GPIO_FN_ON_CLE, NULL); gpio_request(GPIO_FN_ON_DQ7, NULL); gpio_request(GPIO_FN_ON_DQ6, NULL); gpio_request(GPIO_FN_ON_DQ5, NULL); gpio_request(GPIO_FN_ON_DQ4, NULL); gpio_request(GPIO_FN_ON_DQ3, NULL); gpio_request(GPIO_FN_ON_DQ2, NULL); gpio_request(GPIO_FN_ON_DQ1, NULL); gpio_request(GPIO_FN_ON_DQ0, NULL); /* IRQ8 to 0 (PTB, PTC) */ gpio_request(GPIO_FN_IRQ8, NULL); gpio_request(GPIO_FN_IRQ7, NULL); gpio_request(GPIO_FN_IRQ6, NULL); gpio_request(GPIO_FN_IRQ5, NULL); gpio_request(GPIO_FN_IRQ4, NULL); gpio_request(GPIO_FN_IRQ3, NULL); gpio_request(GPIO_FN_IRQ2, NULL); gpio_request(GPIO_FN_IRQ1, NULL); gpio_request(GPIO_FN_IRQ0, NULL); /* SPI0 (PTD) */ gpio_request(GPIO_FN_SP0_MOSI, NULL); gpio_request(GPIO_FN_SP0_MISO, NULL); gpio_request(GPIO_FN_SP0_SCK, NULL); 
gpio_request(GPIO_FN_SP0_SCK_FB, NULL); gpio_request(GPIO_FN_SP0_SS0, NULL); gpio_request(GPIO_FN_SP0_SS1, NULL); gpio_request(GPIO_FN_SP0_SS2, NULL); gpio_request(GPIO_FN_SP0_SS3, NULL); /* RMII 0/1 (PTE, PTF) */ gpio_request(GPIO_FN_RMII0_CRS_DV, NULL); gpio_request(GPIO_FN_RMII0_TXD1, NULL); gpio_request(GPIO_FN_RMII0_TXD0, NULL); gpio_request(GPIO_FN_RMII0_TXEN, NULL); gpio_request(GPIO_FN_RMII0_REFCLK, NULL); gpio_request(GPIO_FN_RMII0_RXD1, NULL); gpio_request(GPIO_FN_RMII0_RXD0, NULL); gpio_request(GPIO_FN_RMII0_RX_ER, NULL); gpio_request(GPIO_FN_RMII1_CRS_DV, NULL); gpio_request(GPIO_FN_RMII1_TXD1, NULL); gpio_request(GPIO_FN_RMII1_TXD0, NULL); gpio_request(GPIO_FN_RMII1_TXEN, NULL); gpio_request(GPIO_FN_RMII1_REFCLK, NULL); gpio_request(GPIO_FN_RMII1_RXD1, NULL); gpio_request(GPIO_FN_RMII1_RXD0, NULL); gpio_request(GPIO_FN_RMII1_RX_ER, NULL); /* eMMC (PTG) */ gpio_request(GPIO_FN_MMCCLK, NULL); gpio_request(GPIO_FN_MMCCMD, NULL); gpio_request(GPIO_FN_MMCDAT7, NULL); gpio_request(GPIO_FN_MMCDAT6, NULL); gpio_request(GPIO_FN_MMCDAT5, NULL); gpio_request(GPIO_FN_MMCDAT4, NULL); gpio_request(GPIO_FN_MMCDAT3, NULL); gpio_request(GPIO_FN_MMCDAT2, NULL); gpio_request(GPIO_FN_MMCDAT1, NULL); gpio_request(GPIO_FN_MMCDAT0, NULL); /* LPC (PTG, PTH, PTQ, PTU) */ gpio_request(GPIO_FN_SERIRQ, NULL); gpio_request(GPIO_FN_LPCPD, NULL); gpio_request(GPIO_FN_LDRQ, NULL); gpio_request(GPIO_FN_WP, NULL); gpio_request(GPIO_FN_FMS0, NULL); gpio_request(GPIO_FN_LAD3, NULL); gpio_request(GPIO_FN_LAD2, NULL); gpio_request(GPIO_FN_LAD1, NULL); gpio_request(GPIO_FN_LAD0, NULL); gpio_request(GPIO_FN_LFRAME, NULL); gpio_request(GPIO_FN_LRESET, NULL); gpio_request(GPIO_FN_LCLK, NULL); gpio_request(GPIO_FN_LGPIO7, NULL); gpio_request(GPIO_FN_LGPIO6, NULL); gpio_request(GPIO_FN_LGPIO5, NULL); gpio_request(GPIO_FN_LGPIO4, NULL); /* SPI1 (PTH) */ gpio_request(GPIO_FN_SP1_MOSI, NULL); gpio_request(GPIO_FN_SP1_MISO, NULL); gpio_request(GPIO_FN_SP1_SCK, NULL); gpio_request(GPIO_FN_SP1_SCK_FB, 
NULL); gpio_request(GPIO_FN_SP1_SS0, NULL); gpio_request(GPIO_FN_SP1_SS1, NULL); /* SDHI (PTI) */ gpio_request(GPIO_FN_SD_WP, NULL); gpio_request(GPIO_FN_SD_CD, NULL); gpio_request(GPIO_FN_SD_CLK, NULL); gpio_request(GPIO_FN_SD_CMD, NULL); gpio_request(GPIO_FN_SD_D3, NULL); gpio_request(GPIO_FN_SD_D2, NULL); gpio_request(GPIO_FN_SD_D1, NULL); gpio_request(GPIO_FN_SD_D0, NULL); /* SCIF3/4 (PTJ, PTW) */ gpio_request(GPIO_FN_RTS3, NULL); gpio_request(GPIO_FN_CTS3, NULL); gpio_request(GPIO_FN_TXD3, NULL); gpio_request(GPIO_FN_RXD3, NULL); gpio_request(GPIO_FN_RTS4, NULL); gpio_request(GPIO_FN_RXD4, NULL); gpio_request(GPIO_FN_TXD4, NULL); gpio_request(GPIO_FN_CTS4, NULL); /* SERMUX (PTK, PTL, PTO, PTV) */ gpio_request(GPIO_FN_COM2_TXD, NULL); gpio_request(GPIO_FN_COM2_RXD, NULL); gpio_request(GPIO_FN_COM2_RTS, NULL); gpio_request(GPIO_FN_COM2_CTS, NULL); gpio_request(GPIO_FN_COM2_DTR, NULL); gpio_request(GPIO_FN_COM2_DSR, NULL); gpio_request(GPIO_FN_COM2_DCD, NULL); gpio_request(GPIO_FN_COM2_RI, NULL); gpio_request(GPIO_FN_RAC_RXD, NULL); gpio_request(GPIO_FN_RAC_RTS, NULL); gpio_request(GPIO_FN_RAC_CTS, NULL); gpio_request(GPIO_FN_RAC_DTR, NULL); gpio_request(GPIO_FN_RAC_DSR, NULL); gpio_request(GPIO_FN_RAC_DCD, NULL); gpio_request(GPIO_FN_RAC_TXD, NULL); gpio_request(GPIO_FN_COM1_TXD, NULL); gpio_request(GPIO_FN_COM1_RXD, NULL); gpio_request(GPIO_FN_COM1_RTS, NULL); gpio_request(GPIO_FN_COM1_CTS, NULL); writeb(0x10, 0xfe470000); /* SMR0: SerMux mode 0 */ /* IIC (PTM, PTR, PTS) */ gpio_request(GPIO_FN_SDA7, NULL); gpio_request(GPIO_FN_SCL7, NULL); gpio_request(GPIO_FN_SDA6, NULL); gpio_request(GPIO_FN_SCL6, NULL); gpio_request(GPIO_FN_SDA5, NULL); gpio_request(GPIO_FN_SCL5, NULL); gpio_request(GPIO_FN_SDA4, NULL); gpio_request(GPIO_FN_SCL4, NULL); gpio_request(GPIO_FN_SDA3, NULL); gpio_request(GPIO_FN_SCL3, NULL); gpio_request(GPIO_FN_SDA2, NULL); gpio_request(GPIO_FN_SCL2, NULL); gpio_request(GPIO_FN_SDA1, NULL); gpio_request(GPIO_FN_SCL1, NULL); 
gpio_request(GPIO_FN_SDA0, NULL); gpio_request(GPIO_FN_SCL0, NULL); /* USB (PTN) */ gpio_request(GPIO_FN_VBUS_EN, NULL); gpio_request(GPIO_FN_VBUS_OC, NULL); /* SGPIO1/0 (PTN, PTO) */ gpio_request(GPIO_FN_SGPIO1_CLK, NULL); gpio_request(GPIO_FN_SGPIO1_LOAD, NULL); gpio_request(GPIO_FN_SGPIO1_DI, NULL); gpio_request(GPIO_FN_SGPIO1_DO, NULL); gpio_request(GPIO_FN_SGPIO0_CLK, NULL); gpio_request(GPIO_FN_SGPIO0_LOAD, NULL); gpio_request(GPIO_FN_SGPIO0_DI, NULL); gpio_request(GPIO_FN_SGPIO0_DO, NULL); /* WDT (PTN) */ gpio_request(GPIO_FN_SUB_CLKIN, NULL); /* System (PTT) */ gpio_request(GPIO_FN_STATUS1, NULL); gpio_request(GPIO_FN_STATUS0, NULL); /* PWMX (PTT) */ gpio_request(GPIO_FN_PWMX1, NULL); gpio_request(GPIO_FN_PWMX0, NULL); /* R-SPI (PTV) */ gpio_request(GPIO_FN_R_SPI_MOSI, NULL); gpio_request(GPIO_FN_R_SPI_MISO, NULL); gpio_request(GPIO_FN_R_SPI_RSPCK, NULL); gpio_request(GPIO_FN_R_SPI_SSL0, NULL); gpio_request(GPIO_FN_R_SPI_SSL1, NULL); /* EVC (PTV, PTW) */ gpio_request(GPIO_FN_EVENT7, NULL); gpio_request(GPIO_FN_EVENT6, NULL); gpio_request(GPIO_FN_EVENT5, NULL); gpio_request(GPIO_FN_EVENT4, NULL); gpio_request(GPIO_FN_EVENT3, NULL); gpio_request(GPIO_FN_EVENT2, NULL); gpio_request(GPIO_FN_EVENT1, NULL); gpio_request(GPIO_FN_EVENT0, NULL); /* LED for heartbeat */ gpio_request(GPIO_PTU3, NULL); gpio_direction_output(GPIO_PTU3, 1); gpio_request(GPIO_PTU2, NULL); gpio_direction_output(GPIO_PTU2, 1); gpio_request(GPIO_PTU1, NULL); gpio_direction_output(GPIO_PTU1, 1); gpio_request(GPIO_PTU0, NULL); gpio_direction_output(GPIO_PTU0, 1); /* control for MDIO of Gigabit Ethernet */ gpio_request(GPIO_PTT4, NULL); gpio_direction_output(GPIO_PTT4, 1); /* control for eMMC */ gpio_request(GPIO_PTT7, NULL); /* eMMC_RST# */ gpio_direction_output(GPIO_PTT7, 0); gpio_request(GPIO_PTT6, NULL); /* eMMC_INDEX# */ gpio_direction_output(GPIO_PTT6, 0); gpio_request(GPIO_PTT5, NULL); /* eMMC_PRST# */ gpio_direction_output(GPIO_PTT5, 1); /* register SPI device information */ 
spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info)); /* General platform */ return platform_add_devices(sh7757lcr_devices, ARRAY_SIZE(sh7757lcr_devices)); } arch_initcall(sh7757lcr_devices_setup); /* Initialize IRQ setting */ void __init init_sh7757lcr_IRQ(void) { plat_irq_setup_pins(IRQ_MODE_IRQ7654); plat_irq_setup_pins(IRQ_MODE_IRQ3210); } /* Initialize the board */ static void __init sh7757lcr_setup(char **cmdline_p) { printk(KERN_INFO "Renesas R0P7757LC0012RL support.\n"); } static int sh7757lcr_mode_pins(void) { int value = 0; /* These are the factory default settings of S3 (Low active). * If you change these dip switches then you will need to * adjust the values below as well. */ value |= MODE_PIN0; /* Clock Mode: 1 */ return value; } /* The Machine Vector */ static struct sh_machine_vector mv_sh7757lcr __initmv = { .mv_name = "SH7757LCR", .mv_setup = sh7757lcr_setup, .mv_init_irq = init_sh7757lcr_IRQ, .mv_mode_pins = sh7757lcr_mode_pins, };
gpl-2.0
zaidshb/semc-kernel-qsd8k
lib/iomap_copy.c
5224
2198
/* * Copyright 2006 PathScale, Inc. All Rights Reserved. * * This file is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/module.h> #include <linux/io.h> /** * __iowrite32_copy - copy data to MMIO space, in 32-bit units * @to: destination, in MMIO space (must be 32-bit aligned) * @from: source (must be 32-bit aligned) * @count: number of 32-bit quantities to copy * * Copy data from kernel space to MMIO space, in units of 32 bits at a * time. Order of access is not guaranteed, nor is a memory barrier * performed afterwards. */ void __attribute__((weak)) __iowrite32_copy(void __iomem *to, const void *from, size_t count) { u32 __iomem *dst = to; const u32 *src = from; const u32 *end = src + count; while (src < end) __raw_writel(*src++, dst++); } EXPORT_SYMBOL_GPL(__iowrite32_copy); /** * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units * @to: destination, in MMIO space (must be 64-bit aligned) * @from: source (must be 64-bit aligned) * @count: number of 64-bit quantities to copy * * Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a * time. Order of access is not guaranteed, nor is a memory barrier * performed afterwards. 
 */
void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
					    const void *from,
					    size_t count)
{
#ifdef CONFIG_64BIT
	u64 __iomem *dst = to;
	const u64 *src = from;
	const u64 *end = src + count;

	/* One raw 64-bit MMIO write per element; no barriers, no ordering
	 * guarantee (see the kernel-doc above). */
	while (src < end)
		__raw_writeq(*src++, dst++);
#else
	/* No native 64-bit MMIO write on 32-bit kernels: emit twice as many
	 * 32-bit writes instead (count is in 64-bit units, hence * 2). */
	__iowrite32_copy(to, from, count * 2);
#endif
}
EXPORT_SYMBOL_GPL(__iowrite64_copy);
gpl-2.0
jeehyn/NewWorld_kernel_ef52
drivers/hwspinlock/u8500_hsem.c
5224
5046
/* * u8500 HWSEM driver * * Copyright (C) 2010-2011 ST-Ericsson * * Implements u8500 semaphore handling for protocol 1, no interrupts. * * Author: Mathieu Poirier <mathieu.poirier@linaro.org> * Heavily borrowed from the work of : * Simon Que <sque@ti.com> * Hari Kanigeri <h-kanigeri2@ti.com> * Ohad Ben-Cohen <ohad@wizery.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/hwspinlock.h> #include <linux/platform_device.h> #include "hwspinlock_internal.h" /* * Implementation of STE's HSem protocol 1 without interrutps. * The only masterID we allow is '0x01' to force people to use * HSems for synchronisation between processors rather than processes * on the ARM core. */ #define U8500_MAX_SEMAPHORE 32 /* a total of 32 semaphore */ #define RESET_SEMAPHORE (0) /* free */ /* * CPU ID for master running u8500 kernel. * Hswpinlocks should only be used to synchonise operations * between the Cortex A9 core and the other CPUs. Hence * forcing the masterID to a preset value. */ #define HSEM_MASTER_ID 0x01 #define HSEM_REGISTER_OFFSET 0x08 #define HSEM_CTRL_REG 0x00 #define HSEM_ICRALL 0x90 #define HSEM_PROTOCOL_1 0x01 static int u8500_hsem_trylock(struct hwspinlock *lock) { void __iomem *lock_addr = lock->priv; writel(HSEM_MASTER_ID, lock_addr); /* get only first 4 bit and compare to masterID. * if equal, we have the semaphore, otherwise * someone else has it. 
*/ return (HSEM_MASTER_ID == (0x0F & readl(lock_addr))); } static void u8500_hsem_unlock(struct hwspinlock *lock) { void __iomem *lock_addr = lock->priv; /* release the lock by writing 0 to it */ writel(RESET_SEMAPHORE, lock_addr); } /* * u8500: what value is recommended here ? */ static void u8500_hsem_relax(struct hwspinlock *lock) { ndelay(50); } static const struct hwspinlock_ops u8500_hwspinlock_ops = { .trylock = u8500_hsem_trylock, .unlock = u8500_hsem_unlock, .relax = u8500_hsem_relax, }; static int __devinit u8500_hsem_probe(struct platform_device *pdev) { struct hwspinlock_pdata *pdata = pdev->dev.platform_data; struct hwspinlock_device *bank; struct hwspinlock *hwlock; struct resource *res; void __iomem *io_base; int i, ret, num_locks = U8500_MAX_SEMAPHORE; ulong val; if (!pdata) return -ENODEV; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; io_base = ioremap(res->start, resource_size(res)); if (!io_base) return -ENOMEM; /* make sure protocol 1 is selected */ val = readl(io_base + HSEM_CTRL_REG); writel((val & ~HSEM_PROTOCOL_1), io_base + HSEM_CTRL_REG); /* clear all interrupts */ writel(0xFFFF, io_base + HSEM_ICRALL); bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL); if (!bank) { ret = -ENOMEM; goto iounmap_base; } platform_set_drvdata(pdev, bank); for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++) hwlock->priv = io_base + HSEM_REGISTER_OFFSET + sizeof(u32) * i; /* no pm needed for HSem but required to comply with hwspilock core */ pm_runtime_enable(&pdev->dev); ret = hwspin_lock_register(bank, &pdev->dev, &u8500_hwspinlock_ops, pdata->base_id, num_locks); if (ret) goto reg_fail; return 0; reg_fail: pm_runtime_disable(&pdev->dev); kfree(bank); iounmap_base: iounmap(io_base); return ret; } static int __devexit u8500_hsem_remove(struct platform_device *pdev) { struct hwspinlock_device *bank = platform_get_drvdata(pdev); void __iomem *io_base = bank->lock[0].priv - 
HSEM_REGISTER_OFFSET; int ret; /* clear all interrupts */ writel(0xFFFF, io_base + HSEM_ICRALL); ret = hwspin_lock_unregister(bank); if (ret) { dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret); return ret; } pm_runtime_disable(&pdev->dev); iounmap(io_base); kfree(bank); return 0; } static struct platform_driver u8500_hsem_driver = { .probe = u8500_hsem_probe, .remove = __devexit_p(u8500_hsem_remove), .driver = { .name = "u8500_hsem", .owner = THIS_MODULE, }, }; static int __init u8500_hsem_init(void) { return platform_driver_register(&u8500_hsem_driver); } /* board init code might need to reserve hwspinlocks for predefined purposes */ postcore_initcall(u8500_hsem_init); static void __exit u8500_hsem_exit(void) { platform_driver_unregister(&u8500_hsem_driver); } module_exit(u8500_hsem_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Hardware Spinlock driver for u8500"); MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
gpl-2.0
fear130986/GT-I9195_EUR_KK_Opensource_kernel
drivers/pci/pcie/aer/aerdrv_errprint.c
5736
7680
/* * drivers/pci/pcie/aer/aerdrv_errprint.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Format error messages and print them to console. * * Copyright (C) 2006 Intel Corp. * Tom Long Nguyen (tom.l.nguyen@intel.com) * Zhang Yanmin (yanmin.zhang@intel.com) * */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/pm.h> #include <linux/suspend.h> #include <linux/cper.h> #include "aerdrv.h" #define AER_AGENT_RECEIVER 0 #define AER_AGENT_REQUESTER 1 #define AER_AGENT_COMPLETER 2 #define AER_AGENT_TRANSMITTER 3 #define AER_AGENT_REQUESTER_MASK(t) ((t == AER_CORRECTABLE) ? \ 0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP)) #define AER_AGENT_COMPLETER_MASK(t) ((t == AER_CORRECTABLE) ? \ 0 : PCI_ERR_UNC_COMP_ABORT) #define AER_AGENT_TRANSMITTER_MASK(t) ((t == AER_CORRECTABLE) ? \ (PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0) #define AER_GET_AGENT(t, e) \ ((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER : \ (e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER : \ (e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER : \ AER_AGENT_RECEIVER) #define AER_PHYSICAL_LAYER_ERROR 0 #define AER_DATA_LINK_LAYER_ERROR 1 #define AER_TRANSACTION_LAYER_ERROR 2 #define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \ PCI_ERR_COR_RCVR : 0) #define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \ (PCI_ERR_COR_BAD_TLP| \ PCI_ERR_COR_BAD_DLLP| \ PCI_ERR_COR_REP_ROLL| \ PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP) #define AER_GET_LAYER_ERROR(t, e) \ ((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \ (e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? 
AER_DATA_LINK_LAYER_ERROR : \ AER_TRANSACTION_LAYER_ERROR) /* * AER error strings */ static const char *aer_error_severity_string[] = { "Uncorrected (Non-Fatal)", "Uncorrected (Fatal)", "Corrected" }; static const char *aer_error_layer[] = { "Physical Layer", "Data Link Layer", "Transaction Layer" }; static const char *aer_correctable_error_string[] = { "Receiver Error", /* Bit Position 0 */ NULL, NULL, NULL, NULL, NULL, "Bad TLP", /* Bit Position 6 */ "Bad DLLP", /* Bit Position 7 */ "RELAY_NUM Rollover", /* Bit Position 8 */ NULL, NULL, NULL, "Replay Timer Timeout", /* Bit Position 12 */ "Advisory Non-Fatal", /* Bit Position 13 */ }; static const char *aer_uncorrectable_error_string[] = { NULL, NULL, NULL, NULL, "Data Link Protocol", /* Bit Position 4 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Poisoned TLP", /* Bit Position 12 */ "Flow Control Protocol", /* Bit Position 13 */ "Completion Timeout", /* Bit Position 14 */ "Completer Abort", /* Bit Position 15 */ "Unexpected Completion", /* Bit Position 16 */ "Receiver Overflow", /* Bit Position 17 */ "Malformed TLP", /* Bit Position 18 */ "ECRC", /* Bit Position 19 */ "Unsupported Request", /* Bit Position 20 */ }; static const char *aer_agent_string[] = { "Receiver ID", "Requester ID", "Completer ID", "Transmitter ID" }; static void __aer_print_error(const char *prefix, struct aer_err_info *info) { int i, status; const char *errmsg = NULL; status = (info->status & ~info->mask); for (i = 0; i < 32; i++) { if (!(status & (1 << i))) continue; if (info->severity == AER_CORRECTABLE) errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ? aer_correctable_error_string[i] : NULL; else errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ? aer_uncorrectable_error_string[i] : NULL; if (errmsg) printk("%s"" [%2d] %-22s%s\n", prefix, i, errmsg, info->first_error == i ? " (First)" : ""); else printk("%s"" [%2d] Unknown Error Bit%s\n", prefix, i, info->first_error == i ? 
" (First)" : ""); } } void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) { int id = ((dev->bus->number << 8) | dev->devfn); char prefix[44]; snprintf(prefix, sizeof(prefix), "%s%s %s: ", (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR, dev_driver_string(&dev->dev), dev_name(&dev->dev)); if (info->status == 0) { printk("%s""PCIe Bus Error: severity=%s, type=Unaccessible, " "id=%04x(Unregistered Agent ID)\n", prefix, aer_error_severity_string[info->severity], id); } else { int layer, agent; layer = AER_GET_LAYER_ERROR(info->severity, info->status); agent = AER_GET_AGENT(info->severity, info->status); printk("%s""PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", prefix, aer_error_severity_string[info->severity], aer_error_layer[layer], id, aer_agent_string[agent]); printk("%s"" device [%04x:%04x] error status/mask=%08x/%08x\n", prefix, dev->vendor, dev->device, info->status, info->mask); __aer_print_error(prefix, info); if (info->tlp_header_valid) { unsigned char *tlp = (unsigned char *) &info->tlp; printk("%s"" TLP Header:" " %02x%02x%02x%02x %02x%02x%02x%02x" " %02x%02x%02x%02x %02x%02x%02x%02x\n", prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), *(tlp + 11), *(tlp + 10), *(tlp + 9), *(tlp + 8), *(tlp + 15), *(tlp + 14), *(tlp + 13), *(tlp + 12)); } } if (info->id && info->error_dev_num > 1 && info->id == id) printk("%s"" Error of this Agent(%04x) is reported first\n", prefix, id); } void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info) { dev_info(&dev->dev, "AER: %s%s error received: id=%04x\n", info->multi_error_valid ? 
"Multiple " : "", aer_error_severity_string[info->severity], info->id); } #ifdef CONFIG_ACPI_APEI_PCIEAER int cper_severity_to_aer(int cper_severity) { switch (cper_severity) { case CPER_SEV_RECOVERABLE: return AER_NONFATAL; case CPER_SEV_FATAL: return AER_FATAL; default: return AER_CORRECTABLE; } } EXPORT_SYMBOL_GPL(cper_severity_to_aer); void cper_print_aer(const char *prefix, int cper_severity, struct aer_capability_regs *aer) { int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0; u32 status, mask; const char **status_strs; aer_severity = cper_severity_to_aer(cper_severity); if (aer_severity == AER_CORRECTABLE) { status = aer->cor_status; mask = aer->cor_mask; status_strs = aer_correctable_error_string; status_strs_size = ARRAY_SIZE(aer_correctable_error_string); } else { status = aer->uncor_status; mask = aer->uncor_mask; status_strs = aer_uncorrectable_error_string; status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string); tlp_header_valid = status & AER_LOG_TLP_MASKS; } layer = AER_GET_LAYER_ERROR(aer_severity, status); agent = AER_GET_AGENT(aer_severity, status); printk("%s""aer_status: 0x%08x, aer_mask: 0x%08x\n", prefix, status, mask); cper_print_bits(prefix, status, status_strs, status_strs_size); printk("%s""aer_layer=%s, aer_agent=%s\n", prefix, aer_error_layer[layer], aer_agent_string[agent]); if (aer_severity != AER_CORRECTABLE) printk("%s""aer_uncor_severity: 0x%08x\n", prefix, aer->uncor_severity); if (tlp_header_valid) { const unsigned char *tlp; tlp = (const unsigned char *)&aer->header_log; printk("%s""aer_tlp_header:" " %02x%02x%02x%02x %02x%02x%02x%02x" " %02x%02x%02x%02x %02x%02x%02x%02x\n", prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), *(tlp + 11), *(tlp + 10), *(tlp + 9), *(tlp + 8), *(tlp + 15), *(tlp + 14), *(tlp + 13), *(tlp + 12)); } } #endif
gpl-2.0
BytecodeMe/BCM-mako
arch/mips/jz4740/time.c
7528
3824
/*
 *  Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
 *  JZ4740 platform time support
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under  the terms of the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/time.h>

#include <linux/clockchips.h>

#include <asm/mach-jz4740/irq.h>
#include <asm/time.h>

#include "clock.h"
#include "timer.h"

/* TCU channel assignment: channel 0 drives the clockevent, channel 1
 * free-runs as the clocksource. */
#define TIMER_CLOCKEVENT 0
#define TIMER_CLOCKSOURCE 1

/* Timer ticks per jiffy; computed from the external clock rate in
 * plat_time_init() (visible later in this file). */
static uint16_t jz4740_jiffies_per_tick;

/* Clocksource read hook: just return the free-running 16-bit counter. */
static cycle_t jz4740_clocksource_read(struct clocksource *cs)
{
	return jz4740_timer_get_count(TIMER_CLOCKSOURCE);
}

static struct clocksource jz4740_clocksource = {
	.name = "jz4740-timer",
	.rating = 200,
	.read = jz4740_clocksource_read,
	.mask = CLOCKSOURCE_MASK(16),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Timer-full interrupt: ack the event, stop the timer unless we are in
 * periodic mode (one-shot timers must not fire again), then hand the tick
 * to the clockevent core. */
static irqreturn_t jz4740_clockevent_irq(int irq, void *devid)
{
	struct clock_event_device *cd = devid;

	jz4740_timer_ack_full(TIMER_CLOCKEVENT);

	if (cd->mode != CLOCK_EVT_MODE_PERIODIC)
		jz4740_timer_disable(TIMER_CLOCKEVENT);

	cd->event_handler(cd);

	return IRQ_HANDLED;
}

static void jz4740_clockevent_set_mode(enum clock_event_mode mode,
	struct clock_event_device *cd)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		jz4740_timer_set_count(TIMER_CLOCKEVENT, 0);
		jz4740_timer_set_period(TIMER_CLOCKEVENT, jz4740_jiffies_per_tick);
		/* fallthrough - after reprogramming the period, RESUME's
		 * enable path below applies to PERIODIC as well */
	case CLOCK_EVT_MODE_RESUME:
		jz4740_timer_irq_full_enable(TIMER_CLOCKEVENT);
		jz4740_timer_enable(TIMER_CLOCKEVENT);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_SHUTDOWN:
		jz4740_timer_disable(TIMER_CLOCKEVENT);
		break;
	default:
		break;
	}
}

static int jz4740_clockevent_set_next(unsigned long evt, struct
clock_event_device *cd) { jz4740_timer_set_count(TIMER_CLOCKEVENT, 0); jz4740_timer_set_period(TIMER_CLOCKEVENT, evt); jz4740_timer_enable(TIMER_CLOCKEVENT); return 0; } static struct clock_event_device jz4740_clockevent = { .name = "jz4740-timer", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_next_event = jz4740_clockevent_set_next, .set_mode = jz4740_clockevent_set_mode, .rating = 200, .irq = JZ4740_IRQ_TCU0, }; static struct irqaction timer_irqaction = { .handler = jz4740_clockevent_irq, .flags = IRQF_PERCPU | IRQF_TIMER, .name = "jz4740-timerirq", .dev_id = &jz4740_clockevent, }; void __init plat_time_init(void) { int ret; uint32_t clk_rate; uint16_t ctrl; jz4740_timer_init(); clk_rate = jz4740_clock_bdata.ext_rate >> 4; jz4740_jiffies_per_tick = DIV_ROUND_CLOSEST(clk_rate, HZ); clockevent_set_clock(&jz4740_clockevent, clk_rate); jz4740_clockevent.min_delta_ns = clockevent_delta2ns(100, &jz4740_clockevent); jz4740_clockevent.max_delta_ns = clockevent_delta2ns(0xffff, &jz4740_clockevent); jz4740_clockevent.cpumask = cpumask_of(0); clockevents_register_device(&jz4740_clockevent); ret = clocksource_register_hz(&jz4740_clocksource, clk_rate); if (ret) printk(KERN_ERR "Failed to register clocksource: %d\n", ret); setup_irq(JZ4740_IRQ_TCU0, &timer_irqaction); ctrl = JZ_TIMER_CTRL_PRESCALE_16 | JZ_TIMER_CTRL_SRC_EXT; jz4740_timer_set_ctrl(TIMER_CLOCKEVENT, ctrl); jz4740_timer_set_ctrl(TIMER_CLOCKSOURCE, ctrl); jz4740_timer_set_period(TIMER_CLOCKEVENT, jz4740_jiffies_per_tick); jz4740_timer_irq_full_enable(TIMER_CLOCKEVENT); jz4740_timer_set_period(TIMER_CLOCKSOURCE, 0xffff); jz4740_timer_enable(TIMER_CLOCKEVENT); jz4740_timer_enable(TIMER_CLOCKSOURCE); }
gpl-2.0
adbaby/android_kernel_msm8974
drivers/staging/bcm/Bcmnet.c
8040
6191
#include "headers.h" struct net_device *gblpnetdev; static INT bcm_open(struct net_device *dev) { PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev); if (Adapter->fw_download_done == FALSE) { pr_notice(PFX "%s: link up failed (download in progress)\n", dev->name); return -EBUSY; } if (netif_msg_ifup(Adapter)) pr_info(PFX "%s: enabling interface\n", dev->name); if (Adapter->LinkUpStatus) { if (netif_msg_link(Adapter)) pr_info(PFX "%s: link up\n", dev->name); netif_carrier_on(Adapter->dev); netif_start_queue(Adapter->dev); } return 0; } static INT bcm_close(struct net_device *dev) { PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev); if (netif_msg_ifdown(Adapter)) pr_info(PFX "%s: disabling interface\n", dev->name); netif_carrier_off(dev); netif_stop_queue(dev); return 0; } static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb) { return ClassifyPacket(netdev_priv(dev), skb); } /******************************************************************* * Function - bcm_transmit() * * Description - This is the main transmit function for our virtual * interface(eth0). It handles the ARP packets. It * clones this packet and then Queue it to a suitable * Queue. Then calls the transmit_packet(). 
* * Parameter - skb - Pointer to the socket buffer structure * dev - Pointer to the virtual net device structure * *********************************************************************/ static netdev_tx_t bcm_transmit(struct sk_buff *skb, struct net_device *dev) { PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev); u16 qindex = skb_get_queue_mapping(skb); if (Adapter->device_removed || !Adapter->LinkUpStatus) goto drop; if (Adapter->TransferMode != IP_PACKET_ONLY_MODE) goto drop; if (INVALID_QUEUE_INDEX == qindex) goto drop; if (Adapter->PackInfo[qindex].uiCurrentPacketsOnHost >= SF_MAX_ALLOWED_PACKETS_TO_BACKUP) return NETDEV_TX_BUSY; /* Now Enqueue the packet */ if (netif_msg_tx_queued(Adapter)) pr_info(PFX "%s: enqueueing packet to queue %d\n", dev->name, qindex); spin_lock(&Adapter->PackInfo[qindex].SFQueueLock); Adapter->PackInfo[qindex].uiCurrentBytesOnHost += skb->len; Adapter->PackInfo[qindex].uiCurrentPacketsOnHost++; *((B_UINT32 *) skb->cb + SKB_CB_LATENCY_OFFSET) = jiffies; ENQUEUEPACKET(Adapter->PackInfo[qindex].FirstTxQueue, Adapter->PackInfo[qindex].LastTxQueue, skb); atomic_inc(&Adapter->TotalPacketCount); spin_unlock(&Adapter->PackInfo[qindex].SFQueueLock); /* FIXME - this is racy and incorrect, replace with work queue */ if (!atomic_read(&Adapter->TxPktAvail)) { atomic_set(&Adapter->TxPktAvail, 1); wake_up(&Adapter->tx_packet_wait_queue); } return NETDEV_TX_OK; drop: dev_kfree_skb(skb); return NETDEV_TX_OK; } /** @ingroup init_functions Register other driver entry points with the kernel */ static const struct net_device_ops bcmNetDevOps = { .ndo_open = bcm_open, .ndo_stop = bcm_close, .ndo_start_xmit = bcm_transmit, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_select_queue = bcm_select_queue, }; static struct device_type wimax_type = { .name = "wimax", }; static int bcm_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { cmd->supported = 0; cmd->advertising = 0; cmd->speed 
= SPEED_10000; cmd->duplex = DUPLEX_FULL; cmd->port = PORT_TP; cmd->phy_address = 0; cmd->transceiver = XCVR_INTERNAL; cmd->autoneg = AUTONEG_DISABLE; cmd->maxtxpkt = 0; cmd->maxrxpkt = 0; return 0; } static void bcm_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev); PS_INTERFACE_ADAPTER psIntfAdapter = Adapter->pvInterfaceAdapter; struct usb_device *udev = interface_to_usbdev(psIntfAdapter->interface); strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u", Adapter->uiFlashLayoutMajorVersion, Adapter->uiFlashLayoutMinorVersion); usb_make_path(udev, info->bus_info, sizeof(info->bus_info)); } static u32 bcm_get_link(struct net_device *dev) { PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev); return Adapter->LinkUpStatus; } static u32 bcm_get_msglevel(struct net_device *dev) { PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev); return Adapter->msg_enable; } static void bcm_set_msglevel(struct net_device *dev, u32 level) { PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev); Adapter->msg_enable = level; } static const struct ethtool_ops bcm_ethtool_ops = { .get_settings = bcm_get_settings, .get_drvinfo = bcm_get_drvinfo, .get_link = bcm_get_link, .get_msglevel = bcm_get_msglevel, .set_msglevel = bcm_set_msglevel, }; int register_networkdev(PMINI_ADAPTER Adapter) { struct net_device *net = Adapter->dev; PS_INTERFACE_ADAPTER IntfAdapter = Adapter->pvInterfaceAdapter; struct usb_interface *udev = IntfAdapter->interface; struct usb_device *xdev = IntfAdapter->udev; int result; net->netdev_ops = &bcmNetDevOps; net->ethtool_ops = &bcm_ethtool_ops; net->mtu = MTU_SIZE; /* 1400 Bytes */ net->tx_queue_len = TX_QLEN; net->flags |= IFF_NOARP; netif_carrier_off(net); SET_NETDEV_DEVTYPE(net, &wimax_type); /* Read the MAC Address from EEPROM */ result = ReadMacAddressFromNVM(Adapter); if (result != STATUS_SUCCESS) { dev_err(&udev->dev, PFX "Error in Reading the mac 
Address: %d", result); return -EIO; } result = register_netdev(net); if (result) return result; gblpnetdev = Adapter->dev; if (netif_msg_probe(Adapter)) dev_info(&udev->dev, PFX "%s: register usb-%s-%s %pM\n", net->name, xdev->bus->bus_name, xdev->devpath, net->dev_addr); return 0; } void unregister_networkdev(PMINI_ADAPTER Adapter) { struct net_device *net = Adapter->dev; PS_INTERFACE_ADAPTER IntfAdapter = Adapter->pvInterfaceAdapter; struct usb_interface *udev = IntfAdapter->interface; struct usb_device *xdev = IntfAdapter->udev; if (netif_msg_probe(Adapter)) dev_info(&udev->dev, PFX "%s: unregister usb-%s%s\n", net->name, xdev->bus->bus_name, xdev->devpath); unregister_netdev(Adapter->dev); }
gpl-2.0
freexperia/android_kernel_semc_msm7x30
samples/kfifo/dma-example.c
10856
3496
/* * Sample fifo dma implementation * * Copyright (C) 2010 Stefani Seibold <stefani@seibold.net> * * Released under the GPL version 2 only. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/kfifo.h> /* * This module shows how to handle fifo dma operations. */ /* fifo size in elements (bytes) */ #define FIFO_SIZE 32 static struct kfifo fifo; static int __init example_init(void) { int i; unsigned int ret; unsigned int nents; struct scatterlist sg[10]; printk(KERN_INFO "DMA fifo test start\n"); if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) { printk(KERN_WARNING "error kfifo_alloc\n"); return -ENOMEM; } printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo)); kfifo_in(&fifo, "test", 4); for (i = 0; i != 9; i++) kfifo_put(&fifo, &i); /* kick away first byte */ kfifo_skip(&fifo); printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo)); /* * Configure the kfifo buffer to receive data from DMA input. * * .--------------------------------------. * | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 | * |---|------------------|---------------| * \_/ \________________/ \_____________/ * \ \ \ * \ \_allocated data \ * \_*free space* \_*free space* * * We need two different SG entries: one for the free space area at the * end of the kfifo buffer (19 bytes) and another for the first free * byte at the beginning, after the kfifo_skip(). */ sg_init_table(sg, ARRAY_SIZE(sg)); nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); printk(KERN_INFO "DMA sgl entries: %d\n", nents); if (!nents) { /* fifo is full and no sgl was created */ printk(KERN_WARNING "error kfifo_dma_in_prepare\n"); return -EIO; } /* receive data */ printk(KERN_INFO "scatterlist for receive:\n"); for (i = 0; i < nents; i++) { printk(KERN_INFO "sg[%d] -> " "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", i, sg[i].page_link, sg[i].offset, sg[i].length); if (sg_is_last(&sg[i])) break; } /* put here your code to setup and exectute the dma operation */ /* ... 
 */

	/* example: zero bytes received */
	ret = 0;

	/* finish the dma operation and update the received data */
	kfifo_dma_in_finish(&fifo, ret);

	/* Prepare to transmit data, example: 8 bytes */
	nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
	printk(KERN_INFO "DMA sgl entries: %d\n", nents);
	if (!nents) {
		/* no data was available and no sgl was created */
		printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
		return -EIO;
	}

	printk(KERN_INFO "scatterlist for transmit:\n");
	for (i = 0; i < nents; i++) {
		printk(KERN_INFO
		"sg[%d] -> "
		"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
			i, sg[i].page_link, sg[i].offset, sg[i].length);

		if (sg_is_last(&sg[i]))
			break;
	}

	/* put here your code to setup and execute the dma operation */

	/* ... */

	/* example: 5 bytes transmitted */
	ret = 5;

	/* finish the dma operation and update the transmitted data */
	kfifo_dma_out_finish(&fifo, ret);

	/* 12 bytes were queued, 5 transmitted -> 7 must remain */
	ret = kfifo_len(&fifo);
	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));

	if (ret != 7) {
		printk(KERN_WARNING "size mismatch: test failed");
		return -EIO;
	}
	printk(KERN_INFO "test passed\n");

	return 0;
}

static void __exit example_exit(void)
{
	kfifo_free(&fifo);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
gpl-2.0
Stuxnet-Kernel/kernel_g3
samples/kfifo/dma-example.c
10856
3496
/* * Sample fifo dma implementation * * Copyright (C) 2010 Stefani Seibold <stefani@seibold.net> * * Released under the GPL version 2 only. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/kfifo.h> /* * This module shows how to handle fifo dma operations. */ /* fifo size in elements (bytes) */ #define FIFO_SIZE 32 static struct kfifo fifo; static int __init example_init(void) { int i; unsigned int ret; unsigned int nents; struct scatterlist sg[10]; printk(KERN_INFO "DMA fifo test start\n"); if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) { printk(KERN_WARNING "error kfifo_alloc\n"); return -ENOMEM; } printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo)); kfifo_in(&fifo, "test", 4); for (i = 0; i != 9; i++) kfifo_put(&fifo, &i); /* kick away first byte */ kfifo_skip(&fifo); printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo)); /* * Configure the kfifo buffer to receive data from DMA input. * * .--------------------------------------. * | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 | * |---|------------------|---------------| * \_/ \________________/ \_____________/ * \ \ \ * \ \_allocated data \ * \_*free space* \_*free space* * * We need two different SG entries: one for the free space area at the * end of the kfifo buffer (19 bytes) and another for the first free * byte at the beginning, after the kfifo_skip(). */ sg_init_table(sg, ARRAY_SIZE(sg)); nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); printk(KERN_INFO "DMA sgl entries: %d\n", nents); if (!nents) { /* fifo is full and no sgl was created */ printk(KERN_WARNING "error kfifo_dma_in_prepare\n"); return -EIO; } /* receive data */ printk(KERN_INFO "scatterlist for receive:\n"); for (i = 0; i < nents; i++) { printk(KERN_INFO "sg[%d] -> " "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", i, sg[i].page_link, sg[i].offset, sg[i].length); if (sg_is_last(&sg[i])) break; } /* put here your code to setup and exectute the dma operation */ /* ... 
 */

	/* example: zero bytes received */
	ret = 0;

	/* finish the dma operation and update the received data */
	kfifo_dma_in_finish(&fifo, ret);

	/* Prepare to transmit data, example: 8 bytes */
	nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
	printk(KERN_INFO "DMA sgl entries: %d\n", nents);
	if (!nents) {
		/* no data was available and no sgl was created */
		printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
		return -EIO;
	}

	printk(KERN_INFO "scatterlist for transmit:\n");
	for (i = 0; i < nents; i++) {
		printk(KERN_INFO
		"sg[%d] -> "
		"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
			i, sg[i].page_link, sg[i].offset, sg[i].length);

		if (sg_is_last(&sg[i]))
			break;
	}

	/* put here your code to setup and execute the dma operation */

	/* ... */

	/* example: 5 bytes transmitted */
	ret = 5;

	/* finish the dma operation and update the transmitted data */
	kfifo_dma_out_finish(&fifo, ret);

	/* 12 bytes were queued, 5 transmitted -> 7 must remain */
	ret = kfifo_len(&fifo);
	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));

	if (ret != 7) {
		printk(KERN_WARNING "size mismatch: test failed");
		return -EIO;
	}
	printk(KERN_INFO "test passed\n");

	return 0;
}

static void __exit example_exit(void)
{
	kfifo_free(&fifo);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
gpl-2.0
RoGod/kernel_xtreme_mega_beta
sound/synth/emux/emux_proc.c
12904
4566
/*
 * Copyright (C) 2000 Takashi Iwai <tiwai@suse.de>
 *
 * Proc interface for Emu8k/Emu10k1 WaveTable synth
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/wait.h>
#include <sound/core.h>
#include <sound/emux_synth.h>
#include <sound/info.h>
#include "emux_voice.h"

#ifdef CONFIG_PROC_FS

/*
 * /proc read callback: dump a human-readable status report for one
 * emux synth instance (ports, voice usage, wavetable memory, and the
 * currently loaded SoundFont counters).
 *
 * Holds emu->register_mutex for the whole dump so the numbers form a
 * consistent snapshot; additionally takes the SoundFont list's
 * presets_mutex while reading its counters.
 */
static void
snd_emux_proc_info_read(struct snd_info_entry *entry,
			struct snd_info_buffer *buf)
{
	struct snd_emux *emu;
	int i;

	/* instance pointer was stashed in the entry by snd_emux_proc_init() */
	emu = entry->private_data;
	mutex_lock(&emu->register_mutex);
	if (emu->name)
		snd_iprintf(buf, "Device: %s\n", emu->name);
	snd_iprintf(buf, "Ports: %d\n", emu->num_ports);
	/* one "client:port" pair per sequencer port, all on one line */
	snd_iprintf(buf, "Addresses:");
	for (i = 0; i < emu->num_ports; i++)
		snd_iprintf(buf, " %d:%d", emu->client, emu->ports[i]);
	snd_iprintf(buf, "\n");
	snd_iprintf(buf, "Use Counter: %d\n", emu->used);
	snd_iprintf(buf, "Max Voices: %d\n", emu->max_voices);
	snd_iprintf(buf, "Allocated Voices: %d\n", emu->num_voices);
	if (emu->memhdr) {
		/* wavetable sample memory statistics */
		snd_iprintf(buf, "Memory Size: %d\n", emu->memhdr->size);
		snd_iprintf(buf, "Memory Available: %d\n",
			    snd_util_mem_avail(emu->memhdr));
		snd_iprintf(buf, "Allocated Blocks: %d\n", emu->memhdr->nblocks);
	} else {
		/* no memory manager attached to this chip */
		snd_iprintf(buf, "Memory Size: 0\n");
	}
	if (emu->sflist) {
		mutex_lock(&emu->sflist->presets_mutex);
		snd_iprintf(buf, "SoundFonts: %d\n", emu->sflist->fonts_size);
		snd_iprintf(buf, "Instruments: %d\n", emu->sflist->zone_counter);
		snd_iprintf(buf, "Samples: %d\n", emu->sflist->sample_counter);
		snd_iprintf(buf, "Locked Instruments: %d\n",
			    emu->sflist->zone_locked);
		snd_iprintf(buf, "Locked Samples: %d\n",
			    emu->sflist->sample_locked);
		mutex_unlock(&emu->sflist->presets_mutex);
	}
#if 0  /* debug: dump the raw register/envelope state of voice 0 */
	if (emu->voices[0].state != SNDRV_EMUX_ST_OFF &&
	    emu->voices[0].ch >= 0) {
		struct snd_emux_voice *vp = &emu->voices[0];
		snd_iprintf(buf, "voice 0: on\n");
		snd_iprintf(buf, "mod delay=%x, atkhld=%x, dcysus=%x, rel=%x\n",
			    vp->reg.parm.moddelay, vp->reg.parm.modatkhld,
			    vp->reg.parm.moddcysus, vp->reg.parm.modrelease);
		snd_iprintf(buf, "vol delay=%x, atkhld=%x, dcysus=%x, rel=%x\n",
			    vp->reg.parm.voldelay, vp->reg.parm.volatkhld,
			    vp->reg.parm.voldcysus, vp->reg.parm.volrelease);
		snd_iprintf(buf, "lfo1 delay=%x, lfo2 delay=%x, pefe=%x\n",
			    vp->reg.parm.lfo1delay, vp->reg.parm.lfo2delay,
			    vp->reg.parm.pefe);
		snd_iprintf(buf, "fmmod=%x, tremfrq=%x, fm2frq2=%x\n",
			    vp->reg.parm.fmmod, vp->reg.parm.tremfrq,
			    vp->reg.parm.fm2frq2);
		snd_iprintf(buf, "cutoff=%x, filterQ=%x, chorus=%x, reverb=%x\n",
			    vp->reg.parm.cutoff, vp->reg.parm.filterQ,
			    vp->reg.parm.chorus, vp->reg.parm.reverb);
		snd_iprintf(buf, "avol=%x, acutoff=%x, apitch=%x\n",
			    vp->avol, vp->acutoff, vp->apitch);
		snd_iprintf(buf, "apan=%x, aaux=%x, ptarget=%x, vtarget=%x, ftarget=%x\n",
			    vp->apan, vp->aaux, vp->ptarget, vp->vtarget,
			    vp->ftarget);
		snd_iprintf(buf, "start=%x, end=%x, loopstart=%x, loopend=%x\n",
			    vp->reg.start, vp->reg.end, vp->reg.loopstart,
			    vp->reg.loopend);
		snd_iprintf(buf, "sample_mode=%x, rate=%x\n",
			    vp->reg.sample_mode, vp->reg.rate_offset);
	}
#endif
	mutex_unlock(&emu->register_mutex);
}

/*
 * Create and register the "wavetableD<device>" proc entry on the card.
 * On registration failure the entry is freed and emu->proc stays
 * untouched; on success the entry is remembered in emu->proc so that
 * snd_emux_proc_free() can tear it down.
 */
void snd_emux_proc_init(struct snd_emux *emu, struct snd_card *card, int device)
{
	struct snd_info_entry *entry;
	char name[64];

	sprintf(name, "wavetableD%d", device);
	entry = snd_info_create_card_entry(card, name, card->proc_root);
	if (entry == NULL)
		return;

	entry->content = SNDRV_INFO_CONTENT_TEXT;
	entry->private_data = emu;	/* retrieved again in the read callback */
	entry->c.text.read = snd_emux_proc_info_read;
	if (snd_info_register(entry) < 0)
		snd_info_free_entry(entry);
	else
		emu->proc = entry;
}

/*
 * Remove the proc entry created by snd_emux_proc_init().
 * snd_info_free_entry() tolerates a NULL argument, so this is safe
 * even if init never succeeded; the pointer is cleared afterwards to
 * guard against double free.
 */
void snd_emux_proc_free(struct snd_emux *emu)
{
	snd_info_free_entry(emu->proc);
	emu->proc = NULL;
}

#endif /* CONFIG_PROC_FS */
gpl-2.0
ya-mouse/openbmc-openwrt
tools/firmware-utils/src/mksenaofw.c
361
9889
/* * * Copyright (C) 2012 OpenWrt.org * Copyright (C) 2012 Mikko Hissa <mikko.hissa@uta.fi> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <libgen.h> #include <errno.h> #include <arpa/inet.h> #include <unistd.h> #include "md5.h" #define HDR_LEN 0x60 #define BUF_SIZE 0x200 #define VERSION_SIZE 0x10 #define MD5_SIZE 0x10 #define PAD_SIZE 0x20 #define DEFAULT_BLOCK_SIZE 65535 #define DEFAULT_HEAD_VALUE 0x0 #define DEFAULT_VERSION "123" #define DEFAULT_MAGIC 0x12345678 typedef struct { uint32_t head; uint32_t vendor_id; uint32_t product_id; uint8_t version[VERSION_SIZE]; uint32_t firmware_type; uint32_t filesize; uint32_t zero; uint8_t md5sum[MD5_SIZE]; uint8_t pad[PAD_SIZE]; uint32_t chksum; uint32_t magic; } img_header; typedef struct { uint8_t id; char * name; } firmware_type; typedef enum { NONE, ENCODE, DECODE } op_mode; static firmware_type FIRMWARE_TYPES[] = { { 0x01, "bootloader" }, { 0x02, "kernel" }, { 0x03, "kernelapp" }, { 0x04, "apps" }, /* The types below this line vary by manufacturer */ { 0x05, "littleapps (D-Link)/factoryapps (EnGenius)" }, { 0x06, "sounds (D-Link)/littleapps (EnGenius)" }, { 0x07, "userconfig (D-Link)/appdata (EnGenius)" }, { 0x08, "userconfig (EnGenius)"}, { 0x09, "odmapps (EnGenius)"}, { 0x0a, "factoryapps (D-Link)" }, { 0x0b, "odmapps (D-Link)" }, { 0x0c, "langpack (D-Link)" } }; static long get_file_size(const char *filename) { FILE *fp_file; long result; fp_file = fopen(filename, "r"); if (!fp_file) return -1; fseek(fp_file, 0, SEEK_END); result = ftell(fp_file); fclose(fp_file); return result; } static int header_checksum(void *data, int len) { int i; int sum; sum = 0; if (data != NULL && len >= 0) { for (i = 0; i < len; ++i) sum += *(unsigned char *) (data + i); return sum; } return -1; } static int 
md5_file(const char *filename, uint8_t *dst) { FILE *fp_src; MD5_CTX ctx; char buf[BUF_SIZE]; size_t bytes_read; MD5_Init(&ctx); fp_src = fopen(filename, "r+b"); if (!fp_src) { return -1; } while (!feof(fp_src)) { bytes_read = fread(&buf, 1, BUF_SIZE, fp_src); MD5_Update(&ctx, &buf, bytes_read); } fclose(fp_src); MD5_Final(dst, &ctx); return 0; } static int encode_image(const char *input_file_name, const char *output_file_name, img_header *header, int block_size) { char buf[BUF_SIZE]; size_t bytes_read; size_t pad_len = 0; size_t bytes_avail; FILE *fp_input; FILE *fp_output; int i; long magic; fp_input = fopen(input_file_name, "r+b"); if (!fp_input) { fprintf(stderr, "Cannot open %s !!\n", input_file_name); return -1; } fp_output = fopen(output_file_name, "w+b"); if (!fp_output) { fprintf(stderr, "Cannot open %s !!\n", output_file_name); fclose(fp_input); return -1; } header->filesize = get_file_size(input_file_name); if (!header->filesize) { fprintf(stderr, "File %s open/size error!\n", input_file_name); fclose(fp_input); fclose(fp_output); return -1; } /* * Zero padding */ if (block_size > 0) { pad_len = block_size - (header->filesize % block_size); } if (md5_file(input_file_name, (uint8_t *) &header->md5sum) < 0) { fprintf(stderr, "MD5 failed on file %s\n", input_file_name); fclose(fp_input); fclose(fp_output); return -1; } header->zero = 0; header->chksum = header_checksum(header, HDR_LEN); header->head = htonl(header->head); header->vendor_id = htonl(header->vendor_id); header->product_id = htonl(header->product_id); header->firmware_type = htonl(header->firmware_type); header->filesize = htonl(header->filesize); header->chksum = htonl(header->chksum); magic = header->magic; header->magic = htonl(header->magic); fwrite(header, HDR_LEN, 1, fp_output); while (!feof(fp_input) || pad_len > 0) { if (!feof(fp_input)) bytes_read = fread(&buf, 1, BUF_SIZE, fp_input); else bytes_read = 0; /* * No more bytes read, start padding */ if (bytes_read < BUF_SIZE && pad_len > 
0) { bytes_avail = BUF_SIZE - bytes_read; memset( &buf[bytes_read], 0, bytes_avail); bytes_read += bytes_avail < pad_len ? bytes_avail : pad_len; pad_len -= bytes_avail < pad_len ? bytes_avail : pad_len; } for (i = 0; i < bytes_read; i++) buf[i] ^= magic >> (i % 8) & 0xff; fwrite(&buf, bytes_read, 1, fp_output); } fclose(fp_input); fclose(fp_output); return 1; } int decode_image(const char *input_file_name, const char *output_file_name) { img_header header; char buf[BUF_SIZE]; FILE *fp_input; FILE *fp_output; unsigned int i; size_t bytes_read; size_t bytes_written; fp_input = fopen(input_file_name, "r+b"); if (!fp_input) { fprintf(stderr, "Cannot open %s !!\n", input_file_name); fclose(fp_input); return -1; } fp_output = fopen(output_file_name, "w+b"); if (!fp_output) { fprintf(stderr, "Cannot open %s !!\n", output_file_name); fclose(fp_output); return -1; } if (fread(&header, 1, HDR_LEN, fp_input) != HDR_LEN) { fprintf(stderr, "Incorrect header size!!"); fclose(fp_input); fclose(fp_output); return -1; } header.head = ntohl(header.head); header.vendor_id = ntohl(header.vendor_id); header.product_id = ntohl(header.product_id); header.firmware_type = ntohl(header.firmware_type); header.filesize = ntohl(header.filesize); header.chksum = ntohl(header.chksum); header.magic = ntohl(header.magic); bytes_written = 0; while (!feof(fp_input)) { bytes_read = fread(&buf, 1, BUF_SIZE, fp_input); for (i = 0; i < bytes_read; i++) buf[i] ^= header.magic >> (i % 8) & 0xff; /* * Handle padded source file */ if (bytes_written + bytes_read > header.filesize) { bytes_read = header.filesize - bytes_written; if (bytes_read > 0) fwrite(&buf, bytes_read, 1, fp_output); break; } fwrite(&buf, bytes_read, 1, fp_output); bytes_written += bytes_read; } fclose(fp_input); fclose(fp_output); return 1; } static void usage(const char *progname, int status) { FILE *stream = (status != EXIT_SUCCESS) ? 
stderr : stdout; int i; fprintf(stream, "Usage: %s [OPTIONS...]\n", progname); fprintf(stream, "\n" "Options:\n" " -e <file> encode image file <file>\n" " -d <file> decode image file <file>\n" " -o <file> write output to the file <file>\n" " -t <type> set image type to <type>\n" " valid image <type> values:\n"); for (i = 0; i < sizeof(FIRMWARE_TYPES) / sizeof(firmware_type); i++) { fprintf(stream, " %-5i= %s\n", FIRMWARE_TYPES[i].id, FIRMWARE_TYPES[i].name); } fprintf(stream, " -v <version> set image version to <version>\n" " -r <vendor> set image vendor id to <vendor>\n" " -p <product> set image product id to <product>\n" " -m <magic> set encoding magic <magic>\n" " -z enable image padding to <blocksize>\n" " -b <blocksize> set image <blocksize>, defaults to %u\n" " -h show this screen\n", DEFAULT_BLOCK_SIZE); exit(status); } int main(int argc, char *argv[]) { int opt; char *input_file, *output_file, *progname = NULL; op_mode mode = NONE; int tmp, i, pad = 0; int block_size; img_header header; block_size = DEFAULT_BLOCK_SIZE; progname = basename(argv[0]); memset(&header, 0, sizeof( img_header )); header.magic = DEFAULT_MAGIC; header.head = DEFAULT_HEAD_VALUE; strncpy( (char*)&header.version, DEFAULT_VERSION, VERSION_SIZE - 1); while ((opt = getopt(argc, argv, ":o:e:d:t:v:r:p:m:b:h?z")) != -1) { switch (opt) { case 'e': input_file = optarg; mode = ENCODE; break; case 'd': input_file = optarg; mode = DECODE; break; case 'o': output_file = optarg; break; case 't': tmp = strtol(optarg, 0, 10); for (i = 0; i < sizeof(FIRMWARE_TYPES) / sizeof(firmware_type); i++) { if (FIRMWARE_TYPES[i].id == tmp) { header.firmware_type = FIRMWARE_TYPES[i].id; break; } } if (header.firmware_type == 0) { fprintf(stderr, "Invalid firmware type \"0\"!\n"); usage(progname, EXIT_FAILURE); } break; case 'v': strncpy( (char*)&header.version, optarg, VERSION_SIZE - 1); break; case 'r': header.vendor_id = strtol(optarg, 0, 0); break; case 'p': header.product_id = strtol(optarg, 0, 0); break; 
case 'm': header.magic = strtoul(optarg, 0, 16); break; case 'z': pad = 1; break; case 'b': block_size = strtol(optarg, 0, 10); break; case 'h': usage(progname, EXIT_SUCCESS); break; case ':': fprintf(stderr, "Option -%c requires an operand\n", optopt); usage(progname, EXIT_FAILURE); break; case '?': fprintf(stderr, "Unrecognized option: -%c\n", optopt); usage(progname, EXIT_FAILURE); break; default: usage(progname, EXIT_FAILURE); break; } } /* Check required arguments*/ if (header.firmware_type == 0) { fprintf(stderr, "Firmware type must be defined\n"); usage(progname, EXIT_FAILURE); } else if (input_file == 0 || output_file == 0) { fprintf(stderr, "Input and output files must be defined\n"); usage(progname, EXIT_FAILURE); } else if (header.vendor_id == 0 || header.product_id == 0) { fprintf(stderr, "Vendor ID and Product ID must be defined and non-zero\n"); usage(progname, EXIT_FAILURE); } switch (mode) { case NONE: fprintf(stderr, "A mode must be defined\n"); usage(progname, EXIT_FAILURE); break; case ENCODE: if (encode_image(input_file, output_file, &header, pad ? block_size : 0) < 0) return EXIT_FAILURE; break; case DECODE: if (decode_image(input_file, output_file) < 0) return EXIT_FAILURE; break; } return EXIT_SUCCESS; }
gpl-2.0
ryrzy/ics_kernel
kernel/sysctl_binary.c
361
52822
#include <linux/stat.h> #include <linux/sysctl.h> #include "../fs/xfs/linux-2.6/xfs_sysctl.h" #include <linux/sunrpc/debug.h> #include <linux/string.h> #include <net/ip_vs.h> #include <linux/syscalls.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/fs.h> #include <linux/nsproxy.h> #include <linux/pid_namespace.h> #include <linux/file.h> #include <linux/ctype.h> #include <linux/netdevice.h> #include <linux/kernel.h> #include <linux/slab.h> #ifdef CONFIG_SYSCTL_SYSCALL struct bin_table; typedef ssize_t bin_convert_t(struct file *file, void __user *oldval, size_t oldlen, void __user *newval, size_t newlen); static bin_convert_t bin_dir; static bin_convert_t bin_string; static bin_convert_t bin_intvec; static bin_convert_t bin_ulongvec; static bin_convert_t bin_uuid; static bin_convert_t bin_dn_node_address; #define CTL_DIR bin_dir #define CTL_STR bin_string #define CTL_INT bin_intvec #define CTL_ULONG bin_ulongvec #define CTL_UUID bin_uuid #define CTL_DNADR bin_dn_node_address #define BUFSZ 256 struct bin_table { bin_convert_t *convert; int ctl_name; const char *procname; const struct bin_table *child; }; static const struct bin_table bin_random_table[] = { { CTL_INT, RANDOM_POOLSIZE, "poolsize" }, { CTL_INT, RANDOM_ENTROPY_COUNT, "entropy_avail" }, { CTL_INT, RANDOM_READ_THRESH, "read_wakeup_threshold" }, { CTL_INT, RANDOM_WRITE_THRESH, "write_wakeup_threshold" }, { CTL_UUID, RANDOM_BOOT_ID, "boot_id" }, { CTL_UUID, RANDOM_UUID, "uuid" }, {} }; static const struct bin_table bin_pty_table[] = { { CTL_INT, PTY_MAX, "max" }, { CTL_INT, PTY_NR, "nr" }, {} }; static const struct bin_table bin_kern_table[] = { { CTL_STR, KERN_OSTYPE, "ostype" }, { CTL_STR, KERN_OSRELEASE, "osrelease" }, /* KERN_OSREV not used */ { CTL_STR, KERN_VERSION, "version" }, /* KERN_SECUREMASK not used */ /* KERN_PROF not used */ { CTL_STR, KERN_NODENAME, "hostname" }, { CTL_STR, KERN_DOMAINNAME, "domainname" }, { CTL_INT, KERN_PANIC, "panic" }, { CTL_INT, KERN_REALROOTDEV, 
"real-root-dev" }, { CTL_STR, KERN_SPARC_REBOOT, "reboot-cmd" }, { CTL_INT, KERN_CTLALTDEL, "ctrl-alt-del" }, { CTL_INT, KERN_PRINTK, "printk" }, /* KERN_NAMETRANS not used */ /* KERN_PPC_HTABRECLAIM not used */ /* KERN_PPC_ZEROPAGED not used */ { CTL_INT, KERN_PPC_POWERSAVE_NAP, "powersave-nap" }, { CTL_STR, KERN_MODPROBE, "modprobe" }, { CTL_INT, KERN_SG_BIG_BUFF, "sg-big-buff" }, { CTL_INT, KERN_ACCT, "acct" }, /* KERN_PPC_L2CR "l2cr" no longer used */ /* KERN_RTSIGNR not used */ /* KERN_RTSIGMAX not used */ { CTL_ULONG, KERN_SHMMAX, "shmmax" }, { CTL_INT, KERN_MSGMAX, "msgmax" }, { CTL_INT, KERN_MSGMNB, "msgmnb" }, /* KERN_MSGPOOL not used*/ { CTL_INT, KERN_SYSRQ, "sysrq" }, { CTL_INT, KERN_MAX_THREADS, "threads-max" }, { CTL_DIR, KERN_RANDOM, "random", bin_random_table }, { CTL_ULONG, KERN_SHMALL, "shmall" }, { CTL_INT, KERN_MSGMNI, "msgmni" }, { CTL_INT, KERN_SEM, "sem" }, { CTL_INT, KERN_SPARC_STOP_A, "stop-a" }, { CTL_INT, KERN_SHMMNI, "shmmni" }, { CTL_INT, KERN_OVERFLOWUID, "overflowuid" }, { CTL_INT, KERN_OVERFLOWGID, "overflowgid" }, { CTL_STR, KERN_HOTPLUG, "hotplug", }, { CTL_INT, KERN_IEEE_EMULATION_WARNINGS, "ieee_emulation_warnings" }, { CTL_INT, KERN_S390_USER_DEBUG_LOGGING, "userprocess_debug" }, { CTL_INT, KERN_CORE_USES_PID, "core_uses_pid" }, /* KERN_TAINTED "tainted" no longer used */ { CTL_INT, KERN_CADPID, "cad_pid" }, { CTL_INT, KERN_PIDMAX, "pid_max" }, { CTL_STR, KERN_CORE_PATTERN, "core_pattern" }, { CTL_INT, KERN_PANIC_ON_OOPS, "panic_on_oops" }, { CTL_INT, KERN_HPPA_PWRSW, "soft-power" }, { CTL_INT, KERN_HPPA_UNALIGNED, "unaligned-trap" }, { CTL_INT, KERN_PRINTK_RATELIMIT, "printk_ratelimit" }, { CTL_INT, KERN_PRINTK_RATELIMIT_BURST, "printk_ratelimit_burst" }, { CTL_DIR, KERN_PTY, "pty", bin_pty_table }, { CTL_INT, KERN_NGROUPS_MAX, "ngroups_max" }, { CTL_INT, KERN_SPARC_SCONS_PWROFF, "scons-poweroff" }, /* KERN_HZ_TIMER "hz_timer" no longer used */ { CTL_INT, KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" }, { CTL_INT, 
KERN_BOOTLOADER_TYPE, "bootloader_type" }, { CTL_INT, KERN_RANDOMIZE, "randomize_va_space" }, { CTL_INT, KERN_SPIN_RETRY, "spin_retry" }, /* KERN_ACPI_VIDEO_FLAGS "acpi_video_flags" no longer used */ { CTL_INT, KERN_IA64_UNALIGNED, "ignore-unaligned-usertrap" }, { CTL_INT, KERN_COMPAT_LOG, "compat-log" }, { CTL_INT, KERN_MAX_LOCK_DEPTH, "max_lock_depth" }, { CTL_INT, KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" }, { CTL_INT, KERN_BOOT_REASON, "boot_reason" }, {} }; static const struct bin_table bin_vm_table[] = { { CTL_INT, VM_OVERCOMMIT_MEMORY, "overcommit_memory" }, { CTL_INT, VM_PAGE_CLUSTER, "page-cluster" }, { CTL_INT, VM_DIRTY_BACKGROUND, "dirty_background_ratio" }, { CTL_INT, VM_DIRTY_RATIO, "dirty_ratio" }, /* VM_DIRTY_WB_CS "dirty_writeback_centisecs" no longer used */ /* VM_DIRTY_EXPIRE_CS "dirty_expire_centisecs" no longer used */ { CTL_INT, VM_NR_PDFLUSH_THREADS, "nr_pdflush_threads" }, { CTL_INT, VM_OVERCOMMIT_RATIO, "overcommit_ratio" }, /* VM_PAGEBUF unused */ /* VM_HUGETLB_PAGES "nr_hugepages" no longer used */ { CTL_INT, VM_SWAPPINESS, "swappiness" }, { CTL_INT, VM_LOWMEM_RESERVE_RATIO, "lowmem_reserve_ratio" }, { CTL_INT, VM_MIN_FREE_KBYTES, "min_free_kbytes" }, { CTL_INT, VM_MAX_MAP_COUNT, "max_map_count" }, { CTL_INT, VM_LAPTOP_MODE, "laptop_mode" }, { CTL_INT, VM_BLOCK_DUMP, "block_dump" }, { CTL_INT, VM_HUGETLB_GROUP, "hugetlb_shm_group" }, { CTL_INT, VM_VFS_CACHE_PRESSURE, "vfs_cache_pressure" }, { CTL_INT, VM_LEGACY_VA_LAYOUT, "legacy_va_layout" }, /* VM_SWAP_TOKEN_TIMEOUT unused */ { CTL_INT, VM_DROP_PAGECACHE, "drop_caches" }, { CTL_INT, VM_PERCPU_PAGELIST_FRACTION, "percpu_pagelist_fraction" }, { CTL_INT, VM_ZONE_RECLAIM_MODE, "zone_reclaim_mode" }, { CTL_INT, VM_MIN_UNMAPPED, "min_unmapped_ratio" }, { CTL_INT, VM_PANIC_ON_OOM, "panic_on_oom" }, { CTL_INT, VM_VDSO_ENABLED, "vdso_enabled" }, { CTL_INT, VM_MIN_SLAB, "min_slab_ratio" }, {} }; static const struct bin_table bin_net_core_table[] = { { CTL_INT, NET_CORE_WMEM_MAX, "wmem_max" }, 
{ CTL_INT, NET_CORE_RMEM_MAX, "rmem_max" }, { CTL_INT, NET_CORE_WMEM_DEFAULT, "wmem_default" }, { CTL_INT, NET_CORE_RMEM_DEFAULT, "rmem_default" }, /* NET_CORE_DESTROY_DELAY unused */ { CTL_INT, NET_CORE_MAX_BACKLOG, "netdev_max_backlog" }, /* NET_CORE_FASTROUTE unused */ { CTL_INT, NET_CORE_MSG_COST, "message_cost" }, { CTL_INT, NET_CORE_MSG_BURST, "message_burst" }, { CTL_INT, NET_CORE_OPTMEM_MAX, "optmem_max" }, /* NET_CORE_HOT_LIST_LENGTH unused */ /* NET_CORE_DIVERT_VERSION unused */ /* NET_CORE_NO_CONG_THRESH unused */ /* NET_CORE_NO_CONG unused */ /* NET_CORE_LO_CONG unused */ /* NET_CORE_MOD_CONG unused */ { CTL_INT, NET_CORE_DEV_WEIGHT, "dev_weight" }, { CTL_INT, NET_CORE_SOMAXCONN, "somaxconn" }, { CTL_INT, NET_CORE_BUDGET, "netdev_budget" }, { CTL_INT, NET_CORE_AEVENT_ETIME, "xfrm_aevent_etime" }, { CTL_INT, NET_CORE_AEVENT_RSEQTH, "xfrm_aevent_rseqth" }, { CTL_INT, NET_CORE_WARNINGS, "warnings" }, {}, }; static const struct bin_table bin_net_unix_table[] = { /* NET_UNIX_DESTROY_DELAY unused */ /* NET_UNIX_DELETE_DELAY unused */ { CTL_INT, NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen" }, {} }; static const struct bin_table bin_net_ipv4_route_table[] = { { CTL_INT, NET_IPV4_ROUTE_FLUSH, "flush" }, /* NET_IPV4_ROUTE_MIN_DELAY "min_delay" no longer used */ /* NET_IPV4_ROUTE_MAX_DELAY "max_delay" no longer used */ { CTL_INT, NET_IPV4_ROUTE_GC_THRESH, "gc_thresh" }, { CTL_INT, NET_IPV4_ROUTE_MAX_SIZE, "max_size" }, { CTL_INT, NET_IPV4_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" }, { CTL_INT, NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" }, { CTL_INT, NET_IPV4_ROUTE_GC_TIMEOUT, "gc_timeout" }, { CTL_INT, NET_IPV4_ROUTE_GC_INTERVAL, "gc_interval" }, { CTL_INT, NET_IPV4_ROUTE_REDIRECT_LOAD, "redirect_load" }, { CTL_INT, NET_IPV4_ROUTE_REDIRECT_NUMBER, "redirect_number" }, { CTL_INT, NET_IPV4_ROUTE_REDIRECT_SILENCE, "redirect_silence" }, { CTL_INT, NET_IPV4_ROUTE_ERROR_COST, "error_cost" }, { CTL_INT, NET_IPV4_ROUTE_ERROR_BURST, "error_burst" }, { CTL_INT, 
NET_IPV4_ROUTE_GC_ELASTICITY, "gc_elasticity" }, { CTL_INT, NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" }, { CTL_INT, NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" }, { CTL_INT, NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" }, {} }; static const struct bin_table bin_net_ipv4_conf_vars_table[] = { { CTL_INT, NET_IPV4_CONF_FORWARDING, "forwarding" }, { CTL_INT, NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding" }, { CTL_INT, NET_IPV4_CONF_ACCEPT_REDIRECTS, "accept_redirects" }, { CTL_INT, NET_IPV4_CONF_SECURE_REDIRECTS, "secure_redirects" }, { CTL_INT, NET_IPV4_CONF_SEND_REDIRECTS, "send_redirects" }, { CTL_INT, NET_IPV4_CONF_SHARED_MEDIA, "shared_media" }, { CTL_INT, NET_IPV4_CONF_RP_FILTER, "rp_filter" }, { CTL_INT, NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, "accept_source_route" }, { CTL_INT, NET_IPV4_CONF_PROXY_ARP, "proxy_arp" }, { CTL_INT, NET_IPV4_CONF_MEDIUM_ID, "medium_id" }, { CTL_INT, NET_IPV4_CONF_BOOTP_RELAY, "bootp_relay" }, { CTL_INT, NET_IPV4_CONF_LOG_MARTIANS, "log_martians" }, { CTL_INT, NET_IPV4_CONF_TAG, "tag" }, { CTL_INT, NET_IPV4_CONF_ARPFILTER, "arp_filter" }, { CTL_INT, NET_IPV4_CONF_ARP_ANNOUNCE, "arp_announce" }, { CTL_INT, NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" }, { CTL_INT, NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" }, { CTL_INT, NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" }, { CTL_INT, NET_IPV4_CONF_NOXFRM, "disable_xfrm" }, { CTL_INT, NET_IPV4_CONF_NOPOLICY, "disable_policy" }, { CTL_INT, NET_IPV4_CONF_FORCE_IGMP_VERSION, "force_igmp_version" }, { CTL_INT, NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" }, {} }; static const struct bin_table bin_net_ipv4_conf_table[] = { { CTL_DIR, NET_PROTO_CONF_ALL, "all", bin_net_ipv4_conf_vars_table }, { CTL_DIR, NET_PROTO_CONF_DEFAULT, "default", bin_net_ipv4_conf_vars_table }, { CTL_DIR, 0, NULL, bin_net_ipv4_conf_vars_table }, {} }; static const struct bin_table bin_net_neigh_vars_table[] = { { CTL_INT, NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" }, { CTL_INT, NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" }, { CTL_INT, 
NET_NEIGH_APP_SOLICIT, "app_solicit" }, /* NET_NEIGH_RETRANS_TIME "retrans_time" no longer used */ { CTL_INT, NET_NEIGH_REACHABLE_TIME, "base_reachable_time" }, { CTL_INT, NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time" }, { CTL_INT, NET_NEIGH_GC_STALE_TIME, "gc_stale_time" }, { CTL_INT, NET_NEIGH_UNRES_QLEN, "unres_qlen" }, { CTL_INT, NET_NEIGH_PROXY_QLEN, "proxy_qlen" }, /* NET_NEIGH_ANYCAST_DELAY "anycast_delay" no longer used */ /* NET_NEIGH_PROXY_DELAY "proxy_delay" no longer used */ /* NET_NEIGH_LOCKTIME "locktime" no longer used */ { CTL_INT, NET_NEIGH_GC_INTERVAL, "gc_interval" }, { CTL_INT, NET_NEIGH_GC_THRESH1, "gc_thresh1" }, { CTL_INT, NET_NEIGH_GC_THRESH2, "gc_thresh2" }, { CTL_INT, NET_NEIGH_GC_THRESH3, "gc_thresh3" }, { CTL_INT, NET_NEIGH_RETRANS_TIME_MS, "retrans_time_ms" }, { CTL_INT, NET_NEIGH_REACHABLE_TIME_MS, "base_reachable_time_ms" }, {} }; static const struct bin_table bin_net_neigh_table[] = { { CTL_DIR, NET_PROTO_CONF_DEFAULT, "default", bin_net_neigh_vars_table }, { CTL_DIR, 0, NULL, bin_net_neigh_vars_table }, {} }; static const struct bin_table bin_net_ipv4_netfilter_table[] = { { CTL_INT, NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max" }, /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT "ip_conntrack_tcp_timeout_syn_sent" no longer used */ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV "ip_conntrack_tcp_timeout_syn_recv" no longer used */ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED "ip_conntrack_tcp_timeout_established" no longer used */ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT "ip_conntrack_tcp_timeout_fin_wait" no longer used */ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT "ip_conntrack_tcp_timeout_close_wait" no longer used */ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK "ip_conntrack_tcp_timeout_last_ack" no longer used */ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT "ip_conntrack_tcp_timeout_time_wait" no longer used */ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE "ip_conntrack_tcp_timeout_close" no longer used */ /* 
NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT "ip_conntrack_udp_timeout" no longer used */ /* NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM "ip_conntrack_udp_timeout_stream" no longer used */ /* NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT "ip_conntrack_icmp_timeout" no longer used */ /* NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT "ip_conntrack_generic_timeout" no longer used */ { CTL_INT, NET_IPV4_NF_CONNTRACK_BUCKETS, "ip_conntrack_buckets" }, { CTL_INT, NET_IPV4_NF_CONNTRACK_LOG_INVALID, "ip_conntrack_log_invalid" }, /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS "ip_conntrack_tcp_timeout_max_retrans" no longer used */ { CTL_INT, NET_IPV4_NF_CONNTRACK_TCP_LOOSE, "ip_conntrack_tcp_loose" }, { CTL_INT, NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL, "ip_conntrack_tcp_be_liberal" }, { CTL_INT, NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS, "ip_conntrack_tcp_max_retrans" }, /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED "ip_conntrack_sctp_timeout_closed" no longer used */ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT "ip_conntrack_sctp_timeout_cookie_wait" no longer used */ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED "ip_conntrack_sctp_timeout_cookie_echoed" no longer used */ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED "ip_conntrack_sctp_timeout_established" no longer used */ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT "ip_conntrack_sctp_timeout_shutdown_sent" no longer used */ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD "ip_conntrack_sctp_timeout_shutdown_recd" no longer used */ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT "ip_conntrack_sctp_timeout_shutdown_ack_sent" no longer used */ { CTL_INT, NET_IPV4_NF_CONNTRACK_COUNT, "ip_conntrack_count" }, { CTL_INT, NET_IPV4_NF_CONNTRACK_CHECKSUM, "ip_conntrack_checksum" }, {} }; static const struct bin_table bin_net_ipv4_table[] = { {CTL_INT, NET_IPV4_FORWARD, "ip_forward" }, { CTL_DIR, NET_IPV4_CONF, "conf", bin_net_ipv4_conf_table }, { CTL_DIR, NET_IPV4_NEIGH, "neigh", bin_net_neigh_table }, { CTL_DIR, NET_IPV4_ROUTE, 
"route", bin_net_ipv4_route_table }, /* NET_IPV4_FIB_HASH unused */ { CTL_DIR, NET_IPV4_NETFILTER, "netfilter", bin_net_ipv4_netfilter_table }, { CTL_INT, NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" }, { CTL_INT, NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" }, { CTL_INT, NET_IPV4_TCP_SACK, "tcp_sack" }, { CTL_INT, NET_IPV4_TCP_RETRANS_COLLAPSE, "tcp_retrans_collapse" }, { CTL_INT, NET_IPV4_DEFAULT_TTL, "ip_default_ttl" }, /* NET_IPV4_AUTOCONFIG unused */ { CTL_INT, NET_IPV4_NO_PMTU_DISC, "ip_no_pmtu_disc" }, { CTL_INT, NET_IPV4_NONLOCAL_BIND, "ip_nonlocal_bind" }, { CTL_INT, NET_IPV4_TCP_SYN_RETRIES, "tcp_syn_retries" }, { CTL_INT, NET_TCP_SYNACK_RETRIES, "tcp_synack_retries" }, { CTL_INT, NET_TCP_MAX_ORPHANS, "tcp_max_orphans" }, { CTL_INT, NET_TCP_MAX_TW_BUCKETS, "tcp_max_tw_buckets" }, { CTL_INT, NET_IPV4_DYNADDR, "ip_dynaddr" }, { CTL_INT, NET_IPV4_TCP_KEEPALIVE_TIME, "tcp_keepalive_time" }, { CTL_INT, NET_IPV4_TCP_KEEPALIVE_PROBES, "tcp_keepalive_probes" }, { CTL_INT, NET_IPV4_TCP_KEEPALIVE_INTVL, "tcp_keepalive_intvl" }, { CTL_INT, NET_IPV4_TCP_RETRIES1, "tcp_retries1" }, { CTL_INT, NET_IPV4_TCP_RETRIES2, "tcp_retries2" }, { CTL_INT, NET_IPV4_TCP_FIN_TIMEOUT, "tcp_fin_timeout" }, { CTL_INT, NET_TCP_SYNCOOKIES, "tcp_syncookies" }, { CTL_INT, NET_TCP_TW_RECYCLE, "tcp_tw_recycle" }, { CTL_INT, NET_TCP_ABORT_ON_OVERFLOW, "tcp_abort_on_overflow" }, { CTL_INT, NET_TCP_STDURG, "tcp_stdurg" }, { CTL_INT, NET_TCP_RFC1337, "tcp_rfc1337" }, { CTL_INT, NET_TCP_MAX_SYN_BACKLOG, "tcp_max_syn_backlog" }, { CTL_INT, NET_IPV4_LOCAL_PORT_RANGE, "ip_local_port_range" }, { CTL_INT, NET_IPV4_IGMP_MAX_MEMBERSHIPS, "igmp_max_memberships" }, { CTL_INT, NET_IPV4_IGMP_MAX_MSF, "igmp_max_msf" }, { CTL_INT, NET_IPV4_INET_PEER_THRESHOLD, "inet_peer_threshold" }, { CTL_INT, NET_IPV4_INET_PEER_MINTTL, "inet_peer_minttl" }, { CTL_INT, NET_IPV4_INET_PEER_MAXTTL, "inet_peer_maxttl" }, { CTL_INT, NET_IPV4_INET_PEER_GC_MINTIME, "inet_peer_gc_mintime" }, { CTL_INT, 
NET_IPV4_INET_PEER_GC_MAXTIME, "inet_peer_gc_maxtime" }, { CTL_INT, NET_TCP_ORPHAN_RETRIES, "tcp_orphan_retries" }, { CTL_INT, NET_TCP_FACK, "tcp_fack" }, { CTL_INT, NET_TCP_REORDERING, "tcp_reordering" }, { CTL_INT, NET_TCP_ECN, "tcp_ecn" }, { CTL_INT, NET_TCP_DSACK, "tcp_dsack" }, { CTL_INT, NET_TCP_MEM, "tcp_mem" }, { CTL_INT, NET_TCP_WMEM, "tcp_wmem" }, { CTL_INT, NET_TCP_RMEM, "tcp_rmem" }, { CTL_INT, NET_TCP_APP_WIN, "tcp_app_win" }, { CTL_INT, NET_TCP_ADV_WIN_SCALE, "tcp_adv_win_scale" }, { CTL_INT, NET_TCP_TW_REUSE, "tcp_tw_reuse" }, { CTL_INT, NET_TCP_FRTO, "tcp_frto" }, { CTL_INT, NET_TCP_FRTO_RESPONSE, "tcp_frto_response" }, { CTL_INT, NET_TCP_LOW_LATENCY, "tcp_low_latency" }, { CTL_INT, NET_TCP_NO_METRICS_SAVE, "tcp_no_metrics_save" }, { CTL_INT, NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" }, { CTL_INT, NET_TCP_TSO_WIN_DIVISOR, "tcp_tso_win_divisor" }, { CTL_STR, NET_TCP_CONG_CONTROL, "tcp_congestion_control" }, { CTL_INT, NET_TCP_ABC, "tcp_abc" }, { CTL_INT, NET_TCP_MTU_PROBING, "tcp_mtu_probing" }, { CTL_INT, NET_TCP_BASE_MSS, "tcp_base_mss" }, { CTL_INT, NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" }, { CTL_INT, NET_TCP_DMA_COPYBREAK, "tcp_dma_copybreak" }, { CTL_INT, NET_TCP_SLOW_START_AFTER_IDLE, "tcp_slow_start_after_idle" }, { CTL_INT, NET_CIPSOV4_CACHE_ENABLE, "cipso_cache_enable" }, { CTL_INT, NET_CIPSOV4_CACHE_BUCKET_SIZE, "cipso_cache_bucket_size" }, { CTL_INT, NET_CIPSOV4_RBM_OPTFMT, "cipso_rbm_optfmt" }, { CTL_INT, NET_CIPSOV4_RBM_STRICTVALID, "cipso_rbm_strictvalid" }, /* NET_TCP_AVAIL_CONG_CONTROL "tcp_available_congestion_control" no longer used */ { CTL_STR, NET_TCP_ALLOWED_CONG_CONTROL, "tcp_allowed_congestion_control" }, { CTL_INT, NET_TCP_MAX_SSTHRESH, "tcp_max_ssthresh" }, { CTL_INT, NET_IPV4_ICMP_ECHO_IGNORE_ALL, "icmp_echo_ignore_all" }, { CTL_INT, NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, "icmp_echo_ignore_broadcasts" }, { CTL_INT, NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, 
"icmp_ignore_bogus_error_responses" }, { CTL_INT, NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, "icmp_errors_use_inbound_ifaddr" }, { CTL_INT, NET_IPV4_ICMP_RATELIMIT, "icmp_ratelimit" }, { CTL_INT, NET_IPV4_ICMP_RATEMASK, "icmp_ratemask" }, { CTL_INT, NET_IPV4_IPFRAG_HIGH_THRESH, "ipfrag_high_thresh" }, { CTL_INT, NET_IPV4_IPFRAG_LOW_THRESH, "ipfrag_low_thresh" }, { CTL_INT, NET_IPV4_IPFRAG_TIME, "ipfrag_time" }, { CTL_INT, NET_IPV4_IPFRAG_SECRET_INTERVAL, "ipfrag_secret_interval" }, /* NET_IPV4_IPFRAG_MAX_DIST "ipfrag_max_dist" no longer used */ { CTL_INT, 2088 /* NET_IPQ_QMAX */, "ip_queue_maxlen" }, /* NET_TCP_DEFAULT_WIN_SCALE unused */ /* NET_TCP_BIC_BETA unused */ /* NET_IPV4_TCP_MAX_KA_PROBES unused */ /* NET_IPV4_IP_MASQ_DEBUG unused */ /* NET_TCP_SYN_TAILDROP unused */ /* NET_IPV4_ICMP_SOURCEQUENCH_RATE unused */ /* NET_IPV4_ICMP_DESTUNREACH_RATE unused */ /* NET_IPV4_ICMP_TIMEEXCEED_RATE unused */ /* NET_IPV4_ICMP_PARAMPROB_RATE unused */ /* NET_IPV4_ICMP_ECHOREPLY_RATE unused */ /* NET_IPV4_ALWAYS_DEFRAG unused */ {} }; static const struct bin_table bin_net_ipx_table[] = { { CTL_INT, NET_IPX_PPROP_BROADCASTING, "ipx_pprop_broadcasting" }, /* NET_IPX_FORWARDING unused */ {} }; static const struct bin_table bin_net_atalk_table[] = { { CTL_INT, NET_ATALK_AARP_EXPIRY_TIME, "aarp-expiry-time" }, { CTL_INT, NET_ATALK_AARP_TICK_TIME, "aarp-tick-time" }, { CTL_INT, NET_ATALK_AARP_RETRANSMIT_LIMIT, "aarp-retransmit-limit" }, { CTL_INT, NET_ATALK_AARP_RESOLVE_TIME, "aarp-resolve-time" }, {}, }; static const struct bin_table bin_net_netrom_table[] = { { CTL_INT, NET_NETROM_DEFAULT_PATH_QUALITY, "default_path_quality" }, { CTL_INT, NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER, "obsolescence_count_initialiser" }, { CTL_INT, NET_NETROM_NETWORK_TTL_INITIALISER, "network_ttl_initialiser" }, { CTL_INT, NET_NETROM_TRANSPORT_TIMEOUT, "transport_timeout" }, { CTL_INT, NET_NETROM_TRANSPORT_MAXIMUM_TRIES, "transport_maximum_tries" }, { CTL_INT, 
NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY,	"transport_acknowledge_delay" },
	{ CTL_INT,	NET_NETROM_TRANSPORT_BUSY_DELAY,	"transport_busy_delay" },
	{ CTL_INT,	NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE,	"transport_requested_window_size" },
	{ CTL_INT,	NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT,	"transport_no_activity_timeout" },
	{ CTL_INT,	NET_NETROM_ROUTING_CONTROL,	"routing_control" },
	{ CTL_INT,	NET_NETROM_LINK_FAILS_COUNT,	"link_fails_count" },
	{ CTL_INT,	NET_NETROM_RESET,	"reset" },
	{}
};

/* AX.25 per-device parameters (net.ax25.<dev>.*) */
static const struct bin_table bin_net_ax25_param_table[] = {
	{ CTL_INT,	NET_AX25_IP_DEFAULT_MODE,	"ip_default_mode" },
	{ CTL_INT,	NET_AX25_DEFAULT_MODE,		"ax25_default_mode" },
	{ CTL_INT,	NET_AX25_BACKOFF_TYPE,		"backoff_type" },
	{ CTL_INT,	NET_AX25_CONNECT_MODE,		"connect_mode" },
	{ CTL_INT,	NET_AX25_STANDARD_WINDOW,	"standard_window_size" },
	{ CTL_INT,	NET_AX25_EXTENDED_WINDOW,	"extended_window_size" },
	{ CTL_INT,	NET_AX25_T1_TIMEOUT,		"t1_timeout" },
	{ CTL_INT,	NET_AX25_T2_TIMEOUT,		"t2_timeout" },
	{ CTL_INT,	NET_AX25_T3_TIMEOUT,		"t3_timeout" },
	{ CTL_INT,	NET_AX25_IDLE_TIMEOUT,		"idle_timeout" },
	{ CTL_INT,	NET_AX25_N2,			"maximum_retry_count" },
	{ CTL_INT,	NET_AX25_PACLEN,		"maximum_packet_length" },
	{ CTL_INT,	NET_AX25_PROTOCOL,		"protocol" },
	{ CTL_INT,	NET_AX25_DAMA_SLAVE_TIMEOUT,	"dama_slave_timeout" },
	{}
};

/* ctl_name == 0 is the wildcard: mapped from ifindex to device name
 * by get_sysctl(). */
static const struct bin_table bin_net_ax25_table[] = {
	{ CTL_DIR,	0, NULL, bin_net_ax25_param_table },
	{}
};

static const struct bin_table bin_net_rose_table[] = {
	{ CTL_INT,	NET_ROSE_RESTART_REQUEST_TIMEOUT,	"restart_request_timeout" },
	{ CTL_INT,	NET_ROSE_CALL_REQUEST_TIMEOUT,		"call_request_timeout" },
	{ CTL_INT,	NET_ROSE_RESET_REQUEST_TIMEOUT,		"reset_request_timeout" },
	{ CTL_INT,	NET_ROSE_CLEAR_REQUEST_TIMEOUT,		"clear_request_timeout" },
	{ CTL_INT,	NET_ROSE_ACK_HOLD_BACK_TIMEOUT,		"acknowledge_hold_back_timeout" },
	{ CTL_INT,	NET_ROSE_ROUTING_CONTROL,		"routing_control" },
	{ CTL_INT,	NET_ROSE_LINK_FAIL_TIMEOUT,		"link_fail_timeout" },
	{ CTL_INT,	NET_ROSE_MAX_VCS,			"maximum_virtual_circuits" },
	{ CTL_INT,	NET_ROSE_WINDOW_SIZE,			"window_size" },
	{ CTL_INT,	NET_ROSE_NO_ACTIVITY_TIMEOUT,		"no_activity_timeout" },
	{}
};

/* Per-interface IPv6 settings, shared by "all", "default" and the
 * per-device wildcard entries below. */
static const struct bin_table bin_net_ipv6_conf_var_table[] = {
	{ CTL_INT,	NET_IPV6_FORWARDING,		"forwarding" },
	{ CTL_INT,	NET_IPV6_HOP_LIMIT,		"hop_limit" },
	{ CTL_INT,	NET_IPV6_MTU,			"mtu" },
	{ CTL_INT,	NET_IPV6_ACCEPT_RA,		"accept_ra" },
	{ CTL_INT,	NET_IPV6_ACCEPT_REDIRECTS,	"accept_redirects" },
	{ CTL_INT,	NET_IPV6_AUTOCONF,		"autoconf" },
	{ CTL_INT,	NET_IPV6_DAD_TRANSMITS,		"dad_transmits" },
	{ CTL_INT,	NET_IPV6_RTR_SOLICITS,		"router_solicitations" },
	{ CTL_INT,	NET_IPV6_RTR_SOLICIT_INTERVAL,	"router_solicitation_interval" },
	{ CTL_INT,	NET_IPV6_RTR_SOLICIT_DELAY,	"router_solicitation_delay" },
	{ CTL_INT,	NET_IPV6_USE_TEMPADDR,		"use_tempaddr" },
	{ CTL_INT,	NET_IPV6_TEMP_VALID_LFT,	"temp_valid_lft" },
	{ CTL_INT,	NET_IPV6_TEMP_PREFERED_LFT,	"temp_prefered_lft" },
	{ CTL_INT,	NET_IPV6_REGEN_MAX_RETRY,	"regen_max_retry" },
	{ CTL_INT,	NET_IPV6_MAX_DESYNC_FACTOR,	"max_desync_factor" },
	{ CTL_INT,	NET_IPV6_MAX_ADDRESSES,		"max_addresses" },
	{ CTL_INT,	NET_IPV6_FORCE_MLD_VERSION,	"force_mld_version" },
	{ CTL_INT,	NET_IPV6_ACCEPT_RA_DEFRTR,	"accept_ra_defrtr" },
	{ CTL_INT,	NET_IPV6_ACCEPT_RA_PINFO,	"accept_ra_pinfo" },
	{ CTL_INT,	NET_IPV6_ACCEPT_RA_RTR_PREF,	"accept_ra_rtr_pref" },
	{ CTL_INT,	NET_IPV6_RTR_PROBE_INTERVAL,	"router_probe_interval" },
	{ CTL_INT,	NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN,	"accept_ra_rt_info_max_plen" },
	{ CTL_INT,	NET_IPV6_PROXY_NDP,		"proxy_ndp" },
	{ CTL_INT,	NET_IPV6_ACCEPT_SOURCE_ROUTE,	"accept_source_route" },
	{}
};

static const struct bin_table bin_net_ipv6_conf_table[] = {
	{ CTL_DIR,	NET_PROTO_CONF_ALL,	"all",		bin_net_ipv6_conf_var_table },
	{ CTL_DIR,	NET_PROTO_CONF_DEFAULT,	"default",	bin_net_ipv6_conf_var_table },
	{ CTL_DIR,	0, NULL, bin_net_ipv6_conf_var_table },
	{}
};

static const struct bin_table bin_net_ipv6_route_table[] = {
	/* NET_IPV6_ROUTE_FLUSH	"flush"  no longer used */
	{ CTL_INT,	NET_IPV6_ROUTE_GC_THRESH,	"gc_thresh" },
	{ CTL_INT,	NET_IPV6_ROUTE_MAX_SIZE,	"max_size" },
	{ CTL_INT,	NET_IPV6_ROUTE_GC_MIN_INTERVAL,	"gc_min_interval" },
	{ CTL_INT,	NET_IPV6_ROUTE_GC_TIMEOUT,	"gc_timeout" },
	{ CTL_INT,	NET_IPV6_ROUTE_GC_INTERVAL,	"gc_interval" },
	{ CTL_INT,	NET_IPV6_ROUTE_GC_ELASTICITY,	"gc_elasticity" },
	{ CTL_INT,	NET_IPV6_ROUTE_MTU_EXPIRES,	"mtu_expires" },
	{ CTL_INT,	NET_IPV6_ROUTE_MIN_ADVMSS,	"min_adv_mss" },
	{ CTL_INT,	NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS,	"gc_min_interval_ms" },
	{}
};

static const struct bin_table bin_net_ipv6_icmp_table[] = {
	{ CTL_INT,	NET_IPV6_ICMP_RATELIMIT,	"ratelimit" },
	{}
};

static const struct bin_table bin_net_ipv6_table[] = {
	{ CTL_DIR,	NET_IPV6_CONF,		"conf",		bin_net_ipv6_conf_table },
	{ CTL_DIR,	NET_IPV6_NEIGH,		"neigh",	bin_net_neigh_table },
	{ CTL_DIR,	NET_IPV6_ROUTE,		"route",	bin_net_ipv6_route_table },
	{ CTL_DIR,	NET_IPV6_ICMP,		"icmp",		bin_net_ipv6_icmp_table },
	{ CTL_INT,	NET_IPV6_BINDV6ONLY,	"bindv6only" },
	{ CTL_INT,	NET_IPV6_IP6FRAG_HIGH_THRESH,	"ip6frag_high_thresh" },
	{ CTL_INT,	NET_IPV6_IP6FRAG_LOW_THRESH,	"ip6frag_low_thresh" },
	{ CTL_INT,	NET_IPV6_IP6FRAG_TIME,		"ip6frag_time" },
	{ CTL_INT,	NET_IPV6_IP6FRAG_SECRET_INTERVAL,	"ip6frag_secret_interval" },
	{ CTL_INT,	NET_IPV6_MLD_MAX_MSF,		"mld_max_msf" },
	{ CTL_INT,	2088 /* IPQ_QMAX */,		"ip6_queue_maxlen" },
	{}
};

static const struct bin_table bin_net_x25_table[] = {
	{ CTL_INT,	NET_X25_RESTART_REQUEST_TIMEOUT,	"restart_request_timeout" },
	{ CTL_INT,	NET_X25_CALL_REQUEST_TIMEOUT,		"call_request_timeout" },
	{ CTL_INT,	NET_X25_RESET_REQUEST_TIMEOUT,		"reset_request_timeout" },
	{ CTL_INT,	NET_X25_CLEAR_REQUEST_TIMEOUT,		"clear_request_timeout" },
	{ CTL_INT,	NET_X25_ACK_HOLD_BACK_TIMEOUT,		"acknowledgement_hold_back_timeout" },
	{ CTL_INT,	NET_X25_FORWARD,			"x25_forward" },
	{}
};

static const struct bin_table bin_net_tr_table[] = {
	{ CTL_INT,	NET_TR_RIF_TIMEOUT,	"rif_timeout" },
	{}
};

/* DECnet per-device variables, shared by the named and wildcard dirs */
static const struct bin_table bin_net_decnet_conf_vars[] = {
	{ CTL_INT,	NET_DECNET_CONF_DEV_FORWARDING,	"forwarding" },
	{ CTL_INT,	NET_DECNET_CONF_DEV_PRIORITY,	"priority" },
	{ CTL_INT,	NET_DECNET_CONF_DEV_T2,		"t2" },
	{ CTL_INT,	NET_DECNET_CONF_DEV_T3,		"t3" },
	{}
};

static const struct bin_table bin_net_decnet_conf[] = {
	{ CTL_DIR,	NET_DECNET_CONF_ETHER,		"ethernet",	bin_net_decnet_conf_vars },
	{ CTL_DIR,	NET_DECNET_CONF_GRE,		"ipgre",	bin_net_decnet_conf_vars },
	{ CTL_DIR,	NET_DECNET_CONF_X25,		"x25",		bin_net_decnet_conf_vars },
	{ CTL_DIR,	NET_DECNET_CONF_PPP,		"ppp",		bin_net_decnet_conf_vars },
	{ CTL_DIR,	NET_DECNET_CONF_DDCMP,		"ddcmp",	bin_net_decnet_conf_vars },
	{ CTL_DIR,	NET_DECNET_CONF_LOOPBACK,	"loopback",	bin_net_decnet_conf_vars },
	{ CTL_DIR,	0, NULL, bin_net_decnet_conf_vars },
	{}
};

static const struct bin_table bin_net_decnet_table[] = {
	{ CTL_DIR,	NET_DECNET_CONF,	"conf",	bin_net_decnet_conf },
	/* CTL_DNADR: 16-bit little-endian DECnet node address, see
	 * bin_dn_node_address() */
	{ CTL_DNADR,	NET_DECNET_NODE_ADDRESS,	"node_address" },
	{ CTL_STR,	NET_DECNET_NODE_NAME,		"node_name" },
	{ CTL_STR,	NET_DECNET_DEFAULT_DEVICE,	"default_device" },
	{ CTL_INT,	NET_DECNET_TIME_WAIT,		"time_wait" },
	{ CTL_INT,	NET_DECNET_DN_COUNT,		"dn_count" },
	{ CTL_INT,	NET_DECNET_DI_COUNT,		"di_count" },
	{ CTL_INT,	NET_DECNET_DR_COUNT,		"dr_count" },
	{ CTL_INT,	NET_DECNET_DST_GC_INTERVAL,	"dst_gc_interval" },
	{ CTL_INT,	NET_DECNET_NO_FC_MAX_CWND,	"no_fc_max_cwnd" },
	{ CTL_INT,	NET_DECNET_MEM,		"decnet_mem" },
	{ CTL_INT,	NET_DECNET_RMEM,	"decnet_rmem" },
	{ CTL_INT,	NET_DECNET_WMEM,	"decnet_wmem" },
	{ CTL_INT,	NET_DECNET_DEBUG_LEVEL,	"debug" },
	{}
};

static const struct bin_table bin_net_sctp_table[] = {
	{ CTL_INT,	NET_SCTP_RTO_INITIAL,		"rto_initial" },
	{ CTL_INT,	NET_SCTP_RTO_MIN,		"rto_min" },
	{ CTL_INT,	NET_SCTP_RTO_MAX,		"rto_max" },
	{ CTL_INT,	NET_SCTP_RTO_ALPHA,		"rto_alpha_exp_divisor" },
	{ CTL_INT,	NET_SCTP_RTO_BETA,		"rto_beta_exp_divisor" },
	{ CTL_INT,	NET_SCTP_VALID_COOKIE_LIFE,	"valid_cookie_life" },
	{ CTL_INT,	NET_SCTP_ASSOCIATION_MAX_RETRANS,	"association_max_retrans" },
	{ CTL_INT,	NET_SCTP_PATH_MAX_RETRANS,	"path_max_retrans" },
	{ CTL_INT,	NET_SCTP_MAX_INIT_RETRANSMITS,	"max_init_retransmits" },
	{ CTL_INT,	NET_SCTP_HB_INTERVAL,		"hb_interval" },
	{ CTL_INT,	NET_SCTP_PRESERVE_ENABLE,	"cookie_preserve_enable" },
	{ CTL_INT,	NET_SCTP_MAX_BURST,		"max_burst" },
	{ CTL_INT,	NET_SCTP_ADDIP_ENABLE,		"addip_enable" },
	{ CTL_INT,	NET_SCTP_PRSCTP_ENABLE,		"prsctp_enable" },
	{ CTL_INT,	NET_SCTP_SNDBUF_POLICY,		"sndbuf_policy" },
	{ CTL_INT,	NET_SCTP_SACK_TIMEOUT,		"sack_timeout" },
	{ CTL_INT,	NET_SCTP_RCVBUF_POLICY,		"rcvbuf_policy" },
	{}
};

static const struct bin_table bin_net_llc_llc2_timeout_table[] = {
	{ CTL_INT,	NET_LLC2_ACK_TIMEOUT,	"ack" },
	{ CTL_INT,	NET_LLC2_P_TIMEOUT,	"p" },
	{ CTL_INT,	NET_LLC2_REJ_TIMEOUT,	"rej" },
	{ CTL_INT,	NET_LLC2_BUSY_TIMEOUT,	"busy" },
	{}
};

static const struct bin_table bin_net_llc_station_table[] = {
	{ CTL_INT,	NET_LLC_STATION_ACK_TIMEOUT,	"ack_timeout" },
	{}
};

static const struct bin_table bin_net_llc_llc2_table[] = {
	{ CTL_DIR,	NET_LLC2,	"timeout",	bin_net_llc_llc2_timeout_table },
	{}
};

static const struct bin_table bin_net_llc_table[] = {
	{ CTL_DIR,	NET_LLC2,	"llc2",		bin_net_llc_llc2_table },
	{ CTL_DIR,	NET_LLC_STATION,	"station",	bin_net_llc_station_table },
	{}
};

static const struct bin_table bin_net_netfilter_table[] = {
	{ CTL_INT,	NET_NF_CONNTRACK_MAX,		"nf_conntrack_max" },
	/* NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT "nf_conntrack_tcp_timeout_syn_sent" no longer used */
	/* NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV "nf_conntrack_tcp_timeout_syn_recv" no longer used */
	/* NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED "nf_conntrack_tcp_timeout_established" no longer used */
	/* NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT "nf_conntrack_tcp_timeout_fin_wait" no longer used */
	/* NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT "nf_conntrack_tcp_timeout_close_wait" no longer used */
	/* NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK "nf_conntrack_tcp_timeout_last_ack" no longer used */
	/* NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT "nf_conntrack_tcp_timeout_time_wait" no longer used */
	/* NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE "nf_conntrack_tcp_timeout_close" no longer used */
	/* NET_NF_CONNTRACK_UDP_TIMEOUT "nf_conntrack_udp_timeout" no longer used */
	/* NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM "nf_conntrack_udp_timeout_stream" no longer used */
	/* NET_NF_CONNTRACK_ICMP_TIMEOUT "nf_conntrack_icmp_timeout" no longer used */
	/* NET_NF_CONNTRACK_GENERIC_TIMEOUT "nf_conntrack_generic_timeout" no longer used */
	{ CTL_INT,	NET_NF_CONNTRACK_BUCKETS,	"nf_conntrack_buckets" },
	{ CTL_INT,	NET_NF_CONNTRACK_LOG_INVALID,	"nf_conntrack_log_invalid" },
	/* NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS "nf_conntrack_tcp_timeout_max_retrans" no longer used */
	{ CTL_INT,	NET_NF_CONNTRACK_TCP_LOOSE,	"nf_conntrack_tcp_loose" },
	{ CTL_INT,	NET_NF_CONNTRACK_TCP_BE_LIBERAL,	"nf_conntrack_tcp_be_liberal" },
	{ CTL_INT,	NET_NF_CONNTRACK_TCP_MAX_RETRANS,	"nf_conntrack_tcp_max_retrans" },
	/* NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED "nf_conntrack_sctp_timeout_closed" no longer used */
	/* NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT "nf_conntrack_sctp_timeout_cookie_wait" no longer used */
	/* NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED "nf_conntrack_sctp_timeout_cookie_echoed" no longer used */
	/* NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED "nf_conntrack_sctp_timeout_established" no longer used */
	/* NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT "nf_conntrack_sctp_timeout_shutdown_sent" no longer used */
	/* NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD "nf_conntrack_sctp_timeout_shutdown_recd" no longer used */
	/* NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT "nf_conntrack_sctp_timeout_shutdown_ack_sent" no longer used */
	{ CTL_INT,	NET_NF_CONNTRACK_COUNT,		"nf_conntrack_count" },
	/* NET_NF_CONNTRACK_ICMPV6_TIMEOUT "nf_conntrack_icmpv6_timeout" no longer used */
	/* NET_NF_CONNTRACK_FRAG6_TIMEOUT "nf_conntrack_frag6_timeout" no longer used */
	{ CTL_INT,	NET_NF_CONNTRACK_FRAG6_LOW_THRESH,	"nf_conntrack_frag6_low_thresh" },
	{ CTL_INT,	NET_NF_CONNTRACK_FRAG6_HIGH_THRESH,	"nf_conntrack_frag6_high_thresh" },
	{ CTL_INT,	NET_NF_CONNTRACK_CHECKSUM,	"nf_conntrack_checksum" },
	{}
};

static const struct bin_table bin_net_irda_table[] = {
	{ CTL_INT,	NET_IRDA_DISCOVERY,		"discovery" },
	{ CTL_STR,	NET_IRDA_DEVNAME,		"devname" },
	{ CTL_INT,	NET_IRDA_DEBUG,			"debug" },
	{ CTL_INT,	NET_IRDA_FAST_POLL,		"fast_poll_increase" },
	{ CTL_INT,	NET_IRDA_DISCOVERY_SLOTS,	"discovery_slots" },
	{ CTL_INT,	NET_IRDA_DISCOVERY_TIMEOUT,	"discovery_timeout" },
	{ CTL_INT,	NET_IRDA_SLOT_TIMEOUT,		"slot_timeout" },
	{ CTL_INT,	NET_IRDA_MAX_BAUD_RATE,		"max_baud_rate" },
	{ CTL_INT,	NET_IRDA_MIN_TX_TURN_TIME,	"min_tx_turn_time" },
	{ CTL_INT,	NET_IRDA_MAX_TX_DATA_SIZE,	"max_tx_data_size" },
	{ CTL_INT,	NET_IRDA_MAX_TX_WINDOW,		"max_tx_window" },
	{ CTL_INT,	NET_IRDA_MAX_NOREPLY_TIME,	"max_noreply_time" },
	{ CTL_INT,	NET_IRDA_WARN_NOREPLY_TIME,	"warn_noreply_time" },
	{ CTL_INT,	NET_IRDA_LAP_KEEPALIVE_TIME,	"lap_keepalive_time" },
	{}
};

/* Top-level CTL_NET directory */
static const struct bin_table bin_net_table[] = {
	{ CTL_DIR,	NET_CORE,		"core",		bin_net_core_table },
	/* NET_ETHER not used */
	/* NET_802 not used */
	{ CTL_DIR,	NET_UNIX,		"unix",		bin_net_unix_table },
	{ CTL_DIR,	NET_IPV4,		"ipv4",		bin_net_ipv4_table },
	{ CTL_DIR,	NET_IPX,		"ipx",		bin_net_ipx_table },
	{ CTL_DIR,	NET_ATALK,		"appletalk",	bin_net_atalk_table },
	{ CTL_DIR,	NET_NETROM,		"netrom",	bin_net_netrom_table },
	{ CTL_DIR,	NET_AX25,		"ax25",		bin_net_ax25_table },
	/*  NET_BRIDGE "bridge" no longer used */
	{ CTL_DIR,	NET_ROSE,		"rose",		bin_net_rose_table },
	{ CTL_DIR,	NET_IPV6,		"ipv6",		bin_net_ipv6_table },
	{ CTL_DIR,	NET_X25,		"x25",		bin_net_x25_table },
	{ CTL_DIR,	NET_TR,			"token-ring",	bin_net_tr_table },
	{ CTL_DIR,	NET_DECNET,		"decnet",	bin_net_decnet_table },
	/*  NET_ECONET not used */
	{ CTL_DIR,	NET_SCTP,		"sctp",		bin_net_sctp_table },
	{ CTL_DIR,	NET_LLC,		"llc",		bin_net_llc_table },
	{ CTL_DIR,	NET_NETFILTER,		"netfilter",	bin_net_netfilter_table },
	/* NET_DCCP "dccp" no longer used */
	{ CTL_DIR,	NET_IRDA,		"irda",		bin_net_irda_table },
	{ CTL_INT,	2089 /* NET_IRDA+1 */,	"nf_conntrack_max" },
	{}
};

static const struct bin_table bin_fs_quota_table[] = {
	{ CTL_INT,	FS_DQ_LOOKUPS,		"lookups" },
	{ CTL_INT,	FS_DQ_DROPS,		"drops" },
	{ CTL_INT,	FS_DQ_READS,		"reads" },
	{ CTL_INT,	FS_DQ_WRITES,		"writes" },
	{ CTL_INT,	FS_DQ_CACHE_HITS,	"cache_hits" },
	{ CTL_INT,	FS_DQ_ALLOCATED,	"allocated_dquots" },
	{ CTL_INT,	FS_DQ_FREE,		"free_dquots" },
	{ CTL_INT,	FS_DQ_SYNCS,		"syncs" },
	{ CTL_INT,	FS_DQ_WARNINGS,		"warnings" },
	{}
};

static const struct bin_table bin_fs_xfs_table[] = {
	{ CTL_INT,	XFS_SGID_INHERIT,	"irix_sgid_inherit" },
	{ CTL_INT,	XFS_SYMLINK_MODE,	"irix_symlink_mode" },
	{ CTL_INT,	XFS_PANIC_MASK,		"panic_mask" },
	{ CTL_INT,	XFS_ERRLEVEL,		"error_level" },
	{ CTL_INT,	XFS_SYNCD_TIMER,	"xfssyncd_centisecs" },
	{ CTL_INT,	XFS_INHERIT_SYNC,	"inherit_sync" },
	{ CTL_INT,	XFS_INHERIT_NODUMP,	"inherit_nodump" },
	{ CTL_INT,	XFS_INHERIT_NOATIME,	"inherit_noatime" },
	{ CTL_INT,	XFS_BUF_TIMER,		"xfsbufd_centisecs" },
	{ CTL_INT,	XFS_BUF_AGE,		"age_buffer_centisecs" },
	{ CTL_INT,	XFS_INHERIT_NOSYM,	"inherit_nosymlinks" },
	{ CTL_INT,	XFS_ROTORSTEP,		"rotorstep" },
	{ CTL_INT,	XFS_INHERIT_NODFRG,	"inherit_nodefrag" },
	{ CTL_INT,	XFS_FILESTREAM_TIMER,	"filestream_centisecs" },
	{ CTL_INT,	XFS_STATS_CLEAR,	"stats_clear" },
	{}
};

static const struct bin_table bin_fs_ocfs2_nm_table[] = {
	{ CTL_STR,	1, "hb_ctl_path" },
	{}
};

static const struct bin_table bin_fs_ocfs2_table[] = {
	{ CTL_DIR,	1, "nm", bin_fs_ocfs2_nm_table },
	{}
};

static const struct bin_table bin_inotify_table[] = {
	{ CTL_INT,	INOTIFY_MAX_USER_INSTANCES,	"max_user_instances" },
	{ CTL_INT,	INOTIFY_MAX_USER_WATCHES,	"max_user_watches" },
	{ CTL_INT,	INOTIFY_MAX_QUEUED_EVENTS,	"max_queued_events" },
	{}
};

static const struct bin_table bin_fs_table[] = {
	{ CTL_INT,	FS_NRINODE,	"inode-nr" },
	{ CTL_INT,	FS_STATINODE,	"inode-state" },
	/* FS_MAXINODE unused */
	/* FS_NRDQUOT unused */
	/* FS_MAXDQUOT unused */
	/* FS_NRFILE "file-nr" no longer used */
	{ CTL_INT,	FS_MAXFILE,	"file-max" },
	{ CTL_INT,	FS_DENTRY,	"dentry-state" },
	/* FS_NRSUPER unused */
	/* FS_MAXUPSER unused */
	{ CTL_INT,	FS_OVERFLOWUID,	"overflowuid" },
	{ CTL_INT,	FS_OVERFLOWGID,	"overflowgid" },
	{ CTL_INT,	FS_LEASES,	"leases-enable" },
	{ CTL_INT,	FS_DIR_NOTIFY,	"dir-notify-enable" },
	{ CTL_INT,	FS_LEASE_TIME,	"lease-break-time" },
	{ CTL_DIR,	FS_DQSTATS,	"quota",	bin_fs_quota_table },
	{ CTL_DIR,	FS_XFS,		"xfs",		bin_fs_xfs_table },
	{ CTL_ULONG,	FS_AIO_NR,	"aio-nr" },
	{ CTL_ULONG,	FS_AIO_MAX_NR,	"aio-max-nr" },
	{ CTL_DIR,	FS_INOTIFY,	"inotify",	bin_inotify_table },
	{ CTL_DIR,	FS_OCFS2,	"ocfs2",	bin_fs_ocfs2_table },
	{ CTL_INT,	KERN_SETUID_DUMPABLE, "suid_dumpable" },
	{}
};

static const struct bin_table bin_ipmi_table[] = {
	{ CTL_INT,	DEV_IPMI_POWEROFF_POWERCYCLE,	"poweroff_powercycle" },
	{}
};

static const struct bin_table bin_mac_hid_files[] = {
	/* DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES unused */
	/* DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES unused */
	{ CTL_INT,	DEV_MAC_HID_MOUSE_BUTTON_EMULATION,	"mouse_button_emulation" },
	{ CTL_INT,	DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE,	"mouse_button2_keycode" },
	{ CTL_INT,	DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE,	"mouse_button3_keycode" },
	/* DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES unused */
	{}
};

static const struct bin_table bin_raid_table[] = {
	{ CTL_INT,	DEV_RAID_SPEED_LIMIT_MIN,	"speed_limit_min" },
	{ CTL_INT,	DEV_RAID_SPEED_LIMIT_MAX,	"speed_limit_max" },
	{}
};

static const struct bin_table bin_scsi_table[] = {
	{ CTL_INT, DEV_SCSI_LOGGING_LEVEL, "logging_level" },
	{}
};

static const struct bin_table bin_dev_table[] = {
	/* DEV_CDROM	"cdrom" no longer used */
	/* DEV_HWMON unused */
	/* DEV_PARPORT	"parport" no longer used */
	{ CTL_DIR,	DEV_RAID,	"raid",		bin_raid_table },
	{ CTL_DIR,	DEV_MAC_HID,	"mac_hid",	bin_mac_hid_files },
	{ CTL_DIR,	DEV_SCSI,	"scsi",		bin_scsi_table },
	{ CTL_DIR,	DEV_IPMI,	"ipmi",		bin_ipmi_table },
	{}
};

static const struct bin_table bin_bus_isa_table[] = {
	{ CTL_INT,	BUS_ISA_MEM_BASE,	"membase" },
	{ CTL_INT,	BUS_ISA_PORT_BASE,	"portbase" },
	{ CTL_INT,	BUS_ISA_PORT_SHIFT,	"portshift" },
	{}
};

static const
struct bin_table bin_bus_table[] = {
	{ CTL_DIR,	CTL_BUS_ISA,	"isa",	bin_bus_isa_table },
	{}
};

static const struct bin_table bin_s390dbf_table[] = {
	{ CTL_INT,	5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
	{ CTL_INT,	5679 /* CTL_S390DBF_ACTIVE */,	  "debug_active" },
	{}
};

static const struct bin_table bin_sunrpc_table[] = {
	/* CTL_RPCDEBUG	"rpc_debug"  no longer used */
	/* CTL_NFSDEBUG "nfs_debug"  no longer used */
	/* CTL_NFSDDEBUG "nfsd_debug" no longer used  */
	/* CTL_NLMDEBUG "nlm_debug" no longer used */
	{ CTL_INT,	CTL_SLOTTABLE_UDP,	"udp_slot_table_entries" },
	{ CTL_INT,	CTL_SLOTTABLE_TCP,	"tcp_slot_table_entries" },
	{ CTL_INT,	CTL_MIN_RESVPORT,	"min_resvport" },
	{ CTL_INT,	CTL_MAX_RESVPORT,	"max_resvport" },
	{}
};

static const struct bin_table bin_pm_table[] = {
	/* frv specific */
	/* 1 == CTL_PM_SUSPEND	"suspend"  no longer used" */
	{ CTL_INT,	2 /* CTL_PM_CMODE */,		"cmode" },
	{ CTL_INT,	3 /* CTL_PM_P0 */,		"p0" },
	{ CTL_INT,	4 /* CTL_PM_CM */,		"cm" },
	{}
};

/* Root of the binary sysctl number -> /proc/sys name translation tree */
static const struct bin_table bin_root_table[] = {
	{ CTL_DIR,	CTL_KERN,	"kernel",	bin_kern_table },
	{ CTL_DIR,	CTL_VM,		"vm",		bin_vm_table },
	{ CTL_DIR,	CTL_NET,	"net",		bin_net_table },
	/* CTL_PROC not used */
	{ CTL_DIR,	CTL_FS,		"fs",		bin_fs_table },
	/* CTL_DEBUG "debug" no longer used */
	{ CTL_DIR,	CTL_DEV,	"dev",		bin_dev_table },
	{ CTL_DIR,	CTL_BUS,	"bus",		bin_bus_table },
	{ CTL_DIR,	CTL_ABI,	"abi" },
	/* CTL_CPU not used */
	/* CTL_ARLAN "arlan" no longer used */
	{ CTL_DIR,	CTL_S390DBF,	"s390dbf",	bin_s390dbf_table },
	{ CTL_DIR,	CTL_SUNRPC,	"sunrpc",	bin_sunrpc_table },
	{ CTL_DIR,	CTL_PM,		"pm",		bin_pm_table },
	{}
};

/*
 * Conversion handler for directory entries: a directory itself cannot be
 * read or written through sysctl(2).
 */
static ssize_t bin_dir(struct file *file,
	void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
	return -ENOTDIR;
}

/*
 * CTL_STR conversion: read the proc file into the user buffer (trimming
 * the trailing newline) and/or write the new string from user space.
 * Returns the number of bytes copied out, or a negative errno.
 */
static ssize_t bin_string(struct file *file,
	void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
	ssize_t result, copied = 0;

	if (oldval && oldlen) {
		char __user *lastp;
		loff_t pos = 0;
		int ch;

		result = vfs_read(file, oldval, oldlen, &pos);
		if (result < 0)
			goto out;

		copied = result;
		lastp = oldval + copied - 1;

		result = -EFAULT;
		if (get_user(ch, lastp))
			goto out;

		/* Trim off the trailing newline */
		if (ch == '\n') {
			result = -EFAULT;
			if (put_user('\0', lastp))
				goto out;
			copied -= 1;
		}
	}

	if (newval && newlen) {
		loff_t pos = 0;

		result = vfs_write(file, newval, newlen, &pos);
		if (result < 0)
			goto out;
	}

	result = copied;
out:
	return result;
}

/*
 * CTL_INT conversion: the proc file holds whitespace-separated decimal
 * integers; parse up to oldlen/sizeof(int) of them into the user vector,
 * and/or format the user-supplied vector into the proc file.
 * Uses set_fs(KERNEL_DS) so vfs_read/vfs_write accept the kernel buffer.
 */
static ssize_t bin_intvec(struct file *file,
	void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
	mm_segment_t old_fs = get_fs();
	ssize_t copied = 0;
	char *buffer;
	ssize_t result;

	result = -ENOMEM;
	buffer = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buffer)
		goto out;

	if (oldval && oldlen) {
		unsigned __user *vec = oldval;
		size_t length = oldlen / sizeof(*vec);
		loff_t pos = 0;
		char *str, *end;
		int i;

		set_fs(KERNEL_DS);
		result = vfs_read(file, buffer, BUFSZ - 1, &pos);
		set_fs(old_fs);
		if (result < 0)
			goto out_kfree;

		str = buffer;
		end = str + result;
		*end++ = '\0';
		for (i = 0; i < length; i++) {
			unsigned long value;

			value = simple_strtoul(str, &str, 10);
			while (isspace(*str))
				str++;

			result = -EFAULT;
			if (put_user(value, vec + i))
				goto out_kfree;

			copied += sizeof(*vec);
			/* stop at the first non-numeric token */
			if (!isdigit(*str))
				break;
		}
	}

	if (newval && newlen) {
		unsigned __user *vec = newval;
		size_t length = newlen / sizeof(*vec);
		loff_t pos = 0;
		char *str, *end;
		int i;

		str = buffer;
		end = str + BUFSZ;
		for (i = 0; i < length; i++) {
			unsigned long value;

			result = -EFAULT;
			if (get_user(value, vec + i))
				goto out_kfree;
			str += snprintf(str, end - str, "%lu\t", value);
		}

		set_fs(KERNEL_DS);
		result = vfs_write(file, buffer, str - buffer, &pos);
		set_fs(old_fs);
		if (result < 0)
			goto out_kfree;
	}
	result = copied;
out_kfree:
	kfree(buffer);
out:
	return result;
}

/*
 * CTL_ULONG conversion: identical to bin_intvec() except the user vector
 * elements are unsigned long instead of unsigned int.
 */
static ssize_t bin_ulongvec(struct file *file,
	void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
	mm_segment_t old_fs = get_fs();
	ssize_t copied = 0;
	char *buffer;
	ssize_t result;

	result = -ENOMEM;
	buffer = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buffer)
		goto out;

	if (oldval && oldlen) {
		unsigned long __user *vec = oldval;
		size_t length = oldlen / sizeof(*vec);
		loff_t pos = 0;
		char *str, *end;
		int i;

		set_fs(KERNEL_DS);
		result = vfs_read(file, buffer, BUFSZ - 1, &pos);
		set_fs(old_fs);
		if (result < 0)
			goto out_kfree;

		str = buffer;
		end = str + result;
		*end++ = '\0';
		for (i = 0; i < length; i++) {
			unsigned long value;

			value = simple_strtoul(str, &str, 10);
			while (isspace(*str))
				str++;

			result = -EFAULT;
			if (put_user(value, vec + i))
				goto out_kfree;

			copied += sizeof(*vec);
			if (!isdigit(*str))
				break;
		}
	}

	if (newval && newlen) {
		unsigned long __user *vec = newval;
		size_t length = newlen / sizeof(*vec);
		loff_t pos = 0;
		char *str, *end;
		int i;

		str = buffer;
		end = str + BUFSZ;
		for (i = 0; i < length; i++) {
			unsigned long value;

			result = -EFAULT;
			if (get_user(value, vec + i))
				goto out_kfree;
			str += snprintf(str, end - str, "%lu\t", value);
		}

		set_fs(KERNEL_DS);
		result = vfs_write(file, buffer, str - buffer, &pos);
		set_fs(old_fs);
		if (result < 0)
			goto out_kfree;
	}
	result = copied;
out_kfree:
	kfree(buffer);
out:
	return result;
}

/*
 * CTL_UUID conversion: parse the proc file's textual UUID
 * ("xxxxxxxx-xxxx-...") back into its 16-byte binary form.
 * Read-only; a write request is silently ignored.
 */
static ssize_t bin_uuid(struct file *file,
	void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
	mm_segment_t old_fs = get_fs();
	ssize_t result, copied = 0;

	/* Only supports reads */
	if (oldval && oldlen) {
		loff_t pos = 0;
		char buf[40], *str = buf;
		unsigned char uuid[16];
		int i;

		set_fs(KERNEL_DS);
		result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
		set_fs(old_fs);
		if (result < 0)
			goto out;
		buf[result] = '\0';

		/* Convert the uuid from a string to binary */
		for (i = 0; i < 16; i++) {
			result = -EIO;
			if (!isxdigit(str[0]) || !isxdigit(str[1]))
				goto out;

			uuid[i] = (hex_to_bin(str[0]) << 4) |
					hex_to_bin(str[1]);
			str += 2;
			if (*str == '-')
				str++;
		}

		if (oldlen > 16)
			oldlen = 16;

		result = -EFAULT;
		if (copy_to_user(oldval, uuid, oldlen))
			goto out;

		copied = oldlen;
	}
	result = copied;
out:
	return result;
}

/*
 * CTL_DNADR conversion: translate between the proc "area.node" text form
 * of a DECnet address and the packed little-endian 16-bit binary form
 * (area in the top 6 bits, node in the low 10).
 */
static ssize_t bin_dn_node_address(struct file *file,
	void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
	mm_segment_t old_fs = get_fs();
	ssize_t result, copied = 0;

	if (oldval && oldlen) {
		loff_t pos = 0;
		char buf[15], *nodep;
		unsigned long area, node;
		__le16 dnaddr;

		set_fs(KERNEL_DS);
		result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
		set_fs(old_fs);
		if (result < 0)
			goto out;
		buf[result] = '\0';

		/* Convert the decnet address to binary */
		result = -EIO;
		/* NOTE(review): if there is no '.', strchr() returns NULL and
		 * the +1 makes nodep == (char *)1, so the !nodep check below
		 * can never trigger -- the check should precede the +1. */
		nodep = strchr(buf, '.') + 1;
		if (!nodep)
			goto out;

		area = simple_strtoul(buf, NULL, 10);
		node = simple_strtoul(nodep, NULL, 10);

		result = -EIO;
		if ((area > 63)||(node > 1023))
			goto out;

		dnaddr = cpu_to_le16((area << 10) | node);

		result = -EFAULT;
		if (put_user(dnaddr, (__le16 __user *)oldval))
			goto out;

		copied = sizeof(dnaddr);
	}

	if (newval && newlen) {
		loff_t pos = 0;
		__le16 dnaddr;
		char buf[15];
		int len;

		result = -EINVAL;
		if (newlen != sizeof(dnaddr))
			goto out;

		result = -EFAULT;
		if (get_user(dnaddr, (__le16 __user *)newval))
			goto out;

		len = snprintf(buf, sizeof(buf), "%hu.%hu",
				le16_to_cpu(dnaddr) >> 10,
				le16_to_cpu(dnaddr) & 0x3ff);

		set_fs(KERNEL_DS);
		result = vfs_write(file, buf, len, &pos);
		set_fs(old_fs);
		if (result < 0)
			goto out;
	}

	result = copied;
out:
	return result;
}

/*
 * Walk the bin_table tree matching each element of the numeric sysctl
 * name, appending the corresponding /proc/sys path component to 'path'
 * (which starts as "sys/").  A zero ctl_name in a table is a wildcard
 * that maps a network interface index to its device name.
 * Returns the matched leaf table entry or ERR_PTR(-ENOTDIR).
 */
static const struct bin_table *get_sysctl(const int *name, int nlen, char *path)
{
	const struct bin_table *table = &bin_root_table[0];
	int ctl_name;

	/* The binary sysctl tables have a small maximum depth so
	 * there is no danger of overflowing our path as it is PATH_MAX
	 * bytes long.
	 */
	memcpy(path, "sys/", 4);
	path += 4;

repeat:
	if (!nlen)
		return ERR_PTR(-ENOTDIR);
	ctl_name = *name;
	name++;
	nlen--;
	for ( ; table->convert; table++) {
		int len = 0;

		/*
		 * For a wild card entry map from ifindex to network
		 * device name.
		 */
		if (!table->ctl_name) {
#ifdef CONFIG_NET
			struct net *net = current->nsproxy->net_ns;
			struct net_device *dev;
			dev = dev_get_by_index(net, ctl_name);
			if (dev) {
				len = strlen(dev->name);
				memcpy(path, dev->name, len);
				dev_put(dev);
			}
#endif
		/* Use the well known sysctl number to proc name mapping */
		} else if (ctl_name == table->ctl_name) {
			len = strlen(table->procname);
			memcpy(path, table->procname, len);
		}
		if (len) {
			path += len;
			if (table->child) {
				*path++ = '/';
				table = table->child;
				goto repeat;
			}
			*path = '\0';
			return table;
		}
	}
	return ERR_PTR(-ENOTDIR);
}

/*
 * Allocate a path buffer with __getname() and fill it via get_sysctl().
 * On success *tablep points at the matched table entry and the returned
 * buffer must be released with __putname(); on failure an ERR_PTR is
 * returned and the buffer is already freed.
 */
static char *sysctl_getname(const int *name, int nlen, const struct bin_table **tablep)
{
	char *tmp, *result;

	result = ERR_PTR(-ENOMEM);
	tmp = __getname();
	if (tmp) {
		const struct bin_table *table = get_sysctl(name, nlen, tmp);
		result = tmp;
		*tablep = table;
		if (IS_ERR(table)) {
			__putname(tmp);
			result = ERR_CAST(table);
		}
	}
	return result;
}

/*
 * Core of sys_sysctl(): translate the numeric name into a /proc/sys
 * path, open it from the pid namespace's proc mount with the minimal
 * access mode, and delegate to the entry's conversion handler.
 */
static ssize_t binary_sysctl(const int *name, int nlen,
	void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
	const struct bin_table *table = NULL;
	struct vfsmount *mnt;
	struct file *file;
	ssize_t result;
	char *pathname;
	int flags;

	pathname = sysctl_getname(name, nlen, &table);
	result = PTR_ERR(pathname);
	if (IS_ERR(pathname))
		goto out;

	/* How should the sysctl be accessed? */
	if (oldval && oldlen && newval && newlen) {
		flags = O_RDWR;
	} else if (newval && newlen) {
		flags = O_WRONLY;
	} else if (oldval && oldlen) {
		flags = O_RDONLY;
	} else {
		/* nothing to read or write: trivially succeed */
		result = 0;
		goto out_putname;
	}

	mnt = current->nsproxy->pid_ns->proc_mnt;
	file = file_open_root(mnt->mnt_root, mnt, pathname, flags);
	result = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_putname;

	result = table->convert(file, oldval, oldlen, newval, newlen);

	fput(file);
out_putname:
	__putname(pathname);
out:
	return result;
}

#else /* CONFIG_SYSCTL_SYSCALL */

/* Stub when the binary sysctl syscall is configured out */
static ssize_t binary_sysctl(const int *name, int nlen,
	void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
	return -ENOSYS;
}

#endif /* CONFIG_SYSCTL_SYSCALL */

/*
 * Rate-limited "deprecated interface" warning naming the calling process
 * and the numeric sysctl path it used.
 */
static void deprecated_sysctl_warning(const int *name, int nlen)
{
	int i;

	/*
	 * CTL_KERN/KERN_VERSION is used by older glibc and cannot
	 * ever go away.
	 */
	if (name[0] == CTL_KERN && name[1] == KERN_VERSION)
		return;

	if (printk_ratelimit()) {
		printk(KERN_INFO
			"warning: process `%s' used the deprecated sysctl "
			"system call with ", current->comm);
		for (i = 0; i < nlen; i++)
			printk("%d.", name[i]);
		printk("\n");
	}
	return;
}

#define WARN_ONCE_HASH_BITS 8
#define WARN_ONCE_HASH_SIZE (1<<WARN_ONCE_HASH_BITS)

static DECLARE_BITMAP(warn_once_bitmap, WARN_ONCE_HASH_SIZE);

#define FNV32_OFFSET 2166136261U
#define FNV32_PRIME 0x01000193

/*
 * Print each legacy sysctl (approximately) only once.
 * To avoid making the tables non-const use a external
 * hash-table instead.
 * Worst case hash collision: 6, but very rarely.
 * NOTE! We don't use the SMP-safe bit tests. We simply
 * don't care enough.
 */
static void warn_on_bintable(const int *name, int nlen)
{
	int i;
	u32 hash = FNV32_OFFSET;

	/* FNV-1a hash of the numeric sysctl name */
	for (i = 0; i < nlen; i++)
		hash = (hash ^ name[i]) * FNV32_PRIME;
	hash %= WARN_ONCE_HASH_SIZE;
	if (__test_and_set_bit(hash, warn_once_bitmap))
		return;
	deprecated_sysctl_warning(name, nlen);
}

/*
 * Common path for the native and compat syscalls: validate nlen, copy
 * the numeric name in from user space, warn, and dispatch.
 */
static ssize_t do_sysctl(int __user *args_name, int nlen,
	void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
	int name[CTL_MAXNAME];
	int i;

	/* Check args->nlen. */
	if (nlen < 0 || nlen > CTL_MAXNAME)
		return -ENOTDIR;
	/* Read in the sysctl name for simplicity */
	for (i = 0; i < nlen; i++)
		if (get_user(name[i], args_name + i))
			return -EFAULT;

	warn_on_bintable(name, nlen);

	return binary_sysctl(name, nlen, oldval, oldlen, newval, newlen);
}

SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
{
	struct __sysctl_args tmp;
	size_t oldlen = 0;
	ssize_t result;

	if (copy_from_user(&tmp, args, sizeof(tmp)))
		return -EFAULT;

	if (tmp.oldval && !tmp.oldlenp)
		return -EFAULT;

	if (tmp.oldlenp && get_user(oldlen, tmp.oldlenp))
		return -EFAULT;

	result = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, oldlen,
			   tmp.newval, tmp.newlen);

	/* a non-negative result is the byte count; report it via oldlenp */
	if (result >= 0) {
		oldlen = result;
		result = 0;
	}

	if (tmp.oldlenp && put_user(oldlen, tmp.oldlenp))
		return -EFAULT;

	return result;
}


#ifdef CONFIG_COMPAT
#include <asm/compat.h>

/* 32-bit layout of struct __sysctl_args for the compat syscall */
struct compat_sysctl_args {
	compat_uptr_t	name;
	int		nlen;
	compat_uptr_t	oldval;
	compat_uptr_t	oldlenp;
	compat_uptr_t	newval;
	compat_size_t	newlen;
	compat_ulong_t	__unused[4];
};

asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args)
{
	struct compat_sysctl_args tmp;
	compat_size_t __user *compat_oldlenp;
	size_t oldlen = 0;
	ssize_t result;

	if (copy_from_user(&tmp, args, sizeof(tmp)))
		return -EFAULT;

	if (tmp.oldval && !tmp.oldlenp)
		return -EFAULT;

	compat_oldlenp = compat_ptr(tmp.oldlenp);
	if (compat_oldlenp && get_user(oldlen, compat_oldlenp))
		return -EFAULT;

	result = do_sysctl(compat_ptr(tmp.name), tmp.nlen,
			   compat_ptr(tmp.oldval), oldlen,
			   compat_ptr(tmp.newval), tmp.newlen);

	if (result >= 0) {
		oldlen = result;
		result = 0;
	}

	if (compat_oldlenp && put_user(oldlen, compat_oldlenp))
		return -EFAULT;

	return result;
}

#endif /* CONFIG_COMPAT */
gpl-2.0
Fox-Mc-Cloud/android_kernel_samsung_smdk4412
fs/ext4/resize.c
617
34499
/*
 *  linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */


#define EXT4FS_DEBUG

#include <linux/errno.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"

/* range tests on filesystem block numbers: [first, last) */
#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))

/*
 * Sanity-check the layout of a group about to be added by online resize:
 * the group must be appended directly after the current last (full) group,
 * and its bitmaps/inode table must lie inside the new group without
 * overlapping each other or the group's superblock/GDT area.
 * Also computes and stores input->free_blocks_count as a side effect.
 * Returns 0 if the layout is acceptable, -EINVAL (with an ext4_warning)
 * otherwise.
 */
static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	ext4_group_t group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	/* blocks used by the backup superblock + group descriptors, if any */
	unsigned overhead = ext4_bg_has_super(sb, group) ?
		(1 + ext4_bg_num_gdb(sb, group) +
		 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
	ext4_fsblk_t metaend = start + overhead;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	/* -2 accounts for the block and inode bitmaps */
	input->free_blocks_count = free_blocks_count =
		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (group != sbi->s_groups_count)
		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
			     input->group, sbi->s_groups_count);
	else if (offset != 0)
		ext4_warning(sb, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, "Bad blocks count %u",
			     input->blocks_count);
	else if (!(bh = sb_bread(sb, end - 1)))
		/* also proves the device actually extends to 'end' */
		ext4_warning(sb, "Cannot read last block (%llu)",
			     end - 1);
	else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Block bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}

/*
 * Get buffer 'blk', join it to the transaction and zero it.  Returns the
 * locked-in buffer on success, or an ERR_PTR on buffer or journal failure.
 * Caller owns the buffer reference on success.
 */
static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (!bh)
		return ERR_PTR(-EIO);
	if ((err = ext4_journal_get_write_access(handle, bh))) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		lock_buffer(bh);
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
	}

	return bh;
}

/*
 * If we have fewer than thresh credits, extend by EXT4_MAX_TRANS_DATA.
 * If that fails, restart the transaction & regain write access for the
 * buffer head which is used for block_bitmap modifications.
 */
static int extend_or_restart_transaction(handle_t *handle, int thresh,
					 struct buffer_head *bh)
{
	int err;

	if (ext4_handle_has_enough_credits(handle, thresh))
		return 0;

	err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA);
	if (err < 0)
		return err;
	if (err) {
		/* extend failed: restart and re-join bh to the new handle */
		if ((err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
			return err;
		if ((err = ext4_journal_get_write_access(handle, bh)))
			return err;
	}

	return 0;
}

/*
 * Set up the block and inode bitmaps, and the inode table for the new group.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 */
static int setup_new_group_blocks(struct super_block *sb,
				  struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start = ext4_group_first_block_no(sb, input->group);
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
	unsigned long gdblocks = ext4_bg_num_gdb(sb, input->group);
	struct buffer_head *bh;
	handle_t *handle;
	ext4_fsblk_t block;
	ext4_grpblk_t bit;
	int i;
	int err = 0, err2;

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);

	if (IS_ERR(handle))
		return PTR_ERR(handle);

	mutex_lock(&sbi->s_resize_lock);
	/* a concurrent resize may have added the group already */
	if (input->group != sbi->s_groups_count) {
		err = -EBUSY;
		goto exit_journal;
	}

	/* bh is the new group's block bitmap, zeroed and journaled */
	if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
		err = PTR_ERR(bh);
		goto exit_journal;
	}

	if (ext4_bg_has_super(sb, input->group)) {
		ext4_debug("mark backup superblock %#04llx (+0)\n", start);
		ext4_set_bit(0, bh->b_data);
	}

	/* Copy all of the GDT blocks into the backup in this group */
	for (i = 0, bit = 1, block = start + 1;
	     i < gdblocks; i++, block++, bit++) {
		struct buffer_head *gdb;

		ext4_debug("update backup group %#04llx (+%d)\n", block, bit);

		if ((err = extend_or_restart_transaction(handle, 1, bh)))
			goto exit_bh;

		gdb = sb_getblk(sb, block);
		if (!gdb) {
			err = -EIO;
			goto exit_bh;
		}
		if ((err = ext4_journal_get_write_access(handle, gdb))) {
			brelse(gdb);
			goto exit_bh;
		}
		lock_buffer(gdb);
		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
		set_buffer_uptodate(gdb);
		unlock_buffer(gdb);
		err = ext4_handle_dirty_metadata(handle, NULL, gdb);
		if (unlikely(err)) {
			brelse(gdb);
			goto exit_bh;
		}
		ext4_set_bit(bit, bh->b_data);
		brelse(gdb);
	}

	/* Zero out all of the reserved backup group descriptor table blocks */
	/* NOTE(review): this debug message looks copy-pasted from the inode
	 * table zeroout below -- it is the reserved GDT blocks being cleared
	 * here, not inode table blocks. */
	ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			block, sbi->s_itb_per_group);
	err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
			       GFP_NOFS);
	if (err)
		goto exit_bh;
	for (i = 0, bit = gdblocks + 1; i < reserved_gdb; i++, bit++)
		ext4_set_bit(bit, bh->b_data);

	ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap,
		   input->block_bitmap - start);
	ext4_set_bit(input->block_bitmap - start, bh->b_data);
	ext4_debug("mark inode bitmap %#04llx (+%llu)\n", input->inode_bitmap,
		   input->inode_bitmap - start);
	ext4_set_bit(input->inode_bitmap - start, bh->b_data);

	/* Zero out all of the inode table blocks */
	block = input->inode_table;
	ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			block, sbi->s_itb_per_group);
	err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
	if (err)
		goto exit_bh;
	for (i = 0, bit = input->inode_table - start;
	     i < sbi->s_itb_per_group; i++, bit++)
		ext4_set_bit(bit, bh->b_data);

	if ((err = extend_or_restart_transaction(handle, 2, bh)))
		goto exit_bh;

	/* mark the padding past the end of the group as in-use */
	ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
			     bh->b_data);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto exit_bh;
	}
	brelse(bh);
	/* Mark unused entries in inode bitmap used */
	ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
		   input->inode_bitmap, input->inode_bitmap - start);
	if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) {
		err = PTR_ERR(bh);
		goto exit_journal;
	}

	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			     bh->b_data);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (unlikely(err))
		ext4_std_error(sb, err);
exit_bh:
	brelse(bh);

exit_journal:
	mutex_unlock(&sbi->s_resize_lock);
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	return err;
}

/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
				  unsigned *five, unsigned *seven)
{
	unsigned *min = three;	/* counter currently holding the smallest value */
	int mult = 3;
	unsigned ret;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
		/* Non-sparse: every group has backups, so just count up. */
		ret = *min;
		*min += 1;
		return ret;
	}

	/* Pick whichever of the 3/5/7 power sequences is smallest now. */
	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}

	/* Return the current value and advance that sequence. */
	ret = *min;
	*min *= mult;

	return ret;
}

/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in current filesystem that have BACKUPS, or -ve error code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       struct buffer_head *primary)
{
	const ext4_fsblk_t blk = primary->b_blocknr;
	const ext4_group_t end = EXT4_SB(sb)->s_groups_count;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		/* Each entry must point at this GDT block's backup in grp. */
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk) {
			ext4_warning(sb, "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
				     grp *
				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
				     blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}

/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * Don't need to update the block bitmaps because the blocks are still in use.
*
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       struct ext4_new_group_data *input,
		       struct buffer_head **primary)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned long gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc;
	struct buffer_head *dind;
	int gdbackups;
	struct ext4_iloc iloc;
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyways.
	 */
	if (EXT4_SB(sb)->s_sbh->b_blocknr !=
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
		ext4_warning(sb, "won't resize using backup superblock at %llu",
			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
		return -EPERM;
	}

	*primary = sb_bread(sb, gdblock);
	if (!*primary)
		return -EIO;

	if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
		err = gdbackups;
		goto exit_bh;
	}

	/* Find our reserved slot in the resize inode's dindirect block. */
	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = sb_bread(sb, le32_to_cpu(*data));
	if (!dind) {
		err = -EIO;
		goto exit_bh;
	}

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext4_warning(sb, "new group %u GDT block %llu not reserved",
			     input->group, gdblock);
		err = -EINVAL;
		goto exit_dind;
	}

	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (unlikely(err))
		goto exit_dind;

	err = ext4_journal_get_write_access(handle, *primary);
	if (unlikely(err))
		goto exit_sbh;

	/*
	 * NOTE(review): a failure here is only logged, not propagated —
	 * execution deliberately continues (matching the original code);
	 * confirm this is intended before changing the error path.
	 */
	err = ext4_journal_get_write_access(handle, dind);
	if (unlikely(err))
		ext4_std_error(sb, err);

	/* ext4_reserve_inode_write() gets a reference on the iloc */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (unlikely(err))
		goto exit_dindj;

	n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
			       GFP_NOFS);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning(sb,
			     "not enough memory for %lu groups", gdb_num + 1);
		goto exit_inode;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove new GDT block from inode double-indirect block and clear out
	 * the new GDT block for use (which also "frees" the backup GDT blocks
	 * from the reserved inode).  We don't need to change the bitmaps for
	 * these blocks, because they are marked as in-use from being in the
	 * reserved inode, and will become GDT blocks (primary and backup).
	 */
	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
	err = ext4_handle_dirty_metadata(handle, NULL, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto exit_inode;
	}
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
	ext4_mark_iloc_dirty(handle, inode, &iloc);
	memset((*primary)->b_data, 0, sb->s_blocksize);
	err = ext4_handle_dirty_metadata(handle, NULL, *primary);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto exit_inode;
	}
	brelse(dind);

	/* Publish the enlarged group-descriptor array. */
	o_group_desc = EXT4_SB(sb)->s_group_desc;
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	n_group_desc[gdb_num] = *primary;
	EXT4_SB(sb)->s_group_desc = n_group_desc;
	EXT4_SB(sb)->s_gdb_count++;
	kfree(o_group_desc);

	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	if (err)
		ext4_std_error(sb, err);
	return err;

exit_inode:
	kfree(n_group_desc);
	/* ext4_handle_release_buffer(handle, iloc.bh); */
	brelse(iloc.bh);
exit_dindj:
	/* ext4_handle_release_buffer(handle, dind); */
exit_sbh:
	/* ext4_handle_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
exit_dind:
	brelse(dind);
exit_bh:
	brelse(*primary);

	ext4_debug("leaving with error %d\n", err);
	return err;
}

/*
 * Called when we are adding a new group which has a backup copy of each of
 * the
GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      struct ext4_new_group_data *input)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext4_iloc iloc;
	ext4_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = sb_bread(sb, le32_to_cpu(*data));
	if (!dind) {
		err = -EIO;
		goto exit_free;
	}

	/* First reserved primary GDT block follows the in-use GDT blocks. */
	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
					 EXT4_ADDR_PER_BLOCK(sb));
	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext4_warning(sb, "reserved block %llu"
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = sb_bread(sb, blk);
		if (!primary[res]) {
			err = -EIO;
			goto exit_bh;
		}
		if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		/* The dindirect entries wrap around within the block. */
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		if ((err = ext4_journal_get_write_access(handle, primary[i]))) {
			/*
			int j;
			for (j = 0; j < i; j++)
				ext4_handle_release_buffer(handle, primary[j]);
			 */
			goto exit_bh;
		}
	}

	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = input->group * EXT4_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		/* printk("reserving backup %lu[%u] = %lu\n",
		       primary[i]->b_blocknr, gdbackups,
		       blk + primary[i]->b_blocknr); */
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
		if (!err)
			err = err2;
	}
	inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
	ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}

/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need take the s_resize_lock for this, because these
 * blocks are not otherwise touched by the filesystem code when it is
 * mounted.  We don't need to worry about last changing from
 * sbi->s_groups_count, because the worst that can happen is that we
 * do not copy the full number of backups at this time.  The resize
 * which changed s_groups_count will backup again.
 */
static void update_backups(struct super_block *sb,
			   int blk_off, char *data, int size)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	const ext4_group_t last = sbi->s_groups_count;
	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	ext4_group_t group;
	int rest = sb->s_blocksize - size;	/* bytes to zero-pad per block */
	handle_t *handle;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) {
		struct buffer_head *bh;

		/* Out of journal space, and can't get more - abort - so sad */
		if (ext4_handle_valid(handle) &&
		    handle->h_buffer_credits == 0 &&
		    ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
		    (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
			break;

		bh = sb_getblk(sb, group * bpg + blk_off);
		if (!bh) {
			err = -EIO;
			break;
		}
		ext4_debug("update metadata backup %#04lx\n",
			   (unsigned long)bh->b_blocknr);
		if ((err = ext4_journal_get_write_access(handle, bh)))
			break;
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (unlikely(err))
			ext4_std_error(sb, err);
		brelse(bh);
	}
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, "can't update backup for group %u (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}

/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock.  Prior to that we have
 * not really "added" the group at all.  We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct buffer_head *primary = NULL;
	struct ext4_group_desc *gdp;
	struct inode *inode = NULL;
	handle_t *handle;
	int gdb_off, gdb_num;
	int err, err2;

	gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
		ext4_warning(sb, "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	/* Guard against block and inode count overflow. */
	if (ext4_blocks_count(es) + input->blocks_count <
	    ext4_blocks_count(es)) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_warning(sb, "inodes_count overflow");
		return -EINVAL;
	}

	if (reserved_gdb || gdb_off == 0) {
		if (!EXT4_HAS_COMPAT_FEATURE(sb,
					     EXT4_FEATURE_COMPAT_RESIZE_INODE)
		    || !le16_to_cpu(es->s_reserved_gdt_blocks)) {
			ext4_warning(sb,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = ext4_iget(sb, EXT4_RESIZE_INO);
		if (IS_ERR(inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(inode);
		}
	}

	if ((err = verify_group_input(sb, input)))
		goto exit_put;

	if ((err = setup_new_group_blocks(sb, input)))
		goto exit_put;

	/*
	 * We will always be modifying at least the superblock and  a GDT
	 * block.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups  we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	handle = ext4_journal_start_sb(sb,
				       ext4_bg_has_super(sb, input->group) ?
				       3 + reserved_gdb : 4);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit_put;
	}

	mutex_lock(&sbi->s_resize_lock);
	if (input->group != sbi->s_groups_count) {
		ext4_warning(sb, "multiple resizers run on filesystem!");
		err = -EBUSY;
		goto exit_journal;
	}

	if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
		goto exit_journal;

	/*
	 * We will only either add reserved group blocks to a backup group
	 * or remove reserved blocks for the first group in a new group block.
	 * Doing both would be mean more complex code, and sane people don't
	 * use non-sparse filesystems anymore.  This is already checked above.
	 */
	if (gdb_off) {
		primary = sbi->s_group_desc[gdb_num];
		if ((err = ext4_journal_get_write_access(handle, primary)))
			goto exit_journal;

		if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) &&
		    (err = reserve_backup_gdb(handle, inode, input)))
			goto exit_journal;
	} else if ((err = add_new_gdb(handle, inode, input, &primary)))
		goto exit_journal;

	/*
	 * OK, now we've set up the new group.  Time to make it active.
	 *
	 * We do not lock all allocations via s_resize_lock
	 * so we have to be safe wrt. concurrent accesses the group
	 * data.  So we need to be careful to set all of the relevant
	 * group descriptor data etc. *before* we enable the group.
	 *
	 * The key field here is sbi->s_groups_count: as long as
	 * that retains its old value, nobody is going to access the new
	 * group.
	 *
	 * So first we update all the descriptor metadata for the new
	 * group; then we update the total disk blocks count; then we
	 * update the groups count to enable the group; then finally we
	 * update the free space counts so that the system can start
	 * using the new disk blocks.
	 */

	/* Update group descriptor block for new group */
	gdp = (struct ext4_group_desc *)((char *)primary->b_data +
					 gdb_off * EXT4_DESC_SIZE(sb));

	memset(gdp, 0, EXT4_DESC_SIZE(sb));
	ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
	ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
	ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
	ext4_free_blks_set(sb, gdp, input->free_blocks_count);
	ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
	gdp->bg_flags = cpu_to_le16(EXT4_BG_INODE_ZEROED);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);

	/*
	 * We can allocate memory for mb_alloc based on the new group
	 * descriptor
	 */
	err = ext4_mb_add_groupinfo(sb, input->group, gdp);
	if (err)
		goto exit_journal;

	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	ext4_blocks_count_set(es, ext4_blocks_count(es) +
		input->blocks_count);
	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb));

	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers of s_groups_count *must* hold s_resize_lock
	 * AND
	 * * Writers must perform a smp_wmb() after updating all dependent
	 *   data and before modifying the groups count
	 *
	 * * Readers must hold s_resize_lock over the access
	 * OR
	 * * Readers must perform an smp_rmb() after reading the groups count
	 *   and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();

	/* Update the global fs size fields */
	sbi->s_groups_count++;

	err = ext4_handle_dirty_metadata(handle, NULL, primary);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto exit_journal;
	}

	/* Update the reserved block counts only once the new group is
	 * active. */
	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
				input->reserved_blocks);

	/* Update the free space counts */
	percpu_counter_add(&sbi->s_freeblocks_counter,
			   input->free_blocks_count);
	percpu_counter_add(&sbi->s_freeinodes_counter,
			   EXT4_INODES_PER_GROUP(sb));

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
	    sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group;
		flex_group = ext4_flex_group(sbi, input->group);
		atomic64_add(input->free_blocks_count,
			     &sbi->s_flex_groups[flex_group].free_blocks);
		atomic_add(EXT4_INODES_PER_GROUP(sb),
			   &sbi->s_flex_groups[flex_group].free_inodes);
	}

	ext4_handle_dirty_super(handle, sb);

exit_journal:
	mutex_unlock(&sbi->s_resize_lock);
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;
	if (!err) {
		/* Best-effort: backups are only needed after corruption. */
		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
			       sizeof(struct ext4_super_block));
		update_backups(sb, primary->b_blocknr, primary->b_data,
			       primary->b_size);
	}
exit_put:
	iput(inode);
	return err;
} /* ext4_group_add */

/*
 * Extend the filesystem to the new number of blocks specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
*
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head *bh;
	handle_t *handle;
	int err;
	ext4_group_t group;

	/* We don't need to worry about locking wrt other resizers just
	 * yet: we're going to revalidate es->s_blocks_count after
	 * taking the s_resize_lock below. */
	o_blocks_count = ext4_blocks_count(es);

	if (test_opt(sb, DEBUG))
		/* Fix: message read "from %llu uto %llu" — typo for "to". */
		printk(KERN_DEBUG "EXT4-fs: extending last group from %llu to %llu blocks\n",
		       o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		printk(KERN_ERR "EXT4-fs: filesystem on %s:"
			" too large to resize to %llu blocks safely\n",
			sb->s_id, n_blocks_count);
		if (sizeof(sector_t) < 8)
			ext4_warning(sb, "CONFIG_LBDAF not enabled");
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EBUSY;
	}

	/* Handle the remaining blocks in the last group only. */
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

	if (last == 0) {
		ext4_warning(sb, "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = sb_bread(sb, o_blocks_count + add - 1);
	if (!bh) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext4_free_blocks().
	 */
	handle = ext4_journal_start_sb(sb, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext4_warning(sb, "error %d on journal start", err);
		goto exit_put;
	}

	mutex_lock(&EXT4_SB(sb)->s_resize_lock);
	if (o_blocks_count != ext4_blocks_count(es)) {
		/* Someone else resized while we dropped the lock. */
		ext4_warning(sb, "multiple resizers run on filesystem!");
		mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
		ext4_journal_stop(handle);
		err = -EBUSY;
		goto exit_put;
	}

	if ((err = ext4_journal_get_write_access(handle,
						 EXT4_SB(sb)->s_sbh))) {
		ext4_warning(sb, "error %d on journal write access", err);
		mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
		ext4_journal_stop(handle);
		goto exit_put;
	}
	ext4_blocks_count_set(es, o_blocks_count + add);
	mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	/* We add the blocks to the bitmap and set the group need init bit */
	ext4_add_groupblocks(handle, sb, o_blocks_count, add);
	ext4_handle_dirty_super(handle, sb);
	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	if ((err = ext4_journal_stop(handle)))
		goto exit_put;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: extended group to %llu blocks\n",
		       ext4_blocks_count(es));
	update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es,
		       sizeof(struct ext4_super_block));
exit_put:
	return err;
} /* ext4_group_extend */
gpl-2.0