signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class AbstractFilter { /** * Convert an array of < code > AbstractFilter < / code > objects to a chain of * filters by calling the { @ link # setAnd } the method of each filter object . * @ param filterArray An array of AbstractFilter objects having length * greater than zero . * @ return The first filter in the chain which will be = = to filterArray [ 0] */ public static Filter filterArrayToChain ( Filter [ ] filterArray ) { } }
for ( int i = 0 ; i < ( filterArray . length - 1 ) ; i ++ ) { AbstractFilter thisFilter = ( AbstractFilter ) filterArray [ i ] ; thisFilter . setAnd ( filterArray [ i + 1 ] ) ; } AbstractFilter lastFilter = ( AbstractFilter ) filterArray [ filterArray . length - 1 ] ; lastFilter . setAnd ( null ) ; return filterArray [ 0 ] ;
public class BasicChronology { /** * Gets the maximum number of days in the month specified by the instant . * @ param instant millis from 1970-01-01T00:00:00Z * @ return the maximum number of days in the month */ int getDaysInMonthMax ( long instant ) { } }
int thisYear = getYear ( instant ) ; int thisMonth = getMonthOfYear ( instant , thisYear ) ; return getDaysInYearMonth ( thisYear , thisMonth ) ;
public class DBObject { /** * Return the names of all scalar fields for which new or " remove " values have been * assigned for this object . The given { @ link TableDefinition } is used to determine * which field names are scalars . * @ param tableDef { @ link TableDefinition } of a table . * @ return Set of all updated scalar field names . */ public Set < String > getUpdatedScalarFieldNames ( TableDefinition tableDef ) { } }
Set < String > fieldNames = new HashSet < String > ( ) ; for ( String fieldName : m_valueMap . keySet ( ) ) { if ( tableDef . isScalarField ( fieldName ) ) { fieldNames . add ( fieldName ) ; } } for ( String fieldName : m_valueRemoveMap . keySet ( ) ) { if ( tableDef . isScalarField ( fieldName ) ) { fieldNames . add ( fieldName ) ; } } return fieldNames ;
public class Fat16BootSector { /** * Sets the volume label that is stored in this boot sector . * @ param label the new volume label * @ throws IllegalArgumentException if the specified label is longer * than { @ link # MAX _ VOLUME _ LABEL _ LENGTH } */ public void setVolumeLabel ( String label ) throws IllegalArgumentException { } }
if ( label . length ( ) > MAX_VOLUME_LABEL_LENGTH ) throw new IllegalArgumentException ( "volume label too long" ) ; for ( int i = 0 ; i < MAX_VOLUME_LABEL_LENGTH ; i ++ ) { set8 ( VOLUME_LABEL_OFFSET + i , i < label . length ( ) ? label . charAt ( i ) : 0 ) ; }
public class AttributesUtils { /** * returns whether the pc is at a line number that also appears for a another byte code offset later on in the method . If this occurs we are in a jdk6 * finally replicated block , and so don ' t report this . If the code has no line number table , then just report it . * @ param obj * the code object to find line number attributes from * @ param pc * the pc to check * @ return whether the pc is in user code */ public static boolean isValidLineNumber ( Code obj , int pc ) { } }
LineNumberTable lnt = obj . getLineNumberTable ( ) ; if ( lnt == null ) return true ; LineNumber [ ] lineNumbers = lnt . getLineNumberTable ( ) ; if ( lineNumbers == null ) return true ; int lo = 0 ; int hi = lineNumbers . length - 1 ; int mid = 0 ; int linePC = 0 ; while ( lo <= hi ) { mid = ( lo + hi ) >>> 1 ; linePC = lineNumbers [ mid ] . getStartPC ( ) ; if ( linePC == pc ) break ; if ( linePC < pc ) lo = mid + 1 ; else hi = mid - 1 ; } int lineNo = lineNumbers [ mid ] . getLineNumber ( ) ; for ( int i = 0 ; i < lineNumbers . length ; i ++ ) { if ( ( mid != i ) && ( lineNumbers [ i ] . getLineNumber ( ) == lineNo ) ) return false ; } return true ;
public class NodeSequence { /** * Create a sequence of nodes that iterates over the supplied node keys . Note that the supplied iterator is accessed lazily as * the resulting sequence ' s { @ link # nextBatch ( ) first batch } is { @ link Batch # nextRow ( ) used } . * @ param keys the iterator over the keys of the nodes to be returned ; if null , an { @ link # emptySequence empty instance } is * returned * @ param score the score to return for all of the nodes * @ param workspaceName the name of the workspace in which all of the nodes exist * @ param repository the repository cache used to access the workspaces and cached nodes ; may be null only if the key sequence * is null or empty * @ return the sequence of nodes ; never null */ public static NodeSequence withNodeKeys ( final Collection < NodeKey > keys , final float score , final String workspaceName , final RepositoryCache repository ) { } }
if ( keys == null || keys . isEmpty ( ) ) return emptySequence ( 1 ) ; return withNodeKeys ( keys . iterator ( ) , keys . size ( ) , score , workspaceName , repository ) ;
public class JestClientFactory { /** * Extension point */ protected RequestConfig getRequestConfig ( ) { } }
return RequestConfig . custom ( ) . setConnectTimeout ( httpClientConfig . getConnTimeout ( ) ) . setSocketTimeout ( httpClientConfig . getReadTimeout ( ) ) . build ( ) ;
// NOTE(review): ~300-line DL4J LSTM forward pass. The implementation is highly
// sensitive to workspace scoping (cacheEnter/cacheExit, notifyScopeBorrowed),
// cache-mode branching, and the exact order of in-place INDArray ops (muli/addi
// on views of ifogActivations), so the code is left byte-identical below.
// Structure, as visible in the code: validate input -> slice recurrent weights
// into IFOG + optional peephole vectors -> allocate FwdPassReturn arrays when
// forBackprop -> per-time-step loop computing input/forget/output/modulation
// gates, memory cell state, and hidden activations, with optional mask zeroing.
// Assumes input is rank-2 [m, nIn] (T=1 edge case) or rank-3 [m, nIn, T] per
// the inline comments - TODO confirm against callers.
public class LSTMHelpers { /** * Returns FwdPassReturn object with activations / INDArrays . Allows activateHelper to be used for forward pass , backward pass * and rnnTimeStep whilst being reasonably efficient for all */ static public FwdPassReturn activateHelper ( final BaseLayer layer , final NeuralNetConfiguration conf , final IActivation gateActivationFn , // Activation function for the gates - sigmoid or hard sigmoid ( must be found in range 0 to 1) INDArray input , final INDArray recurrentWeights , // Shape : [ hiddenLayerSize , 4 * hiddenLayerSize + 3 ] ; order : [ wI , wF , wO , wG , wFF , wOO , wGG ] final INDArray originalInputWeights , // Shape : [ n ^ ( L - 1 ) , 4 * hiddenLayerSize ] ; order : [ wi , wf , wo , wg ] final INDArray biases , // Shape : [ 4 , hiddenLayerSize ] ; order : [ bi , bf , bo , bg ] ^ T final boolean training , final INDArray originalPrevOutputActivations , final INDArray originalPrevMemCellState , boolean forBackprop , boolean forwards , final String inputWeightKey , INDArray maskArray , // Input mask : should only be used with bidirectional RNNs + variable length final boolean hasPeepholeConnections , // True for GravesLSTM , false for LSTM final LSTMHelper helper , final CacheMode cacheMode , // cacheMode for layer calling this helper final LayerWorkspaceMgr workspaceMgr ) { } }
// Mini - batch data format : for mini - batch size m , nIn inputs , and T time series length // Data has shape [ m , nIn , T ] . Layer activations / output has shape [ m , nHiddenUnits , T ] if ( input == null || input . length ( ) == 0 ) throw new IllegalArgumentException ( "Invalid input: not set or 0 length" ) ; INDArray inputWeights = originalInputWeights ; INDArray prevOutputActivations = originalPrevOutputActivations ; if ( maskArray != null ) { maskArray = maskArray . castTo ( recurrentWeights . dataType ( ) ) ; } boolean is2dInput = input . rank ( ) < 3 ; // Edge case of T = 1 , may have shape [ m , nIn ] , equiv . to [ m , nIn , 1] input = input . castTo ( inputWeights . dataType ( ) ) ; // No - op if already correct dtype // FIXME int timeSeriesLength = ( int ) ( is2dInput ? 1 : input . size ( 2 ) ) ; int hiddenLayerSize = ( int ) recurrentWeights . size ( 0 ) ; int miniBatchSize = ( int ) input . size ( 0 ) ; INDArray prevMemCellState ; if ( originalPrevMemCellState == null ) { prevMemCellState = Nd4j . create ( inputWeights . dataType ( ) , new long [ ] { miniBatchSize , hiddenLayerSize } , 'f' ) ; } else { prevMemCellState = originalPrevMemCellState . dup ( 'f' ) ; } INDArray recurrentWeightsIFOG = recurrentWeights . get ( all ( ) , interval ( 0 , 4 * hiddenLayerSize ) ) . dup ( 'f' ) ; INDArray wFFTranspose = null ; INDArray wOOTranspose = null ; INDArray wGGTranspose = null ; if ( hasPeepholeConnections ) { wFFTranspose = recurrentWeights . get ( all ( ) , interval ( 4 * hiddenLayerSize , 4 * hiddenLayerSize + 1 ) ) . reshape ( 1 , recurrentWeights . size ( 0 ) ) ; // current wOOTranspose = recurrentWeights . get ( all ( ) , interval ( 4 * hiddenLayerSize + 1 , 4 * hiddenLayerSize + 2 ) ) . reshape ( 1 , recurrentWeights . size ( 0 ) ) ; // current wGGTranspose = recurrentWeights . get ( all ( ) , interval ( 4 * hiddenLayerSize + 2 , 4 * hiddenLayerSize + 3 ) ) . reshape ( 1 , recurrentWeights . 
size ( 0 ) ) ; // previous if ( timeSeriesLength > 1 || forBackprop ) { wFFTranspose = Shape . toMmulCompatible ( wFFTranspose ) ; wOOTranspose = Shape . toMmulCompatible ( wOOTranspose ) ; wGGTranspose = Shape . toMmulCompatible ( wGGTranspose ) ; } } // Allocate arrays for activations : boolean sigmoidGates = gateActivationFn instanceof ActivationSigmoid ; IActivation afn = layer . layerConf ( ) . getActivationFn ( ) ; INDArray outputActivations = null ; FwdPassReturn toReturn = new FwdPassReturn ( ) ; if ( forBackprop ) { toReturn . fwdPassOutputAsArrays = new INDArray [ timeSeriesLength ] ; toReturn . memCellState = new INDArray [ timeSeriesLength ] ; toReturn . memCellActivations = new INDArray [ timeSeriesLength ] ; toReturn . iz = new INDArray [ timeSeriesLength ] ; toReturn . ia = new INDArray [ timeSeriesLength ] ; toReturn . fa = new INDArray [ timeSeriesLength ] ; toReturn . oa = new INDArray [ timeSeriesLength ] ; toReturn . ga = new INDArray [ timeSeriesLength ] ; if ( ! sigmoidGates ) { toReturn . fz = new INDArray [ timeSeriesLength ] ; toReturn . oz = new INDArray [ timeSeriesLength ] ; toReturn . gz = new INDArray [ timeSeriesLength ] ; } if ( training && cacheMode != CacheMode . NONE && workspaceMgr . hasConfiguration ( ArrayType . FF_CACHE ) && workspaceMgr . isWorkspaceOpen ( ArrayType . FF_CACHE ) ) { try ( MemoryWorkspace wsB = workspaceMgr . notifyScopeBorrowed ( ArrayType . FF_CACHE ) ) { outputActivations = Nd4j . create ( inputWeights . dataType ( ) , new long [ ] { miniBatchSize , hiddenLayerSize , timeSeriesLength } , 'f' ) ; // F order to keep time steps together toReturn . fwdPassOutput = outputActivations ; } } else { outputActivations = workspaceMgr . create ( ArrayType . ACTIVATIONS , input . dataType ( ) , new long [ ] { miniBatchSize , hiddenLayerSize , timeSeriesLength } , 'f' ) ; // F order to keep time steps together toReturn . fwdPassOutput = outputActivations ; } } else { outputActivations = workspaceMgr . 
create ( ArrayType . ACTIVATIONS , input . dataType ( ) , new long [ ] { miniBatchSize , hiddenLayerSize , timeSeriesLength } , 'f' ) ; // F order to keep time steps together toReturn . fwdPassOutput = outputActivations ; } // Level1 l1BLAS = Nd4j . getBlasWrapper ( ) . level1 ( ) ; // Input validation : check input data matches nIn if ( input . size ( 1 ) != inputWeights . size ( 0 ) ) { throw new DL4JInvalidInputException ( "Received input with size(1) = " + input . size ( 1 ) + " (input array shape = " + Arrays . toString ( input . shape ( ) ) + "); input.size(1) must match layer nIn size (nIn = " + inputWeights . size ( 0 ) + ")" ) ; } // Input validation : check that if past state is provided , that it has same // These can be different if user forgets to call rnnClearPreviousState ( ) between calls of rnnTimeStep if ( prevOutputActivations != null && prevOutputActivations . size ( 0 ) != input . size ( 0 ) ) { throw new DL4JInvalidInputException ( "Previous activations (stored state) number of examples = " + prevOutputActivations . size ( 0 ) + " but input array number of examples = " + input . size ( 0 ) + ". Possible cause: using rnnTimeStep() without calling" + " rnnClearPreviousState() between different sequences?" ) ; } // initialize prevOutputActivations to zeroes if ( prevOutputActivations == null ) { prevOutputActivations = Nd4j . zeros ( input . dataType ( ) , new long [ ] { miniBatchSize , hiddenLayerSize } ) ; } if ( helper != null ) { FwdPassReturn ret = helper . activate ( layer , conf , gateActivationFn , input , recurrentWeights , inputWeights , biases , training , prevOutputActivations , prevMemCellState , forBackprop , forwards , inputWeightKey , maskArray , hasPeepholeConnections , workspaceMgr ) ; if ( ret != null ) { return ret ; } } for ( int iTimeIndex = 0 ; iTimeIndex < timeSeriesLength ; iTimeIndex ++ ) { try ( MemoryWorkspace ws = workspaceMgr . notifyScopeEntered ( ArrayType . 
RNN_FF_LOOP_WORKING_MEM ) ) { int time = iTimeIndex ; if ( ! forwards ) { time = timeSeriesLength - iTimeIndex - 1 ; } INDArray miniBatchData = ( is2dInput ? input : input . tensorAlongDimension ( time , 1 , 0 ) ) ; // [ Expected shape : [ m , nIn ] . Also deals with edge case of T = 1 , with ' time series ' data of shape [ m , nIn ] , equiv . to [ m , nIn , 1] miniBatchData = Shape . toMmulCompatible ( miniBatchData ) ; // if we ' re using cache here - let ' s create ifogActivations within cache workspace , so all views from this array will be valid in cache cacheEnter ( training , cacheMode , workspaceMgr ) ; // Calculate activations for : network input + forget , output , input modulation gates . Next 3 lines are first part of those INDArray ifogActivations = miniBatchData . mmul ( inputWeights ) ; // Shape : [ miniBatch , 4 * layerSize ] cacheExit ( training , cacheMode , workspaceMgr ) ; Nd4j . gemm ( prevOutputActivations , recurrentWeightsIFOG , ifogActivations , false , false , 1.0 , 1.0 ) ; ifogActivations . addiRowVector ( biases ) ; INDArray inputActivations = ifogActivations . get ( all ( ) , interval ( 0 , hiddenLayerSize ) ) ; if ( forBackprop ) { if ( shouldCache ( training , cacheMode , workspaceMgr ) ) { cacheEnter ( training , cacheMode , workspaceMgr ) ; toReturn . iz [ time ] = inputActivations . dup ( 'f' ) ; cacheExit ( training , cacheMode , workspaceMgr ) ; } else { toReturn . iz [ time ] = workspaceMgr . dup ( ArrayType . BP_WORKING_MEM , inputActivations , 'f' ) ; } } layer . layerConf ( ) . getActivationFn ( ) . getActivation ( inputActivations , training ) ; if ( forBackprop ) { if ( shouldCache ( training , cacheMode , workspaceMgr ) ) { cacheEnter ( training , cacheMode , workspaceMgr ) ; toReturn . ia [ time ] = inputActivations . dup ( 'f' ) ; cacheExit ( training , cacheMode , workspaceMgr ) ; } else { toReturn . ia [ time ] = workspaceMgr . leverageTo ( ArrayType . 
BP_WORKING_MEM , inputActivations ) ; } } INDArray forgetGateActivations = ifogActivations . get ( all ( ) , interval ( hiddenLayerSize , 2 * hiddenLayerSize ) ) ; if ( hasPeepholeConnections ) { INDArray pmcellWFF = prevMemCellState . dup ( 'f' ) . muliRowVector ( wFFTranspose ) ; forgetGateActivations . addi ( pmcellWFF ) ; } // Above line : treats matrix as a vector . Can only do this because we ' re sure both pwcelWFF and forgetGateACtivations are f order , offset 0 and have same strides if ( forBackprop && ! sigmoidGates ) { if ( shouldCache ( training , cacheMode , workspaceMgr ) ) { cacheEnter ( training , cacheMode , workspaceMgr ) ; toReturn . fz [ time ] = forgetGateActivations . dup ( 'f' ) ; // Forget gate pre - out ( z ) cacheExit ( training , cacheMode , workspaceMgr ) ; } else { toReturn . fz [ time ] = workspaceMgr . dup ( ArrayType . BP_WORKING_MEM , forgetGateActivations , 'f' ) ; // Forget gate pre - out ( z ) } } gateActivationFn . getActivation ( forgetGateActivations , training ) ; if ( forBackprop ) { if ( shouldCache ( training , cacheMode , workspaceMgr ) ) { cacheEnter ( training , cacheMode , workspaceMgr ) ; toReturn . fa [ time ] = forgetGateActivations . dup ( 'f' ) ; cacheExit ( training , cacheMode , workspaceMgr ) ; } else { toReturn . fa [ time ] = workspaceMgr . leverageTo ( ArrayType . BP_WORKING_MEM , forgetGateActivations ) ; } } INDArray inputModGateActivations = ifogActivations . get ( all ( ) , interval ( 3 * hiddenLayerSize , 4 * hiddenLayerSize ) ) ; if ( hasPeepholeConnections ) { INDArray pmcellWGG = prevMemCellState . dup ( 'f' ) . muliRowVector ( wGGTranspose ) ; inputModGateActivations . addi ( pmcellWGG ) ; } if ( forBackprop && ! sigmoidGates ) { cacheEnter ( training , cacheMode , workspaceMgr ) ; toReturn . gz [ time ] = workspaceMgr . dup ( ArrayType . 
BP_WORKING_MEM , inputModGateActivations , 'f' ) ; // Input modulation gate pre - out ( z ) cacheExit ( training , cacheMode , workspaceMgr ) ; } gateActivationFn . getActivation ( inputModGateActivations , training ) ; if ( forBackprop ) { if ( shouldCache ( training , cacheMode , workspaceMgr ) ) { cacheEnter ( training , cacheMode , workspaceMgr ) ; toReturn . ga [ time ] = inputModGateActivations . dup ( 'f' ) ; cacheExit ( training , cacheMode , workspaceMgr ) ; } else { toReturn . ga [ time ] = workspaceMgr . leverageTo ( ArrayType . BP_WORKING_MEM , inputModGateActivations ) ; } } // Memory cell state INDArray currentMemoryCellState ; INDArray inputModMulInput ; if ( forBackprop ) { cacheEnter ( training , cacheMode , workspaceMgr ) ; currentMemoryCellState = workspaceMgr . dup ( ArrayType . BP_WORKING_MEM , prevMemCellState , 'f' ) . muli ( forgetGateActivations ) ; cacheExit ( training , cacheMode , workspaceMgr ) ; // this variable isn ' t stored in cache inputModMulInput = inputModGateActivations . dup ( 'f' ) . muli ( inputActivations ) ; } else { currentMemoryCellState = workspaceMgr . leverageTo ( ArrayType . FF_WORKING_MEM , forgetGateActivations . muli ( prevMemCellState ) ) ; // TODO optimize without the copy inputModMulInput = inputModGateActivations . muli ( inputActivations ) ; } currentMemoryCellState . addi ( inputModMulInput ) ; INDArray outputGateActivations = ifogActivations . get ( all ( ) , interval ( 2 * hiddenLayerSize , 3 * hiddenLayerSize ) ) ; if ( hasPeepholeConnections ) { INDArray pmcellWOO = currentMemoryCellState . dup ( 'f' ) . muliRowVector ( wOOTranspose ) ; outputGateActivations . addi ( pmcellWOO ) ; } if ( forBackprop && ! sigmoidGates ) { cacheEnter ( training , cacheMode , workspaceMgr ) ; toReturn . oz [ time ] = workspaceMgr . dup ( ArrayType . BP_WORKING_MEM , outputGateActivations , 'f' ) ; // Output gate activations cacheExit ( training , cacheMode , workspaceMgr ) ; } gateActivationFn . 
getActivation ( outputGateActivations , training ) ; if ( forBackprop ) { if ( shouldCache ( training , cacheMode , workspaceMgr ) ) { cacheEnter ( training , cacheMode , workspaceMgr ) ; toReturn . oa [ time ] = outputGateActivations . dup ( 'f' ) ; cacheExit ( training , cacheMode , workspaceMgr ) ; } else { toReturn . oa [ time ] = workspaceMgr . leverageTo ( ArrayType . BP_WORKING_MEM , outputGateActivations ) ; // TODO optimize without leverage } } // / / / / / same as with iFogActivations - if we use cache , let ' s create this array right there cacheEnter ( training , cacheMode , workspaceMgr ) ; // LSTM unit outputs : INDArray currMemoryCellActivation ; currMemoryCellActivation = workspaceMgr . dup ( ArrayType . FF_WORKING_MEM , currentMemoryCellState , 'f' ) ; currMemoryCellActivation = afn . getActivation ( currMemoryCellActivation , training ) ; cacheExit ( training , cacheMode , workspaceMgr ) ; INDArray currHiddenUnitActivations ; if ( forBackprop ) { cacheEnter ( training , cacheMode , workspaceMgr ) ; currHiddenUnitActivations = workspaceMgr . dup ( ArrayType . BP_WORKING_MEM , currMemoryCellActivation , 'f' ) . muli ( outputGateActivations ) ; // Expected shape : [ m , hiddenLayerSize ] cacheExit ( training , cacheMode , workspaceMgr ) ; } else { currHiddenUnitActivations = currMemoryCellActivation . muli ( outputGateActivations ) ; // Expected shape : [ m , hiddenLayerSize ] } if ( maskArray != null ) { // Mask array is present : bidirectional RNN - > need to zero out these activations to avoid // incorrectly using activations from masked time steps ( i . e . , want 0 initialization in both directions ) // We * also * need to apply this to the memory cells , as they are carried forward // Mask array has shape [ minibatch , timeSeriesLength ] - > get column INDArray timeStepMaskColumn = maskArray . getColumn ( time , true ) ; currHiddenUnitActivations . muliColumnVector ( timeStepMaskColumn ) ; currentMemoryCellState . 
muliColumnVector ( timeStepMaskColumn ) ; } currentMemoryCellState = workspaceMgr . leverageTo ( ArrayType . FF_WORKING_MEM , currentMemoryCellState ) ; // TODO optimize , without the leverage if ( forBackprop ) { toReturn . fwdPassOutputAsArrays [ time ] = currHiddenUnitActivations ; toReturn . memCellState [ time ] = currentMemoryCellState ; toReturn . memCellActivations [ time ] = currMemoryCellActivation ; if ( training && cacheMode != CacheMode . NONE && workspaceMgr . hasConfiguration ( ArrayType . FF_CACHE ) && workspaceMgr . isWorkspaceOpen ( ArrayType . FF_CACHE ) ) { toReturn . memCellActivations [ time ] = workspaceMgr . leverageTo ( ArrayType . FF_CACHE , toReturn . memCellActivations [ time ] ) ; toReturn . memCellState [ time ] = workspaceMgr . leverageTo ( ArrayType . FF_CACHE , toReturn . memCellState [ time ] ) ; } if ( cacheMode != CacheMode . NONE ) { outputActivations . tensorAlongDimension ( time , 1 , 0 ) . assign ( currHiddenUnitActivations ) ; } } else { outputActivations . tensorAlongDimension ( time , 1 , 0 ) . assign ( currHiddenUnitActivations ) ; } prevOutputActivations = currHiddenUnitActivations ; prevMemCellState = currentMemoryCellState ; // no need to dup here , if that ' s cache - it ' s already within Cache workspace toReturn . lastAct = currHiddenUnitActivations ; // the same as above , already in cache toReturn . lastMemCell = currentMemoryCellState ; } } // toReturn . leverageTo ( ComputationGraph . workspaceExternal ) ; toReturn . prevAct = originalPrevOutputActivations ; toReturn . prevMemCell = originalPrevMemCellState ; return toReturn ;
public class FeatureCollectorTask { /** * Checks if epic information on a feature needs update */ private static boolean isEpicChanged ( Feature feature , Epic epic ) { } }
if ( ! feature . getsEpicAssetState ( ) . equalsIgnoreCase ( epic . getStatus ( ) ) ) { return true ; } if ( ! feature . getsEpicName ( ) . equalsIgnoreCase ( epic . getName ( ) ) || ! feature . getsEpicNumber ( ) . equalsIgnoreCase ( epic . getNumber ( ) ) ) { return true ; } if ( ! StringUtils . isEmpty ( feature . getChangeDate ( ) ) && ! StringUtils . isEmpty ( epic . getChangeDate ( ) ) && ! Objects . equals ( Utilities . parseDateWithoutFraction ( feature . getChangeDate ( ) ) , Utilities . parseDateWithoutFraction ( epic . getChangeDate ( ) ) ) ) { return true ; } if ( ! StringUtils . isEmpty ( feature . getsEpicBeginDate ( ) ) && ! StringUtils . isEmpty ( epic . getBeginDate ( ) ) && ! Objects . equals ( Utilities . parseDateWithoutFraction ( feature . getsEpicBeginDate ( ) ) , Utilities . parseDateWithoutFraction ( epic . getBeginDate ( ) ) ) ) { return true ; } return ! StringUtils . isEmpty ( feature . getsEpicEndDate ( ) ) && ! StringUtils . isEmpty ( epic . getEndDate ( ) ) && ! Objects . equals ( Utilities . parseDateWithoutFraction ( feature . getsEpicEndDate ( ) ) , Utilities . parseDateWithoutFraction ( epic . getEndDate ( ) ) ) ;
public class A_CmsResourceType { /** * Returns a list of property objects that are attached to the resource on creation . < p > * It ' s possible to use OpenCms macros for the property values . * Please see { @ link CmsMacroResolver } for allowed macro values . < p > * @ param properties the ( optional ) properties provided by the user * @ param resolver the resolver used to resolve the macro values * @ return a list of property objects that are attached to the resource on creation */ protected List < CmsProperty > processDefaultProperties ( List < CmsProperty > properties , CmsMacroResolver resolver ) { } }
if ( ( m_defaultProperties == null ) || ( m_defaultProperties . size ( ) == 0 ) ) { // no default properties are defined return properties ; } // the properties must be copied since the macros could contain macros that are // resolved differently for every user / context ArrayList < CmsProperty > result = new ArrayList < CmsProperty > ( ) ; Iterator < CmsProperty > i = m_defaultProperties . iterator ( ) ; while ( i . hasNext ( ) ) { // create a clone of the next property CmsProperty property = ( i . next ( ) ) . clone ( ) ; // resolve possible macros in the property values if ( property . getResourceValue ( ) != null ) { property . setResourceValue ( resolver . resolveMacros ( property . getResourceValue ( ) ) ) ; } if ( property . getStructureValue ( ) != null ) { property . setStructureValue ( resolver . resolveMacros ( property . getStructureValue ( ) ) ) ; } // save the new property in the result list result . add ( property ) ; } // add the original properties if ( properties != null ) { result . addAll ( properties ) ; } // return the result return result ;
public class Util { /** * Compose two enumerations into one . * @ param e1 an enumeration * @ param e2 another enumeration * @ return an enumeration containing every element from < code > e1 < / code > and < code > e2 < / code > */ static < T > Enumeration < T > compose ( Enumeration < T > e1 , Enumeration < T > e2 ) { } }
// return the composite of e1 and e2 , or whichever is non - empty return isEmpty ( e1 ) ? e2 : isEmpty ( e2 ) ? e1 : new CompositeEnumeration < T > ( e1 ) . add ( e2 ) ;
public class ForkJoinPool { /** * Returns an estimate of the number of worker threads that are * not blocked waiting to join tasks or for other managed * synchronization . This method may overestimate the * number of running threads . * @ return the number of worker threads */ public int getRunningThreadCount ( ) { } }
int rc = 0 ; WorkQueue [ ] ws ; WorkQueue w ; if ( ( ws = workQueues ) != null ) { for ( int i = 1 ; i < ws . length ; i += 2 ) { if ( ( w = ws [ i ] ) != null && w . isApparentlyUnblocked ( ) ) ++ rc ; } } return rc ;
public class CmsLock { /** * Determines whether to show the lock dialog depending on the users settings and the dilaog type . < p > * In case of locking a folder , a confirmation dialog is needed if any sub resources are already locked . < p > * @ return true if dialogs should be shown , otherwise false */ public boolean showConfirmation ( ) { } }
boolean showConfirmation = getSettings ( ) . getUserSettings ( ) . getDialogShowLock ( ) ; if ( DIALOG_TYPE_LOCK . equals ( getParamDialogtype ( ) ) ) { // in case of locking resources , check if there are locked sub resources in the selected folder ( s ) showConfirmation = showConfirmation || ( getLockedResources ( ) . size ( ) > 0 ) ; } return showConfirmation ;
public class FeedbackWindowTinyLfuPolicy { /** * Evicts from the admission window into the probation space . If the size exceeds the maximum , * then the admission candidate and probation ' s victim are evaluated and one is evicted . */ private void evict ( ) { } }
if ( sizeWindow <= maxWindow ) { return ; } Node candidate = headWindow . next ; sizeWindow -- ; candidate . remove ( ) ; candidate . status = Status . PROBATION ; candidate . appendToTail ( headProbation ) ; if ( data . size ( ) > maximumSize ) { Node evict ; Node victim = headProbation . next ; if ( admittor . admit ( candidate . key , victim . key ) ) { evict = victim ; } else if ( adapt ( candidate ) ) { evict = victim ; } else { evict = candidate ; feedback . put ( candidate . key ) ; } data . remove ( evict . key ) ; evict . remove ( ) ; policyStats . recordEviction ( ) ; }
public class LineDocRecordReader { /** * / * ( non - Javadoc ) * @ see org . apache . hadoop . mapred . RecordReader # next ( java . lang . Object , java . lang . Object ) */ public synchronized boolean next ( DocumentID key , LineDocTextAndOp value ) throws IOException { } }
if ( pos >= end ) { return false ; } // key is document id , which are bytes until first space if ( ! readInto ( key . getText ( ) , SPACE ) ) { return false ; } // read operation : i / d / u , or ins / del / upd , or insert / delete / update Text opText = new Text ( ) ; if ( ! readInto ( opText , SPACE ) ) { return false ; } String opStr = opText . toString ( ) ; DocumentAndOp . Op op ; if ( opStr . equals ( "i" ) || opStr . equals ( "ins" ) || opStr . equals ( "insert" ) ) { op = DocumentAndOp . Op . INSERT ; } else if ( opStr . equals ( "d" ) || opStr . equals ( "del" ) || opStr . equals ( "delete" ) ) { op = DocumentAndOp . Op . DELETE ; } else if ( opStr . equals ( "u" ) || opStr . equals ( "upd" ) || opStr . equals ( "update" ) ) { op = DocumentAndOp . Op . UPDATE ; } else { // default is insert op = DocumentAndOp . Op . INSERT ; } value . setOp ( op ) ; if ( op == DocumentAndOp . Op . DELETE ) { return true ; } else { // read rest of the line return readInto ( value . getText ( ) , EOL ) ; }
public class RebondTool { /** * Rebonding using a Binary Space Partition Tree . Note , that any bonds * defined will be deleted first . It assumes the unit of 3D space to * be 1 Å . */ public void rebond ( IAtomContainer container ) throws CDKException { } }
container . removeAllBonds ( ) ; maxCovalentRadius = 0.0 ; // construct a new binary space partition tree bspt = new Bspt ( 3 ) ; Iterator < IAtom > atoms = container . atoms ( ) . iterator ( ) ; while ( atoms . hasNext ( ) ) { IAtom atom = atoms . next ( ) ; double myCovalentRadius = atom . getCovalentRadius ( ) ; if ( myCovalentRadius == 0.0 ) { throw new CDKException ( "Atom(s) does not have covalentRadius defined." ) ; } if ( myCovalentRadius > maxCovalentRadius ) maxCovalentRadius = myCovalentRadius ; TupleAtom tupleAtom = new TupleAtom ( atom ) ; bspt . addTuple ( tupleAtom ) ; } // rebond all atoms atoms = container . atoms ( ) . iterator ( ) ; while ( atoms . hasNext ( ) ) { bondAtom ( container , ( IAtom ) atoms . next ( ) ) ; }
public class DefaultPlaceholderStrategy { /** * Add a placeholder to the strategy * @ param placeholder name of the placholder * @ param value value of the placeholder */ @ Override public void addPlaceholder ( String placeholder , String value ) { } }
placeholderMap . put ( placeholder , value ) ;
public class RuleMatchAsXmlSerializer { /** * Get an XML representation of the given rule matches . * @ param text the original text that was checked , used to get the context of the matches * @ param contextSize the desired context size in characters * @ param unknownWords unknown words to be printed in a separated list * @ since 3.0 */ public String ruleMatchesToXml ( List < RuleMatch > ruleMatches , String text , int contextSize , ApiPrintMode xmlMode , Language lang , List < String > unknownWords ) { } }
String xmlSnippet = ruleMatchesToXmlSnippet ( ruleMatches , text , contextSize ) ; switch ( xmlMode ) { case START_API : return getXmlStart ( lang , null ) + xmlSnippet ; case CONTINUE_API : return xmlSnippet ; case END_API : return xmlSnippet + getXmlUnknownWords ( unknownWords ) + getXmlEnd ( ) ; case NORMAL_API : return getXmlStart ( lang , null ) + xmlSnippet + getXmlUnknownWords ( unknownWords ) + getXmlEnd ( ) ; } throw new IllegalArgumentException ( "Unknown XML mode: " + xmlMode ) ;
public class AmazonWorkLinkClient {
    /**
     * Provides information about the certificate authority.
     *
     * @param request the request object containing all options for the operation
     * @return result of the DescribeWebsiteCertificateAuthority operation returned by the service
     * @throws UnauthorizedException you are not authorized to perform this action
     * @throws InternalServerErrorException the service is temporarily unavailable
     * @throws InvalidRequestException the request is not valid
     * @throws ResourceNotFoundException the requested resource was not found
     * @throws TooManyRequestsException the number of requests exceeds the limit
     * @sample AmazonWorkLink.DescribeWebsiteCertificateAuthority
     */
    @Override
    public DescribeWebsiteCertificateAuthorityResult describeWebsiteCertificateAuthority(
            DescribeWebsiteCertificateAuthorityRequest request) {
        // Run client-side request handlers/validation before dispatching to the service.
        request = beforeClientExecution(request);
        return executeDescribeWebsiteCertificateAuthority(request);
    }
}
public class DescribeConfigurationAggregatorSourcesStatusResult { /** * Returns an AggregatedSourceStatus object . * @ param aggregatedSourceStatusList * Returns an AggregatedSourceStatus object . */ public void setAggregatedSourceStatusList ( java . util . Collection < AggregatedSourceStatus > aggregatedSourceStatusList ) { } }
if ( aggregatedSourceStatusList == null ) { this . aggregatedSourceStatusList = null ; return ; } this . aggregatedSourceStatusList = new com . amazonaws . internal . SdkInternalList < AggregatedSourceStatus > ( aggregatedSourceStatusList ) ;
public class FactoryFiducial {
    /**
     * Wrapper around chessboard calibration detector. Refine with lines is set to true
     * automatically. This isn't being used for calibration and it's better to use the
     * whole line.
     *
     * @param config description of the chessboard, may be null
     * @param dimen grid dimensions of the target
     * @param imageType type of image it's processing
     * @return FiducialDetector
     */
    @Deprecated
    public static <T extends ImageGray<T>> CalibrationFiducialDetector<T> calibChessboard(
            @Nullable ConfigChessboard config, ConfigGridDimen dimen, Class<T> imageType) {
        return new CalibrationFiducialDetector<>(config, dimen, imageType);
    }
}
public class XMLUtil {
    /**
     * Replies the value that corresponds to the specified attribute's path.
     *
     * <p>The path is an ordered list of tag names ended by the name of the attribute.
     *
     * @param document the XML document to explore, must not be null
     * @param casesSensitive indicates if the {@code path} components are case sensitive
     * @param path the list of tag names ended by the attribute's name
     * @return the value of the specified attribute or <code>null</code> if it
     *     was not found in the document
     */
    @Pure
    public static String getAttributeValue(Node document, boolean casesSensitive, String... path) {
        assert document != null : AssertMessages.notNullParameter(0);
        // Delegate to the indexed variant, starting at the first path element.
        return getAttributeValue(document, casesSensitive, 0, path);
    }
}
public class MapOfLists {
    /**
     * Returns the list stored under the given key, or {@code null} if no list
     * is mapped to that key. (The original javadoc described an "add" — this
     * method only reads.)
     *
     * @param key the key whose list is looked up
     * @return the list mapped to {@code key}, or {@code null}
     */
    public List<V> get(K key) {
        return (List<V>) map.get(key);
    }
}
public class StandardMessageResolver {
    /**
     * Format a message, merging it with its parameters, before returning.
     *
     * <p>This is meant to be overridden by subclasses if necessary. The default
     * mechanism will simply use a standard {@link java.text.MessageFormat} instance.
     *
     * @param locale the locale
     * @param message the resolved message
     * @param messageParameters the message parameters (might be null)
     * @return the formatted message
     */
    protected String formatMessage(final Locale locale, final String message, final Object[] messageParameters) {
        return StandardMessageResolutionUtils.formatMessage(locale, message, messageParameters);
    }
}
public class GenericDocumentComplexConverter {
    /**
     * Restores all the dots in the keys where {@link #TO_REPLACE_DOTS} is found.
     *
     * @param document document to modify
     * @return restored document
     */
    public static Document restoreDots(Document document) {
        // Reverse of the dot-escaping step: substitute the placeholder back to '.'
        return modifyKeys(document, key -> key.replace(TO_REPLACE_DOTS, "."), TO_REPLACE_DOTS);
    }
}
public class AbstractSearch {
    /**
     * Aligns all performances in the space and prints those tables to the log file.
     *
     * @param space the current space to align the performances to
     * @param performances the performances to align
     */
    protected void logPerformances(Space space, Vector<Performance> performances) {
        // Delegate to the owner, which performs the actual alignment and logging.
        m_Owner.logPerformances(space, performances);
    }
}
public class HtmlTree { /** * Generates an HTML anchor tag . * @ param ref reference url for the anchor tag * @ param body content for the anchor tag * @ return an HtmlTree object */ public static HtmlTree A ( String ref , Content body ) { } }
HtmlTree htmltree = new HtmlTree ( HtmlTag . A , nullCheck ( body ) ) ; htmltree . addAttr ( HtmlAttr . HREF , encodeURL ( ref ) ) ; return htmltree ;
public class FactoryAccAppBeansSqlite {
    /**
     * <p>Initialize inner factories after clear beans or on startup.</p>
     *
     * <p>Wires the builder and service factories to each other (they hold mutual
     * references), then registers the replicator and database-writer factories.
     * The ordering matters: the builder factory must exist before the service
     * factory can reference it.</p>
     *
     * @throws Exception - an exception
     */
    @Override
    public final synchronized void init() throws Exception {
        // Builder factory, bound back to this application bean factory.
        FactoryBldAccServices<ResultSet> factoryBldAccServices = new FactoryBldAccServices<ResultSet>();
        factoryBldAccServices.setFactoryAppBeans(this);
        setFactoryBldServices(factoryBldAccServices);
        // Service factory; it and the builder factory reference each other.
        FactoryAccServices<ResultSet> factoryAccServices = new FactoryAccServices<ResultSet>();
        factoryAccServices.setFactoryAppBeans(this);
        factoryAccServices.setFactoryBldAccServices(factoryBldAccServices);
        factoryBldAccServices.setFactoryAccServices(factoryAccServices);
        setFactoryOverBeans(factoryAccServices);
        // XML-over-HTTPS replicator factory.
        FactoryAccReplicatorXmlHttps<ResultSet> factoryReplicatorXmlHttps = new FactoryAccReplicatorXmlHttps<ResultSet>();
        factoryReplicatorXmlHttps.setFactoryAppBeans(this);
        setFactoryReplicatorXmlHttps(factoryReplicatorXmlHttps);
        // XML database writer factory.
        FactoryAccDatabaseWriterXml<ResultSet> factoryDatabaseWriterXml = new FactoryAccDatabaseWriterXml<ResultSet>();
        factoryDatabaseWriterXml.setFactoryAppBeans(this);
        setFactoryDatabaseWriterXml(factoryDatabaseWriterXml);
    }
}
public class CmsDocumentTypeAddList {
    /**
     * Returns the systems configured document types that are not assigned
     * to the current indexsource (those that may be added).<p>
     *
     * @return the systems configured document types that are not assigned
     *     to the current indexsource; an empty list (with an error logged)
     *     when the indexsource parameter does not resolve
     */
    private List<CmsSearchDocumentType> documentTypes() {
        CmsSearchManager manager = OpenCms.getSearchManager();
        CmsSearchIndexSource indexsource = manager.getIndexSource(getParamIndexsource());
        List<CmsSearchDocumentType> result;
        if (indexsource != null) {
            // Collect the names of every document type the system knows about.
            List<String> systemDoctypeNames = new ArrayList<String>();
            Iterator<CmsSearchDocumentType> itDocTypes = manager.getDocumentTypeConfigs().iterator();
            while (itDocTypes.hasNext()) {
                CmsSearchDocumentType docType = itDocTypes.next();
                systemDoctypeNames.add(docType.getName());
            }
            // Accept only the complement of system doctypes to the indexsource's doctypes.
            systemDoctypeNames.removeAll(indexsource.getDocumentTypes());
            // Transform these mere names to real document type configurations.
            result = new ArrayList<CmsSearchDocumentType>(systemDoctypeNames.size());
            Iterator<String> it = systemDoctypeNames.iterator();
            String doctypename = "";
            CmsSearchDocumentType doctype;
            while (it.hasNext()) {
                doctypename = it.next();
                if (doctypename != null) {
                    doctype = manager.getDocumentTypeConfig(doctypename);
                    // Skip names that no longer resolve to a configuration.
                    if (doctype != null) {
                        result.add(doctype);
                    }
                }
            }
        } else {
            result = Collections.emptyList();
            if (LOG.isErrorEnabled()) {
                LOG.error(Messages.get().getBundle().key(Messages.ERR_SEARCHINDEX_EDIT_MISSING_PARAM_1, "indexsource"));
            }
        }
        return result;
    }
}
public class CmsCollectionsGenericWrapper {
    /**
     * Provides a wrapper to create a {@link LRUMap} with the given size that avoids
     * warnings with Java 1.5 generic code.<p>
     *
     * @param <K> the type of keys maintained by the returned map
     * @param <V> the type of mapped values
     * @param size the initial size of the created Map
     * @return a {@link LRUMap} with the given size of the required generic type
     */
    @SuppressWarnings("unchecked")
    public static <K, V> Map<K, V> createLRUMap(int size) {
        // LRUMap is a raw (pre-generics) type; the suppression scopes the unchecked cast.
        return new LRUMap(size);
    }
}
public class RawPacket {
    /**
     * Read a byte region from specified offset in the RTP packet and with
     * specified length into a given buffer.
     *
     * <p>Preconditions (enforced via assertions only, so they are unchecked
     * unless assertions are enabled): {@code off >= 0}, {@code len > 0},
     * {@code outBuff} non-null and large enough, and the region within the
     * packet buffer's limit.
     *
     * @param off start offset in the RTP packet of the region to be read
     * @param len length of the region to be read
     * @param outBuff output buffer
     */
    public void readRegionToBuff(int off, int len, byte[] outBuff) {
        assert off >= 0;
        assert len > 0;
        assert outBuff != null;
        assert outBuff.length >= len;
        assert buffer.limit() >= off + len;
        // Position the packet buffer at the region start, then bulk-copy.
        buffer.position(off);
        buffer.get(outBuff, 0, len);
    }
}
public class DOM2DTM {
    /**
     * Given a node handle, return the XPath node name. This should be
     * the name as described by the XPath data model, NOT the DOM-style name.
     *
     * @param nodeHandle the id of the node
     * @return String name of this node, which may be an empty string
     */
    public String getNodeNameX(int nodeHandle) {
        String name;
        short type = getNodeType(nodeHandle);
        switch (type) {
            case DTM.NAMESPACE_NODE: {
                Node node = getNode(nodeHandle); // assume not null
                name = node.getNodeName();
                // XPath names namespace nodes by prefix only; the default
                // namespace declaration ("xmlns") has an empty name.
                if (name.startsWith("xmlns:")) {
                    name = QName.getLocalPart(name);
                } else if (name.equals("xmlns")) {
                    name = "";
                }
            }
            break;
            case DTM.ATTRIBUTE_NODE:
            case DTM.ELEMENT_NODE:
            case DTM.ENTITY_REFERENCE_NODE:
            case DTM.PROCESSING_INSTRUCTION_NODE: {
                Node node = getNode(nodeHandle); // assume not null
                name = node.getNodeName();
            }
            break;
            default:
                // Text, comment, document, etc. have no XPath expanded name.
                name = "";
        }
        return name;
    }
}
public class CollectedStatistics { /** * Returns the variance of all StopWatches recorded . NB : This * call causes all of the StopWatches to be traversed , which makes it fairly slow . * @ return The variance . */ public synchronized double variance ( ) { } }
long n = 0 ; double mean = 0 ; double s = 0.0 ; for ( double x : m_times . m_values ) { n ++ ; double delta = x - mean ; mean += delta / n ; s += delta * ( x - mean ) ; } return ( s / n ) ;
public class AutoElasticsearch {
    /**
     * {@inheritDoc}
     *
     * @deprecated This is available for any storm cluster that operates against
     *     the older method of obtaining credentials.
     */
    @Override
    public void populateCredentials(Map<String, String> credentials, Map topologyConfiguration) {
        // Delegate to the three-argument overload with no extra context.
        populateCredentials(credentials, topologyConfiguration, null);
    }
}
public class IPv6AddressSection {
    /**
     * This produces a string with no compressed segments and all segments of full
     * length, which is 4 characters for IPv6 segments and 3 characters for IPv4 segments.
     *
     * @return the full-length normalized string, computed lazily and cached
     */
    @Override
    public String toFullString() {
        String result;
        // Lazily compute and cache the string on first use. NOTE(review): the
        // unsynchronized check-then-store looks like a benign race (worst case the
        // string is computed twice) — confirm the cache tolerates concurrent access.
        if (hasNoStringCache() || (result = getStringCache().fullString) == null) {
            getStringCache().fullString = result = toNormalizedString(IPv6StringCache.fullParams);
        }
        return result;
    }
}
public class JDBCUtil { /** * Adds a column ( with name ' cname ' and definition ' cdef ' ) to the specified table . * @ param afterCname ( optional ) the name of the column after which to add the new column . * @ return true if the column was added , false if it already existed . */ public static boolean addColumn ( Connection conn , String table , String cname , String cdef , String afterCname ) throws SQLException { } }
if ( tableContainsColumn ( conn , table , cname ) ) { // Log . info ( " Database table ' " + table + " ' already has column ' " + cname + " ' . " ) ; return false ; } String update = "ALTER TABLE " + table + " ADD COLUMN " + cname + " " + cdef ; if ( afterCname != null ) { update += " AFTER " + afterCname ; } PreparedStatement stmt = null ; try { stmt = conn . prepareStatement ( update ) ; stmt . executeUpdate ( ) ; } finally { close ( stmt ) ; } log . info ( "Database column '" + cname + "' added to table '" + table + "'." ) ; return true ;
public class AbstractPartial { /** * Gets the index of the specified field , or - 1 if the field is unsupported . * @ param type the type to check , may be null which returns - 1 * @ return the index of the field , - 1 if unsupported */ public int indexOf ( DateTimeFieldType type ) { } }
for ( int i = 0 , isize = size ( ) ; i < isize ; i ++ ) { if ( getFieldType ( i ) == type ) { return i ; } } return - 1 ;
public class Template {
    /**
     * Binds a variable into the template context. When the template is compiled,
     * the {@code dynamic} flag decides whether the engine may infer a fixed type
     * for the variable: {@code false} (the default) means the variable always maps
     * to the same Java type and its type can be inferred; {@code true} means the
     * variable may correspond to different Java types (or is a container holding
     * elements of differing types), so the optimizer must not assume a single type.
     *
     * @param varName name of the template variable
     * @param o value bound to the variable
     * @param dynamic true to disable type inference for this variable
     */
    public void binding(String varName, Object o, boolean dynamic) {
        ctx.set(varName, o, dynamic);
        // ctx.globalVar.put(varName, o);
        if (dynamic) {
            // Track dynamically-typed variables so the compiler skips inference for them.
            ctx.objectKeys.add(varName);
        }
    }
}
public class CommandOutputResolverSupport {
    /**
     * Overridable hook to check whether {@code selector} can be assigned from the
     * provider type {@code provider}. This method descends the component type
     * hierarchy and considers primitive/wrapper type conversion.
     *
     * @param selector must not be {@literal null}
     * @param provider must not be {@literal null}
     * @return {@literal true} if selector can be assigned from its provider type
     */
    protected boolean isAssignableFrom(OutputSelector selector, OutputType provider) {
        ResolvableType selectorType = selector.getOutputType();
        // Specialize the provider type with the selector's codec before comparing.
        ResolvableType resolvableType = provider.withCodec(selector.getRedisCodec());
        return selectorType.isAssignableFrom(resolvableType);
    }
}
public class ChangesOnMyIssueNotificationHandler {
    /**
     * Creates the {@link EmailDeliveryRequest} for the specified {@code recipient}
     * with issues from the specified {@code notification} it is the assignee of.
     *
     * @return {@code null} when the recipient is the assignee of no issue in
     *     {@code notification}.
     */
    @CheckForNull
    private static EmailDeliveryRequest toEmailDeliveryRequest(NotificationWithProjectKeys notification,
            EmailRecipient recipient, Set<String> subscribedProjectKeys) {
        Set<ChangedIssue> recipientIssuesByProject = notification.getIssues().stream()
            // keep only issues whose assignee login matches the recipient
            .filter(issue -> issue.getAssignee()
                .filter(assignee -> recipient.getLogin().equals(assignee.getLogin()))
                .isPresent())
            // and only issues in projects the recipient is subscribed to
            .filter(issue -> subscribedProjectKeys.contains(issue.getProject().getKey()))
            .collect(toSet(notification.getIssues().size()));
        if (recipientIssuesByProject.isEmpty()) {
            return null;
        }
        return new EmailDeliveryRequest(recipient.getEmail(),
            new ChangesOnMyIssuesNotification(notification.getChange(), recipientIssuesByProject));
    }
}
public class TreeNode { /** * Removes the child at the specified index from this node ' s children and * sets that node ' s parent to { @ code null } . * @ param index the index in this node ' s child array of the child to remove * @ return { @ code this } tree - node , for method chaining * @ throws ArrayIndexOutOfBoundsException if the { @ code index } is out of * bounds */ public TreeNode < T > remove ( final int index ) { } }
if ( _children == null ) { throw new ArrayIndexOutOfBoundsException ( format ( "Child index is out of bounds: %s" , index ) ) ; } final TreeNode < T > child = _children . remove ( index ) ; assert child . _parent == this ; child . setParent ( null ) ; if ( _children . isEmpty ( ) ) { _children = null ; } return this ;
public class LoadedFieldSet {
    /**
     * Add a load of given field at given instruction.
     *
     * @param handle the instruction
     * @param field the field
     */
    public void addLoad(InstructionHandle handle, XField field) {
        // Bump the per-field load counter, remember which field this
        // instruction touches, and mark the bytecode position as a load site.
        getLoadStoreCount(field).loadCount++;
        handleToFieldMap.put(handle, field);
        loadHandleSet.set(handle.getPosition());
    }
}
public class Weld { /** * Define the set of extensions . * @ param extensions * @ return self */ public Weld extensions ( Extension ... extensions ) { } }
this . extensions . clear ( ) ; for ( Extension extension : extensions ) { addExtension ( extension ) ; } return this ;
public class DestinationManager {
    /**
     * Starts a new DeletePubSubMsgsThread.
     *
     * <p>A fresh thread is created each time because this is invoked from a new
     * DestinationManager. The thread runs at minimum priority (1) so cleanup
     * work does not compete with message processing.
     */
    private void startDeletePubSubMsgsThread() {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "startDeletePubSubMsgsThread");
        // every time create DeletePubSubMsgsThread as this is called from
        // a new DestinationManager.
        Thread delThread = new Thread((new DeletePubSubMsgsThread(messageProcessor)), "startDeletePubSubMsgsThread");
        delThread.setPriority(1);
        delThread.start();
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "startDeletePubSubMsgsThread");
    }
}
public class RecordMarkerFailedEventAttributesMarshaller {
    /**
     * Marshall the given parameter object into the protocol representation.
     *
     * @param recordMarkerFailedEventAttributes the object to marshal, must not be null
     * @param protocolMarshaller destination marshaller
     * @throws SdkClientException if the argument is null or marshalling fails
     */
    public void marshall(RecordMarkerFailedEventAttributes recordMarkerFailedEventAttributes,
            ProtocolMarshaller protocolMarshaller) {
        if (recordMarkerFailedEventAttributes == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Marshal each attribute against its static field binding.
            protocolMarshaller.marshall(recordMarkerFailedEventAttributes.getMarkerName(), MARKERNAME_BINDING);
            protocolMarshaller.marshall(recordMarkerFailedEventAttributes.getCause(), CAUSE_BINDING);
            protocolMarshaller.marshall(recordMarkerFailedEventAttributes.getDecisionTaskCompletedEventId(),
                    DECISIONTASKCOMPLETEDEVENTID_BINDING);
        } catch (Exception e) {
            // Wrap any low-level failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class CalculatorApp { /** * A simple adder of request parameters { @ code t1 } and { @ code t2} * @ param request The request to handle the addition for * @ return A response of an integer representing the sum */ static Response < Integer > add ( Request request ) { } }
Optional < String > t1 = request . parameter ( "t1" ) ; Optional < String > t2 = request . parameter ( "t2" ) ; if ( t1 . isPresent ( ) && t2 . isPresent ( ) ) { int result = Integer . valueOf ( t1 . get ( ) ) + Integer . valueOf ( t2 . get ( ) ) ; return Response . forPayload ( result ) ; } else { return Response . forStatus ( Status . BAD_REQUEST ) ; }
public class SpringReader {
    /**
     * Read the single Spring XML configuration file located at the specified path,
     * performing PropertyPlaceHolder interpolation, extracting all beans which
     * implement the RequestHandler interface, and construct a RequestMapper for
     * those RequestHandlers on the specified ServletContext.
     *
     * @param configPath the path to the Spring XML file containing the configuration
     * @param servletContext the ServletContext where the RequestHandlers should be mapped
     * @return a new RequestMapper which delegates requests for the ServletContext
     */
    public static RequestMapper readSpringConfig(String configPath, ServletContext servletContext) {
        LOGGER.info("Loading from config file " + configPath);
        // Replaces any previously held context; the "file:" prefix forces
        // filesystem (not classpath) resolution.
        currentContext = new FileSystemXmlApplicationContext("file:" + configPath);
        // Non-eager lookup: include non-singletons, do not force initialization.
        Map<String, RequestHandler> beans = currentContext.getBeansOfType(RequestHandler.class, false, false);
        return new RequestMapper(beans.values(), servletContext);
    }
}
public class SraReader {
    /**
     * Read a run set from the specified file.
     *
     * <p>NOTE(review): {@code FileReader} decodes with the platform default
     * charset — confirm the input files are always in that encoding.
     *
     * @param file file, must not be null
     * @return a run set read from the specified file
     * @throws IOException if an I/O error occurs
     */
    public static RunSet readRunSet(final File file) throws IOException {
        checkNotNull(file);
        // try-with-resources guarantees the reader is closed on all paths.
        try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
            return readRunSet(reader);
        }
    }
}
public class WCOutputStream {
    /**
     * Writes the textual representation of a double to the stream.
     *
     * <p>The numeric text is pure ASCII, so the default-charset byte array has
     * exactly one byte per character and {@code value.length()} is a safe length.
     *
     * @param d the value to print
     * @throws IOException if the underlying stream write fails
     * @see javax.servlet.ServletOutputStream#print(double)
     */
    public void print(double d) throws IOException {
        String value = Double.toString(d);
        this.output.write(value.getBytes(), 0, value.length());
    }
}
public class BosClient {
    /**
     * Deletes the specified bucket. All objects in the bucket must be deleted
     * before the bucket itself can be deleted.
     *
     * <p>Only the owner of a bucket can delete it, regardless of the bucket's
     * access control policy.
     *
     * @param request the request object containing all options for deleting a Bos bucket
     */
    public void deleteBucket(DeleteBucketRequest request) {
        checkNotNull(request, "request should not be null.");
        // Issue an HTTP DELETE; the response body is ignored beyond status handling.
        this.invokeHttpClient(this.createRequest(request, HttpMethodName.DELETE), BosResponse.class);
    }
}
public class forwardingsession {
    /**
     * Use this API to add forwardingsession.
     *
     * @param client the nitro service client used to issue the request
     * @param resource the forwardingsession whose settable fields are copied
     *     into a fresh resource before submission
     * @return the response of the add operation
     * @throws Exception if the add request fails
     */
    public static base_response add(nitro_service client, forwardingsession resource) throws Exception {
        // Copy only the fields the add operation accepts into a new resource.
        forwardingsession addresource = new forwardingsession();
        addresource.name = resource.name;
        addresource.network = resource.network;
        addresource.netmask = resource.netmask;
        addresource.acl6name = resource.acl6name;
        addresource.aclname = resource.aclname;
        addresource.connfailover = resource.connfailover;
        return addresource.add_resource(client);
    }
}
public class ObjectFactory {
    /**
     * Create an instance of {@link JAXBElement}{@code <}{@link LowlimitType}{@code >}.
     *
     * @param value the payload to wrap
     * @return a JAXBElement bound to the MathML "lowlimit" element name
     */
    @XmlElementDecl(namespace = "http://www.w3.org/1998/Math/MathML", name = "lowlimit")
    public JAXBElement<LowlimitType> createLowlimit(LowlimitType value) {
        return new JAXBElement<LowlimitType>(_Lowlimit_QNAME, LowlimitType.class, null, value);
    }
}
public class Camera {
    /**
     * Draw the camera field of view according to a grid.
     *
     * @param g the graphic output
     * @param x the horizontal location
     * @param y the vertical location
     * @param gridH the horizontal grid
     * @param gridV the vertical grid
     * @param surface the surface referential (minimap)
     */
    public void drawFov(Graphic g, int x, int y, int gridH, int gridV, Surface surface) {
        // Grid cell of the camera position within the minimap referential.
        final int h = x + (int) Math.floor((getX() + getViewX()) / gridH);
        // Negated because screen Y grows downward while world Y grows upward.
        // NOTE(review): the horizontal term uses getViewX() but the vertical one
        // uses getHeight() — verify this asymmetry (getViewY() may have been intended).
        final int v = y + (int) -Math.floor((getY() + getHeight()) / gridV);
        // Camera view size expressed in grid cells.
        final int tileWidth = getWidth() / gridH;
        final int tileHeight = getHeight() / gridV;
        g.drawRect(h, v + surface.getHeight(), tileWidth, tileHeight, false);
    }
}
public class ParameterUtil {
    /**
     * Fetches the supplied parameter from the request and converts it to a date.
     * The value of the parameter should be a date formatted like so: 2001-12-25.
     * If the parameter does not exist, is blank, or still equals the date template
     * placeholder, null is returned. If the parameter is not a well-formed date, a
     * data validation exception is thrown with the supplied message.
     *
     * @param req the request to read from
     * @param name the parameter name
     * @param invalidDataMessage message used when the value is malformed
     * @return the parsed date, or null when absent
     * @throws DataValidationException if the value is not a well-formed date
     */
    public static Date getDateParameter(HttpServletRequest req, String name, String invalidDataMessage)
            throws DataValidationException {
        String value = getParameter(req, name, false);
        // Treat an untouched form template value the same as an empty field.
        if (StringUtil.isBlank(value) || DATE_TEMPLATE.equalsIgnoreCase(value)) {
            return null;
        }
        return parseDateParameter(value, invalidDataMessage);
    }
}
public class JavaHelper { /** * Removes the package from the type name from the given type */ public static String removeJavaPackageName ( String className ) { } }
int idx = className . lastIndexOf ( '.' ) ; if ( idx >= 0 ) { return className . substring ( idx + 1 ) ; } else { return className ; }
public class FilesystemContentWriter {
    /**
     * This method implements the ContentWriter interface for writing content to a
     * DataStore. In this case, the DataStore is a local filesystem. The arg spaceId
     * is the path to the destination directory.
     *
     * @param spaceId destination where arg chunk content will be written
     * @param chunkChecksum expected MD5 of the chunk, may be null to skip verification
     * @param chunk content to be written
     * @param properties user-defined properties associated with content
     * @return MD5 of content
     * @throws NotFoundException if the destination space cannot be found
     */
    @Override
    public String writeSingle(String spaceId, String chunkChecksum, ChunkInputStream chunk,
            Map<String, String> properties) throws NotFoundException {
        AddContentResult result = writeContent(spaceId, chunk.getChunkId(), chunk, chunk.getChunkSize(), properties);
        String finalChecksum = chunk.getMD5();
        // Verify integrity only when a checksum was supplied and the chunk
        // preserved its MD5 through the write.
        if (chunkChecksum != null && chunk.md5Preserved()) {
            if (!chunkChecksum.equals(finalChecksum)) {
                // Mismatch is recorded on the result, not thrown.
                result.setState(AddContentResult.State.ERROR);
            }
        }
        result.setMd5(finalChecksum);
        return finalChecksum;
    }
}
public class CmsLinkManager {
    /**
     * Calculates the absolute URI for the "relativeUri" with the given absolute
     * "baseUri" as start.<p>
     *
     * If "relativeUri" is already absolute, it is returned unchanged. This method
     * also returns "relativeUri" unchanged if it is not well-formed.<p>
     *
     * @param relativeUri the relative URI to calculate an absolute URI for
     * @param baseUri the base URI, this must be an absolute URI
     * @return an absolute URI calculated from "relativeUri" and "baseUri"
     */
    public static String getAbsoluteUri(String relativeUri, String baseUri) {
        if (isAbsoluteUri(relativeUri)) {
            // URI is null or already absolute
            return relativeUri;
        }
        try {
            // Resolve against the static base URL first, then against baseUri;
            // only path, query and fragment are kept (scheme/host are dropped).
            URL url = new URL(new URL(m_baseUrl, baseUri), relativeUri);
            StringBuffer result = new StringBuffer(100);
            result.append(url.getPath());
            if (url.getQuery() != null) {
                result.append('?');
                result.append(url.getQuery());
            }
            if (url.getRef() != null) {
                result.append('#');
                result.append(url.getRef());
            }
            return result.toString();
        } catch (MalformedURLException e) {
            // Ill-formed input: fall back to returning the URI unchanged.
            LOG.debug(e.getLocalizedMessage(), e);
            return relativeUri;
        }
    }
}
public class TorqueModelDef {
    /**
     * Generates a column for the given field and adds it to the table, mapping
     * OJB field properties onto the corresponding Torque column properties.
     *
     * @param fieldDef the field
     * @param tableDef the table
     * @return the column def
     */
    private ColumnDef addColumnFor(FieldDescriptorDef fieldDef, TableDef tableDef) {
        String name = fieldDef.getProperty(PropertyHelper.OJB_PROPERTY_COLUMN);
        ColumnDef columnDef = tableDef.getColumn(name);
        // Reuse an existing column of the same name, otherwise create one.
        if (columnDef == null) {
            columnDef = new ColumnDef(name);
            tableDef.addColumn(columnDef);
        }
        // Nested fields keep the Java name of their enclosing field.
        if (!fieldDef.isNested()) {
            columnDef.setProperty(PropertyHelper.TORQUE_PROPERTY_JAVANAME, fieldDef.getName());
        }
        columnDef.setProperty(PropertyHelper.TORQUE_PROPERTY_TYPE,
                fieldDef.getProperty(PropertyHelper.OJB_PROPERTY_JDBC_TYPE));
        columnDef.setProperty(PropertyHelper.TORQUE_PROPERTY_ID,
                fieldDef.getProperty(PropertyHelper.OJB_PROPERTY_ID));
        // Primary keys are implicitly required; otherwise NOT NULL follows the
        // OJB nullable flag (which defaults to true).
        if (fieldDef.getBooleanProperty(PropertyHelper.OJB_PROPERTY_PRIMARYKEY, false)) {
            columnDef.setProperty(PropertyHelper.TORQUE_PROPERTY_PRIMARYKEY, "true");
            columnDef.setProperty(PropertyHelper.TORQUE_PROPERTY_REQUIRED, "true");
        } else if (!fieldDef.getBooleanProperty(PropertyHelper.OJB_PROPERTY_NULLABLE, true)) {
            columnDef.setProperty(PropertyHelper.TORQUE_PROPERTY_REQUIRED, "true");
        }
        // Only database-driven autoincrement is mapped; other strategies are
        // handled outside the schema.
        if ("database".equals(fieldDef.getProperty(PropertyHelper.OJB_PROPERTY_AUTOINCREMENT))) {
            columnDef.setProperty(PropertyHelper.TORQUE_PROPERTY_AUTOINCREMENT, "true");
        }
        columnDef.setProperty(PropertyHelper.TORQUE_PROPERTY_SIZE, fieldDef.getSizeConstraint());
        // Carry optional documentation properties through when present.
        if (fieldDef.hasProperty(PropertyHelper.OJB_PROPERTY_DOCUMENTATION)) {
            columnDef.setProperty(PropertyHelper.OJB_PROPERTY_DOCUMENTATION,
                    fieldDef.getProperty(PropertyHelper.OJB_PROPERTY_DOCUMENTATION));
        }
        if (fieldDef.hasProperty(PropertyHelper.OJB_PROPERTY_COLUMN_DOCUMENTATION)) {
            columnDef.setProperty(PropertyHelper.OJB_PROPERTY_COLUMN_DOCUMENTATION,
                    fieldDef.getProperty(PropertyHelper.OJB_PROPERTY_COLUMN_DOCUMENTATION));
        }
        return columnDef;
    }
}
public class ModuleWriterImpl {
    /**
     * Get the module header: builds the page body tree with top navigation,
     * annotations, and the module title heading.
     *
     * @param heading the heading for the section
     * @return the body tree for the module page
     */
    @Override
    public Content getModuleHeader(String heading) {
        HtmlTree bodyTree = getBody(true, getWindowTitle(mdle.getQualifiedName().toString()));
        // HTML5 output nests the navigation in a <header> element; HTML4 puts it
        // directly in the body.
        HtmlTree htmlTree = (configuration.allowTag(HtmlTag.HEADER)) ? HtmlTree.HEADER() : bodyTree;
        addTop(htmlTree);
        addNavLinks(true, htmlTree);
        if (configuration.allowTag(HtmlTag.HEADER)) {
            bodyTree.addContent(htmlTree);
        }
        HtmlTree div = new HtmlTree(HtmlTag.DIV);
        div.addStyle(HtmlStyle.header);
        Content annotationContent = new HtmlTree(HtmlTag.P);
        addAnnotationInfo(mdle, annotationContent);
        div.addContent(annotationContent);
        // "Open Module" label only in ALL mode for open modules.
        Content label = mdle.isOpen() && (configuration.docEnv.getModuleMode() == ModuleMode.ALL)
                ? contents.openModuleLabel : contents.moduleLabel;
        Content tHeading = HtmlTree.HEADING(HtmlConstants.TITLE_HEADING, true, HtmlStyle.title, label);
        tHeading.addContent(Contents.SPACE);
        Content moduleHead = new RawHtml(heading);
        tHeading.addContent(moduleHead);
        div.addContent(tHeading);
        // HTML5 output hangs the heading under <main>; HTML4 under <body>.
        if (configuration.allowTag(HtmlTag.MAIN)) {
            mainTree.addContent(div);
        } else {
            bodyTree.addContent(div);
        }
        return bodyTree;
    }
}
public class CommerceShippingFixedOptionLocalServiceBaseImpl {
    /**
     * Returns a range of all the commerce shipping fixed options.
     *
     * <p>Useful when paginating results. Returns a maximum of <code>end - start</code>
     * instances. <code>start</code> and <code>end</code> are indexes in the result
     * set, not primary keys; <code>0</code> refers to the first result. Setting both
     * to {@link com.liferay.portal.kernel.dao.orm.QueryUtil#ALL_POS} returns the
     * full result set.
     *
     * @param start the lower bound of the range of commerce shipping fixed options
     * @param end the upper bound of the range of commerce shipping fixed options (not inclusive)
     * @return the range of commerce shipping fixed options
     */
    @Override
    public List<CommerceShippingFixedOption> getCommerceShippingFixedOptions(int start, int end) {
        return commerceShippingFixedOptionPersistence.findAll(start, end);
    }
}
public class AbstractFax4JClientSpi {
    /**
     * This function polls the new statuses for the provided fax jobs. Polling is
     * best-effort: a null job, a lookup failure, or a null status all map to
     * {@code FaxJobStatus.UNKNOWN}.
     *
     * @param faxJobs the fax jobs to poll
     * @return the fax job statuses, one per input job, never containing null
     */
    protected FaxJobStatus[] pollForFaxJobStatuesImpl(FaxJob[] faxJobs) {
        final FaxJobStatus[] statuses = new FaxJobStatus[faxJobs.length];
        for (int i = 0; i < faxJobs.length; i++) {
            FaxJobStatus status = FaxJobStatus.UNKNOWN;
            final FaxJob job = faxJobs[i];
            if (job != null) {
                try {
                    status = this.getFaxJobStatus(job);
                } catch (RuntimeException exception) {
                    // best-effort polling: a failed lookup leaves the status UNKNOWN
                }
            }
            // Normalize a null lookup result to UNKNOWN as well.
            statuses[i] = (status == null) ? FaxJobStatus.UNKNOWN : status;
        }
        return statuses;
    }
}
public class SiteMap {
    /**
     * Finds the closest existing parent node for the given URI. No new nodes
     * are created; the deepest ancestor already present in the tree is
     * returned.
     *
     * @param uri the URI to locate; may be {@code null}
     * @return the deepest existing ancestor node, or {@code null} if the URI
     *         is {@code null} or its host is not yet in the tree
     */
    public synchronized SiteNode findClosestParent(URI uri) {
        if (uri == null) {
            return null;
        }
        SiteNode lastParent = null;
        SiteNode parent = getRoot();
        String folder = "";
        try {
            String host = getHostName(uri);
            // If the host itself is not in the tree there can be no parent.
            parent = findChild(parent, host);
            if (parent == null) {
                return null;
            }
            lastParent = parent;
            List<String> path = model.getSession().getTreePath(uri);
            for (int i = 0; i < path.size(); i++) {
                folder = path.get(i);
                if (folder != null && !folder.equals("")) {
                    if (i == path.size() - 1) {
                        // Last segment is the node itself; its parent is the
                        // deepest node matched so far.
                        lastParent = parent;
                    } else {
                        parent = findChild(parent, folder);
                        if (parent == null) {
                            // Path diverges from the tree; keep the deepest
                            // match found so far.
                            break;
                        }
                        lastParent = parent;
                    }
                }
            }
        } catch (URIException e) {
            log.error(e.getMessage(), e);
        }
        return lastParent;
    }
}
public class Restful { /** * Gets the next step ( element ) to be executed on the model . An element can * be either a vertex or an edge , * @ return The label of the next step as a plain or a JSON formatted string . If the label is empty or * non - existent , the label of the step is an empty string . */ @ GET @ Produces ( "text/plain;charset=UTF-8" ) @ Path ( "getNext" ) public String getNext ( ) { } }
logger . debug ( "Received getNext" ) ; JSONObject resultJson ; try { machine . getNextStep ( ) ; resultJson = Util . getStepAsJSON ( machine , verbose , unvisited ) ; resultJson . put ( "result" , "ok" ) ; } catch ( Exception e ) { resultJson = new JSONObject ( ) ; resultJson . put ( "result" , "nok" ) ; resultJson . put ( "error" , e . getMessage ( ) ) ; } return resultJson . toString ( ) ;
public class DefaultTableModel { /** * Adds a new column definition to the table */ public void addColumn ( DefaultTableColumn column ) { } }
column . setModelIndex ( columnModel . getColumnCount ( ) ) ; columnModel . addColumn ( column ) ;
public class DefaultGroovyMethods { /** * Avoids doing unnecessary work when sorting an already sorted set * @ param self an already sorted set * @ return an ordered copy of the sorted set * @ since 2.4.0 */ public static < T > Set < T > toSorted ( SortedSet < T > self ) { } }
return new LinkedHashSet < T > ( self ) ;
public class AbstractRStarTreeNode {
    /**
     * Reads the id of this node, the numEntries and the entries array from the
     * specified stream.
     *
     * @param in the stream to read data from in order to restore the object
     * @throws java.io.IOException if I/O errors occur
     * @throws ClassNotFoundException If the class for an object being restored
     *         cannot be found.
     */
    @Override
    @SuppressWarnings("unchecked")
    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
        // Restores node id and numEntries (read by the superclass).
        super.readExternal(in);
        // TODO: do we need to write/read the capacity?
        final int capacity = in.readInt();
        if (isLeaf()) {
            // Leaf node: entries are point entries. The array is sized to
            // capacity but only the first numEntries slots are populated.
            entries = (E[]) new SpatialPointLeafEntry[capacity];
            for (int i = 0; i < numEntries; i++) {
                SpatialPointLeafEntry s = new SpatialPointLeafEntry();
                s.readExternal(in);
                entries[i] = (E) s;
            }
        } else {
            // Directory node: entries describe child nodes.
            entries = (E[]) new SpatialDirectoryEntry[capacity];
            for (int i = 0; i < numEntries; i++) {
                SpatialDirectoryEntry s = new SpatialDirectoryEntry();
                s.readExternal(in);
                entries[i] = (E) s;
            }
        }
    }
}
public class GeneratedDOAuth2UserDaoImpl { /** * query - by method for field updatedBy * @ param updatedBy the specified attribute * @ return an Iterable of DOAuth2Users for the specified updatedBy */ public Iterable < DOAuth2User > queryByUpdatedBy ( java . lang . String updatedBy ) { } }
return queryByField ( null , DOAuth2UserMapper . Field . UPDATEDBY . getFieldName ( ) , updatedBy ) ;
public class AmazonHttpClient {
    /**
     * Shuts down this HTTP client object, releasing any resources that might be held open. This is
     * an optional method, and callers are not expected to call it, but can if they want to
     * explicitly release any open resources. Once a client has been shutdown, it cannot be used to
     * make more requests.
     */
    public void shutdown() {
        // Stop the timeout timers first so no new timeout tasks fire while
        // the connection pool is being torn down.
        clientExecutionTimer.shutdown();
        httpRequestTimer.shutdown();
        // Deregister the connection manager from the idle-connection reaper
        // before shutting the pool down.
        IdleConnectionReaper.removeConnectionManager(httpClient.getHttpClientConnectionManager());
        httpClient.getHttpClientConnectionManager().shutdown();
    }
}
public class Streams {
    /**
     * Fills 'dst' with bytes from 'in', throwing EOFException if insufficient bytes are available.
     *
     * @param in the stream to read from
     * @param dst the buffer to fill completely
     * @throws IOException if reading fails or the stream ends before the
     *         buffer is full
     */
    public static void readFully(InputStream in, byte[] dst) throws IOException {
        // Delegate to the offset/length overload, covering the whole buffer.
        readFully(in, dst, 0, dst.length);
    }
}
public class XMLOutputter { /** * Returns a copy of the element stack . The returned array will be a new * array . The size of the array will be equal to the element stack size * ( see { @ link # getElementStackSize ( ) } . * @ return a newly constructed array that contains all the element types * currently on the element stack , or < code > null < / code > if there are no * elements on the stack . * @ since XMLenc 0.22 */ public final String [ ] getElementStack ( ) { } }
if ( _elementStackSize == 0 ) { return null ; } else { String [ ] newStack = new String [ _elementStackSize ] ; System . arraycopy ( _elementStack , 0 , newStack , 0 , _elementStackSize ) ; return newStack ; }
public class Common { /** * Serialize the given object into the given stream */ public static void serialize ( Serializable obj , ByteArrayOutputStream bout ) { } }
try { ObjectOutputStream out = new ObjectOutputStream ( bout ) ; out . writeObject ( obj ) ; out . close ( ) ; } catch ( IOException e ) { throw new IllegalStateException ( "Could not serialize " + obj , e ) ; }
public class ConfigurationBuilder { /** * add class loader , might be used for resolving methods / fields */ public ConfigurationBuilder addClassLoaders ( Collection < ClassLoader > classLoaders ) { } }
return addClassLoaders ( classLoaders . toArray ( new ClassLoader [ classLoaders . size ( ) ] ) ) ;
public class BloatedAssignmentScope {
    /**
     * creates a scope block to describe this branch location.
     *
     * @param seen
     *            the currently parsed opcode
     * @param pc
     *            the current program counter
     */
    private void sawBranch(int seen, int pc) {
        int target = getBranchTarget();
        if (target > pc) {
            // Forward branch.
            if ((seen == Const.GOTO) || (seen == Const.GOTO_W)) {
                int nextPC = getNextPC();
                // Switch targets get their own handling elsewhere; skip them.
                if (!switchTargets.get(nextPC)) {
                    ScopeBlock sb = findScopeBlockWithTarget(rootScopeBlock, pc, nextPC);
                    if (sb == null) {
                        // No enclosing block jumps here: treat the goto as a
                        // loop-forming scope.
                        sb = new ScopeBlock(pc, target);
                        sb.setLoop();
                        sb.setGoto();
                        rootScopeBlock.addChild(sb);
                    } else {
                        // An existing block targets the fall-through; open a
                        // new goto scope starting after this instruction.
                        sb = new ScopeBlock(nextPC, target);
                        sb.setGoto();
                        rootScopeBlock.addChild(sb);
                    }
                }
            } else {
                // Conditional forward branch.
                ScopeBlock sb = findScopeBlockWithTarget(rootScopeBlock, pc, target);
                if ((sb != null) && !sb.isLoop() && !sb.isCase() && !sb.hasChildren()) {
                    if (sb.isGoto()) {
                        // Replace the plain goto block with a fresh scope for
                        // this branch, keeping its load/store info.
                        ScopeBlock parent = sb.getParent();
                        sb.pushUpLoadStores();
                        if (parent != null) {
                            parent.removeChild(sb);
                        }
                        sb = new ScopeBlock(pc, target);
                        rootScopeBlock.addChild(sb);
                    } else {
                        // Extend the existing block back to this branch point.
                        sb.pushUpLoadStores();
                        sb.setStart(pc);
                    }
                } else {
                    sb = new ScopeBlock(pc, target);
                    rootScopeBlock.addChild(sb);
                }
            }
        } else {
            // Backward branch: marks a loop. Find the outermost enclosing
            // block whose start is still inside the loop body.
            ScopeBlock sb = findScopeBlock(rootScopeBlock, pc);
            if (sb != null) {
                ScopeBlock parentSB = sb.getParent();
                while (parentSB != null) {
                    if (parentSB.getStart() >= target) {
                        sb = parentSB;
                        parentSB = parentSB.getParent();
                    } else {
                        break;
                    }
                }
                if (sb.getStart() > target) {
                    // The loop may begin in a preceding sibling block.
                    ScopeBlock previous = findPreviousSiblingScopeBlock(sb);
                    if ((previous != null) && (previous.getStart() >= target)) {
                        sb = previous;
                    }
                }
                sb.setLoop();
            }
        }
    }
}
public class JSON {
    /**
     * Encodes a object into a JSON string.
     *
     * @param source A object to encode.
     * @return A JSON string.
     */
    public String encode(Object source) {
        // Delegate to JSONIC; the second argument enables pretty-printing.
        return net.arnx.jsonic.JSON.encode(source, true);
    }
}
public class FactoryKernel { /** * Create an integer table convolution kernel . All the elements are equal to one . * See { @ link boofcv . alg . filter . convolve . ConvolveImageBox } for faster ways to convolve these kernels . * @ param radius kernel ' s radius . * @ return table kernel . */ public static Kernel1D_S32 table1D_I32 ( int radius ) { } }
Kernel1D_S32 ret = new Kernel1D_S32 ( radius * 2 + 1 ) ; for ( int i = 0 ; i < ret . data . length ; i ++ ) { ret . data [ i ] = 1 ; } return ret ;
public class SiteTool { /** * Fixes the Failsafe report page . * @ param root * root element for the report page to fix */ private final void fixReportFailsafe ( final Element root ) { } }
final Collection < Element > elements ; // Found elements final Element heading ; // First h2 heading elements = root . getElementsByTag ( "h2" ) ; if ( ! elements . isEmpty ( ) ) { heading = elements . iterator ( ) . next ( ) ; heading . tagName ( "h1" ) ; heading . text ( "Failsafe Report" ) ; }
public class AccReplicatedSessionDataFactory { /** * ( non - Javadoc ) * @ see org . jdiameter . common . api . app . IAppSessionDataFactory # getAppSessionData ( java . lang . Class , java . lang . String ) */ @ Override public IAccSessionData getAppSessionData ( Class < ? extends AppSession > clazz , String sessionId ) { } }
if ( clazz . equals ( ClientAccSession . class ) ) { ClientAccSessionDataReplicatedImpl data = new ClientAccSessionDataReplicatedImpl ( sessionId , this . mobicentsCluster , this . replicatedSessionDataSource . getContainer ( ) ) ; return data ; } else if ( clazz . equals ( ServerAccSession . class ) ) { ServerAccSessionDataReplicatedImpl data = new ServerAccSessionDataReplicatedImpl ( sessionId , this . mobicentsCluster ) ; return data ; } throw new IllegalArgumentException ( ) ;
public class ServiceApiWrapper {
    /**
     * Returns observable to remove list of participants from a conversation.
     *
     * @param token Comapi access token.
     * @param conversationId ID of a conversation to delete.
     * @param ids List of participant ids to be removed.
     * @return Observable to remove list of participants from a conversation.
     */
    Observable<ComapiResult<Void>> doRemoveParticipants(@NonNull final String token, @NonNull final String conversationId, @NonNull final List<String> ids) {
        // Issue the DELETE with an auth-prefixed token, map the raw response
        // into a ComapiResult, and wrap for logging/error handling.
        return wrapObservable(service.deleteParticipants(AuthManager.addAuthPrefix(token), apiSpaceId, conversationId, ids).map(mapToComapiResult()), log, "Removing participants from " + conversationId);
    }
}
public class ZipFileArtifactNotifier {
    /**
     * Notification API: Remove all registrations of a listener.
     * See {@link ArtifactNotifier#removeListener(ArtifactListener)}.
     *
     * @param listenerToRemove The listener which is to be removed.
     * @return True or false telling if removal of the listener
     *         caused at least one path which had a registered listener
     *         to no longer have a registered listener.
     */
    @Override
    public boolean removeListener(ArtifactListener listenerToRemove) {
        synchronized (listenersLock) {
            ArtifactListenerSelector listenerSelectorToRemove = new ArtifactListenerSelector(listenerToRemove);
            // Pass 1: collect every path this listener is registered on.
            List<String> pathsToRemove = new ArrayList<String>(1);
            for (Map.Entry<String, Collection<ArtifactListenerSelector>> listenersEntry : listeners.entrySet()) {
                for (ArtifactListenerSelector listener : listenersEntry.getValue()) {
                    if (listener.equals(listenerSelectorToRemove)) {
                        pathsToRemove.add(listenersEntry.getKey());
                        break;
                    }
                }
            }
            // Pass 2: unregister. Paths that still have other listeners are
            // removed from 'pathsToRemove' so it ends up holding only paths
            // that became completely unlistened.
            Iterator<String> usePathsToRemove = pathsToRemove.iterator();
            while (usePathsToRemove.hasNext()) {
                String pathToRemove = usePathsToRemove.next();
                Collection<ArtifactListenerSelector> listenersForPath = listeners.get(pathToRemove);
                if (listenersForPath.size() == 1) {
                    listeners.remove(pathToRemove); // The last listener for the path.
                } else {
                    listenersForPath.remove(listenerSelectorToRemove);
                    usePathsToRemove.remove();
                }
            }
            if (!pathsToRemove.isEmpty()) {
                // TODO: Clearing and rebuilding the covering paths collection.
                // Not optimal, but hard to do better, and not necessary
                // since removals should be rare and the registrations
                // are expected to be small.
                //
                // Each removed path which is a covering path may uncover
                // paths. Those need to be found and added to the covering
                // paths collection. But not all previously covered paths
                // should be added, as some newly uncovered paths may cover
                // other newly uncovered paths, and an uncovered path may
                // itself be a not-yet-processed path to remove.
                coveringPaths.clear();
                for (String listenerPath : listeners.keySet()) {
                    @SuppressWarnings("unused")
                    boolean addedUncoveredPath = addCoveringPath(listenerPath);
                }
                // Don't know if the covering paths collection was perturbed;
                // assume that it was.
                updateMonitor();
            }
            return (!pathsToRemove.isEmpty());
        }
    }
}
public class SynonymsEntry {
    /**
     * Adds a new synonym word. The newly added word inherits the part of
     * speech and the entity from the base (root) word when they are not set.
     *
     * @param word the word to add to this synonyms entry
     */
    public void add(IWord word) {
        inheritRootDefaults(word);
        word.setSyn(this);
        synsList.add(word);
    }

    /** Copies entity and part-of-speech from the root word when unset. */
    private void inheritRootDefaults(IWord word) {
        if (word.getEntity() == null) {
            word.setEntity(rootWord.getEntity());
        }
        if (word.getPartSpeech() == null) {
            word.setPartSpeech(rootWord.getPartSpeech());
        }
    }
}
public class AWSMediaConnectClient {
    /**
     * Displays the details of a flow. The response includes the flow ARN, name, and Availability Zone, as well as
     * details about the source, outputs, and entitlements.
     *
     * @param request the DescribeFlow request
     * @return Result of the DescribeFlow operation returned by the service.
     * @throws BadRequestException The request that you submitted is not valid.
     * @throws InternalServerErrorException AWS Elemental MediaConnect can't fulfill your request because it
     *         encountered an unexpected condition.
     * @throws ForbiddenException You don't have the required permissions to perform this operation.
     * @throws NotFoundException AWS Elemental MediaConnect did not find the resource that you specified in the
     *         request.
     * @throws ServiceUnavailableException AWS Elemental MediaConnect is currently unavailable. Try again later.
     * @throws TooManyRequestsException You have exceeded the service request rate limit for your AWS Elemental
     *         MediaConnect account.
     * @sample AWSMediaConnect.DescribeFlow
     */
    @Override
    public DescribeFlowResult describeFlow(DescribeFlowRequest request) {
        // Run pre-execution handlers (which may replace the request object)
        // before dispatching the call.
        request = beforeClientExecution(request);
        return executeDescribeFlow(request);
    }
}
public class AbstractTaggingProcessor { /** * Tags the item adding the new field with the specified values . * @ param item * @ param values */ protected void addNewField ( Item item , String values ) { } }
if ( logger . isDebugEnabled ( ) ) { logger . debug ( "Tagging item with field: " + newField + " and value: " + values ) ; } Document document = item . getDescriptorDom ( ) ; if ( document != null ) { Element root = document . getRootElement ( ) ; if ( root != null ) { for ( String value : values . split ( "," ) ) { Element newElement = root . addElement ( newField ) ; newElement . setText ( value ) ; } } }
public class IdentityMatMulStart { /** * Generate a matrix with random values . * @ param nRows number of matrix ' s rows . * @ param nColumns number of matrix ' s columns . * @ return Matrix that consists of random values . */ private Matrix < Double > generateRandomMatrix ( final int nRows , final int nColumns ) { } }
final List < List < Double > > rows = new ArrayList < > ( nRows ) ; final Random random = new Random ( ) ; for ( int i = 0 ; i < nRows ; i ++ ) { final List < Double > row = new ArrayList < > ( nColumns ) ; for ( int j = 0 ; j < nColumns ; j ++ ) { row . add ( random . nextDouble ( ) ) ; } rows . add ( row ) ; } return new RowMatrix ( rows ) ;
public class ObjectFactory {
    /**
     * Create an instance of {@link JAXBElement}{@code <}{@link BigInteger}{@code >}.
     *
     * @param value Java instance representing xml element's value.
     * @return the new instance of {@link JAXBElement}{@code <}{@link BigInteger}{@code >}
     */
    @XmlElementDecl(namespace = "http://www.opengis.net/gml", name = "minimumOccurs")
    public JAXBElement<BigInteger> createMinimumOccurs(BigInteger value) {
        // Scope is null because this element declaration is global
        // (not scoped to a particular class).
        return new JAXBElement<BigInteger>(_MinimumOccurs_QNAME, BigInteger.class, null, value);
    }
}
public class SQLEnum { /** * Hay un problema se puede llamar a este metodo antes de cargar la clase hija por lo que habria un proiblema * @ param clazz * @ param value * @ return */ protected static SQLEnum value ( Class < ? extends SQLEnum > clazz , String value ) { } }
if ( value == null ) { return null ; } value = value . trim ( ) ; if ( valuesCache . containsKey ( clazz ) ) { Map < String , SQLEnum > map2 = valuesCache . get ( clazz ) ; if ( map2 . containsKey ( value ) ) { return map2 . get ( value ) ; } } return null ;
public class PrometheusController {
    /**
     * Emits Prometheus metrics for every cache: gauges for object counts and
     * hit/usage ratios, counters for cumulative hit/miss totals. Ratios are
     * reported as fractions in [0, 1] (percent values divided by 100).
     *
     * CHECKSTYLE:OFF
     */
    private void reportOnCacheInformations() { // NOPMD
        // CHECKSTYLE:ON
        final List<CacheInformations> cacheInformationsList = javaInformations.getCacheInformationsList();
        // Key each cache by its pre-rendered Prometheus label set so the
        // label string is built only once per cache.
        final Map<String, CacheInformations> cacheInfos = new LinkedHashMap<String, CacheInformations>(cacheInformationsList.size());
        for (final CacheInformations cacheInfo : cacheInformationsList) {
            final String fields = "{cache_name=\"" + sanitizeName(cacheInfo.getName()) + "\"}";
            cacheInfos.put(fields, cacheInfo);
        }
        // Gauges: current state of each cache.
        printHeader(MetricType.GAUGE, "cache_in_memory_count", "cache in memory count");
        for (final Map.Entry<String, CacheInformations> entry : cacheInfos.entrySet()) {
            printLongWithFields("cache_in_memory_count", entry.getKey(), entry.getValue().getInMemoryObjectCount());
        }
        printHeader(MetricType.GAUGE, "cache_in_memory_used_pct", "in memory used percent");
        for (final Map.Entry<String, CacheInformations> entry : cacheInfos.entrySet()) {
            printDoubleWithFields("cache_in_memory_used_pct", entry.getKey(), (double) entry.getValue().getInMemoryPercentUsed() / 100);
        }
        printHeader(MetricType.GAUGE, "cache_in_memory_hits_pct", "cache in memory hit percent");
        for (final Map.Entry<String, CacheInformations> entry : cacheInfos.entrySet()) {
            printDoubleWithFields("cache_in_memory_hits_pct", entry.getKey(), (double) entry.getValue().getInMemoryHitsRatio() / 100);
        }
        printHeader(MetricType.GAUGE, "cache_on_disk_count", "cache on disk count");
        for (final Map.Entry<String, CacheInformations> entry : cacheInfos.entrySet()) {
            printLongWithFields("cache_on_disk_count", entry.getKey(), entry.getValue().getOnDiskObjectCount());
        }
        printHeader(MetricType.GAUGE, "cache_hits_pct", "cache hits percent");
        for (final Map.Entry<String, CacheInformations> entry : cacheInfos.entrySet()) {
            printDoubleWithFields("cache_hits_pct", entry.getKey(), (double) entry.getValue().getHitsRatio() / 100);
        }
        // Counters: cumulative totals since cache creation.
        printHeader(MetricType.COUNTER, "cache_in_memory_hits_count", "total cache in memory hit count");
        for (final Map.Entry<String, CacheInformations> entry : cacheInfos.entrySet()) {
            printLongWithFields("cache_in_memory_hits_count", entry.getKey(), entry.getValue().getInMemoryHits());
        }
        printHeader(MetricType.COUNTER, "cache_hits_count", "total cache hit count");
        for (final Map.Entry<String, CacheInformations> entry : cacheInfos.entrySet()) {
            printLongWithFields("cache_hits_count", entry.getKey(), entry.getValue().getCacheHits());
        }
        printHeader(MetricType.COUNTER, "cache_misses_count", "total cache misses count");
        for (final Map.Entry<String, CacheInformations> entry : cacheInfos.entrySet()) {
            printLongWithFields("cache_misses_count", entry.getKey(), entry.getValue().getCacheMisses());
        }
    }
}
public class NetServerEnumIterator {
    /**
     * {@inheritDoc}
     *
     * @see java.util.Iterator#next()
     */
    @Override
    public FileEntry next() {
        // One-element look-ahead: 'this.next' holds the entry to return now;
        // advance() fetches the one after it.
        FileEntry n = this.next;
        try {
            FileEntry ne = advance();
            if (ne == null) {
                // Enumeration exhausted: release resources eagerly and
                // return the final entry.
                doClose();
                return n;
            }
            this.next = ne;
        } catch (CIFSException e) {
            // Best-effort: log, terminate the enumeration (next == null),
            // and still return the current entry rather than propagate.
            log.warn("Enumeration failed", e);
            this.next = null;
        }
        return n;
    }
}
public class StringUtil { /** * < code > true < / code > is returned if the specified < code > str < / code > only * includes characters ( ' 0 ' ~ ' 9 ' , ' - ' , ' + ' ) . * < code > false < / code > is return if the specified String is null / empty , or contains empty chars . * " - 123 " = > true * " + 123 " = > true * " 123 " = > true * " + 0 " = > true * " - 0 " = > true * " 0 " = > true * " 0.1 " = > false * " abc " = > false * " 1 a " = > false * " 2e10 " = > false * @ param val * @ return */ public static boolean isAsciiDigtalInteger ( final String str ) { } }
if ( N . isNullOrEmpty ( str ) ) { return false ; } final char [ ] chs = getCharsForReadOnly ( str ) ; int i = 0 , num = 0 ; if ( chs [ i ] == '+' || chs [ i ] == '-' ) { i ++ ; } for ( ; i < chs . length && ( chs [ i ] >= '0' && chs [ i ] <= '9' ) ; i ++ ) { num ++ ; } if ( num == 0 ) { return false ; } return i == chs . length ;
public class AmazonCloudFormationWaiters {
    /**
     * Builds a StackUpdateComplete waiter by using custom parameters waiterParameters and other parameters defined in
     * the waiters specification, and then polls until it determines whether the resource entered the desired state or
     * not, where polling criteria is bound by either default polling strategy or custom polling strategy.
     *
     * @return a waiter that polls DescribeStacks until the stack update
     *         completes, fails, or rolls back
     */
    public Waiter<DescribeStacksRequest> stackUpdateComplete() {
        // Success/failure acceptors: UPDATE_COMPLETE succeeds; the failure and
        // rollback states plus ValidationError terminate the wait as failures.
        // Default polling: up to 120 attempts, 30 seconds apart.
        return new WaiterBuilder<DescribeStacksRequest, DescribeStacksResult>().withSdkFunction(new DescribeStacksFunction(client)).withAcceptors(new StackUpdateComplete.IsUPDATE_COMPLETEMatcher(), new StackUpdateComplete.IsUPDATE_FAILEDMatcher(), new StackUpdateComplete.IsUPDATE_ROLLBACK_FAILEDMatcher(), new StackUpdateComplete.IsUPDATE_ROLLBACK_COMPLETEMatcher(), new StackUpdateComplete.IsValidationErrorMatcher()).withDefaultPollingStrategy(new PollingStrategy(new MaxAttemptsRetryStrategy(120), new FixedDelayStrategy(30))).withExecutorService(executorService).build();
    }
}
public class ManagementClientAsync {
    /**
     * Checks whether a given queue exists or not.
     *
     * @param path - Path of the entity to check
     * @return - True if the entity exists. False otherwise.
     * @throws IllegalArgumentException - path is not null/empty/too long/invalid.
     */
    public CompletableFuture<Boolean> queueExistsAsync(String path) {
        // Validates eagerly so invalid paths throw synchronously.
        EntityNameHelper.checkValidQueueName(path);
        CompletableFuture<Boolean> existsFuture = new CompletableFuture<>();
        // Existence is probed by fetching the queue: a "not found" failure
        // maps to FALSE, any other failure propagates, success maps to TRUE.
        this.getQueueAsync(path).handleAsync((qd, ex) -> {
            if (ex != null) {
                if (ex instanceof MessagingEntityNotFoundException) {
                    existsFuture.complete(Boolean.FALSE);
                    return false;
                }
                existsFuture.completeExceptionally(ex);
                return false;
            }
            existsFuture.complete(Boolean.TRUE);
            return true;
        }, MessagingFactory.INTERNAL_THREAD_POOL);
        return existsFuture;
    }
}
public class BlackBoxExplanationGenerator {
    /**
     * This is a recursive method that builds a hitting set tree to obtain all
     * justifications for an unsatisfiable class.
     *
     * @param entailment the entailment being explained
     * @param justification The current justification for the current entailment. This
     *        corresponds to a node in the hitting set tree.
     * @param allJustifications All of the MUPS that have been found - this set gets populated
     *        over the course of the tree building process. Initially this
     *        should just contain the first justification.
     * @param satPaths Paths that have been completed.
     * @param currentPathContents The contents of the current path. Initially this should be an
     *        empty set.
     * @param maxExplanations stop once this many justifications have been found
     */
    private void constructHittingSetTree(E entailment, Explanation<E> justification, Set<Explanation<E>> allJustifications, Set<Set<OWLAxiom>> satPaths, Set<OWLAxiom> currentPathContents, int maxExplanations) throws OWLException {
        // We go through the current justification, axiom by axiom, and extend
        // the tree with one edge per axiom.
        List<OWLAxiom> orderedJustification = getOrderedJustifications(new ArrayList<OWLAxiom>(justification.getAxioms()), allJustifications);
        while (!orderedJustification.isEmpty()) {
            OWLAxiom axiom = orderedJustification.get(0);
            orderedJustification.remove(0);
            if (allJustifications.size() == maxExplanations) {
                return;
            }
            // Remove the current axiom from the working module: descending an
            // edge of the hitting set tree means "what if this axiom is gone".
            module.remove(axiom);
            currentPathContents.add(axiom);
            boolean earlyTermination = false;
            // Early path termination. If our path contents are a superset of
            // the contents of a completed path then we can terminate here.
            for (Set<OWLAxiom> satPath : satPaths) {
                if (currentPathContents.containsAll(satPath)) {
                    earlyTermination = true;
                    break;
                }
            }
            if (!earlyTermination) {
                Explanation<E> newJustification = null;
                // Justification reuse: any known justification disjoint from
                // the current path is still valid, no recomputation needed.
                for (Explanation<E> foundJustification : allJustifications) {
                    Set<OWLAxiom> foundMUPSCopy = new HashSet<OWLAxiom>(foundJustification.getAxioms());
                    foundMUPSCopy.retainAll(currentPathContents);
                    if (foundMUPSCopy.isEmpty()) {
                        newJustification = foundJustification;
                        break;
                    }
                }
                if (newJustification == null) {
                    newJustification = computeExplanation(entailment); // getExplanation();
                }
                // Generate a new node - i.e. a new justification set.
                if (axiom.isLogicalAxiom() && newJustification.contains(axiom)) {
                    // How can this be the case???
                    throw new OWLRuntimeException("Explanation contains removed axiom: " + axiom + " (Working axioms contains axiom: " + module.contains(axiom) + ")");
                }
                if (!newJustification.isEmpty()) {
                    // Note that finding a previously-seen justification does
                    // not mean we can stop; stopping here causes some
                    // justifications to be missed.
                    boolean added = allJustifications.add(newJustification);
                    if (added) {
                        progressMonitor.foundExplanation(this, newJustification, allJustifications);
                    }
                    if (progressMonitor.isCancelled()) {
                        return;
                    }
                    // Recompute priority here?
                    // MutableTree<Explanation> node = new MutableTree<Explanation>(newJustification);
                    // currentNode.addChild(node, axiom);
                    constructHittingSetTree(entailment, newJustification, allJustifications, satPaths, currentPathContents, maxExplanations);
                    // We have found a new MUPS, so recalculate the ordering of
                    // axioms in the MUPS at the current level.
                    orderedJustification = getOrderedJustifications(orderedJustification, allJustifications);
                } else {
                    // End of current path - add it to the list of paths.
                    satPaths.add(new HashSet<OWLAxiom>(currentPathContents));
                    Explanation exp = new Explanation<E>(entailment, new HashSet<OWLAxiom>(0));
                    MutableTree<Explanation> node = new MutableTree<Explanation>(exp);
                    // currentNode.addChild(node, axiom);
                    // increment(axiom);
                }
            }
            // Back track - go one level up the tree and run for the next axiom.
            currentPathContents.remove(axiom);
            // Done with the axiom that was removed. Add it back in.
            module.add(axiom);
        }
    }
}
public class VdmModel { /** * ( non - Javadoc ) * @ see org . overture . ide . core . ast . IVdmElement # isParseCorrect ( ) */ public synchronized boolean isParseCorrect ( ) { } }
boolean isParseCorrect = true ; for ( IVdmSourceUnit source : vdmSourceUnits ) { if ( source . hasParseErrors ( ) ) { isParseCorrect = false ; break ; } } return isParseCorrect ;
public class FourierTransform { /** * Computes the circular convolution of the given real vectors . Each vector ' s length must be the same . */ private static void convolve ( double [ ] x , double [ ] y , double [ ] out ) { } }
// if ( x . length ! = y . length | | x . length ! = out . length ) // throw new IllegalArgumentException ( " Mismatched lengths " ) ; int n = x . length ; convolve ( x , new double [ n ] , y , new double [ n ] , out , new double [ n ] ) ;
public class PhotosInterface {
    /**
     * Returns the list of people who have favorited a given photo.
     * This method does not require authentication.
     *
     * @param photoId id of the photo to query
     * @param perPage results per page; values &lt;= 0 use the API default
     * @param page page number; values &lt;= 0 use the API default
     * @return List of {@link com.flickr4java.flickr.people.User}
     * @throws FlickrException if the API reports an error
     */
    public Collection<User> getFavorites(String photoId, int perPage, int page) throws FlickrException {
        Map<String, Object> parameters = new HashMap<String, Object>();
        parameters.put("method", METHOD_GET_FAVORITES);
        parameters.put("photo_id", photoId);
        // Paging parameters are only sent when explicitly requested.
        if (perPage > 0) {
            parameters.put("per_page", Integer.toString(perPage));
        }
        if (page > 0) {
            parameters.put("page", Integer.toString(page));
        }
        Response response = transport.get(transport.getPath(), parameters, apiKey, sharedSecret);
        if (response.isError()) {
            throw new FlickrException(response.getErrorCode(), response.getErrorMessage());
        }
        // Build User objects from the <person> elements of the payload.
        List<User> users = new ArrayList<User>();
        Element userRoot = response.getPayload();
        NodeList userNodes = userRoot.getElementsByTagName("person");
        for (int i = 0; i < userNodes.getLength(); i++) {
            Element userElement = (Element) userNodes.item(i);
            User user = new User();
            user.setId(userElement.getAttribute("nsid"));
            user.setUsername(userElement.getAttribute("username"));
            user.setFaveDate(userElement.getAttribute("favedate"));
            users.add(user);
        }
        return users;
    }
}
public class CmsSecurityManager {
    /**
     * Performs a blocking permission check on a resource.<p>
     *
     * If the required permissions are not satisfied by the permissions the user has on the resource,
     * an exception is thrown.<p>
     *
     * @param dbc the current database context
     * @param resource the resource on which permissions are required
     * @param requiredPermissions the set of permissions required to access the resource
     * @param checkLock if true, the lock status of the resource is also checked
     * @param filter the filter for the resource
     * @throws CmsException in case of any i/o error
     * @throws CmsSecurityException if the required permissions are not satisfied
     * @see #hasPermissions(CmsRequestContext, CmsResource, CmsPermissionSet, boolean, CmsResourceFilter)
     */
    protected void checkPermissions(CmsDbContext dbc, CmsResource resource, CmsPermissionSet requiredPermissions, boolean checkLock, CmsResourceFilter filter) throws CmsException, CmsSecurityException {
        // get the permissions
        I_CmsPermissionHandler.CmsPermissionCheckResult permissions = hasPermissions(dbc, resource, requiredPermissions, checkLock, filter);
        if (!permissions.isAllowed()) {
            // Delegate to the overload that raises the appropriate
            // security exception for the denied check result.
            checkPermissions(dbc.getRequestContext(), resource, requiredPermissions, permissions);
        }
    }
}
public class AbstractMappableValidator { /** * Disposes all triggers and data providers that are mapped to each other . */ private void disposeTriggersAndDataProviders ( ) { } }
for ( final Map . Entry < T , List < DP > > entry : triggersToDataProviders . entrySet ( ) ) { // Disconnect from trigger unhookFromTrigger ( entry . getKey ( ) ) ; // Dispose trigger itself final T trigger = entry . getKey ( ) ; if ( trigger instanceof Disposable ) { ( ( Disposable ) trigger ) . dispose ( ) ; } // Dispose data providers final List < DP > dataProviders = entry . getValue ( ) ; if ( dataProviders != null ) { for ( final DP dataProvider : dataProviders ) { if ( dataProvider instanceof Disposable ) { ( ( Disposable ) dataProvider ) . dispose ( ) ; } } } } // Clears all triggers triggersToDataProviders . clear ( ) ;
public class Job { /** * Get or create FileInfo for given path . * @ param file relative URI to temporary directory * @ return created or existing file info object */ public FileInfo getOrCreateFileInfo ( final URI file ) { } }
assert file . getFragment ( ) == null ; URI f = file . normalize ( ) ; if ( f . isAbsolute ( ) ) { f = tempDirURI . relativize ( f ) ; } FileInfo i = getFileInfo ( file ) ; if ( i == null ) { i = new FileInfo ( f ) ; add ( i ) ; } return i ;
public class TwoPhaseCommitSinkFunction { /** * This method must be the only place to call { @ link # recoverAndCommit ( Object ) } to ensure that * the configuration parameters { @ link # transactionTimeout } and * { @ link # ignoreFailuresAfterTransactionTimeout } are respected . */ private void recoverAndCommitInternal ( TransactionHolder < TXN > transactionHolder ) { } }
try { logWarningIfTimeoutAlmostReached ( transactionHolder ) ; recoverAndCommit ( transactionHolder . handle ) ; } catch ( final Exception e ) { final long elapsedTime = clock . millis ( ) - transactionHolder . transactionStartTime ; if ( ignoreFailuresAfterTransactionTimeout && elapsedTime > transactionTimeout ) { LOG . error ( "Error while committing transaction {}. " + "Transaction has been open for longer than the transaction timeout ({})." + "Commit will not be attempted again. Data loss might have occurred." , transactionHolder . handle , transactionTimeout , e ) ; } else { throw e ; } }
public class Group {

    /**
     * Reduce the group set by eliminating redundant elements. These
     * transformations applied are:
     *
     * ((a &amp; b) | b) =&gt; b
     * ((a | b) &amp; b) =&gt; b
     */
    void reduce() {
        // Note that the scanner choice is opposite of our type. This is
        // because a group of 'and's has children which are 'or's and vice
        // versa.
        Scanner<S> scanner = mForAnd ? OrChildScanner.THE : AndChildScanner.THE;

        // Pairwise scan: drop any element `a` that the scanner reports as
        // redundant with respect to some other element `b` of the same group.
        Iterator<Filter<S>> it = mElements.iterator();
        aLoop: while (it.hasNext()) {
            Filter<S> a = it.next();
            // NOTE: the inner for-each builds a fresh iterator over mElements
            // each pass. Removal is safe only because `continue aLoop` abandons
            // that inner iterator immediately after it.remove(), before it could
            // observe the structural modification.
            for (Filter<S> b : mElements) {
                if (a != b) {
                    if (a.accept(scanner, b)) {
                        // `a` is redundant given `b`; remove it via the outer
                        // iterator and restart the inner scan with the next `a`.
                        it.remove();
                        continue aLoop;
                    }
                }
            }
        }
    }
}
public class TaskTable { /** * 如果时间匹配则执行相应的Task , 无锁 * @ param millis 时间毫秒 * @ since 3.1.1 */ protected void executeTaskIfMatchInternal ( long millis ) { } }
for ( int i = 0 ; i < size ; i ++ ) { if ( patterns . get ( i ) . match ( timezone , millis , this . scheduler . matchSecond ) ) { this . scheduler . taskExecutorManager . spawnExecutor ( tasks . get ( i ) ) ; } }