| signature (string, 43 – 39.1k chars) | implementation (string, 0 – 450k chars) |
|---|---|
public class UIDMeta { /** * Convenience overload of { @ code getUIDMeta ( TSDB , UniqueIdType , byte [ ] ) }
* @ param tsdb The TSDB to use for storage access
* @ param type The type of UID to fetch
* @ param uid The ID of the meta to fetch
* @ return A UIDMeta from storage or a default
* @ throws HBaseException if there was an issue fetching
* @ throws NoSuchUniqueId If the UID does not exist */
public static Deferred < UIDMeta > getUIDMeta ( final TSDB tsdb , final UniqueIdType type , final String uid ) { } } | return getUIDMeta ( tsdb , type , UniqueId . stringToUid ( uid ) ) ; |
public class DisplayUtil { /** * Converts an { @ link Integer } value , which is measured in pixels , into a value , which is
* measured in dp .
* @ param context
* The context , which should be used , as an instance of the class { @ link Context } . The
* context may not be null
* @ param pixels
* The pixel value , which should be converted , as an { @ link Integer } value
* @ return The calculated dp value as an { @ link Integer } value . The value might be rounded */
public static int pixelsToDp ( @ NonNull final Context context , final int pixels ) { } } | Condition . INSTANCE . ensureNotNull ( context , "The context may not be null" ) ; DisplayMetrics displayMetrics = context . getResources ( ) . getDisplayMetrics ( ) ; return Math . round ( pixels / ( displayMetrics . densityDpi / PIXEL_DP_RATIO ) ) ; |
public class ConvertDMatrixStruct {
    /**
     * Converts {@link DMatrixRBlock} into {@link DMatrixRMaj}
     * @param src Input matrix.
     * @param dst Output matrix. If null a new matrix will be declared.
     * @return Converted matrix.
     */
    public static DMatrixRMaj convert(DMatrixRBlock src, DMatrixRMaj dst) {
        if (dst != null) {
            // Reuse the caller-provided matrix, resizing it to match the source.
            dst.reshape(src.getNumRows(), src.getNumCols());
        } else {
            dst = new DMatrixRMaj(src.numRows, src.numCols);
        }
        // Iterate over the source one block at a time; edge blocks may be smaller
        // than blockLength, hence the min() clamps below.
        for (int i = 0; i < src.numRows; i += src.blockLength) {
            int blockHeight = Math.min(src.blockLength, src.numRows - i);
            for (int j = 0; j < src.numCols; j += src.blockLength) {
                int blockWidth = Math.min(src.blockLength, src.numCols - j);
                // Offset of this block inside src.data: all full block-rows above it,
                // plus the blocks to its left in this block-row (each blockHeight tall).
                int indexSrc = i * src.numCols + blockHeight * j;
                // Destination is plain row-major storage.
                int indexDstRow = i * dst.numCols + j;
                // Copy the block one row at a time.
                for (int k = 0; k < blockHeight; k++) {
                    System.arraycopy(src.data, indexSrc, dst.data, indexDstRow, blockWidth);
                    indexSrc += blockWidth;
                    indexDstRow += dst.numCols;
                }
            }
        }
        return dst;
    }
}
public class SecurityPhaseListener { /** * Inspect the annotations in the ViewConfigStore , enforcing any restrictions applicable to this phase
* @ param event
* @ param phaseIdType */
private void performObservation ( PhaseEvent event , PhaseIdType phaseIdType ) { } } | UIViewRoot viewRoot = ( UIViewRoot ) event . getFacesContext ( ) . getViewRoot ( ) ; List < ? extends Annotation > restrictionsForPhase = getRestrictionsForPhase ( phaseIdType , viewRoot . getViewId ( ) ) ; if ( restrictionsForPhase != null ) { log . debugf ( "Enforcing on phase %s" , phaseIdType ) ; enforce ( event . getFacesContext ( ) , viewRoot , restrictionsForPhase ) ; } |
public class ScanningQueryEngine {
    /**
     * Determine whether this column and the other are <i>union-compatible</i> (that is, having the same columns).
     * @param results1 the first result set; may not be null
     * @param results2 the second result set; may not be null
     * @param context the query execution context; may not be null
     * @param query the query being executed; may not be null
     * @return true if the supplied columns definition are union-compatible, or false if they are not
     */
    protected boolean checkUnionCompatible(Columns results1, Columns results2, ScanQueryContext context, QueryCommand query) {
        // Identical references are trivially compatible; a single null side is not.
        if (results1 == results2) return true;
        if (results1 == null || results2 == null) return false;
        if (results1.hasFullTextSearchScores() != results2.hasFullTextSearchScores()) { // The query is not compatible
            context.getProblems().addError(JcrI18n.setQueryContainsResultSetsWithDifferentFullTextSearch);
            return false;
        }
        if (results1.getColumns().size() != results2.getColumns().size()) { // The query is not compatible
            context.getProblems().addError(JcrI18n.setQueryContainsResultSetsWithDifferentNumberOfColumns,
                                           results1.getColumns().size(), results2.getColumns().size());
            return false;
        }
        // Go through the columns and make sure that the property names and types match ...
        // (we can't just check column names, since the column names may include the selector if more than one selector)
        int numColumns = results1.getColumns().size();
        boolean noProblems = true;
        for (int i = 0; i != numColumns; ++i) {
            Column thisColumn = results1.getColumns().get(i);
            Column thatColumn = results2.getColumns().get(i);
            // A property-name mismatch returns immediately WITHOUT recording a problem,
            // unlike the type mismatch below, which records and keeps scanning.
            if (!thisColumn.getPropertyName().equalsIgnoreCase(thatColumn.getPropertyName())) return false;
            String thisType = results1.getColumnTypeForProperty(thisColumn.getSelectorName(), thisColumn.getPropertyName());
            String thatType = results2.getColumnTypeForProperty(thatColumn.getSelectorName(), thatColumn.getPropertyName());
            // NOTE(review): assumes getColumnTypeForProperty never returns null — confirm, otherwise this can NPE.
            if (!thisType.equalsIgnoreCase(thatType)) { // The query is not compatible
                context.getProblems().addError(JcrI18n.setQueryContainsResultSetsWithDifferentColumns, thisColumn, thatColumn);
                // Keep scanning so that every mismatched column pair is reported.
                noProblems = false;
            }
        }
        return noProblems;
    }
}
public class TokenCachingStrategy { /** * Gets the cached enum indicating the source of the token from the Bundle .
* @ param bundle
* A Bundle in which the enum was stored .
* @ return enum indicating the source of the token
* @ throws NullPointerException if the passed in Bundle is null */
public static AccessTokenSource getSource ( Bundle bundle ) { } } | Validate . notNull ( bundle , "bundle" ) ; if ( bundle . containsKey ( TokenCachingStrategy . TOKEN_SOURCE_KEY ) ) { return ( AccessTokenSource ) bundle . getSerializable ( TokenCachingStrategy . TOKEN_SOURCE_KEY ) ; } else { boolean isSSO = bundle . getBoolean ( TokenCachingStrategy . IS_SSO_KEY ) ; return isSSO ? AccessTokenSource . FACEBOOK_APPLICATION_WEB : AccessTokenSource . WEB_VIEW ; } |
public class ScriptingEngines { /** * Loads the given script engine by language name . Will throw an exception if no script engine can be loaded for the given language name .
* @ param language the name of the script language to lookup an implementation for
* @ return the script engine
* @ throws ProcessEngineException if no such engine can be found . */
public ScriptEngine getScriptEngineForLanguage ( String language ) { } } | if ( language != null ) { language = language . toLowerCase ( ) ; } ProcessApplicationReference pa = Context . getCurrentProcessApplication ( ) ; ProcessEngineConfigurationImpl config = Context . getProcessEngineConfiguration ( ) ; ScriptEngine engine = null ; if ( config . isEnableFetchScriptEngineFromProcessApplication ( ) ) { if ( pa != null ) { engine = getPaScriptEngine ( language , pa ) ; } } if ( engine == null ) { engine = getGlobalScriptEngine ( language ) ; } return engine ; |
public class FilePrinter { /** * Clean log files if should clean follow strategy */
private void cleanLogFilesIfNecessary ( ) { } } | File logDir = new File ( folderPath ) ; File [ ] files = logDir . listFiles ( ) ; if ( files == null ) { return ; } for ( File file : files ) { if ( cleanStrategy . shouldClean ( file ) ) { file . delete ( ) ; } } |
public class TextUICommandLine { /** * Parse the argument as auxclasspath entries and add them
* @ param argument */
private void addAuxClassPathEntries ( String argument ) { } } | StringTokenizer tok = new StringTokenizer ( argument , File . pathSeparator ) ; while ( tok . hasMoreTokens ( ) ) { project . addAuxClasspathEntry ( tok . nextToken ( ) ) ; } |
public class JobExecution { /** * Appends a log message to the execution log . The first time the log exceeds MAX _ LOG _ LENGTH , it
* gets truncated and the TRUNCATION _ BANNER gets added . Subsequent calls to appendLog will be
* ignored .
* @ param formattedMessage The formatted message to append to the log . */
void appendLog ( String formattedMessage ) { } } | if ( logTruncated ) return ; String combined = join ( getLog ( ) , formattedMessage ) ; if ( combined . length ( ) > MAX_LOG_LENGTH ) { String truncated = abbreviate ( combined , MAX_LOG_LENGTH - TRUNCATION_BANNER . length ( ) * 2 - 2 ) ; combined = join ( new String [ ] { TRUNCATION_BANNER , truncated , TRUNCATION_BANNER } , "\n" ) ; logTruncated = true ; } setLog ( combined ) ; |
public class ServletContainer { /** * Registers a filter .
* @ param filterClass filter class to be registered . This class must be annotated with { @ linkplain WebFilter } .
* @ return this . */
public final SC registerFilter ( Class < ? extends Filter > filterClass ) { } } | WebFilter webFilter = filterClass . getAnnotation ( WebFilter . class ) ; if ( webFilter == null ) throw new IllegalArgumentException ( String . format ( "Missing annotation '%s' for class '%s'" , WebFilter . class . getName ( ) , filterClass . getName ( ) ) ) ; String [ ] urlPatterns = webFilter . value ( ) ; if ( urlPatterns . length == 0 ) urlPatterns = webFilter . urlPatterns ( ) ; if ( urlPatterns . length == 0 ) throw new IllegalArgumentException ( String . format ( "Missing pattern mapping for '%s'" , filterClass . getName ( ) ) ) ; for ( String urlPattern : urlPatterns ) { registerFilter ( filterClass , urlPattern ) ; } return ( SC ) this ; |
public class FlowPreparer { /** * Update last modified time of the file if it exists .
* @ param path path to the target file */
@ VisibleForTesting void updateLastModifiedTime ( final Path path ) { } } | try { Files . setLastModifiedTime ( path , FileTime . fromMillis ( System . currentTimeMillis ( ) ) ) ; } catch ( final IOException ex ) { log . warn ( "Error when updating last modified time for {}" , path , ex ) ; } |
public class CmsToolManager {
    /**
     * Called by the <code>{@link org.opencms.workplace.CmsWorkplaceManager#initialize(CmsObject)}</code> method.<p>
     *
     * Rebuilds the tool registry: ensures a default tool root exists, clears the
     * cached tools/urls, and (re)configures every tool root whose VFS resource exists.
     * @param cms the admin cms context
     */
    public void configure(CmsObject cms) {
        if (CmsLog.INIT.isInfoEnabled()) {
            CmsLog.INIT.info(Messages.get().getBundle().key(Messages.INIT_TOOLMANAGER_CREATED_0));
        }
        // Lazily register the default admin-view root the first time configure runs.
        if (m_roots.getObject(ROOTKEY_DEFAULT) == null) {
            CmsToolRootHandler defToolRoot = new CmsToolRootHandler();
            defToolRoot.setKey(ROOTKEY_DEFAULT);
            defToolRoot.setUri(CmsWorkplace.PATH_WORKPLACE + "admin/");
            defToolRoot.setName("${key." + Messages.GUI_ADMIN_VIEW_ROOT_NAME_0 + "}");
            defToolRoot.setHelpText("${key." + Messages.GUI_ADMIN_VIEW_ROOT_HELP_0 + "}");
            addToolRoot(defToolRoot);
        }
        // Drop all previously configured state before re-scanning the roots.
        m_tools.clear();
        m_urls.clear();
        Iterator<CmsToolRootHandler> it = getToolRoots().iterator();
        while (it.hasNext()) {
            CmsToolRootHandler toolRoot = it.next();
            if (!cms.existsResource(toolRoot.getUri())) {
                // Root URI not present in the VFS: skip it (logged) rather than fail.
                if (CmsLog.INIT.isInfoEnabled()) {
                    CmsLog.INIT.info(Messages.get().getBundle().key(Messages.INIT_TOOLMANAGER_ROOT_SKIPPED_2, toolRoot.getKey(), toolRoot.getUri()));
                }
                continue;
            }
            try {
                toolRoot.setup(cms, null, toolRoot.getUri());
                configureToolRoot(cms, toolRoot);
                // log info
                if (CmsLog.INIT.isInfoEnabled()) {
                    CmsLog.INIT.info(Messages.get().getBundle().key(Messages.INIT_TOOLMANAGER_SETUP_1, toolRoot.getKey()));
                }
            } catch (CmsException e) {
                // log failure; one broken root must not prevent the others from configuring
                if (CmsLog.INIT.isWarnEnabled()) {
                    CmsLog.INIT.warn(Messages.get().getBundle().key(Messages.INIT_TOOLMANAGER_SETUP_ERROR_1, toolRoot.getKey()), e);
                }
            }
        }
    }
}
public class StringArrayWrapper { /** * This method is used for unit test purposes to simulate the creation of a
* StringArrayWrapper whose destinations are all on the local bus .
* The individual strings of the first parameter may be of the following form ;
* destName - just the destination name to be used .
* destName : bus - destination name and associated bus name .
* @ param data The producer destination and elements up to but not including
* the big destination .
* @ param bigDestName The name of the ' big ' destination that the message will
* end up at .
* @ throws JMSException */
public static StringArrayWrapper create ( String [ ] data , String bigDestName ) throws JMSException { } } | int size = 0 ; if ( data != null ) size = data . length ; List fakedFullMsgPath = new ArrayList ( size + 1 ) ; if ( size > 0 ) { for ( int i = 0 ; i < size ; i ++ ) { // Create the appropriate List element .
String destName = data [ i ] ; String busName = null ; SIDestinationAddress sida ; // If this is of the form dest : bus
if ( destName . indexOf ( BUS_SEPARATOR ) != - 1 ) { busName = destName . substring ( destName . indexOf ( BUS_SEPARATOR ) + 1 ) ; destName = destName . substring ( 0 , destName . indexOf ( BUS_SEPARATOR ) ) ; } try { sida = JmsServiceFacade . getSIDestinationAddressFactory ( ) . createSIDestinationAddress ( destName , busName ) ; fakedFullMsgPath . add ( sida ) ; } catch ( Exception e ) { // No FFDC code needed
// This makes it the responsibility of the calling function to handle this
// problem . Note that the StringArrayWrapper is used only to handle forward
// and reverse routing paths , which are not supported function so this
// code should never be driven in normal product operation .
JMSException jmse = new JMSException ( e . getMessage ( ) ) ; jmse . setLinkedException ( e ) ; jmse . initCause ( e ) ; } // try
} // for
if ( bigDestName != null ) { // Make sure we add the real destination on the end of the msg FRP .
try { SIDestinationAddress sida = ( ( SIDestinationAddressFactory ) JmsServiceFacade . getSIDestinationAddressFactory ( ) ) . createSIDestinationAddress ( bigDestName , null ) ; fakedFullMsgPath . add ( sida ) ; } catch ( Exception e ) { // No FFDC code needed
// This makes it the responsibility of the calling function to handle this
// problem . Note that the StringArrayWrapper is used only to handle forward
// and reverse routing paths , which are not supported function so this
// code should never be driven in normal product operation .
JMSException jmse = new JMSException ( e . getMessage ( ) ) ; jmse . setLinkedException ( e ) ; jmse . initCause ( e ) ; } // try
} // if
} // if size > 0
StringArrayWrapper newSAW = new StringArrayWrapper ( fakedFullMsgPath ) ; return newSAW ; |
public class NodeSetDTM {
    /**
     * Return the last fetched node. Needed to support the UnionPathIterator.
     * @return the last fetched node.
     * @throws RuntimeException thrown if this NodeSetDTM is not of
     * a cached type, and thus doesn't permit indexed access.
     */
    public int getCurrentNode() {
        if (!m_cacheNodes)
            throw new RuntimeException("This NodeSetDTM can not do indexing or counting functions!");
        int saved = m_next; // because nextNode always increments
        // But watch out for copy29, where the root iterator didn't
        // have nextNode called on it.
        // The "current" node is one behind the next-cursor, unless the cursor
        // was never advanced (m_next == 0), in which case it stays at 0.
        int current = (m_next > 0) ? m_next - 1 : m_next;
        // Out-of-range index yields DTM.NULL rather than throwing.
        int n = (current < m_firstFree) ? elementAt(current) : DTM.NULL;
        m_next = saved; // HACK: I think this is a bit of a hack. -sb
        // NOTE(review): m_next is never modified between save and restore above,
        // so the restore is currently a no-op — kept as defensive legacy code.
        return n;
    }
}
public class PollForActivityTaskRequestMarshaller { /** * Marshall the given parameter object . */
public void marshall ( PollForActivityTaskRequest pollForActivityTaskRequest , ProtocolMarshaller protocolMarshaller ) { } } | if ( pollForActivityTaskRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( pollForActivityTaskRequest . getDomain ( ) , DOMAIN_BINDING ) ; protocolMarshaller . marshall ( pollForActivityTaskRequest . getTaskList ( ) , TASKLIST_BINDING ) ; protocolMarshaller . marshall ( pollForActivityTaskRequest . getIdentity ( ) , IDENTITY_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; } |
public class TypeConversion {
    /**
     * A utility method to convert an int into bytes in an array (big-endian:
     * most significant byte first).
     * @param value An int.
     * @param bytes The byte array to which the int should be copied.
     * @param offset The index where the int should start.
     */
    public static void intToBytes(int value, byte[] bytes, int offset) {
        // Write big-endian: byte i holds bits shifted down by 8*(3-i).
        for (int i = 0; i < 4; i++) {
            bytes[offset + i] = (byte) (value >>> (8 * (3 - i)));
        }
    }
}
public class Signer { /** * Returns a parsed Date .
* @ param date a date in the AWS format yyyyMMdd ' T ' HHmmss ' Z '
* @ return a date */
public static Date parseAWSDate ( String date ) { } } | if ( date == null ) { return null ; } return TIME_FORMATTER . parseDateTime ( date ) . toDate ( ) ; |
public class SameDiff {
    /**
     * Convert the specified variables to VARIABLE type SDVariables.<br>
     * This can only be done for constants and placeholders, not ARRAY type variables (which are usually network activations).
     * As variables, this variable will modified during any subsequent training.<br>
     * See also: {@link VariableType}
     * @param constants the variables (CONSTANT or PLACEHOLDER type) to convert; ARRAY type entries are rejected
     */
    public void convertToVariables(@NonNull List<SDVariable> constants) {
        if (constants.size() == 0)
            return;
        // Validate: ARRAY types cannot be converted; also detect the all-already-VARIABLE no-op case.
        boolean allConst = true;
        for (SDVariable variable : constants) {
            if (variable.getVariableType() != VariableType.VARIABLE) {
                allConst = false;
            }
            Preconditions.checkState(variable.getVariableType() != VariableType.ARRAY,
                    "Cannot convert variable of type ARRAY to a variable: %s", variable);
        }
        if (allConst) {
            return; // No op
        }
        // Remove all sessions in case they have any cached arrays/state
        sessions.clear();
        // If gradient function has been defined, remove it (so it will be recreated later)
        sameDiffFunctionInstances.remove("grad");
        // Move each variable's array into the VARIABLE storage maps and flip its type.
        for (SDVariable variable : constants) {
            String n = variable.getVarName();
            INDArray arr = variable.getArr();
            Preconditions.checkNotNull(arr, "Could not get array for variable %s: if this is a placeholder, use SDVariable.setArray before converting", variable);
            variablesArrays.put(n, new DeviceLocalNDArray(arr));
            constantArrays.remove(n);
            // Former placeholders must also be purged from every per-thread placeholder map.
            if (!placeholdersPerThread.isEmpty()) {
                for (Map<String, INDArray> m : placeholdersPerThread.values()) {
                    m.remove(n);
                }
            }
            variable.setVariableType(VariableType.VARIABLE);
        }
        // For training: need to add new updater state
        if (trainingConfig != null) {
            List<String> newTrainableParams = new ArrayList<>(trainingConfig.getTrainableParams());
            List<String> convertedToVars = new ArrayList<>();
            for (SDVariable v : constants) {
                newTrainableParams.add(v.getVarName());
                convertedToVars.add(v.getVarName());
            }
            trainingConfig.setTrainableParams(newTrainableParams);
            // Add updater state for this variable: updaterState, updaterViews, updaterMap
            if (initializedTraining) {
                // Total extra updater state required by the newly trainable variables.
                long extraStateSize = 0;
                for (String s : convertedToVars) {
                    INDArray arr = getVariable(s).getArr();
                    long stateSize = trainingConfig.getUpdater().stateSize(arr.length());
                    extraStateSize += stateSize;
                }
                if (extraStateSize > 0) {
                    // NOTE(review): updaterState.dataType() is read before the null check in the
                    // ternary below — if updaterState could be null here this would NPE; presumably
                    // initializedTraining implies non-null updaterState. Confirm.
                    INDArray newState = Nd4j.createUninitialized(updaterState.dataType(), 1, extraStateSize);
                    updaterState = (updaterState == null ? newState : Nd4j.concat(1, updaterState, newState));
                    // Now, update updaterViews map:
                    // Rebuild every view over the (re)concatenated flat updater state buffer.
                    long viewSoFar = 0;
                    updaterViews = new HashMap<>();
                    updaterMap = new HashMap<>();
                    for (String s : trainingConfig.getTrainableParams()) {
                        long thisSize = trainingConfig.getUpdater().stateSize(this.variables.get(s).getVariable().getArr().length());
                        INDArray view = (updaterState == null || thisSize == 0 ? null : updaterState.get(NDArrayIndex.interval(0, 1), NDArrayIndex.interval(viewSoFar, viewSoFar + thisSize)));
                        updaterViews.put(s, view);
                        boolean init = convertedToVars.contains(s); // Only initialize/zero the states for the new variables
                        updaterMap.put(s, trainingConfig.getUpdater().instantiate(view, init));
                        viewSoFar += thisSize;
                    }
                }
            }
        }
    }
}
public class AmazonRedshiftClient { /** * Allows you to purchase reserved nodes . Amazon Redshift offers a predefined set of reserved node offerings . You
* can purchase one or more of the offerings . You can call the < a > DescribeReservedNodeOfferings < / a > API to obtain
* the available reserved node offerings . You can call this API by providing a specific reserved node offering and
* the number of nodes you want to reserve .
* For more information about reserved node offerings , go to < a
* href = " https : / / docs . aws . amazon . com / redshift / latest / mgmt / purchase - reserved - node - instance . html " > Purchasing Reserved
* Nodes < / a > in the < i > Amazon Redshift Cluster Management Guide < / i > .
* @ param purchaseReservedNodeOfferingRequest
* @ return Result of the PurchaseReservedNodeOffering operation returned by the service .
* @ throws ReservedNodeOfferingNotFoundException
* Specified offering does not exist .
* @ throws ReservedNodeAlreadyExistsException
* User already has a reservation with the given identifier .
* @ throws ReservedNodeQuotaExceededException
* Request would exceed the user ' s compute node quota . For information about increasing your quota , go to < a
* href = " https : / / docs . aws . amazon . com / redshift / latest / mgmt / amazon - redshift - limits . html " > Limits in Amazon
* Redshift < / a > in the < i > Amazon Redshift Cluster Management Guide < / i > .
* @ throws UnsupportedOperationException
* The requested operation isn ' t supported .
* @ sample AmazonRedshift . PurchaseReservedNodeOffering
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / redshift - 2012-12-01 / PurchaseReservedNodeOffering "
* target = " _ top " > AWS API Documentation < / a > */
@ Override public ReservedNode purchaseReservedNodeOffering ( PurchaseReservedNodeOfferingRequest request ) { } } | request = beforeClientExecution ( request ) ; return executePurchaseReservedNodeOffering ( request ) ; |
public class NavigationHandlerImpl {
    /**
     * Performs the algorithm specified in 7.4.2 for situations where no navigation cases are defined and instead
     * the navigation case is to be determined from the outcome (implicit navigation).
     * TODO: cache results?
     * @param facesContext the current faces context
     * @param fromAction the action expression the outcome came from (may be null)
     * @param outcome the logical outcome, possibly carrying a query string and/or missing extension
     * @return the derived NavigationCase, or null if no view could be derived from the outcome
     */
    private NavigationCase getOutcomeNavigationCase(FacesContext facesContext, String fromAction, String outcome) {
        String implicitViewId = null;
        boolean includeViewParams = false;
        int index;
        boolean isRedirect = false;
        String queryString = null;
        NavigationCase result = null;
        String viewId = facesContext.getViewRoot() != null ? facesContext.getViewRoot().getViewId() : null;
        // String viewIdToTest = outcome;
        // A shared StringBuilder is used instead of repeated String concatenation.
        StringBuilder viewIdToTest = SharedStringBuilder.get(facesContext, OUTCOME_NAVIGATION_SB);
        viewIdToTest.append(outcome);
        // If viewIdToTest contains a query string, remove it and set queryString with that value.
        index = viewIdToTest.indexOf("?");
        if (index != -1) {
            queryString = viewIdToTest.substring(index + 1);
            // viewIdToTest = viewIdToTest.substring(0, index);
            viewIdToTest.setLength(index);
            // If queryString contains "faces-redirect=true", set isRedirect to true.
            if (queryString.indexOf("faces-redirect=true") != -1) {
                isRedirect = true;
            }
            // If queryString contains "includeViewParams=true" or
            // "faces-include-view-params=true", set includeViewParams to true.
            if (queryString.indexOf("includeViewParams=true") != -1 || queryString.indexOf("faces-include-view-params=true") != -1) {
                includeViewParams = true;
            }
        }
        // If viewIdToTest does not have a "file extension", use the one from the current viewId.
        index = viewIdToTest.indexOf(".");
        if (index == -1) {
            if (viewId != null) {
                index = viewId.lastIndexOf(".");
                if (index != -1) {
                    // viewIdToTest += viewId.substring(index);
                    viewIdToTest.append(viewId.substring(index));
                }
            } else {
                // This case happens when for for example there is a ViewExpiredException,
                // and a custom ExceptionHandler try to navigate using implicit navigation.
                // In this case, there is no UIViewRoot set on the FacesContext, so viewId
                // is null.
                // In this case, it should try to derive the viewId of the view that was
                // not able to restore, to get the extension and apply it to
                // the implicit navigation.
                String tempViewId = getNavigationHandlerSupport().calculateViewId(facesContext);
                if (tempViewId != null) {
                    index = tempViewId.lastIndexOf(".");
                    if (index != -1) {
                        viewIdToTest.append(tempViewId.substring(index));
                    }
                }
            }
            if (log.isLoggable(Level.FINEST)) {
                log.finest("getOutcomeNavigationCase -> viewIdToTest: " + viewIdToTest);
            }
        }
        // If viewIdToTest does not start with "/", look for the last "/" in viewId. If not found, simply prepend "/".
        // Otherwise, prepend everything before and including the last "/" in viewId.
        // if (!viewIdToTest.startsWith("/") && viewId != null)
        boolean startWithSlash = false;
        if (viewIdToTest.length() > 0) {
            startWithSlash = (viewIdToTest.charAt(0) == '/');
        }
        if (!startWithSlash) {
            index = -1;
            if (viewId != null) {
                index = viewId.lastIndexOf("/");
            }
            if (index == -1) {
                // viewIdToTest = "/" + viewIdToTest;
                viewIdToTest.insert(0, "/");
            } else {
                // viewIdToTest = viewId.substring(0, index + 1) + viewIdToTest;
                viewIdToTest.insert(0, viewId, 0, index + 1);
            }
        }
        // Apply normalization
        // Only pay for normalization when a "./" or "../" style segment is present.
        String viewIdToTestString = null;
        boolean applyNormalization = false;
        for (int i = 0; i < viewIdToTest.length() - 1; i++) {
            if (viewIdToTest.charAt(i) == '.' && viewIdToTest.charAt(i + 1) == '/') {
                applyNormalization = true;
                break;
            }
        }
        if (applyNormalization) {
            viewIdToTestString = FilenameUtils.normalize(viewIdToTest.toString(), true);
        } else {
            viewIdToTestString = viewIdToTest.toString();
        }
        // Call ViewHandler.deriveViewId() and set the result as implicitViewId.
        try {
            implicitViewId = facesContext.getApplication().getViewHandler().deriveViewId(facesContext, viewIdToTestString);
        } catch (UnsupportedOperationException e) {
            // This is the case when a pre-JSF 2.0 ViewHandler is used.
            // In this case, the default algorithm must be used.
            // FIXME: I think we're always calling the "default" ViewHandler.deriveViewId() algorithm and we don't
            // distinguish between pre-JSF 2.0 and JSF 2.0 ViewHandlers. This probably needs to be addressed.
        }
        if (implicitViewId != null) {
            // Append all params from the queryString
            // (excluding faces-redirect, includeViewParams and faces-include-view-params)
            Map<String, List<String>> params = null;
            if (queryString != null && !"".equals(queryString)) {
                // String[] splitQueryParams = queryString.split("&(amp;)?"); // "&" or "&amp;"
                String[] splitQueryParams = AMP_PATTERN.split(queryString); // "&" or "&amp;"
                // NOTE(review): the second HashMap argument is the load factor, and
                // (n*4+3)/3 is > 1 — presumably initial capacity and load factor were
                // meant the other way round; harmless but worth confirming.
                params = new HashMap<String, List<String>>(splitQueryParams.length, (splitQueryParams.length * 4 + 3) / 3);
                for (String queryParam : splitQueryParams) {
                    String[] splitParam = StringUtils.splitShortString(queryParam, '=');
                    if (splitParam.length == 2) {
                        // valid parameter - add it to params
                        if ("includeViewParams".equals(splitParam[0]) || "faces-include-view-params".equals(splitParam[0]) || "faces-redirect".equals(splitParam[0])) {
                            // ignore includeViewParams, faces-include-view-params and faces-redirect
                            continue;
                        }
                        List<String> paramValues = params.get(splitParam[0]);
                        if (paramValues == null) {
                            // no value for the given parameter yet
                            paramValues = new ArrayList<String>();
                            params.put(splitParam[0], paramValues);
                        }
                        paramValues.add(splitParam[1]);
                    } else {
                        // invalid parameter
                        throw new FacesException("Invalid parameter \"" + queryParam + "\" in outcome " + outcome);
                    }
                }
            }
            // Finally, create the NavigationCase.
            result = new NavigationCase(viewId, fromAction, outcome, null, implicitViewId, params, isRedirect, includeViewParams);
        }
        return result;
    }
}
public class Spliterators { /** * Creates a { @ code Spliterator } covering a range of elements of a given
* array , using a customized set of spliterator characteristics .
* < p > This method is provided as an implementation convenience for
* Spliterators which store portions of their elements in arrays , and need
* fine control over Spliterator characteristics . Most other situations in
* which a Spliterator for an array is needed should use
* { @ link Arrays # spliterator ( Object [ ] ) } .
* < p > The returned spliterator always reports the characteristics
* { @ code SIZED } and { @ code SUBSIZED } . The caller may provide additional
* characteristics for the spliterator to report ; it is common to
* additionally specify { @ code IMMUTABLE } and { @ code ORDERED } .
* @ param < T > Type of elements
* @ param array The array , assumed to be unmodified during use
* @ param fromIndex The least index ( inclusive ) to cover
* @ param toIndex One past the greatest index to cover
* @ param additionalCharacteristics Additional spliterator characteristics
* of this spliterator ' s source or elements beyond { @ code SIZED } and
* { @ code SUBSIZED } which are are always reported
* @ return A spliterator for an array
* @ throws NullPointerException if the given array is { @ code null }
* @ throws ArrayIndexOutOfBoundsException if { @ code fromIndex } is negative ,
* { @ code toIndex } is less than { @ code fromIndex } , or
* { @ code toIndex } is greater than the array size
* @ see Arrays # spliterator ( Object [ ] , int , int ) */
public static < T > Spliterator < T > spliterator ( Object [ ] array , int fromIndex , int toIndex , int additionalCharacteristics ) { } } | checkFromToBounds ( Objects . requireNonNull ( array ) . length , fromIndex , toIndex ) ; return new ArraySpliterator < > ( array , fromIndex , toIndex , additionalCharacteristics ) ; |
public class CssScanner { /** * HASHNAME " # " { name } name { nmchar } + [ _ a - z0-9 - ] | { nonascii } | { escape }
* @ throws CssException */
private void _hashname ( ) throws IOException , CssException { } } | if ( debug ) { checkState ( reader . curChar == '#' ) ; checkState ( NMCHAR . matches ( ( char ) reader . peek ( ) ) || isNextEscape ( ) ) ; } builder . type = Type . HASHNAME ; builder . append ( '#' ) ; append ( NMCHAR ) ; |
public class DevicesInner { /** * Gets all the data box edge / gateway devices in a resource group .
* @ param resourceGroupName The resource group name .
* @ param expand Specify $ expand = details to populate additional fields related to the resource or Specify $ skipToken = & lt ; token & gt ; to populate the next page in the list .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the PagedList & lt ; DataBoxEdgeDeviceInner & gt ; object */
public Observable < Page < DataBoxEdgeDeviceInner > > listByResourceGroupAsync ( final String resourceGroupName , final String expand ) { } } | return listByResourceGroupWithServiceResponseAsync ( resourceGroupName , expand ) . map ( new Func1 < ServiceResponse < Page < DataBoxEdgeDeviceInner > > , Page < DataBoxEdgeDeviceInner > > ( ) { @ Override public Page < DataBoxEdgeDeviceInner > call ( ServiceResponse < Page < DataBoxEdgeDeviceInner > > response ) { return response . body ( ) ; } } ) ; |
public class EvictionPolicyEvaluatorProvider { /** * Gets the { @ link EvictionPolicyEvaluator } implementation specified with { @ code evictionPolicy } .
* @ param evictionConfig { @ link EvictionConfiguration } for requested { @ link EvictionPolicyEvaluator } implementation
* @ param classLoader the { @ link java . lang . ClassLoader } to be used
* while creating custom { @ link EvictionPolicyComparator } if it is specified in the config
* @ return the requested { @ link EvictionPolicyEvaluator } implementation */
public static < A , E extends Evictable > EvictionPolicyEvaluator < A , E > getEvictionPolicyEvaluator ( EvictionConfiguration evictionConfig , ClassLoader classLoader ) { } } | checkNotNull ( evictionConfig ) ; EvictionPolicyComparator evictionPolicyComparator ; String evictionPolicyComparatorClassName = evictionConfig . getComparatorClassName ( ) ; if ( ! isNullOrEmpty ( evictionPolicyComparatorClassName ) ) { try { evictionPolicyComparator = ClassLoaderUtil . newInstance ( classLoader , evictionPolicyComparatorClassName ) ; } catch ( Exception e ) { throw rethrow ( e ) ; } } else { EvictionPolicyComparator comparator = evictionConfig . getComparator ( ) ; if ( comparator != null ) { evictionPolicyComparator = comparator ; } else { evictionPolicyComparator = createEvictionPolicyComparator ( evictionConfig . getEvictionPolicy ( ) ) ; } } return new EvictionPolicyEvaluator < A , E > ( evictionPolicyComparator ) ; |
public class ProcessAnnotatedTypeImpl { /** * Call this method after all observer methods of this event have been invoked to get the final value of this { @ link AnnotatedType } .
* @ return the resulting annotated type */
public SlimAnnotatedType < X > getResultingAnnotatedType ( ) { } } | if ( isDirty ( ) ) { return ClassTransformer . instance ( manager ) . getUnbackedAnnotatedType ( originalAnnotatedType , annotatedType ) ; } else { return originalAnnotatedType ; } |
public class FTPClient { /** * Performs third - party transfer between two servers .
* @ param remoteSrcFile source filename
* @ param destination another client connected to destination server
* @ param remoteDstFile destination filename
* @ param append enables append mode ; if true ,
* data will be appened to the remote file , otherwise
* file will be overwritten .
* @ param mListener marker listener .
* Can be set to null . */
public void transfer ( String remoteSrcFile , FTPClient destination , String remoteDstFile , boolean append , MarkerListener mListener ) throws IOException , ServerException , ClientException { } } | session . matches ( destination . session ) ; // if transfer modes have not been defined ,
// set this ( source ) as active
if ( session . serverMode == Session . SERVER_DEFAULT ) { HostPort hp = destination . setPassive ( ) ; setActive ( hp ) ; } destination . controlChannel . write ( new Command ( ( append ) ? "APPE" : "STOR" , remoteDstFile ) ) ; controlChannel . write ( new Command ( "RETR" , remoteSrcFile ) ) ; transferRunSingleThread ( destination . controlChannel , mListener ) ; |
public class UcsApi { /** * Search for contacts . If ‘ sortCriteria ’ or ‘ startIndex ’ is specified , the query is based on SQL , otherwise on Lucene
* @ param luceneSearchData ( required )
* @ return ApiResponse & lt ; ApiSuccessResponse & gt ;
* @ throws ApiException If fail to call the API , e . g . server error or cannot deserialize the response body */
public ApiResponse < ApiSuccessResponse > searchContactsWithHttpInfo ( LuceneSearchData luceneSearchData ) throws ApiException { } } | com . squareup . okhttp . Call call = searchContactsValidateBeforeCall ( luceneSearchData , null , null ) ; Type localVarReturnType = new TypeToken < ApiSuccessResponse > ( ) { } . getType ( ) ; return apiClient . execute ( call , localVarReturnType ) ; |
public class RefocusNotifier { /** * Sends an Refocus sample .
* @ param aspectPath The Refocus aspect path .
* @ param fired If the trigger is fired or not . */
private void sendMessage ( String aspectPath , boolean fired ) { } } | if ( Boolean . valueOf ( _config . getValue ( SystemConfiguration . Property . REFOCUS_ENABLED ) ) ) { int refreshMaxTimes = Integer . parseInt ( _config . getValue ( Property . REFOCUS_CONNECTION_REFRESH_MAX_TIMES . getName ( ) , Property . REFOCUS_CONNECTION_REFRESH_MAX_TIMES . getDefaultValue ( ) ) ) ; try { // TODO : get customer specified refocus sample values when UI is ready , currently use ' 1 ' for active trigger and ' 0 ' for non - active trigger
RefocusSample refocusSample = new RefocusSample ( aspectPath , fired ? "1" : "0" ) ; RefocusTransport refocusTransport = RefocusTransport . getInstance ( ) ; HttpClient httpclient = refocusTransport . getHttpClient ( _config ) ; PostMethod post = null ; try { post = new PostMethod ( String . format ( "%s/v1/samples/upsert" , endpoint ) ) ; post . setRequestHeader ( "Authorization" , token ) ; post . setRequestEntity ( new StringRequestEntity ( refocusSample . toJSON ( ) , "application/json" , null ) ) ; for ( int i = 0 ; i < 1 + refreshMaxTimes ; i ++ ) { int respCode = httpclient . executeMethod ( post ) ; // Check for success
if ( respCode == 200 || respCode == 201 || respCode == 204 ) { _logger . info ( "Success - send Refocus sample '{}'." , refocusSample . toJSON ( ) ) ; break ; } else if ( respCode == 401 ) { // Indication that the session timedout , Need to refresh and retry
continue ; } else { _logger . error ( "Failure - send Refocus sample '{}'. Response code '{}' response '{}'" , refocusSample . toJSON ( ) , respCode , post . getResponseBodyAsString ( ) ) ; break ; } } } catch ( Exception e ) { _logger . error ( "Failure - send Refocus sample '{}'. Exception '{}'" , refocusSample . toJSON ( ) , e ) ; } finally { if ( post != null ) { post . releaseConnection ( ) ; } } } catch ( RuntimeException ex ) { throw new SystemException ( "Failed to send an Refocus notification." , ex ) ; } } else { _logger . info ( "Sending Refocus notification is disabled. Not sending message for aspect '{}'." , aspectPath ) ; } |
public class Task { /** * Runs a continuation when a task completes successfully , forwarding along
* { @ link java . lang . Exception } s or cancellation . */
public < TContinuationResult > Task < TContinuationResult > onSuccessTask ( final Continuation < TResult , Task < TContinuationResult > > continuation , Executor executor , final CancellationToken ct ) { } } | return continueWithTask ( new Continuation < TResult , Task < TContinuationResult > > ( ) { @ Override public Task < TContinuationResult > then ( Task < TResult > task ) { if ( ct != null && ct . isCancellationRequested ( ) ) { return Task . cancelled ( ) ; } if ( task . isFaulted ( ) ) { return Task . forError ( task . getError ( ) ) ; } else if ( task . isCancelled ( ) ) { return Task . cancelled ( ) ; } else { return task . continueWithTask ( continuation ) ; } } } , executor ) ; |
public class TaxinvoiceServiceImp { /** * / * ( non - Javadoc )
* @ see com . popbill . api . TaxinvoiceService # getInfo ( java . lang . String , com . popbill . api . taxinvoice . MgtKeyType , java . lang . String ) */
@ Override public TaxinvoiceInfo getInfo ( String CorpNum , MgtKeyType KeyType , String MgtKey ) throws PopbillException { } } | if ( KeyType == null ) throw new PopbillException ( - 99999999 , "관리번호형태가 입력되지 않았습니다." ) ; if ( MgtKey == null || MgtKey . isEmpty ( ) ) throw new PopbillException ( - 99999999 , "관리번호가 입력되지 않았습니다." ) ; return httpget ( "/Taxinvoice/" + KeyType . name ( ) + "/" + MgtKey , CorpNum , null , TaxinvoiceInfo . class ) ; |
public class Actor { /** * < p > INTERNAL API < / p >
* Initialization of actor
* @ param path path of actor
* @ param context context of actor
* @ param mailbox mailbox of actor */
public final void initActor ( String path , ActorContext context , Mailbox mailbox ) { } } | this . path = path ; this . context = context ; this . mailbox = mailbox ; |
public class IncrementalDFADAGBuilder { /** * Creates a suffix state sequence , i . e . , a linear sequence of states connected by transitions labeled by the
* letters of the given suffix word .
* @ param suffix
* the suffix word
* @ param acc
* the acceptance status of the final state
* @ return the first state in the sequence */
private State createSuffix ( Word < ? extends I > suffix , Acceptance acc ) { } } | StateSignature sig = new StateSignature ( alphabetSize , acc ) ; sig . updateHashCode ( ) ; State last = replaceOrRegister ( sig ) ; int len = suffix . length ( ) ; for ( int i = len - 1 ; i >= 0 ; i -- ) { sig = new StateSignature ( alphabetSize , Acceptance . DONT_KNOW ) ; I sym = suffix . getSymbol ( i ) ; int idx = inputAlphabet . getSymbolIndex ( sym ) ; sig . successors . array [ idx ] = last ; sig . updateHashCode ( ) ; last = replaceOrRegister ( sig ) ; } return last ; |
public class Library { /** * Sets the locale for the complete Logdoc library .
* @ param newLocale
* the new locale , cannot be < code > null < / code > .
* @ throws UnsupportedLocaleException
* if the specified locale is not supported by < em > all < / em > registered < code > Log < / code >
* classes . */
public static synchronized void setLocale ( String newLocale ) throws UnsupportedLocaleException { } } | Preconditions . checkArgument ( newLocale == null , "newLocale == null" ) ; if ( ! newLocale . equals ( CURRENT_LOCALE ) ) { LogCentral . setLocale ( newLocale ) ; CURRENT_LOCALE = newLocale ; } |
public class Input { /** * One of two methods to set layer input values . This one is for raw double data , e . g . for scoring
* @ param seed For seeding the RNG inside ( for input dropout )
* @ param data Data ( training columns and responses ) to extract the training columns
* from to be mapped into the input neuron layer */
public void setInput ( long seed , final double [ ] data ) { } } | // Log . info ( " Data : " + ArrayUtils . toString ( data ) ) ;
assert ( _dinfo != null ) ; double [ ] nums = MemoryManager . malloc8d ( _dinfo . _nums ) ; // a bit wasteful - reallocated each time
int [ ] cats = MemoryManager . malloc4 ( _dinfo . _cats ) ; // a bit wasteful - reallocated each time
int i = 0 , ncats = 0 ; for ( ; i < _dinfo . _cats ; ++ i ) { // This can occur when testing data has categorical levels that are not part of training ( or if there ' s a missing value )
if ( Double . isNaN ( data [ i ] ) ) { if ( _dinfo . _catMissing [ i ] != 0 ) cats [ ncats ++ ] = ( _dinfo . _catOffsets [ i + 1 ] - 1 ) ; // use the extra level made during training
else { if ( ! _dinfo . _useAllFactorLevels ) throw new IllegalArgumentException ( "Model was built without missing categorical factors in column " + _dinfo . coefNames ( ) [ i ] + ", but found unknown (or missing) categorical factors during scoring." + "\nThe model needs to be built with use_all_factor_levels=true for this to work." ) ; // else just leave all activations at 0 , and since all factor levels were enabled ,
// this is OK ( missing or new categorical doesn ' t activate any levels seen during training )
} } else { int c = ( int ) data [ i ] ; if ( _dinfo . _useAllFactorLevels ) cats [ ncats ++ ] = c + _dinfo . _catOffsets [ i ] ; else if ( c != 0 ) cats [ ncats ++ ] = c + _dinfo . _catOffsets [ i ] - 1 ; } } final int n = data . length ; // data contains only input features - no response is included
for ( ; i < n ; ++ i ) { double d = data [ i ] ; if ( _dinfo . _normMul != null ) d = ( d - _dinfo . _normSub [ i - _dinfo . _cats ] ) * _dinfo . _normMul [ i - _dinfo . _cats ] ; nums [ i - _dinfo . _cats ] = d ; // can be NaN for missing numerical data
} setInput ( seed , nums , ncats , cats ) ; |
public class Matrix { /** * Set a submatrix .
* @ param i0 Initial row index
* @ param i1 Final row index
* @ param c Array of column indices .
* @ param X A ( i0 : i1 , c ( : ) )
* @ throws ArrayIndexOutOfBoundsException Submatrix indices */
public void setMatrix ( int i0 , int i1 , int [ ] c , Matrix X ) { } } | try { for ( int i = i0 ; i <= i1 ; i ++ ) { for ( int j = 0 ; j < c . length ; j ++ ) { A [ i ] [ c [ j ] ] = X . get ( i - i0 , j ) ; } } } catch ( ArrayIndexOutOfBoundsException e ) { throw new ArrayIndexOutOfBoundsException ( "Submatrix indices" ) ; } |
public class Duration { @ Override public List < TemporalUnit > getUnits ( ) { } } | return Collections . < TemporalUnit > unmodifiableList ( Arrays . asList ( SECONDS , NANOS ) ) ; |
public class NetUtils { /** * Get local Ip address . */
public static InetAddress getLocalIPAddress ( ) { } } | Enumeration < NetworkInterface > enumeration = null ; try { enumeration = NetworkInterface . getNetworkInterfaces ( ) ; } catch ( SocketException e ) { e . printStackTrace ( ) ; } if ( enumeration != null ) { while ( enumeration . hasMoreElements ( ) ) { NetworkInterface nif = enumeration . nextElement ( ) ; Enumeration < InetAddress > inetAddresses = nif . getInetAddresses ( ) ; if ( inetAddresses != null ) { while ( inetAddresses . hasMoreElements ( ) ) { InetAddress inetAddress = inetAddresses . nextElement ( ) ; if ( ! inetAddress . isLoopbackAddress ( ) && isIPv4Address ( inetAddress . getHostAddress ( ) ) ) { return inetAddress ; } } } } } return null ; |
public class SolrIndexer { /** * Initialize the plugin
* @ param jsonString
* The JSON configuration to use as a string
* @ throws IndexerException
* if errors occur during initialization */
@ Override public void init ( String jsonString ) throws IndexerException { } } | try { config = new JsonSimpleConfig ( jsonString ) ; init ( ) ; } catch ( IOException e ) { throw new IndexerException ( e ) ; } |
public class SnippetMaps { /** * Compute the set of imports to prepend to a snippet
* @ return a stream of the import needed */
private Stream < ImportSnippet > importSnippets ( ) { } } | return state . keyMap . importKeys ( ) . map ( key -> ( ImportSnippet ) getSnippet ( key ) ) . filter ( sn -> sn != null && state . status ( sn ) . isDefined ( ) ) ; |
public class BomParser { /** * Verifies a CycloneDX BoM conforms to the specification through XML validation .
* @ param file the CycloneDX BoM file to validate
* @ param schemaVersion the schema version to validate against
* @ return a List of SAXParseExceptions . If the size of the list is 0 , validation was successful
* @ since 2.0.0 */
public List < SAXParseException > validate ( File file , CycloneDxSchema . Version schemaVersion ) { } } | final Source xmlFile = new StreamSource ( file ) ; final List < SAXParseException > exceptions = new LinkedList < > ( ) ; try { final Schema schema = getXmlSchema ( schemaVersion ) ; final Validator validator = schema . newValidator ( ) ; validator . setErrorHandler ( new ErrorHandler ( ) { @ Override public void warning ( SAXParseException exception ) { exceptions . add ( exception ) ; } @ Override public void fatalError ( SAXParseException exception ) { exceptions . add ( exception ) ; } @ Override public void error ( SAXParseException exception ) { exceptions . add ( exception ) ; } } ) ; validator . validate ( xmlFile ) ; } catch ( IOException | SAXException e ) { // throw it away
} return exceptions ; |
public class AbstractLimitHandler { /** * Some dialect - specific LIMIT clauses require the maximum last row number ( aka , first _ row _ number + total _ row _ count ) ,
* while others require the maximum returned row count ( the total maximum number of rows to return ) .
* @ param selection the selection criteria for rows .
* @ return The appropriate value to bind into the limit clause . */
protected final int getMaxOrLimit ( RowSelection selection ) { } } | final int firstRow = convertToFirstRowValue ( LimitHelper . getFirstRow ( selection ) ) ; final int lastRow = selection . getMaxRows ( ) ; return useMaxForLimit ( ) ? lastRow + firstRow : lastRow ; |
public class AbstractConfigurableRemote { /** * { @ inheritDoc }
* @ param config { @ inheritDoc }
* @ return { @ inheritDoc }
* @ throws CouldNotPerformException { @ inheritDoc }
* @ throws InterruptedException { @ inheritDoc } */
@ Override public CONFIG applyConfigUpdate ( final CONFIG config ) throws CouldNotPerformException , InterruptedException { } } | synchronized ( CONFIG_LOCK ) { try { this . config = config ; configObservable . notifyObservers ( config ) ; // detect scope change if instance is already active and reinit if needed .
try { if ( isActive ( ) && ! currentScope . equals ( detectScope ( config ) ) ) { currentScope = detectScope ( ) ; reinit ( currentScope ) ; } } catch ( CouldNotPerformException ex ) { throw new CouldNotPerformException ( "Could not verify scope changes!" , ex ) ; } try { notifyConfigUpdate ( config ) ; } catch ( CouldNotPerformException ex ) { ExceptionPrinter . printHistory ( new CouldNotPerformException ( "Could not notify config update!" , ex ) , logger ) ; } return this . config ; } catch ( CouldNotPerformException ex ) { throw new CouldNotPerformException ( "Could not apply config update!" , ex ) ; } } |
public class LoggingAdvisingAppendable { /** * An implementation that only delegates { @ link # append } calls . This has the effect of coercing
* the content to a string by dropping all the strict content directives . */
public static LoggingAdvisingAppendable stringCoercing ( LoggingAdvisingAppendable delegate ) { } } | return new ForwardingLoggingAdvisingAppendable ( delegate ) { @ Override protected void notifyContentDirectionality ( @ Nullable Dir contentDir ) { } @ Override public LoggingAdvisingAppendable enterLoggableElement ( LogStatement statement ) { return this ; } @ Override public LoggingAdvisingAppendable exitLoggableElement ( ) { return this ; } @ Override public LoggingAdvisingAppendable appendLoggingFunctionInvocation ( LoggingFunctionInvocation funCall , ImmutableList < Function < String , String > > escapers ) throws IOException { return append ( escapePlaceholder ( funCall . placeholderValue ( ) , escapers ) ) ; } } ; |
public class Executors { /** * Returns a { @ link Callable } object that , when
* called , runs the given task and returns the given result . This
* can be useful when applying methods requiring a
* { @ code Callable } to an otherwise resultless action .
* @ param task the task to run
* @ param result the result to return
* @ param < T > the type of the result
* @ return a callable object
* @ throws NullPointerException if task null */
public static < T > Callable < T > callable ( Runnable task , T result ) { } } | if ( task == null ) throw new NullPointerException ( ) ; return new RunnableAdapter < T > ( task , result ) ; |
public class AbstractFax4JClientSpi { /** * This function polls the new statues for the provided fax jobs .
* @ param faxJobs
* The fax jobs to poll
* @ return The fax job statues */
public final FaxJobStatus [ ] pollForFaxJobStatues ( FaxJob [ ] faxJobs ) { } } | FaxJobStatus [ ] faxJobStatuses = null ; if ( ( faxJobs != null ) && ( faxJobs . length > 0 ) && ( this . isFaxMonitorEventsSupported ( ) ) ) { faxJobStatuses = this . pollForFaxJobStatuesImpl ( faxJobs ) ; } return faxJobStatuses ; |
public class ChecksumFileSystem { /** * Return true iff file is a checksum file name . */
public static boolean isChecksumFile ( Path file ) { } } | String name = file . getName ( ) ; return name . startsWith ( "." ) && name . endsWith ( ".crc" ) ; |
public class GenericCollectionTypeResolver { /** * Determine the generic element type of the given Collection field .
* @ param collectionField the collection field to introspect
* @ param nestingLevel the nesting level of the target type
* ( typically 1 ; e . g . in case of a List of Lists , 1 would indicate the
* nested List , whereas 2 would indicate the element of the nested List )
* @ param typeIndexesPerLevel Map keyed by nesting level , with each value
* expressing the type index for traversal at that level
* @ return the generic type , or { @ code null } if none
* @ deprecated as of 4.0 , in favor of using { @ link ResolvableType } for arbitrary nesting levels */
@ Deprecated public static Class < ? > getCollectionFieldType ( Field collectionField , int nestingLevel , Map < Integer , Integer > typeIndexesPerLevel ) { } } | return ResolvableType . forField ( collectionField ) . getNested ( nestingLevel , typeIndexesPerLevel ) . asCollection ( ) . resolveGeneric ( ) ; |
public class UserResource { /** * Change current users username .
* The username must be unique
* @ param usernameRequest new username
* @ return 200 if success
* 409 if username is not unique */
@ POST @ Path ( "me/username" ) @ RolesAllowed ( { } } | "ROLE_ADMIN" , "ROLE_USER" } ) public Response changeUsername ( @ Context HttpServletRequest request , UsernameRequest usernameRequest ) { Long userId = ( Long ) request . getAttribute ( OAuth2Filter . NAME_USER_ID ) ; return changeUsername ( userId , usernameRequest ) ; |
public class BsBadWordCB { public BadWordCB acceptPK ( String id ) { } } | assertObjectNotNull ( "id" , id ) ; BsBadWordCB cb = this ; cb . query ( ) . docMeta ( ) . setId_Equal ( id ) ; return ( BadWordCB ) this ; |
public class InetAddresses { /** * Returns the IPv4 address embedded in a 6to4 address .
* @ param ip { @ link Inet6Address } to be examined for embedded IPv4 in 6to4 address
* @ return { @ link Inet4Address } of embedded IPv4 in 6to4 address
* @ throws IllegalArgumentException if the argument is not a valid IPv6 6to4 address */
public static Inet4Address get6to4IPv4Address ( Inet6Address ip ) { } } | Preconditions . checkArgument ( is6to4Address ( ip ) , "Address '%s' is not a 6to4 address." , toAddrString ( ip ) ) ; return getInet4Address ( Arrays . copyOfRange ( ip . getAddress ( ) , 2 , 6 ) ) ; |
public class MultiPolylineMarkers { /** * Is it valid
* @ return */
public boolean isValid ( ) { } } | boolean valid = true ; for ( PolylineMarkers polyline : polylineMarkers ) { valid = polyline . isValid ( ) ; if ( ! valid ) { break ; } } return valid ; |
public class GsonMessageReaderWriterProvider { /** * Taken from com . fasterxml . jackson . jaxrs . json . JacksonJsonProvider */
protected boolean isJsonType ( MediaType mediaType ) { } } | /* * As suggested by Stephen D , there are 2 ways to check : either being as inclusive as possible ( if subtype is " json " ) , or exclusive
* ( major type " application " , minor type " json " ) . Let ' s start with inclusive one , hard to know which major types we should cover
* aside from " application " . */
if ( mediaType != null ) { // Ok : there are also " xxx + json " subtypes , which count as well
String subtype = mediaType . getSubtype ( ) ; return "json" . equalsIgnoreCase ( subtype ) || subtype . endsWith ( "+json" ) ; } /* * Not sure if this can happen ; but it seems reasonable that we can at least produce json without media type ? */
return true ; |
public class RiakClient { /** * Static factory method to create a new client instance .
* This method produces a client connected to the supplied addresses on
* the default ( protocol buffers ) port ( 8087 ) .
* @ param remoteAddresses a list of IP addresses or hostnames
* @ return a new client instance
* @ throws UnknownHostException if a supplied hostname cannot be resolved . */
public static RiakClient newClient ( List < String > remoteAddresses ) throws UnknownHostException { } } | return newClient ( RiakNode . Builder . DEFAULT_REMOTE_PORT , remoteAddresses ) ; |
public class MultiLayerFeaturesList { /** * Feed a map of features to the widget , so it can be built .
* @ param featureMap feature map */
public void setFeatures ( Map < String , List < org . geomajas . layer . feature . Feature > > featureMap ) { } } | MapModel mapModel = mapWidget . getMapModel ( ) ; for ( Entry < String , List < org . geomajas . layer . feature . Feature > > clientLayerId : featureMap . entrySet ( ) ) { Layer < ? > layer = mapModel . getLayer ( clientLayerId . getKey ( ) ) ; if ( null != layer ) { List < org . geomajas . layer . feature . Feature > orgFeatures = clientLayerId . getValue ( ) ; if ( ! orgFeatures . isEmpty ( ) ) { addFeatures ( layer , orgFeatures ) ; } } } |
public class ProcessDefinitionManager { /** * Deletes the timer start events for the given process definition .
* @ param processDefinition the process definition */
protected void deleteTimerStartEventsForProcessDefinition ( ProcessDefinition processDefinition ) { } } | List < JobEntity > timerStartJobs = getJobManager ( ) . findJobsByConfiguration ( TimerStartEventJobHandler . TYPE , processDefinition . getKey ( ) , processDefinition . getTenantId ( ) ) ; ProcessDefinitionEntity latestVersion = getProcessDefinitionManager ( ) . findLatestProcessDefinitionByKeyAndTenantId ( processDefinition . getKey ( ) , processDefinition . getTenantId ( ) ) ; // delete timer start event jobs only if this is the latest version of the process definition .
if ( latestVersion != null && latestVersion . getId ( ) . equals ( processDefinition . getId ( ) ) ) { for ( Job job : timerStartJobs ) { ( ( JobEntity ) job ) . delete ( ) ; } } |
public class NettyClientHandler { /** * Handler for the Channel shutting down . */
@ Override public void channelInactive ( ChannelHandlerContext ctx ) throws Exception { } } | try { logger . fine ( "Network channel is closed" ) ; Status status = Status . UNAVAILABLE . withDescription ( "Network closed for unknown reason" ) ; lifecycleManager . notifyShutdown ( status ) ; try { cancelPing ( lifecycleManager . getShutdownThrowable ( ) ) ; // Report status to the application layer for any open streams
connection ( ) . forEachActiveStream ( new Http2StreamVisitor ( ) { @ Override public boolean visit ( Http2Stream stream ) throws Http2Exception { NettyClientStream . TransportState clientStream = clientStream ( stream ) ; if ( clientStream != null ) { clientStream . transportReportStatus ( lifecycleManager . getShutdownStatus ( ) , false , new Metadata ( ) ) ; } return true ; } } ) ; } finally { lifecycleManager . notifyTerminated ( status ) ; } } finally { // Close any open streams
super . channelInactive ( ctx ) ; if ( keepAliveManager != null ) { keepAliveManager . onTransportTermination ( ) ; } } |
public class CallLogUtil { /** * returns true if call log of specified number exists */
public static boolean containsLogFromNumber ( List < CallLogEntry > logs , String number ) { } } | for ( CallLogEntry log : logs ) { if ( log . number . equals ( number ) ) { return true ; } } return false ; |
public class Documents { /** * Will set the id property on the document IF a mutator exists . Otherwise
* nothing happens .
* @ param document
* @ param id */
public static void setId ( Object document , String id ) { } } | DocumentAccessor d = getAccessor ( document ) ; if ( d . hasIdMutator ( ) ) { d . setId ( document , id ) ; } |
public class AWSOpsWorksClient { /** * Starts a specified instance . For more information , see < a
* href = " http : / / docs . aws . amazon . com / opsworks / latest / userguide / workinginstances - starting . html " > Starting , Stopping ,
* and Rebooting Instances < / a > .
* < b > Required Permissions < / b > : To use this action , an IAM user must have a Manage permissions level for the stack ,
* or an attached policy that explicitly grants permissions . For more information on user permissions , see < a
* href = " http : / / docs . aws . amazon . com / opsworks / latest / userguide / opsworks - security - users . html " > Managing User
* Permissions < / a > .
* @ param startInstanceRequest
* @ return Result of the StartInstance operation returned by the service .
* @ throws ValidationException
* Indicates that a request was not valid .
* @ throws ResourceNotFoundException
* Indicates that a resource was not found .
* @ sample AWSOpsWorks . StartInstance
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / opsworks - 2013-02-18 / StartInstance " target = " _ top " > AWS API
* Documentation < / a > */
@ Override public StartInstanceResult startInstance ( StartInstanceRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeStartInstance ( request ) ; |
public class Configuration { /** * Adds a set of deprecated keys to the global deprecations .
* This method is lockless . It works by means of creating a new
* DeprecationContext based on the old one , and then atomically swapping in
* the new context . If someone else updated the context in between us reading
* the old context and swapping in the new one , we try again until we win the
* race .
* @ param deltas The deprecations to add . */
public static void addDeprecations ( DeprecationDelta [ ] deltas ) { } } | DeprecationContext prev , next ; do { prev = deprecationContext . get ( ) ; next = new DeprecationContext ( prev , deltas ) ; } while ( ! deprecationContext . compareAndSet ( prev , next ) ) ; |
public class SmartObjectMapper { /** * This method behaves similarly to the < code > writeValueAsString ( Object value ) < / code > method
* except that it includes an indentation prefix that will be prepended to each line of the
* resulting string ( except the first line ) .
* @ param value The smart object to be written out as a string .
* @ param indentation The indentation string to be prepended to each line .
* @ return The formatted string .
* @ throws JsonProcessingException The JSON object mapper was not able to serialize the object . */
String writeValueAsString ( Object value , String indentation ) throws JsonProcessingException { } } | PrettyPrinter printer = new BetterPrettyPrinter ( indentation ) . withArrayIndenter ( new DefaultIndenter ( ) ) ; return writer ( printer ) . writeValueAsString ( value ) ; |
public class CmsLock { /** * Returns a set of locked unpublished related resources . < p >
* @ param resName the resource to check the related resources for
* @ param filter the lock filter to use
* @ param lockedResources a set of site relative paths , of locked resources to exclude */
private void addLockedRelatedResources ( String resName , CmsLockFilter filter , Set < String > lockedResources ) { } } | try { // get and iterate over all related resources
Iterator < CmsRelation > itRelations = getCms ( ) . getRelationsForResource ( resName , CmsRelationFilter . TARGETS . filterStrong ( ) . filterIncludeChildren ( ) ) . iterator ( ) ; while ( itRelations . hasNext ( ) ) { CmsRelation relation = itRelations . next ( ) ; CmsResource target = null ; try { target = relation . getTarget ( getCms ( ) , CmsResourceFilter . ALL ) ; } catch ( CmsException e ) { // error reading a resource , should usually never happen
if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( e . getLocalizedMessage ( getLocale ( ) ) , e ) ; } continue ; } // we are interested just in unpublished resources
if ( target . getState ( ) . isUnchanged ( ) ) { continue ; } String targetName = getCms ( ) . getSitePath ( target ) ; // if already selected
if ( lockedResources . contains ( targetName ) || lockedResources . contains ( targetName + "*" ) ) { continue ; } if ( m_lockedResources != null ) { if ( m_lockedResources . contains ( targetName ) || m_lockedResources . contains ( targetName + "*" ) ) { continue ; } } try { org . opencms . lock . CmsLock lock = getCms ( ) . getLock ( targetName ) ; if ( ! lock . isUnlocked ( ) && filter . match ( "/" , lock ) ) { // just add resources that may come in question
lockedResources . add ( targetName + "*" ) ; } } catch ( CmsException e ) { // error reading a lock , should usually never happen
if ( LOG . isErrorEnabled ( ) ) { LOG . error ( e . getLocalizedMessage ( getLocale ( ) ) , e ) ; } continue ; } } } catch ( CmsException e ) { // error reading the relations , should usually never happen
if ( LOG . isErrorEnabled ( ) ) { LOG . error ( e . getLocalizedMessage ( getLocale ( ) ) , e ) ; } } |
public class BooleanUtils {

    /**
     * <p>Performs a logical AND on an array of Booleans.</p>
     *
     * <pre>
     *   BooleanUtils.and(Boolean.TRUE, Boolean.TRUE)                 = Boolean.TRUE
     *   BooleanUtils.and(Boolean.FALSE, Boolean.FALSE)               = Boolean.FALSE
     *   BooleanUtils.and(Boolean.TRUE, Boolean.FALSE)                = Boolean.FALSE
     *   BooleanUtils.and(Boolean.TRUE, Boolean.TRUE, Boolean.TRUE)   = Boolean.TRUE
     *   BooleanUtils.and(Boolean.FALSE, Boolean.FALSE, Boolean.TRUE) = Boolean.FALSE
     *   BooleanUtils.and(Boolean.TRUE, Boolean.FALSE, Boolean.TRUE)  = Boolean.FALSE
     * </pre>
     *
     * @param array an array of {@code Boolean}s
     * @return {@code Boolean.TRUE} if every element is true, {@code Boolean.FALSE} otherwise
     * @throws IllegalArgumentException if {@code array} is {@code null}
     * @throws IllegalArgumentException if {@code array} is empty
     * @throws IllegalArgumentException if {@code array} contains a {@code null}
     * @since 3.0.1
     */
    public static Boolean and(final Boolean... array) {
        if (array == null) {
            throw new IllegalArgumentException("The Array must not be null");
        }
        if (array.length == 0) {
            throw new IllegalArgumentException("Array is empty");
        }
        // Validate and accumulate in a single pass. This replaces the previous
        // exception-driven flow (catching NullPointerException thrown by
        // ArrayUtils.toPrimitive) with an explicit per-element null check.
        boolean result = true;
        for (final Boolean element : array) {
            if (element == null) {
                throw new IllegalArgumentException("The array must not contain any null elements");
            }
            result = result && element.booleanValue();
        }
        return result ? Boolean.TRUE : Boolean.FALSE;
    }
}
public class MSPDIWriter { /** * Writes task baseline data .
* @ param xmlTask MSPDI task
* @ param mpxjTask MPXJ task */
private void writeTaskBaselines ( Project . Tasks . Task xmlTask , Task mpxjTask ) { } } | Project . Tasks . Task . Baseline baseline = m_factory . createProjectTasksTaskBaseline ( ) ; boolean populated = false ; Number cost = mpxjTask . getBaselineCost ( ) ; if ( cost != null && cost . intValue ( ) != 0 ) { populated = true ; baseline . setCost ( DatatypeConverter . printCurrency ( cost ) ) ; } Duration duration = mpxjTask . getBaselineDuration ( ) ; if ( duration != null && duration . getDuration ( ) != 0 ) { populated = true ; baseline . setDuration ( DatatypeConverter . printDuration ( this , duration ) ) ; baseline . setDurationFormat ( DatatypeConverter . printDurationTimeUnits ( duration , false ) ) ; } Date date = mpxjTask . getBaselineFinish ( ) ; if ( date != null ) { populated = true ; baseline . setFinish ( date ) ; } date = mpxjTask . getBaselineStart ( ) ; if ( date != null ) { populated = true ; baseline . setStart ( date ) ; } duration = mpxjTask . getBaselineWork ( ) ; if ( duration != null && duration . getDuration ( ) != 0 ) { populated = true ; baseline . setWork ( DatatypeConverter . printDuration ( this , duration ) ) ; } if ( populated ) { baseline . setNumber ( BigInteger . ZERO ) ; xmlTask . getBaseline ( ) . add ( baseline ) ; } for ( int loop = 1 ; loop <= 10 ; loop ++ ) { baseline = m_factory . createProjectTasksTaskBaseline ( ) ; populated = false ; cost = mpxjTask . getBaselineCost ( loop ) ; if ( cost != null && cost . intValue ( ) != 0 ) { populated = true ; baseline . setCost ( DatatypeConverter . printCurrency ( cost ) ) ; } duration = mpxjTask . getBaselineDuration ( loop ) ; if ( duration != null && duration . getDuration ( ) != 0 ) { populated = true ; baseline . setDuration ( DatatypeConverter . printDuration ( this , duration ) ) ; baseline . setDurationFormat ( DatatypeConverter . printDurationTimeUnits ( duration , false ) ) ; } date = mpxjTask . 
getBaselineFinish ( loop ) ; if ( date != null ) { populated = true ; baseline . setFinish ( date ) ; } date = mpxjTask . getBaselineStart ( loop ) ; if ( date != null ) { populated = true ; baseline . setStart ( date ) ; } duration = mpxjTask . getBaselineWork ( loop ) ; if ( duration != null && duration . getDuration ( ) != 0 ) { populated = true ; baseline . setWork ( DatatypeConverter . printDuration ( this , duration ) ) ; } if ( populated ) { baseline . setNumber ( BigInteger . valueOf ( loop ) ) ; xmlTask . getBaseline ( ) . add ( baseline ) ; } } |
public class ValidatorUtil { /** * 验证方法参数是否符合规则
* @ param instance 方法所在的类的实例
* @ param method 方法实例
* @ param params 参数 */
public static void validateParameters ( Object instance , Method method , Object [ ] params ) { } } | Assert . notNull ( instance , "instance不能为null" ) ; Assert . notNull ( method , "method不能为null" ) ; Set < ConstraintViolation < Object > > constraintViolations = executableValidator . validateParameters ( instance , method , params ) ; check ( constraintViolations ) ; |
public class FieldPredicates {

    /**
     * Creates a predicate that checks whether a field is annotated with at
     * least one of the given annotation types.
     *
     * @param annotations annotation types to look for on the field
     * @return a predicate that is true for any field carrying one of the given annotations
     */
    @SafeVarargs // the varargs array is only read, never stored or exposed
    public static Predicate<Field> isAnnotatedWith(Class<? extends Annotation>... annotations) {
        return field -> {
            // First matching annotation wins.
            for (Class<? extends Annotation> annotation : annotations) {
                if (field.isAnnotationPresent(annotation)) {
                    return true;
                }
            }
            return false;
        };
    }
}
public class RadialMenuItem { /** * Renders this menu item at the specified location by delegating to
 * { @ code paint ( gfx , x , y , menu ) } . */
public void render ( Component host , RadialMenu menu , Graphics2D gfx , int x , int y ) { } } | // NOTE(review): 'host' is accepted but not used by this implementation —
// presumably kept for interface compatibility ; confirm against the
// declaring interface .
paint ( gfx , x , y , menu ) ;
public class VisualizeStereoDisparity { /** * Removes distortion and rectifies images . */
private void rectifyInputImages ( ) { } } | // get intrinsic camera calibration matrices
DMatrixRMaj K1 = PerspectiveOps . pinholeToMatrix ( calib . left , ( DMatrixRMaj ) null ) ; DMatrixRMaj K2 = PerspectiveOps . pinholeToMatrix ( calib . right , ( DMatrixRMaj ) null ) ; // compute rectification matrices
// the left camera is the reference frame ( identity Se3 ) ; the right camera
// pose is obtained by inverting rightToLeft
rectifyAlg . process ( K1 , new Se3_F64 ( ) , K2 , calib . getRightToLeft ( ) . invert ( null ) ) ; DMatrixRMaj rect1 = rectifyAlg . getRect1 ( ) ; DMatrixRMaj rect2 = rectifyAlg . getRect2 ( ) ; rectK = rectifyAlg . getCalibrationMatrix ( ) ; rectR = rectifyAlg . getRectifiedRotation ( ) ; // adjust view to maximize viewing area while not including black regions
RectifyImageOps . allInsideLeft ( calib . left , rect1 , rect2 , rectK ) ; // compute transforms to apply rectify the images
leftRectToPixel = transformRectToPixel ( calib . left , rect1 ) ; ImageType < T > imageType = ImageType . single ( activeAlg . getInputType ( ) ) ; FMatrixRMaj rect1_F32 = new FMatrixRMaj ( 3 , 3 ) ; // TODO simplify code some how
// 32-bit copies of the rectification homographies for the image distort ops
FMatrixRMaj rect2_F32 = new FMatrixRMaj ( 3 , 3 ) ; ConvertMatrixData . convert ( rect1 , rect1_F32 ) ; ConvertMatrixData . convert ( rect2 , rect2_F32 ) ; ImageDistort < T , T > distortRect1 = RectifyImageOps . rectifyImage ( calib . left , rect1_F32 , BorderType . SKIP , imageType ) ; ImageDistort < T , T > distortRect2 = RectifyImageOps . rectifyImage ( calib . right , rect2_F32 , BorderType . SKIP , imageType ) ; // rectify and undo distortion
distortRect1 . apply ( inputLeft , rectLeft ) ; distortRect2 . apply ( inputRight , rectRight ) ; rectifiedImages = true ; |
public class LifecycleChaincodePackage { /** * Lifecycle chaincode package as bytes
* @ return Lifecycle chaincode package as bytes . */
public byte [ ] getAsBytes ( ) { } } | byte [ ] ret = new byte [ pBytes . length ] ; System . arraycopy ( pBytes , 0 , ret , 0 , pBytes . length ) ; // make sure we keep our own copy .
return ret ; |
public class EfficientViewHolder { /** * Helper for { @ link EfficientCacheView # findViewByIdEfficient ( int , int ) } :
 * looks up the view with { @ code id } inside the child identified by { @ code parentId } .
 * @ param parentId id of the parent view to search under
 * @ param id id of the view to find
 * @ return the resolved view , or null if not found ( method is annotated @ Nullable ) */
@ Nullable public < T extends View > T findViewByIdEfficient ( int parentId , int id ) { } } | // Pure delegation ; all lookup caching lives in mCacheView .
return mCacheView . findViewByIdEfficient ( parentId , id ) ;
public class Comparator {

    /**
     * Compares two version strings numerically rather than lexicographically.
     * Use this instead of {@code String.compareTo()} so that, e.g.,
     * {@code "1.10"} compares greater than {@code "1.6"}.
     *
     * @param str1 a string of ordinal numbers separated by decimal points
     * @param str2 a string of ordinal numbers separated by decimal points
     * @return a negative integer if str1 is numerically less than str2, a
     *         positive integer if greater, and zero if they are numerically
     *         equal (or contain a non-numeric ordinal at the first difference)
     * @note It does not treat "1.10" as equal to "1.10.0" — the longer string
     *       is considered greater.
     */
    public static Integer versionCompareNumerically(String str1, String str2) {
        final String[] vals1 = str1.split("\\.");
        final String[] vals2 = str2.split("\\.");
        // Skip the common prefix of textually equal ordinals.
        int i = 0;
        while (i < vals1.length && i < vals2.length && vals1[i].equals(vals2[i])) {
            i++;
        }
        try {
            if (i < vals1.length && i < vals2.length) {
                // First differing ordinal decides. Integer.compare/parseInt
                // avoid the boxing of the former valueOf().compareTo() form.
                return Integer.signum(Integer.compare(Integer.parseInt(vals1[i]), Integer.parseInt(vals2[i])));
            }
            // The strings are equal, or one is a prefix of the other
            // (e.g. "1.2.3" = "1.2.3", "1.2.3" < "1.2.3.4").
            return Integer.signum(vals1.length - vals2.length);
        } catch (NumberFormatException e) {
            // Non-numeric ordinal: the versions cannot be compared numerically.
            return 0;
        }
    }
}
public class FSNamesystem { /** * Modify ( block - - > datanode ) map . Remove block from set of
* needed replications if this takes care of the problem .
* @ param block the block as reported by the datanode ( carries the reported length and generation stamp )
* @ param node the datanode on which the replica is stored
* @ param delNodeHint preferred replica to remove if the block turns out over - replicated , may be null
* @ param initialBlockReport true when this addition originates from an initial block report
* @ param worker non - null only when invoked from a parallel initial block report worker thread
* @ return false if the reported replica was rejected or deferred , true otherwise
* @ throws IOException */
final boolean addStoredBlockInternal ( Block block , DatanodeDescriptor node , DatanodeDescriptor delNodeHint , boolean initialBlockReport , InitialReportWorker worker ) throws IOException { } } | InjectionHandler . processEvent ( InjectionEvent . FSNAMESYSTEM_ADDSTORED_BLOCK , node ) ; // either this is a direct call protected by writeLock , or
// this is parallel initial block report
assert ( hasWriteLock ( ) || ( worker != null ) ) ; // the call is invoked by the initial block report worker thread
final boolean parallelInitialBlockReport = ( worker != null ) ; BlockInfo storedBlock = blocksMap . getStoredBlock ( block ) ; // handle the case for standby when we receive a block
// that we don ' t know about yet - this is for block reports ,
// where we do not explicitly check this beforehand
if ( storedBlock == null && nameNode . shouldRetryAbsentBlocks ( ) // standby
&& nameNode . shouldRetryAbsentBlock ( block , storedBlock ) ) { // block should be retried
return false ; } if ( storedBlock == null ) { // then we need to do some special processing .
// fall back to a generation - stamp - agnostic lookup , then decide how to
// treat the mismatched - GS replica
storedBlock = blocksMap . getStoredBlockWithoutMatchingGS ( block ) ; if ( storedBlock == null ) { rejectAddStoredBlock ( new Block ( block ) , node , "Block not in blockMap with any generation stamp" , initialBlockReport , parallelInitialBlockReport ) ; return false ; } INodeFile inode = storedBlock . getINode ( ) ; if ( inode == null ) { rejectAddStoredBlock ( new Block ( block ) , node , "Block does not correspond to any file" , initialBlockReport , parallelInitialBlockReport ) ; return false ; } boolean reportedOldGS = block . getGenerationStamp ( ) < storedBlock . getGenerationStamp ( ) ; boolean reportedNewGS = block . getGenerationStamp ( ) > storedBlock . getGenerationStamp ( ) ; boolean underConstruction = inode . isUnderConstruction ( ) ; boolean isLastBlock = inode . getLastBlock ( ) != null && inode . getLastBlock ( ) . getBlockId ( ) == block . getBlockId ( ) ; // Don ' t add blocks to the DN when they ' re part of the in - progress last block
// and have an inconsistent generation stamp . Instead just add them to targets
// for recovery purposes . They will get added to the node when
// commitBlockSynchronization runs
if ( reportedOldGS || reportedNewGS ) { // mismatched generation stamp
if ( underConstruction && isLastBlock ) { NameNode . stateChangeLog . info ( "BLOCK* NameSystem.addStoredBlock: " + "Targets updated: block " + block + " on " + node . getName ( ) + " is added as a target for block " + storedBlock + " with size " + block . getNumBytes ( ) ) ; lockParallelBRLock ( parallelInitialBlockReport ) ; try { ( ( INodeFileUnderConstruction ) inode ) . addTarget ( node , block . getGenerationStamp ( ) ) ; } finally { unlockParallelBRLock ( parallelInitialBlockReport ) ; } } else { rejectAddStoredBlock ( new Block ( block ) , node , "Reported block has mismatched generation stamp " + "but is not the last block of " + "an under-construction file. (current generation is " + storedBlock . getGenerationStamp ( ) + ")" , initialBlockReport , parallelInitialBlockReport ) ; } return false ; } } INodeFile fileINode = storedBlock . getINode ( ) ; if ( fileINode == null ) { rejectAddStoredBlock ( new Block ( block ) , node , "Block does not correspond to any file" , initialBlockReport , parallelInitialBlockReport ) ; return false ; } assert storedBlock != null : "Block must be stored by now" ; // add block to the data - node
boolean added ; if ( ! parallelInitialBlockReport ) { // full insert
added = node . addBlock ( storedBlock ) ; } else { // insert DN descriptor into stored block
int index = node . addBlockWithoutInsertion ( storedBlock ) ; added = index >= 0 ; // inform the worker so it can insert it into its local list
worker . setCurrentStoredBlock ( storedBlock , index ) ; } // Is the block being reported the last block of an underconstruction file ?
boolean blockUnderConstruction = false ; if ( fileINode . isUnderConstruction ( ) ) { Block last = fileINode . getLastBlock ( ) ; if ( last == null ) { // This should never happen , but better to handle it properly than to throw
// an NPE below .
LOG . error ( "Null blocks for reported block=" + block + " stored=" + storedBlock + " inode=" + fileINode ) ; return false ; } blockUnderConstruction = last . equals ( storedBlock ) ; } // block = = storedBlock when this addStoredBlock is the result of a block report
// reconcile the reported replica length with the length recorded in blocksMap
if ( block . getNumBytes ( ) != storedBlock . getNumBytes ( ) ) { if ( ! checkBlockSize ( block , storedBlock . getINode ( ) ) ) { try { // New replica has an invalid block size . Mark it as corrupted .
LOG . warn ( "Mark new replica " + block + " from " + node . getName ( ) + "as corrupt because its length " + block . getNumBytes ( ) + " is not valid" ) ; markBlockAsCorrupt ( block , node , parallelInitialBlockReport ) ; } catch ( IOException e ) { LOG . warn ( "Error in deleting bad block " + block + e ) ; } } else { long cursize = storedBlock . getNumBytes ( ) ; if ( cursize == 0 ) { storedBlock . setNumBytes ( block . getNumBytes ( ) ) ; } else if ( cursize != block . getNumBytes ( ) ) { String logMsg = "Inconsistent size for block " + block + " reported from " + node . getName ( ) + " current size is " + cursize + " reported size is " + block . getNumBytes ( ) ; // If the block is still under construction this isn ' t likely
// to be a problem , so just log at INFO level .
if ( blockUnderConstruction ) { if ( cursize != 1 ) { // cursize = = 1 implies block was fsynced
LOG . info ( logMsg ) ; } } else { LOG . warn ( logMsg ) ; } try { if ( cursize > block . getNumBytes ( ) && ! blockUnderConstruction ) { // new replica is smaller in size than existing block .
// Mark the new replica as corrupt .
LOG . warn ( "Mark new replica " + block + " from " + node . getName ( ) + "as corrupt because its length is shorter than existing ones" ) ; markBlockAsCorrupt ( block , node , parallelInitialBlockReport ) ; } else { // new replica is larger in size than existing block .
if ( ! blockUnderConstruction ) { // Mark pre - existing replicas as corrupt .
int numNodes = blocksMap . numNodes ( block ) ; int count = 0 ; DatanodeDescriptor nodes [ ] = new DatanodeDescriptor [ numNodes ] ; Iterator < DatanodeDescriptor > it = blocksMap . nodeIterator ( block ) ; for ( ; it != null && it . hasNext ( ) ; ) { DatanodeDescriptor dd = it . next ( ) ; if ( ! dd . equals ( node ) ) { nodes [ count ++ ] = dd ; } } for ( int j = 0 ; j < count ; j ++ ) { LOG . warn ( "Mark existing replica " + block + " from " + node . getName ( ) + " as corrupt because its length is shorter than the new one" ) ; markBlockAsCorrupt ( block , nodes [ j ] , parallelInitialBlockReport ) ; } } // change the size of block in blocksMap
storedBlock . setNumBytes ( block . getNumBytes ( ) ) ; } } catch ( IOException e ) { LOG . warn ( "Error in deleting bad block " + block + e ) ; } } } block = storedBlock ; } else { block = storedBlock ; } assert storedBlock == block : "Block must be stored by now" ; int curReplicaDelta = 0 ; if ( added ) { curReplicaDelta = 1 ; } else { NameNode . stateChangeLog . info ( "BLOCK* NameSystem.addStoredBlock: " + "Redundant addStoredBlock request received for " + block + " on " + node . getName ( ) + " size " + block . getNumBytes ( ) ) ; } // filter out containingNodes that are marked for decommission .
NumberReplicas num = countNodes ( storedBlock ) ; int numCurrentReplica = 0 ; int numLiveReplicas = num . liveReplicas ( ) ; boolean popReplQueuesBefore = isPopulatingReplQueuesInternal ( ) ; if ( ! popReplQueuesBefore ) { // if we haven ' t populated the replication queues
// then use a cheaper method to count
// we only need live and decommissioned replicas .
numCurrentReplica = numLiveReplicas + num . decommissionedReplicas ( ) ; } else { // count live & decommissioned replicas
numCurrentReplica = numLiveReplicas + pendingReplications . getNumReplicas ( block ) ; } if ( blockUnderConstruction ) { lockParallelBRLock ( parallelInitialBlockReport ) ; try { added = ( ( INodeFileUnderConstruction ) fileINode ) . addTarget ( node , block . getGenerationStamp ( ) ) ; } finally { unlockParallelBRLock ( parallelInitialBlockReport ) ; } } // check whether safe replication is reached for the block . Do not increment
// safe block count if this block has already been reported by the same
// datanode before . We used the check for ' added ' to achieve this .
if ( added && isInSafeModeInternal ( ) ) { int numSafeReplicas = numLiveReplicas + num . decommissionedReplicas ( ) ; if ( blockUnderConstruction ) { // If this is the last block under construction then all the replicas so
// far have been added to the " targets " field of
// INodeFileUnderConstruction and hence lookup the replication factor
// from there .
// In this code path , a " valid " replica is added to the block , and we
// only count " valid " replica to be safe replica . It is possible that
// earlier a replica with higher generation stamp has been added and a
// false was returned . In that case , if we only count number of targets
// we will miss to count in the case that we first accepted more than
// minReplication ' s number of higher generation stamp blocks but later
// blocks with " valid " generation stamps themselves reached minimum
// replication again .
DatanodeDescriptor [ ] validDNs = ( ( INodeFileUnderConstruction ) fileINode ) . getValidTargets ( ) ; numSafeReplicas = ( validDNs != null ) ? validDNs . length : 0 ; } if ( ! parallelInitialBlockReport && added ) { // regular add stored block
incrementSafeBlockCount ( numSafeReplicas , initialBlockReport ) ; } else if ( parallelInitialBlockReport && added ) { // for parallel initial block report increment local worker variable
worker . incrementSafeBlockCount ( numSafeReplicas ) ; } } if ( ! popReplQueuesBefore && isPopulatingReplQueuesInternal ( ) ) { // we have just initialized the repl queues
// must recompute num
num = countNodes ( storedBlock ) ; numLiveReplicas = num . liveReplicas ( ) ; numCurrentReplica = numLiveReplicas + pendingReplications . getNumReplicas ( block ) ; } // if file is being actively written to and it is the last block ,
// then do not check replication - factor here .
if ( blockUnderConstruction ) { return true ; } // do not handle mis - replicated blocks during start up
if ( ! isPopulatingReplQueuesInternal ( ) ) { return true ; } if ( curReplicaDelta == 0 ) { return true ; } // handle underReplication
short blockReplication = fileINode . getBlockReplication ( storedBlock ) ; updateNeededReplicationQueue ( storedBlock , curReplicaDelta , numCurrentReplica , num . decommissionedReplicas , node , blockReplication ) ; // handle over - replication
if ( numCurrentReplica > blockReplication ) { // Put block into a queue and handle excess block asyncly
if ( delNodeHint == null || node == delNodeHint ) { overReplicatedBlocks . add ( block ) ; } else { overReplicatedBlocks . add ( new OverReplicatedBlock ( block , node , delNodeHint ) ) ; } } // If the file replication has reached desired value
// we can remove any corrupt replicas the block may have
int corruptReplicasCount = corruptReplicas . numCorruptReplicas ( block ) ; int numCorruptNodes = num . corruptReplicas ( ) ; if ( numCorruptNodes != corruptReplicasCount ) { LOG . warn ( "Inconsistent number of corrupt replicas for " + block + "blockMap has " + numCorruptNodes + " but corrupt replicas map has " + corruptReplicasCount ) ; } if ( ( corruptReplicasCount > 0 ) && ( numLiveReplicas >= blockReplication ) ) { invalidateCorruptReplicas ( block ) ; } return true ; |
public class PrefHelper { /** * Set the given Branch Key to preference . Clears the preference data if the key is a new key .
* @ param key A { @ link String } representing Branch Key .
* @ return A { @ link Boolean } which is true if the key set is a new key . On Setting a new key need to clear all preference items . */
public boolean setBranchKey ( String key ) { } } | Branch_Key = key ; String currentBranchKey = getString ( KEY_BRANCH_KEY ) ; if ( key == null || currentBranchKey == null || ! currentBranchKey . equals ( key ) ) { clearPrefOnBranchKeyChange ( ) ; setString ( KEY_BRANCH_KEY , key ) ; return true ; } return false ; |
public class JavaIoFileSystemAccess { /** * { @ inheritDoc } */
@ Override public boolean isFile ( String path , String outputConfigurationName ) throws RuntimeIOException { } } | File file = getFile ( path , outputConfigurationName ) ; return file != null && file . exists ( ) && file . isFile ( ) ; |
public class SarlAnnotationTypeBuilderImpl { /** * Initialize the Ecore element when inner type declaration . */
public void eInit ( XtendTypeDeclaration container , String name , IJvmTypeProvider context ) { } } | if ( this . sarlAnnotationType == null ) { this . container = container ; this . sarlAnnotationType = SarlFactory . eINSTANCE . createSarlAnnotationType ( ) ; container . getMembers ( ) . add ( this . sarlAnnotationType ) ; if ( ! Strings . isEmpty ( name ) ) { this . sarlAnnotationType . setName ( name ) ; } } |
public class ConstCheck { /** * Reports a reassigned constant error . */
void reportError ( Node n , Var var , String name ) { } } | JSDocInfo info = NodeUtil . getBestJSDocInfo ( n ) ; if ( info == null || ! info . getSuppressions ( ) . contains ( "const" ) ) { Node declNode = var . getNode ( ) ; String declaredPosition = declNode . getSourceFileName ( ) + ":" + declNode . getLineno ( ) ; compiler . report ( JSError . make ( n , CONST_REASSIGNED_VALUE_ERROR , name , declaredPosition ) ) ; } |
public class CertificateCreator { /** * Convenience method for the most common case of certificate duplication .
 * This method will not add any custom extensions and won ' t copy the extensions 2.5.29.8 : Issuer Alternative Name ,
 * 2.5.29.18 : Issuer Alternative Name 2 , 2.5.29.31 : CRL Distribution Point or 1.3.6.1.5.5.7.1.1 : Authority Info Access , if they are present .
 * @ param originalCert the certificate to duplicate
 * @ param newPubKey the public key to embed in the duplicate
 * @ param caCert the CA certificate acting as issuer of the duplicate
 * @ param caPrivateKey the CA private key used to sign the duplicate
 * @ return the duplicated certificate , re - keyed and re - signed
 * @ throws CertificateParsingException
 * @ throws SignatureException
 * @ throws InvalidKeyException
 * @ throws CertificateExpiredException
 * @ throws CertificateNotYetValidException
 * @ throws CertificateException
 * @ throws NoSuchAlgorithmException
 * @ throws NoSuchProviderException */
public static X509Certificate mitmDuplicateCertificate ( final X509Certificate originalCert , final PublicKey newPubKey , final X509Certificate caCert , final PrivateKey caPrivateKey ) throws CertificateParsingException , SignatureException , InvalidKeyException , CertificateExpiredException , CertificateNotYetValidException , CertificateException , NoSuchAlgorithmException , NoSuchProviderException { } } | // Delegates to the full overload with the default set of OIDs to skip .
return mitmDuplicateCertificate ( originalCert , newPubKey , caCert , caPrivateKey , clientCertDefaultOidsNotToCopy ) ;
public class Application { /** * Return the singleton < code > ELResolver < / code > instance to be used for all EL resolution . This is actually an
* instance of < code > CompositeELResolver < / code > that must contain the following ELResolver instances in the
* following order :
* < ul >
* < li > < code > ELResolver < / code > instances declared using the & lt ; el - resolver & gt ; element in the application
* configuration resources . < / li >
* < li > An < code > implementation < / code > that wraps the head of the legacy VariableResolver chain , as per section
* < code > VariableResolver ChainWrapper < / code > in Chapter 5 in the spec document . < / li >
* < li > An < code > implementation < / code > that wraps the head of the legacy PropertyResolver chain , as per section
* < code > PropertyResolver ChainWrapper < / code > in Chapter 5 in the spec document . < / li >
* < li > Any < code > ELResolver < / code > instances added by calls to
* < code > { @ link # addELResolver ( javax . el . ELResolver ) } < / code > . < / li >
* < li > The default implementation throws < code > UnsupportedOperationException < / code > and is provided for the sole
* purpose of not breaking existing applications that extend < code > { @ link Application } < / code > . < / li >
* < / ul >
* @ since 1.2 */
public ELResolver getELResolver ( ) { } } | Application application = getMyfacesApplicationInstance ( ) ; if ( application != null ) { return application . getELResolver ( ) ; } throw new UnsupportedOperationException ( ) ; |
public class EJBOperations { /** * Creates a new { @ link JavaResource } in the specified project . If no project is available , use
* { @ link EJBOperations # newEntity ( DirectoryResource , String , String , GenerationType ) }
* @ param project the current project in which to create the bean . Must not be null
* @ param ejbName the name of the bean
* @ param targetPackage the package of the bean to be created
* @ param ejbType the { @ link EJBType } chosen for this bean
* @ param serializable whether or not the EJB should be serializable
* @ param destType JMS destination type
* @ return the created java resource
* @ throws FileNotFoundException if something wrong happens while saving the { @ link JavaClass } */
public JavaResource newEJB ( final Project project , final String ejbName , final String targetPackage , final EJBType ejbType , final boolean serializable ) throws FileNotFoundException { } } | final JavaSourceFacet java = project . getFacet ( JavaSourceFacet . class ) ; JavaClassSource javaClass = createJavaClass ( ejbName , targetPackage , ejbType , serializable ) ; return java . saveJavaSource ( javaClass ) ; |
public class WsLocationAdminImpl { /** * ( non - Javadoc )
 * @ see com . ibm . wsspi . kernel . service . location . WsLocationAdmin # addLocation ( java . lang . String , java . lang . String ) */
@ Override public WsResource addLocation ( String fileName , String symbolicName ) { } } | // Builds a new symbolic root anchored at the shared commonRoot .
// NOTE(review): no duplicate check is visible here — presumably the
// SymbolicRootResource constructor registers itself ; confirm if dedup is expected .
return new SymbolicRootResource ( fileName , symbolicName , commonRoot ) ;
public class FactorTemplateList { /** * Gets the number of observation function features . */
public int getNumObsFeats ( ) { } } | int count = 0 ; for ( FactorTemplate ft : fts ) { count += ft . getAlphabet ( ) . size ( ) ; } return count ; |
public class JdbcEndpointConfigurationParser { /** * Parse endpoint configuration .
* @ param endpointConfiguration bean definition builder for the endpoint configuration being assembled
* @ param element XML element carrying the jdbc server attributes */
public void parseEndpointConfiguration ( BeanDefinitionBuilder endpointConfiguration , Element element ) { } } | JdbcServerConfiguration serverConfiguration = new JdbcServerConfiguration ( ) ; if ( element . hasAttribute ( "host" ) ) { serverConfiguration . setHost ( element . getAttribute ( "host" ) ) ; } if ( element . hasAttribute ( "port" ) ) { serverConfiguration . setPort ( Integer . valueOf ( element . getAttribute ( "port" ) ) ) ; } if ( element . hasAttribute ( "database-name" ) ) { serverConfiguration . setDatabaseName ( element . getAttribute ( "database-name" ) ) ; } if ( element . hasAttribute ( "max-connections" ) ) { serverConfiguration . setMaxConnections ( Integer . valueOf ( element . getAttribute ( "max-connections" ) ) ) ; } endpointConfiguration . addPropertyValue ( "serverConfiguration" , serverConfiguration ) ; BeanDefinitionParserUtils . setPropertyValue ( endpointConfiguration , element . getAttribute ( "timeout" ) , "timeout" ) ; BeanDefinitionParserUtils . setPropertyValue ( endpointConfiguration , element . getAttribute ( "auto-connect" ) , "autoConnect" ) ; BeanDefinitionParserUtils . setPropertyValue ( endpointConfiguration , element . getAttribute ( "auto-create-statement" ) , "autoCreateStatement" ) ; BeanDefinitionParserUtils . setPropertyValue ( endpointConfiguration , element . getAttribute ( "auto-transaction-handling" ) , "autoTransactionHandling" ) ;
// NOTE(review): unlike the sibling attributes above , "auto-handle-queries" is wired
// with setPropertyReference ( a bean reference ) rather than setPropertyValue —
// confirm this is intentional and not a copy / paste slip .
BeanDefinitionParserUtils . setPropertyReference ( endpointConfiguration , element . getAttribute ( "auto-handle-queries" ) , "autoHandleQueries" ) ; BeanDefinitionParserUtils . setPropertyValue ( endpointConfiguration , element . getAttribute ( "polling-interval" ) , "pollingInterval" ) ; BeanDefinitionParserUtils . setPropertyReference ( endpointConfiguration , element . getAttribute ( "message-correlator" ) , "correlator" ) ;
public class Path { /** Pushes the seed attribute through the grammar in the direction given by
 * { @ code move } ( DOWN / UP / DOWNS / . . . ) and , for every reachable attribute whose
 * symbol is in { @ code targetSymbols } , appends a reduced buffer to
 * { @ code resultBuffers } .
 * NOTE(review): for DOWN / UP the border is widened to all grammar symbols ;
 * otherwise { @ code rawBorder } is used as - is — confirm this matches the callers ' intent . */
private static void transport ( Grammar grammar , Attribute seed , int move , IntBitSet rawBorder , IntBitSet targetSymbols , List < AgBuffer > resultBuffers ) { } } | AgBuffer commulated ; IntBitSet border ; boolean down ; List < Attribute > attrs ; int i ; int max ; Attribute dest ; AgBuffer tmp ; int card ; AgBuffer buffer ; Attribute attr ; Occurrence occ ; if ( move == Path . DOWN || move == Path . UP ) { border = new IntBitSet ( ) ; grammar . getSymbols ( border ) ; } else { border = rawBorder ; } down = ( move == Path . DOWN || move == Path . DOWNS ) ; if ( down && ! border . contains ( seed . symbol ) ) { // TODO : needed for \ Block / / VariableReference in compiler examples
border . add ( seed . symbol ) ; } commulated = Pusher . run ( down , seed , border , grammar ) ; attrs = commulated . getTransportAttributes ( ) ; max = attrs . size ( ) ; for ( i = 0 ; i < max ; i ++ ) { dest = attrs . get ( i ) ; if ( dest != seed && targetSymbols . contains ( dest . symbol ) ) { tmp = commulated . createReduced ( dest ) ; occ = null ; if ( down ) { card = tmp . isDownOptional ( ) ? Type . OPTION : Type . VALUE ; card = Type . cardCard ( card , seed . type . card ) ; } else { occ = tmp . calcOccurrence ( dest ) ; card = Type . cardCard ( occ . card ( ) , seed . type . card ) ; if ( seed . type . card == Type . SEQUENCE || occ . max == Occurrence . UNBOUNDED ) { occ = null ; // don ' t split
} } if ( occ == null ) { buffer = new AgBuffer ( ( Attribute ) null ) ; attr = buffer . cloneAttributes ( tmp , new Type ( seed . type . type , card ) , dest ) ; buffer . setStart ( attr ) ; resultBuffers . add ( buffer ) ; } else { createSplitted ( tmp , seed . type . type , occ , dest , resultBuffers ) ; } } } |
public class CodeAttribute { /** * Adds an exception - handler entry for the given byte - code range and handler .
 * ( Previous javadoc incorrectly said " Returns the exceptions " . )
 * @ param type class constant of the catch type ; its constant - pool index is stored
 * @ param start start of the protected code range
 * @ param end end of the protected code range
 * @ param handler offset of the handler code */
public void addException ( ClassConstant type , int start , int end , int handler ) { } } | _exceptions . add ( new ExceptionItem ( type . getIndex ( ) , start , end , handler ) ) ;
public class HorizontalRecordsProcessor { /** * Merges the parent cells of nested records .
* @ param sheet the sheet being written
* @ param mergedSize the number of cells to merge
* @ param valueCellPositions addresses of the cells at the merge start positions */
private void processSavingNestedMergedRecord ( final Sheet sheet , final int mergedSize , final List < CellPosition > valueCellPositions ) { } } | if ( mergedSize <= 1 ) { return ; } // when nested , copy the style of the upper cell and merge
for ( CellPosition position : valueCellPositions ) { Cell valueCell = POIUtils . getCell ( sheet , position ) ; if ( valueCell == null ) { continue ; } final CellStyle style = valueCell . getCellStyle ( ) ; // copy the upper cell ' s style onto the cells being merged ;
// the style is needed later , e . g . when rows are inserted
for ( int i = 1 ; i < mergedSize ; i ++ ) { Cell mergedCell = POIUtils . getCell ( sheet , position . getColumn ( ) , position . getRow ( ) + i ) ; mergedCell . setCellStyle ( style ) ; mergedCell . setCellType ( CellType . BLANK ) ; } final CellRangeAddress range = new CellRangeAddress ( position . getRow ( ) , position . getRow ( ) + mergedSize - 1 , position . getColumn ( ) , position . getColumn ( ) ) ; // if already - merged cells exist in the range , remove those merges first
for ( int rowIdx = range . getFirstRow ( ) ; rowIdx <= range . getLastRow ( ) ; rowIdx ++ ) { CellRangeAddress r = POIUtils . getMergedRegion ( sheet , rowIdx , position . getColumn ( ) ) ; if ( r != null ) { POIUtils . removeMergedRange ( sheet , r ) ; } } sheet . addMergedRegion ( range ) ; } |
public class HistoryFilterPlusDialog {

    /**
     * Lazily initializes the main filter panel (jPanel2): a five-column grid of
     * labelled scroll lists (methods, codes, tags, alerts, URL-include regex),
     * the URL-exclude regex controls, confidence list, and a notes row at the bottom.
     *
     * @return javax.swing.JPanel
     */
    private JPanel getJPanel2() {
        if (jPanel2 == null) {
            jPanel2 = new JPanel();
            jPanel2.setLayout(new GridBagLayout());
            // Row 0: header labels, one per column.
            GridBagConstraints gbc00 = LayoutHelper.getGBC(0, 0, 1, 1.0, stdInset());
            GridBagConstraints gbc01 = LayoutHelper.getGBC(1, 0, 1, 1.0, stdInset());
            GridBagConstraints gbc02 = LayoutHelper.getGBC(2, 0, 1, 1.0, stdInset());
            GridBagConstraints gbc03 = LayoutHelper.getGBC(3, 0, 1, 1.0, stdInset());
            GridBagConstraints gbc04 = LayoutHelper.getGBC(4, 0, 1, 1.0, stdInset());
            // Rows 1-3: the scrollable lists; spans differ so the right-hand column
            // can stack the URL-exclude label and field beneath the include field.
            GridBagConstraints gbc10 = LayoutHelper.getGBC(0, 1, 1, 3, 1.0, 1.0,
                    GridBagConstraints.BOTH, GridBagConstraints.NORTHWEST, stdInset());
            GridBagConstraints gbc11 = LayoutHelper.getGBC(1, 1, 1, 3, 1.0, 1.0,
                    GridBagConstraints.BOTH, GridBagConstraints.NORTHWEST, stdInset());
            GridBagConstraints gbc12 = LayoutHelper.getGBC(2, 1, 1, 3, 1.0, 1.0,
                    GridBagConstraints.BOTH, GridBagConstraints.NORTHWEST, stdInset());
            GridBagConstraints gbc13 = LayoutHelper.getGBC(3, 1, 1, 2, 1.0, 1.0,
                    GridBagConstraints.BOTH, GridBagConstraints.NORTHWEST, stdInset());
            GridBagConstraints gbc14 = LayoutHelper.getGBC(4, 1, 1, 1, 1.0, 1.0,
                    GridBagConstraints.BOTH, GridBagConstraints.NORTHWEST, stdInset());
            GridBagConstraints gbc24 = LayoutHelper.getGBC(4, 2, 1, 1, 0.0, 0.0,
                    GridBagConstraints.NONE, GridBagConstraints.NORTHWEST, stdInset());
            GridBagConstraints gbc33 = LayoutHelper.getGBC(3, 3, 1, 1, 1.0, 1.0,
                    GridBagConstraints.BOTH, GridBagConstraints.NORTHWEST, stdInset());
            GridBagConstraints gbc34 = LayoutHelper.getGBC(4, 3, 1, 1, 0.0, 0.0,
                    GridBagConstraints.BOTH, GridBagConstraints.NORTHWEST, stdInset());
            // Row 4: the notes combo row, spanning two columns.
            GridBagConstraints gbc30 = LayoutHelper.getGBC(0, 4, 2, 1.0, stdInset());
            jPanel2.add(new JLabel(Constant.messages.getString("history.filter.label.methods")), gbc00);
            jPanel2.add(new JLabel(Constant.messages.getString("history.filter.label.codes")), gbc01);
            jPanel2.add(new JLabel(Constant.messages.getString("history.filter.label.tags")), gbc02);
            jPanel2.add(new JLabel(Constant.messages.getString("history.filter.label.alerts")), gbc03);
            jPanel2.add(new JLabel(Constant.messages.getString("history.filter.label.urlincregex")), gbc04);
            jPanel2.add(getMethodScroller(), gbc10);
            jPanel2.add(getCodeScroller(), gbc11);
            jPanel2.add(getTagScroller(), gbc12);
            jPanel2.add(getRiskScroller(), gbc13);
            jPanel2.add(getUrlRegxIncScroller(), gbc14);
            jPanel2.add(new JLabel(Constant.messages.getString("history.filter.label.urlexcregex")), gbc24);
            jPanel2.add(getConfidenceScroller(), gbc33);
            jPanel2.add(getUrlRegxExcScroller(), gbc34);
            getUrlRegxExcScroller();
            // Notes filter: label + combo box laid out horizontally in their own sub-panel.
            JPanel jPanel3 = new JPanel();
            jPanel3.setLayout(new BoxLayout(jPanel3, BoxLayout.X_AXIS));
            jPanel3.add(new JLabel(Constant.messages.getString("history.filter.label.notes")));
            jPanel3.add(getNotesComboBox());
            jPanel2.add(jPanel3, gbc30);
        }
        return jPanel2;
    }
}
public class ApiOvhOrder { /** * List all the items of a cart
* REST : GET / order / cart / { cartId } / item
* @ param cartId [ required ] Cart identifier */
public ArrayList < Long > cart_cartId_item_GET ( String cartId ) throws IOException { } } | String qPath = "/order/cart/{cartId}/item" ; StringBuilder sb = path ( qPath , cartId ) ; String resp = execN ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , t5 ) ; |
public class Lifecycle { /** * Removes a listener . */
public void removeListener ( LifecycleListener listener ) { } } | synchronized ( this ) { if ( _listeners == null ) return ; for ( int i = _listeners . size ( ) - 1 ; i >= 0 ; i -- ) { LifecycleListener oldListener = _listeners . get ( i ) . get ( ) ; if ( listener == oldListener ) { _listeners . remove ( i ) ; return ; } else if ( oldListener == null ) _listeners . remove ( i ) ; } } |
public class Strings {

    /**
     * Checks if a CharSequence is whitespace, empty ("") or null.
     *
     * <pre>
     * isBlank(null)      = true
     * isBlank("")        = true
     * isBlank(" ")       = true
     * isBlank("bob")     = false
     * isBlank("  bob  ") = false
     * </pre>
     *
     * @param cs the CharSequence to check, may be null
     * @return {@code true} if the CharSequence is null, empty or whitespace
     * @since 3.0
     */
    public static boolean isBlank(CharSequence cs) {
        if (cs == null) {
            return true;
        }
        final int strLen = cs.length();
        for (int i = 0; i < strLen; i++) {
            // Character.isWhitespace covers Unicode whitespace, not just ' '.
            if (!Character.isWhitespace(cs.charAt(i))) {
                return false;
            }
        }
        return true;
    }
}
public class RaftSession {

    /**
     * Registers a session result.
     *
     * Results are stored in memory on all servers in order to provide linearizable
     * semantics. When a command is applied to the state machine, the command's return
     * value is stored with the sequence number. Once the client acknowledges receipt
     * of the command output the result will be cleared from memory.
     *
     * @param sequence The result sequence number.
     * @param result   The result.
     */
    public void registerResult(long sequence, OperationResult result) {
        // Advance the request sequence before caching the result for later replay.
        setRequestSequence(sequence);
        results.put(sequence, result);
    }
}
public class HtmlGame {

    /**
     * GWT module entry point: installs a global uncaught-exception handler that
     * logs errors, then defers {@code start()} so the handler is active before
     * any game code runs.
     *
     * TODO(fredsa): consider adding an onerror page handler, for non-GWT
     * originated exceptions
     */
    @Override
    public final void onModuleLoad() {
        GWT.setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
            @Override
            public void onUncaughtException(Throwable e) {
                HtmlPlatform.log.error("Uncaught Exception: ", e);
            }
        });
        // Need to do everything else in a deferred command, so that
        // the uncaught exception handler has taken effect
        Scheduler.get().scheduleDeferred(new ScheduledCommand() {
            @Override
            public void execute() {
                start();
            }
        });
    }
}
public class MybatisRepository { /** * Given a base id , pass back the fully qualified version , for instance : < br / >
* < br / >
* & lt ; mapper namespace = " Person " & gt ; < br / >
* & nbsp ; & nbsp ; & lt ; select id = " findByName " . . . & gt ; < br / >
* & lt ; / mapper & gt ; < br / >
* < br / >
* String mapId = getMapId ( " findByName " ) ; < br / >
* / / mapId will be Person . findByName
* @ param baseMapId the namespaceless id of the query / insert mapping you wish to utilize
* @ return */
protected String getMapId ( String baseMapId ) { } } | final String simpleName = entityClass . getSimpleName ( ) ; return simpleName + "." + baseMapId ; |
public class Matrix4f {

    /**
     * Set only the translation components <code>(m30, m31, m32)</code> of this matrix
     * to the given values <code>(x, y, z)</code>.
     * Note that this will only work properly for orthogonal matrices (without any perspective).
     * To build a translation matrix instead, use {@link #translation(float, float, float)}.
     * To apply a translation, use {@link #translate(float, float, float)}.
     *
     * @see #translation(float, float, float)
     * @see #translate(float, float, float)
     *
     * @param x the offset to translate in x
     * @param y the offset to translate in y
     * @param z the offset to translate in z
     * @return this
     */
    public Matrix4f setTranslation(float x, float y, float z) {
        this._m30(x);
        this._m31(y);
        this._m32(z);
        // The matrix can no longer be assumed identity; it also carries no perspective,
        // so clear both property bits used by the fast paths.
        properties &= ~(PROPERTY_PERSPECTIVE | PROPERTY_IDENTITY);
        return this;
    }
}
public class CmsUserInfoDialog { /** * Get Message for show last login information . < p >
* @ param inacTime time since last login in milli sec
* @ return HTML String */
private String getLastLoginMessage ( Long inacTime ) { } } | int days = ( int ) ( inacTime . longValue ( ) / ( 1000 * 60 * 60 * 24 ) ) ; if ( days == 0 ) { return CmsVaadinUtils . getMessageText ( Messages . GUI_USER_INFO_LAST_LOGIN_LESS_A_DAY_0 ) ; } if ( days == 1 ) { return CmsVaadinUtils . getMessageText ( Messages . GUI_USER_INFO_LAST_LOGIN_YESTERDAY_0 ) ; } return CmsVaadinUtils . getMessageText ( Messages . GUI_USER_INFO_LAST_LOGIN_DAYS_AGO_1 , new Integer ( days ) ) ; |
public class CommerceAddressRestrictionUtil {

    /**
     * Returns the first commerce address restriction in the ordered set where
     * commerceCountryId = &#63;. Delegates directly to the persistence layer.
     *
     * @param commerceCountryId the commerce country ID
     * @param orderByComparator the comparator to order the set by (optionally <code>null</code>)
     * @return the first matching commerce address restriction
     * @throws NoSuchAddressRestrictionException if a matching commerce address restriction could not be found
     */
    public static CommerceAddressRestriction findByCommerceCountryId_First(long commerceCountryId,
            OrderByComparator<CommerceAddressRestriction> orderByComparator)
        throws com.liferay.commerce.exception.NoSuchAddressRestrictionException {
        return getPersistence().findByCommerceCountryId_First(commerceCountryId, orderByComparator);
    }
}
public class GobblinTaskRunner {

    /**
     * Binds the configured Helix instance tags to this instance.
     *
     * Helix participants cannot pre-configure tags before connecting to ZK, so this
     * method can only be invoked after {@link HelixManager#connect()}. This still works
     * because tagged jobs won't be sent to a non-tagged instance; a job tagged with
     * EXAMPLE_INSTANCE_TAG remains in ZK until an instance carrying that tag appears.
     */
    private void addInstanceTags() {
        List<String> tags = ConfigUtils.getStringList(this.config,
                GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_KEY);
        HelixManager receiverManager = getReceiverManager();
        // Tags can only be registered once the manager is actually connected to ZK.
        if (receiverManager.isConnected()) {
            if (!tags.isEmpty()) {
                logger.info("Adding tags binding " + tags);
                tags.forEach(tag -> receiverManager.getClusterManagmentTool()
                        .addInstanceTag(this.clusterName, this.helixInstanceName, tag));
                // Log what Helix actually recorded, as a sanity check.
                logger.info("Actual tags binding " + receiverManager.getClusterManagmentTool()
                        .getInstanceConfig(this.clusterName, this.helixInstanceName).getTags());
            }
        }
    }
}
public class SessionBeanTypeImpl { /** * If not already created , a new < code > around - timeout < / code > element will be created and returned .
* Otherwise , the first existing < code > around - timeout < / code > element will be returned .
* @ return the instance defined for the element < code > around - timeout < / code > */
public AroundTimeoutType < SessionBeanType < T > > getOrCreateAroundTimeout ( ) { } } | List < Node > nodeList = childNode . get ( "around-timeout" ) ; if ( nodeList != null && nodeList . size ( ) > 0 ) { return new AroundTimeoutTypeImpl < SessionBeanType < T > > ( this , "around-timeout" , childNode , nodeList . get ( 0 ) ) ; } return createAroundTimeout ( ) ; |
public class JmsJcaSessionImpl {

    /**
     * A convenience method that returns the core connection associated with
     * this session's connection.
     *
     * @return the core connection, or <code>null</code> if this session has no
     *         associated connection
     * @throws IllegalStateException if this session has been closed or invalidated
     */
    public SICoreConnection getSICoreConnection() throws IllegalStateException {
        if (TraceComponent.isAnyTracingEnabled() && TRACE.isEntryEnabled()) {
            SibTr.entry(this, TRACE, "getSICoreConnection");
        }
        // Reject calls on a session that has been closed or invalidated,
        // with distinct NLS message keys for each condition.
        if (_sessionClosed) {
            throw new IllegalStateException(NLS.getFormattedMessage(
                    ("ILLEGAL_STATE_CWSJR1123"), new Object[] { "getSICoreConnection" }, null));
        }
        if (_sessionInvalidated) {
            throw new IllegalStateException(NLS.getFormattedMessage(
                    ("ILLEGAL_STATE_CWSJR1124"), new Object[] { "getSICoreConnection" }, null));
        }
        SICoreConnection coreConnection = null;
        if (_connection != null) {
            coreConnection = _connection.getSICoreConnection();
        }
        if (TraceComponent.isAnyTracingEnabled() && TRACE.isEntryEnabled()) {
            SibTr.exit(this, TRACE, "getSICoreConnection", coreConnection);
        }
        return coreConnection;
    }
}
public class SeaGlassStyleWrapper {

    /**
     * {@inheritDoc}
     *
     * Delegates directly to the wrapped style.
     */
    @Override
    public Insets getInsets(SynthContext ctx, Insets in) {
        return style.getInsets(ctx, in);
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.