signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class AlternateSizeValidator { /** * { @ inheritDoc } check if given object is valid . * @ see javax . validation . ConstraintValidator # isValid ( Object , * javax . validation . ConstraintValidatorContext ) */ @ Override public final boolean isValid ( final Object pvalue , final ConstraintValidatorContext pcontext ) { } }
String valueAsString = Objects . toString ( pvalue , StringUtils . EMPTY ) ; if ( StringUtils . isEmpty ( valueAsString ) ) { return true ; } if ( ignoreWhiteSpaces ) { valueAsString = valueAsString . replaceAll ( "\\s" , StringUtils . EMPTY ) ; } if ( ignoreMinus ) { valueAsString = valueAsString . replaceAll ( "-" , StringUtils . EMPTY ) ; } if ( ignoreSlashes ) { valueAsString = valueAsString . replaceAll ( "/" , StringUtils . EMPTY ) ; } return StringUtils . length ( valueAsString ) == size1 || StringUtils . length ( valueAsString ) == size2 ;
public class VariableNumMap {

    /**
     * Ensures that all variable numbers which are shared between other
     * and this are mapped to the same variables.
     *
     * Both this map's {@code nums} and {@code other.nums} are walked with a
     * two-pointer merge, so this assumes both arrays are sorted ascending —
     * TODO confirm against the class invariant.
     */
    private final void checkCompatibility(VariableNumMap other) {
        int i = 0, j = 0;
        int[] otherNums = other.nums;
        String[] otherNames = other.names;
        Variable[] otherVars = other.vars;
        // Merge-walk the two sorted num arrays; only shared nums are compared.
        while (i < nums.length && j < otherNums.length) {
            if (nums[i] < otherNums[j]) {
                i++;
            } else if (nums[i] > otherNums[j]) {
                j++;
            } else { // Equal
                // Shared variable number: name and variable name must agree.
                Preconditions.checkArgument(names[i].equals(otherNames[j]));
                Preconditions.checkArgument(vars[i].getName().equals(otherVars[j].getName()));
                i++;
                j++;
            }
        }
    }
}
public class DAOValidatorHelper {

    /**
     * Extracts every whitespace-separated sub-string of the expression that
     * matches the given pattern.
     *
     * @param expression the expression to scan
     * @param pattern    the pattern to look for
     * @return the matching sub-strings, or {@code null} when the expression is
     *         blank or the pattern is {@code null}
     */
    public static String[] extractToken(String expression, String pattern) {
        // Blank expression: nothing to extract.
        if (expression == null || expression.trim().length() == 0) {
            return null;
        }
        // No pattern supplied: nothing to match against.
        if (pattern == null) {
            return null;
        }
        // Split the expression on whitespace.
        String[] spacePlitted = expression.split(SPLITTER_CHAIN);
        // Buffer collecting the matching tokens, joined with "@".
        StringBuffer aTokens = new StringBuffer();
        // Number of tokens appended so far.
        int index = 0;
        // Walk the whitespace-separated tokens.
        for (String spaceToken : spacePlitted) {
            // Keep the token when it matches the pattern. (The original comment
            // claimed the opposite — the code keeps MATCHING tokens.)
            if (isExpressionContainPattern(spaceToken, pattern)) {
                // Separator before every token except the first.
                if (index++ > 0) aTokens.append("@");
                aTokens.append(spaceToken);
            }
        }
        // NOTE(review): "@" is an internal separator, so a token containing "@"
        // would split incorrectly; and when nothing matched, "".split("@")
        // yields [""] rather than an empty array — confirm callers expect this.
        return aTokens.toString().split("@");
    }
}
public class SymbolsExtractor { /** * to extract qualified name with prefix */ public static Set < QualifiedName > extractNames ( Expression expression , Set < NodeRef < Expression > > columnReferences ) { } }
ImmutableSet . Builder < QualifiedName > builder = ImmutableSet . builder ( ) ; new QualifiedNameBuilderVisitor ( columnReferences ) . process ( expression , builder ) ; return builder . build ( ) ;
public class DBCluster { /** * Provides the list of instances that make up the DB cluster . * @ param dBClusterMembers * Provides the list of instances that make up the DB cluster . */ public void setDBClusterMembers ( java . util . Collection < DBClusterMember > dBClusterMembers ) { } }
if ( dBClusterMembers == null ) { this . dBClusterMembers = null ; return ; } this . dBClusterMembers = new java . util . ArrayList < DBClusterMember > ( dBClusterMembers ) ;
public class Stage {

    /**
     * Answers the ActorProtocolActor[] for the newly created Actor instance. (INTERNAL ONLY)
     * @param protocols the {@code Class<?>}[] protocols of the Actor
     * @param definition the Definition of the Actor
     * @param parent the Actor parent of this Actor
     * @param maybeSupervisor the possible Supervisor of this Actor
     * @param logger the Logger of this Actor
     * @return ActorProtocolActor[]
     */
    ActorProtocolActor<Object>[] actorProtocolFor(final Class<?>[] protocols, final Definition definition, final Actor parent, final Supervisor maybeSupervisor, final Logger logger) {
        // Fail fast if any of the requested protocols is non-compliant.
        assertProtocolCompliance(protocols);
        // Delegate to the fuller overload; the two extra null arguments are
        // presumably an unspecified address and mailbox — TODO confirm against
        // the overload's signature.
        return actorProtocolFor(protocols, definition, parent, null, null, maybeSupervisor, logger);
    }
}
public class EditController {

    /**
     * Show an overview of geometric attributes of the geometry that's being edited.
     * Does nothing when an info label is already showing, or when the current
     * feature transaction carries no new features.
     */
    public void showGeometricInfo() {
        FeatureTransaction ft = getFeatureTransaction();
        // Create the label only once (infoLabel == null) and only when there is
        // at least one new feature whose geometry can be described.
        if (infoLabel == null && ft != null && ft.getNewFeatures() != null && ft.getNewFeatures().length > 0) {
            infoLabel = new GeometricInfoLabel();
            // Clicking the label destroys it again.
            infoLabel.addClickHandler(new DestroyLabelInfoOnClick());
            // Only the first new feature's geometry is shown.
            infoLabel.setGeometry(ft.getNewFeatures()[0].getGeometry());
            // Animate toward the top-right corner of the map widget.
            infoLabel.animateMove(mapWidget.getWidth() - 155, 10);
        }
    }
}
public class GregorianCalendar {

    /**
     * Returns the fixed date of the first day of the year (usually
     * January 1) before the specified date.
     *
     * @param date the date for which the first day of the year is
     * calculated. The date has to be in the cut-over year (Gregorian
     * or Julian).
     * @param fixedDate the fixed date representation of the date
     */
    private long getFixedDateJan1(BaseCalendar.Date date, long fixedDate) {
        // Precondition: the date must lie in one of the two cutover years.
        assert date.getNormalizedYear() == gregorianCutoverYear
            || date.getNormalizedYear() == gregorianCutoverYearJulian;
        if (gregorianCutoverYear != gregorianCutoverYearJulian) {
            if (fixedDate >= gregorianCutoverDate) {
                // Dates before the cutover date don't exist
                // in the same (Gregorian) year. So, no
                // January 1 exists in the year. Use the
                // cutover date as the first day of the year.
                return gregorianCutoverDate;
            }
        }
        // January 1 of the normalized year should exist; compute it on the
        // Julian calendar system.
        BaseCalendar juliancal = getJulianCalendarSystem();
        return juliancal.getFixedDate(date.getNormalizedYear(), BaseCalendar.JANUARY, 1, null);
    }
}
public class DeleteRelationalDatabaseRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( DeleteRelationalDatabaseRequest deleteRelationalDatabaseRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( deleteRelationalDatabaseRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( deleteRelationalDatabaseRequest . getRelationalDatabaseName ( ) , RELATIONALDATABASENAME_BINDING ) ; protocolMarshaller . marshall ( deleteRelationalDatabaseRequest . getSkipFinalSnapshot ( ) , SKIPFINALSNAPSHOT_BINDING ) ; protocolMarshaller . marshall ( deleteRelationalDatabaseRequest . getFinalRelationalDatabaseSnapshotName ( ) , FINALRELATIONALDATABASESNAPSHOTNAME_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class InstanceAdminClient { /** * Gets information about a particular instance . * < p > Sample code : * < pre > < code > * try ( InstanceAdminClient instanceAdminClient = InstanceAdminClient . create ( ) ) { * InstanceName name = InstanceName . of ( " [ PROJECT ] " , " [ INSTANCE ] " ) ; * Instance response = instanceAdminClient . getInstance ( name ) ; * < / code > < / pre > * @ param name Required . The name of the requested instance . Values are of the form * ` projects / & lt ; project & gt ; / instances / & lt ; instance & gt ; ` . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ public final Instance getInstance ( InstanceName name ) { } }
GetInstanceRequest request = GetInstanceRequest . newBuilder ( ) . setName ( name == null ? null : name . toString ( ) ) . build ( ) ; return getInstance ( request ) ;
public class BeanMappingUtil { /** * 对象属性的拷贝 , 与BeanUtils , BeanCopier功能类似 * @ param src * @ param target */ public static void copy ( Object src , Object target ) throws BeanMappingException { } }
BeanCopy copy = BeanCopy . create ( src . getClass ( ) , target . getClass ( ) ) ; copy . copy ( src , target ) ;
public class UriUtil { /** * Checks if the given URI is for a photo from the device ' s local media store . * @ param uri the URI to check * @ return true if the URI points to a media store photo */ public static boolean isLocalCameraUri ( Uri uri ) { } }
String uriString = uri . toString ( ) ; return uriString . startsWith ( MediaStore . Images . Media . EXTERNAL_CONTENT_URI . toString ( ) ) || uriString . startsWith ( MediaStore . Images . Media . INTERNAL_CONTENT_URI . toString ( ) ) ;
public class MessageProcessorControl { /** * / * ( non - Javadoc ) * @ see com . ibm . ws . sib . processor . runtime . SIMPMessageProcessorControllable # getLocalSubscriptionIterator ( ) */ public SIMPIterator getLocalSubscriptionIterator ( ) throws SIMPException { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "getLocalSubscriptionIterator" ) ; SIMPIterator topicSpaces = getTopicSpaceIterator ( ) ; SubscriptionIndex subs = new SubscriptionIndex ( ) ; while ( topicSpaces . hasNext ( ) ) { Topicspace ts = ( Topicspace ) topicSpaces . next ( ) ; SIMPIterator sub = ts . getInternalLocalSubscriptionIterator ( ) ; while ( sub . hasNext ( ) ) { subs . put ( ( ControllableSubscription ) sub . next ( ) ) ; } } ControllableIterator itr = new ControllableIterator ( subs . iterator ( ) ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "getLocalSubscriptionIterator" , itr ) ; return itr ;
public class AbstractSearch { /** * Returns the parameter settings in structured way * @ param index the index of the trace item to obtain * @ return the parameter settings */ public List < Entry < String , Object > > getTraceParameterSettings ( int index ) { } }
List < Entry < String , Object > > result = new ArrayList < Map . Entry < String , Object > > ( ) ; List < String > dimensions = getSearchDimensions ( ) ; for ( int i = 0 ; i < dimensions . size ( ) ; ++ i ) { String parameter = dimensions . get ( i ) ; Object value = m_Trace . get ( index ) . getValue ( ) . getValues ( ) . getValue ( i ) ; Map . Entry < String , Object > current = new AbstractMap . SimpleEntry < String , Object > ( parameter , value ) ; result . add ( i , current ) ; } return result ;
public class CheckpointStatistics {

    /**
     * Converts an {@code AbstractCheckpointStats} into the matching
     * {@code CheckpointStatistics} subtype (completed / failed / pending).
     *
     * @param checkpointStats the checkpoint statistics to convert; must not be null
     * @param includeTaskCheckpointStatistics whether to build the per-task breakdown
     * @return the converted statistics object
     * @throws IllegalArgumentException if the concrete stats type is none of
     *         Completed/Failed/PendingCheckpointStats
     */
    public static CheckpointStatistics generateCheckpointStatistics(AbstractCheckpointStats checkpointStats, boolean includeTaskCheckpointStatistics) {
        Preconditions.checkNotNull(checkpointStats);

        // Optionally build the per-task breakdown, keyed by job vertex id.
        Map<JobVertexID, TaskCheckpointStatistics> checkpointStatisticsPerTask;
        if (includeTaskCheckpointStatistics) {
            Collection<TaskStateStats> taskStateStats = checkpointStats.getAllTaskStateStats();
            checkpointStatisticsPerTask = new HashMap<>(taskStateStats.size());
            for (TaskStateStats taskStateStat : taskStateStats) {
                checkpointStatisticsPerTask.put(
                    taskStateStat.getJobVertexId(),
                    new TaskCheckpointStatistics(
                        checkpointStats.getCheckpointId(),
                        checkpointStats.getStatus(),
                        taskStateStat.getLatestAckTimestamp(),
                        taskStateStat.getStateSize(),
                        taskStateStat.getEndToEndDuration(checkpointStats.getTriggerTimestamp()),
                        taskStateStat.getAlignmentBuffered(),
                        taskStateStat.getNumberOfSubtasks(),
                        taskStateStat.getNumberOfAcknowledgedSubtasks()));
            }
        } else {
            checkpointStatisticsPerTask = Collections.emptyMap();
        }

        // Dispatch on the concrete stats type; argument order must match each
        // subtype's constructor.
        if (checkpointStats instanceof CompletedCheckpointStats) {
            final CompletedCheckpointStats completedCheckpointStats = ((CompletedCheckpointStats) checkpointStats);
            return new CheckpointStatistics.CompletedCheckpointStatistics(
                completedCheckpointStats.getCheckpointId(),
                completedCheckpointStats.getStatus(),
                completedCheckpointStats.getProperties().isSavepoint(),
                completedCheckpointStats.getTriggerTimestamp(),
                completedCheckpointStats.getLatestAckTimestamp(),
                completedCheckpointStats.getStateSize(),
                completedCheckpointStats.getEndToEndDuration(),
                completedCheckpointStats.getAlignmentBuffered(),
                completedCheckpointStats.getNumberOfSubtasks(),
                completedCheckpointStats.getNumberOfAcknowledgedSubtasks(),
                checkpointStatisticsPerTask,
                completedCheckpointStats.getExternalPath(),
                completedCheckpointStats.isDiscarded());
        } else if (checkpointStats instanceof FailedCheckpointStats) {
            final FailedCheckpointStats failedCheckpointStats = ((FailedCheckpointStats) checkpointStats);
            return new CheckpointStatistics.FailedCheckpointStatistics(
                failedCheckpointStats.getCheckpointId(),
                failedCheckpointStats.getStatus(),
                failedCheckpointStats.getProperties().isSavepoint(),
                failedCheckpointStats.getTriggerTimestamp(),
                failedCheckpointStats.getLatestAckTimestamp(),
                failedCheckpointStats.getStateSize(),
                failedCheckpointStats.getEndToEndDuration(),
                failedCheckpointStats.getAlignmentBuffered(),
                failedCheckpointStats.getNumberOfSubtasks(),
                failedCheckpointStats.getNumberOfAcknowledgedSubtasks(),
                checkpointStatisticsPerTask,
                failedCheckpointStats.getFailureTimestamp(),
                failedCheckpointStats.getFailureMessage());
        } else if (checkpointStats instanceof PendingCheckpointStats) {
            final PendingCheckpointStats pendingCheckpointStats = ((PendingCheckpointStats) checkpointStats);
            return new CheckpointStatistics.PendingCheckpointStatistics(
                pendingCheckpointStats.getCheckpointId(),
                pendingCheckpointStats.getStatus(),
                pendingCheckpointStats.getProperties().isSavepoint(),
                pendingCheckpointStats.getTriggerTimestamp(),
                pendingCheckpointStats.getLatestAckTimestamp(),
                pendingCheckpointStats.getStateSize(),
                pendingCheckpointStats.getEndToEndDuration(),
                pendingCheckpointStats.getAlignmentBuffered(),
                pendingCheckpointStats.getNumberOfSubtasks(),
                pendingCheckpointStats.getNumberOfAcknowledgedSubtasks(),
                checkpointStatisticsPerTask);
        } else {
            throw new IllegalArgumentException("Given checkpoint stats object of type "
                + checkpointStats.getClass().getName() + " cannot be converted.");
        }
    }
}
public class MtasPennTreebankParser {

    /**
     * Parses Penn Treebank bracketed text from the reader into a token collection.
     * Maintains a stack of open bracket levels and reacts to the reader's
     * start-bracket / end-bracket / node / string events.
     *
     * @see mtas.analysis.parser.MtasParser#createTokenCollection(java.io.Reader)
     */
    @Override
    public MtasTokenCollection createTokenCollection(Reader reader) throws MtasParserException, MtasConfigException {
        tokenCollection = new MtasTokenCollection();
        MtasTokenIdFactory mtasTokenIdFactory = new MtasTokenIdFactory();
        // Stack of currently open bracket levels (innermost last).
        List<Level> levels = new ArrayList<>();
        // Map<String, MtasToken> referencesNode = new HashMap<>();
        // Map<String, List<MtasToken>> referencesNullElement = new HashMap<>();
        try {
            MtasPennTreebankReader treebankReader = new MtasPennTreebankReader(reader);
            // variables main administration
            int event = treebankReader.getEventType();
            int position = 0;
            boolean ignore = false;
            Level level = null;
            // variables for code
            List<Integer> codePositions = new ArrayList<>();
            Integer codeOffsetStart = null;
            Integer codeOffsetEnd = null;
            // variables for string
            String stringValue = null;
            int stringOffsetStart;
            int stringOffsetEnd;
            // loop over reader events until the reader is exhausted
            while (true) {
                switch (event) {
                case MtasPennTreebankReader.EVENT_STARTBRACKET:
                    if (level != null && level.code) {
                        // A CODE node may not open a nested bracket.
                        throw new MtasParserException("unexpected start bracket for " + NODE_CODE);
                    } else {
                        // Open a new level; it inherits the current ignore flag.
                        level = new Level();
                        level.ignore = ignore;
                        level.realOffsetStart = treebankReader.getPosition() + 1;
                        levels.add(level);
                    }
                    break;
                case MtasPennTreebankReader.EVENT_ENDBRACKET:
                    Objects.requireNonNull(level, "no level while ending bracket");
                    level.realOffsetEnd = treebankReader.getPosition() - 1;
                    Level parentLevel = levels.size() > 1 ? levels.get(levels.size() - 2) : null;
                    createNodeMappings(mtasTokenIdFactory, level, parentLevel);
                    // remove level
                    if (parentLevel != null) {
                        // Propagate the closed level's position range and offsets
                        // up to its parent, then pop the stack.
                        if (level.positionStart != null && level.positionEnd != null) {
                            parentLevel.addPositionRange(level.positionStart, level.positionEnd);
                        }
                        parentLevel.offsetStart = parentLevel.offsetStart == null ? level.offsetStart : parentLevel.offsetStart;
                        parentLevel.offsetEnd = level.offsetEnd == null ? parentLevel.offsetEnd : level.offsetEnd;
                        levels.remove(levels.size() - 1);
                        level = parentLevel;
                        ignore = level.ignore;
                    } else {
                        // Outermost bracket closed: reset all per-tree state.
                        levels.clear();
                        level = null;
                        ignore = false;
                        // referencesNode.clear();
                        // referencesNullElement.clear();
                    }
                    break;
                case MtasPennTreebankReader.EVENT_NODE:
                    Objects.requireNonNull(level, "no level while handling node");
                    // register node with level
                    level.node = treebankReader.getString();
                    if (ignoreNodes.contains(level.node)) {
                        ignore = true;
                        level.ignore = true;
                    }
                    if (level.node.equals(NODE_CODE)) {
                        // A CODE node must be immediately followed by its string;
                        // consume that string event here.
                        level.code = true;
                        if (!treebankReader.next() || (event = treebankReader.getEventType()) != MtasPennTreebankReader.EVENT_STRING) {
                            throw new MtasParserException("expected string for " + NODE_CODE);
                        } else if (!level.ignore) {
                            stringValue = treebankReader.getString();
                            stringOffsetStart = treebankReader.getPosition();
                            stringOffsetEnd = stringOffsetStart + stringValue.length();
                            if (!codePositions.isEmpty()) {
                                createCodeMappings(mtasTokenIdFactory, level, stringValue, codeOffsetStart, codeOffsetEnd, stringOffsetStart, stringOffsetEnd, codePositions);
                            } else {
                                log.error("CODE without codePositions for " + stringValue);
                            }
                            // CODE state is consumed once mapped.
                            codePositions.clear();
                            codeOffsetStart = null;
                            codeOffsetEnd = null;
                        }
                    }
                    break;
                case MtasPennTreebankReader.EVENT_STRING:
                    Objects.requireNonNull(level, "no level while handling string");
                    if (level.code) {
                        // CODE strings are consumed in the EVENT_NODE branch above.
                        throw new MtasParserException("unexpected string for " + NODE_CODE);
                    } else if (!level.ignore) {
                        stringValue = treebankReader.getString();
                        stringOffsetStart = treebankReader.getPosition();
                        stringOffsetEnd = stringOffsetStart + stringValue.length();
                        if (level.offsetStart == null) {
                            level.offsetStart = stringOffsetStart;
                        }
                        level.offsetEnd = stringOffsetEnd;
                        if (stringValue.startsWith(NODE_CODE_PREFIX)) {
                            // Token is a code reference: remember its position and
                            // strip the prefix before mapping.
                            codePositions.add(position);
                            stringValue = stringValue.substring(NODE_CODE_PREFIX.length(), stringValue.length());
                            if (codeOffsetStart == null) {
                                codeOffsetStart = stringOffsetStart;
                            }
                            codeOffsetEnd = stringOffsetEnd;
                        }
                        // register position
                        level.addPosition(position);
                        // create mappings
                        createStringMappings(mtasTokenIdFactory, level, stringValue, stringOffsetStart, stringOffsetEnd, position);
                        // increase position
                        position++;
                    }
                    break;
                default:
                    break;
                }
                if (!treebankReader.next()) {
                    break;
                } else {
                    event = treebankReader.getEventType();
                }
            }
        } catch (IOException e) {
            log.debug(e);
            throw new MtasParserException("No valid Penn Treebank syntax: " + e.getMessage());
        }
        // final check
        tokenCollection.check(autorepair, makeunique);
        return tokenCollection;
    }
}
public class Multipart { /** * Convenience method to add a form field to the request . * @ param name name of field * @ param value value of field * @ return self */ public Multipart field ( String name , String value ) { } }
formFields . add ( new FormField ( name , value ) ) ; return this ;
public class ExecutionEngineIPC {

    /**
     * the abstract api assumes construction initializes but here initialization
     * is just another command.
     *
     * Serializes an Initialize command into {@code m_data} and sends it over the
     * IPC connection, then checks the returned status byte.
     */
    public void initialize(final int clusterIndex, final long siteId, final int partitionId, final int sitesPerHost, final int hostId, final String hostname, final int drClusterId, final int defaultDrBufferSize, final long tempTableMemory, final HashinatorConfig hashinatorConfig, final boolean createDrReplicatedStream, final long exportFlushTimeout) {
        synchronized (printLockObject) {
            System.out.println("Initializing an IPC EE " + this + " for hostId " + hostId + " siteId " + siteId + " from thread " + Thread.currentThread().getId());
        }
        int result = ExecutionEngine.ERRORCODE_ERROR;
        // Serialize the Initialize command; field order must match what the
        // IPC EE expects on the other end of the connection.
        m_data.clear();
        m_data.putInt(Commands.Initialize.m_id);
        m_data.putInt(clusterIndex);
        m_data.putLong(siteId);
        m_data.putInt(partitionId);
        m_data.putInt(sitesPerHost);
        m_data.putInt(hostId);
        m_data.putInt(drClusterId);
        m_data.putInt(defaultDrBufferSize);
        m_data.putLong(EELoggers.getLogLevels());
        m_data.putLong(tempTableMemory);
        m_data.putInt(createDrReplicatedStream ? 1 : 0);
        // NOTE(review): the (short) cast is widened right back to int by putInt,
        // and String.length() counts chars while the payload below is UTF-8
        // bytes — for non-ASCII hostnames these lengths differ. Confirm intent.
        m_data.putInt((short) hostname.length());
        m_data.put(hostname.getBytes(Charsets.UTF_8));
        // NOTE(review): exportFlushTimeout is accepted but never serialized here — confirm.
        try {
            m_data.flip();
            m_connection.write();
            result = m_connection.readStatusByte();
        } catch (final IOException e) {
            System.out.println("Exception: " + e.getMessage());
            throw new RuntimeException(e);
        }
        // Throws if the EE reported an error status.
        checkErrorCode(result);
        updateHashinator(hashinatorConfig);
    }
}
public class BaseExtension { /** * Add client request handlers */ protected void addClientRequestHandlers ( ) { } }
Set < String > commands = context . getClientRequestCommands ( ) ; for ( String command : commands ) addClientRequestHandler ( command ) ;
public class JPAPUnitInfo {

    /**
     * Lazily resolves and returns the non-JTA DataSource for this persistence unit.
     *
     * @see javax.persistence.spi.PersistenceUnitInfo#getNonJtaDataSource()
     */
    @Override
    public final DataSource getNonJtaDataSource() {
        // Resolve on first use; also re-resolve when the cached value is only a
        // GenericDataSource placeholder. d455055
        // NOTE(review): not synchronized — concurrent callers may resolve twice;
        // confirm that is acceptable here.
        if (ivNonJtaDataSource == null || ivNonJtaDataSource instanceof GenericDataSource) { // d455055
            ivNonJtaDataSource = getJPADataSource(ivNonJtaDataSourceJNDIName);
        }
        return ivNonJtaDataSource;
    }
}
public class XMLConfigAdmin { /** * update the locale * @ param locale * @ throws SecurityException */ public void updateLocale ( String locale ) throws SecurityException { } }
checkWriteAccess ( ) ; boolean hasAccess = ConfigWebUtil . hasAccess ( config , SecurityManager . TYPE_SETTING ) ; if ( ! hasAccess ) throw new SecurityException ( "no access to update regional setting" ) ; Element scope = _getRootElement ( "regional" ) ; scope . setAttribute ( "locale" , locale . trim ( ) ) ;
public class BigDecimal {

    /**
     * calculate divideAndRound for ldividend*10^raise / divisor
     * when abs(dividend)==abs(divisor);
     *
     * @param qsign sign of the quotient
     * @param raise exponent of the power of ten applied to the dividend
     * @param scale current result scale
     * @param preferredScale the scale the caller would prefer
     */
    private static BigDecimal roundedTenPower(int qsign, int raise, int scale, int preferredScale) {
        if (scale > preferredScale) {
            // Try to move the scale down toward the preferred scale by using up
            // part of the power of ten.
            int diff = scale - preferredScale;
            if (diff < raise) {
                // Enough powers of ten remain to land exactly on preferredScale.
                return scaledTenPow(raise - diff, qsign, preferredScale);
            } else {
                // Not enough: the quotient collapses to ±1 at the reduced scale.
                return valueOf(qsign, scale - raise);
            }
        } else {
            return scaledTenPow(raise, qsign, scale);
        }
    }
}
public class Mutation { /** * For all types except { @ link Op # DELETE } , constructs a map from column name to value . This is * mainly intended as a convenience for testing ; direct access via { @ link # getColumns ( ) } and * { @ link # getValues ( ) } is more efficient . * @ throws IllegalStateException if { @ code operation ( ) = = Op . DELETE } , or if any duplicate columns * are present . Detection of duplicates does not consider case . */ public Map < String , Value > asMap ( ) { } }
checkState ( operation != Op . DELETE , "asMap() cannot be called for a DELETE mutation" ) ; LinkedHashMap < String , Value > map = new LinkedHashMap < > ( ) ; for ( int i = 0 ; i < columns . size ( ) ; ++ i ) { Value existing = map . put ( columns . get ( i ) , values . get ( i ) ) ; } return Collections . unmodifiableMap ( map ) ;
public class RequestUtil { /** * 打印header信息 . * @ param request */ public static void printHeaders ( HttpServletRequest request ) { } }
Enumeration < String > e = request . getHeaderNames ( ) ; while ( e . hasMoreElements ( ) ) { String name = e . nextElement ( ) ; String value = request . getHeader ( name ) ; logger . info ( "header " + name + ":" + value ) ; }
public class BufferUtils {

    /**
     * Flip the buffer to fill mode.
     * The position is set to the first unused position in the buffer
     * (the old limit) and the limit is set to the capacity.
     * If the buffer is empty, then this call is effectively {@link #clearToFill(ByteBuffer)}.
     * If there is no unused space to fill, a {@link ByteBuffer#compact()} is done to attempt
     * to create space.
     * This method is used as a replacement to {@link ByteBuffer#compact()}.
     *
     * @param buffer The buffer to flip
     * @return The position of the valid data before the flipped position. This value should be
     * passed to a subsequent call to {@link #flipToFlush(ByteBuffer, int)}
     */
    public static int flipToFill(ByteBuffer buffer) {
        final int pos = buffer.position();
        final int lim = buffer.limit();
        final int cap = buffer.capacity();
        if (pos == lim) {
            // Empty buffer: fill from the very start.
            buffer.position(0);
            buffer.limit(cap);
            return 0;
        }
        if (lim == cap) {
            // No unused space after the limit: compact to create room.
            buffer.compact();
            return 0;
        }
        // Normal case: start filling after the existing valid data.
        buffer.position(lim);
        buffer.limit(cap);
        return pos;
    }
}
public class TableWorks {

    /**
     * A VoltDB extended variant of addIndex that supports indexed generalized non-column expressions.
     *
     * @param col int[]
     * @param indexExprs Expression[]
     * @param name HsqlName
     * @param unique boolean
     * @param migrating boolean
     * @param predicate Expression
     * @return new index
     */
    Index addExprIndex(int[] col, Expression[] indexExprs, HsqlName name, boolean unique, boolean migrating, Expression predicate) {
        Index newindex;
        if (table.isEmpty(session) || table.isIndexingMutable()) {
            // Cheap path: the table is empty or its indexing is mutable, so the
            // index structure can be created and added in place.
            newindex = table.createAndAddExprIndexStructure(name, col, indexExprs, unique, migrating, false);
        } else {
            // Otherwise rebuild the table definition around the new index and
            // migrate the existing data into the rebuilt table.
            newindex = table.createIndexStructure(name, col, null, null, unique, migrating, false, false).withExpressions(indexExprs);
            Table tn = table.moveDefinition(session, table.tableType, null, null, newindex, -1, 0, emptySet, emptySet);
            // for all sessions move the data
            tn.moveData(session, table, -1, 0);
            database.persistentStoreCollection.releaseStore(table);
            table = tn;
            setNewTableInSchema(table);
            updateConstraints(table, emptySet);
        }
        // Register the new index and recompile anything that depends on the table.
        database.schemaManager.addSchemaObject(newindex);
        database.schemaManager.recompileDependentObjects(table);
        if (predicate != null) {
            // Attach the partial-index predicate after registration.
            newindex = newindex.withPredicate(predicate);
        }
        return newindex;
    }
}
public class LanguageServiceClient { /** * Analyzes the sentiment of the provided text . * < p > Sample code : * < pre > < code > * try ( LanguageServiceClient languageServiceClient = LanguageServiceClient . create ( ) ) { * Document document = Document . newBuilder ( ) . build ( ) ; * EncodingType encodingType = EncodingType . NONE ; * AnalyzeSentimentResponse response = languageServiceClient . analyzeSentiment ( document , encodingType ) ; * < / code > < / pre > * @ param document Input document . * @ param encodingType The encoding type used by the API to calculate sentence offsets . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ public final AnalyzeSentimentResponse analyzeSentiment ( Document document , EncodingType encodingType ) { } }
AnalyzeSentimentRequest request = AnalyzeSentimentRequest . newBuilder ( ) . setDocument ( document ) . setEncodingType ( encodingType ) . build ( ) ; return analyzeSentiment ( request ) ;
public class AbstractElement {

    /**
     * Click function that will wait for one of the ExpectedConditions to match.
     * {@link org.openqa.selenium.TimeoutException} exception will be thrown if no conditions are matched within the
     * allowed time {@link ConfigProperty#EXECUTION_TIMEOUT}
     *
     * @param conditions
     *            {@link List}&lt;{@link ExpectedCondition}&lt;?&gt;&gt; of supplied conditions passed.
     * @return first {@link org.openqa.selenium.support.ui.ExpectedCondition} that was matched,
     *         or {@code null} when no conditions were supplied (click-only mode)
     */
    public ExpectedCondition<?> clickAndExpectOneOf(final List<ExpectedCondition<?>> conditions) {
        dispatcher.beforeClick(this, conditions);
        getElement().click();
        if (Boolean.parseBoolean(Config.getConfigProperty(ConfigProperty.ENABLE_GUI_LOGGING))) {
            logUIAction(UIActions.CLICKED);
        }
        // If there are no expected objects, then it means user wants this method
        // to behave as a click-only. So let's skip processing of alerts and leave
        // that to the user.
        if (conditions == null || conditions.size() <= 0) {
            return null;
        }
        if (parent != null) {
            WebDriverWaitUtils.waitUntilPageIsLoaded(parent.getCurrentPage());
        }
        validatePresenceOfAlert();
        // Presumably a millisecond value converted to the seconds WebDriverWait
        // expects — TODO confirm Grid.getExecutionTimeoutValue()'s unit.
        long timeout = Grid.getExecutionTimeoutValue() / 1000;
        try {
            WebDriverWait wait = new WebDriverWait(Grid.driver(), timeout);
            wait.ignoring(NoSuchElementException.class);
            wait.ignoring(ExpectOneOfException.class);
            ExpectedCondition<?> matchedCondition = wait.until(new Function<WebDriver, ExpectedCondition<?>>() {
                // find the first condition that matches and return it
                @Override
                public ExpectedCondition<?> apply(WebDriver webDriver) {
                    StringBuilder sb = new StringBuilder();
                    int i = 1;
                    for (final ExpectedCondition<?> condition : conditions) {
                        try {
                            Object value = condition.apply(webDriver);
                            // A Boolean-valued condition matches only when TRUE;
                            // any other non-null result counts as a match.
                            if (value instanceof Boolean) {
                                if (Boolean.TRUE.equals(value)) {
                                    return condition;
                                }
                            } else if (value != null) {
                                return condition;
                            }
                        } catch (WebDriverException e) {
                            // Collect a short summary of each failed condition for
                            // the aggregated ExpectOneOfException below.
                            sb.append("\n\tObject " + i + ":\n");
                            sb.append("\t" + ExceptionUtils.getRootCauseMessage(e).split("\n")[0] + "\n");
                            sb.append("\t\t" + StringUtils.substringBetween(ExceptionUtils.getStackTrace(e), "\n"));
                        }
                        i++;
                    }
                    // No condition matched this polling round; WebDriverWait retries
                    // until its timeout.
                    throw new ExpectOneOfException(sb.toString());
                }
            });
            return matchedCondition;
        } finally {
            // Attempt at taking screenshots even when there are time-outs triggered
            // from the wait* methods.
            processScreenShot();
            dispatcher.afterClick(this, conditions);
        }
    }
}
public class ListBulkDeploymentDetailedReportsResult { /** * A list of the individual group deployments in the bulk deployment operation . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setDeployments ( java . util . Collection ) } or { @ link # withDeployments ( java . util . Collection ) } if you want to * override the existing values . * @ param deployments * A list of the individual group deployments in the bulk deployment operation . * @ return Returns a reference to this object so that method calls can be chained together . */ public ListBulkDeploymentDetailedReportsResult withDeployments ( BulkDeploymentResult ... deployments ) { } }
if ( this . deployments == null ) { setDeployments ( new java . util . ArrayList < BulkDeploymentResult > ( deployments . length ) ) ; } for ( BulkDeploymentResult ele : deployments ) { this . deployments . add ( ele ) ; } return this ;
public class MessageItem {

    /**
     * Applies the message-control classification to the underlying JS message,
     * when the message supports it, and caches the value locally.
     *
     * @see com.ibm.ws.sib.processor.impl.interfaces.SIMPMessage#setMessageControlClassification(java.lang.String)
     */
    @Override
    public void setMessageControlClassification(String classification) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(this, tc, "setMessageControlClassification", classification);
        JsMessage localMsg = getJSMessage(true);
        // Cannot set this parameter on a Control message
        if (localMsg.isApiMessage()) {
            // The extra instanceof guards the cast; non-JsApiMessage API messages
            // are silently skipped.
            if (localMsg instanceof JsApiMessage) {
                ((JsApiMessage) localMsg).setMessageControlClassification(classification);
                // Cache the value locally once it has been applied to the message.
                messageControlClassification = classification;
            }
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "setMessageControlClassification");
    }
}
public class ScheduledExecutors { /** * Run runnable repeatedly with the given delay between calls , after the given * initial delay . Exceptions are caught and logged as errors . */ public static void scheduleWithFixedDelay ( final ScheduledExecutorService exec , final Duration initialDelay , final Duration delay , final Runnable runnable ) { } }
// Adapts the Runnable to the Callable<Signal> overload; always returning Signal.REPEAT
// reschedules forever. Exception catching/logging is delegated to that overload.
scheduleWithFixedDelay ( exec , initialDelay , delay , new Callable < Signal > ( ) { @ Override public Signal call ( ) { runnable . run ( ) ; // ( Exceptions are handled for us ) return Signal . REPEAT ; } } ) ;
public class RequestBuilder { /** * Build the request * @ return The request */ public Request < T > build ( ) { } }
MultiMap headers = resolveHeaders ( ) ; MultiMap parameters = resolveParameters ( ) ; parameters . add ( "version" , Integer . toString ( version ) ) ; Request < T > request = new Request < T > ( ) ; request . setEntityName ( entityName ) ; request . setEntityType ( objectType ) ; request . setHeaders ( headers ) ; request . setParameters ( parameters ) ; return request ;
public class ExtendedSAML2AuthnRequestsSignedSecurityHandler { /** * { @ inheritDoc } */ @ Override public void doInvoke ( MessageContext < SAMLObject > messageContext ) throws MessageHandlerException { } }
// When the IdP mandates signed AuthnRequests, non-AuthnRequest messages are skipped and
// unsigned AuthnRequests are rejected hard. Otherwise evaluation falls back to the
// stock OpenSAML handler. Security-sensitive: keep the fail-closed ordering intact.
if ( this . wantAuthnRequestsSigned ) { SAMLObject samlMessage = messageContext . getMessage ( ) ; if ( ! ( samlMessage instanceof AuthnRequest ) ) { log . debug ( "Inbound message is not an instance of AuthnRequest, skipping evaluation..." ) ; return ; } if ( ! isMessageSigned ( messageContext ) ) { log . error ( "AuthnRequest was not signed - this is required by the IdP" ) ; throw new MessageHandlerException ( "Inbound AuthnRequest was required to be signed but was not" ) ; } } else { super . doInvoke ( messageContext ) ; }
public class HostsResource { /** * Sets the deployment of the job identified by its { @ link JobId } on the host named by * { @ code host } to { @ code deployment } . * @ param host The host to deploy to . * @ param jobId The job to deploy . * @ param deployment Deployment information . * @ param username The user deploying . * @ param token The authorization token for this deployment . * @ return The response . */ @ PUT @ Path ( "/{host}/jobs/{job}" ) @ Produces ( APPLICATION_JSON ) @ Timed @ ExceptionMetered public JobDeployResponse jobPut ( @ PathParam ( "host" ) final String host , @ PathParam ( "job" ) final JobId jobId , @ Valid final Deployment deployment , @ RequestUser final String username , @ QueryParam ( "token" ) @ DefaultValue ( EMPTY_TOKEN ) final String token ) { } }
// Rejects unqualified job ids up front, stamps the deploying user into the deployment,
// then maps each domain exception onto the matching HTTP error response:
// already-deployed / host-missing / job-missing / port-conflict -> 400, bad token -> 403.
if ( ! jobId . isFullyQualified ( ) ) { throw badRequest ( new JobDeployResponse ( JobDeployResponse . Status . INVALID_ID , host , jobId ) ) ; } try { final Deployment actualDeployment = deployment . toBuilder ( ) . setDeployerUser ( username ) . build ( ) ; model . deployJob ( host , actualDeployment , token ) ; return new JobDeployResponse ( JobDeployResponse . Status . OK , host , jobId ) ; } catch ( JobAlreadyDeployedException e ) { throw badRequest ( new JobDeployResponse ( JobDeployResponse . Status . JOB_ALREADY_DEPLOYED , host , jobId ) ) ; } catch ( HostNotFoundException e ) { throw badRequest ( new JobDeployResponse ( JobDeployResponse . Status . HOST_NOT_FOUND , host , jobId ) ) ; } catch ( JobDoesNotExistException e ) { throw badRequest ( new JobDeployResponse ( JobDeployResponse . Status . JOB_NOT_FOUND , host , jobId ) ) ; } catch ( JobPortAllocationConflictException e ) { throw badRequest ( new JobDeployResponse ( JobDeployResponse . Status . PORT_CONFLICT , host , jobId ) ) ; } catch ( TokenVerificationException e ) { throw forbidden ( new JobDeployResponse ( JobDeployResponse . Status . FORBIDDEN , host , jobId ) ) ; }
public class GenericTreeWalker { /** * Return the first child Node from the current node , after applying filter , * whatToshow . If result is not null , set the current Node . */ public Node firstChild ( ) { } }
if ( currentNode == null ) return null ; Node node = getFirstChild ( currentNode ) ; if ( node != null ) currentNode = node ; return node ;
public class AWSGlobalAcceleratorClient { /** * Update an accelerator . * @ param updateAcceleratorRequest * @ return Result of the UpdateAccelerator operation returned by the service . * @ throws AcceleratorNotFoundException * The accelerator that you specified doesn ' t exist . * @ throws InternalServiceErrorException * There was an internal error for AWS Global Accelerator . * @ throws InvalidArgumentException * An argument that you specified is invalid . * @ sample AWSGlobalAccelerator . UpdateAccelerator * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / globalaccelerator - 2018-08-08 / UpdateAccelerator " * target = " _ top " > AWS API Documentation < / a > */ @ Override public UpdateAcceleratorResult updateAccelerator ( UpdateAcceleratorRequest request ) { } }
// Generated SDK dispatch: run the pre-execution hook (handlers/metrics), then execute.
// Do not hand-edit.
request = beforeClientExecution ( request ) ; return executeUpdateAccelerator ( request ) ;
public class InmemCounter { /** * { @ inheritDoc } * @ since 0.4.2 */ @ Override public InmemCounter setCounterFactory ( AbstractCounterFactory counterFactory ) { } }
if ( counterFactory instanceof InmemCounterFactory ) { super . setCounterFactory ( counterFactory ) ; } else { throw new IllegalArgumentException ( "Argument must be an instance of " + InmemCounterFactory . class . getName ( ) ) ; } return this ;
public class Password { public static String obfuscate ( String s ) { } }
StringBuffer buf = new StringBuffer ( ) ; byte [ ] b = s . getBytes ( ) ; synchronized ( buf ) { buf . append ( "OBF:" ) ; for ( int i = 0 ; i < b . length ; i ++ ) { byte b1 = b [ i ] ; byte b2 = b [ s . length ( ) - ( i + 1 ) ] ; int i1 = ( int ) b1 + ( int ) b2 + 127 ; int i2 = ( int ) b1 - ( int ) b2 + 127 ; int i0 = i1 * 256 + i2 ; String x = Integer . toString ( i0 , 36 ) ; switch ( x . length ( ) ) { case 1 : buf . append ( '0' ) ; case 2 : buf . append ( '0' ) ; case 3 : buf . append ( '0' ) ; default : buf . append ( x ) ; } } return buf . toString ( ) ; }
public class ScriptContainer { /** * / / / / / Protected Support / / / / / */ protected void pushIdScope ( ) { } }
// Pushes _idScope onto a per-request stack stored as an "outer" request attribute,
// creating the list on first use. No-op when no scope id was set.
// Raw ArrayList is kept (see the inline /*<String>*/ hints) for pre-generics compatibility.
if ( _idScope != null ) { HttpServletRequest req = ( HttpServletRequest ) pageContext . getRequest ( ) ; ArrayList /* < String > */ list = ( ArrayList /* < String > */ ) RequestUtils . getOuterAttribute ( req , SCOPE_ID ) ; if ( list == null ) { list = new ArrayList /* < String > */ ( ) ; RequestUtils . setOuterAttribute ( req , SCOPE_ID , list ) ; } list . add ( _idScope ) ; }
public class EntityHelper { /** * 获取全部的Field * @ param entityClass * @ param fieldList * @ return */ private static List < Field > getAllField ( Class < ? > entityClass , List < Field > fieldList ) { } }
if ( fieldList == null ) { fieldList = new ArrayList < Field > ( ) ; } if ( entityClass . equals ( Object . class ) ) { return fieldList ; } Field [ ] fields = entityClass . getDeclaredFields ( ) ; for ( Field field : fields ) { // 排除静态字段 if ( ! Modifier . isStatic ( field . getModifiers ( ) ) ) { fieldList . add ( field ) ; } } Class < ? > superClass = entityClass . getSuperclass ( ) ; if ( superClass != null && ! superClass . equals ( Object . class ) && ( ! Map . class . isAssignableFrom ( superClass ) && ! Collection . class . isAssignableFrom ( superClass ) ) ) { return getAllField ( entityClass . getSuperclass ( ) , fieldList ) ; } return fieldList ;
public class CommerceAddressRestrictionUtil { /** * Returns the last commerce address restriction in the ordered set where classNameId = & # 63 ; and classPK = & # 63 ; . * @ param classNameId the class name ID * @ param classPK the class pk * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the last matching commerce address restriction , or < code > null < / code > if a matching commerce address restriction could not be found */ public static CommerceAddressRestriction fetchByC_C_Last ( long classNameId , long classPK , OrderByComparator < CommerceAddressRestriction > orderByComparator ) { } }
// Thin static facade: delegates directly to the persistence implementation.
return getPersistence ( ) . fetchByC_C_Last ( classNameId , classPK , orderByComparator ) ;
public class Scheduler { /** * 新增Task , 使用随机UUID * @ param pattern { @ link CronPattern } 对应的String表达式 * @ param task { @ link Task } * @ return ID */ public String schedule ( String pattern , Task task ) { } }
String id = UUID . randomUUID ( ) . toString ( ) ; schedule ( id , pattern , task ) ; return id ;
public class ExportManager { /** * Indicate to associated { @ link ExportGeneration } s to * prepare give up mastership for the given partition id to hostId * @ param partitionId */ synchronized public void prepareTransferMastership ( int partitionId , int hostId ) { } }
// Drops local mastership first so this node can be re-elected master if the transfer
// fails mid-flight; then forwards to the current generation (no-op when none exists).
// remove mastership for partition id , so when failure happen during the mastership transfer // this node can be elected as new master again . m_masterOfPartitions . remove ( partitionId ) ; if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Export stream masters on " + partitionId + " are going to migrate away" ) ; } ExportGeneration generation = m_generation . get ( ) ; if ( generation == null ) { return ; } generation . prepareTransferMastership ( partitionId , hostId ) ;
public class Retryer { /** * Returns current { @ link AttemptTimelimit } instance * @ return */ public AttemptTimelimit < R > getTimelimiter ( ) { } }
return this . tryTimeout = ( null == this . tryTimeout ? new AttemptTimelimit < R > ( ) { // An implementation which actually does not attempt to limit time at all @ Override public R call ( Callable < R > callable ) throws Exception { return checkNotNull ( callable ) . call ( ) ; } } : this . tryTimeout ) ;
public class AmqpRunnableFactory { /** * Creates a new { @ link ExchangeConsumer } . For every message received ( or when the timeout waiting for messages is hit ) , the callback * is invoked with the message received . */ public ExchangeConsumer createExchangeListener ( final String name , final ConsumerCallback messageCallback ) { } }
// Fails fast (IllegalStateException) if the connection factory was never injected,
// then constructs the consumer with the factory, the AMQP config, and the callback.
Preconditions . checkState ( connectionFactory != null , "connection factory was never injected!" ) ; return new ExchangeConsumer ( connectionFactory , amqpConfig , name , messageCallback ) ;
public class WTree { /** * Map all item idsin the model to their row index . As this can be expensive save the map onto the scratch pad . * This can be very expensive and should be avoided . This will load all the nodes in a tree ( including those already * not expanded ) . * @ return the map between the all the item ids in the tree model and row indexes */ public Map < String , List < Integer > > getAllItemIdIndexMap ( ) { } }
// Uses the scratch pad as a per-request cache for the (expensive) full id->index map;
// when no user context / scratch map is available, the map is recomputed uncached.
// NOTE(review): createItemIdIndexMap(false) appears to expand all tree nodes — confirm cost.
// No user context present if ( getScratchMap ( ) == null ) { return createItemIdIndexMap ( false ) ; } Map < String , List < Integer > > map = ( Map < String , List < Integer > > ) getScratchMap ( ) . get ( ALL_IDS_TO_INDEX_SCRATCH_MAP_KEY ) ; if ( map == null ) { map = createItemIdIndexMap ( false ) ; getScratchMap ( ) . put ( ALL_IDS_TO_INDEX_SCRATCH_MAP_KEY , map ) ; } return map ;
public class X509Token { /** * Used during setup to get the certification from the keystore and encrypt the auth _ value with * the private key */ public void setCertificate ( ) throws KeyStoreException , IOException , NoSuchAlgorithmException , CertificateException , NoSuchPaddingException , InvalidKeyException , IllegalBlockSizeException , BadPaddingException , UnrecoverableEntryException { } }
KeyStore store = KeyStore . getInstance ( this . keystore_type ) ; InputStream inputStream = Thread . currentThread ( ) . getContextClassLoader ( ) . getResourceAsStream ( this . keystore_path ) ; if ( inputStream == null ) inputStream = new FileInputStream ( this . keystore_path ) ; store . load ( inputStream , this . keystore_password ) ; this . cipher = Cipher . getInstance ( this . cipher_type ) ; this . certificate = ( X509Certificate ) store . getCertificate ( this . cert_alias ) ; log . debug ( "certificate = " + this . certificate . toString ( ) ) ; this . cipher . init ( Cipher . ENCRYPT_MODE , this . certificate ) ; this . encryptedToken = this . cipher . doFinal ( this . auth_value . getBytes ( ) ) ; KeyStore . PrivateKeyEntry privateKey = ( KeyStore . PrivateKeyEntry ) store . getEntry ( this . cert_alias , new KeyStore . PasswordProtection ( this . cert_password ) ) ; this . certPrivateKey = privateKey . getPrivateKey ( ) ; this . valueSet = true ;
public class MemoryMapArchiveBase { /** * { @ inheritDoc } * @ see org . jboss . shrinkwrap . api . Archive # delete ( java . lang . String ) */ @ Override public Node delete ( String archivePath ) { } }
// Validates the string path then delegates to the ArchivePath-based overload.
Validate . notNull ( archivePath , "No path was specified" ) ; return delete ( ArchivePaths . create ( archivePath ) ) ;
public class LogRecord { /** * Retrieves the absolute position ( in the log ByteBuffer ) of the beginning * of this LogRecord ' s view buffer . */ protected int absolutePosition ( ) { } }
if ( tc . isEntryEnabled ( ) ) Tr . entry ( tc , "absolutePosition" , this ) ; if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "absolutePosition" , new Integer ( _absolutePosition ) ) ; return _absolutePosition ;
public class WaitFor { /** * Wait for http endpoint availability under given < b > internal < / b > container port . * Given port MUST be exposed ( with { @ link DockerRuleBuilder # expose ( String , String ) } or * { @ link DockerRuleBuilder # publishAllPorts ( boolean ) } ) ( reachable from the test * code point of view ) . * Side note : Internal port is required for convenience - rule will find matching * external port or , report error at startup when given internal port was not exposed . * @ param internalHttpPort Http port to scan for availability . Port is scanned with HTTP HEAD method * until response with error code 2xx or 3xx is returned or until timeout . * Port MUST be exposed for wait to work and given port number must * be internal ( as seen on container , not as on host ) port number . */ public static StartCondition httpPing ( final int internalHttpPort ) { } }
// Defers port resolution to rule-start time: the internal container port is mapped to
// its exposed host port, a ping URL is built from the docker host, and an HTTP checker
// polls it until success.
return new StartCondition ( ) { @ Override public StartConditionCheck build ( DockerRule currentRule ) { String exposedPort = currentRule . getExposedContainerPort ( Integer . toString ( internalHttpPort ) ) ; String pingUrl = String . format ( "http://%s:%s/" , currentRule . getDockerHost ( ) , exposedPort ) ; log . debug ( "new wait for condition - http ping port: {}, url: '{}'" , internalHttpPort , pingUrl ) ; return new HttpPingChecker ( pingUrl , null , null ) ; } } ;
public class OptionalParamConverterProvider { /** * { @ inheritDoc } */ @ Override public < T > ParamConverter < T > getConverter ( final Class < T > rawType , final Type genericType , final Annotation [ ] annotations ) { } }
// Handles Optional<T> parameters only. Raw Optional or Optional<String> wraps the raw
// value directly; otherwise the first registered ParamConverterProvider that understands
// the element type is adapted so the element converter runs inside Optional.map.
// Returns null (no converter) for non-Optional types or unknown element types.
// NOTE(review): inside map(...) the lambda uses the outer 'value' instead of 's' —
// functionally identical here since s == value, but 's' would be clearer.
if ( Optional . class . equals ( rawType ) ) { final List < ClassTypePair > ctps = ReflectionHelper . getTypeArgumentAndClass ( genericType ) ; final ClassTypePair ctp = ( ctps . size ( ) == 1 ) ? ctps . get ( 0 ) : null ; if ( ctp == null || ctp . rawClass ( ) == String . class ) { return new ParamConverter < T > ( ) { @ Override public T fromString ( final String value ) { return rawType . cast ( Optional . ofNullable ( value ) ) ; } @ Override public String toString ( final T value ) { return value . toString ( ) ; } } ; } final Set < ParamConverterProvider > converterProviders = Providers . getProviders ( locator , ParamConverterProvider . class ) ; for ( ParamConverterProvider provider : converterProviders ) { final ParamConverter < ? > converter = provider . getConverter ( ctp . rawClass ( ) , ctp . type ( ) , annotations ) ; if ( converter != null ) { return new ParamConverter < T > ( ) { @ Override public T fromString ( final String value ) { return rawType . cast ( Optional . ofNullable ( value ) . map ( s -> converter . fromString ( value ) ) ) ; } @ Override public String toString ( final T value ) { return value . toString ( ) ; } } ; } } } return null ;
public class HELM2NotationUtils { /** * method to get all chem polymers given a list of PolymerNotation objects * @ param polymers List of PolymerNotation objects * @ return list of chem polymers */ public final static List < PolymerNotation > getCHEMPolymers ( List < PolymerNotation > polymers ) { } }
List < PolymerNotation > chemPolymers = new ArrayList < PolymerNotation > ( ) ; for ( PolymerNotation polymer : polymers ) { if ( polymer . getPolymerID ( ) instanceof ChemEntity ) { chemPolymers . add ( polymer ) ; } } return chemPolymers ;
public class RestHelper { /** * Creates an absolute url using the given request ' s url as base and appending optional segments . * @ param request a { @ code non - null } { @ link RequestContext } * @ param pathSegments an option array of segments * @ return a string representing an absolute - url */ public static String urlFrom ( Request request , String ... pathSegments ) { } }
// Convenience overload: extracts the request URI and delegates to the URI-based variant.
return urlFrom ( request . uri ( ) , pathSegments ) ;
public class BaseClassFinderService { /** * Start up a basebundle service . * Note : You will probably want to call this from a thread and attach a service * listener since this may take some time . * @ param versionRange version * @ param secsToWait Time to wait for startup 0 = 0 , - 1 = default * @ param className * @ return true If I ' m up already * @ return false If I had a problem . */ public boolean startBaseBundle ( BundleContext context , String interfaceClassName , String dependentServiceClassName , String versionRange , Dictionary < String , String > filter , int secsToWait ) { } }
ServiceReference ServiceReference = getClassServiceReference ( ( bundleContext != null ) ? bundleContext : context , interfaceClassName , versionRange , filter ) ; if ( ( ServiceReference != null ) && ( ( ServiceReference . getBundle ( ) . getState ( ) & Bundle . ACTIVE ) != 0 ) ) return true ; // Already up ! // If the repository is not up , but the bundle is deployed , this will find it return ( ClassFinderActivator . waitForServiceStartup ( context , interfaceClassName , dependentServiceClassName , versionRange , null , secsToWait ) != null ) ;
public class Base256Serializer { /** * Returns the { @ code String } representation of the specified { @ code ByteBuffer } . * @ param byteBuffer The { @ code ByteBuffer } to be converted . * @ return The { @ code String } representation of the specified { @ code ByteBuffer } . */ public static String string ( ByteBuffer byteBuffer ) { } }
// Clones the buffer first so the caller's position/limit are untouched, drains the
// remaining bytes, then converts via the project's byte->char mapping (chars()).
ByteBuffer bb = ByteBufferUtil . clone ( byteBuffer ) ; byte [ ] bytes = new byte [ bb . remaining ( ) ] ; bb . get ( bytes ) ; return new String ( chars ( bytes ) ) ;
public class DatabaseRecordIterator { /** * Return the next record . If there are no more records , throws * a < code > NoSuchElementException < / code > . If an error occurs , * throws a < code > JournalException < / code > . * @ return next record * @ throws java . util . NoSuchElementException if there are no more records * @ throws JournalException if another error occurs */ public Record nextRecord ( ) throws NoSuchElementException , JournalException { } }
// hasNext() is responsible for loading the next 'record'. The previously returned
// record is closed before the new one is handed out, and the look-ahead slot is
// cleared so the next hasNext() fetches again.
if ( ! hasNext ( ) ) { String msg = "No current record." ; throw new NoSuchElementException ( msg ) ; } close ( lastRecord ) ; lastRecord = record ; record = null ; return lastRecord ;
public class ArrayUtils { /** * Applies a 2D torus shift to the specified row major order array of specified dimensions . * The array is interpreted as an image of given width and height ( with elements in row major order ) * and is shifted by x to the right and by y to the bottom . * @ param a array * @ param w width * @ param h height * @ param x shift in x direction * @ param y shift in y direction */ public static void shift2D ( double [ ] a , int w , int h , int x , int y ) { } }
// Normalizes negative/oversized shifts into [0,w) and [0,h); early-out when the net
// shift is zero. Each row is rotated by x (horizontal shift), then the whole flat
// array is rotated by y*w, which moves entire rows down by y.
assertPositive ( w , ( ) -> "specified width is not positive. w=" + w ) ; assertPositive ( h , ( ) -> "specified height is not positive. h=" + h ) ; while ( x < 0 ) x = w + x ; while ( y < 0 ) y = h + y ; x %= w ; y %= h ; if ( x == 0 && y == 0 ) { return ; } for ( int row = 0 ; row < h ; row ++ ) { int offset = row * w ; rotateArray ( a , w , offset , x ) ; } rotateArray ( a , w * h , 0 , y * w ) ;
public class N { /** * Note : Copied from Google Guava under Apache License v2.0 * < br / > * < br / > * If a thread is interrupted during such a call , the call continues to block until the result is available or the * timeout elapses , and only then re - interrupts the thread . * @ param timeout * @ param unit * @ param cmd */ public static void runUninterruptibly ( final long timeout , final TimeUnit unit , final Try . BiConsumer < Long , TimeUnit , InterruptedException > cmd ) { } }
// Guava-style uninterruptible run: on each InterruptedException the interrupt is
// remembered, the remaining time is recomputed against the absolute deadline (with
// overflow-safe saturation), and the command is retried. The thread's interrupt
// status is restored in the finally block, as the contract requires.
N . checkArgNotNull ( unit , "unit" ) ; N . checkArgNotNull ( cmd ) ; boolean interrupted = false ; try { long remainingNanos = unit . toNanos ( timeout ) ; final long sysNanos = System . nanoTime ( ) ; final long end = remainingNanos >= Long . MAX_VALUE - sysNanos ? Long . MAX_VALUE : sysNanos + remainingNanos ; while ( true ) { try { cmd . accept ( remainingNanos , TimeUnit . NANOSECONDS ) ; return ; } catch ( InterruptedException e ) { interrupted = true ; remainingNanos = end - System . nanoTime ( ) ; } } } finally { if ( interrupted ) { Thread . currentThread ( ) . interrupt ( ) ; } }
public class DataSetLineageService { /** * Return the schema for the given tableName . * @ param datasetName tableName * @ return Schema as JSON */ @ Override @ GraphTransaction public String getSchema ( String datasetName ) throws AtlasException { } }
// Validates/normalizes the dataset name (rejecting empty), resolves it to a
// (typeName, id) pair, then fetches the schema by id.
datasetName = ParamChecker . notEmpty ( datasetName , "table name" ) ; LOG . info ( "Fetching schema for tableName={}" , datasetName ) ; TypeUtils . Pair < String , String > typeIdPair = validateDatasetNameExists ( datasetName ) ; return getSchemaForId ( typeIdPair . left , typeIdPair . right ) ;
public class Query { /** * < code > * Helper method that takes a ~ separated string of additional parameters that can be * used to trigger rules . Takes ~ separated name / value list * < / code > * @ param values * The list of name / values */ public Query addCustomUrlParamsByString ( String values ) { } }
// Splits the input on '&' then '=' and keeps only well-formed, non-blank key=value
// pairs; null input is a no-op and malformed segments are silently skipped.
// NOTE(review): the javadoc says '~'-separated but the code splits on '&' — confirm
// which separator is actually intended; values containing '=' are also dropped
// because only exact two-part splits are accepted.
if ( values == null ) { return this ; } String [ ] params = values . split ( "&" ) ; for ( String value : params ) { if ( StringUtils . isNotBlank ( value ) ) { String [ ] keyValue = value . split ( "=" ) ; if ( keyValue . length == 2 && StringUtils . isNotBlank ( keyValue [ 0 ] ) && StringUtils . isNotBlank ( keyValue [ 1 ] ) ) { customUrlParams . add ( new CustomUrlParam ( ) . setKey ( keyValue [ 0 ] ) . setValue ( keyValue [ 1 ] ) ) ; } } } return this ;
public class Source { /** * Sets the velocity of the source . */ public void setVelocity ( float x , float y , float z ) { } }
if ( _vx != x || _vy != y || _vz != z ) { AL10 . alSource3f ( _id , AL10 . AL_VELOCITY , _vx = x , _vy = y , _vz = z ) ; }
public class Synchronizer { /** * writes the given { @ link byte [ ] } to the given { @ link FileChannel } . This method improves performance by 30 % , * because it is not needed anymore to create all AbstractKVStorable from byte - arrays . The given byte - array contains * only one { @ link AbstractKVStorable } . * @ param newData * @ param key * @ param alreadyExist * @ throws IOException */ protected boolean write ( byte [ ] newData , boolean alreadyExist ) throws IOException { } }
// Rejects records with a null key. The record's absolute file position is computed
// from the write offset plus the buffer position before the put. Records arrive
// sorted, so the key just written is the chunk's largest; it is recorded in the
// chunk header. The buffer is flushed when it becomes full.
// If the data is invalid . if ( KeyUtils . isNull ( newData , prototype . getKey ( ) . length ) ) { // if ( alreadyExist ) System . err . println ( " invalid from disk " ) ; return false ; } // if the last readChunk was full ByteBuffer toAdd = ByteBuffer . wrap ( newData ) ; long positionOfToAddInFile = writeOffset + bufferedWriter . position ( ) ; bufferedWriter . put ( toAdd ) ; largestKeyInChunk = Arrays . copyOfRange ( newData , 0 , prototype . getKey ( ) . length ) ; // elements are stored ordered so we // can easily remember the largest key int chunkId = dataFile . getChunkIndex ( positionOfToAddInFile ) ; header . setLargestKey ( chunkId , largestKeyInChunk ) ; if ( bufferedWriter . remaining ( ) == 0 ) { writeBuffer ( ) ; } return true ;
public class ImmutableMap { /** * Creates an immutable singleton instance . * @ param key * @ param value * @ return */ public static < K , V > Map < K , V > of ( K key , V value ) { } }
// Returns a single-entry immutable map backed by the project's ImmutableMapEntry.
return new ImmutableMapEntry < K , V > ( key , value ) ;
public class ErrorCollector { /** * Adds a WarningMessage to the message set . */ public void addWarning ( WarningMessage message ) { } }
if ( message . isRelevant ( configuration . getWarningLevel ( ) ) ) { if ( this . warnings == null ) { this . warnings = new LinkedList ( ) ; } this . warnings . add ( message ) ; }
public class NodeTraverser { /** * Version of { @ link # postOrder ( NodeVisitor , Collection ) } with one root . * @ param nodeVisitor the visitor of the nodes * @ param root the root node * @ return the accumulation result of this traversal */ public Object postOrder ( NodeVisitor nodeVisitor , Node root ) { } }
// Single-root convenience: wraps the root in a singleton set and delegates.
return postOrder ( nodeVisitor , Collections . singleton ( root ) ) ;
public class NonMaxLimiter { /** * Extracts local max and / or min from the intensity image . If more than the maximum features are found then * only the most intense ones will be returned * @ param intensity Feature image intensity */ public void process ( GrayF32 intensity ) { } }
// Runs non-max suppression to find minima and maxima, then merges both into one list:
// minima are stored with negated intensity so a single ranking covers both kinds.
// When the combined list exceeds maxTotalFeatures, quick-select keeps only the
// maxTotalFeatures most intense entries (partial sort, O(n) average).
originalMin . reset ( ) ; originalMax . reset ( ) ; nonmax . process ( intensity , null , null , originalMin , originalMax ) ; localExtreme . reset ( ) ; for ( int i = 0 ; i < originalMin . size ; i ++ ) { Point2D_I16 p = originalMin . get ( i ) ; float val = intensity . unsafe_get ( p . x , p . y ) ; localExtreme . grow ( ) . set ( - val , false , p ) ; } for ( int i = 0 ; i < originalMax . size ; i ++ ) { Point2D_I16 p = originalMax . get ( i ) ; float val = intensity . unsafe_get ( p . x , p . y ) ; localExtreme . grow ( ) . set ( val , true , p ) ; } if ( localExtreme . size > maxTotalFeatures ) { QuickSelect . select ( localExtreme . data , maxTotalFeatures , localExtreme . size ) ; localExtreme . size = maxTotalFeatures ; }
public class vpnvserver_stats { /** * Use this API to fetch the statistics of all vpnvserver _ stats resources that are configured on netscaler . */ public static vpnvserver_stats [ ] get ( nitro_service service ) throws Exception { } }
vpnvserver_stats obj = new vpnvserver_stats ( ) ; vpnvserver_stats [ ] response = ( vpnvserver_stats [ ] ) obj . stat_resources ( service ) ; return response ;
public class GobblinEncryptionProvider { /** * Return a StreamEncryptor for the given algorithm and with appropriate parameters . * @ param algorithm Algorithm to build * @ param parameters Parameters for algorithm * @ return A StreamEncoder for that algorithm * @ throws IllegalArgumentException If the given algorithm / parameter pair cannot be built */ public StreamCodec buildStreamCryptoProvider ( String algorithm , Map < String , Object > parameters ) { } }
// Dispatches on the algorithm tag: "any"/"aes_rotating" requires a credential store
// (hard failure when absent); GPG uses password-based encryption when no keystore
// path is configured, otherwise a keystore plus hex key id (0 when no key name —
// valid for decrypt-only use, where the key name comes from the encrypted file).
// Unknown algorithms return null rather than throwing.
switch ( algorithm ) { case EncryptionConfigParser . ENCRYPTION_TYPE_ANY : case "aes_rotating" : CredentialStore cs = CredentialStoreFactory . buildCredentialStore ( parameters ) ; if ( cs == null ) { throw new IllegalArgumentException ( "Failed to build credential store; can't instantiate AES" ) ; } return new RotatingAESCodec ( cs ) ; case GPGCodec . TAG : String password = EncryptionConfigParser . getKeystorePassword ( parameters ) ; String keystorePathStr = EncryptionConfigParser . getKeystorePath ( parameters ) ; String keyName = EncryptionConfigParser . getKeyName ( parameters ) ; String cipherName = EncryptionConfigParser . getCipher ( parameters ) ; // if not using a keystore then use password based encryption if ( keystorePathStr == null ) { Preconditions . checkNotNull ( password , "Must specify an en/decryption password for GPGCodec!" ) ; return new GPGCodec ( password , cipherName ) ; } // if a key name is not present then use a key id of 0 . A GPGCodec may be configured without a key name // when used only for decryption where the key name is retrieved from the encrypted file return new GPGCodec ( new Path ( keystorePathStr ) , password , keyName == null ? 0 : Long . parseUnsignedLong ( keyName , 16 ) , cipherName ) ; default : log . debug ( "Do not support encryption type {}" , algorithm ) ; return null ; }
public class KeyVaultClientBaseImpl { /** * Recovers the deleted certificate back to its current version under / certificates . * The RecoverDeletedCertificate operation performs the reversal of the Delete operation . The operation is applicable in vaults enabled for soft - delete , and must be issued during the retention interval ( available in the deleted certificate ' s attributes ) . This operation requires the certificates / recover permission . * @ param vaultBaseUrl The vault name , for example https : / / myvault . vault . azure . net . * @ param certificateName The name of the deleted certificate * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws KeyVaultErrorException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @ return the CertificateBundle object if successful . */ public CertificateBundle recoverDeletedCertificate ( String vaultBaseUrl , String certificateName ) { } }
// Blocking facade over the async service call: waits for the single response and
// unwraps its body.
return recoverDeletedCertificateWithServiceResponseAsync ( vaultBaseUrl , certificateName ) . toBlocking ( ) . single ( ) . body ( ) ;
public class ParseLocalDateTime { /** * { @ inheritDoc } */ @ Override protected LocalDateTime parse ( final String string , final DateTimeFormatter formatter ) { } }
// Direct delegation to java.time parsing with the supplied formatter.
return LocalDateTime . parse ( string , formatter ) ;
public class IntegralImageOps { /** * Convolves a kernel around a single point in the integral image . * @ param integral Input integral image . Not modified . * @ param kernel Convolution kernel . * @ param x Pixel the convolution is performed at . * @ param y Pixel the convolution is performed at . * @ return Value of the convolution */ public static double convolveSparse ( GrayF64 integral , IntegralKernel kernel , int x , int y ) { } }
// Delegates to the low-level implementation specialized for GrayF64 integral images.
return ImplIntegralImageOps . convolveSparse ( integral , kernel , x , y ) ;
public class Stoichiometry { /** * Let a user - defined function handle the entire string representation of a stoichiometry . * @ param customStringGenerator * A function which accepts a list of subunit clusters and returns a string . */ public void setCustomStringGenerator ( Function < List < SubunitCluster > , String > customStringGenerator ) { } }
// Switches the overflow strategy to CUSTOM and stores the user-supplied generator,
// which then produces the entire string representation.
this . strategy = StringOverflowStrategy . CUSTOM ; this . customStringGenerator = customStringGenerator ;
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public EClass getIfcOffsetCurve3D ( ) { } }
// EMF-generated lazy lookup of the IfcOffsetCurve3D EClass by its fixed classifier
// index (395) in the registered Ifc4 package. @generated — do not hand-edit.
if ( ifcOffsetCurve3DEClass == null ) { ifcOffsetCurve3DEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc4Package . eNS_URI ) . getEClassifiers ( ) . get ( 395 ) ; } return ifcOffsetCurve3DEClass ;
public class AgentOutput { /** * Send a SSH2 _ AGENTC _ SIGN _ REQUEST message to ssh - agent . * @ param rsaPublicKey The { @ link RSAPublicKey } that tells ssh - agent which private key to use to * sign the data . * @ param data The data in bytes to be signed . */ void signRequest ( final RSAPublicKey rsaPublicKey , final byte [ ] data ) throws IOException { } }
// Builds the ssh-agent sign-request wire format by hand:
//   [4-byte total length][1-byte msg type][4-byte key-blob length]
//   [key blob: length-prefixed type string, public exponent e, modulus n]
//   [4-byte data length][data][4 zero flag bytes]
// The literal 13 in the total length is: 1 (msg type) + 4 (key-blob length field)
// + 4 (data length field) + 4 (flags). RSA-only for now (see TODO).
// TODO ( dxia ) Support more than just Rsa keys final String keyType = Rsa . RSA_LABEL ; final byte [ ] publicExponent = rsaPublicKey . getPublicExponent ( ) . toByteArray ( ) ; final byte [ ] modulus = rsaPublicKey . getModulus ( ) . toByteArray ( ) ; // Four bytes indicating length of string denoting key type // Four bytes indicating length of public exponent // Four bytes indicating length of modulus final int publicKeyLength = 4 + keyType . length ( ) + 4 + publicExponent . length + 4 + modulus . length ; // The message is made of : // Four bytes indicating length in bytes of rest of message // One byte indicating SSH2 _ AGENTC _ SIGN _ REQUEST // Four bytes denoting length of public key // Bytes representing the public key // Four bytes for length of data // Bytes representing data to be signed // Four bytes of flags final ByteBuffer buff = ByteBuffer . allocate ( INT_BYTES + 1 + INT_BYTES + publicKeyLength + INT_BYTES + data . length + 4 ) ; // 13 = // One byte indicating SSH2 _ AGENTC _ SIGN _ REQUEST // Four bytes denoting length of public key // Four bytes for length of data // Four bytes of flags buff . putInt ( publicKeyLength + data . length + 13 ) ; buff . put ( ( byte ) SSH2_AGENTC_SIGN_REQUEST ) ; // Add the public key buff . putInt ( publicKeyLength ) ; buff . putInt ( keyType . length ( ) ) ; for ( final byte b : keyType . getBytes ( ) ) { buff . put ( b ) ; } buff . putInt ( publicExponent . length ) ; buff . put ( publicExponent ) ; buff . putInt ( modulus . length ) ; buff . put ( modulus ) ; // Add the data to be signed buff . putInt ( data . length ) ; buff . put ( data ) ; // Add empty flags buff . put ( new byte [ ] { 0 , 0 , 0 , 0 } ) ; out . write ( buff . array ( ) ) ; out . flush ( ) ; log . debug ( "Sent SSH2_AGENTC_SIGN_REQUEST message to ssh-agent." ) ;
public class DateTimeValue { /** * Sets month and day of the given calendar making use of of the monthDay * representation defined in EXI format * @ param monthDay * monthDay * @ param cal * calendar */ protected static void setMonthDay ( int monthDay , Calendar cal ) { } }
// monthDay = month * 32 + day ; int month = monthDay / MONTH_MULTIPLICATOR ; cal . set ( Calendar . MONTH , month - 1 ) ; int day = monthDay - month * MONTH_MULTIPLICATOR ; cal . set ( Calendar . DAY_OF_MONTH , day ) ;
public class HTODDynacache {
    /**
     * Clears the entire disk cache: wipes the in-memory auxiliary dependency-id and
     * template tables, stops disk activity, deletes all on-disk cache files, and then
     * re-initializes the on-disk structures from scratch.
     *
     * @return {@code NO_EXCEPTION} on success, or {@code DISK_EXCEPTION} /
     *         {@code OTHER_EXCEPTION} if the rebuild failed
     */
    public int clearDiskCache() {
        final String methodName = "clearDiskCache()";
        int returnCode = NO_EXCEPTION;
        Exception diskException = null;
        try {
            // Mark the clear as in progress so concurrent readers/writers back off;
            // cleared unconditionally in the outer finally.
            this.invalidationBuffer.setDiskClearInProgress(true);
            if (delayOffload) {
                // Drop buffered (not-yet-offloaded) dependency/template data.
                if (!this.disableDependencyId) {
                    auxDataDependencyTable.clear();
                }
                if (!this.disableTemplatesSupport) {
                    auxTemplateDependencyTable.clear();
                }
            }
            stop(COMPLETE_CLEAR);
            try {
                // Exclusive access while the files are deleted and recreated.
                rwLock.writeLock().lock();
                closeNoRWLock();
                deleteDiskCacheFiles(); // delete disk cache files
                this.cod.diskCacheSizeInfo.reset();
                init_files(); // restart things
                // Reset size accounting to the empty-cache baseline.
                this.cod.enableCacheSizeInBytes = true;
                this.cod.currentCacheSizeInBytes = this.minDiskCacheSizeInBytes;
                if (this.cod.diskCacheSizeInfo.diskCacheSizeInGBLimit > 0) {
                    this.cache.setEnableDiskCacheSizeInBytesChecking(true);
                }
                if (this.cod.evictionPolicy != CacheConfig.EVICTION_NONE) {
                    synchronized (evictionTableMonitor) {
                        this.EvictionTable.clear();
                    }
                }
            } catch (FileManagerException ex) {
                this.diskCacheException = ex;
                diskException = ex;
                returnCode = DISK_EXCEPTION;
            } catch (HashtableOnDiskException ex) {
                this.diskCacheException = ex;
                diskException = ex;
                returnCode = DISK_EXCEPTION;
            } catch (IOException ex) {
                this.diskCacheException = ex;
                diskException = ex;
                returnCode = DISK_EXCEPTION;
            } catch (Exception ex) {
                // Unexpected failure: reported but not recorded as a disk-cache exception.
                returnCode = OTHER_EXCEPTION;
                diskException = ex;
            } finally {
                if (returnCode != NO_EXCEPTION) {
                    if (tc.isDebugEnabled())
                        Tr.debug(tc, methodName, "cacheName=" + this.cacheName + "\n Exception: "
                                + ExceptionUtility.getStackTrace(diskException));
                }
                if (returnCode == DISK_EXCEPTION || returnCode == OTHER_EXCEPTION) {
                    // First-failure data capture for serviceability.
                    com.ibm.ws.ffdc.FFDCFilter.processException(diskException,
                            "com.ibm.ws.cache.HTODDynacache.clearDiskCache", "525", this);
                }
                rwLock.writeLock().unlock();
            }
        } finally {
            this.invalidationBuffer.setDiskClearInProgress(false);
        }
        return returnCode;
    }
}
public class IotHubResourcesInner {
    /**
     * Gets a single page of routing-endpoint health data for an IoT hub.
     *
     * @param resourceGroupName the resource group containing the IoT hub
     * @param iotHubName the name of the IoT hub
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the PagedList&lt;EndpointHealthDataInner&gt; object wrapped in
     *         {@link ServiceResponse} if successful
     */
    public Observable<ServiceResponse<Page<EndpointHealthDataInner>>> getEndpointHealthSinglePageAsync(
            final String resourceGroupName, final String iotHubName) {
        // Validate required client and call parameters before issuing the request.
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (iotHubName == null) {
            throw new IllegalArgumentException("Parameter iotHubName is required and cannot be null.");
        }
        if (this.client.apiVersion() == null) {
            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
        }
        // Issue the REST call, then adapt the raw response into a typed single-page
        // ServiceResponse; any deserialization failure is surfaced as an error Observable.
        return service.getEndpointHealth(this.client.subscriptionId(), resourceGroupName, iotHubName,
                this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent())
                .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Page<EndpointHealthDataInner>>>>() {
                    @Override
                    public Observable<ServiceResponse<Page<EndpointHealthDataInner>>> call(Response<ResponseBody> response) {
                        try {
                            ServiceResponse<PageImpl<EndpointHealthDataInner>> result = getEndpointHealthDelegate(response);
                            return Observable.just(new ServiceResponse<Page<EndpointHealthDataInner>>(result.body(), result.response()));
                        } catch (Throwable t) {
                            return Observable.error(t);
                        }
                    }
                });
    }
}
public class ThreadLocalXPathVariableResolver { /** * / * ( non - Javadoc ) * @ see javax . xml . xpath . XPathVariableResolver # resolveVariable ( javax . xml . namespace . String ) */ @ Override public Object resolveVariable ( QName variableName ) { } }
final Map < String , ? > variables = this . localVariables . get ( ) ; if ( variables == null ) { return null ; } final String localPart = variableName . getLocalPart ( ) ; return variables . get ( localPart ) ;
public class RendererBuilder { /** * Return the item view type used by the adapter to implement recycle mechanism . * @ param content to be rendered . * @ return an integer that represents the renderer inside the adapter . */ int getItemViewType ( T content ) { } }
Class prototypeClass = getPrototypeClass ( content ) ; validatePrototypeClass ( prototypeClass ) ; return getItemViewType ( prototypeClass ) ;
public class ConfirmSignUpRequestMarshaller {
    /**
     * Marshalls the given {@link ConfirmSignUpRequest} fields into the protocol
     * marshaller, one binding per field.
     *
     * @param confirmSignUpRequest the request to marshall; must not be {@code null}
     * @param protocolMarshaller the marshaller receiving the field values
     * @throws SdkClientException if the request is {@code null} or marshalling fails
     */
    public void marshall(ConfirmSignUpRequest confirmSignUpRequest, ProtocolMarshaller protocolMarshaller) {
        if (confirmSignUpRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Each getter is marshalled against its pre-built field binding.
            protocolMarshaller.marshall(confirmSignUpRequest.getClientId(), CLIENTID_BINDING);
            protocolMarshaller.marshall(confirmSignUpRequest.getSecretHash(), SECRETHASH_BINDING);
            protocolMarshaller.marshall(confirmSignUpRequest.getUsername(), USERNAME_BINDING);
            protocolMarshaller.marshall(confirmSignUpRequest.getConfirmationCode(), CONFIRMATIONCODE_BINDING);
            protocolMarshaller.marshall(confirmSignUpRequest.getForceAliasCreation(), FORCEALIASCREATION_BINDING);
            protocolMarshaller.marshall(confirmSignUpRequest.getAnalyticsMetadata(), ANALYTICSMETADATA_BINDING);
            protocolMarshaller.marshall(confirmSignUpRequest.getUserContextData(), USERCONTEXTDATA_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class SrvI18n { /** * < p > Default initializer that load message bundle by default locale . < / p > */ public final void initDefault ( ) { } }
try { this . messages = null ; this . messages = ResourceBundle . getBundle ( "Messages" ) ; } catch ( Exception e ) { this . logger . error ( null , SrvI18n . class , " when loading msgs for default locale " , e ) ; } if ( messages != null ) { this . messagesMap . put ( Locale . getDefault ( ) . getLanguage ( ) , this . messages ) ; this . logger . info ( null , SrvI18n . class , "Added messages for default locale: " + Locale . getDefault ( ) ) ; } else { // If there is no MessagesBundle [ current - locale ] . properties this . logger . error ( null , SrvI18n . class , "There is no messages for current locale: " + Locale . getDefault ( ) ) ; }
public class ScriptBuilderImpl { /** * Create a SarlSpace builder . * @ param name the name of the SarlSpace . * @ return the builder . */ public ISarlSpaceBuilder addSarlSpace ( String name ) { } }
ISarlSpaceBuilder builder = this . sarlSpaceProvider . get ( ) ; builder . eInit ( getScript ( ) , name , getTypeResolutionContext ( ) ) ; return builder ;
public class MesosScheduler { /** * Start the scheduler driver and wait it to get registered */ protected void startSchedulerDriver ( ) { } }
// start the driver non - blocking , // since we need to set heron state after the scheduler driver is started . // Heron will block the main thread eventually driver . start ( ) ; // Staging the Mesos Framework LOG . info ( "Waiting for Mesos Framework get registered" ) ; long timeout = MesosContext . getHeronMesosFrameworkStagingTimeoutMs ( config ) ; if ( ! mesosFramework . waitForRegistered ( timeout , TimeUnit . MILLISECONDS ) ) { throw new RuntimeException ( "Failed to register with Mesos Master in time" ) ; }
public class RegexGatewayPersonAttributeDao {
    /**
     * Delegates to the wrapped target DAO for the set of possible user attribute names.
     *
     * @param filter the person-attribute DAO filter forwarded to the target
     * @return the attribute names reported by the target DAO
     * @see org.jasig.services.persondir.IPersonAttributeDao#getPossibleUserAttributeNames()
     */
    @JsonIgnore
    @Override
    public Set<String> getPossibleUserAttributeNames(final IPersonAttributeDaoFilter filter) {
        return targetPersonAttributeDao.getPossibleUserAttributeNames(filter);
    }
}
public class PageBlobImpl { /** * Callback after the data has been written to the mmap , which allows for * reads ( but not necessarily fsynced . ) */ @ Override public void afterDataFlush ( PageServiceImpl tableService , int sequenceFlush ) { } }
super . afterDataFlush ( tableService , sequenceFlush ) ; tableService . compareAndSetBlobPage ( this , _stub ) ; ArrayList < TempBuffer > buffers = _buffers ; _buffers = null ; if ( buffers != null ) { for ( TempBuffer buffer : buffers ) { buffer . free ( ) ; } }
public class Filters { /** * Combines the filters in a filter chain . * The given filters are applied one by one in the order that hey appear in the method * argument list . * The string returns by the * { @ link Filter # beforeAppend ( String , StringBuilder ) } method of one filter is passed a * parameter to the next one if it is not { @ code null } . If it is { @ code null } , * then the { @ code beforeAppend } * won ' t be called any more and the latest non - null result is appended to the expect internal * buffer . * If the return value of the { @ link Filter # afterAppend ( StringBuilder ) } method is true , * then all the calls * of this method on the consequent filters will be suppressed . * @ param filters the filters , not { @ code null } * @ return the combined filter */ public static Filter chain ( final Filter ... filters ) { } }
return new FilterAdapter ( ) { @ Override protected String doBeforeAppend ( String string , StringBuilder buffer ) { String previousResult = null ; for ( Filter filter : filters ) { string = filter . beforeAppend ( string , buffer ) ; if ( string == null ) { return previousResult ; } previousResult = string ; } return string ; } @ Override protected boolean doAfterAppend ( StringBuilder buffer ) { for ( Filter filter : filters ) { if ( filter . afterAppend ( buffer ) ) { return true ; } } return false ; } } ;
public class Mappings {
    /**
     * (mapping) Wraps a base mapping so it produces an {@code Optional} of the
     * original value: empty input converts to {@code Optional.empty()} and skips
     * validation; non-empty input delegates to the base mapping.
     *
     * @param base base mapping
     * @param constraints constraints attached to the resulting mapping
     * @param <T> base type
     * @return the newly created optional mapping
     */
    public static <T> Mapping<Optional<T>> optional(Mapping<T> base, Constraint... constraints) {
        String mname = "optional " + base.meta().name;
        return new FieldMapping<Optional<T>>(
                base.options()._inputMode(),
                // Converter: empty input -> Optional.empty(); otherwise convert via base.
                ((name, data) -> {
                    logger.debug("optional - converting {}", name);
                    if (isEmptyInput(name, data, base.options()._inputMode())) {
                        return Optional.empty();
                    } else return Optional.of(base.convert(name, data));
                }),
                // Validator: empty input is always valid; otherwise validate via base.
                ((name, data, messages, options) -> {
                    logger.debug("optional - validating {}", name);
                    if (isEmptyInput(name, data, base.options()._inputMode())) {
                        return Collections.emptyList();
                    } else {
                        // merge the optional's constraints/label to base mapping then do validating
                        return base.options(o -> o.append_constraints(options._constraints()))
                                .options(o -> o._label(o._label().orElse(options._label().orElse(null))))
                                .validate(name, data, messages, options);
                    }
                }),
                new MappingMeta(mname, Optional.class, base))
                // Constraints run via the base mapping above, so the wrapper itself
                // ignores them; the caller-supplied constraints are still attached.
                .options(o -> o._ignoreConstraints(true))
                .constraint(constraints);
    }
}
public class RegionDiskClient { /** * Retrieves the list of persistent disks contained within the specified region . * < p > Sample code : * < pre > < code > * try ( RegionDiskClient regionDiskClient = RegionDiskClient . create ( ) ) { * ProjectRegionName region = ProjectRegionName . of ( " [ PROJECT ] " , " [ REGION ] " ) ; * for ( Disk element : regionDiskClient . listRegionDisks ( region . toString ( ) ) . iterateAll ( ) ) { * / / doThingsWith ( element ) ; * < / code > < / pre > * @ param region Name of the region for this request . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final ListRegionDisksPagedResponse listRegionDisks ( String region ) { } }
ListRegionDisksHttpRequest request = ListRegionDisksHttpRequest . newBuilder ( ) . setRegion ( region ) . build ( ) ; return listRegionDisks ( request ) ;
public class ServletsModuleBuilder {
    /**
     * The first level of the EDSL: begins binding the given URL patterns to a servlet.
     *
     * @param urlPatterns servlet-style URL patterns to serve
     * @return a binding builder for attaching the servlet key
     */
    public ServletModule.ServletKeyBindingBuilder serve(List<String> urlPatterns) {
        // Parse each pattern with servlet-style matching rules before binding.
        return new ServletKeyBindingBuilderImpl(parsePatterns(UriPatternType.SERVLET, urlPatterns));
    }
}
public class CassandraDeepJobConfig { /** * Validates if any of the mandatory fields have been configured or not . Throws an { @ link IllegalArgumentException } * if any of the mandatory properties have not been configured . */ void validate ( ) { } }
validateCassandraParams ( ) ; if ( pageSize <= 0 ) { throw new IllegalArgumentException ( "pageSize cannot be zero" ) ; } validateConsistencyLevels ( ) ; TableMetadata tableMetadata = fetchTableMetadata ( ) ; validateTableMetadata ( tableMetadata ) ; validateAdditionalFilters ( tableMetadata ) ; if ( ! ( this . isBisectModeSet && this . isSplitModeSet ) ) { if ( this . isBisectModeSet ) { if ( this . bisectFactor != Constants . DEFAULT_BISECT_FACTOR && ! this . checkIsPowerOfTwo ( this . bisectFactor ) ) { throw new IllegalArgumentException ( "Bisect factor should be greater than zero and a power of 2" ) ; } } else if ( this . isSplitModeSet ) { if ( this . splitSize <= 0 ) { throw new IllegalArgumentException ( "The split size must be a positve integer" ) ; } } else { throw new IllegalArgumentException ( "One split mode must be defined, please choose between Split or Bisect" ) ; } } else { throw new IllegalArgumentException ( "Only one split mode can be defined, please choose between Split or Bisect" ) ; }
public class AbstractParamContainerPanel { /** * Gets text field that shows the name of the selected panel . * @ return the text field that shows the name of the selected panel */ private ZapTextField getTxtHeadline ( ) { } }
if ( txtHeadline == null ) { txtHeadline = new ZapTextField ( ) ; txtHeadline . setBorder ( javax . swing . BorderFactory . createEtchedBorder ( javax . swing . border . EtchedBorder . RAISED ) ) ; txtHeadline . setEditable ( false ) ; txtHeadline . setEnabled ( false ) ; txtHeadline . setBackground ( java . awt . Color . white ) ; txtHeadline . setFont ( FontUtils . getFont ( Font . BOLD ) ) ; } return txtHeadline ;
public class DescribeTransitGatewaysRequest { /** * The IDs of the transit gateways . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setTransitGatewayIds ( java . util . Collection ) } or { @ link # withTransitGatewayIds ( java . util . Collection ) } if * you want to override the existing values . * @ param transitGatewayIds * The IDs of the transit gateways . * @ return Returns a reference to this object so that method calls can be chained together . */ public DescribeTransitGatewaysRequest withTransitGatewayIds ( String ... transitGatewayIds ) { } }
if ( this . transitGatewayIds == null ) { setTransitGatewayIds ( new com . amazonaws . internal . SdkInternalList < String > ( transitGatewayIds . length ) ) ; } for ( String ele : transitGatewayIds ) { this . transitGatewayIds . add ( ele ) ; } return this ;
public class Ifc2x3tc1PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public EClass getIfcFlowSegmentType ( ) { } }
if ( ifcFlowSegmentTypeEClass == null ) { ifcFlowSegmentTypeEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc2x3tc1Package . eNS_URI ) . getEClassifiers ( ) . get ( 249 ) ; } return ifcFlowSegmentTypeEClass ;
public class DatabaseTableConfigLoader { /** * Read a field into our table configuration for field = value line . */ private static < T > void readTableField ( DatabaseTableConfig < T > config , String field , String value ) { } }
if ( field . equals ( FIELD_NAME_DATA_CLASS ) ) { try { @ SuppressWarnings ( "unchecked" ) Class < T > clazz = ( Class < T > ) Class . forName ( value ) ; config . setDataClass ( clazz ) ; } catch ( ClassNotFoundException e ) { throw new IllegalArgumentException ( "Unknown class specified for dataClass: " + value ) ; } } else if ( field . equals ( FIELD_NAME_TABLE_NAME ) ) { config . setTableName ( value ) ; }
public class MailAddress { /** * Returns the decoded string , in case it contains non us - ascii characters . * Returns the same string if it doesn ' t or the passed value in case * of an UnsupportedEncodingException . * @ param str string to be decoded * @ return the decoded string , in case it contains non us - ascii characters ; * or the same string if it doesn ' t or the passed value in case * of an UnsupportedEncodingException . */ private String decodeStr ( String str ) { } }
try { return MimeUtility . decodeText ( str ) ; } catch ( UnsupportedEncodingException e ) { return str ; }
public class PathBuilder {
    /**
     * Creates a new Set-typed path for the given property.
     *
     * @param <A>      element type of the set
     * @param property property name
     * @param type     property element type
     * @return the property path
     */
    public <A> SetPath<A, PathBuilder<A>> getSet(String property, Class<A> type) {
        // Explicit type witness selects the overload whose element-path type is PathBuilder.
        return this.<A, PathBuilder<A>>getSet(property, type, PathBuilder.class);
    }
}
public class DateTimePerformance { private void checkJodaGetYear ( ) { } }
int COUNT = COUNT_VERY_FAST ; DateTime dt = new DateTime ( GJChronology . getInstance ( ) ) ; for ( int i = 0 ; i < AVERAGE ; i ++ ) { start ( "Joda" , "getYear" ) ; for ( int j = 0 ; j < COUNT ; j ++ ) { int val = dt . getYear ( ) ; if ( val == 0 ) { System . out . println ( "Anti optimise" ) ; } } end ( COUNT ) ; }
public class CookieUtils { /** * 清除cookie * @ param response * 返回response * @ param cookieName * cookie名 */ public static void resetCookieValue ( HttpServletResponse response , String cookieName ) { } }
Cookie cookie = new Cookie ( cookieName , null ) ; cookie . setMaxAge ( 0 ) ; response . addCookie ( cookie ) ;
public class VirtualHostImpl {
    /**
     * Called by the VirtualHostMap when an endpoint that applies to this virtual
     * host (matches one of the configured aliases) has stopped listening on a port.
     * Updates the per-endpoint state, maintains the listening-port count, and
     * notifies registered contexts.
     *
     * @param endpoint         the endpoint that was stopped
     * @param targetConfig     config used to update the OSGi service registration
     * @param resolvedHostName a suitable hostname for use in messages
     * @param port             the port that has stopped listening
     * @param isHttps          true if this is an https port
     * @see #listenerStarted(HttpEndpointImpl, String, int, boolean)
     */
    synchronized void listenerStopped(HttpEndpointImpl endpoint, VirtualHostConfig targetConfig,
            String resolvedHostName, int port, boolean isHttps) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled()) {
            Tr.event(this, tc, "listener stopped for " + this + " on port " + port, endpoint);
        }
        if (!activated)
            return;
        EndpointState oldState = myEndpoints.get(endpoint);
        if (oldState == null) {
            // we must have been here before..
            return;
        }
        // Clear only the side (http or https) that stopped; keep the other.
        int newHttpPort = isHttps ? oldState.httpPort : EndpointState.notStarted.httpPort;
        int newHttpsPort = isHttps ? EndpointState.notStarted.httpsPort : oldState.httpsPort;

        // Check if we actually removed the port
        boolean removedPort = (oldState.httpPort > 0 && newHttpPort == EndpointState.notStarted.httpPort)
                || (oldState.httpsPort > 0 && newHttpsPort == EndpointState.notStarted.httpsPort);

        if (newHttpPort == EndpointState.notStarted.httpPort && newHttpsPort == EndpointState.notStarted.httpsPort) {
            // remove the endpoint entirely (see test above..)
            myEndpoints.remove(endpoint);
        } else {
            EndpointState newState = new EndpointState(resolvedHostName, newHttpPort, newHttpsPort);
            myEndpoints.put(endpoint, newState);
        }

        int numPorts;
        if (removedPort)
            numPorts = listeningPorts.decrementAndGet();
        else
            numPorts = listeningPorts.get();

        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
            Tr.debug(this, tc, "listener stopped: " + listeningPorts.get(), oldState, myEndpoints.get(endpoint));
        }
        // notify all context roots (test for URL change..)
        // the notification methods below use a) how many ports are left in combination with
        // b) what host:port they used for the initial notification to determine whether or not
        // to issue additional messages (context removed vs. context moved)
        for (HttpContainerContext ctx : httpContainers) {
            ctx.notifyExistingContexts(false, resolvedHostName, port, isHttps, numPorts);
        }
        osgiService.updateRegistration(false, targetConfig, true);
    }
}
public class MakeValidOp { /** * Return a set of segments from a linestring * @ param lines * @ return */ private Set < LineString > getSegments ( Collection < LineString > lines ) { } }
Set < LineString > set = new HashSet < > ( ) ; for ( LineString line : lines ) { Coordinate [ ] cc = line . getCoordinates ( ) ; for ( int i = 1 ; i < cc . length ; i ++ ) { if ( ! cc [ i - 1 ] . equals ( cc [ i ] ) ) { LineString segment = line . getFactory ( ) . createLineString ( new Coordinate [ ] { new Coordinate ( cc [ i - 1 ] ) , new Coordinate ( cc [ i ] ) } ) ; set . add ( segment ) ; } } } return set ;