signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class Ifc2x3tc1PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public EClass getIfcConstructionMaterialResource ( ) { } }
if ( ifcConstructionMaterialResourceEClass == null ) { ifcConstructionMaterialResourceEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc2x3tc1Package . eNS_URI ) . getEClassifiers ( ) . get ( 112 ) ; } return ifcConstructionMaterialResourceEClass ;
public class Router { /** * Sets the root Controller . If any { @ link Controller } s are currently in the backstack , they will be removed . * @ param transaction The transaction detailing what should be pushed , including the { @ link Controller } , * and its push and pop { @ link ControllerChangeHandler } , and its tag . */ @ UiThread public void setRoot ( @ NonNull RouterTransaction transaction ) { } }
ThreadUtils . ensureMainThread ( ) ; List < RouterTransaction > transactions = Collections . singletonList ( transaction ) ; setBackstack ( transactions , transaction . pushChangeHandler ( ) ) ;
public class Entry { /** * Returns the children of this { @ code Entry } . * @ return a { @ code SortedSet } of { @ code Entry } objects * @ throws java . io . IOException if an I / O exception occurs */ public SortedSet < Entry > getChildEntries ( ) throws IOException { } }
if ( children == null ) { if ( isFile ( ) || rootNodeDId == - 1 ) { children = NO_CHILDREN ; } else { // Start at root node in R / B tree , and read to the left and right , // re - build tree , according to the docs children = Collections . unmodifiableSortedSet ( document . getEntries ( rootNodeDId , this ) ) ; } } return children ;
public class ObjectWrapper { /** * Returns the value of the specified mapped property from the wrapped object . * @ param property the mapped property whose value is to be extracted , cannot be { @ code null } * @ param key the key of the property value to be extracted , can be { @ code null } * @ return the mapped property value * @ throws ReflectionException if a reflection error occurs * @ throws IllegalArgumentException if the property parameter is { @ code null } * @ throws IllegalArgumentException if the mapped object in the wrapped object is not a { @ link Map } type * @ throws NullPointerException if the mapped object in the wrapped object is { @ code null } */ public Object getMappedValue ( Property property , Object key ) { } }
return getMappedValue ( object , property , key ) ;
public class BookKeeperLogFactory { /** * region AutoCloseable Implementation */ @ Override public void close ( ) { } }
val bk = this . bookKeeper . getAndSet ( null ) ; if ( bk != null ) { try { bk . close ( ) ; } catch ( Exception ex ) { log . error ( "Unable to close BookKeeper client." , ex ) ; } }
public class SynchronousRequest { /** * For more info on event detail API go < a href = " https : / / wiki . guildwars2 . com / wiki / API : 1 / event _ details " > here < / a > < br / > * @ param id event id * @ return event details * @ throws GuildWars2Exception see { @ link ErrorCode } for detail * @ see EventDetail event detail */ public EventDetail getEventDetailedInfo ( String id ) throws GuildWars2Exception { } }
isParamValid ( new ParamChecker ( ParamType . ID , id ) ) ; try { Response < EventDetail > response = gw2API . getEventDetailedInfo ( id , GuildWars2 . lang . getValue ( ) ) . execute ( ) ; if ( ! response . isSuccessful ( ) ) throwError ( response . code ( ) , response . errorBody ( ) ) ; return response . body ( ) ; } catch ( IOException e ) { throw new GuildWars2Exception ( ErrorCode . Network , "Network Error: " + e . getMessage ( ) ) ; }
public class ResourceServersFilter { /** * Filter by page * @ param pageNumber the page number to retrieve . * @ param amountPerPage the amount of items per page to retrieve . * @ return this filter instance */ public ResourceServersFilter withPage ( int pageNumber , int amountPerPage ) { } }
parameters . put ( "page" , pageNumber ) ; parameters . put ( "per_page" , amountPerPage ) ; return this ;
public class TaskLogsMonitor { /** * Truncate the log file of this task - attempt so that only the last retainSize * many bytes of each log file is retained and the log file is reduced in size * saving disk space . * @ param taskID Task whose logs need to be truncated * @ param oldLogFileDetail contains the original log details for the attempt * @ param taskRetainSize retain - size * @ param tmpFileWriter New log file to write to . Already opened in append * mode . * @ param logFileReader Original log file to read from . * @ return * @ throws IOException */ private LogFileDetail truncateALogFileOfAnAttempt ( final TaskAttemptID taskID , final LogFileDetail oldLogFileDetail , final long taskRetainSize , final FileWriter tmpFileWriter , final FileReader logFileReader ) throws IOException { } }
LogFileDetail newLogFileDetail = new LogFileDetail ( ) ; // / / / / / Truncate log file / / / / / // New location of log file is same as the old newLogFileDetail . location = oldLogFileDetail . location ; if ( taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION && oldLogFileDetail . length > taskRetainSize ) { LOG . info ( "Truncating logs for " + taskID + " from " + oldLogFileDetail . length + "bytes to " + taskRetainSize + "bytes." ) ; newLogFileDetail . length = taskRetainSize ; } else { LOG . info ( "No truncation needed for " + taskID + " length is " + oldLogFileDetail . length + " retain size " + taskRetainSize + "bytes." ) ; newLogFileDetail . length = oldLogFileDetail . length ; } long charsSkipped = logFileReader . skip ( oldLogFileDetail . length - newLogFileDetail . length ) ; if ( charsSkipped != oldLogFileDetail . length - newLogFileDetail . length ) { throw new IOException ( "Erroneously skipped " + charsSkipped + " instead of the expected " + ( oldLogFileDetail . length - newLogFileDetail . length ) ) ; } long alreadyRead = 0 ; while ( alreadyRead < newLogFileDetail . length ) { char tmpBuf [ ] ; // Temporary buffer to read logs if ( newLogFileDetail . length - alreadyRead >= DEFAULT_BUFFER_SIZE ) { tmpBuf = new char [ DEFAULT_BUFFER_SIZE ] ; } else { tmpBuf = new char [ ( int ) ( newLogFileDetail . length - alreadyRead ) ] ; } int bytesRead = logFileReader . read ( tmpBuf ) ; if ( bytesRead < 0 ) { break ; } else { alreadyRead += bytesRead ; } tmpFileWriter . write ( tmpBuf ) ; } // / / / / / End of truncating log file / / / / / return newLogFileDetail ;
public class DbDatum { public void insert ( final boolean argin ) { } }
values = new String [ 1 ] ; values [ 0 ] = String . valueOf ( argin ) ; is_empty_val = false ;
public class MediaUtils { /** * Revoke URI permissions to a specific URI that had been previously granted */ private static void revokePermissions ( Context ctx , Uri uri ) { } }
ctx . revokeUriPermission ( uri , Intent . FLAG_GRANT_WRITE_URI_PERMISSION | Intent . FLAG_GRANT_READ_URI_PERMISSION ) ;
public class CalendarPath { /** * Method to construct the greater than expression for date * @ param value the date value * @ return Expression */ public Expression < java . util . Date > gt ( java . util . Date value ) { } }
SimpleDateFormat formatter = getDateTimeFormatter ( ) ; String valueString = "'" + formatter . format ( value ) . concat ( "Z" ) + "'" ; return new Expression < java . util . Date > ( this , Operation . gt , valueString ) ;
public class ServerSideEncryptionHeaderHandler { /** * / * ( non - Javadoc ) * @ see com . amazonaws . services . s3 . internal . HeaderHandler # handle ( java . lang . Object , com . amazonaws . http . HttpResponse ) */ @ Override public void handle ( T result , HttpResponse response ) { } }
result . setSSEAlgorithm ( response . getHeaders ( ) . get ( Headers . SERVER_SIDE_ENCRYPTION ) ) ; result . setSSECustomerAlgorithm ( response . getHeaders ( ) . get ( Headers . SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM ) ) ; result . setSSECustomerKeyMd5 ( response . getHeaders ( ) . get ( Headers . SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5 ) ) ;
public class RenderingIntentImpl {
    /**
     * Reflective feature accessor: returns the value of the structural feature
     * identified by {@code featureID}, falling back to the superclass for
     * features not declared on RenderingIntent.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public Object eGet(int featureID, boolean resolve, boolean coreType) {
        // Dispatch on the generated feature constants; each case simply
        // delegates to the corresponding typed getter.
        switch (featureID) {
            case AfplibPackage.RENDERING_INTENT__RESERVED:
                return getReserved();
            case AfplibPackage.RENDERING_INTENT__IOCARI:
                return getIOCARI();
            case AfplibPackage.RENDERING_INTENT__OCRI:
                return getOCRI();
            case AfplibPackage.RENDERING_INTENT__PTOCRI:
                return getPTOCRI();
            case AfplibPackage.RENDERING_INTENT__GOCARI:
                return getGOCARI();
            case AfplibPackage.RENDERING_INTENT__RESERVED2:
                return getReserved2();
        }
        // Unknown feature: let the inherited implementation handle it.
        return super.eGet(featureID, resolve, coreType);
    }
}
public class IOUtil { /** * Return the count of skipped bytes . * @ param input * @ param toSkip * @ return */ public static long skip ( final InputStream input , final long toSkip ) throws UncheckedIOException { } }
if ( toSkip < 0 ) { throw new IllegalArgumentException ( "Skip count must be non-negative, actual: " + toSkip ) ; } else if ( toSkip == 0 ) { return 0 ; } final byte [ ] buf = Objectory . createByteArrayBuffer ( ) ; long remain = toSkip ; try { while ( remain > 0 ) { long n = read ( input , buf , 0 , ( int ) Math . min ( remain , buf . length ) ) ; if ( n < 0 ) { // EOF break ; } remain -= n ; } return toSkip - remain ; } catch ( IOException e ) { throw new UncheckedIOException ( e ) ; } finally { Objectory . recycle ( buf ) ; }
public class CmsSiteManagerImpl { /** * Sets the default URI , this is only allowed during configuration . < p > * If this method is called after the configuration is finished , * a < code > RuntimeException < / code > is thrown . < p > * @ param defaultUri the defaultUri to set */ public void setDefaultUri ( String defaultUri ) { } }
if ( m_frozen ) { throw new CmsRuntimeException ( Messages . get ( ) . container ( Messages . ERR_CONFIG_FROZEN_0 ) ) ; } m_defaultUri = defaultUri ;
public class UnenclosedEsriJsonRecordReader {
    /**
     * Given an arbitrary byte offset into an unenclosed JSON document,
     * find the start of the next record in the document. Discards trailing
     * bytes from the previous record if we happened to seek into the middle
     * of it.
     *
     * Record boundary defined as: \{\s*"(attributes|geometry)"\s*:\s*\{
     *
     * @return true if a record start was found; false if end-of-stream was
     *         reached first
     * @throws IOException if reading from the underlying stream fails
     */
    protected boolean moveToRecordStart() throws IOException {
        int next = 0;
        long resetPosition = readerPosition;
        // The case of split point exactly at whitespace between records is
        // handled by forcing it to the split following, in the interest of
        // better balancing the splits, by consuming the whitespace in next().
        // The alternative of forcing it to the split preceding could be
        // done like what is commented here:
        // while (next != '{' || skipDup > 0) {  // skipDup > 0 => record already consumed
        //     next = getChar();
        //     if (next < 0) return false;  // end of stream, no good
        //     if (next == '}') skipDup = -1;  // Definitely not
        //     else if (skipDup == 0) skipDup = 1;  // no info - Maybe so until refuted by '}'
        while (true) {
            // Scan forward until we reach a '{' — a candidate record opener.
            while (next != '{') {
                next = getChar();
                // end of stream, no good
                if (next < 0) {
                    return false;
                }
            }
            // Remember this position so we can rewind to just after the '{'
            // once the candidate is confirmed.
            resetPosition = readerPosition;
            inputReader.mark(100);
            // Last char was '{': skip whitespace until we get to a '"'.
            next = getNonWhite();
            if (next < 0) {
                // end of stream, no good
                return false;
            }
            if (next != '"') {
                continue;
            }
            boolean inEscape = false;
            String fieldName = "";
            // Next should be a field name of attributes or geometry.
            // If we see another opening brace, the previous one must have been inside
            // a quoted string literal (after which the double quote we found was a
            // closing quote mark rather than the opening quote mark) — start over.
            while (next != '{') {
                next = getChar();
                if (next < 0) {
                    // end of stream, no good
                    return false;
                }
                // Track backslash escapes so an escaped quote does not end the name.
                inEscape = (!inEscape && next == '\\');
                if (!inEscape && next == '"') {
                    break;
                }
                fieldName += (char) next;
            }
            if (!(fieldName.equals("attributes") || fieldName.equals("geometry"))) {
                // Not the field name we were expecting, start over.
                continue;
            }
            // Last char was '"': skip whitespace until we get to a ':'.
            next = getNonWhite();
            if (next < 0) {
                // end of stream, no good
                return false;
            }
            if (next != ':') {
                continue;
            }
            // Finally, if the next char is a '{', we know for sure that this
            // is a valid record.
            next = getNonWhite();
            if (next < 0) {
                // end of stream, no good
                return false;
            }
            if (next == '{') {
                // At this point we can be sure that we have found the record boundary.
                break;
            }
        }
        // Rewind to just after the record's opening '{' and report success.
        inputReader.reset();
        readerPosition = resetPosition;
        firstBraceConsumed = true;
        return true;
    }
}
public class ModelsImpl {
    /**
     * Adds a list of prebuilt entity extractors to the application.
     *
     * @param appId The application ID.
     * @param versionId The version ID.
     * @param prebuiltExtractorNames An array of prebuilt entity extractor names.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the List&lt;PrebuiltEntityExtractor&gt; object
     */
    public Observable<ServiceResponse<List<PrebuiltEntityExtractor>>> addPrebuiltWithServiceResponseAsync(
            UUID appId, String versionId, List<String> prebuiltExtractorNames) {
        // Fail fast on any missing required parameter before issuing the call.
        if (this.client.endpoint() == null) {
            throw new IllegalArgumentException("Parameter this.client.endpoint() is required and cannot be null.");
        }
        if (appId == null) {
            throw new IllegalArgumentException("Parameter appId is required and cannot be null.");
        }
        if (versionId == null) {
            throw new IllegalArgumentException("Parameter versionId is required and cannot be null.");
        }
        if (prebuiltExtractorNames == null) {
            throw new IllegalArgumentException("Parameter prebuiltExtractorNames is required and cannot be null.");
        }
        Validator.validate(prebuiltExtractorNames);
        // AutoRest-generated host-parameter encoding: "{placeholder}, value"
        // pairs parsed by the Retrofit host interceptor.
        String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.client.endpoint());
        // Issue the REST call, then translate the raw response into a typed
        // ServiceResponse, propagating any parsing failure as an error.
        return service.addPrebuilt(appId, versionId, prebuiltExtractorNames,
                this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<List<PrebuiltEntityExtractor>>>>() {
                @Override
                public Observable<ServiceResponse<List<PrebuiltEntityExtractor>>> call(Response<ResponseBody> response) {
                    try {
                        ServiceResponse<List<PrebuiltEntityExtractor>> clientResponse = addPrebuiltDelegate(response);
                        return Observable.just(clientResponse);
                    } catch (Throwable t) {
                        return Observable.error(t);
                    }
                }
            });
    }
}
public class JinjavaInterpreter { /** * Parse the given string into a root Node , and then renders it processing extend parents . * @ param template * string to parse * @ return rendered result */ public String render ( String template ) { } }
ENGINE_LOG . debug ( template ) ; return render ( parse ( template ) , true ) ;
public class FutureStreamUtils { /** * Perform a forEach operation over the Stream without closing it , capturing any elements and errors in the supplied consumers , but only consuming * the specified number of elements from the Stream , at this time . More elements can be consumed later , by called request on the returned Subscription * < pre > * { @ code * Subscription next = Streams . forEach ( Stream . of ( ( ) - > 1 , ( ) - > 2 , ( ) - > throw new RuntimeException ( ) , ( ) - > 4) * . map ( Supplier : : getValue ) , System . out : : println , e - > e . printStackTrace ( ) ) ; * System . out . println ( " First batch processed ! " ) ; * next . request ( 2 ) ; * System . out . println ( " Second batch processed ! " ) ; * / / prints * First batch processed ! * RuntimeException Stack Trace on System . err * Second batch processed ! * < / pre > * @ param stream - the Stream to consume data from * @ param x To consume from the Stream at this time * @ param consumerElement To accept incoming elements from the Stream * @ param consumerError To accept incoming processing errors from the Stream * @ return Subscription so that further processing can be continued or cancelled . */ public static < T , X extends Throwable > Tuple3 < CompletableFuture < Subscription > , Runnable , CompletableFuture < Boolean > > forEachXWithError ( final Stream < T > stream , final long x , final Consumer < ? super T > consumerElement , final Consumer < ? super Throwable > consumerError ) { } }
return forEachXEvents ( stream , x , consumerElement , consumerError , ( ) -> { } ) ;
public class FLUSH {
    /**
     * Intercepts events travelling down the stack and applies flush semantics to
     * connect/suspend/resume events before (or instead of) passing them on.
     * ----- end JMX attributes and operations -----
     *
     * @param evt the event travelling down the stack
     * @return the result of handling the event, or the down-protocol's result
     */
    public Object down(Event evt) {
        // When bypass is set, flush handling is disabled entirely and every
        // event is passed straight down.
        if (!bypass) {
            switch (evt.getType()) {
                // Plain connects use a flush (true); state-transfer connects do not.
                case Event.CONNECT:
                case Event.CONNECT_USE_FLUSH:
                    return handleConnect(evt, true);
                case Event.CONNECT_WITH_STATE_TRANSFER:
                case Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH:
                    return handleConnect(evt, false);
                case Event.SUSPEND:
                    startFlush(evt);
                    return null;
                // only for testing, see FLUSH#testFlushWithCrashedFlushCoordinator
                case Event.SUSPEND_BUT_FAIL:
                    if (!flushInProgress.get()) {
                        flush_promise.reset();
                        ArrayList<Address> flushParticipants = null;
                        // Snapshot the current membership under the shared lock.
                        synchronized (sharedLock) {
                            flushParticipants = new ArrayList<>(currentView.getMembers());
                        }
                        onSuspend(flushParticipants);
                    }
                    break;
                case Event.RESUME:
                    onResume(evt);
                    return null;
                case Event.SET_LOCAL_ADDRESS:
                    localAddress = evt.getArg();
                    break;
            }
        }
        // Fall through: unhandled (or break-ing) events continue down the stack.
        return down_prot.down(evt);
    }
}
public class BaseReader { /** * This method should be called along with ( or instead of ) normal * close . After calling this method , no further reads should be tried . * Method will try to recycle read buffers ( if any ) . */ public final void freeBuffers ( ) { } }
/* 11 - Apr - 2005 , TSa : Ok , we can release the buffer now , to be * recycled by the next stream reader instantiated by this * thread ( if any ) . */ if ( mRecycleBuffer ) { byte [ ] buf = mByteBuffer ; if ( buf != null ) { mByteBuffer = null ; if ( mConfig != null ) { mConfig . freeFullBBuffer ( buf ) ; } } }
public class ArrayUtil { /** * Returns true if the first count elements of arra and arrb are identical * sets of integers ( not necessarily in the same order ) . */ public static boolean haveEqualSets ( int [ ] arra , int [ ] arrb , int count ) { } }
if ( ArrayUtil . haveEqualArrays ( arra , arrb , count ) ) { return true ; } if ( count > arra . length || count > arrb . length ) { return false ; } if ( count == 1 ) { return arra [ 0 ] == arrb [ 0 ] ; } int [ ] tempa = ( int [ ] ) resizeArray ( arra , count ) ; int [ ] tempb = ( int [ ] ) resizeArray ( arrb , count ) ; sortArray ( tempa ) ; sortArray ( tempb ) ; for ( int j = 0 ; j < count ; j ++ ) { if ( tempa [ j ] != tempb [ j ] ) { return false ; } } return true ;
public class ExpressRouteCircuitsInner { /** * Gets all stats from an express route circuit in a resource group . * @ param resourceGroupName The name of the resource group . * @ param circuitName The name of the express route circuit . * @ param peeringName The name of the peering . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the ExpressRouteCircuitStatsInner object */ public Observable < ExpressRouteCircuitStatsInner > getPeeringStatsAsync ( String resourceGroupName , String circuitName , String peeringName ) { } }
return getPeeringStatsWithServiceResponseAsync ( resourceGroupName , circuitName , peeringName ) . map ( new Func1 < ServiceResponse < ExpressRouteCircuitStatsInner > , ExpressRouteCircuitStatsInner > ( ) { @ Override public ExpressRouteCircuitStatsInner call ( ServiceResponse < ExpressRouteCircuitStatsInner > response ) { return response . body ( ) ; } } ) ;
public class DocBookBuilder { /** * Gets the translated topics from the REST Interface and also creates any dummy translations for topics that have yet to be * translated . * @ param buildData Information and data structures for the build . * @ param translatedTopics The translated topic collection to add translated topics to . */ private void populateTranslatedTopicDatabase ( final BuildData buildData , final Map < String , BaseTopicWrapper < ? > > translatedTopics ) throws BuildProcessingException { } }
final List < ITopicNode > topicNodes = buildData . getContentSpec ( ) . getAllTopicNodes ( ) ; final int showPercent = 10 ; final float total = topicNodes . size ( ) ; float current = 0 ; int lastPercent = 0 ; // Loop over each Topic Node in the content spec and get it ' s translated topic for ( final ITopicNode topicNode : topicNodes ) { getTranslatedTopicForTopicNode ( buildData , topicNode , translatedTopics ) ; ++ current ; final int percent = Math . round ( current / total * 100 ) ; if ( percent - lastPercent >= showPercent ) { lastPercent = percent ; log . info ( "\tPopulate " + buildData . getBuildLocale ( ) + " Database Pass " + percent + "% Done" ) ; } }
public class IPv6AddressPool { /** * Allocate the given subnet from the pool . * @ param toAllocate subnet to allocate from the pool * @ return resulting pool */ public IPv6AddressPool allocate ( IPv6Network toAllocate ) { } }
if ( ! contains ( toAllocate ) ) throw new IllegalArgumentException ( "can not allocate network which is not contained in the pool to allocate from [" + toAllocate + "]" ) ; if ( ! this . allocationSubnetSize . equals ( toAllocate . getNetmask ( ) ) ) throw new IllegalArgumentException ( "can not allocate network with prefix length /" + toAllocate . getNetmask ( ) . asPrefixLength ( ) + " from a pool configured to hand out subnets with prefix length /" + allocationSubnetSize ) ; // go find the range that contains the requested subnet final IPv6AddressRange rangeToAllocateFrom = findFreeRangeContaining ( toAllocate ) ; if ( rangeToAllocateFrom != null ) { // found a range in which this subnet is free , allocate it return doAllocate ( toAllocate , rangeToAllocateFrom ) ; } else { // requested subnet not free return null ; }
public class ObjectFactory { /** * Create an instance of { @ link JAXBElement } { @ code < } { @ link DMSAngleType } { @ code > } * @ param value * Java instance representing xml element ' s value . * @ return * the new instance of { @ link JAXBElement } { @ code < } { @ link DMSAngleType } { @ code > } */ @ XmlElementDecl ( namespace = "http://www.opengis.net/gml" , name = "dmsAngle" ) public JAXBElement < DMSAngleType > createDmsAngle ( DMSAngleType value ) { } }
return new JAXBElement < DMSAngleType > ( _DmsAngle_QNAME , DMSAngleType . class , null , value ) ;
public class TransactionLocalMap {
    /**
     * Double the capacity of the open-addressed hash table, rehashing every
     * live entry into the new array and recounting the live size.
     */
    private void resize() {
        Entry[] oldTab = table;
        int oldLen = oldTab.length;
        // New capacity is double the old; table length is assumed to stay a
        // power of two so (newLen - 1) works as a bit mask below.
        int newLen = oldLen * 2;
        Entry[] newTab = new Entry[newLen];
        int count = 0;
        for (int j = 0; j < oldLen; ++j) {
            Entry e = oldTab[j];
            if (e != null) {
                // NOTE(review): `hashCode` here is accessed without parentheses,
                // i.e. as a field of the key — confirm the key type really
                // declares a `hashCode` field (as opposed to the Object method).
                int h = e.key.hashCode & (newLen - 1);
                // Linear probing: walk forward until a free slot is found.
                while (newTab[h] != null)
                    h = nextIndex(h, newLen);
                newTab[h] = e;
                count++;
            }
        }
        // Publish the new threshold, size, and table.
        setThreshold(newLen);
        size = count;
        table = newTab;
    }
}
public class ModelsImpl { /** * Gets information about the hierarchical entity model . * @ param appId The application ID . * @ param versionId The version ID . * @ param hEntityId The hierarchical entity extractor ID . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the HierarchicalEntityExtractor object */ public Observable < HierarchicalEntityExtractor > getHierarchicalEntityAsync ( UUID appId , String versionId , UUID hEntityId ) { } }
return getHierarchicalEntityWithServiceResponseAsync ( appId , versionId , hEntityId ) . map ( new Func1 < ServiceResponse < HierarchicalEntityExtractor > , HierarchicalEntityExtractor > ( ) { @ Override public HierarchicalEntityExtractor call ( ServiceResponse < HierarchicalEntityExtractor > response ) { return response . body ( ) ; } } ) ;
public class Channel { /** * query this channel for chain information . * The request is sent to a random peer in the channel * < STRONG > This method may not be thread safe if client context is changed ! < / STRONG > * @ return a { @ link BlockchainInfo } object containing the chain info requested * @ throws InvalidArgumentException * @ throws ProposalException */ public BlockchainInfo queryBlockchainInfo ( ) throws ProposalException , InvalidArgumentException { } }
return queryBlockchainInfo ( getShuffledPeers ( EnumSet . of ( PeerRole . LEDGER_QUERY ) ) , client . getUserContext ( ) ) ;
public class FileUtil { /** * Generate MD5 hash of topic and partitions . And extract first 4 characters of the MD5 hash . * @ param topic topic name * @ param partitions partitions * @ return md5 hash */ public static String getMd5Hash ( String topic , String [ ] partitions ) { } }
ArrayList < String > elements = new ArrayList < String > ( ) ; elements . add ( topic ) ; for ( String partition : partitions ) { elements . add ( partition ) ; } String pathPrefix = StringUtils . join ( elements , "/" ) ; try { final MessageDigest messageDigest = MessageDigest . getInstance ( "MD5" ) ; byte [ ] md5Bytes = messageDigest . digest ( pathPrefix . getBytes ( "UTF-8" ) ) ; return getHexEncode ( md5Bytes ) . substring ( 0 , 4 ) ; } catch ( NoSuchAlgorithmException e ) { LOG . error ( e . getMessage ( ) ) ; } catch ( UnsupportedEncodingException e ) { LOG . error ( e . getMessage ( ) ) ; } return "" ;
public class BaseReader { /** * Method for reading as many bytes from the underlying stream as possible * ( that fit in the buffer considering offset ) , to the specified offset . * @ return Number of bytes read , if any ; - 1 to indicate none available * ( that is , end of input ) */ protected final int readBytesAt ( int offset ) throws IOException { } }
// shouldn ' t modify mBytePtr , assumed to be ' offset ' if ( mIn != null ) { int count = mIn . read ( mByteBuffer , offset , mByteBuffer . length - offset ) ; if ( count > 0 ) { mByteBufferEnd += count ; } return count ; } return - 1 ;
public class TMasterSink {
    /**
     * Starts a periodic checker that watches the TMaster location singleton and,
     * if the location changed, restarts the TMasterClientService with the new
     * TMasterLocation.
     */
    private void startTMasterChecker() {
        // Polling interval comes from the sink configuration.
        final int checkIntervalSec =
                TypeUtils.getInteger(sinkConfig.get(KEY_TMASTER_LOCATION_CHECK_INTERVAL_SEC));
        Runnable runnable = new Runnable() {
            @Override
            public void run() {
                // Read the current TMaster location published via the singleton registry.
                TopologyMaster.TMasterLocation location =
                        (TopologyMaster.TMasterLocation) SingletonRegistry.INSTANCE
                                .getSingleton(TMASTER_LOCATION_BEAN_NAME);
                if (location != null) {
                    // React only when the location is set for the first time or has changed.
                    if (currentTMasterLocation == null || !location.equals(currentTMasterLocation)) {
                        LOG.info("Update current TMasterLocation to: " + location);
                        currentTMasterLocation = location;
                        tMasterClientService.updateTMasterLocation(currentTMasterLocation);
                        tMasterClientService.startNewMasterClient();
                        // Update Metrics
                        sinkContext.exportCountMetric(TMASTER_LOCATION_UPDATE_COUNT, 1);
                    }
                }
                // Schedule itself in future — the task re-arms after every run.
                tMasterLocationStarter.schedule(this, checkIntervalSec, TimeUnit.SECONDS);
            }
        };
        // First Entry: kick off the periodic check after one interval.
        tMasterLocationStarter.schedule(runnable, checkIntervalSec, TimeUnit.SECONDS);
        LOG.info("TMasterChecker started with interval: " + checkIntervalSec);
    }
}
public class ASTManager { /** * Parses any fragment of code and store the result into the subclass of * { @ link org . walkmod . javalang . ast . Node } defined . For example , if you need * to parse a single method , the class must be * { @ link org . walkmod . javalang . ast . body . MethodDeclaration } . The result does * NOT contain the location of the AST nodes . * @ param clazz * the subclass of { @ link org . walkmod . javalang . ast . Node } . The * result will be instance of that class . * @ param text * the fragment of code to parse . * @ return the partial abstract syntax tree ( AST ) produced . * @ throws ParseException * when the code contains an invalid syntax . */ public static Node parse ( Class < ? > clazz , String text ) throws ParseException { } }
return parse ( clazz , text , true ) ;
public class TransformsInner { /** * Get Transform . * Gets a Transform . * @ param resourceGroupName The name of the resource group within the Azure subscription . * @ param accountName The Media Services account name . * @ param transformName The Transform name . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < TransformInner > getAsync ( String resourceGroupName , String accountName , String transformName , final ServiceCallback < TransformInner > serviceCallback ) { } }
return ServiceFuture . fromResponse ( getWithServiceResponseAsync ( resourceGroupName , accountName , transformName ) , serviceCallback ) ;
public class CouchbaseBucketUtils { /** * Close bucket . * @ param bucket * the bucket */ public static void closeBucket ( Bucket bucket ) { } }
if ( bucket != null ) { if ( ! bucket . close ( ) ) { LOGGER . error ( "Not able to close bucket [" + bucket . name ( ) + "]." ) ; throw new KunderaException ( "Not able to close bucket [" + bucket . name ( ) + "]." ) ; } else { LOGGER . debug ( "Bucket [" + bucket . name ( ) + "] is closed!" ) ; } }
public class CSSColorHelper { /** * Get the passed values as CSS RGBA color value * @ param nRed * Red - is scaled to 0-255 * @ param nGreen * Green - is scaled to 0-255 * @ param nBlue * Blue - is scaled to 0-255 * @ param fOpacity * Opacity to use - is scaled to 0-1. * @ return The CSS string to use */ @ Nonnull @ Nonempty public static String getRGBAColorValue ( final int nRed , final int nGreen , final int nBlue , final float fOpacity ) { } }
return new StringBuilder ( 24 ) . append ( CCSSValue . PREFIX_RGBA_OPEN ) . append ( getRGBValue ( nRed ) ) . append ( ',' ) . append ( getRGBValue ( nGreen ) ) . append ( ',' ) . append ( getRGBValue ( nBlue ) ) . append ( ',' ) . append ( getOpacityToUse ( fOpacity ) ) . append ( CCSSValue . SUFFIX_RGBA_CLOSE ) . toString ( ) ;
public class I2b2QueryResultsHandler { /** * Calls stored procedures to drop all of the temp tables created . * @ throws SQLException if an error occurs while interacting with the * database */ private void truncateTempTables ( ) throws SQLException { } }
Logger logger = I2b2ETLUtil . logger ( ) ; logger . log ( Level . INFO , "Truncating temp data tables for query {0}" , this . query . getName ( ) ) ; try ( final Connection conn = openDataDatabaseConnection ( ) ) { conn . setAutoCommit ( true ) ; String [ ] dataschemaTables = { tempPatientTableName ( ) , tempPatientMappingTableName ( ) , tempVisitTableName ( ) , tempEncounterMappingTableName ( ) , tempProviderTableName ( ) , tempConceptTableName ( ) , tempModifierTableName ( ) , tempObservationFactTableName ( ) , tempObservationFactCompleteTableName ( ) } ; for ( String tableName : dataschemaTables ) { truncateTable ( conn , tableName ) ; } logger . log ( Level . INFO , "Done truncating temp data tables for query {0}" , this . query . getName ( ) ) ; }
public class SymmetricQREigenHelper_DDRM {
    /**
     * Performs a similar transform on A-pI: applies a Givens-style rotation at
     * position x1 of the tridiagonal matrix, creating the "bulge" element that
     * subsequent steps chase down the matrix.
     *
     * @param x1 index of the top-left element of the 2x2 block being rotated
     * @param p shift value (or rotation angle when {@code byAngle} is set)
     * @param byAngle when true, {@code p} is interpreted directly as an angle;
     *                otherwise the rotation is computed from (a11 - p, a12)
     */
    protected void createBulge(int x1, double p, boolean byAngle) {
        // Snapshot the affected diagonal and off-diagonal entries before
        // overwriting them below.
        double a11 = diag[x1];
        double a22 = diag[x1 + 1];
        double a12 = off[x1];
        double a23 = off[x1 + 1];

        if (byAngle) {
            // Rotation given directly as an angle: derive c, s and their products.
            c = Math.cos(p);
            s = Math.sin(p);
            c2 = c * c;
            s2 = s * s;
            cs = c * s;
        } else {
            // Rotation computed from the shifted first column; presumably this
            // sets c, s, c2, s2, cs as fields — the code below relies on it.
            computeRotation(a11 - p, a12);
        }

        // multiply the rotator on the top left.
        diag[x1] = c2 * a11 + 2.0 * cs * a12 + s2 * a22;
        diag[x1 + 1] = c2 * a22 - 2.0 * cs * a12 + s2 * a11;
        off[x1] = a12 * (c2 - s2) + cs * (a22 - a11);
        off[x1 + 1] = c * a23;
        // The bulge is the fill-in element created outside the tridiagonal band.
        bulge = s * a23;

        // Accumulate the rotation into Q when eigenvectors are being tracked.
        if (Q != null)
            updateQ(x1, x1 + 1, c, s);
    }
}
public class AvatarNode {
    /**
     * Return normally if the shared journal is active, or if the number
     * of active journals is equal to the number of configured journals.
     * Throw IOException otherwise, aborting the failover.
     */
    private void verifyEditStreams() throws IOException {
        // we check if the shared stream is still available
        if (getFSImage().getEditLog().isSharedJournalAvailable()
                && InjectionHandler.trueCondition(InjectionEvent.AVATARNODE_CHECKEDITSTREAMS)) {
            return;
        }
        // for sanity check if the number of available journals
        // is equal to the number of configured ones
        int expectedEditStreams = NNStorageConfiguration.getNamespaceEditsDirs(confg).size();
        int actualEditStreams = this.namesystem.getFSImage().getEditLog().getNumberOfAvailableJournals();
        if (expectedEditStreams == actualEditStreams
                && InjectionHandler.trueCondition(InjectionEvent.AVATARNODE_CHECKEDITSTREAMS)) {
            return;
        }
        // Neither condition held: fail the failover loudly.
        String msg = "Failover: Cannot proceed - shared journal is not available. "
                + "Number of required edit streams: " + expectedEditStreams
                + " current number: " + actualEditStreams;
        LOG.fatal(msg);
        throw new IOException(msg);
    }
}
public class SourceDocInfo {
    /**
     * Setter for offsetInSource — writes the value into the CAS feature slot.
     * Generated JCas code; hand edits will be overwritten on regeneration.
     *
     * @generated
     * @param v value to set into the feature
     */
    public void setOffsetInSource(int v) {
        // Type-system consistency check before touching the low-level CAS.
        if (SourceDocInfo_Type.featOkTst && ((SourceDocInfo_Type) jcasType).casFeat_offsetInSource == null)
            jcasType.jcas.throwFeatMissing("offsetInSource", "de.unihd.dbs.uima.types.heideltime.SourceDocInfo");
        jcasType.ll_cas.ll_setIntValue(addr, ((SourceDocInfo_Type) jcasType).casFeatCode_offsetInSource, v);
    }
}
public class LineDataImpl {
    /**
     * Reflective feature setter; delegates unknown feature IDs to the superclass.
     * Generated EMF code; hand edits will be overwritten on regeneration.
     *
     * @generated
     */
    @Override
    public void eSet(int featureID, Object newValue) {
        switch (featureID) {
            case AfplibPackage.LINE_DATA__LINEDATA:
                setLinedata((String) newValue);
                return;
        }
        super.eSet(featureID, newValue);
    }
}
public class ST_CollectionExtract {
    /**
     * Recursively collects every LineString contained in the given geometry.
     * Non-lineal leaf geometries (points, polygons) are silently skipped;
     * nested collections are descended into.
     *
     * @param lines    accumulator receiving the extracted LineStrings
     * @param geometry the geometry to filter
     */
    private static void getLinealGeometry(ArrayList<LineString> lines, Geometry geometry) {
        for (int i = 0; i < geometry.getNumGeometries(); i++) {
            Geometry subGeom = geometry.getGeometryN(i);
            if (subGeom instanceof LineString) {
                lines.add((LineString) subGeom);
            } else if (subGeom instanceof GeometryCollection) {
                // MultiLineString is a GeometryCollection subtype, so nested
                // lines are reached through this recursive branch.
                getLinealGeometry(lines, subGeom);
            }
        }
    }
}
public class Ifc2x3tc1PackageImpl {
    /**
     * Lazily resolves and caches the IfcLogical EClass from the registered
     * package (classifier index 699).
     * Generated EMF code; hand edits will be overwritten on regeneration.
     *
     * @generated
     */
    public EClass getIfcLogical() {
        if (ifcLogicalEClass == null) {
            ifcLogicalEClass = (EClass) EPackage.Registry.INSTANCE
                    .getEPackage(Ifc2x3tc1Package.eNS_URI).getEClassifiers().get(699);
        }
        return ifcLogicalEClass;
    }
}
public class ReactiveWifi {
    /**
     * Observes the WiFi network the device is connected to by listening to
     * supplicant state changes.
     * Returns the current WiFi network information as a {@link WifiInfo} object.
     *
     * @param context Context of the activity or an application
     * @return RxJava Observable with WifiInfo
     */
    @RequiresPermission(ACCESS_WIFI_STATE)
    public static Observable<WifiInfo> observeWifiAccessPointChanges(final Context context) {
        final WifiManager wifiManager = (WifiManager) context.getSystemService(Context.WIFI_SERVICE);
        final IntentFilter filter = new IntentFilter();
        filter.addAction(WifiManager.SUPPLICANT_STATE_CHANGED_ACTION);
        return Observable.create(new ObservableOnSubscribe<WifiInfo>() {
            @Override
            public void subscribe(final ObservableEmitter<WifiInfo> emitter) throws Exception {
                final BroadcastReceiver receiver = createAccessPointChangesReceiver(emitter, wifiManager);
                context.registerReceiver(receiver, filter);
                // Unregister the receiver on the UI thread when the stream is disposed,
                // so the receiver's lifetime matches the subscription's.
                Disposable disposable = disposeInUiThread(new Action() {
                    @Override
                    public void run() {
                        tryToUnregisterReceiver(context, receiver);
                    }
                });
                emitter.setDisposable(disposable);
            }
        });
    }
}
public class CassandraSearcher {
    /**
     * Returns the set of resource ids that match the given term query,
     * sorted (TreeSet) for deterministic ordering.
     *
     * @param context         search context scoping the terms table
     * @param query           the term query to match
     * @param readConsistency Cassandra consistency level for the read
     */
    private Set<String> searchForIds(Context context, TermQuery query, ConsistencyLevel readConsistency) {
        Set<String> ids = Sets.newTreeSet();
        BoundStatement bindStatement = m_searchStatement.bind();
        bindStatement.setString(Schema.C_TERMS_CONTEXT, context.getId());
        // Field falls back to the default term field when the query doesn't name one.
        bindStatement.setString(Schema.C_TERMS_FIELD, query.getTerm().getField(Constants.DEFAULT_TERM_FIELD));
        bindStatement.setString(Schema.C_TERMS_VALUE, query.getTerm().getValue());
        bindStatement.setConsistencyLevel(readConsistency);
        for (Row row : m_session.execute(bindStatement)) {
            // NOTE(review): column referenced via Constants.Schema here but plain
            // Schema above — presumably the same class; confirm the imports.
            ids.add(row.getString(Constants.Schema.C_TERMS_RESOURCE));
        }
        return ids;
    }
}
public class AnnotationDefinitionTable {
    /**
     * {@inheritDoc}
     *
     * Deserializes the table: a count of definitions, then for each definition
     * its externalized form followed by the indices of the documents it maps to.
     */
    @Override
    protected void _from(ObjectInput in) throws IOException, ClassNotFoundException {
        // Read the definition-map size and pre-size the index map accordingly.
        final int size = in.readInt();
        indexDefinition = sizedHashMap(size);
        TableAnnotationDefinition tad;
        for (int i = 0; i < size; ++i) {
            tad = new TableAnnotationDefinition();
            // Read the table annotation definition itself.
            tad.readExternal(in);
            // Read how many document indices follow for this definition.
            final int documentsSize = in.readInt();
            for (int j = 0; j < documentsSize; ++j) {
                // Register each document index against the definition.
                addAnnotationDefinition(tad, in.readInt());
            }
        }
    }
}
public class ChatDirector {
    /**
     * Do all the replacements (mogrifications) specified in the translation
     * string identified by the key. The translation is a '#'-separated list of
     * alternating (pattern, replacement) pairs.
     *
     * NOTE(review): the second nextToken() call is unguarded — a translation
     * with an odd number of '#'-separated tokens throws NoSuchElementException.
     */
    protected StringBuffer translatedReplacements(String key, StringBuffer buf) {
        MessageBundle bundle = _ctx.getMessageManager().getBundle(_bundle);
        if (!bundle.exists(key)) {
            return buf;
        }
        StringTokenizer st = new StringTokenizer(bundle.get(key), "#");
        // apply the replacements to each mogrification that matches
        while (st.hasMoreTokens()) {
            String pattern = st.nextToken();
            String replace = st.nextToken();
            Matcher m = Pattern.compile(pattern, Pattern.CASE_INSENSITIVE).matcher(buf);
            if (m.find()) {
                buf = new StringBuffer();
                m.appendReplacement(buf, replace);
                // they may match more than once
                while (m.find()) {
                    m.appendReplacement(buf, replace);
                }
                m.appendTail(buf);
            }
        }
        return buf;
    }
}
public class CleverTapAPI {
    /**
     * InApp callback invoked once an in-app notification has been shown;
     * records the "shown" (non-clicked) state event for it.
     */
    @Override
    public void inAppNotificationDidShow(Context context, CTInAppNotification inAppNotification, Bundle formData) {
        // false = shown, not clicked.
        pushInAppNotificationStateEvent(false, inAppNotification, formData);
    }
}
public class SubgraphAnalyzer { /** * This method searches a given { @ link Graph } for cycles . This method is * different to { @ link # hasDisconnectedSubgraph ( Graph ) } , because here it is * started at a dedicated vertex and only vertices are checked for cycles * which are connected to this start vertex . If disconnected subgraphs * exist , these are not checked . * @ param < V > * is the actual vertex implementation . * @ param < E > * is the actual edge implementation . * @ param graph * is the { @ link Graph } to be searched for cycles . * @ param startVertex * is the { @ link Vertex } to start from . This vertex has to be * part of the given graph . * @ return < code > true < / code > is returned if a cycle was found . * < code > false < / code > is returned otherwise . * @ throws IllegalArgumentException * is thrown in case the startVertex is not part of the graph or * the graph of vertex are < code > null < / code > . */ public static < V extends Vertex < V , E > , E extends Edge < V , E > > boolean hasDisconnectedSubgraph ( Graph < V , E > graph , V startVertex ) { } }
requireNonNull ( graph , "The given graph is null" ) ; requireNonNull ( startVertex , "The given start vertex is null" ) ; if ( ! graph . getVertices ( ) . contains ( startVertex ) ) { throw new IllegalArgumentException ( "The given start vertex '" + startVertex + "' is not part of the given graph '" + graph + "'." ) ; } HashSet < V > notVisited = new HashSet < V > ( graph . getVertices ( ) ) ; visitReachableGraph ( startVertex , new Stack < > ( ) , notVisited ) ; return ! notVisited . isEmpty ( ) ;
public class FileSystemUtil {
    /**
     * Gets the table name from a path, or null if the path is the root path.
     * Accepts either a table path (direct child of root) or a split path
     * (grandchild of root); anything deeper is rejected.
     *
     * @throws IllegalArgumentException if the path is neither root, a table,
     *         nor a split under the given root
     */
    @Nullable
    public static String getTableName(Path rootPath, Path path) {
        path = qualified(rootPath, path);
        if (rootPath.equals(path)) {
            // Path is root, no table
            return null;
        }
        Path tablePath;
        Path parent = path.getParent();
        if (Objects.equals(parent, rootPath)) {
            // The path itself represents a table (e.g.; emodb://ci.us/mytable)
            tablePath = path;
        } else if (parent != null && Objects.equals(parent.getParent(), rootPath)) {
            // The path is a split (e.g.; emodb://ci.us/mytable/split-id)
            tablePath = parent;
        } else {
            throw new IllegalArgumentException(format("Path does not represent a table, split, or root (path=%s, root=%s)", path, rootPath));
        }
        // Table names are URL-encoded in the path; decode before returning.
        return decode(tablePath.getName());
    }
}
public class SourceToHTMLConverter { /** * Add the line numbers for the source code . * @ param pre the content tree to which the line number will be added * @ param lineno The line number */ private static void addLineNo ( Content pre , int lineno ) { } }
HtmlTree span = new HtmlTree ( HtmlTag . SPAN ) ; span . addStyle ( HtmlStyle . sourceLineNo ) ; if ( lineno < 10 ) { span . addContent ( "00" + Integer . toString ( lineno ) ) ; } else if ( lineno < 100 ) { span . addContent ( "0" + Integer . toString ( lineno ) ) ; } else { span . addContent ( Integer . toString ( lineno ) ) ; } pre . addContent ( span ) ;
public class Strings {
    /**
     * Determine if the suffix of one string is the prefix of another.
     *
     * @param text1 First string.
     * @param text2 Second string.
     * @return The number of characters common to the end of the first
     *         string and the start of the second string.
     */
    public static int commonOverlap(String text1, String text2) {
        // Cache lengths once; nothing overlaps with an empty string.
        int len1 = text1.length();
        int len2 = text2.length();
        if (len1 == 0 || len2 == 0) {
            return 0;
        }
        // Reduce both strings to equally-sized views: the tail of text1
        // against the head of text2.
        if (len1 > len2) {
            text1 = text1.substring(len1 - len2);
        } else if (len1 < len2) {
            text2 = text2.substring(0, len1);
        }
        final int textLength = Math.min(len1, len2);
        // Quick check for the worst case: full overlap.
        if (text1.equals(text2)) {
            return textLength;
        }
        // Grow a candidate overlap length, letting indexOf() skip ahead.
        // Performance analysis: http://neil.fraser.name/news/2010/11/04/
        int best = 0;
        int length = 1;
        while (true) {
            String pattern = text1.substring(textLength - length);
            int found = text2.indexOf(pattern);
            if (found == -1) {
                return best;
            }
            length += found;
            if (found == 0 || text1.substring(textLength - length).equals(text2.substring(0, length))) {
                best = length;
                length++;
            }
        }
    }
}
public class ApiOvhCore {
    /**
     * Store password-based credentials for an automatic certificate generation.
     *
     * @param nic       the OVH nic handle; normalized to lower case before storing
     * @param password  the account password, kept in memory for later use
     * @param timeInSec credential validity duration in seconds
     */
    public void setLoginInfo(String nic, String password, int timeInSec) {
        // NOTE(review): toLowerCase() uses the default locale; consider
        // toLowerCase(Locale.ROOT) to avoid Turkish-i surprises.
        nic = nic.toLowerCase();
        this.nic = nic;
        this.password = password;
        this.timeInSec = timeInSec;
    }
}
public class AddMetadataAction {
    /**
     * Logs a warning for a property whose binary value could not be read.
     * In development mode the full stack trace is included; otherwise only
     * the message is logged.
     *
     * @param property  property that has not been read
     * @param exception the reason for which the property wasn't read
     * @throws RepositoryException propagated from reading the property's path/id
     */
    private void printWarning(PropertyImpl property, Exception exception) throws RepositoryException {
        if (PropertyManager.isDevelopping()) {
            LOG.warn("Binary value reader error, content by path " + property.getPath()
                    + ", property id " + property.getData().getIdentifier()
                    + " : " + exception.getMessage(), exception);
        } else {
            LOG.warn("Binary value reader error, content by path " + property.getPath()
                    + ", property id " + property.getData().getIdentifier()
                    + " : " + exception.getMessage());
        }
    }
}
public class PhaseTwoImpl { /** * { @ inheritDoc } */ @ Override public ProtoNetwork stage1Merger ( Collection < ProtoNetworkDescriptor > protoNetworkDescriptors ) { } }
ProtoNetwork mergedNetwork = null ; try { Iterator < ProtoNetworkDescriptor > it = protoNetworkDescriptors . iterator ( ) ; // Grab first proto network and iteratively merge the rest . ProtoNetworkDescriptor pnd = it . next ( ) ; mergedNetwork = protoNetworkService . read ( pnd ) ; while ( it . hasNext ( ) ) { ProtoNetwork nextPn = protoNetworkService . read ( it . next ( ) ) ; protoNetworkService . merge ( mergedNetwork , nextPn ) ; } } catch ( ProtoNetworkError e ) { e . printStackTrace ( ) ; Throwable cause = e . getCause ( ) ; if ( cause != null ) e . printStackTrace ( ) ; error ( "Unable to merge proto networks into global network." ) ; } return mergedNetwork ;
public class BundleProcessor {
    /**
     * Launch the bundle processing, defaulting to servlet API version 2.3.
     *
     * @param baseDirPath      the base directory path
     * @param tmpDirPath       the temp directory path
     * @param destDirPath      the destination directory path
     * @param generateCdnFiles the flag indicating if we should generate the CDN files or not
     * @throws Exception if an exception occurs
     */
    public void process(String baseDirPath, String tmpDirPath, String destDirPath, boolean generateCdnFiles) throws Exception {
        // Convenience overload: delegates with the default servlet API version.
        process(baseDirPath, tmpDirPath, destDirPath, generateCdnFiles, DEFAULT_SERVLET_API_VERSION_2_3);
    }
}
public class StringHelper {
    /**
     * Take a concatenated String and return a {@link ICommonsList} of all
     * elements in the passed string, using the specified separator character.
     *
     * @param cSep      The separator character to use.
     * @param sElements The concatenated String to convert. May be {@code null} or empty.
     * @param nMaxItems The maximum number of items to explode. Values &le; 0 mean unlimited.
     * @return The {@link ICommonsList} represented by the passed string. Never {@code null}.
     */
    @Nonnull
    @ReturnsMutableCopy
    public static ICommonsList<String> getExploded(final char cSep, @Nullable final String sElements, final int nMaxItems) {
        // Pre-size the result list when the maximum item count is known.
        return getExploded(cSep, sElements, nMaxItems,
                nMaxItems >= 1 ? new CommonsArrayList<>(nMaxItems) : new CommonsArrayList<>());
    }
}
public class AbstractMethodVisitor {
    /**
     * Add a new Violation for the given method node, prefixing the message
     * with the declaring class name for context.
     *
     * @param node    the Groovy AST MethodNode
     * @param message the message for the violation; defaults to null
     */
    protected void addViolation(MethodNode node, String message) {
        // Delegates to the ASTNode overload with an enriched message.
        addViolation((ASTNode) node, String.format("Violation in class %s. %s",
                node.getDeclaringClass().getNameWithoutPackage(), message));
    }
}
public class RationalObjectCounter { /** * Counts the object , increasing its total count by 1. */ public double count ( T obj ) { } }
double count = counts . get ( obj ) ; count ++ ; counts . put ( obj , count ) ; sum ++ ; return count ;
public class CmsCmisTypeManager {
    /**
     * Helper method for adding property definitions for the dynamic properties.<p>
     *
     * Each registered property provider becomes a single-valued STRING
     * property, writable only if the provider itself is writable.
     *
     * @param type the type definition to which the properties should be added
     */
    private void addProviderPropertyDefinitions(AbstractTypeDefinition type) {
        for (I_CmsPropertyProvider provider : m_propertyProviders) {
            type.addPropertyDefinition(createPropDef(
                    PROPERTY_PREFIX_DYNAMIC + provider.getName(),
                    provider.getName(),
                    provider.getName(),
                    PropertyType.STRING,
                    Cardinality.SINGLE,
                    provider.isWritable() ? Updatability.READWRITE : Updatability.READONLY,
                    false,
                    false));
        }
    }
}
public class QuotaHelper {
    /**
     * Asserts the specified assignment quota: the request alone must not
     * exceed the limit, and (for existing parents) the current count plus the
     * request must not exceed it either. Limits &le; 0 mean unlimited.
     *
     * @param parentId   the ID of the parent entity; null for a new parent
     * @param requested  the number of entities to assign
     * @param limit      the maximum number of entities assignable to the parent
     * @param type       the type of the entities to assign
     * @param parentType the type of the parent entity
     * @param countFct   counts the entities currently assigned to the parent
     * @throws QuotaExceededException if the assignment would exceed the quota
     */
    public static void assertAssignmentQuota(final Long parentId, final long requested, final long limit,
            @NotNull final String type, @NotNull final String parentType, final Function<Long, Long> countFct) {
        // check if the quota is unlimited
        if (limit <= 0) {
            LOG.debug("Quota 'Max {} entities per {}' is unlimited.", type, parentType);
            return;
        }
        // The request alone already breaks the quota.
        if (requested > limit) {
            final String parentIdStr = parentId != null ? String.valueOf(parentId) : "<new>";
            LOG.warn("Cannot assign {} {} entities to {} '{}' because of the configured quota limit {}.",
                    requested, type, parentType, parentIdStr, limit);
            throw new QuotaExceededException(type, parentType, parentId, requested, limit);
        }
        // For an existing parent, add the current count before comparing.
        if (parentId != null && countFct != null) {
            final long currentCount = countFct.apply(parentId);
            if (currentCount + requested > limit) {
                LOG.warn("Cannot assign {} {} entities to {} '{}' because of the configured quota limit {}. Currently, there are {} {} entities assigned.",
                        requested, type, parentType, parentId, limit, currentCount, type);
                throw new QuotaExceededException(type, parentType, parentId, requested, limit);
            }
        }
    }
}
public class RmiJournalReceiver { /** * Request to open a file . Check that : * < ul > * < li > a file is not already open , < / li > * < li > we can create a { @ link TransportOutputFile } , and open a * { @ link Writer } on it . < / li > * < / ul > */ public void openFile ( String repositoryHash , String filename ) throws JournalException { } }
if ( journalFile != null ) { throw logAndGetException ( "Attempting to open file '" + filename + "' when file '" + journalFile . getName ( ) + "' has not been closed." ) ; } try { journalFile = new TransportOutputFile ( directory , filename ) ; writer = journalFile . open ( ) ; } catch ( IOException e ) { throw logAndGetException ( "Problem opening" + filename + "'" , e ) ; } currentRepositoryHash = repositoryHash ; itemIndex = 0 ; logger . debug ( "opened file '" + filename + "', hash is '" + repositoryHash + "'" ) ;
public class CliUtils {
    /**
     * Executes the specified command line and blocks until the process has
     * finished; output is captured and logged (stdout at info, stderr at error).
     *
     * @param cli        the command line
     * @param loggerName the name of the logger to use; if {@code null} this class' name is used
     * @return the process' output
     */
    public static CliOutput executeCommandLine(final Commandline cli, final String loggerName) {
        // Convenience overload: no stdin payload for the process.
        return executeCommandLine(cli, loggerName, null);
    }
}
public class LocaleFactory {
    /**
     * Get the localization object for the specified locale, creating and
     * caching it on first use.
     *
     * @param cls    localization interface class
     * @param locale locale string
     * @param <T>    localization interface class
     * @return object implementing the specified class
     */
    @SuppressWarnings({"unchecked"})
    public static <T extends LocalizableResource> T get(Class<T> cls, String locale) {
        Map<String, LocalizableResource> localeCache = getLocaleCache(cls);
        // Fast path: already cached, no locking needed.
        T m = (T) localeCache.get(locale);
        if (m != null) {
            return m;
        }
        // Slow path: re-check under the lock (double-checked pattern) so only
        // one thread creates the resource for a given (class, locale).
        synchronized (cache) {
            m = (T) localeCache.get(locale);
            if (m != null) {
                return m;
            }
            m = provider.create(cls, locale);
            put(cls, locale, m);
            return m;
        }
    }
}
public class StringParser {
    /**
     * Parse the given {@link Object} as {@link Double}. Both the locale
     * independent form (e.g. 4.523) and a comma-decimal localized form
     * (e.g. the German 4,523) can be parsed.
     *
     * @param aObject  The object to parse. May be {@code null}.
     * @param aDefault The default value to be returned if the parsed object
     *                 cannot be converted to a double. May be {@code null}.
     * @return {@code aDefault} if the object does not represent a valid value.
     */
    @Nullable
    public static Double parseDoubleObj(@Nullable final Object aObject, @Nullable final Double aDefault) {
        // NaN is used as the sentinel for "unparseable"; an input that
        // legitimately parses to NaN therefore also yields the default.
        final double dValue = parseDouble(aObject, Double.NaN);
        return Double.isNaN(dValue) ? aDefault : Double.valueOf(dValue);
    }
}
public class DateTime { /** * Returns a copy of this datetime minus the specified number of minutes . * The calculation will subtract a duration equivalent to the number of * minutes expressed in milliseconds . * The following three lines are identical in effect : * < pre > * DateTime subtracted = dt . minusMinutes ( 6 ) ; * DateTime subtracted = dt . minus ( Period . minutes ( 6 ) ) ; * DateTime subtracted = dt . withFieldAdded ( DurationFieldType . minutes ( ) , - 6 ) ; * < / pre > * This datetime instance is immutable and unaffected by this method call . * @ param minutes the amount of minutes to subtract , may be negative * @ return the new datetime minus the increased minutes * @ since 1.1 */ public DateTime minusMinutes ( int minutes ) { } }
if ( minutes == 0 ) { return this ; } long instant = getChronology ( ) . minutes ( ) . subtract ( getMillis ( ) , minutes ) ; return withMillis ( instant ) ;
public class AttributesImpl { /** * If not already created , a new < code > version < / code > element will be created and returned . * Otherwise , the first existing < code > version < / code > element will be returned . * @ return the instance defined for the element < code > version < / code > */ public Version < Attributes < T > > getOrCreateVersion ( ) { } }
List < Node > nodeList = childNode . get ( "version" ) ; if ( nodeList != null && nodeList . size ( ) > 0 ) { return new VersionImpl < Attributes < T > > ( this , "version" , childNode , nodeList . get ( 0 ) ) ; } return createVersion ( ) ;
public class ConcurrentTaskExecutor {
    /**
     * Specify the {@link java.util.concurrent.Executor} to delegate to.
     * Autodetects a JSR-236
     * {@link javax.enterprise.concurrent.ManagedExecutorService} in order to
     * expose {@link javax.enterprise.concurrent.ManagedTask} adapters for it.
     * Passing {@code null} installs a fresh single-thread executor instead.
     *
     * @param concurrentExecutor the new concurrent executor
     */
    public final void setConcurrentExecutor(Executor concurrentExecutor) {
        if (concurrentExecutor != null) {
            this.concurrentExecutor = concurrentExecutor;
            // Wrap managed executors so ManagedTask adapters are exposed.
            if (managedExecutorServiceClass != null && managedExecutorServiceClass.isInstance(concurrentExecutor)) {
                this.adaptedExecutor = new ManagedTaskExecutorAdapter(concurrentExecutor);
            } else {
                this.adaptedExecutor = new TaskExecutorAdapter(concurrentExecutor);
            }
        } else {
            // Fallback: a private single-thread executor.
            this.concurrentExecutor = Executors.newSingleThreadExecutor();
            this.adaptedExecutor = new TaskExecutorAdapter(this.concurrentExecutor);
        }
    }
}
public class ValidationReport { /** * Return the severity level this information message is associated with . */ public static ValidationStatus getInfoMsgLevel ( final String msg ) { } }
if ( msg . startsWith ( "ERROR" ) ) { return ValidationStatus . ERROR ; } if ( msg . startsWith ( "WARN" ) ) { return ValidationStatus . WARN ; } return ValidationStatus . PASS ;
public class PlanAssembler {
    /**
     * Called once it's been determined that an aggregation plan node can be
     * pushed down. If an APPROX_COUNT_DISTINCT aggregate is distributed, the
     * distributed aggregate is converted to VALS_TO_HYPERLOGLOG and the
     * coordinating aggregate to HYPERLOGLOGS_TO_CARD.
     *
     * @param distNode  The aggregate node executed on each partition
     * @param coordNode The aggregate node executed on the coordinator
     */
    private static void fixDistributedApproxCountDistinct(AggregatePlanNode distNode, AggregatePlanNode coordNode) {
        assert (distNode != null);
        assert (coordNode != null);
        // Patch up any APPROX_COUNT_DISTINCT on the distributed node.
        List<ExpressionType> distAggTypes = distNode.getAggregateTypes();
        boolean hasApproxCountDistinct = false;
        for (int i = 0; i < distAggTypes.size(); ++i) {
            ExpressionType et = distAggTypes.get(i);
            if (et == ExpressionType.AGGREGATE_APPROX_COUNT_DISTINCT) {
                hasApproxCountDistinct = true;
                distNode.updateAggregate(i, ExpressionType.AGGREGATE_VALS_TO_HYPERLOGLOG);
            }
        }
        if (hasApproxCountDistinct) {
            // Now, patch up any APPROX_COUNT_DISTINCT on the coordinating node.
            List<ExpressionType> coordAggTypes = coordNode.getAggregateTypes();
            for (int i = 0; i < coordAggTypes.size(); ++i) {
                ExpressionType et = coordAggTypes.get(i);
                if (et == ExpressionType.AGGREGATE_APPROX_COUNT_DISTINCT) {
                    coordNode.updateAggregate(i, ExpressionType.AGGREGATE_HYPERLOGLOGS_TO_CARD);
                }
            }
        }
    }
}
public class CmsResourceTypeStatResultList { /** * Method to initialize the list . * @ param resList a given instance or null * @ return an instance */ public static CmsResourceTypeStatResultList init ( CmsResourceTypeStatResultList resList ) { } }
if ( resList == null ) { return new CmsResourceTypeStatResultList ( ) ; } resList . deleteOld ( ) ; return resList ;
public class SmileStorage {
    /**
     * Retrieves the next tuple to be processed, or null when there are no more
     * tuples (or the input is corrupt, in which case the rest of the split is
     * skipped after logging).
     *
     * @return the next tuple to be processed or null if there are no more
     *         tuples to be processed
     * @throws java.io.IOException if the record is not a SmileEnvelopeEvent
     */
    @Override
    public Tuple getNext() throws IOException {
        try {
            if (reader == null || !reader.nextKeyValue()) {
                return null;
            }
            final Object value = reader.getCurrentValue();
            if (value instanceof SmileEnvelopeEvent) {
                final SmileEnvelopeEvent envelope = (SmileEnvelopeEvent) value;
                final JsonNode data = (JsonNode) envelope.getData();
                // Build the tuple field-by-field in schema order.
                final Tuple tuple = factory.newTuple(data.size());
                int i = 0;
                for (final GoodwillSchemaField field : schema.getSchema()) {
                    final JsonNode node = data.get(field.getName());
                    tuple.set(i, getJsonValue(field.getType(), node));
                    i++;
                }
                return tuple;
            } else {
                throw new IOException(String.format("Expected SmileEnvelopeEvent, not %s", value.getClass()));
            }
        } catch (NullPointerException e) {
            // NOTE(review): deliberately treats an NPE as a corrupt-file signal;
            // falls through to the final return null below.
            String splitInfo = "<no split info>";
            if (split != null) {
                splitInfo = split.toString();
            }
            log.error(String.format("Corrupt Smile file (%s), ignoring the rest of the input", splitInfo), e);
        } catch (com.fasterxml.jackson.core.JsonParseException e) {
            String splitInfo = "<no split info>";
            if (split != null) {
                splitInfo = split.toString();
            }
            log.error(String.format("Corrupt Smile file (%s), ignoring the rest of the input", splitInfo), e);
            return null;
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can observe it.
            Thread.currentThread().interrupt();
        }
        return null;
    }
}
public class StreamsUtils { /** * < p > Generates a stream that is computed from a provided stream following two steps . < / p > * < p > The first steps maps this stream to an < code > IntStream < / code > that is then rolled following * the same principle as the < code > roll ( ) < / code > method . This steps builds a < code > Stream & lt ; IntStream & gt ; < / code > . * < p > Then int summary statistics are computed on each < code > IntStream < / code > using a < code > collect ( ) < / code > call , * and a < code > Stream & lt ; IntSummaryStatistics & gt ; < / code > is returned . < / p > * < p > The resulting stream has the same number of elements as the provided stream , * minus the size of the window width , to preserve consistency of each collection . < / p > * < p > A < code > NullPointerException < / code > will be thrown if the provided stream or the mapper is null . < / p > * @ param stream the processed stream * @ param rollingFactor the size of the window to apply the collector on * @ param mapper the mapper applied * @ param < E > the type of the provided stream * @ return a stream in which each value is the collection of the provided stream */ public static < E > Stream < IntSummaryStatistics > shiftingWindowSummarizingInt ( Stream < E > stream , int rollingFactor , ToIntFunction < ? super E > mapper ) { } }
Objects . requireNonNull ( stream ) ; Objects . requireNonNull ( mapper ) ; IntStream intStream = stream . mapToInt ( mapper ) ; return shiftingWindowSummarizingInt ( intStream , rollingFactor ) ;
public class LocalTIDTable {
    /**
     * Return an array of all the transactions currently running on the server.
     *
     * @return An array of all the server's transactions; the shared empty
     *         array {@code noTxns} when there are none.
     */
    public static TransactionImpl[] getAllTransactions() {
        if (tc.isEntryEnabled())
            Tr.entry(tc, "getAllTransactions");
        final Collection<TransactionImpl> txns = localTIDMap.values();
        // NOTE(review): Map.values() never returns null, so the fallback
        // branch below is effectively dead — kept for safety.
        if (txns != null) {
            if (tc.isEntryEnabled())
                Tr.exit(tc, "getAllTransactions", txns);
            return txns.toArray(noTxns);
        }
        if (tc.isEntryEnabled())
            Tr.exit(tc, "getAllTransactions", noTxns);
        return noTxns;
    }
}
public class AOPool {
    /**
     * The RefreshConnection thread polls every connection in the connection
     * pool. If it detects a connection has been idle for more than the
     * pre-defined MAX_IDLE_TIME (or exceeded the max connection age), it
     * closes the connection. It stops when the pool is flagged as closed.
     */
    @Override
    final public void run() {
        while (true) {
            try {
                try {
                    sleep(delayTime);
                } catch (InterruptedException err) {
                    // NOTE(review): interrupt flag is not restored here — confirm intended.
                    logger.log(Level.WARNING, null, err);
                }
                long time = System.currentTimeMillis();
                List<C> connsToClose;
                // Collect expired connections under the pool lock; close them after
                // releasing it so slow closes don't block the pool.
                synchronized (poolLock) {
                    if (isClosed) return;
                    // Find any connections that are available and been idle too long
                    int maxIdle = maxIdleTime;
                    connsToClose = new ArrayList<>(availableConnections.size());
                    for (PooledConnection<C> availableConnection : availableConnections) {
                        synchronized (availableConnection) {
                            C conn = availableConnection.connection;
                            if (conn != null) {
                                if ((time - availableConnection.releaseTime) > maxIdle // Idle too long
                                        || (maxConnectionAge != UNLIMITED_MAX_CONNECTION_AGE
                                            && (availableConnection.createTime > time // System time reset?
                                                || (time - availableConnection.createTime) >= maxConnectionAge // Max connection age reached
                                            )
                                        )
                                ) {
                                    // Detach before closing so no one can reuse it.
                                    availableConnection.connection = null;
                                    connsToClose.add(conn);
                                }
                            }
                        }
                    }
                }
                // Close all of the connections
                for (C conn : connsToClose) {
                    try {
                        close(conn);
                    } catch (Exception err) {
                        logger.log(Level.WARNING, null, err);
                    }
                }
            } catch (ThreadDeath TD) {
                throw TD;
            } catch (Throwable T) {
                // Keep the refresh thread alive on any unexpected failure.
                logger.logp(Level.SEVERE, AOPool.class.getName(), "run", null, T);
            }
        }
    }
}
public class Optionals {
    /**
     * Casts {@code optional} to an optional of type {@code type} if the value
     * held by {@code optional} is an instance of {@code type}; otherwise
     * returns {@link Optional#empty()}.
     *
     * @param optional the optional
     * @param type     the type
     * @param <T>      the type
     * @return the optional
     */
    public static <T> @NonNull Optional<T> cast(final @NonNull Optional<?> optional, final @NonNull Class<T> type) {
        // not necessary to re-wrap, we can just cast: the instanceof check
        // guarantees the unchecked cast is safe.
        return isInstance(optional, type) ? (Optional<T>) optional : Optional.empty();
    }
}
public class ServletUtil { /** * Gets the value of the given parameter from the request converted to * a { @ code double } . & nbsp ; If the parameter is not set or not parseable , the default * value is returned . * @ param pReq the servlet request * @ param pName the parameter name * @ param pDefault the default value * @ return the value of the parameter converted to n { @ code double } , or the default * value , if the parameter is not set . */ public static double getDoubleParameter ( final ServletRequest pReq , final String pName , final double pDefault ) { } }
String str = pReq . getParameter ( pName ) ; try { return str != null ? Double . parseDouble ( str ) : pDefault ; } catch ( NumberFormatException nfe ) { return pDefault ; }
public class AmazonS3Client { /** * Add IAM specific headers based on the credentials set & any optional * parameters added to the CreateBucketRequest object * @ param request * @ param createBucketRequest * @ return Request < CreateBucketRequest > */ protected Request < CreateBucketRequest > addIAMHeaders ( Request < CreateBucketRequest > request , CreateBucketRequest createBucketRequest ) { } }
if ( ( null != this . awsCredentialsProvider ) && ( this . awsCredentialsProvider . getCredentials ( ) instanceof IBMOAuthCredentials ) ) { if ( null != createBucketRequest . getServiceInstanceId ( ) ) { request . addHeader ( Headers . IBM_SERVICE_INSTANCE_ID , createBucketRequest . getServiceInstanceId ( ) ) ; if ( null != createBucketRequest . getEncryptionType ( ) ) { request . addHeader ( Headers . IBM_SSE_KP_ENCRYPTION_ALGORITHM , createBucketRequest . getEncryptionType ( ) . getKmsEncryptionAlgorithm ( ) ) ; request . addHeader ( Headers . IBM_SSE_KP_CUSTOMER_ROOT_KEY_CRN , createBucketRequest . getEncryptionType ( ) . getIBMSSEKMSCustomerRootKeyCrn ( ) ) ; } } else { IBMOAuthCredentials oAuthCreds = ( IBMOAuthCredentials ) this . awsCredentialsProvider . getCredentials ( ) ; if ( oAuthCreds . getServiceInstanceId ( ) != null ) { request . addHeader ( Headers . IBM_SERVICE_INSTANCE_ID , oAuthCreds . getServiceInstanceId ( ) ) ; if ( null != createBucketRequest . getEncryptionType ( ) ) { request . addHeader ( Headers . IBM_SSE_KP_ENCRYPTION_ALGORITHM , createBucketRequest . getEncryptionType ( ) . getKmsEncryptionAlgorithm ( ) ) ; request . addHeader ( Headers . IBM_SSE_KP_CUSTOMER_ROOT_KEY_CRN , createBucketRequest . getEncryptionType ( ) . getIBMSSEKMSCustomerRootKeyCrn ( ) ) ; } } } } return request ;
public class DevicesInner {

    /**
     * Installs the updates on the data box edge/gateway device.
     *
     * @param deviceName The device name.
     * @param resourceGroupName The resource group name.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<Void> beginInstallUpdatesAsync(String deviceName, String resourceGroupName, final ServiceCallback<Void> serviceCallback) {
        // Delegate to the observable-based overload and adapt it to the
        // callback-style ServiceFuture contract.
        return ServiceFuture.fromResponse(beginInstallUpdatesWithServiceResponseAsync(deviceName, resourceGroupName), serviceCallback);
    }
}
public class RedisObjectFactory { /** * New redis connection factory . * @ param redis the redis * @ return the redis connection factory */ public static RedisConnectionFactory newRedisConnectionFactory ( final BaseRedisProperties redis ) { } }
val redisConfiguration = redis . getSentinel ( ) == null ? ( RedisConfiguration ) getStandaloneConfig ( redis ) : getSentinelConfig ( redis ) ; val factory = new LettuceConnectionFactory ( redisConfiguration , getRedisPoolConfig ( redis ) ) ; return factory ;
public class NSEC3Record { /** * Converts rdata to a String */ String rrToString ( ) { } }
StringBuffer sb = new StringBuffer ( ) ; sb . append ( hashAlg ) ; sb . append ( ' ' ) ; sb . append ( flags ) ; sb . append ( ' ' ) ; sb . append ( iterations ) ; sb . append ( ' ' ) ; if ( salt == null ) sb . append ( '-' ) ; else sb . append ( base16 . toString ( salt ) ) ; sb . append ( ' ' ) ; sb . append ( b32 . toString ( next ) ) ; if ( ! types . empty ( ) ) { sb . append ( ' ' ) ; sb . append ( types . toString ( ) ) ; } return sb . toString ( ) ;
public class AccumuloClient {

    /**
     * Searches through the given locality groups to find if this column has a
     * locality group.
     *
     * @param columnName column name to locate (matched in lower case)
     * @param groups optional locality group configuration
     * @return the name of the first group containing the column, or empty
     */
    private static Optional<String> getColumnLocalityGroup(String columnName, Optional<Map<String, Set<String>>> groups) {
        if (!groups.isPresent()) {
            return Optional.empty();
        }
        final String lowered = columnName.toLowerCase(Locale.ENGLISH);
        // First matching group in map iteration order wins, as before.
        return groups.get().entrySet().stream()
                .filter(entry -> entry.getValue().contains(lowered))
                .map(Map.Entry::getKey)
                .findFirst();
    }
}
public class ReportDaoImpl { /** * Adds root element . */ private Element addRoot ( Document document , Map < ReportBuilderImpl . ReportKey , Object > reportsData ) { } }
Element rootElement = document . createElement ( "project" ) ; if ( reportsData . containsKey ( ReportKey . PROJECT_NAME ) ) { rootElement . setAttribute ( "name" , ( String ) reportsData . get ( ReportKey . PROJECT_NAME ) ) ; } document . appendChild ( rootElement ) ; return rootElement ;
public class ParquetGroupConverter { /** * convert a repeated field into a list of primitives or groups */ private static List < Object > convertRepeatedFieldToList ( Group g , int fieldIndex , boolean binaryAsString ) { } }
Type t = g . getType ( ) . getFields ( ) . get ( fieldIndex ) ; assert t . getRepetition ( ) . equals ( Type . Repetition . REPEATED ) ; int repeated = g . getFieldRepetitionCount ( fieldIndex ) ; List < Object > vals = new ArrayList < > ( ) ; for ( int i = 0 ; i < repeated ; i ++ ) { if ( t . isPrimitive ( ) ) { vals . add ( convertPrimitiveField ( g , fieldIndex , i , binaryAsString ) ) ; } else { vals . add ( g . getGroup ( fieldIndex , i ) ) ; } } return vals ;
public class HttpService { /** * - - - - - private methods - - - - - */ private void sendLifecycleEvent ( final LifecycleEvent event ) { } }
// instantiate and call lifecycle callbacks from configuration file final String listeners = Settings . LifecycleListeners . getValue ( ) ; if ( listeners != null ) { final String [ ] listenerClasses = listeners . split ( "[\\s ,;]+" ) ; for ( String listenerClass : listenerClasses ) { if ( StringUtils . isNotBlank ( listenerClass ) ) { try { final HttpServiceLifecycleListener listener = ( HttpServiceLifecycleListener ) Class . forName ( listenerClass ) . newInstance ( ) ; switch ( event ) { case Started : listener . serverStarted ( ) ; break ; case Stopped : listener . serverStopped ( ) ; break ; } } catch ( InstantiationException | IllegalAccessException | ClassNotFoundException ex ) { logger . error ( "Unable to send lifecycle event to listener " + listenerClass , ex ) ; } } } }
public class SlideDisabledEvent {

    /**
     * Fires a slide disabled event on all registered handlers in the handler
     * manager. If no such handlers exist, this method will do nothing.
     *
     * @param source the source of the handlers
     */
    public static void fire(final HasSlideDisabledHandlers source) {
        // NOTE(review): TYPE is presumably lazily initialized when the first
        // handler registers (common GWT event pattern) — while it is null no
        // handler can exist, so firing is skipped entirely. Confirm against
        // the TYPE declaration.
        if (TYPE != null) {
            SlideDisabledEvent event = new SlideDisabledEvent();
            source.fireEvent(event);
        }
    }
}
class FibonacciSequence {

    /**
     * Calculates the n-th number in the Fibonacci sequence.
     * The Fibonacci sequence is characterized by the fact that every number
     * after the first two is the sum of the two preceding ones.
     *
     * Example:
     * {@code fibonacci(10)} returns 55; {@code fibonacci(8)} returns 21.
     *
     * @param n The position in the Fibonacci sequence; must be non-negative.
     * @return The n-th number in the Fibonacci sequence.
     * @throws IllegalArgumentException if {@code n} is negative (the naive
     *         recursion previously diverged on negative input).
     */
    public static int fibonacci(int n) {
        if (n < 0) {
            throw new IllegalArgumentException("n must be non-negative: " + n);
        }
        // Iterative bottom-up computation: O(n) time and O(1) space, versus
        // the exponential-time naive double recursion.
        int prev = 0; // fib(i)
        int curr = 1; // fib(i + 1)
        for (int i = 0; i < n; i++) {
            int next = prev + curr;
            prev = curr;
            curr = next;
        }
        return prev;
    }
}
public class DaJLabModule { /** * Launch the module . * @ return thread */ public final Thread launch ( ) { } }
if ( state != RUN ) { onLaunch ( ) ; state = RUN ; Thread thread = ( new Thread ( this ) ) ; thread . start ( ) ; return thread ; } else { return null ; }
public class RDBMSPropertyReader {

    /**
     * Reads the external client property file configured for the persistence
     * unit (either in externally supplied properties or in the PU metadata)
     * and merges it into a new Hibernate {@code Configuration} seeded with
     * the PU's own properties.
     *
     * @param pu persistence unit name (not read here; the metadata is taken
     *           from the {@code puMetadata} field — presumably set by the
     *           caller for this unit; TODO confirm)
     * @return the populated configuration
     */
    public Configuration load(String pu) {
        Configuration conf = new Configuration().addProperties(puMetadata.getProperties());
        // Externally supplied properties override the value from PU metadata.
        String propertyFileName = externalProperties != null ? (String) externalProperties.get(PersistenceProperties.KUNDERA_CLIENT_PROPERTY) : null;
        if (propertyFileName == null) {
            propertyFileName = puMetadata != null ? puMetadata.getProperty(PersistenceProperties.KUNDERA_CLIENT_PROPERTY) : null;
        }
        if (propertyFileName != null) {
            // Dispatch on the file extension (.xml vs .properties).
            PropertyType fileType = PropertyType.value(propertyFileName);
            switch (fileType) {
                case xml:
                    conf.configure(propertyFileName);
                    break;
                case properties:
                    Properties props = new Properties();
                    // Try the classpath first, then fall back to the file system.
                    InputStream ioStream = puMetadata.getClassLoader().getResourceAsStream(propertyFileName);
                    if (ioStream == null) {
                        propertyFileName = KunderaCoreUtils.resolvePath(propertyFileName);
                        try {
                            ioStream = new FileInputStream(new File(propertyFileName));
                        } catch (FileNotFoundException e) {
                            // Best effort: a missing file only produces a warning.
                            log.warn("File {} not found, Caused by ", propertyFileName);
                        }
                    }
                    try {
                        if (ioStream != null) {
                            props.load(ioStream);
                        }
                    } catch (IOException e) {
                        log.error("Skipping as error occurred while loading property file {}, Cause by : {}.", propertyFileName, e);
                    }
                    // NOTE(review): ioStream is never closed — potential
                    // resource leak; consider try-with-resources.
                    conf.addProperties(props);
                    break;
                default:
                    log.error("Unsupported type{} for file{}, skipping load of properties.", fileType, propertyFileName);
                    break;
            }
        }
        return conf;
    }
}
public class ElasticHashinator { /** * Track allocated bytes and invoke System . gc to encourage reclamation if it is growing large */ private static synchronized void trackAllocatedHashinatorBytes ( long bytes ) { } }
final long allocated = m_allocatedHashinatorBytes . addAndGet ( bytes ) ; if ( allocated > HASHINATOR_GC_THRESHHOLD ) { hostLogger . warn ( allocated + " bytes of hashinator data has been allocated" ) ; if ( m_emergencyGCThread == null || m_emergencyGCThread . getState ( ) == State . TERMINATED ) { m_emergencyGCThread = new Thread ( new Runnable ( ) { @ Override public void run ( ) { hostLogger . warn ( "Invoking System.gc() to recoup hashinator bytes" ) ; System . gc ( ) ; try { Thread . sleep ( 2000 ) ; } catch ( InterruptedException e ) { } hostLogger . info ( m_allocatedHashinatorBytes . get ( ) + " bytes of hashinator allocated after GC" ) ; } } , "Hashinator GC thread" ) ; m_emergencyGCThread . start ( ) ; } }
public class MessageSourceFieldFaceSource { /** * Returns the value of the required property of the FieldFace . Delegates to the getMessageKeys for the message key * generation strategy . */ protected String getMessage ( String contextId , String fieldPath , String [ ] faceDescriptorProperties , String defaultValue ) { } }
String [ ] keys = getMessageKeys ( contextId , fieldPath , faceDescriptorProperties ) ; try { return getMessageSourceAccessor ( ) . getMessage ( new DefaultMessageSourceResolvable ( keys , null , defaultValue ) ) ; } catch ( NoSuchMessageException e ) { if ( log . isDebugEnabled ( ) ) { log . debug ( e . getMessage ( ) ) ; } return null ; }
public class CmsHtmlDecorator {

    /**
     * Splits a String into substrings along the provided delimiter list and returns
     * the result as a List of Substrings.<p>
     *
     * @param source the String to split
     * @param delimiters the delimiters to split at
     * @param trim flag to indicate if leading and trailing whitespaces should be omitted
     * @param includeDelimiters flag to indicate if the delimiters should be included as well
     * @return the List of splitted Substrings
     */
    public static List<String> splitAsList(String source, String[] delimiters, boolean trim, boolean includeDelimiters) {
        List<String> result = new ArrayList<String>();
        String delimiter = "";
        int i = 0; // start index of the current token
        int l = source.length();
        int n = -1; // index of the next delimiter occurrence, -1 when none
        int max = Integer.MAX_VALUE;
        // find the next delimiter — the earliest match among all delimiters wins
        for (int j = 0; j < delimiters.length; j++) {
            int delimPos = source.indexOf(delimiters[j]);
            if (delimPos > -1) {
                if (delimPos < max) {
                    max = delimPos;
                    n = delimPos;
                    delimiter = delimiters[j];
                }
            }
        }
        while (n != -1) {
            // zero-length items are not seen as tokens at start or end
            if ((i < n) || ((i > 0) && (i < l))) {
                result.add(trim ? source.substring(i, n).trim() : source.substring(i, n));
                // add the delimiter to the list as well
                if (includeDelimiters && ((n + delimiter.length()) <= l)) {
                    result.add(source.substring(n, n + delimiter.length()));
                }
            } else {
                // add the delimiter to the list as well
                // NOTE(review): this tests the start of the whole source, not
                // position n — presumably intentional for a leading delimiter;
                // confirm against callers.
                if (includeDelimiters && source.startsWith(delimiter)) {
                    result.add(delimiter);
                }
            }
            i = n + delimiter.length();
            // find the next delimiter, scanning from the new token start
            max = Integer.MAX_VALUE;
            n = -1;
            for (int j = 0; j < delimiters.length; j++) {
                int delimPos = source.indexOf(delimiters[j], i);
                if (delimPos > -1) {
                    if (delimPos < max) {
                        max = delimPos;
                        n = delimPos;
                        delimiter = delimiters[j];
                    }
                }
            }
        }
        // is there a non-empty String to cut from the tail?
        if (n < 0) {
            n = source.length();
        }
        if (i < n) {
            result.add(trim ? source.substring(i).trim() : source.substring(i));
        }
        return result;
    }
}
public class PersistentResourceXMLDescription {

    /**
     * Parse xml from provided <code>reader</code> and add resulting operations to passed list
     *
     * @param reader xml reader to parse from
     * @param parentAddress address of the parent, used as base for all child elements
     * @param list list of operations where result will be put to.
     * @throws XMLStreamException if any error occurs while parsing
     */
    public void parse(final XMLExtendedStreamReader reader, PathAddress parentAddress, List<ModelNode> list) throws XMLStreamException {
        // Decorator elements delegate entirely to their own parsing path.
        if (decoratorElement != null) {
            parseDecorator(reader, parentAddress, list);
            return;
        }
        if (xmlWrapperElement != null) {
            // This resource's content is nested inside an extra wrapper element.
            if (reader.getLocalName().equals(xmlWrapperElement)) {
                // An immediately following END_ELEMENT means the wrapper is
                // empty — nothing to parse.
                if (reader.hasNext() && reader.nextTag() == END_ELEMENT) {
                    return;
                }
            } else {
                throw ParseUtils.unexpectedElement(reader);
            }
            // Parse the first child (the cursor is already positioned on it),
            // then keep parsing siblings until the wrapper's closing tag.
            parseInternal(reader, parentAddress, list);
            while (reader.nextTag() != END_ELEMENT && !reader.getLocalName().equals(xmlWrapperElement)) {
                parseInternal(reader, parentAddress, list);
            }
        } else {
            parseInternal(reader, parentAddress, list);
        }
    }
}
public class RendererFactory { /** * Returns the image renderer * @ param config * the Jawr config * @ param isPlainImg * the flag indicating if it a plain image to render or not * @ return the image renderer */ public final static ImgRenderer getImgRenderer ( JawrConfig config , boolean isPlainImg ) { } }
ImgRenderer renderer = ( ImgRenderer ) ClassLoaderResourceUtils . buildObjectInstance ( config . getImgRendererClass ( ) ) ; renderer . init ( isPlainImg ) ; return renderer ;
public class ForLoop {

    /**
     * Visits this node, the initializer expression, the loop condition
     * expression, the increment expression, and then the loop body.
     */
    @Override
    public void visit(NodeVisitor v) {
        // Children are only descended into when the visitor asks for it by
        // returning true from the visit of this node.
        if (v.visit(this)) {
            initializer.visit(v);
            condition.visit(v);
            increment.visit(v);
            body.visit(v);
        }
    }
}
public class HeapAuxHashMap {

    /**
     * Inserts the (slotNo, value) pair into the aux array; the slot must not
     * already be present. (In C: two-registers.c Line 300.)
     *
     * @param slotNo the HLL slot number to add
     * @param value the value to associate with the slot
     * @throws SketchesStateException if the slot is already present
     */
    @Override
    public void mustAdd(final int slotNo, final int value) {
        // find() returns a non-negative index when slotNo is present, or the
        // one's complement (~) of the first empty position when it is not —
        // the ~index usage below relies on that convention.
        final int index = find(auxIntArr, lgAuxArrInts, lgConfigK, slotNo);
        final int pair = HllUtil.pair(slotNo, value);
        if (index >= 0) {
            // The slot already exists: the "must add" contract was violated.
            final String pairStr = HllUtil.pairString(pair);
            throw new SketchesStateException("Found a slotNo that should not be there: " + pairStr);
        }
        // Found empty entry — recover the insertion position and store the pair.
        auxIntArr[~index] = pair;
        auxCount++;
        // Grow the aux array if the new count requires it.
        checkGrow();
    }
}
public class CmsImportVersion10 { /** * Sorts the parsealble resources before we actually parse the links . < p > * This is needed because we may , for example , have resources A and B such that A has a link to B , and B requires * the relation corresponding to that link to be present for some functionality ( e . g . the page _ title macro in gallery name * mappings ) , so we need to parse the links for A first to create the relation before B is processed . * @ param parseables the list of parseable resources which should be sorted in place */ protected static void sortParseableResources ( List < CmsResource > parseables ) { } }
Collections . sort ( parseables , new Comparator < CmsResource > ( ) { public int compare ( CmsResource a , CmsResource b ) { return ComparisonChain . start ( ) . compare ( getRank ( a ) , getRank ( b ) ) . compare ( a . getRootPath ( ) , b . getRootPath ( ) ) . result ( ) ; } int getRank ( CmsResource res ) { if ( CmsResourceTypeXmlContainerPage . isContainerPage ( res ) ) { return 0 ; } else { return 1 ; } } } ) ;
public class ApplicationFeatureViewModel {

    /**
     * The parent package feature of this class or package.
     */
    @Programmatic
    public ApplicationFeatureViewModel getParentPackage() {
        // Resolve the parent package's feature id to a view model through the
        // repository-backed lookup function; `container` supplies the services
        // the view model needs.
        return Functions.asViewModelForId(applicationFeatureRepository, container).apply(getFeatureId().getParentPackageId());
    }
}
public class URI {

    /**
     * Append to the end of the path of this URI. If the current path does
     * not end in a slash and the path to be appended does not begin with
     * a slash, a slash will be appended to the current path before the
     * new segment is added. Also, if the current path ends in a slash
     * and the new segment begins with a slash, the extra slash will be
     * removed before the new segment is appended.
     *
     * @param p_addToPath the new segment to be added to the current path
     * @throws MalformedURIException if p_addToPath contains syntax errors
     */
    public void appendPath(String p_addToPath) throws MalformedURIException {
        // Nothing to append for null or whitespace-only input.
        if (p_addToPath == null || p_addToPath.trim().length() == 0) {
            return;
        }
        if (!isURIString(p_addToPath)) {
            throw new MalformedURIException(Utils.messages.createMessage(
                MsgKey.ER_PATH_INVALID_CHAR, new Object[]{p_addToPath})); // "Path contains invalid character!"
        }
        if (m_path == null || m_path.trim().length() == 0) {
            // No existing path: ensure the result starts with a slash.
            if (p_addToPath.startsWith("/")) {
                m_path = p_addToPath;
            } else {
                m_path = "/" + p_addToPath;
            }
        } else if (m_path.endsWith("/")) {
            // Existing path already ends in '/': avoid a doubled slash.
            if (p_addToPath.startsWith("/")) {
                m_path = m_path.concat(p_addToPath.substring(1));
            } else {
                m_path = m_path.concat(p_addToPath);
            }
        } else {
            // Existing path has no trailing '/': insert one when needed.
            if (p_addToPath.startsWith("/")) {
                m_path = m_path.concat(p_addToPath);
            } else {
                m_path = m_path.concat("/" + p_addToPath);
            }
        }
    }
}
public class WeeklyAutoScalingSchedule { /** * The schedule for Monday . * @ return The schedule for Monday . */ public java . util . Map < String , String > getMonday ( ) { } }
if ( monday == null ) { monday = new com . amazonaws . internal . SdkInternalMap < String , String > ( ) ; } return monday ;