idx int64 0 41.2k | question stringlengths 74 4.04k | target stringlengths 7 750 |
|---|---|---|
32,400 | public void setAttributes ( Map < String , String > actions ) { this . actions = actions ; this . actionCounter = actions . keySet ( ) . size ( ) ; } | You may use this field to directly programmatically add your own Map of key value pairs that you would like to send for this command . Setting your own map will reset the command index to the number of keys in the Map |
32,401 | public void publish ( ) { DataBuffer tmp = null ; Lock l = null ; synchronized ( swapLock ) { tmp = current ; current = previous ; previous = tmp ; l = current . getLock ( ) ; l . lock ( ) ; try { current . startCollection ( ) ; } finally { l . unlock ( ) ; } l = tmp . getLock ( ) ; l . lock ( ) ; } try { tmp . endCollection ( ) ; publish ( tmp ) ; } finally { l . unlock ( ) ; } } | Swaps the data collection buffers and computes statistics about the data collected up til now . |
32,402 | public double [ ] getPercentiles ( double [ ] percents , double [ ] percentiles ) { for ( int i = 0 ; i < percents . length ; i ++ ) { percentiles [ i ] = computePercentile ( percents [ i ] ) ; } return percentiles ; } | Gets the requested percentile statistics . |
32,403 | public static boolean applyFilters ( Object event , Set < EventFilter > filters , StatsTimer filterStats , String invokerDesc , Logger logger ) { if ( filters . isEmpty ( ) ) { return true ; } Stopwatch filterStart = filterStats . start ( ) ; try { for ( EventFilter filter : filters ) { if ( ! filter . apply ( event ) ) { logger . debug ( "Event: " + event + " filtered out for : " + invokerDesc + " due to the filter: " + filter ) ; return false ; } } return true ; } finally { filterStart . stop ( ) ; } } | Utility method to apply filters for an event this can be used both by publisher & subscriber code . |
32,404 | protected Predicate < Object > getEqualFilter ( ) { String xpath = getXPath ( getChild ( 0 ) ) ; Tree valueNode = getChild ( 1 ) ; switch ( valueNode . getType ( ) ) { case NUMBER : Number value = ( Number ) ( ( ValueTreeNode ) valueNode ) . getValue ( ) ; return new PathValueEventFilter ( xpath , new NumericValuePredicate ( value , "=" ) ) ; case STRING : String sValue = ( String ) ( ( ValueTreeNode ) valueNode ) . getValue ( ) ; return new PathValueEventFilter ( xpath , new StringValuePredicate ( sValue ) ) ; case TRUE : return new PathValueEventFilter ( xpath , BooleanValuePredicate . TRUE ) ; case FALSE : return new PathValueEventFilter ( xpath , BooleanValuePredicate . FALSE ) ; case NULL : return new PathValueEventFilter ( xpath , NullValuePredicate . INSTANCE ) ; case XPATH_FUN_NAME : String aPath = ( String ) ( ( ValueTreeNode ) valueNode ) . getValue ( ) ; return new PathValueEventFilter ( xpath , new XPathValuePredicate ( aPath , xpath ) ) ; case TIME_MILLIS_FUN_NAME : TimeMillisValueTreeNode timeNode = ( TimeMillisValueTreeNode ) valueNode ; return new PathValueEventFilter ( xpath , new TimeMillisValuePredicate ( timeNode . getValueFormat ( ) , timeNode . getValue ( ) , "=" ) ) ; case TIME_STRING_FUN_NAME : TimeStringValueTreeNode timeStringNode = ( TimeStringValueTreeNode ) valueNode ; return new PathValueEventFilter ( xpath , new TimeStringValuePredicate ( timeStringNode . getValueTimeFormat ( ) , timeStringNode . getInputTimeFormat ( ) , timeStringNode . getValue ( ) , "=" ) ) ; default : throw new UnexpectedTokenException ( valueNode , "Number" , "String" , "TRUE" , "FALSE" ) ; } } | but I can t get ANTLR to generated nested tree with added node . |
32,405 | public static boolean equalObjects ( Object o1 , Object o2 ) { if ( o1 == null ) { return ( o2 == null ) ; } else if ( o2 == null ) { return false ; } else { return o1 . equals ( o2 ) ; } } | Utility function to make it easy to compare two possibly null objects . |
32,406 | @ SuppressWarnings ( "deprecation" ) public < S extends T > S save ( final S entity ) { if ( arangoOperations . getVersion ( ) . getVersion ( ) . compareTo ( "3.4.0" ) < 0 ) { arangoOperations . upsert ( entity , UpsertStrategy . REPLACE ) ; } else { arangoOperations . repsert ( entity ) ; } return entity ; } | Saves the passed entity to the database using upsert from the template |
32,407 | @ SuppressWarnings ( "deprecation" ) public < S extends T > Iterable < S > saveAll ( final Iterable < S > entities ) { if ( arangoOperations . getVersion ( ) . getVersion ( ) . compareTo ( "3.4.0" ) < 0 ) { arangoOperations . upsert ( entities , UpsertStrategy . UPDATE ) ; } else { final S first = StreamSupport . stream ( entities . spliterator ( ) , false ) . findFirst ( ) . get ( ) ; arangoOperations . repsert ( entities , ( Class < S > ) first . getClass ( ) ) ; } return entities ; } | Saves the given iterable of entities to the database |
32,408 | public Optional < T > findById ( final ID id ) { return arangoOperations . find ( id , domainClass ) ; } | Finds the document with the given id in the database if it exists |
32,409 | public Iterable < T > findAllById ( final Iterable < ID > ids ) { return arangoOperations . find ( ids , domainClass ) ; } | Finds all documents with an id or key in the argument |
32,410 | public void delete ( final T entity ) { String id = null ; try { id = ( String ) arangoOperations . getConverter ( ) . getMappingContext ( ) . getPersistentEntity ( domainClass ) . getIdProperty ( ) . getField ( ) . get ( entity ) ; } catch ( final IllegalAccessException e ) { e . printStackTrace ( ) ; } arangoOperations . delete ( id , domainClass ) ; } | Deletes document in the database representing the given object by getting its id |
32,411 | public Iterable < T > findAll ( final Sort sort ) { return new Iterable < T > ( ) { public Iterator < T > iterator ( ) { return findAllInternal ( sort , null , new HashMap < > ( ) ) ; } } ; } | Gets all documents in the collection for the class type of this repository with the given sort applied |
32,412 | public Page < T > findAll ( final Pageable pageable ) { if ( pageable == null ) { LOGGER . debug ( "Pageable in findAll(Pageable) is null" ) ; } final ArangoCursor < T > result = findAllInternal ( pageable , null , new HashMap < > ( ) ) ; final List < T > content = result . asListRemaining ( ) ; return new PageImpl < > ( content , pageable , result . getStats ( ) . getFullCount ( ) ) ; } | Gets all documents in the collection for the class type of this repository with pagination |
32,413 | public < S extends T > Optional < S > findOne ( final Example < S > example ) { final ArangoCursor cursor = findAllInternal ( ( Pageable ) null , example , new HashMap ( ) ) ; return cursor . hasNext ( ) ? Optional . ofNullable ( ( S ) cursor . next ( ) ) : Optional . empty ( ) ; } | Finds one document which matches the given example object |
32,414 | public < S extends T > Iterable < S > findAll ( final Example < S > example ) { final ArangoCursor cursor = findAllInternal ( ( Pageable ) null , example , new HashMap < > ( ) ) ; return cursor ; } | Finds all documents which match with the given example |
32,415 | public < S extends T > Iterable < S > findAll ( final Example < S > example , final Sort sort ) { final ArangoCursor cursor = findAllInternal ( sort , example , new HashMap ( ) ) ; return cursor ; } | Finds all documents which match with the given example then apply the given sort to results |
32,416 | public < S extends T > Page < S > findAll ( final Example < S > example , final Pageable pageable ) { final ArangoCursor cursor = findAllInternal ( pageable , example , new HashMap ( ) ) ; final List < T > content = cursor . asListRemaining ( ) ; return new PageImpl < > ( ( List < S > ) content , pageable , cursor . getStats ( ) . getFullCount ( ) ) ; } | Finds all documents which match with the given example with pagination |
32,417 | public < S extends T > long count ( final Example < S > example ) { final Map < String , Object > bindVars = new HashMap < > ( ) ; final String predicate = exampleConverter . convertExampleToPredicate ( example , bindVars ) ; final String filter = predicate . length ( ) == 0 ? "" : " FILTER " + predicate ; final String query = String . format ( "FOR e IN %s%s COLLECT WITH COUNT INTO length RETURN length" , getCollectionName ( ) , filter ) ; final ArangoCursor < Long > cursor = arangoOperations . query ( query , bindVars , null , Long . class ) ; return cursor . next ( ) ; } | Counts the number of documents in the collection which match with the given example |
32,418 | private String escapeSpecialCharacters ( final String string ) { final StringBuilder escaped = new StringBuilder ( ) ; for ( final char character : string . toCharArray ( ) ) { if ( character == '%' || character == '_' || character == '\\' ) { escaped . append ( '\\' ) ; } escaped . append ( character ) ; } return escaped . toString ( ) ; } | Escapes special characters which could be used in an operand of LIKE operator |
32,419 | public static String determineDocumentKeyFromId ( final String id ) { final int lastSlash = id . lastIndexOf ( KEY_DELIMITER ) ; return id . substring ( lastSlash + 1 ) ; } | Provides a substring with _key . |
32,420 | public static String determineCollectionFromId ( final String id ) { final int delimiter = id . indexOf ( KEY_DELIMITER ) ; return delimiter == - 1 ? null : id . substring ( 0 , delimiter ) ; } | Provides a substring with collection name . |
32,421 | private boolean shouldIgnoreCase ( final Part part ) { final Class < ? > propertyClass = part . getProperty ( ) . getLeafProperty ( ) . getType ( ) ; final boolean isLowerable = String . class . isAssignableFrom ( propertyClass ) ; final boolean shouldIgnoreCase = part . shouldIgnoreCase ( ) != Part . IgnoreCaseType . NEVER && isLowerable && ! UNSUPPORTED_IGNORE_CASE . contains ( part . getType ( ) ) ; if ( part . shouldIgnoreCase ( ) == Part . IgnoreCaseType . ALWAYS && ( ! isLowerable || UNSUPPORTED_IGNORE_CASE . contains ( part . getType ( ) ) ) ) { LOGGER . debug ( "Ignoring case for \"{}\" type is meaningless" , propertyClass ) ; } return shouldIgnoreCase ; } | Determines whether the case for a Part should be ignored based on property type and IgnoreCase keywords in the method name |
32,422 | private void checkUniquePoint ( final Point point ) { final boolean isStillUnique = ( uniquePoint == null || uniquePoint . equals ( point ) ) ; if ( ! isStillUnique ) { isUnique = false ; } if ( ! geoFields . isEmpty ( ) ) { Assert . isTrue ( uniquePoint == null || uniquePoint . equals ( point ) , "Different Points are used - Distance is ambiguous" ) ; uniquePoint = point ; } } | Ensures that Points used in geospatial parts of non - nested properties are the same in case geospatial return type is expected |
32,423 | private void checkUniqueLocation ( final Part part ) { isUnique = isUnique == null ? true : isUnique ; isUnique = ( uniqueLocation == null || uniqueLocation . equals ( ignorePropertyCase ( part ) ) ) ? isUnique : false ; if ( ! geoFields . isEmpty ( ) ) { Assert . isTrue ( isUnique , "Different location fields are used - Distance is ambiguous" ) ; } uniqueLocation = ignorePropertyCase ( part ) ; } | Ensures that the same geo fields are used in geospatial parts of non - nested properties in case a geospatial return type is expected |
32,424 | public Object convertResult ( final Class < ? > type ) { try { if ( type . isArray ( ) ) { return TYPE_MAP . get ( "array" ) . invoke ( this ) ; } if ( ! TYPE_MAP . containsKey ( type ) ) { return getNext ( result ) ; } return TYPE_MAP . get ( type ) . invoke ( this ) ; } catch ( final Exception e ) { e . printStackTrace ( ) ; return null ; } } | Called to convert result from ArangoCursor to given type by invoking the appropriate converter method |
32,425 | private Set < ? > buildSet ( final ArangoCursor < ? > cursor ) { return StreamSupport . stream ( Spliterators . spliteratorUnknownSize ( cursor , 0 ) , false ) . collect ( Collectors . toSet ( ) ) ; } | Creates a Set return type from the given cursor |
32,426 | private GeoResult < ? > buildGeoResult ( final ArangoCursor < ? > cursor ) { GeoResult < ? > geoResult = null ; while ( cursor . hasNext ( ) && geoResult == null ) { final Object object = cursor . next ( ) ; if ( ! ( object instanceof VPackSlice ) ) { continue ; } final VPackSlice slice = ( VPackSlice ) object ; final VPackSlice distSlice = slice . get ( "_distance" ) ; final Double distanceInMeters = distSlice . isDouble ( ) ? distSlice . getAsDouble ( ) : null ; if ( distanceInMeters == null ) { continue ; } final Object entity = operations . getConverter ( ) . read ( domainClass , slice ) ; final Distance distance = new Distance ( distanceInMeters / 1000 , Metrics . KILOMETERS ) ; geoResult = new GeoResult < > ( entity , distance ) ; } return geoResult ; } | Build a GeoResult from the given ArangoCursor |
32,427 | @ SuppressWarnings ( { "rawtypes" , "unchecked" } ) private GeoResults < ? > buildGeoResults ( final ArangoCursor < ? > cursor ) { final List < GeoResult < ? > > list = new LinkedList < > ( ) ; cursor . forEachRemaining ( o -> { final GeoResult < ? > geoResult = buildGeoResult ( o ) ; if ( geoResult != null ) { list . add ( geoResult ) ; } } ) ; return new GeoResults ( list ) ; } | Build a GeoResults object with the ArangoCursor returned by query execution |
32,428 | public static Zone create ( String name , String id ) { return new Zone ( id , name , 86400 , "nil@" + name ) ; } | Represent a zone with a fake email and a TTL of 86400 . |
32,429 | public StringRecordBuilder < D > addAll ( String ... records ) { return addAll ( Arrays . asList ( checkNotNull ( records , "records" ) ) ) ; } | adds values to the builder |
32,430 | private Zone zipWithSOA ( Zone next ) { Record soa = api . recordsByNameAndType ( Integer . parseInt ( next . id ( ) ) , next . name ( ) , "SOA" ) . get ( 0 ) ; return Zone . create ( next . id ( ) , next . name ( ) , soa . ttl , next . email ( ) ) ; } | CloudDNS doesn't expose the domain's ttl in the list api . |
32,431 | private Integer getPriority ( Map < String , Object > mutableRData ) { Integer priority = null ; if ( mutableRData . containsKey ( "priority" ) ) { priority = Integer . class . cast ( mutableRData . remove ( "priority" ) ) ; } else if ( mutableRData . containsKey ( "preference" ) ) { priority = Integer . class . cast ( mutableRData . remove ( "preference" ) ) ; } return priority ; } | Has the side effect of removing the priority from the mutableRData . |
32,432 | public static Filter < ResourceRecordSet < ? > > alwaysVisible ( ) { return new Filter < ResourceRecordSet < ? > > ( ) { public boolean apply ( ResourceRecordSet < ? > in ) { return in != null && in . qualifier ( ) == null ; } public String toString ( ) { return "alwaysVisible()" ; } } ; } | Returns true if the input has no visibility qualifier . Typically indicates a basic record set . |
32,433 | public static ResourceRecordSet < SOAData > soa ( ResourceRecordSet < ? > soa , String email , int ttl ) { SOAData soaData = ( SOAData ) soa . records ( ) . get ( 0 ) ; soaData = soaData . toBuilder ( ) . serial ( soaData . serial ( ) + 1 ) . rname ( email ) . build ( ) ; return ResourceRecordSet . < SOAData > builder ( ) . name ( soa . name ( ) ) . type ( "SOA" ) . ttl ( ttl ) . add ( soaData ) . build ( ) ; } | Returns an updated SOA rrset with an incremented serial number and the specified parameters . |
32,434 | public static List < String > split ( char delim , String toSplit ) { checkNotNull ( toSplit , "toSplit" ) ; if ( toSplit . indexOf ( delim ) == - 1 ) { return Arrays . asList ( toSplit ) ; } List < String > out = new LinkedList < String > ( ) ; StringBuilder currentString = new StringBuilder ( ) ; for ( char c : toSplit . toCharArray ( ) ) { if ( c == delim ) { out . add ( emptyToNull ( currentString . toString ( ) ) ) ; currentString . setLength ( 0 ) ; } else { currentString . append ( c ) ; } } out . add ( emptyToNull ( currentString . toString ( ) ) ) ; return out ; } | empty fields will result in null elements in the result . |
32,435 | public Iterator < Zone > iterator ( ) { final Iterator < String > delegate = api . getZonesOfAccount ( account . get ( ) ) . iterator ( ) ; return new Iterator < Zone > ( ) { public boolean hasNext ( ) { return delegate . hasNext ( ) ; } public Zone next ( ) { return fromSOA ( delegate . next ( ) ) ; } public void remove ( ) { throw new UnsupportedOperationException ( ) ; } } ; } | in UltraDNS zones are scoped to an account . |
32,436 | public Object put ( String key , Object val ) { val = val != null && val instanceof Number ? Number . class . cast ( val ) . intValue ( ) : val ; return super . put ( key , val ) ; } | a ctor that allows passing a map . |
32,437 | static Map < String , String > parseJson ( String in ) { if ( in == null ) { return Collections . emptyMap ( ) ; } String noBraces = in . replace ( '{' , ' ' ) . replace ( '}' , ' ' ) . trim ( ) ; Map < String , String > builder = new LinkedHashMap < String , String > ( ) ; Matcher matcher = JSON_FIELDS . matcher ( noBraces ) ; while ( matcher . find ( ) ) { String key = keyMap . get ( matcher . group ( 1 ) ) ; if ( key != null ) { builder . put ( key , matcher . group ( 2 ) ) ; } } return builder ; } | IAM Instance Profile format is simple non - nested json . |
32,438 | public static List < String > list ( URI metadataService , String path ) { checkArgument ( checkNotNull ( path , "path" ) . endsWith ( "/" ) , "path must end with '/'; %s provided" , path ) ; String content = get ( metadataService , path ) ; if ( content != null ) { return split ( '\n' , content ) ; } return Collections . < String > emptyList ( ) ; } | Retrieves a list of resources at a path if present . |
32,439 | public static String get ( URI metadataService , String path ) { checkNotNull ( metadataService , "metadataService" ) ; checkArgument ( metadataService . getPath ( ) . endsWith ( "/" ) , "metadataService must end with '/'; %s provided" , metadataService ) ; checkNotNull ( path , "path" ) ; InputStream stream = null ; try { stream = openStream ( metadataService + path ) ; String content = slurp ( new InputStreamReader ( stream ) ) ; if ( content . isEmpty ( ) ) { return null ; } return content ; } catch ( IOException e ) { return null ; } finally { try { if ( stream != null ) { stream . close ( ) ; } } catch ( IOException e ) { } } } | Retrieves content at a path if present . |
32,440 | public Iterator < ResourceRecordSet < ? > > iterateByName ( String name ) { Filter < ResourceRecordSet < ? > > filter = andNotAlias ( nameEqualTo ( name ) ) ; return lazyIterateRRSets ( api . listResourceRecordSets ( zoneId , name ) , filter ) ; } | lists and lazily transforms all record sets for a name which are not aliases into denominator format . |
32,441 | public boolean onKeyDown ( int keyCode , KeyEvent event ) { if ( keyCode == KeyEvent . KEYCODE_MENU ) { startActivity ( new Intent ( this , PreferencesActivity . class ) ) ; return true ; } return super . onKeyDown ( keyCode , event ) ; } | wire up preferences screen when menu button is pressed . |
32,442 | public void onZones ( ZoneList . SuccessEvent event ) { String durationEvent = getString ( R . string . list_duration , event . duration ) ; Toast . makeText ( this , durationEvent , LENGTH_SHORT ) . show ( ) ; } | flash the response time of doing the list . |
32,443 | public void onFailure ( Throwable t ) { Toast . makeText ( this , t . getMessage ( ) , LENGTH_LONG ) . show ( ) ; } | show any error messages posted to the bus . |
32,444 | public Iterator < Zone > iterateByName ( final String name ) { final Iterator < HostedZone > delegate = api . listHostedZonesByName ( name ) . iterator ( ) ; return new PeekingIterator < Zone > ( ) { protected Zone computeNext ( ) { if ( delegate . hasNext ( ) ) { HostedZone next = delegate . next ( ) ; if ( next . name . equals ( name ) ) { return zipWithSOA ( next ) ; } } return endOfData ( ) ; } } ; } | This implementation assumes that there isn't more than one page of zones with the same name . |
32,445 | private void deleteEverythingExceptNSAndSOA ( String id , String name ) { List < ActionOnResourceRecordSet > deletes = new ArrayList < ActionOnResourceRecordSet > ( ) ; ResourceRecordSetList page = api . listResourceRecordSets ( id ) ; while ( ! page . isEmpty ( ) ) { for ( ResourceRecordSet < ? > rrset : page ) { if ( rrset . type ( ) . equals ( "SOA" ) || rrset . type ( ) . equals ( "NS" ) && rrset . name ( ) . equals ( name ) ) { continue ; } deletes . add ( ActionOnResourceRecordSet . delete ( rrset ) ) ; } if ( ! deletes . isEmpty ( ) ) { api . changeResourceRecordSets ( id , deletes ) ; } if ( page . next == null ) { page . clear ( ) ; } else { deletes . clear ( ) ; page = api . listResourceRecordSets ( id , page . next . name , page . next . type , page . next . identifier ) ; } } } | Works through the zone deleting each page of rrsets except the zone s SOA and the NS rrsets . Once the zone is cleared it can be deleted . |
32,446 | static Object logModule ( boolean quiet , boolean verbose ) { checkArgument ( ! ( quiet && verbose ) , "quiet and verbose flags cannot be used at the same time!" ) ; Logger . Level logLevel ; if ( quiet ) { return null ; } else if ( verbose ) { logLevel = Logger . Level . FULL ; } else { logLevel = Logger . Level . BASIC ; } return new LogModule ( logLevel ) ; } | Returns a log configuration module or null if none is needed . |
32,447 | public boolean hasNext ( ) { if ( ! peekingIterator . hasNext ( ) ) { return false ; } DirectionalRecord record = peekingIterator . peek ( ) ; if ( record . noResponseRecord ) { peekingIterator . next ( ) ; } return true ; } | skips no response records as they aren't portable |
32,448 | static String awaitComplete ( CloudDNS api , Job job ) { RetryableException retryableException = new RetryableException ( format ( "Job %s did not complete. Check your logs." , job . id ) , null ) ; Retryer retryer = new Retryer . Default ( 500 , 1000 , 30 ) ; while ( true ) { job = api . getStatus ( job . id ) ; if ( "COMPLETED" . equals ( job . status ) ) { return job . resultId ; } else if ( "ERROR" . equals ( job . status ) ) { throw new IllegalStateException ( format ( "Job %s failed with error: %s" , job . id , job . errorDetails ) ) ; } retryer . continueOrPropagate ( retryableException ) ; } } | Returns the ID of the object created or null . |
32,449 | static Map < String , Object > toRDataMap ( Record record ) { if ( "MX" . equals ( record . type ) ) { return MXData . create ( record . priority , record . data ( ) ) ; } else if ( "TXT" . equals ( record . type ) ) { return TXTData . create ( record . data ( ) ) ; } else if ( "SRV" . equals ( record . type ) ) { List < String > rdata = split ( ' ' , record . data ( ) ) ; return SRVData . builder ( ) . priority ( record . priority ) . weight ( Integer . valueOf ( rdata . get ( 0 ) ) ) . port ( Integer . valueOf ( rdata . get ( 1 ) ) ) . target ( rdata . get ( 2 ) ) . build ( ) ; } else if ( "SOA" . equals ( record . type ) ) { List < String > threeParts = split ( ' ' , record . data ( ) ) ; return SOAData . builder ( ) . mname ( threeParts . get ( 0 ) ) . rname ( threeParts . get ( 1 ) ) . serial ( Integer . valueOf ( threeParts . get ( 2 ) ) ) . refresh ( record . ttl ) . retry ( record . ttl ) . expire ( record . ttl ) . minimum ( record . ttl ) . build ( ) ; } else { return Util . toMap ( record . type , record . data ( ) ) ; } } | Special - cases priority field and the strange and incomplete SOA record . |
32,450 | public void stop ( ) { MessageBatcher batcher = null ; for ( String originalAppenderName : originalAsyncAppenderNameMap . keySet ( ) ) { String batcherName = AsyncAppender . class . getName ( ) + "." + originalAppenderName ; batcher = BatcherFactory . getBatcher ( batcherName ) ; if ( batcher == null ) { continue ; } batcher . stop ( ) ; } for ( String originalAppenderName : originalAsyncAppenderNameMap . keySet ( ) ) { String batcherName = AsyncAppender . class . getName ( ) + "." + originalAppenderName ; batcher = BatcherFactory . getBatcher ( batcherName ) ; if ( batcher == null ) { continue ; } BatcherFactory . removeBatcher ( batcherName ) ; } } | Shuts down blitz4j cleanly by flushing out all the async related messages . |
32,451 | public synchronized void reconfigure ( Properties props ) { Properties newOverrideProps = new Properties ( ) ; for ( Entry < Object , Object > prop : props . entrySet ( ) ) { if ( isLog4JProperty ( prop . getKey ( ) . toString ( ) ) ) { Object initialValue = initialProps . get ( prop . getKey ( ) ) ; if ( initialValue == null || ! initialValue . equals ( prop . getValue ( ) ) ) { newOverrideProps . put ( prop . getKey ( ) , prop . getValue ( ) ) ; } } } if ( ! overrideProps . equals ( newOverrideProps ) ) { this . overrideProps . clear ( ) ; this . overrideProps . putAll ( newOverrideProps ) ; reConfigureAsynchronously ( ) ; } } | Set a snapshot of all LOG4J properties and reconfigure if properties have been changed . |
32,452 | private void reConfigureAsynchronously ( ) { refreshCount . incrementAndGet ( ) ; if ( pendingRefreshes . incrementAndGet ( ) == 1 ) { executorPool . submit ( new Runnable ( ) { public void run ( ) { do { try { Thread . sleep ( MIN_DELAY_BETWEEN_REFRESHES ) ; logger . info ( "Configuring log4j dynamically" ) ; reconfigure ( ) ; } catch ( Exception th ) { logger . error ( "Cannot dynamically configure log4j :" , th ) ; } } while ( 0 != pendingRefreshes . getAndSet ( 0 ) ) ; } } ) ; } } | Refresh the configuration asynchronously |
32,453 | private void reconfigure ( ) throws ConfigurationException , FileNotFoundException { Properties consolidatedProps = getConsolidatedProperties ( ) ; logger . info ( "The root category for log4j.rootCategory now is {}" , consolidatedProps . getProperty ( LOG4J_ROOT_CATEGORY ) ) ; logger . info ( "The root category for log4j.rootLogger now is {}" , consolidatedProps . getProperty ( LOG4J_ROOT_LOGGER ) ) ; for ( String originalAppenderName : originalAsyncAppenderNameMap . keySet ( ) ) { MessageBatcher asyncBatcher = BatcherFactory . getBatcher ( AsyncAppender . class . getName ( ) + "." + originalAppenderName ) ; if ( asyncBatcher == null ) { continue ; } asyncBatcher . pause ( ) ; } configureLog4j ( consolidatedProps ) ; for ( String originalAppenderName : originalAsyncAppenderNameMap . keySet ( ) ) { MessageBatcher asyncBatcher = BatcherFactory . getBatcher ( AsyncAppender . class . getName ( ) + "." + originalAppenderName ) ; if ( asyncBatcher == null ) { continue ; } asyncBatcher . resume ( ) ; } } | Reconfigure log4j at run - time . |
32,454 | private void configureLog4j ( Properties props ) throws ConfigurationException , FileNotFoundException { if ( blitz4jConfig . shouldUseLockFree ( ) && ( props . getProperty ( LOG4J_LOGGER_FACTORY ) == null ) ) { props . setProperty ( LOG4J_LOGGER_FACTORY , LOG4J_FACTORY_IMPL ) ; } convertConfiguredAppendersToAsync ( props ) ; clearAsyncAppenderList ( ) ; logger . info ( "Configuring log4j with properties :" + props ) ; PropertyConfigurator . configure ( props ) ; } | Configure log4j with the given properties . |
32,455 | private void closeNonexistingAsyncAppenders ( ) { org . apache . log4j . Logger rootLogger = LogManager . getRootLogger ( ) ; if ( NFLockFreeLogger . class . isInstance ( rootLogger ) ) { ( ( NFLockFreeLogger ) rootLogger ) . reconcileAppenders ( ) ; } Enumeration enums = LogManager . getCurrentLoggers ( ) ; while ( enums . hasMoreElements ( ) ) { Object myLogger = enums . nextElement ( ) ; if ( NFLockFreeLogger . class . isInstance ( myLogger ) ) { ( ( NFLockFreeLogger ) myLogger ) . reconcileAppenders ( ) ; } } } | Closes any asynchronous appenders that were not removed during configuration . |
32,456 | public Logger getOrCreateLogger ( String clazz ) { Logger logger = appenderLoggerMap . get ( clazz ) ; if ( logger == null ) { logger = Logger . getLogger ( clazz ) ; appenderLoggerMap . put ( clazz , logger ) ; } return logger ; } | Get the logger to be used for the given class . |
32,457 | public void setProcessorMaxThreads ( int maxThreads ) { if ( processor . getCorePoolSize ( ) > maxThreads ) { processor . setCorePoolSize ( maxThreads ) ; } processor . setMaximumPoolSize ( maxThreads ) ; } | Set the max threads for the processors |
32,458 | public boolean process ( T message ) { if ( isShutDown ) { return false ; } try { queueSizeTracer . record ( queue . size ( ) ) ; } catch ( Throwable ignored ) { } if ( ! queue . offer ( message ) ) { numberDropped . incrementAndGet ( ) ; queueOverflowCounter . increment ( ) ; return false ; } numberAdded . incrementAndGet ( ) ; return true ; } | Processes the message sent to the batcher . This method just writes the message to the queue and returns immediately . If the queue is full the messages are dropped immediately and corresponding counter is incremented . |
32,459 | public void processSync ( T message ) { if ( isShutDown ) { return ; } try { queueSizeTracer . record ( queue . size ( ) ) ; } catch ( Throwable ignored ) { } try { Stopwatch s = batchSyncPutTracer . start ( ) ; queue . put ( message ) ; s . stop ( ) ; } catch ( InterruptedException e ) { return ; } numberAdded . incrementAndGet ( ) ; } | Processes the message sent to the batcher . This method tries to write to the queue . If the queue is full the send blocks and waits for the available space . |
32,460 | public void process ( List < T > objects ) { for ( T message : objects ) { if ( isShutDown ) { return ; } process ( message ) ; } } | Processes the messages sent to the batcher . This method just writes the message to the queue and returns immediately . If the queue is full the messages are dropped immediately and corresponding counter is incremented . |
32,461 | @ Monitor ( name = "batcherQueueSize" , type = DataSourceType . GAUGE ) public int getSize ( ) { if ( queue != null ) { return queue . size ( ) ; } else { return 0 ; } } | The size of the queue in which the messages are batched |
32,462 | public void reconcileAppenders ( ) { for ( Appender appender : appenderList ) { if ( ! configuredAppenderList . contains ( appender . getName ( ) ) ) { appender . close ( ) ; appenderList . remove ( appender ) ; } } } | Reconciles the appender list after configuration to ensure that the asynchronous appenders are not left over after the configuration . This is needed because the appenders are not cleaned out completely during configuration for it to retain the ability to not lose messages . |
32,463 | public static MessageBatcher getBatcher ( String name ) { MessageBatcher batcher = batcherMap . get ( name ) ; return batcher ; } | Get a batcher by name |
32,464 | public static MessageBatcher createBatcher ( String name , MessageProcessor processor ) { MessageBatcher batcher = batcherMap . get ( name ) ; if ( batcher == null ) { synchronized ( BatcherFactory . class ) { batcher = batcherMap . get ( name ) ; if ( batcher == null ) { batcher = new MessageBatcher ( name , processor ) ; batcherMap . put ( name , batcher ) ; } } } return batcher ; } | Creates the batcher . The user needs to make sure another batcher already exists before they create one . |
32,465 | public StackTraceElement getStackTraceElement ( Class stackClass ) { Stopwatch s = stackTraceTimer . start ( ) ; Throwable t = new Throwable ( ) ; StackTraceElement [ ] stArray = t . getStackTrace ( ) ; int stackSize = stArray . length ; StackTraceElement st = null ; for ( int i = 0 ; i < stackSize ; i ++ ) { boolean found = false ; while ( stArray [ i ] . getClassName ( ) . equals ( stackClass . getName ( ) ) ) { ++ i ; found = true ; } if ( found ) { st = stArray [ i ] ; } } s . stop ( ) ; return st ; } | Gets the starting calling stack trace element of a given stack which matches the given class name . Given the wrapper class name the match continues until the last stack trace element of the wrapper class is matched . |
32,466 | public LocationInfo getLocationInfo ( Class wrapperClassName ) { LocationInfo locationInfo = null ; try { if ( stackLocal . get ( ) == null ) { stackLocal . set ( this . getStackTraceElement ( wrapperClassName ) ) ; } locationInfo = new LocationInfo ( stackLocal . get ( ) . getFileName ( ) , stackLocal . get ( ) . getClassName ( ) , stackLocal . get ( ) . getMethodName ( ) , stackLocal . get ( ) . getLineNumber ( ) + "" ) ; } catch ( Throwable e ) { if ( CONFIGURATION . shouldPrintLoggingErrors ( ) ) { e . printStackTrace ( ) ; } } return locationInfo ; } | Get the location information of the calling class |
32,467 | public LocationInfo generateLocationInfo ( LoggingEvent event ) { if ( event != loggingEvent . get ( ) ) { loggingEvent . set ( event ) ; clearLocationInfo ( ) ; } LocationInfo locationInfo = null ; try { if ( isUsingNFPatternLayout ( event . getLogger ( ) ) ) { locationInfo = LoggingContext . getInstance ( ) . getLocationInfo ( Class . forName ( event . getFQNOfLoggerClass ( ) ) ) ; if ( locationInfo != null ) { MDC . put ( LOCATION_INFO , locationInfo ) ; } } } catch ( Throwable e ) { if ( CONFIGURATION != null && CONFIGURATION . shouldPrintLoggingErrors ( ) ) { e . printStackTrace ( ) ; } } return locationInfo ; } | Generate the location information of the given logging event and cache it . |
32,468 | private void initBatcher ( String appenderName ) { MessageProcessor < LoggingEvent > messageProcessor = new MessageProcessor < LoggingEvent > ( ) { public void process ( List < LoggingEvent > objects ) { processLoggingEvents ( objects ) ; } } ; String batcherName = this . getClass ( ) . getName ( ) + BATCHER_NAME_LIMITER + appenderName ; batcher = BatcherFactory . createBatcher ( batcherName , messageProcessor ) ; batcher . setTarget ( messageProcessor ) ; } | Initialize the batcher that stores the messages and calls the underlying appenders . |
32,469 | private void processLoggingEvents ( List < LoggingEvent > loggingEvents ) { while ( appenders . getAllAppenders ( ) == null ) { if ( ( batcher == null ) || ( batcher . isPaused ( ) ) ) { try { Thread . sleep ( SLEEP_TIME_MS ) ; } catch ( InterruptedException ignore ) { } continue ; } org . apache . log4j . Logger asyncLogger = LoggerCache . getInstance ( ) . getOrCreateLogger ( LOGGER_ASYNC_APPENDER ) ; Appender originalAppender = asyncLogger . getAppender ( originalAppenderName ) ; if ( originalAppender == null ) { try { Thread . sleep ( SLEEP_TIME_MS ) ; } catch ( InterruptedException ignore ) { } continue ; } appenders . addAppender ( originalAppender ) ; } for ( Iterator < Entry < String , LogSummary > > iter = logSummaryMap . entrySet ( ) . iterator ( ) ; iter . hasNext ( ) ; ) { Entry < String , LogSummary > mapEntry = ( Entry < String , LogSummary > ) iter . next ( ) ; if ( batcher . isSpaceAvailable ( ) ) { LogSummary logSummary = mapEntry . getValue ( ) ; LoggingEvent event = logSummary . createEvent ( ) ; if ( batcher . process ( event ) ) { iter . remove ( ) ; } else { break ; } } else { break ; } } for ( LoggingEvent event : loggingEvents ) { appenders . appendLoopOnAppenders ( event ) ; } } | Process the logging events . This is called by the batcher . |
32,470 | private Counter initAndRegisterCounter ( String name ) { BasicCounter counter = new BasicCounter ( MonitorConfig . builder ( name ) . build ( ) ) ; DefaultMonitorRegistry . getInstance ( ) . register ( counter ) ; return counter ; } | Construct a new Counter register it and then return it . |
32,471 | private boolean putInBuffer ( final LoggingEvent event ) { putInBufferCounter . increment ( ) ; Stopwatch t = putBufferTimeTracer . start ( ) ; boolean hasPut = false ; if ( batcher . process ( event ) ) { hasPut = true ; } else { hasPut = false ; } t . stop ( ) ; return hasPut ; } | Puts the logging events to the in - memory buffer . |
32,472 | public void add ( long n ) { int index = Arrays . binarySearch ( bucketOffsets , n ) ; if ( index < 0 ) { index = - index - 1 ; } buckets . incrementAndGet ( index ) ; } | Increments the count of the bucket closest to n rounding UP . |
32,473 | public < R > Connection < CL > getConnectionForOperation ( BaseOperation < CL , R > baseOperation ) { return selectionStrategy . getConnection ( baseOperation , cpConfiguration . getMaxTimeoutWhenExhausted ( ) , TimeUnit . MILLISECONDS ) ; } | Use with EXTREME CAUTION . Connection that is borrowed must be returned else we will have connection pool exhaustion |
32,474 | private Connection < CL > getConnectionForTokenOnRackNoFallback ( BaseOperation < CL , ? > op , Long token , String rack , int duration , TimeUnit unit , RetryPolicy retry ) throws NoAvailableHostsException , PoolExhaustedException , PoolTimeoutException , PoolOfflineException { DynoConnectException lastEx = null ; HostSelectionStrategy < CL > selector = findSelectorForRack ( rack ) ; HostConnectionPool < CL > hostPool = selector . getPoolForToken ( token ) ; if ( hostPool != null ) { try { return hostPool . borrowConnection ( duration , unit ) ; } catch ( PoolTimeoutException pte ) { lastEx = pte ; cpMonitor . incOperationFailure ( null , pte ) ; } } if ( lastEx == null ) { throw new PoolOfflineException ( hostPool == null ? null : hostPool . getHost ( ) , "host pool is offline and we are forcing no fallback" ) ; } else { throw lastEx ; } } | Should be called when a connection is required on that particular zone with no fall backs what so ever |
32,475 | public void initWithHosts ( Map < Host , HostConnectionPool < CL > > hPools ) { List < HostToken > allHostTokens = tokenSupplier . getTokens ( hPools . keySet ( ) ) ; Map < HostToken , HostConnectionPool < CL > > tokenPoolMap = new HashMap < HostToken , HostConnectionPool < CL > > ( ) ; for ( HostToken hToken : allHostTokens ) { hostTokens . put ( hToken . getHost ( ) , hToken ) ; tokenPoolMap . put ( hToken , hPools . get ( hToken . getHost ( ) ) ) ; } Map < HostToken , HostConnectionPool < CL > > localPools = getHostPoolsForRack ( tokenPoolMap , localRack ) ; localSelector . initWithHosts ( localPools ) ; if ( localSelector . isTokenAware ( ) && localRack != null ) { replicationFactor . set ( calculateReplicationFactor ( allHostTokens ) ) ; } Set < String > remoteRacks = new HashSet < String > ( ) ; for ( Host host : hPools . keySet ( ) ) { String rack = host . getRack ( ) ; if ( localRack != null && ! localRack . isEmpty ( ) && rack != null && ! rack . isEmpty ( ) && ! localRack . equals ( rack ) ) { remoteRacks . add ( rack ) ; } } for ( String rack : remoteRacks ) { Map < HostToken , HostConnectionPool < CL > > dcPools = getHostPoolsForRack ( tokenPoolMap , rack ) ; HostSelectionStrategy < CL > remoteSelector = selectorFactory . vendPoolSelectionStrategy ( ) ; remoteSelector . initWithHosts ( dcPools ) ; remoteRackSelectors . put ( rack , remoteSelector ) ; } remoteDCNames . swapWithList ( remoteRackSelectors . keySet ( ) ) ; topology . set ( createTokenPoolTopology ( allHostTokens ) ) ; } | hPools comes from discovery . |
32,476 | private void checkKey ( final byte [ ] key ) { if ( theBinaryKey . get ( ) != null ) { verifyKey ( key ) ; } else { boolean success = theBinaryKey . compareAndSet ( null , key ) ; if ( ! success ) { verifyKey ( key ) ; } else { pipelined ( key ) ; } } } | Checks that a pipeline is associated with a single key . Binary keys do not support hashtags . |
32,477 | private void checkKey ( final String key ) { String hashtag = connPool . getConfiguration ( ) . getHashtag ( ) ; if ( hashtag == null || hashtag . isEmpty ( ) ) { if ( theKey . get ( ) != null ) { verifyKey ( key ) ; } else { boolean success = theKey . compareAndSet ( null , key ) ; if ( ! success ) { verifyKey ( key ) ; } else { pipelined ( key ) ; } } } else { String hashValue = StringUtils . substringBetween ( key , Character . toString ( hashtag . charAt ( 0 ) ) , Character . toString ( hashtag . charAt ( 1 ) ) ) ; if ( Strings . isNullOrEmpty ( hashValue ) ) { hashValue = key ; } checkHashtag ( key , hashValue ) ; } } | Checks that a pipeline is associated with a single key . If there is a hashtag defined in the first host of the connectionpool then we check that first . |
32,478 | private void verifyKey ( final String key ) { if ( ! theKey . get ( ) . equals ( key ) ) { try { throw new RuntimeException ( "Must have same key for Redis Pipeline in Dynomite. This key: " + key ) ; } finally { discardPipelineAndReleaseConnection ( ) ; } } } | Verifies key with pipeline key |
32,479 | public static int hash32 ( final byte [ ] data , int length , int seed ) { final int m = 0x5bd1e995 ; final int r = 24 ; int h = seed ^ length ; int length4 = length / 4 ; for ( int i = 0 ; i < length4 ; i ++ ) { final int i4 = i * 4 ; int k = ( data [ i4 + 0 ] & 0xff ) + ( ( data [ i4 + 1 ] & 0xff ) << 8 ) + ( ( data [ i4 + 2 ] & 0xff ) << 16 ) + ( ( data [ i4 + 3 ] & 0xff ) << 24 ) ; k *= m ; k ^= k >>> r ; k *= m ; h *= m ; h ^= k ; } switch ( length % 4 ) { case 3 : h ^= ( data [ ( length & ~ 3 ) + 2 ] & 0xff ) << 16 ; case 2 : h ^= ( data [ ( length & ~ 3 ) + 1 ] & 0xff ) << 8 ; case 1 : h ^= ( data [ length & ~ 3 ] & 0xff ) ; h *= m ; } h ^= h >>> 13 ; h *= m ; h ^= h >>> 15 ; return h ; } | Generates 32 bit hash from byte array of the given length and seed . |
32,480 | public static int hash32 ( final String text ) { final byte [ ] bytes = text . getBytes ( ) ; return hash32 ( bytes , bytes . length ) ; } | Generates 32 bit hash from a string . |
32,481 | public static int hash32 ( final String text , int from , int length ) { return hash32 ( text . substring ( from , from + length ) ) ; } | Generates 32 bit hash from a substring . |
32,482 | public static long hash64 ( final String text ) { final byte [ ] bytes = text . getBytes ( ) ; return hash64 ( bytes , bytes . length ) ; } | Generates 64 bit hash from a string . |
32,483 | public static long hash64 ( final String text , int from , int length ) { return hash64 ( text . substring ( from , from + length ) ) ; } | Generates 64 bit hash from a substring . |
32,484 | public static byte [ ] compressBytesNonBase64 ( byte [ ] value ) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream ( value . length ) ; try ( GZIPOutputStream gos = new GZIPOutputStream ( baos ) ) { gos . write ( value ) ; } byte [ ] compressed = baos . toByteArray ( ) ; baos . close ( ) ; return compressed ; } | Encodes the given byte array and then GZIP compresses it . |
32,485 | public static byte [ ] decompressBytesNonBase64 ( byte [ ] compressed ) throws IOException { ByteArrayInputStream is = new ByteArrayInputStream ( compressed ) ; try ( InputStream gis = new GZIPInputStream ( is ) ) { return IOUtils . toByteArray ( gis ) ; } } | Decompresses the given byte array without transforming it into a String |
32,486 | public static String decompressStringNonBase64 ( byte [ ] compressed ) throws IOException { ByteArrayInputStream is = new ByteArrayInputStream ( compressed ) ; try ( InputStream gis = new GZIPInputStream ( is ) ) { return new String ( IOUtils . toByteArray ( gis ) , StandardCharsets . UTF_8 ) ; } } | Decompresses the given byte array |
32,487 | public static String compressStringToBase64String ( String value ) throws IOException { return new String ( Base64 . encode ( compressString ( value ) ) , StandardCharsets . UTF_8 ) ; } | Encodes the given string with Base64 encoding and then GZIP compresses it . Returns result as a Base64 encoded string . |
32,488 | public static String decompressString ( byte [ ] compressed ) throws IOException { ByteArrayInputStream is = new ByteArrayInputStream ( compressed ) ; try ( InputStream gis = new GZIPInputStream ( is ) ) { return new String ( Base64 . decode ( IOUtils . toByteArray ( gis ) ) , StandardCharsets . UTF_8 ) ; } } | Decompresses the given byte array and decodes with Base64 decoding |
32,489 | public static String decompressFromBase64String ( String compressed ) throws IOException { return decompressString ( Base64 . decode ( compressed . getBytes ( StandardCharsets . UTF_8 ) ) ) ; } | Given a Base64 encoded String decompresses it . |
32,490 | public static boolean isCompressed ( byte [ ] bytes ) throws IOException { return bytes != null && bytes . length >= 2 && bytes [ 0 ] == ( byte ) ( GZIPInputStream . GZIP_MAGIC ) && bytes [ 1 ] == ( byte ) ( GZIPInputStream . GZIP_MAGIC >> 8 ) ; } | Determines if a byte array is compressed . The java . util . zip GZip implementation does not expose the GZip header so it is difficult to determine if a string is compressed . |
32,491 | public static boolean isCompressed ( InputStream inputStream ) throws IOException { ByteArrayOutputStream buffer = new ByteArrayOutputStream ( ) ; byte [ ] data = new byte [ 2 ] ; int nRead = inputStream . read ( data , 0 , 2 ) ; buffer . write ( data , 0 , nRead ) ; buffer . flush ( ) ; return isCompressed ( buffer . toByteArray ( ) ) ; } | Determines if an InputStream is compressed . The java . util . zip GZip implementation does not expose the GZip header so it is difficult to determine if a string is compressed . |
32,492 | public boolean inactiveSetChanged ( Collection < Host > hostsUp , Collection < Host > hostsDown ) { boolean newInactiveHostsFound = false ; for ( Host hostDown : hostsDown ) { if ( activeHosts . contains ( hostDown ) ) { newInactiveHostsFound = true ; break ; } } Set < Host > prevActiveHosts = new HashSet < Host > ( activeHosts ) ; prevActiveHosts . removeAll ( hostsUp ) ; newInactiveHostsFound = ! prevActiveHosts . isEmpty ( ) ; return newInactiveHostsFound ; } | This check is more involved than the active set check . Here we 2 conditions to check for |
32,493 | public HostStatusTracker computeNewHostStatus ( Collection < Host > hostsUp , Collection < Host > hostsDown ) { verifyMutuallyExclusive ( hostsUp , hostsDown ) ; Set < Host > nextActiveHosts = new HashSet < Host > ( hostsUp ) ; Set < Host > nextInactiveHosts = new HashSet < Host > ( hostsDown ) ; Set < Host > union = new HashSet < > ( hostsUp ) ; union . addAll ( hostsDown ) ; if ( ! union . containsAll ( inactiveHosts ) ) { logger . info ( "REMOVING at least one inactive host from {} b/c it is no longer reported by HostSupplier" , inactiveHosts ) ; inactiveHosts . retainAll ( union ) ; } nextInactiveHosts . addAll ( inactiveHosts ) ; for ( Host host : nextActiveHosts ) { nextInactiveHosts . remove ( host ) ; } Set < Host > prevActiveHosts = new HashSet < Host > ( activeHosts ) ; prevActiveHosts . removeAll ( hostsUp ) ; nextInactiveHosts . addAll ( prevActiveHosts ) ; for ( Host host : nextActiveHosts ) { host . setStatus ( Status . Up ) ; } for ( Host host : nextInactiveHosts ) { host . setStatus ( Status . Down ) ; } return new HostStatusTracker ( nextActiveHosts , nextInactiveHosts ) ; } | Helper method that actually changes the state of the class to reflect the new set of hosts up and down Note that the new HostStatusTracker is returned that holds onto the new state . Calling classes must update their references to use the new HostStatusTracker |
32,494 | public void swapWithList ( Collection < T > newList ) { InnerList newInnerList = new InnerList ( newList ) ; ref . set ( newInnerList ) ; } | Swap the entire inner list with a new list |
32,495 | public synchronized void addElement ( T element ) { List < T > origList = ref . get ( ) . list ; boolean isPresent = origList . contains ( element ) ; if ( isPresent ) { return ; } List < T > newList = new ArrayList < T > ( origList ) ; newList . add ( element ) ; swapWithList ( newList ) ; } | Add an element to the list . This causes the inner list to be swapped out |
32,496 | public synchronized void removeElement ( T element ) { List < T > origList = ref . get ( ) . list ; boolean isPresent = origList . contains ( element ) ; if ( ! isPresent ) { return ; } List < T > newList = new ArrayList < T > ( origList ) ; newList . remove ( element ) ; swapWithList ( newList ) ; } | Remove an element from this list . This causes the inner list to be swapped out |
32,497 | public List < T > getEntireList ( ) { InnerList iList = ref . get ( ) ; return iList != null ? iList . getList ( ) : null ; } | Helpful utility to access the inner list . Must be used with care since the inner list can change . |
32,498 | public int getSize ( ) { InnerList iList = ref . get ( ) ; return iList != null ? iList . getList ( ) . size ( ) : 0 ; } | Gets the size of the bounded list underneath . Note that this num can change if the inner list is swapped out . |
32,499 | public static int hash ( byte [ ] data , int offset , int length , int seed ) { return hash ( ByteBuffer . wrap ( data , offset , length ) , seed ) ; } | Hashes bytes in part of an array . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.