idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
32,400
/**
 * Reconfigure log4j at run-time with the consolidated set of properties.
 * Async batchers are paused before log4j is reconfigured so their appenders
 * are not accessed mid-configuration, and resumed afterwards.
 *
 * @throws ConfigurationException   if log4j configuration fails
 * @throws FileNotFoundException    if the properties source cannot be found
 */
private void reconfigure() throws ConfigurationException, FileNotFoundException {
    Properties consolidatedProps = getConsolidatedProperties();
    logger.info("The root category for log4j.rootCategory now is {}",
            consolidatedProps.getProperty(LOG4J_ROOT_CATEGORY));
    logger.info("The root category for log4j.rootLogger now is {}",
            consolidatedProps.getProperty(LOG4J_ROOT_LOGGER));
    // Pause the async appenders so that the appenders are not accessed
    // while log4j is being reconfigured.
    for (String originalAppenderName : originalAsyncAppenderNameMap.keySet()) {
        MessageBatcher asyncBatcher = BatcherFactory.getBatcher(
                AsyncAppender.class.getName() + "." + originalAppenderName);
        if (asyncBatcher == null) {
            continue; // no batcher registered for this appender (yet)
        }
        asyncBatcher.pause();
    }
    // Configure log4j using the new set of properties
    configureLog4j(consolidatedProps);
    // Resume all the batchers to continue logging
    for (String originalAppenderName : originalAsyncAppenderNameMap.keySet()) {
        MessageBatcher asyncBatcher = BatcherFactory.getBatcher(
                AsyncAppender.class.getName() + "." + originalAppenderName);
        if (asyncBatcher == null) {
            continue;
        }
        asyncBatcher.resume();
    }
}
Reconfigure log4j at run-time.
314
11
32,401
/**
 * Configure log4j with the given properties, installing the lock-free
 * logger factory when enabled and converting configured appenders to async.
 */
private void configureLog4j(Properties props) throws ConfigurationException, FileNotFoundException {
    boolean useLockFree = blitz4jConfig.shouldUseLockFree();
    boolean factoryUnset = props.getProperty(LOG4J_LOGGER_FACTORY) == null;
    // Only install our factory when the configuration has not named one.
    if (useLockFree && factoryUnset) {
        props.setProperty(LOG4J_LOGGER_FACTORY, LOG4J_FACTORY_IMPL);
    }
    convertConfiguredAppendersToAsync(props);
    clearAsyncAppenderList();
    logger.info("Configuring log4j with properties :" + props);
    PropertyConfigurator.configure(props);
}
Configure log4j with the given properties .
132
10
32,402
/**
 * Reconciles the appenders of every lock-free logger (root first, then all
 * current loggers) so stale async appenders are closed.
 */
private void closeNonexistingAsyncAppenders() {
    org.apache.log4j.Logger root = LogManager.getRootLogger();
    if (root instanceof NFLockFreeLogger) {
        ((NFLockFreeLogger) root).reconcileAppenders();
    }
    // Walk every logger log4j currently knows about.
    for (Enumeration e = LogManager.getCurrentLoggers(); e.hasMoreElements(); ) {
        Object candidate = e.nextElement();
        if (candidate instanceof NFLockFreeLogger) {
            ((NFLockFreeLogger) candidate).reconcileAppenders();
        }
    }
}
Closes any asynchronous appenders that were not removed during configuration .
159
13
32,403
public Logger getOrCreateLogger ( String clazz ) { Logger logger = appenderLoggerMap . get ( clazz ) ; if ( logger == null ) { // If multiple threads do the puts, that is fine as it is a one time thing logger = Logger . getLogger ( clazz ) ; appenderLoggerMap . put ( clazz , logger ) ; } return logger ; }
Get the logger to be used for the given class .
87
11
32,404
/**
 * Set the max threads for the processor pool. The core size is shrunk first
 * so the pool never observes core > max (which would throw).
 */
public void setProcessorMaxThreads(int maxThreads) {
    if (maxThreads < processor.getCorePoolSize()) {
        processor.setCorePoolSize(maxThreads);
    }
    processor.setMaximumPoolSize(maxThreads);
}
Set the max threads for the processors
57
7
32,405
public boolean process ( T message ) { // If this batcher has been shutdown, do not accept any more messages if ( isShutDown ) { return false ; } try { queueSizeTracer . record ( queue . size ( ) ) ; } catch ( Throwable ignored ) { } if ( ! queue . offer ( message ) ) { numberDropped . incrementAndGet ( ) ; queueOverflowCounter . increment ( ) ; return false ; } numberAdded . incrementAndGet ( ) ; return true ; }
Processes the message sent to the batcher . This method just writes the message to the queue and returns immediately . If the queue is full the messages are dropped immediately and corresponding counter is incremented .
106
40
32,406
public void processSync ( T message ) { // If this batcher has been shutdown, do not accept any more messages if ( isShutDown ) { return ; } try { queueSizeTracer . record ( queue . size ( ) ) ; } catch ( Throwable ignored ) { } try { Stopwatch s = batchSyncPutTracer . start ( ) ; queue . put ( message ) ; s . stop ( ) ; } catch ( InterruptedException e ) { return ; } numberAdded . incrementAndGet ( ) ; }
Processes the message sent to the batcher . This method tries to write to the queue . If the queue is full the send blocks and waits for the available space .
110
34
32,407
public void process ( List < T > objects ) { for ( T message : objects ) { // If this batcher has been shutdown, do not accept any more // messages if ( isShutDown ) { return ; } process ( message ) ; } }
Processes the messages sent to the batcher . This method just writes the message to the queue and returns immediately . If the queue is full the messages are dropped immediately and corresponding counter is incremented .
52
40
32,408
/**
 * Gauge exposing the number of messages currently batched in the queue;
 * 0 when the queue has not been created yet.
 */
@Monitor(name = "batcherQueueSize", type = DataSourceType.GAUGE)
public int getSize() {
    return (queue == null) ? 0 : queue.size();
}
The size of the queue in which the messages are batched.
50
12
32,409
/**
 * Closes and removes every appender that is no longer in the configured
 * appender list.
 * Fix: the original removed from {@code appenderList} while for-each
 * iterating it, which risks ConcurrentModificationException on list
 * implementations that are not copy-on-write. Iterating a snapshot is safe
 * either way.
 */
public void reconcileAppenders() {
    for (Object element : appenderList.toArray()) {
        Appender appender = (Appender) element;
        if (!configuredAppenderList.contains(appender.getName())) {
            appender.close();
            appenderList.remove(appender);
        }
    }
}
Reconciles the appender list after configuration to ensure that asynchronous appenders are not left over after the configuration. This is needed because the appenders are not cleaned out completely during configuration, so that the logger retains the ability to not lose messages.
60
53
32,410
/**
 * Look up a batcher by name; returns null when none has been registered.
 */
public static MessageBatcher getBatcher(String name) {
    return batcherMap.get(name);
}
Get a batcher by name
33
6
32,411
/**
 * Returns the batcher registered under the given name, creating it if
 * absent. Double-checked locking keeps the common lookup path lock-free
 * while preventing duplicate creation.
 */
public static MessageBatcher createBatcher(String name, MessageProcessor processor) {
    MessageBatcher existing = batcherMap.get(name);
    if (existing != null) {
        return existing;
    }
    synchronized (BatcherFactory.class) {
        // Re-check under the lock: another thread may have won the race.
        MessageBatcher batcher = batcherMap.get(name);
        if (batcher == null) {
            batcher = new MessageBatcher(name, processor);
            batcherMap.put(name, batcher);
        }
        return batcher;
    }
}
Creates the batcher with the given name if one does not already exist; otherwise the existing batcher is returned.
103
22
32,412
/**
 * Finds the first stack frame after the last run of frames belonging to the
 * given wrapper class, i.e. the real caller behind the wrapper.
 * Fix: the original indexed {@code stArray[i]} after the inner while loop
 * without a bound check — if the trace ends inside a run of wrapper frames,
 * that read was an ArrayIndexOutOfBoundsException. Now guarded.
 *
 * @return the caller frame, or null when no wrapper frame was found
 */
public StackTraceElement getStackTraceElement(Class stackClass) {
    Stopwatch s = stackTraceTimer.start();
    StackTraceElement[] frames = new Throwable().getStackTrace();
    StackTraceElement result = null;
    for (int i = 0; i < frames.length; i++) {
        boolean matched = false;
        // Skip past every consecutive frame of the wrapper class...
        while (i < frames.length && frames[i].getClassName().equals(stackClass.getName())) {
            ++i;
            matched = true;
        }
        // ...and take the first frame after the run, when one exists.
        if (matched && i < frames.length) {
            result = frames[i];
        }
    }
    s.stop();
    return result;
}
Gets the starting calling stack trace element of a given stack which matches the given class name . Given the wrapper class name the match continues until the last stack trace element of the wrapper class is matched .
154
40
32,413
/**
 * Get the location information (file, class, method, line) of the class
 * calling through the given wrapper class.
 *
 * @param wrapperClassName the logging wrapper class whose frames are skipped
 * @return the caller's LocationInfo, or null when it cannot be determined
 */
public LocationInfo getLocationInfo(Class wrapperClassName) {
    LocationInfo locationInfo = null;
    try {
        // Cache the caller frame per thread; walking the stack is expensive.
        if (stackLocal.get() == null) {
            stackLocal.set(this.getStackTraceElement(wrapperClassName));
        }
        locationInfo = new LocationInfo(stackLocal.get().getFileName(),
                stackLocal.get().getClassName(),
                stackLocal.get().getMethodName(),
                stackLocal.get().getLineNumber() + "");
    } catch (Throwable e) {
        // Logging must never break the application; optionally surface the error.
        if (CONFIGURATION.shouldPrintLoggingErrors()) {
            e.printStackTrace();
        }
    }
    return locationInfo;
}
Get the location information of the calling class
156
8
32,414
/**
 * Generate the location information for the given logging event, cache it
 * per event, and publish it in the MDC under LOCATION_INFO.
 *
 * @return the location info, or null when not generated or unavailable
 */
public LocationInfo generateLocationInfo(LoggingEvent event) {
    // If the event is not the same as the cached one, clear the cache.
    if (event != loggingEvent.get()) {
        loggingEvent.set(event);
        clearLocationInfo();
    }
    LocationInfo locationInfo = null;
    try {
        // Only generate location info when NFPatternLayout is in use;
        // otherwise the stack walk is expensive and the result unused.
        if (isUsingNFPatternLayout(event.getLogger())) {
            locationInfo = LoggingContext.getInstance()
                    .getLocationInfo(Class.forName(event.getFQNOfLoggerClass()));
            if (locationInfo != null) {
                MDC.put(LOCATION_INFO, locationInfo);
            }
        }
    } catch (Throwable e) {
        // Logging must never break the application; optionally surface the error.
        if (CONFIGURATION != null && CONFIGURATION.shouldPrintLoggingErrors()) {
            e.printStackTrace();
        }
    }
    return locationInfo;
}
Generate the location information of the given logging event and cache it .
211
14
32,415
/**
 * Initialize the batcher that stores messages and drives the underlying
 * appenders via a MessageProcessor callback.
 */
private void initBatcher(String appenderName) {
    // The processor simply forwards each batch to processLoggingEvents.
    MessageProcessor<LoggingEvent> forwarder = new MessageProcessor<LoggingEvent>() {
        @Override
        public void process(List<LoggingEvent> objects) {
            processLoggingEvents(objects);
        }
    };
    String name = this.getClass().getName() + BATCHER_NAME_LIMITER + appenderName;
    batcher = BatcherFactory.createBatcher(name, forwarder);
    batcher.setTarget(forwarder);
}
Initialize the batcher that stores the messages and calls the underlying appenders .
127
16
32,416
/**
 * Process a batch of logging events; called by the batcher. Lazily resolves
 * the original appender, re-queues accumulated summary events while space
 * allows, then forwards each event to the underlying appenders.
 */
private void processLoggingEvents(List<LoggingEvent> loggingEvents) {
    // Lazy initialization of the appender. This is needed because the
    // original appenders configuration may be available only after the
    // complete log4j initialization.
    while (appenders.getAllAppenders() == null) {
        if ((batcher == null) || (batcher.isPaused())) {
            // Batcher not ready or paused (e.g. during reconfiguration): back off.
            try {
                Thread.sleep(SLEEP_TIME_MS);
            } catch (InterruptedException ignore) {
            }
            continue;
        }
        org.apache.log4j.Logger asyncLogger = LoggerCache.getInstance()
                .getOrCreateLogger(LOGGER_ASYNC_APPENDER);
        Appender originalAppender = asyncLogger.getAppender(originalAppenderName);
        if (originalAppender == null) {
            // The original appender is not configured yet; retry after a nap.
            try {
                Thread.sleep(SLEEP_TIME_MS);
            } catch (InterruptedException ignore) {
            }
            continue;
        }
        appenders.addAppender(originalAppender);
    }
    // First take the overflown summary events and put them back in the queue.
    for (Iterator<Entry<String, LogSummary>> iter = logSummaryMap.entrySet().iterator(); iter.hasNext();) {
        Entry<String, LogSummary> mapEntry = (Entry<String, LogSummary>) iter.next();
        // Only re-queue while the batcher has space; stop as soon as it is full.
        if (batcher.isSpaceAvailable()) {
            LogSummary logSummary = mapEntry.getValue();
            LoggingEvent event = logSummary.createEvent();
            // Put the event in the queue and drop it from the summary map;
            // if the put fails, stop re-queuing altogether.
            if (batcher.process(event)) {
                iter.remove();
            } else {
                break;
            }
        } else {
            break;
        }
    }
    // Forward the events from this batch to the underlying appenders.
    for (LoggingEvent event : loggingEvents) {
        appenders.appendLoopOnAppenders(event);
    }
}
Process the logging events . This is called by the batcher .
434
13
32,417
/**
 * Construct a basic counter, register it with the default monitor registry,
 * and return it.
 */
private Counter initAndRegisterCounter(String name) {
    MonitorConfig config = MonitorConfig.builder(name).build();
    BasicCounter registered = new BasicCounter(config);
    DefaultMonitorRegistry.getInstance().register(registered);
    return registered;
}
Construct a new Counter register it and then return it .
51
11
32,418
/**
 * Puts the logging event into the in-memory buffer, timing the attempt.
 * Fix: the original copied {@code batcher.process}'s boolean through a
 * redundant if/else; the result is used directly now.
 *
 * @return true iff the event was buffered
 */
private boolean putInBuffer(final LoggingEvent event) {
    putInBufferCounter.increment();
    Stopwatch t = putBufferTimeTracer.start();
    boolean hasPut = batcher.process(event);
    t.stop();
    return hasPut;
}
Puts the logging events to the in - memory buffer .
78
12
32,419
public void add ( long n ) { int index = Arrays . binarySearch ( bucketOffsets , n ) ; if ( index < 0 ) { // inexact match, take the first bucket higher than n index = - index - 1 ; } // else exact match; we're good buckets . incrementAndGet ( index ) ; }
Increments the count of the bucket closest to n rounding UP .
69
13
32,420
/**
 * Borrow a connection for the given operation via the selection strategy,
 * waiting at most the pool's configured exhaustion timeout.
 * <p>
 * Use with EXTREME CAUTION: the borrowed connection must be returned by the
 * caller, or the connection pool will be exhausted.
 */
public <R> Connection<CL> getConnectionForOperation(BaseOperation<CL, R> baseOperation) {
    return selectionStrategy.getConnection(baseOperation,
            cpConfiguration.getMaxTimeoutWhenExhausted(), TimeUnit.MILLISECONDS);
}
Use with EXTREME CAUTION . Connection that is borrowed must be returned else we will have connection pool exhaustion
59
22
32,421
/**
 * Borrow a connection for the given token on the specified rack with no
 * fallback whatsoever: if the rack's pool for that token is missing or the
 * borrow times out, an exception is thrown instead of trying another rack.
 *
 * @throws PoolOfflineException when no pool exists for the token, or the
 *         borrow was never attempted
 */
private Connection<CL> getConnectionForTokenOnRackNoFallback(BaseOperation<CL, ?> op, Long token,
        String rack, int duration, TimeUnit unit, RetryPolicy retry)
        throws NoAvailableHostsException, PoolExhaustedException, PoolTimeoutException, PoolOfflineException {
    DynoConnectException lastEx = null;
    // find the selector for that rack,
    HostSelectionStrategy<CL> selector = findSelectorForRack(rack);
    // get the host using that selector
    HostConnectionPool<CL> hostPool = selector.getPoolForToken(token);
    if (hostPool != null) {
        try {
            // Note that if a PoolExhaustedException is thrown it is caught by the calling
            // ConnectionPoolImpl#executeXXX() method
            return hostPool.borrowConnection(duration, unit);
        } catch (PoolTimeoutException pte) {
            lastEx = pte;
            cpMonitor.incOperationFailure(null, pte);
        }
    }
    if (lastEx == null) {
        // No pool for the token (or borrow never attempted): report offline.
        throw new PoolOfflineException(hostPool == null ? null : hostPool.getHost(),
                "host pool is offline and we are forcing no fallback");
    } else {
        throw lastEx;
    }
}
Should be called when a connection is required on that particular zone with no fall backs what so ever
261
19
32,422
/**
 * Initialize the selection strategies from the host pools supplied by
 * discovery: resolves a token per host, wires up the local-rack selector,
 * then builds one remote selector per distinct remote rack.
 */
public void initWithHosts(Map<Host, HostConnectionPool<CL>> hPools) {
    // Get the list of tokens for these hosts
    //tokenSupplier.initWithHosts(hPools.keySet());
    List<HostToken> allHostTokens = tokenSupplier.getTokens(hPools.keySet());
    Map<HostToken, HostConnectionPool<CL>> tokenPoolMap = new HashMap<HostToken, HostConnectionPool<CL>>();
    // Update inner state with the host tokens.
    for (HostToken hToken : allHostTokens) {
        hostTokens.put(hToken.getHost(), hToken);
        tokenPoolMap.put(hToken, hPools.get(hToken.getHost()));
    }
    // Initialize Local selector
    Map<HostToken, HostConnectionPool<CL>> localPools = getHostPoolsForRack(tokenPoolMap, localRack);
    localSelector.initWithHosts(localPools);
    if (localSelector.isTokenAware() && localRack != null) {
        replicationFactor.set(calculateReplicationFactor(allHostTokens));
    }
    // Initialize Remote selectors: collect every non-empty rack other than
    // the local one.
    Set<String> remoteRacks = new HashSet<String>();
    for (Host host : hPools.keySet()) {
        String rack = host.getRack();
        if (localRack != null && !localRack.isEmpty() && rack != null && !rack.isEmpty()
                && !localRack.equals(rack)) {
            remoteRacks.add(rack);
        }
    }
    for (String rack : remoteRacks) {
        Map<HostToken, HostConnectionPool<CL>> dcPools = getHostPoolsForRack(tokenPoolMap, rack);
        HostSelectionStrategy<CL> remoteSelector = selectorFactory.vendPoolSelectionStrategy();
        remoteSelector.initWithHosts(dcPools);
        remoteRackSelectors.put(rack, remoteSelector);
    }
    // Publish the remote rack names and the refreshed topology snapshot.
    remoteDCNames.swapWithList(remoteRackSelectors.keySet());
    topology.set(createTokenPoolTopology(allHostTokens));
}
hPools comes from discovery .
495
7
32,423
private void checkKey ( final byte [ ] key ) { if ( theBinaryKey . get ( ) != null ) { verifyKey ( key ) ; } else { boolean success = theBinaryKey . compareAndSet ( null , key ) ; if ( ! success ) { // someone already beat us to it. that's fine, just verify // that the key is the same verifyKey ( key ) ; } else { pipelined ( key ) ; } } }
Checks that a pipeline is associated with a single key . Binary keys do not support hashtags .
98
20
32,424
/**
 * Checks that this pipeline is associated with a single key. If a hashtag is
 * defined on the connection pool's configuration, keys are grouped by the
 * value between the hashtag delimiters instead of by the full key.
 */
private void checkKey(final String key) {
    /*
     * Get the hashtag from the connection pool configuration. We cannot use the
     * connection object because as of now we have not selected a connection. A
     * connection is selected based on the key or hashtag respectively.
     */
    String hashtag = connPool.getConfiguration().getHashtag();
    if (hashtag == null || hashtag.isEmpty()) {
        if (theKey.get() != null) {
            verifyKey(key);
        } else {
            boolean success = theKey.compareAndSet(null, key);
            if (!success) {
                // someone already beat us to it. that's fine, just verify
                // that the key is the same
                verifyKey(key);
            } else {
                pipelined(key);
            }
        }
    } else {
        /*
         * We have identified a hashtag in the configuration. That means Dynomite
         * has a defined hashtag: extract the value between the two delimiter
         * characters and use it as the reference for the pipeline.
         */
        String hashValue = StringUtils.substringBetween(key, Character.toString(hashtag.charAt(0)),
                Character.toString(hashtag.charAt(1)));
        if (Strings.isNullOrEmpty(hashValue)) {
            // No delimited section found: fall back to the whole key.
            hashValue = key;
        }
        checkHashtag(key, hashValue);
    }
}
Checks that a pipeline is associated with a single key . If there is a hashtag defined in the first host of the connectionpool then we check that first .
289
32
32,425
/**
 * Verifies the given key against the pipeline's key; on mismatch the
 * pipeline is discarded and an exception is raised.
 */
private void verifyKey(final String key) {
    if (theKey.get().equals(key)) {
        return;
    }
    try {
        throw new RuntimeException("Must have same key for Redis Pipeline in Dynomite. This key: " + key);
    } finally {
        // Always tear the pipeline down before the exception propagates.
        discardPipelineAndReleaseConnection();
    }
}
Verifies key with pipeline key
71
6
32,426
/**
 * Generates a 32-bit Murmur2 hash from the first {@code length} bytes of the
 * array using the given seed.
 */
public static int hash32(final byte[] data, int length, int seed) {
    // Murmur2 mixing constants (not magic, just empirically good).
    final int m = 0x5bd1e995;
    final int r = 24;

    int h = seed ^ length;

    // Mix the input four bytes at a time.
    final int fullWords = length / 4;
    for (int w = 0; w < fullWords; w++) {
        final int base = w * 4;
        int k = (data[base] & 0xff)
                | ((data[base + 1] & 0xff) << 8)
                | ((data[base + 2] & 0xff) << 16)
                | ((data[base + 3] & 0xff) << 24);
        k *= m;
        k ^= k >>> r;
        k *= m;
        h *= m;
        h ^= k;
    }

    // Fold in the 1-3 trailing bytes, mirroring the original switch fall-through.
    final int tail = length & ~3;
    final int rem = length % 4;
    if (rem >= 3) {
        h ^= (data[tail + 2] & 0xff) << 16;
    }
    if (rem >= 2) {
        h ^= (data[tail + 1] & 0xff) << 8;
    }
    if (rem >= 1) {
        h ^= data[tail] & 0xff;
        h *= m;
    }

    // Final avalanche.
    h ^= h >>> 13;
    h *= m;
    h ^= h >>> 15;
    return h;
}
Generates 32 bit hash from byte array of the given length and seed .
325
15
32,427
/**
 * Generates a 32-bit hash from a string.
 * NOTE(review): getBytes() uses the platform default charset, so hashes of
 * non-ASCII text can differ across JVMs/platforms. Left as-is because
 * switching to an explicit charset would change hash values that callers may
 * have persisted — confirm before changing.
 */
public static int hash32(final String text) {
    final byte[] bytes = text.getBytes();
    return hash32(bytes, bytes.length);
}
Generates 32 bit hash from a string .
36
9
32,428
/**
 * Generates a 32-bit hash from the substring starting at {@code from} with
 * the given length.
 */
public static int hash32(final String text, int from, int length) {
    final String window = text.substring(from, from + length);
    return hash32(window);
}
Generates 32 bit hash from a substring .
35
10
32,429
/**
 * Generates a 64-bit hash from a string (bytes in the platform default
 * charset, matching the 32-bit variant).
 */
public static long hash64(final String text) {
    final byte[] raw = text.getBytes();
    return hash64(raw, raw.length);
}
Generates 64 bit hash from a string .
36
9
32,430
/**
 * Generates a 64-bit hash from the substring starting at {@code from} with
 * the given length.
 */
public static long hash64(final String text, int from, int length) {
    final String window = text.substring(from, from + length);
    return hash64(window);
}
Generates 64 bit hash from a substring .
35
10
32,431
/**
 * GZIP-compresses the given byte array (no Base64 step, despite the naming
 * of the sibling methods).
 * Fix: dropped the explicit {@code baos.close()} — closing a
 * ByteArrayOutputStream is a documented no-op, and the try-with-resources
 * already finishes the GZIP stream.
 *
 * @param value raw bytes to compress
 * @return the GZIP-compressed bytes
 * @throws IOException if compression fails
 */
public static byte[] compressBytesNonBase64(byte[] value) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream(value.length);
    try (GZIPOutputStream gos = new GZIPOutputStream(baos)) {
        gos.write(value);
    }
    return baos.toByteArray();
}
GZIP compresses the given byte array.
94
15
32,432
/**
 * Decompresses the given GZIP byte array without any string or Base64
 * transformation.
 */
public static byte[] decompressBytesNonBase64(byte[] compressed) throws IOException {
    // Wrap the compressed bytes in a GZIP stream and drain it fully.
    try (InputStream gis = new GZIPInputStream(new ByteArrayInputStream(compressed))) {
        return IOUtils.toByteArray(gis);
    }
}
Decompresses the given byte array without transforming it into a String
70
13
32,433
/**
 * Decompresses the given GZIP byte array and decodes the result as UTF-8
 * text (no Base64 step).
 */
public static String decompressStringNonBase64(byte[] compressed) throws IOException {
    try (InputStream gis = new GZIPInputStream(new ByteArrayInputStream(compressed))) {
        byte[] plain = IOUtils.toByteArray(gis);
        return new String(plain, StandardCharsets.UTF_8);
    }
}
Decompresses the given byte array
81
7
32,434
/**
 * Compresses the given string via {@code compressString} and renders the
 * compressed bytes as a Base64 string.
 */
public static String compressStringToBase64String(String value) throws IOException {
    byte[] encoded = Base64.encode(compressString(value));
    return new String(encoded, StandardCharsets.UTF_8);
}
Encodes the given string with Base64 encoding and then GZIP compresses it . Returns result as a Base64 encoded string .
44
27
32,435
/**
 * Decompresses the given GZIP byte array, Base64-decodes the result, and
 * reads it as UTF-8 text.
 */
public static String decompressString(byte[] compressed) throws IOException {
    try (InputStream gis = new GZIPInputStream(new ByteArrayInputStream(compressed))) {
        byte[] decoded = Base64.decode(IOUtils.toByteArray(gis));
        return new String(decoded, StandardCharsets.UTF_8);
    }
}
Decompresses the given byte array and decodes with Base64 decoding
84
14
32,436
/**
 * Base64-decodes the given string and decompresses the resulting bytes.
 */
public static String decompressFromBase64String(String compressed) throws IOException {
    byte[] raw = Base64.decode(compressed.getBytes(StandardCharsets.UTF_8));
    return decompressString(raw);
}
Given a Base64 encoded String decompresses it .
45
10
32,437
/**
 * Determines whether a byte array is GZIP-compressed by checking the
 * two-byte little-endian magic number (0x8b1f); java.util.zip does not
 * expose the header directly.
 */
public static boolean isCompressed(byte[] bytes) throws IOException {
    if (bytes == null || bytes.length < 2) {
        return false;
    }
    boolean firstMatches = bytes[0] == (byte) (GZIPInputStream.GZIP_MAGIC);
    boolean secondMatches = bytes[1] == (byte) (GZIPInputStream.GZIP_MAGIC >> 8);
    return firstMatches && secondMatches;
}
Determines if a byte array is compressed . The java . util . zip GZip implementation does not expose the GZip header so it is difficult to determine if a string is compressed .
76
38
32,438
/**
 * Determines whether an InputStream carries GZIP-compressed data by reading
 * its first two bytes and checking the GZIP magic number.
 * Fix: {@code read()} may return fewer than 2 bytes or -1 at end-of-stream;
 * the original passed that value straight to {@code write(data, 0, nRead)},
 * throwing IndexOutOfBoundsException on an empty stream. Short/empty input
 * now simply returns false.
 *
 * @param inputStream stream to probe; the first two bytes are consumed
 */
public static boolean isCompressed(InputStream inputStream) throws IOException {
    byte[] header = new byte[2];
    int total = 0;
    // Loop: a single read() is allowed to return fewer bytes than requested.
    while (total < header.length) {
        int nRead = inputStream.read(header, total, header.length - total);
        if (nRead < 0) {
            break; // end of stream
        }
        total += nRead;
    }
    if (total < 2) {
        return false; // fewer than two bytes cannot carry the GZIP magic
    }
    return header[0] == (byte) (GZIPInputStream.GZIP_MAGIC)
            && header[1] == (byte) (GZIPInputStream.GZIP_MAGIC >> 8);
}
Determines if an InputStream is compressed . The java . util . zip GZip implementation does not expose the GZip header so it is difficult to determine if a string is compressed .
90
38
32,439
public boolean inactiveSetChanged ( Collection < Host > hostsUp , Collection < Host > hostsDown ) { boolean newInactiveHostsFound = false ; // Check for condition 1. for ( Host hostDown : hostsDown ) { if ( activeHosts . contains ( hostDown ) ) { newInactiveHostsFound = true ; break ; } } // Check for condition 2. Set < Host > prevActiveHosts = new HashSet < Host > ( activeHosts ) ; prevActiveHosts . removeAll ( hostsUp ) ; newInactiveHostsFound = ! prevActiveHosts . isEmpty ( ) ; return newInactiveHostsFound ; }
This check is more involved than the active set check. Here we have 2 conditions to check for.
139
18
32,440
/**
 * Computes the new host status from the latest up/down sets reported by the
 * HostSupplier. Note that a NEW HostStatusTracker holding the new state is
 * returned; callers must update their references to use it.
 */
public HostStatusTracker computeNewHostStatus(Collection<Host> hostsUp, Collection<Host> hostsDown) {
    verifyMutuallyExclusive(hostsUp, hostsDown);
    Set<Host> nextActiveHosts = new HashSet<Host>(hostsUp);
    // Get the hosts that are currently down
    Set<Host> nextInactiveHosts = new HashSet<Host>(hostsDown);
    // add any previous hosts that were currently down iff they are still reported by the HostSupplier
    Set<Host> union = new HashSet<>(hostsUp);
    union.addAll(hostsDown);
    if (!union.containsAll(inactiveHosts)) {
        logger.info("REMOVING at least one inactive host from {} b/c it is no longer reported by HostSupplier",
                inactiveHosts);
        inactiveHosts.retainAll(union);
    }
    nextInactiveHosts.addAll(inactiveHosts);
    // Now remove from the total set of inactive hosts any host that is currently up.
    // This typically happens when a host moves from the inactive state to the active state.
    // And hence it will be there in the prev inactive set, and will also be there in the new active set
    // for this round.
    for (Host host : nextActiveHosts) {
        nextInactiveHosts.remove(host);
    }
    // Now add any host that is not in the new active hosts set and that was in the previous active set
    Set<Host> prevActiveHosts = new HashSet<Host>(activeHosts);
    prevActiveHosts.removeAll(hostsUp);
    // If anyone is remaining in the prev set then add it to the inactive set, since it has gone away
    nextInactiveHosts.addAll(prevActiveHosts);
    // Reflect the computed status on the Host objects themselves.
    for (Host host : nextActiveHosts) {
        host.setStatus(Status.Up);
    }
    for (Host host : nextInactiveHosts) {
        host.setStatus(Status.Down);
    }
    return new HostStatusTracker(nextActiveHosts, nextInactiveHosts);
}
Helper method that actually changes the state of the class to reflect the new set of hosts up and down Note that the new HostStatusTracker is returned that holds onto the new state . Calling classes must update their references to use the new HostStatusTracker
445
49
32,441
/**
 * Replace the entire inner list atomically via the reference swap.
 */
public void swapWithList(Collection<T> newList) {
    ref.set(new InnerList(newList));
}
Swap the entire inner list with a new list
40
10
32,442
/**
 * Add an element (if absent) by copy-on-write: mutate a copy, then swap the
 * inner list atomically.
 */
public synchronized void addElement(T element) {
    List<T> current = ref.get().list;
    if (current.contains(element)) {
        return; // already present; nothing to swap
    }
    List<T> copy = new ArrayList<T>(current);
    copy.add(element);
    swapWithList(copy);
}
Add an element to the list . This causes the inner list to be swapped out
81
16
32,443
/**
 * Remove an element (if present) by copy-on-write: mutate a copy, then swap
 * the inner list atomically.
 */
public synchronized void removeElement(T element) {
    List<T> current = ref.get().list;
    if (!current.contains(element)) {
        return; // nothing to remove
    }
    List<T> copy = new ArrayList<T>(current);
    copy.remove(element);
    swapWithList(copy);
}
Remove an element from this list . This causes the inner list to be swapped out
82
16
32,444
/**
 * Access the inner list. Use with care: the inner list can be swapped out
 * at any time. Returns null when no list has been set.
 */
public List<T> getEntireList() {
    InnerList snapshot = ref.get();
    if (snapshot == null) {
        return null;
    }
    return snapshot.getList();
}
Helpful utility to access the inner list . Must be used with care since the inner list can change .
40
21
32,445
/**
 * Size of the underlying list; 0 when unset. May change if the inner list
 * is swapped out concurrently.
 */
public int getSize() {
    InnerList snapshot = ref.get();
    if (snapshot == null) {
        return 0;
    }
    return snapshot.getList().size();
}
Gets the size of the bounded list underneath . Note that this num can change if the inner list is swapped out .
39
24
32,446
/**
 * Hashes the given slice of a byte array by delegating to the ByteBuffer
 * variant.
 */
public static int hash(byte[] data, int offset, int length, int seed) {
    ByteBuffer slice = ByteBuffer.wrap(data, offset, length);
    return hash(slice, seed);
}
Hashes bytes in part of an array .
39
9
32,447
/**
 * Murmur2-hashes the bytes of the buffer from its current position to its
 * limit; the buffer's byte order is restored before returning.
 */
public static int hash(ByteBuffer buf, int seed) {
    // Remember the caller's byte order; Murmur reads little-endian ints.
    ByteOrder savedOrder = buf.order();
    buf.order(ByteOrder.LITTLE_ENDIAN);

    final int m = 0x5bd1e995;
    final int r = 24;
    int h = seed ^ buf.remaining();

    // Mix four bytes at a time while a full int remains.
    while (buf.remaining() >= 4) {
        int k = buf.getInt();
        k *= m;
        k ^= k >>> r;
        k *= m;
        h *= m;
        h ^= k;
    }

    // Zero-pad the 1-3 trailing bytes into a little-endian int and mix it.
    if (buf.remaining() > 0) {
        ByteBuffer padded = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
        // for big-endian version, use this first:
        // padded.position(4 - buf.remaining());
        padded.put(buf).rewind();
        h ^= padded.getInt();
        h *= m;
    }

    // Final avalanche.
    h ^= h >>> 13;
    h *= m;
    h ^= h >>> 15;

    buf.order(savedOrder);
    return h;
}
Hashes the bytes in a buffer from the current position to the limit .
241
15
32,448
/**
 * Tries to obtain topology information by querying randomly-chosen nodes,
 * retrying up to NUM_RETRIER_ACROSS_NODES times before giving up. Only the
 * most recent exception is surfaced.
 */
@Override
public String getTopologyJsonPayload(Set<Host> activeHosts) {
    int count = NUM_RETRIER_ACROSS_NODES;
    String response;
    Exception lastEx = null;
    do {
        try {
            response = getTopologyFromRandomNodeWithRetry(activeHosts);
            if (response != null) {
                return response;
            }
        } catch (Exception e) {
            lastEx = e; // remember only the most recent failure
        } finally {
            count--;
        }
    } while ((count > 0));
    if (lastEx != null) {
        // Surface connect timeouts distinctly so callers can react to them.
        if (lastEx instanceof ConnectTimeoutException) {
            throw new TimeoutException("Unable to obtain topology", lastEx);
        }
        throw new DynoException(lastEx);
    } else {
        throw new DynoException("Could not contact dynomite for token map");
    }
}
Tries to get topology information by randomly trying across nodes .
182
13
32,449
/**
 * Picks a random host among the hosts currently marked up.
 * NOTE(review): like the original, this throws if no host is up
 * (Random.nextInt(0) is illegal).
 */
public Host getRandomHost(Set<Host> activeHosts) {
    // Keep only hosts whose status is up.
    List<Host> hostsUp = new ArrayList<Host>();
    for (Host candidate : activeHosts) {
        if (candidate.isUp()) {
            hostsUp.add(candidate);
        }
    }
    Random random = new Random();
    return hostsUp.get(random.nextInt(hostsUp.size()));
}
Finds a random host from the set of active hosts to perform cluster_describe
101
17
32,450
/**
 * Retries a single randomly-chosen node up to NUM_RETRIES_PER_NODE times
 * and bubbles up only the last exception, attributed to that host so the
 * caller can remove the failing node from the connection pool.
 */
private String getTopologyFromRandomNodeWithRetry(Set<Host> activeHosts) {
    int count = NUM_RETRIES_PER_NODE;
    String nodeResponse;
    Exception lastEx;
    final Host randomHost = getRandomHost(activeHosts);
    do {
        try {
            lastEx = null;
            nodeResponse = getResponseViaHttp(randomHost.getHostName());
            if (nodeResponse != null) {
                Logger.info("Received topology from " + randomHost);
                return nodeResponse;
            }
        } catch (Exception e) {
            Logger.info("cannot get topology from : " + randomHost);
            lastEx = e;
        } finally {
            count--;
        }
    } while ((count > 0));
    if (lastEx != null) {
        // Attribute the failure to the host so callers can evict it.
        if (lastEx instanceof ConnectTimeoutException) {
            throw new TimeoutException("Unable to obtain topology", lastEx).setHost(randomHost);
        }
        throw new DynoException(String.format("Unable to obtain topology from %s", randomHost), lastEx);
    } else {
        throw new DynoException(String.format("Could not contact dynomite manager for token map on %s", randomHost));
    }
}
Retries a randomly chosen node multiple times and bubbles up only the last node's exception. We want to bubble up the exception in order for the failing node to be removed from the connection pool.
274
36
32,451
/**
 * Parses the cluster-describe JSON payload into HostTokens, keeping only
 * hosts that belong to the local datacenter. Package-private for tests.
 *
 * @throws RuntimeException wrapping a ParseException on malformed JSON
 */
List<HostToken> parseTokenListFromJson(String json) {
    List<HostToken> hostTokens = new ArrayList<HostToken>();
    JSONParser parser = new JSONParser();
    try {
        JSONArray arr = (JSONArray) parser.parse(json);
        Iterator<?> iter = arr.iterator();
        while (iter.hasNext()) {
            Object item = iter.next();
            if (!(item instanceof JSONObject)) {
                continue; // skip entries that are not JSON objects
            }
            JSONObject jItem = (JSONObject) item;
            Long token = Long.parseLong((String) jItem.get("token"));
            String hostname = (String) jItem.get("hostname");
            String ipAddress = (String) jItem.get("ip");
            String zone = (String) jItem.get("zone");
            String datacenter = (String) jItem.get("dc");
            String portStr = (String) jItem.get("port");
            String securePortStr = (String) jItem.get("secure_port");
            String hashtag = (String) jItem.get("hashtag");
            // Ports are optional: default the plain port, and fall back to
            // it for the secure port when absent.
            int port = Host.DEFAULT_PORT;
            if (portStr != null) {
                port = Integer.valueOf(portStr);
            }
            int securePort = port;
            if (securePortStr != null) {
                securePort = Integer.valueOf(securePortStr);
            }
            Host host = new Host(hostname, ipAddress, port, securePort, zone, datacenter, Status.Up, hashtag);
            if (isLocalDatacenterHost(host)) {
                HostToken hostToken = new HostToken(token, host);
                hostTokens.add(hostToken);
            }
        }
    } catch (ParseException e) {
        Logger.error("Failed to parse json response: " + json, e);
        throw new RuntimeException(e);
    }
    return hostTokens;
}
package - private for Test
434
5
32,452
/**
 * Starts the log-in request and, on success, fetches the user's profile
 * using the returned access token; both results are delivered to the
 * callback as a single Authentication. Any failure at either step is
 * forwarded to the callback's onFailure.
 */
@Override
public void start(final BaseCallback<Authentication, AuthenticationException> callback) {
    credentialsRequest.start(new BaseCallback<Credentials, AuthenticationException>() {
        @Override
        public void onSuccess(final Credentials credentials) {
            // Authenticate the profile request with the fresh access token.
            userInfoRequest.addHeader(HEADER_AUTHORIZATION, "Bearer " + credentials.getAccessToken())
                    .start(new BaseCallback<UserProfile, AuthenticationException>() {
                        @Override
                        public void onSuccess(UserProfile profile) {
                            callback.onSuccess(new Authentication(profile, credentials));
                        }

                        @Override
                        public void onFailure(AuthenticationException error) {
                            callback.onFailure(error);
                        }
                    });
        }

        @Override
        public void onFailure(AuthenticationException error) {
            callback.onFailure(error);
        }
    });
}
Starts the log-in request and then fetches the user's profile.
178
14
32,453
/**
 * Logs the user in and then fetches the profile, blocking until both requests finish.
 *
 * @return the combined authentication result (profile plus credentials)
 * @throws Auth0Exception if either request fails
 */
@Override
public Authentication execute() throws Auth0Exception {
    final Credentials credentials = credentialsRequest.execute();
    final UserProfile profile = userInfoRequest
            .addHeader(HEADER_AUTHORIZATION, "Bearer " + credentials.getAccessToken())
            .execute();
    return new Authentication(profile, credentials);
}
Logs in the user with Auth0 and fetches its profile .
73
15
32,454
/**
 * Returns a defensive copy of the extra (non-normalized) profile information,
 * or an empty map when none is present. Callers may freely mutate the result.
 */
public Map<String, Object> getExtraInfo() {
    if (extraInfo == null) {
        return Collections.<String, Object>emptyMap();
    }
    return new HashMap<>(extraInfo);
}
Returns extra information of the profile that is not part of the normalized profile
42
14
32,455
/**
 * Adds extra parameters to send when logging the user in after sign-up.
 *
 * @param parameters additional authentication parameters
 * @return this request, for chaining
 */
@Override
public SignUpRequest addAuthenticationParameters(Map<String, Object> parameters) {
    authenticationRequest.addAuthenticationParameters(parameters);
    return this;
}
Add additional parameters sent when logging the user in
36
9
32,456
/**
 * Starts the create-user request asynchronously and, once the user exists,
 * logs them in, delivering the resulting credentials (or any error) to the callback.
 */
@Override
public void start(final BaseCallback<Credentials, AuthenticationException> callback) {
    signUpRequest.start(new BaseCallback<DatabaseUser, AuthenticationException>() {
        @Override
        public void onSuccess(final DatabaseUser user) {
            // User created; now authenticate and hand the credentials to the caller.
            authenticationRequest.start(callback);
        }

        @Override
        public void onFailure(AuthenticationException error) {
            callback.onFailure(error);
        }
    });
}
Starts to execute create user request and then logs the user in .
89
14
32,457
/**
 * Attempts to bind the Custom Tabs service. Binding is skipped when the Context
 * has been garbage-collected or no preferred browser package is known.
 */
public void bindService() {
    Log.v(TAG, "Trying to bind the service");
    final Context ctx = this.context.get();
    isBound = false;
    if (ctx != null && preferredPackage != null) {
        isBound = CustomTabsClient.bindCustomTabsService(ctx, preferredPackage, this);
    }
    Log.v(TAG, "Bind request result: " + isBound);
}
Attempts to bind the Custom Tabs Service to the Context .
92
12
32,458
/**
 * Attempts to unbind the Custom Tabs service from the Context, if it is still
 * bound and the Context has not been garbage-collected.
 */
public void unbindService() {
    Log.v(TAG, "Trying to unbind the service");
    final Context ctx = this.context.get();
    if (isBound && ctx != null) {
        ctx.unbindService(this);
        isBound = false;
    }
}
Attempts to unbind the Custom Tabs Service from the Context .
62
13
32,459
/**
 * Exchanges the authorization code for OAuth tokens, ending the PKCE flow.
 * On an "Unauthorized" failure a hint about the required 'Native' client type is logged.
 * This instance must be disposed after calling this method.
 */
public void getToken(String authorizationCode, @NonNull final AuthCallback callback) {
    apiClient.token(authorizationCode, redirectUri)
            .setCodeVerifier(codeVerifier)
            .start(new BaseCallback<Credentials, AuthenticationException>() {
                @Override
                public void onSuccess(Credentials payload) {
                    callback.onSuccess(payload);
                }

                @Override
                public void onFailure(AuthenticationException error) {
                    if ("Unauthorized".equals(error.getDescription())) {
                        Log.e(TAG, "Please go to 'https://manage.auth0.com/#/applications/" + apiClient.getClientId() + "/settings' and set 'Client Type' to 'Native' to enable PKCE.");
                    }
                    callback.onFailure(error);
                }
            });
}
Performs a request to the Auth0 API to get the OAuth Token and end the PKCE flow . The instance of this class must be disposed after this method is called .
183
36
32,460
/** Returns whether every required Android Manifest permission has already been granted. */
private boolean checkPermissions(Activity activity) {
    final String[] requiredPermissions = getRequiredAndroidPermissions();
    return handler.areAllPermissionsGranted(activity, requiredPermissions);
}
Checks if all the required Android Manifest . permissions have already been granted .
39
15
32,461
/**
 * Starts the asynchronous permission request. The caller activity is notified of the
 * result via ActivityCompat.OnRequestPermissionsResultCallback#onRequestPermissionsResult.
 */
private void requestPermissions(Activity activity, int requestCode) {
    final String[] requiredPermissions = getRequiredAndroidPermissions();
    handler.requestPermissions(activity, requiredPermissions, requestCode);
}
Starts the async Permission Request . The caller activity will be notified of the result on the onRequestPermissionsResult method from the ActivityCompat . OnRequestPermissionsResultCallback interface .
42
38
32,462
/**
 * Adds additional parameters to be sent in the delegation request.
 *
 * @param parameters the parameters to add
 * @return this request, for chaining
 */
public DelegationRequest<T> addParameters(Map<String, Object> parameters) {
    request.addParameters(parameters);
    return this;
}
Add additional parameters to be sent in the request
32
9
32,463
/**
 * Sets the scope used for the delegation.
 *
 * @param scope the scope value
 * @return this request, for chaining
 */
public DelegationRequest<T> setScope(String scope) {
    request.addParameter(ParameterBuilder.SCOPE_KEY, scope);
    return this;
}
Set the scope used to make the delegation
36
8
32,464
/**
 * Builds a request that starts a custom passwordless flow against the
 * /passwordless/start endpoint, pre-populated with the client id.
 */
@SuppressWarnings("WeakerAccess")
private ParameterizableRequest<Void, AuthenticationException> passwordless() {
    final HttpUrl url = HttpUrl.parse(auth0.getDomainUrl()).newBuilder()
            .addPathSegment(PASSWORDLESS_PATH)
            .addPathSegment(START_PATH)
            .build();
    final Map<String, Object> parameters = ParameterBuilder.newBuilder()
            .setClientId(getClientId())
            .asDictionary();
    return factory.POST(url, client, gson, authErrorBuilder)
            .addParameters(parameters);
}
Start a custom passwordless flow
141
6
32,465
/**
 * Sets the 'connection' parameter. Ignored (with a warning) when the request targets an
 * OAuth 2.0 API Authorization endpoint, which does not support it.
 */
@Override
public AuthenticationRequest setConnection(String connection) {
    if (!hasLegacyPath()) {
        Log.w(TAG, "Not setting the 'connection' parameter as the request is using a OAuth 2.0 API Authorization endpoint that doesn't support it.");
        return this;
    }
    addParameter(CONNECTION_KEY, connection);
    return this;
}
Sets the connection parameter .
80
6
32,466
/**
 * Sets the 'realm' parameter, identifying the host to authenticate against. Ignored
 * (with a warning) when the request targets a Legacy Authorization API endpoint,
 * which does not support it.
 */
@Override
public AuthenticationRequest setRealm(String realm) {
    if (hasLegacyPath()) {
        Log.w(TAG, "Not setting the 'realm' parameter as the request is using a Legacy Authorization API endpoint that doesn't support it.");
        return this;
    }
    addParameter(REALM_KEY, realm);
    return this;
}
Sets the realm parameter . A realm identifies the host against which the authentication will be made and usually helps to know which username and password to use .
76
30
32,467
/**
 * Adds the given parameters to the request.
 *
 * @param parameters the parameters to add
 * @return this request, for chaining
 */
public DatabaseConnectionRequest<T, U> addParameters(Map<String, Object> parameters) {
    request.addParameters(parameters);
    return this;
}
Add the given parameters to the request
33
7
32,468
/**
 * Adds a single named parameter to the request.
 *
 * @param name  parameter name
 * @param value parameter value
 * @return this request, for chaining
 */
public DatabaseConnectionRequest<T, U> addParameter(String name, Object value) {
    request.addParameter(name, value);
    return this;
}
Add a parameter by name to the request
33
8
32,469
/**
 * Sets the Auth0 Database Connection used by this request, by name.
 *
 * @param connection the connection name
 * @return this request, for chaining
 */
public DatabaseConnectionRequest<T, U> setConnection(String connection) {
    request.addParameter(ParameterBuilder.CONNECTION_KEY, connection);
    return this;
}
Set the Auth0 Database Connection used for this request using its name .
38
14
32,470
/**
 * Creates an OkHttpClient configured per the given flags and timeouts. Internal use only.
 *
 * @param loggingEnabled whether request/response logging is enabled
 * @param tls12Enforced  whether TLS 1.2 is enforced on older devices
 * @param connectTimeout connect timeout in the unit expected by modifyClient
 * @param readTimeout    read timeout
 * @param writeTimeout   write timeout
 * @return the configured client
 */
public OkHttpClient createClient(boolean loggingEnabled, boolean tls12Enforced, int connectTimeout, int readTimeout, int writeTimeout) {
    final OkHttpClient baseClient = new OkHttpClient();
    return modifyClient(baseClient, loggingEnabled, tls12Enforced, connectTimeout, readTimeout, writeTimeout);
}
This method creates an instance of OKHttpClient according to the provided parameters . It is used internally and is not intended to be used directly .
62
28
32,471
private void enforceTls12 ( OkHttpClient client ) { // No need to modify client as TLS 1.2 is enabled by default on API21+ // Lollipop is included because some Samsung devices face the same problem on API 21. if ( Build . VERSION . SDK_INT < Build . VERSION_CODES . JELLY_BEAN || Build . VERSION . SDK_INT > Build . VERSION_CODES . LOLLIPOP ) { return ; } try { SSLContext sc = SSLContext . getInstance ( "TLSv1.2" ) ; sc . init ( null , null , null ) ; client . setSslSocketFactory ( new TLS12SocketFactory ( sc . getSocketFactory ( ) ) ) ; ConnectionSpec cs = new ConnectionSpec . Builder ( ConnectionSpec . MODERN_TLS ) . tlsVersions ( TlsVersion . TLS_1_2 ) . build ( ) ; List < ConnectionSpec > specs = new ArrayList <> ( ) ; specs . add ( cs ) ; specs . add ( ConnectionSpec . COMPATIBLE_TLS ) ; specs . add ( ConnectionSpec . CLEARTEXT ) ; client . setConnectionSpecs ( specs ) ; } catch ( NoSuchAlgorithmException | KeyManagementException e ) { Log . e ( TAG , "Error while setting TLS 1.2" , e ) ; } }
Enable TLS 1 . 2 on the OkHttpClient on API 16 - 21 which is supported but not enabled by default .
294
24
32,472
/**
 * Copies every entry with a non-null value from the given map into this builder.
 *
 * @param parameters the parameters to add; may be null, in which case nothing is added
 * @return this builder, for chaining
 */
public ParameterBuilder addAll(Map<String, Object> parameters) {
    if (parameters != null) {
        // Iterate entries directly instead of keySet()+get(), avoiding a second lookup per key.
        for (Map.Entry<String, Object> entry : parameters.entrySet()) {
            if (entry.getValue() != null) {
                this.parameters.put(entry.getKey(), entry.getValue());
            }
        }
    }
    return this;
}
Adds all parameter from a map
71
6
32,473
/**
 * Checks whether the received data is valid and parseable: the Intent must carry a Uri,
 * the request code must match the expected one (unless it was never set), and the
 * activity result must be RESULT_OK.
 */
public boolean isValid(int expectedRequestCode) {
    final Uri uri = intent != null ? intent.getData() : null;
    if (uri == null) {
        Log.d(TAG, "Result is invalid: Received Intent's Uri is null.");
        return false;
    }
    if (requestCode == MISSING_REQUEST_CODE) {
        // No request code recorded; accept any code.
        return true;
    }
    final boolean fromRequest = getRequestCode() == expectedRequestCode;
    if (!fromRequest) {
        Log.d(TAG, String.format("Result is invalid: Received Request Code doesn't match the expected one. Was %d but expected %d", getRequestCode(), expectedRequestCode));
    }
    return fromRequest && resultCode == Activity.RESULT_OK;
}
Checks if the received data is valid and can be parsed .
162
13
32,474
/**
 * Returns whether a non-expired pair of credentials can be obtained from this manager.
 * Credentials are invalid when both tokens are missing, there is no expiry, or they are
 * expired with no refresh token available to renew them.
 */
public boolean hasValidCredentials() {
    final String accessToken = storage.retrieveString(KEY_ACCESS_TOKEN);
    final String refreshToken = storage.retrieveString(KEY_REFRESH_TOKEN);
    final String idToken = storage.retrieveString(KEY_ID_TOKEN);
    final Long expiresAt = storage.retrieveLong(KEY_EXPIRES_AT);

    // Grouping matches the original operator precedence (&& binds tighter than ||).
    final boolean noTokens = isEmpty(accessToken) && isEmpty(idToken);
    final boolean unusableExpiry = expiresAt == null
            || (expiresAt <= getCurrentTimeInMillis() && refreshToken == null);
    return !(noTokens || unusableExpiry);
}
Checks if a non - expired pair of credentials can be obtained from this manager .
123
17
32,475
/** Removes every stored credential field from the storage, if present. */
public void clearCredentials() {
    storage.remove(KEY_ACCESS_TOKEN);
    storage.remove(KEY_REFRESH_TOKEN);
    storage.remove(KEY_ID_TOKEN);
    storage.remove(KEY_TOKEN_TYPE);
    storage.remove(KEY_EXPIRES_AT);
    storage.remove(KEY_SCOPE);
}
Removes the credentials from the storage if present .
84
10
32,476
/** Deletes the stored (encrypted) credentials and their associated metadata. */
public void clearCredentials() {
    storage.remove(KEY_CREDENTIALS);
    storage.remove(KEY_EXPIRES_AT);
    storage.remove(KEY_CAN_REFRESH);
    Log.d(TAG, "Credentials were just removed from the storage");
}
Delete the stored credentials
68
4
32,477
/**
 * Returns whether this manager holds a valid, non-expired pair of credentials:
 * stored payload present, expiry known, and either unexpired or refreshable.
 */
public boolean hasValidCredentials() {
    final String encryptedEncoded = storage.retrieveString(KEY_CREDENTIALS);
    final Long expiresAt = storage.retrieveLong(KEY_EXPIRES_AT);
    final Boolean canRefresh = storage.retrieveBoolean(KEY_CAN_REFRESH);

    // Grouping matches the original operator precedence (&& binds tighter than ||).
    final boolean expiredAndNotRefreshable = expiresAt != null
            && expiresAt <= getCurrentTimeInMillis()
            && (canRefresh == null || !canRefresh);
    return !(isEmpty(encryptedEncoded) || expiresAt == null || expiredAndNotRefreshable);
}
Returns whether this manager contains a valid non - expired pair of credentials .
110
14
32,478
/**
 * Returns true when more entries are available, lazily fetching the next page of
 * entries from the server once the current page is exhausted. Fetch failures are
 * logged and treated as "no more entries".
 */
@Override
public boolean hasNext() {
    if (!members.hasNext()) {
        try {
            getNextEntries();
        } catch (final Exception ignored) {
            LOG.error("An error occured while getting next entries", ignored);
        }
    }
    return members.hasNext();
}
Returns true if more entries are available .
67
8
32,479
/**
 * Returns the next entry in the collection, as a ClientMediaEntry when the underlying
 * ROME entry is a media entry.
 *
 * @throws NoSuchElementException when no further entries exist
 */
@Override
public ClientEntry next() {
    if (!hasNext()) {
        throw new NoSuchElementException();
    }
    final Entry romeEntry = members.next();
    try {
        if (romeEntry.isMediaEntry()) {
            return new ClientMediaEntry(null, collection, romeEntry, true);
        }
        return new ClientEntry(null, collection, romeEntry, true);
    } catch (final ProponoException e) {
        throw new RuntimeException("Unexpected exception creating ClientEntry or ClientMedia", e);
    }
}
Get next entry in collection .
125
6
32,480
/**
 * Fetches the full entry at the given edit URI. The entry may or may not be associated
 * with this collection; media entries are returned as ClientMediaEntry.
 *
 * @param uri the entry's edit URI
 * @return the parsed entry
 * @throws ProponoException on a non-200 response or any fetch/parse failure
 */
public ClientEntry getEntry(final String uri) throws ProponoException {
    final GetMethod method = new GetMethod(uri);
    authStrategy.addAuthentication(httpClient, method);
    try {
        httpClient.executeMethod(method);
        if (method.getStatusCode() != 200) {
            throw new ProponoException("ERROR HTTP status code=" + method.getStatusCode());
        }
        final Entry romeEntry = Atom10Parser.parseEntry(
                new InputStreamReader(method.getResponseBodyAsStream()), uri, Locale.US);
        if (!romeEntry.isMediaEntry()) {
            return new ClientEntry(service, this, romeEntry, false);
        } else {
            return new ClientMediaEntry(service, this, romeEntry, false);
        }
    } catch (final ProponoException pe) {
        // Fixed: the status-code exception above was previously caught by the broad catch
        // below and re-wrapped, hiding the HTTP status from callers.
        throw pe;
    } catch (final Exception e) {
        // Fixed: message previously ended with a dangling ", HTTP code: " fragment.
        throw new ProponoException("ERROR: getting or parsing entry/media", e);
    } finally {
        method.releaseConnection();
    }
}
Get full entry specified by entry edit URI . Note that entry may or may not be associated with this collection .
223
22
32,481
/**
 * Creates a new media entry associated with this collection from raw bytes, without
 * saving it to the server. Depending on the Atom server, the properties of the returned
 * entry may or may not be persistable.
 *
 * @throws ProponoException when the collection is read-only
 */
public ClientMediaEntry createMediaEntry(final String title, final String slug, final String contentType, final byte[] bytes) throws ProponoException {
    if (!isWritable()) {
        throw new ProponoException("Collection is not writable");
    }
    return new ClientMediaEntry(service, this, title, slug, contentType, bytes);
}
Create new media entry associated with collection but do not save to server . Depending on the Atom server you may or may not be able to persist the properties of the entry that is returned .
79
39
32,482
/**
 * Creates a new media entry associated with this collection from an input stream, without
 * saving it to the server. Depending on the Atom server, the properties of the returned
 * entry may or may not be persistable.
 *
 * @throws ProponoException when the collection is read-only
 */
public ClientMediaEntry createMediaEntry(final String title, final String slug, final String contentType, final InputStream is) throws ProponoException {
    if (!isWritable()) {
        throw new ProponoException("Collection is not writable");
    }
    return new ClientMediaEntry(service, this, title, slug, contentType, is);
}
Create new media entry associated with collection but do not save to server . Depending on the Atom server you may or may not be able to . persist the properties of the entry that is returned .
78
40
32,483
/**
 * Parses the JDOM element into an AppModule, reading the optional app:control/app:draft
 * flag ("yes"/"no") and the optional app:edited W3C date-time.
 */
@Override
public Module parse(final Element elem, final Locale locale) {
    final AppModule module = new AppModuleImpl();
    final Element control = elem.getChild("control", getContentNamespace());
    if (control != null) {
        final Element draftElem = control.getChild("draft", getContentNamespace());
        if (draftElem != null) {
            final String draft = draftElem.getText();
            if ("yes".equals(draft)) {
                module.setDraft(Boolean.TRUE);
            }
            if ("no".equals(draft)) {
                module.setDraft(Boolean.FALSE);
            }
        }
    }
    final Element edited = elem.getChild("edited", getContentNamespace());
    if (edited != null) {
        try {
            module.setEdited(DateParser.parseW3CDateTime(edited.getTextTrim(), locale));
        } catch (final Exception ignored) {
            // Malformed dates are silently skipped; the module simply keeps no edited value.
        }
    }
    return module;
}
Parse JDOM element into module
226
7
32,484
/**
 * Returns the media resource as an InputStream, regardless of whether the resource was
 * supplied as a src URI, an InputStream, or a byte array.
 *
 * @throws ProponoException when the entry has no content or no usable data source
 */
public InputStream getAsStream() throws ProponoException {
    if (getContents() == null || getContents().isEmpty()) {
        throw new ProponoException("ERROR: no content found in entry");
    }
    final Content content = getContents().get(0);
    if (content.getSrc() != null) {
        return getResourceAsStream();
    }
    if (inputStream != null) {
        return inputStream;
    }
    if (bytes != null) {
        return new ByteArrayInputStream(bytes);
    }
    throw new ProponoException("ERROR: no src URI or binary data to return");
}
Get media resource as an InputStream should work regardless of whether you set the media resource data as an InputStream or as a byte array .
148
28
32,485
@ Override public void update ( ) throws ProponoException { if ( partial ) { throw new ProponoException ( "ERROR: attempt to update partial entry" ) ; } EntityEnclosingMethod method = null ; final Content updateContent = getContents ( ) . get ( 0 ) ; try { if ( getMediaLinkURI ( ) != null && getBytes ( ) != null ) { // existing media entry and new file, so PUT file to edit-media URI method = new PutMethod ( getMediaLinkURI ( ) ) ; if ( inputStream != null ) { method . setRequestEntity ( new InputStreamRequestEntity ( inputStream ) ) ; } else { method . setRequestEntity ( new InputStreamRequestEntity ( new ByteArrayInputStream ( getBytes ( ) ) ) ) ; } method . setRequestHeader ( "Content-type" , updateContent . getType ( ) ) ; } else if ( getEditURI ( ) != null ) { // existing media entry and NO new file, so PUT entry to edit URI method = new PutMethod ( getEditURI ( ) ) ; final StringWriter sw = new StringWriter ( ) ; Atom10Generator . serializeEntry ( this , sw ) ; method . setRequestEntity ( new StringRequestEntity ( sw . toString ( ) , null , null ) ) ; method . setRequestHeader ( "Content-type" , "application/atom+xml; charset=utf8" ) ; } else { throw new ProponoException ( "ERROR: media entry has no edit URI or media-link URI" ) ; } getCollection ( ) . addAuthentication ( method ) ; method . addRequestHeader ( "Title" , getTitle ( ) ) ; getCollection ( ) . getHttpClient ( ) . executeMethod ( method ) ; if ( inputStream != null ) { inputStream . close ( ) ; } final InputStream is = method . getResponseBodyAsStream ( ) ; if ( method . getStatusCode ( ) != 200 && method . getStatusCode ( ) != 201 ) { throw new ProponoException ( "ERROR HTTP status=" + method . getStatusCode ( ) + " : " + Utilities . streamToString ( is ) ) ; } } catch ( final Exception e ) { throw new ProponoException ( "ERROR: saving media entry" ) ; } if ( method . getStatusCode ( ) != 201 ) { throw new ProponoException ( "ERROR HTTP status=" + method . getStatusCode ( ) ) ; } }
Update entry on server .
527
5
32,486
@ Override public final synchronized void setSyndFeed ( final SyndFeed feed ) { super . setSyndFeed ( feed ) ; changedMap . clear ( ) ; final List < SyndEntry > entries = feed . getEntries ( ) ; for ( final SyndEntry entry : entries ) { final String currentEntryTag = computeEntryTag ( entry ) ; final String previousEntryTag = entryTagsMap . get ( entry . getUri ( ) ) ; if ( previousEntryTag == null || ! currentEntryTag . equals ( previousEntryTag ) ) { // Entry has changed changedMap . put ( entry . getUri ( ) , Boolean . TRUE ) ; } entryTagsMap . put ( entry . getUri ( ) , currentEntryTag ) ; } }
Overrides super class method to update changedMap and entryTagsMap for tracking changed entries .
160
19
32,487
/**
 * Sets the entry's content from a string, defaulting the content type to HTML when
 * none is given. Replaces any existing contents.
 */
public void setContent(final String contentString, final String type) {
    final Content content = new Content();
    content.setType(type == null ? Content.HTML : type);
    content.setValue(contentString);
    final ArrayList<Content> contents = new ArrayList<Content>();
    contents.add(content);
    setContents(contents);
}
Set content of entry .
84
5
32,488
/**
 * Convenience: replaces the content collection with the single given content object.
 * Atom 1.0 allows only one content element per entry.
 */
public void setContent(final Content c) {
    final ArrayList<Content> singleContent = new ArrayList<Content>();
    singleContent.add(c);
    setContents(singleContent);
}
Convenience method to set first content object in content collection . Atom 1 . 0 allows only one content element per entry .
41
25
32,489
/**
 * Convenience: returns the first content object in the content collection, or null when
 * there is none. Atom 1.0 allows only one content element per entry.
 */
public Content getContent() {
    final List<Content> contents = getContents();
    if (contents == null || contents.isEmpty()) {
        return null;
    }
    return contents.get(0);
}
Convenience method to get first content object in content collection . Atom 1 . 0 allows only one content element per entry .
50
25
32,490
/**
 * Removes this entry from the server via HTTP DELETE on its edit URI.
 *
 * @throws ProponoException when the entry was never saved or the request fails
 */
public void remove() throws ProponoException {
    if (getEditURI() == null) {
        throw new ProponoException("ERROR: cannot delete unsaved entry");
    }
    final DeleteMethod method = new DeleteMethod(getEditURI());
    addAuthentication(method);
    try {
        getHttpClient().executeMethod(method);
    } catch (final IOException ex) {
        throw new ProponoException("ERROR: removing entry, HTTP code", ex);
    } finally {
        method.releaseConnection();
    }
}
Remove entry from server .
116
5
32,491
/**
 * Returns the resolved href of the first link with rel="edit" — the URI usable for
 * HTTP PUT or DELETE — or null when no such link exists.
 */
public String getEditURI() {
    for (final Link link : getOtherLinks()) {
        if ("edit".equals(link.getRel())) {
            return link.getHrefResolved();
        }
    }
    return null;
}
Get the URI that can be used to edit the entry via HTTP PUT or DELETE .
89
20
32,492
/**
 * Marks the content at the given index as the default content, clearing the flag on
 * every other content, and records the index.
 */
public void setDefaultContentIndex(final Integer defaultContentIndex) {
    final int target = defaultContentIndex.intValue();
    for (int i = 0; i < getContents().length; i++) {
        getContents()[i].setDefaultContent(i == target);
    }
    this.defaultContentIndex = defaultContentIndex;
}
Default content index MediaContent .
94
6
32,493
/**
 * Serializes this workspace into an APP &lt;workspace&gt; XML element, including its
 * title (with a type attribute unless the type is TEXT) and all collections.
 */
public Element workspaceToElement() {
    final Workspace space = this;
    final Element element = new Element("workspace", AtomService.ATOM_PROTOCOL);

    final Element titleElem = new Element("title", AtomService.ATOM_FORMAT);
    titleElem.setText(space.getTitle());
    if (space.getTitleType() != null && !space.getTitleType().equals("TEXT")) {
        titleElem.setAttribute("type", space.getTitleType(), AtomService.ATOM_FORMAT);
    }
    element.addContent(titleElem);

    for (final Collection col : space.getCollections()) {
        element.addContent(col.collectionToElement());
    }
    return element;
}
Serialize an AtomService . DefaultWorkspace object into an XML element
180
14
32,494
/**
 * Deserializes an Atom workspace XML element into this object: reads the title (and its
 * optional type attribute) and adds a Collection for every &lt;collection&gt; child.
 */
protected void parseWorkspaceElement(final Element element) throws ProponoException {
    final Element titleElem = element.getChild("title", AtomService.ATOM_FORMAT);
    setTitle(titleElem.getText());
    if (titleElem.getAttribute("type", AtomService.ATOM_FORMAT) != null) {
        setTitleType(titleElem.getAttribute("type", AtomService.ATOM_FORMAT).getValue());
    }
    final List<Element> collectionElems = element.getChildren("collection", AtomService.ATOM_PROTOCOL);
    for (final Element collectionElem : collectionElems) {
        addCollection(new Collection(collectionElem));
    }
}
Deserialize a Atom workspace XML element into an object
161
11
32,495
/**
 * Finds a workspace by exact title match.
 *
 * @param title the title to look for
 * @return the first matching workspace, or null when none matches
 */
public Workspace findWorkspace(final String title) {
    for (final Object candidate : workspaces) {
        final Workspace workspace = (Workspace) candidate;
        if (title.equals(workspace.getTitle())) {
            return workspace;
        }
    }
    return null;
}
Find workspace by title .
60
5
32,496
/**
 * Serializes this AtomService into an XML Document whose root &lt;service&gt; element
 * contains one child element per workspace.
 */
public Document serviceToDocument() {
    final AtomService service = this;
    final Document doc = new Document();
    final Element root = new Element("service", ATOM_PROTOCOL);
    doc.setRootElement(root);
    for (final Workspace space : service.getWorkspaces()) {
        root.addContent(space.workspaceToElement());
    }
    return doc;
}
Serialize an AtomService object into an XML document
98
10
32,497
// Performs an in-place selection-style sort of all beans in this List by the property
// extracted via the given ValueStrategy. Null property values sort last in ascending
// order and first in descending order; entries whose extracted values are identical
// references are left untouched. NOTE(review): O(n^2) pairwise swaps — presumably kept
// for its explicit null ordering; confirm before replacing with Collections.sort.
public synchronized void sortOnProperty ( final Object value , final boolean ascending , final ValueStrategy strategy ) { final int elementCount = size ( ) ; for ( int i = 0 ; i < elementCount - 1 ; i ++ ) { for ( int j = i + 1 ; j < elementCount ; j ++ ) { final T entry1 = get ( i ) ; final T entry2 = get ( j ) ; final EntryValue oc1 = strategy . getValue ( entry1 , value ) ; final EntryValue oc2 = strategy . getValue ( entry2 , value ) ; if ( oc1 != oc2 ) { final boolean bothNotNull = oc1 != null && oc2 != null ; if ( ascending ) { if ( oc2 == null || bothNotNull && oc2 . compareTo ( oc1 ) < 0 ) { // swap entries set ( i , entry2 ) ; set ( j , entry1 ) ; } } else { if ( oc1 == null || bothNotNull && oc1 . compareTo ( oc2 ) < 0 ) { // swap entries set ( i , entry2 ) ; set ( j , entry1 ) ; } } } } } }
performs a selection sort on all the beans in the List
259
12
32,498
/**
 * Fetches the media resource at this entry's permalink over HTTP and returns its body
 * as an InputStream.
 *
 * @throws BlogClientException on any transport error or a non-200 response
 */
@Override
public InputStream getAsStream() throws BlogClientException {
    final HttpClient httpClient = new HttpClient();
    final GetMethod method = new GetMethod(permalink);
    try {
        httpClient.executeMethod(method);
    } catch (final Exception e) {
        throw new BlogClientException("ERROR: error reading file", e);
    }
    if (method.getStatusCode() != 200) {
        throw new BlogClientException("ERROR HTTP status=" + method.getStatusCode());
    }
    try {
        return method.getResponseBodyAsStream();
    } catch (final Exception e) {
        throw new BlogClientException("ERROR: error reading file", e);
    }
}
Get media resource as input stream .
155
7
32,499
// Creates and hardens a JDOM2 SAXBuilder for feed parsing: external general/parameter
// entities and external-DTD loading are disabled (XXE mitigation per the advisory cited
// in the inline comments), doctypes are optionally disallowed, entity expansion is off,
// and the shared EntityResolver is installed. Validation mode follows the 'validate' flag.
protected SAXBuilder createSAXBuilder ( ) { SAXBuilder saxBuilder ; if ( validate ) { saxBuilder = new SAXBuilder ( XMLReaders . DTDVALIDATING ) ; } else { saxBuilder = new SAXBuilder ( XMLReaders . NONVALIDATING ) ; } saxBuilder . setEntityResolver ( RESOLVER ) ; // // This code is needed to fix the security problem outlined in // http://www.securityfocus.com/archive/1/297714 // // Unfortunately there isn't an easy way to check if an XML parser // supports a particular feature, so // we need to set it and catch the exception if it fails. We also need // to subclass the JDom SAXBuilder // class in order to get access to the underlying SAX parser - otherwise // the features don't get set until // we are already building the document, by which time it's too late to // fix the problem. // // Crimson is one parser which is known not to support these features. try { final XMLReader parser = saxBuilder . createParser ( ) ; setFeature ( saxBuilder , parser , "http://xml.org/sax/features/external-general-entities" , false ) ; setFeature ( saxBuilder , parser , "http://xml.org/sax/features/external-parameter-entities" , false ) ; setFeature ( saxBuilder , parser , "http://apache.org/xml/features/nonvalidating/load-external-dtd" , false ) ; if ( ! allowDoctypes ) { setFeature ( saxBuilder , parser , "http://apache.org/xml/features/disallow-doctype-decl" , true ) ; } } catch ( final JDOMException e ) { throw new IllegalStateException ( "JDOM could not create a SAX parser" , e ) ; } saxBuilder . setExpandEntities ( false ) ; return saxBuilder ; }
Creates and sets up a org . jdom2 . input . SAXBuilder for parsing .
416
20