idx
int64
0
41.2k
question
stringlengths
74
4.21k
target
stringlengths
5
888
22,100
/**
 * Reads the value of the named field from {@code instance} via reflection,
 * searching the class hierarchy for a matching field.
 *
 * @param instance  the object to read from
 * @param fieldName the name of the field
 * @return the field's current value
 * @throws CacheException if the field cannot be found or accessed
 */
public static Object getValue(Object instance, String fieldName) {
   Field field = findFieldRecursively(instance.getClass(), fieldName);
   if (field == null) {
      throw new CacheException("Could not find field named '" + fieldName + "' on instance " + instance);
   }
   try {
      field.setAccessible(true);
      return field.get(instance);
   } catch (IllegalAccessException iae) {
      throw new CacheException("Cannot access field " + field, iae);
   }
}
Retrieves the value of a field of an object instance via reflection
22,101
/**
 * Probes the index directory's write lock by obtaining it and releasing it
 * immediately via try-with-resources (Lucene's Lock is Closeable).
 *
 * @return {@code true} if the write lock could be obtained, {@code false} if it
 *         is held elsewhere or an I/O error occurred
 */
private boolean waitForAvailabilityInternal() {
   final Directory directory = indexManager.getDirectoryProvider().getDirectory();
   try (Lock ignored = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
      return true;
   } catch (LockObtainFailedException lofe) {
      // lock is currently held by another writer
      return false;
   } catch (IOException e) {
      log.error(e);
      return false;
   }
}
Checks whether the index write lock is currently available by obtaining it and releasing it immediately ; returns false if the lock is held elsewhere or an I/O error occurs .
22,102
/**
 * Schedules {@code request} to be completed with {@code false} after the given
 * timeout, unless it completes (or its scheduling is aborted) first.
 *
 * @param requestId unique id of the request
 * @param request   the future to complete on timeout
 * @param time      timeout amount
 * @param unit      timeout unit
 * @throws IllegalStateException if a request with the same id is already scheduled
 */
public void scheduleForCompletion(String requestId, CompletableFuture<Boolean> request, long time, TimeUnit unit) {
   if (request.isDone()) {
      if (trace) {
         log.tracef("Request[%s] is not scheduled because is already done", requestId);
      }
      return;
   }
   if (trace) {
      log.tracef("Request[%s] being scheduled to be completed in [%d, %s]", requestId, time, unit);
   }
   ScheduledFuture<?> scheduledFuture = scheduledExecutorService.schedule(() -> {
      request.complete(false);
      scheduledRequests.remove(requestId);
   }, time, unit);
   // Register atomically. The original containsKey() check followed by
   // putIfAbsent() was racy: two threads scheduling the same id could both pass
   // the check, and the loser's ScheduledFuture was silently leaked. Here the
   // putIfAbsent result detects the duplicate and the losing future is cancelled.
   if (scheduledRequests.putIfAbsent(requestId, new ScheduledRequest(request, scheduledFuture)) != null) {
      scheduledFuture.cancel(false);
      String message = String.format("Request[%s] is not scheduled because it is already scheduled", requestId);
      log.error(message);
      throw new IllegalStateException(message);
   }
}
Schedules a request for completion
22,103
/**
 * Aborts the scheduled timeout for {@code requestId}. The scheduled task is
 * cancelled only when the request has already completed, or when {@code force}
 * is set.
 *
 * @param requestId id of the scheduled request
 * @param force     cancel even if the request has not completed yet
 */
public void abortScheduling(String requestId, boolean force) {
   if (trace) {
      log.tracef("Request[%s] abort scheduling", requestId);
   }
   ScheduledRequest scheduled = scheduledRequests.get(requestId);
   if (scheduled == null) {
      return;
   }
   if (force || scheduled.request.isDone()) {
      scheduled.scheduledFuture.cancel(false);
      scheduledRequests.remove(requestId);
   }
}
Aborts the scheduled request . If force is true it will abort even if the request is not completed
22,104
/**
 * Adds a named parameter to this task context, creating the parameter map on
 * first use.
 *
 * @param name  parameter name
 * @param value parameter value
 * @return this context (via the {@code parameters} setter)
 */
public TaskContext addParameter(String name, Object value) {
   @SuppressWarnings("unchecked")
   Map<String, Object> params = (Map<String, Object>) parameters.orElseGet(HashMap::new);
   params.put(name, value);
   return parameters(params);
}
Adds a named parameter to the task context
22,105
/**
 * Removes every entry that maps to one of the given segments. Segments backed
 * by a per-segment map are cleared directly; keys of segments without a map are
 * removed from the non-owned entries by recomputing each key's segment.
 *
 * @param segments the segments to clear
 */
public void clear(IntSet segments) {
   IntSet missing = null;
   for (PrimitiveIterator.OfInt it = segments.iterator(); it.hasNext(); ) {
      int segment = it.nextInt();
      ConcurrentMap<K, InternalCacheEntry<K, V>> segmentMap = maps.get(segment);
      if (segmentMap != null) {
         segmentMap.clear();
      } else {
         if (missing == null) {
            missing = IntSets.mutableEmptySet(segments.size());
         }
         missing.set(segment);
      }
   }
   if (missing != null) {
      IntSet toRemove = missing;
      nonOwnedEntries.keySet().removeIf(key -> toRemove.contains(getSegmentForKey(key)));
   }
}
Removes all entries that map to the given segments
22,106
/**
 * Starts the internal "known classes" cache lazily, using double-checked
 * locking so only the first caller pays the registration cost. Registers the
 * cache as PERSISTENT, then caches an advanced handle with SKIP_LOCKING and
 * IGNORE_RETURN_VALUES and builds the transaction helper from its TM.
 *
 * NOTE(review): double-checked locking is only safe if the knownClassesCache
 * field is declared volatile — confirm at the field declaration.
 */
private void startInternalCache() {
   if (knownClassesCache == null) {
      synchronized (this) {
         if (knownClassesCache == null) {
            internalCacheRegistry.registerInternalCache(QUERY_KNOWN_CLASSES_CACHE_NAME, getInternalCacheConfig(),
                  EnumSet.of(InternalCacheRegistry.Flag.PERSISTENT));
            Cache<KeyValuePair<String, Class<?>>, Boolean> cache =
                  SecurityActions.getCache(cacheManager, QUERY_KNOWN_CLASSES_CACHE_NAME);
            this.knownClassesCache = cache.getAdvancedCache().withFlags(Flag.SKIP_LOCKING, Flag.IGNORE_RETURN_VALUES);
            transactionHelper = new TransactionHelper(this.knownClassesCache.getTransactionManager());
         }
      }
   }
}
Start the internal cache lazily .
22,107
/**
 * Builds the configuration for the internal cache: replicated when the cache
 * manager is clustered and local otherwise, transactional with invocation
 * batching, and with authorization disabled.
 *
 * @return the built configuration
 */
private Configuration getInternalCacheConfig() {
   boolean clustered = cacheManager.getGlobalComponentRegistry().getGlobalConfiguration().isClustered();
   ConfigurationBuilder builder = new ConfigurationBuilder();
   builder.clustering().cacheMode(clustered ? CacheMode.REPL_SYNC : CacheMode.LOCAL);
   builder.transaction()
         .transactionMode(TransactionMode.TRANSACTIONAL)
         .transactionManagerLookup(null)
         .invocationBatching().enable();
   builder.security().authorization().disable();
   return builder.build();
}
Create the configuration for the internal cache .
22,108
/**
 * Operation handler that invokes a cache command for a management operation.
 * Walks the operation address past the Infinispan subsystem element to extract
 * the cache-container and cache names, then — only on a normal server — looks
 * up the cache service, invokes the command, and stores a non-null result on
 * the context.
 *
 * NOTE(review): the while loop assumes the subsystem element is present in the
 * address; a malformed address would exhaust the iterator — confirm callers
 * guarantee this.
 *
 * @throws OperationFailedException if invoking the command fails
 */
public void execute ( OperationContext context , ModelNode operation ) throws OperationFailedException { final PathAddress address = PathAddress . pathAddress ( operation . require ( OP_ADDR ) ) ; ListIterator < PathElement > iterator = address . iterator ( ) ; PathElement element = iterator . next ( ) ; while ( ! element . getValue ( ) . equals ( InfinispanSubsystem . SUBSYSTEM_NAME ) ) { element = iterator . next ( ) ; } final String cacheContainerName = iterator . next ( ) . getValue ( ) ; final String cacheName = iterator . next ( ) . getValue ( ) ; if ( context . isNormalServer ( ) ) { final ServiceController < ? > controller = context . getServiceRegistry ( false ) . getService ( CacheServiceName . CACHE . getServiceName ( cacheContainerName , cacheName ) ) ; Cache < ? , ? > cache = ( Cache < ? , ? > ) controller . getValue ( ) ; ModelNode operationResult ; try { operationResult = invokeCommand ( cache , operation , context ) ; } catch ( Exception e ) { throw new OperationFailedException ( MESSAGES . failedToInvokeOperation ( e . getLocalizedMessage ( ) ) , e ) ; } if ( operationResult != null ) { context . getResult ( ) . set ( operationResult ) ; } } }
An attribute write handler which performs cache operations
22,109
/**
 * Attempts to commit this transaction by running the prepare phase followed by
 * the second (commit) phase.
 *
 * @throws IllegalStateException if the transaction is already finished
 */
public void commit() throws RollbackException, HeuristicMixedException, HeuristicRollbackException,
      SecurityException, SystemException {
   if (trace) {
      log.tracef("Transaction.commit() invoked in transaction with Xid=%s", xid);
   }
   if (isDone()) {
      throw new IllegalStateException("Transaction is done. Cannot commit transaction.");
   }
   runPrepare();
   runCommit(false);
}
Attempt to commit this transaction .
22,110
/**
 * Rolls back this transaction: marks it rollback-only, ends enlisted resources
 * and runs the second commit phase (which rolls back due to the marker).
 * Heuristic outcomes are wrapped in a SystemException; the RollbackException
 * raised by the rollback path itself is expected and only traced.
 *
 * @throws IllegalStateException if the transaction is already finished
 * @throws SystemException       on heuristic failures during rollback
 */
public void rollback() throws IllegalStateException, SystemException {
   if (trace) {
      log.tracef("Transaction.rollback() invoked in transaction with Xid=%s", xid);
   }
   if (isDone()) {
      throw new IllegalStateException("Transaction is done. Cannot rollback transaction");
   }
   try {
      status = Status.STATUS_MARKED_ROLLBACK;
      endResources();
      runCommit(false);
   } catch (HeuristicMixedException | HeuristicRollbackException e) {
      log.errorRollingBack(e);
      SystemException wrapped = new SystemException("Unable to rollback transaction");
      wrapped.initCause(e);
      throw wrapped;
   } catch (RollbackException e) {
      // expected: runCommit signals the rollback outcome with this exception
      if (trace) {
         log.trace("RollbackException thrown while rolling back", e);
      }
   }
}
Rolls back this transaction .
22,111
/**
 * Marks the transaction so that the only possible outcome is a rollback.
 *
 * @throws IllegalStateException if the transaction is already finished
 */
public void setRollbackOnly() throws IllegalStateException, SystemException {
   if (trace) {
      log.tracef("Transaction.setRollbackOnly() invoked in transaction with Xid=%s", xid);
   }
   if (isDone()) {
      throw new IllegalStateException("Transaction is done. Cannot change status");
   }
   markRollbackOnly(new RollbackException("Transaction marked as rollback only."));
}
Mark the transaction so that the only possible outcome is a rollback .
22,112
/**
 * Enlists an XA resource with this transaction. A resource reporting the same
 * resource manager (isSameRM) as an already-enlisted one is ignored. After
 * registration, XAResource.start() is invoked; a rollback error code marks the
 * transaction rollback-only and rethrows as RollbackException, any other
 * XAException becomes a SystemException.
 *
 * NOTE(review): the XAException from isSameRM() is deliberately swallowed —
 * the resource is then treated as a distinct RM and enlisted anyway.
 *
 * @return always {@code true} (resource accepted or already present)
 * @throws RollbackException if the resource rolled back the transaction on start
 * @throws SystemException   on any other XA failure during start
 */
public boolean enlistResource ( XAResource resource ) throws RollbackException , IllegalStateException , SystemException { if ( trace ) { log . tracef ( "Transaction.enlistResource(%s) invoked in transaction with Xid=%s" , resource , xid ) ; } checkStatusBeforeRegister ( "resource" ) ; for ( Map . Entry < XAResource , Integer > otherResourceEntry : resources ) { try { if ( otherResourceEntry . getKey ( ) . isSameRM ( resource ) ) { log . debug ( "Ignoring resource. It is already there." ) ; return true ; } } catch ( XAException e ) { } } resources . add ( new AbstractMap . SimpleEntry < > ( resource , null ) ) ; try { if ( trace ) { log . tracef ( "XaResource.start() invoked in transaction with Xid=%s" , xid ) ; } resource . start ( xid , XAResource . TMNOFLAGS ) ; } catch ( XAException e ) { if ( isRollbackCode ( e ) ) { RollbackException exception = newRollbackException ( format ( "Resource %s rolled back the transaction while XaResource.start()" , resource ) , e ) ; markRollbackOnly ( exception ) ; log . errorEnlistingResource ( e ) ; throw exception ; } log . errorEnlistingResource ( e ) ; throw new SystemException ( e . getMessage ( ) ) ; } return true ; }
Enlist an XA resource with this transaction .
22,113
/**
 * Runs the second phase of the two-phase-commit protocol: rolls back when the
 * transaction is marked rollback-only (or {@code forceRollback} is set),
 * commits otherwise. Completion listeners are always notified, and the
 * thread-local transaction association is always cleared.
 *
 * @param forceRollback when true, mark the transaction rollback-only first
 */
public void runCommit(boolean forceRollback) throws HeuristicMixedException, HeuristicRollbackException, RollbackException {
   if (trace) {
      log.tracef("runCommit(forceRollback=%b) invoked in transaction with Xid=%s", forceRollback, xid);
   }
   if (forceRollback) {
      markRollbackOnly(new RollbackException(FORCE_ROLLBACK_MESSAGE));
   }
   boolean rollingBack = status == Status.STATUS_MARKED_ROLLBACK;
   int notifyAfterStatus = rollingBack ? Status.STATUS_ROLLEDBACK : Status.STATUS_COMMITTED;
   try {
      if (rollingBack) {
         rollbackResources();
      } else {
         commitResources();
      }
   } finally {
      // listeners and the TM thread-association must be cleaned up on all paths
      notifyAfterCompletion(notifyAfterStatus);
      DummyBaseTransactionManager.setTransaction(null);
   }
   throwRollbackExceptionIfAny(forceRollback);
}
Runs the second phase of two - phase - commit protocol .
22,114
/**
 * Performs the inverse transformation of the string forms produced by the
 * Lucene Directory key types, reconstructing the original key object. The
 * implementation is strongly coupled to each key type's toString() format:
 * the leading pipe-separated token selects the type —
 * "C" → ChunkCacheKey (fileName|chunkId|bufferSize|indexName|affinitySegment),
 * "M" → FileCacheKey, "*" → FileListCacheKey, "RL" → FileReadLockKey.
 *
 * @param key the string form of a key; must not be null
 * @return the reconstructed key object
 * @throws IllegalArgumentException if {@code key} is null
 * @throws CacheException (via log.keyMappperUnexpectedStringFormat) on any
 *         token-count or type-prefix mismatch
 */
public Object getKeyMapping ( String key ) { if ( key == null ) { throw new IllegalArgumentException ( "Not supporting null keys" ) ; } String [ ] split = singlePipePattern . split ( key ) ; switch ( split [ 0 ] ) { case "C" : { if ( split . length != 6 ) { throw log . keyMappperUnexpectedStringFormat ( key ) ; } final String indexName = split [ 4 ] ; final String fileName = split [ 1 ] ; final int chunkId = toInt ( split [ 2 ] , key ) ; final int bufferSize = toInt ( split [ 3 ] , key ) ; final int affinitySegmentId = toInt ( split [ 5 ] , key ) ; return new ChunkCacheKey ( indexName , fileName , chunkId , bufferSize , affinitySegmentId ) ; } case "M" : { if ( split . length != 4 ) throw log . keyMappperUnexpectedStringFormat ( key ) ; final String indexName = split [ 2 ] ; final String fileName = split [ 1 ] ; final int affinitySegmentId = toInt ( split [ 3 ] , key ) ; return new FileCacheKey ( indexName , fileName , affinitySegmentId ) ; } case "*" : { if ( split . length != 3 ) throw log . keyMappperUnexpectedStringFormat ( key ) ; final String indexName = split [ 1 ] ; final int affinitySegmentId = toInt ( split [ 2 ] , key ) ; return new FileListCacheKey ( indexName , affinitySegmentId ) ; } case "RL" : { if ( split . length != 4 ) throw log . keyMappperUnexpectedStringFormat ( key ) ; final String indexName = split [ 2 ] ; final String fileName = split [ 1 ] ; final int affinitySegmentId = toInt ( split [ 3 ] , key ) ; return new FileReadLockKey ( indexName , fileName , affinitySegmentId ) ; } default : throw log . keyMappperUnexpectedStringFormat ( key ) ; } }
This method has to perform the inverse transformation of the keys used in the Lucene Directory from String to object . So this implementation is strongly coupled to the toString method of each key type .
22,115
/**
 * Asks the TransactionManager to suspend any transaction associated with the
 * current thread. A no-op when no TM is present.
 *
 * @return the suspended transaction, or {@code null} if there was none
 */
public Transaction suspend() {
   if (tm == null) {
      return null;
   }
   try {
      return tm.suspend();
   } catch (SystemException se) {
      throw log.cannotSuspendTx(se);
   }
}
Tell the TransactionManager to suspend any ongoing transaction .
22,116
/**
 * Asks the TransactionManager to resume the given transaction on the current
 * thread. A no-op when {@code tx} is null or no TM is present — the latter
 * mirrors {@link #suspend()}, which also treats a missing TM as a no-op.
 * (Previously a null TM raised an NPE that was wrapped in cannotResumeTx.)
 *
 * @param tx the transaction to resume, may be null
 */
public void resume(Transaction tx) {
   if (tx == null || tm == null) {
      return;
   }
   try {
      tm.resume(tx);
   } catch (Exception e) {
      throw log.cannotResumeTx(e);
   }
}
Tell the TransactionManager to resume the given transaction
22,117
/**
 * Returns a copy of the LIST {@code list} with {@code alias} appended if it is
 * not already present. A null or empty alias returns the original list
 * unchanged.
 *
 * NOTE(review): an undefined input list is initialised in place (side effect on
 * the caller's node) before being cloned — confirm this is intended.
 */
private ModelNode addNewAliasToList(ModelNode list, String alias) {
   if (alias == null || alias.isEmpty()) {
      return list;
   }
   if (!list.isDefined()) {
      list.setEmptyList();
   }
   ModelNode result = list.clone();
   boolean present = false;
   for (ModelNode element : list.asList()) {
      if (element.asString().equals(alias)) {
         present = true;
      }
   }
   if (!present) {
      result.add().set(alias);
   }
   return result;
}
Adds new alias to a LIST ModelNode of existing aliases .
22,118
/**
 * Looks up the (optional) QueryCache component from the cache's global
 * component registry; may be null if the component is not registered.
 */
public static QueryCache getQueryCache(Cache<?, ?> cache) {
   return SecurityActions.getCacheGlobalComponentRegistry(cache.getAdvancedCache())
         .getComponent(QueryCache.class);
}
Returns the optional QueryCache .
22,119
/**
 * Returns the child node registered for {@code attribute}, creating and
 * registering a new one (and rebuilding the children array) when absent.
 */
public AttributeNode<AttributeMetadata, AttributeId> addChild(AttributeId attribute) {
   if (children == null) {
      children = new HashMap<>();
   }
   AttributeNode<AttributeMetadata, AttributeId> child = children.get(attribute);
   if (child == null) {
      child = new AttributeNode<>(attribute, this);
      children.put(attribute, child);
      rebuildChildrenArray();
   }
   return child;
}
Add a child node for the given attribute . If the child already exists the existing child is returned unchanged .
22,120
/**
 * Removes the child node registered for {@code attribute} and rebuilds the
 * children array.
 *
 * @throws IllegalArgumentException if no such child exists
 */
public void removeChild(AttributeId attribute) {
   AttributeNode<AttributeMetadata, AttributeId> child = children == null ? null : children.get(attribute);
   if (child == null) {
      throw new IllegalArgumentException("No child found : " + attribute);
   }
   children.remove(attribute);
   rebuildChildrenArray();
}
Removes the child node registered for the given attribute , throwing IllegalArgumentException if no such child exists .
22,121
/**
 * Establishes the validation order for the transaction of a prepare command so
 * the prepare can be moved to a thread pool. For each modified key, the
 * transaction's latch replaces the previous holder in {@code keysLocked}; any
 * displaced latch becomes a dependency the transaction must wait on. An
 * in-progress state transfer latch is also added as a dependency.
 *
 * NOTE(review): statement order matters here (awaitUntilReset before
 * publishing the new latch); do not reorder.
 *
 * @throws InterruptedException if interrupted while waiting for the state reset
 */
public final void ensureOrder ( TotalOrderRemoteTransactionState state , Collection < ? > keysModified ) throws InterruptedException { state . awaitUntilReset ( ) ; TotalOrderLatch transactionSynchronizedBlock = new TotalOrderLatchImpl ( state . getGlobalTransaction ( ) . globalId ( ) ) ; state . setTransactionSynchronizedBlock ( transactionSynchronizedBlock ) ; for ( Object key : keysModified ) { TotalOrderLatch prevTx = keysLocked . put ( key , transactionSynchronizedBlock ) ; if ( prevTx != null ) { state . addSynchronizedBlock ( prevTx ) ; } state . addLockedKey ( key ) ; } TotalOrderLatch stateTransfer = stateTransferInProgress . get ( ) ; if ( stateTransfer != null ) { state . addSynchronizedBlock ( stateTransfer ) ; } if ( trace ) { log . tracef ( "Transaction [%s] will wait for %s and locked %s" , state . getGlobalTransaction ( ) . globalId ( ) , state . getConflictingTransactionBlocks ( ) , state . getLockedKeys ( ) == null ? "[ClearCommand]" : state . getLockedKeys ( ) ) ; } }
It ensures the validation order for the transaction corresponding to the prepare command . This allow the prepare command to be moved to a thread pool .
22,122
/**
 * Unblocks this transaction's latch and releases its locked keys, possibly
 * allowing pending prepares to proceed. A no-op when no latch was associated.
 */
public final void release(TotalOrderRemoteTransactionState state) {
   TotalOrderLatch latch = state.getTransactionSynchronizedBlock();
   if (latch == null) {
      return;
   }
   Collection<Object> lockedKeys = state.getLockedKeys();
   latch.unBlock();
   for (Object key : lockedKeys) {
      // only remove if this latch is still the registered holder for the key
      keysLocked.remove(key, latch);
   }
   if (trace) {
      log.tracef("Release %s and locked keys %s. Checking pending tasks!", latch, lockedKeys);
   }
   state.reset();
}
Release the locked key possibly unblock waiting prepares .
22,123
/**
 * Signals that a state transfer is about to start and returns the latches of
 * the currently-preparing transactions that the transfer must wait for.
 * Returns an empty list when a state transfer is already in progress; the
 * in-progress latch is only installed for rebalances.
 */
public final Collection<TotalOrderLatch> notifyStateTransferStart(int topologyId, boolean isRebalance) {
   if (stateTransferInProgress.get() != null) {
      return Collections.emptyList();
   }
   List<TotalOrderLatch> pendingPrepares = new ArrayList<>(keysLocked.size());
   pendingPrepares.addAll(keysLocked.values());
   if (isRebalance) {
      stateTransferInProgress.set(new TotalOrderLatchImpl("StateTransfer-" + topologyId));
   }
   if (trace) {
      log.tracef("State Transfer start. It will wait for %s", pendingPrepares);
   }
   return pendingPrepares;
}
It notifies that a state transfer is about to start .
22,124
/**
 * Signals the end of the state transfer: unblocks the in-progress latch (if
 * any) and re-checks the executor for tasks that became runnable.
 */
public final void notifyStateTransferEnd() {
   TotalOrderLatch latch = stateTransferInProgress.getAndSet(null);
   if (latch != null) {
      latch.unBlock();
   }
   if (trace) {
      log.tracef("State Transfer finish. It will release %s", latch);
   }
   totalOrderExecutor.checkForReadyTasks();
}
It notifies the end of the state transfer possibly unblock waiting prepares .
22,125
/**
 * Parses a JSON document and applies it to the supplied builder, starting from
 * an empty element path.
 */
public void readJson(ConfigurationBuilderInfo builderInfo, String json) {
   Json parsed = Json.read(json);
   readJson(builderInfo, "", parsed);
}
Parses a JSON document into the supplied builder .
22,126
/**
 * Formats the given pattern with this version's domain, major and minor parts
 * (in that argument order).
 */
String format(String pattern) {
   return String.format(pattern, domain, major, minor);
}
Formats a string using the specified pattern .
22,127
/**
 * Walks the delegate chain looking for the first DecoratedCache. Returns null
 * when the chain ends (a non-delegating cache) without finding one.
 */
private DecoratedCache<K, V> findDecoratedCache(Cache<K, V> cache) {
   Cache<K, V> current = cache;
   while (current instanceof AbstractDelegatingCache) {
      if (current instanceof DecoratedCache) {
         return (DecoratedCache<K, V>) current;
      }
      current = ((AbstractDelegatingCache<K, V>) current).getDelegate();
   }
   return null;
}
Finds the first decorated cache among the surrounding delegates , or returns null if none is found .
22,128
/**
 * Listener callback: deserializes the Call payload written under {@code key}
 * and hands it to the call executor. Events for other keys and pre-events are
 * ignored.
 */
@SuppressWarnings({"rawtypes", "unchecked"})
public void onCacheModification(CacheEntryEvent event) {
   if (!event.getKey().equals(key)) return;
   if (event.isPre()) return;
   try {
      GenericJBossMarshaller marshaller = new GenericJBossMarshaller();
      byte[] bb = (byte[]) event.getValue();
      Call call = (Call) marshaller.objectFromByteBuffer(bb);
      if (log.isDebugEnabled())
         log.debugf("Receive %s (isOriginLocal=%s) ", call, event.isOriginLocal());
      callExecutor.execute(new AtomicObjectContainerTask(call));
   } catch (Exception e) {
      // was e.printStackTrace(): route the failure through the logger instead
      // of writing straight to stderr
      log.error("Error handling cache modification event", e);
   }
}
Internal use of the listener API .
22,129
/**
 * Returns the explicitly configured ConsistentHashFactory, or chooses a
 * suitable one from the cache mode: topology-aware/sync for distributed,
 * sync-replicated for replicated and invalidation, scattered for scattered.
 * Returns null for non-clustered modes with no explicit factory.
 *
 * @throws CacheException on an unrecognized clustered cache mode
 */
public static ConsistentHashFactory pickConsistentHashFactory(GlobalConfiguration globalConfiguration, Configuration configuration) {
   ConsistentHashFactory factory = configuration.clustering().hash().consistentHashFactory();
   if (factory != null) {
      return factory;
   }
   CacheMode cacheMode = configuration.clustering().cacheMode();
   if (!cacheMode.isClustered()) {
      return null;
   }
   if (cacheMode.isDistributed()) {
      return globalConfiguration.transport().hasTopologyInfo()
            ? new TopologyAwareSyncConsistentHashFactory()
            : new SyncConsistentHashFactory();
   }
   if (cacheMode.isReplicated() || cacheMode.isInvalidation()) {
      return new SyncReplicatedConsistentHashFactory();
   }
   if (cacheMode.isScattered()) {
      return new ScatteredConsistentHashFactory();
   }
   throw new CacheException("Unexpected cache mode: " + cacheMode);
}
If no ConsistentHashFactory was explicitly configured we choose a suitable one based on cache mode .
22,130
/**
 * Decorates each consistent hash in the given topology (current, pending and
 * union, when present) with the key partitioner, returning a new topology with
 * all other attributes copied over.
 */
private CacheTopology addPartitioner(CacheTopology cacheTopology) {
   ConsistentHash currentCH = new PartitionerConsistentHash(cacheTopology.getCurrentCH(), keyPartitioner);
   ConsistentHash pendingCH = cacheTopology.getPendingCH() == null ? null
         : new PartitionerConsistentHash(cacheTopology.getPendingCH(), keyPartitioner);
   ConsistentHash unionCH = cacheTopology.getUnionCH() == null ? null
         : new PartitionerConsistentHash(cacheTopology.getUnionCH(), keyPartitioner);
   return new CacheTopology(cacheTopology.getTopologyId(), cacheTopology.getRebalanceId(), currentCH, pendingCH,
         unionCH, cacheTopology.getPhase(), cacheTopology.getActualMembers(),
         cacheTopology.getMembersPersistentUUIDs());
}
Decorates the given cache topology to add a key partitioner .
22,131
/**
 * Registers the global transaction table as an internal cache: replicated when
 * the manager is clustered, local otherwise, and non-transactional.
 */
private void registerGlobalTxTable(GlobalComponentRegistry globalComponentRegistry) {
   InternalCacheRegistry registry = globalComponentRegistry.getComponent(InternalCacheRegistry.class);
   boolean clustered = globalComponentRegistry.getGlobalConfiguration().isClustered();
   ConfigurationBuilder builder = new ConfigurationBuilder();
   builder.clustering().cacheMode(clustered ? CacheMode.REPL_SYNC : CacheMode.LOCAL);
   builder.transaction().transactionMode(TransactionMode.NON_TRANSACTIONAL);
   registry.registerInternalCache(GLOBAL_TX_TABLE_CACHE_NAME, builder.build(),
         EnumSet.noneOf(InternalCacheRegistry.Flag.class));
}
Creates the global transaction internal cache .
22,132
/**
 * Registers an internal cache definition. Synchronized to prevent concurrent
 * registration of the same configuration. Fails when a user configuration with
 * the same name already exists (unless this exact internal cache was already
 * registered non-exclusively); silently returns when the configuration is
 * already present. JMX statistics are disabled; the GLOBAL flag upgrades a
 * clustered manager's cache to REPL_SYNC with state transfer, and PERSISTENT
 * (with global state enabled) adds a preloading single-file store. Caches
 * without the USER flag are additionally tracked as private.
 *
 * @throws CacheException (via log.existingConfigForInternalCache) when a
 *         conflicting configuration already exists
 */
public synchronized void registerInternalCache ( String name , Configuration configuration , EnumSet < Flag > flags ) { boolean configPresent = cacheManager . getCacheConfiguration ( name ) != null ; if ( ( flags . contains ( Flag . EXCLUSIVE ) || ! internalCaches . containsKey ( name ) ) && configPresent ) { throw log . existingConfigForInternalCache ( name ) ; } if ( configPresent ) { return ; } ConfigurationBuilder builder = new ConfigurationBuilder ( ) . read ( configuration ) ; builder . jmxStatistics ( ) . disable ( ) ; GlobalConfiguration globalConfiguration = cacheManager . getCacheManagerConfiguration ( ) ; if ( flags . contains ( Flag . GLOBAL ) && globalConfiguration . isClustered ( ) ) { builder . clustering ( ) . cacheMode ( CacheMode . REPL_SYNC ) . stateTransfer ( ) . fetchInMemoryState ( true ) . awaitInitialTransfer ( true ) ; } if ( flags . contains ( Flag . PERSISTENT ) && globalConfiguration . globalState ( ) . enabled ( ) ) { builder . persistence ( ) . addSingleFileStore ( ) . location ( globalConfiguration . globalState ( ) . persistentLocation ( ) ) . purgeOnStartup ( false ) . preload ( true ) . fetchPersistentState ( true ) ; } SecurityActions . defineConfiguration ( cacheManager , name , builder . build ( ) ) ; internalCaches . put ( name , flags ) ; if ( ! flags . contains ( Flag . USER ) ) { privateCaches . add ( name ) ; } }
Synchronized to prevent users from registering the same configuration at the same time
22,133
/**
 * Replicates to the next member, and invalidates on the remaining members, the
 * segments this node owned in the current topology and still owns in the
 * pending one. Completes immediately when there is no next member, the local
 * node is not in the current CH, or no such segments remain. The returned
 * future completes when the outbound transfer and all triggered invalidations
 * (tracked by the shared counter) have finished.
 *
 * NOTE(review): the counter starts at 1 for the transfer itself; each chunk
 * invalidation increments it — presumably inside invalidateChunks; confirm.
 */
private CompletableFuture < Void > replicateAndInvalidate ( CacheTopology cacheTopology ) { Address nextMember = getNextMember ( cacheTopology ) ; if ( nextMember != null ) { HashSet < Address > otherMembers = new HashSet < > ( cacheTopology . getActualMembers ( ) ) ; Address localAddress = rpcManager . getAddress ( ) ; otherMembers . remove ( localAddress ) ; otherMembers . remove ( nextMember ) ; IntSet oldSegments ; if ( cacheTopology . getCurrentCH ( ) . getMembers ( ) . contains ( localAddress ) ) { oldSegments = IntSets . from ( cacheTopology . getCurrentCH ( ) . getSegmentsForOwner ( localAddress ) ) ; oldSegments . retainAll ( cacheTopology . getPendingCH ( ) . getSegmentsForOwner ( localAddress ) ) ; } else { log . trace ( "Local address is not a member of currentCH, returning" ) ; return CompletableFutures . completedNull ( ) ; } log . trace ( "Segments to replicate and invalidate: " + oldSegments ) ; if ( oldSegments . isEmpty ( ) ) { return CompletableFutures . completedNull ( ) ; } AtomicInteger outboundInvalidations = new AtomicInteger ( 1 ) ; CompletableFuture < Void > outboundTaskFuture = new CompletableFuture < > ( ) ; OutboundTransferTask outboundTransferTask = new OutboundTransferTask ( nextMember , oldSegments , cacheTopology . getCurrentCH ( ) . getNumSegments ( ) , chunkSize , cacheTopology . getTopologyId ( ) , keyPartitioner , task -> { if ( outboundInvalidations . decrementAndGet ( ) == 0 ) { outboundTaskFuture . complete ( null ) ; } } , chunks -> invalidateChunks ( chunks , otherMembers , outboundInvalidations , outboundTaskFuture , cacheTopology ) , OutboundTransferTask :: defaultMapEntryFromDataContainer , OutboundTransferTask :: defaultMapEntryFromStore , dataContainer , persistenceManager , rpcManager , commandsFactory , entryFactory , timeout , cacheName , true , true ) ; outboundTransferTask . execute ( executorService ) ; return outboundTaskFuture ; } else { return CompletableFutures . completedNull ( ) ; } }
Replicates to the next member , and invalidates on the other members , the segments that this node keeps from the previous topology .
22,134
/**
 * Creates a mutable Set containing the given entries.
 *
 * @param entries elements to put in the set (duplicates collapse)
 * @return a new mutable HashSet with the entries
 */
@SafeVarargs // the varargs array is only read, never stored or exposed
public static <T> Set<T> makeSet(T... entries) {
   return new HashSet<>(Arrays.asList(entries));
}
Create a Set backed by the specified array .
22,135
/**
 * Computes the new capacity for the internal buffer: double while the current
 * size is at or below maxDoublingSize, otherwise grow by 25%; never smaller
 * than {@code minNewSize}.
 */
public final int getNewBufferSize(int curSize, int minNewSize) {
   int grown = curSize <= maxDoublingSize ? curSize << 1 : curSize + (curSize >> 2);
   return Math.max(grown, minNewSize);
}
Gets the number of bytes to which the internal buffer should be resized .
22,136
/**
 * Wraps a closeable iterator so it can be used as a closeable spliterator;
 * closing the spliterator closes the underlying iterator.
 */
public static <E> CloseableSpliterator<E> spliterator(CloseableIterator<? extends E> iterator, long size,
      int characteristics) {
   return new CloseableIteratorAsCloseableSpliterator<>(iterator, size, characteristics);
}
Takes a provided closeable iterator and wraps it appropriately so it can be used as a closeable spliterator that will close the iterator when the spliterator is closed .
22,137
/**
 * Adapts a plain spliterator to the CloseableSpliterator interface with a
 * no-op close. Already-closeable spliterators are returned as-is.
 */
public static <T> CloseableSpliterator<T> spliterator(Spliterator<T> spliterator) {
   if (spliterator instanceof CloseableSpliterator) {
      return (CloseableSpliterator<T>) spliterator;
   }
   return new SpliteratorAsCloseableSpliterator<>(spliterator);
}
Creates a closeable spliterator from the given spliterator that does nothing when close is called .
22,138
/**
 * Creates a closeable spliterator over the stream's elements; closing it also
 * closes the underlying stream. An already-closeable spliterator from the
 * stream is returned directly.
 */
public static <R> CloseableSpliterator<R> spliterator(BaseStream<R, Stream<R>> stream) {
   Spliterator<R> underlying = stream.spliterator();
   if (underlying instanceof CloseableSpliterator) {
      return (CloseableSpliterator<R>) underlying;
   }
   return new StreamToCloseableSpliterator<>(stream, underlying);
}
Creates a closeable spliterator that when closed will close the underlying stream as well
22,139
/**
 * Creates a closeable iterator over the stream's elements; closing it also
 * closes the underlying stream. An already-closeable iterator from the stream
 * is returned directly.
 */
public static <R> CloseableIterator<R> iterator(BaseStream<R, Stream<R>> stream) {
   Iterator<R> underlying = stream.iterator();
   if (underlying instanceof CloseableIterator) {
      return (CloseableIterator<R>) underlying;
   }
   return new StreamToCloseableIterator<>(stream, underlying);
}
Creates a closeable iterator that when closed will close the underlying stream as well
22,140
/**
 * Adapts a plain iterator to the CloseableIterator interface with a no-op
 * close. Already-closeable iterators are returned as-is.
 */
public static <E> CloseableIterator<E> iterator(Iterator<? extends E> iterator) {
   if (iterator instanceof CloseableIterator) {
      return (CloseableIterator<E>) iterator;
   }
   return new IteratorAsCloseableIterator<>(iterator);
}
Creates a closeable iterator from the given iterator that does nothing when close is called .
22,141
/**
 * Creates a stream over the spliterator's elements; closing the stream also
 * closes the spliterator.
 *
 * @param spliterator     source of elements, closed when the stream is closed
 * @param parallel        whether the returned stream is parallel
 * @return the (possibly parallel) stream with the close handler attached
 */
public static <E> Stream<E> stream(CloseableSpliterator<E> spliterator, boolean parallel) {
   // onClose is specified to return an equivalent stream with the handler
   // attached, so chain it rather than discarding the return value
   return StreamSupport.stream(spliterator, parallel).onClose(spliterator::close);
}
Creates a stream that when closed will also close the underlying spliterator
22,142
/**
 * Creates a stream over the iterator's elements; closing the stream also
 * closes the iterator.
 *
 * @param iterator        source of elements, closed when the stream is closed
 * @param parallel        whether the returned stream is parallel
 * @param size            estimated element count for the spliterator
 * @param characteristics spliterator characteristics flags
 * @return the (possibly parallel) stream with the close handler attached
 */
public static <E> Stream<E> stream(CloseableIterator<E> iterator, boolean parallel, long size, int characteristics) {
   // onClose is specified to return an equivalent stream with the handler
   // attached, so chain it rather than discarding the return value
   return StreamSupport.stream(Spliterators.spliterator(iterator, size, characteristics), parallel)
         .onClose(iterator::close);
}
Creates a stream that when closed will also close the underlying iterator
22,143
/**
 * Cancels the transfer of the given segments. If every remaining segment gets
 * cancelled, the whole task is cancelled.
 */
public void cancelSegments(IntSet cancelledSegments) {
   if (!segments.removeAll(cancelledSegments)) {
      return;
   }
   if (trace) {
      log.tracef("Cancelling outbound transfer to node %s, segments %s (remaining segments %s)",
            destination, cancelledSegments, segments);
   }
   entriesBySegment.keySet().removeAll(cancelledSegments);
   if (segments.isEmpty()) {
      cancel();
   }
}
Cancel some of the segments . If all segments get cancelled then the whole task will be cancelled .
22,144
/**
 * Cancels the whole outbound transfer task, interrupting it if running.
 * No-op when the task was never started or is already cancelled.
 */
public void cancel() {
   if (runnableFuture == null || runnableFuture.isCancelled()) {
      return;
   }
   log.debugf("Cancelling outbound transfer to node %s, segments %s", destination, segments);
   runnableFuture.cancel(true);
}
Cancel the whole task .
22,145
/**
 * Unmarshalls an array: reads the encoded size (NULL_VALUE means a null
 * array), builds the array via the supplied builder, and reads each element.
 *
 * @throws NullPointerException if {@code builder} is null and the array is non-null
 */
public static <E> E[] unmarshallArray(ObjectInput in, ArrayBuilder<E> builder) throws IOException, ClassNotFoundException {
   final int size = unmarshallSize(in);
   if (size == NULL_VALUE) {
      return null;
   }
   Objects.requireNonNull(builder, "ArrayBuilder must be non-null");
   final E[] array = builder.build(size);
   for (int i = 0; i < size; ++i) {
      array[i] = (E) in.readObject();
   }
   return array;
}
Unmarshall arrays .
22,146
/**
 * Unmarshalls a variable-length non-negative integer. Encoding of the first
 * byte: bit 7 set means the NULL_VALUE marker; bits 0-5 hold the low 6 bits of
 * the value; bit 6 set means continuation bytes follow. Each continuation byte
 * contributes 7 more bits (bits 0-6), with bit 7 as its continuation flag.
 *
 * @return the decoded integer, or NULL_VALUE for the null marker
 */
public static int unmarshallSize ( ObjectInput in ) throws IOException { byte b = in . readByte ( ) ; if ( ( b & 0x80 ) != 0 ) { return NULL_VALUE ; } int i = b & 0x3F ; if ( ( b & 0x40 ) == 0 ) { return i ; } int shift = 6 ; do { b = in . readByte ( ) ; i |= ( b & 0x7F ) << shift ; shift += 7 ; } while ( ( b & 0x80 ) != 0 ) ; return i ; }
Unmarshall an integer .
22,147
/**
 * Marshalls a collection of integers: writes the size (NULL_VALUE for a null
 * collection) followed by each element as a plain int.
 */
public static void marshallIntCollection(Collection<Integer> collection, ObjectOutput out) throws IOException {
   final int size = collection == null ? NULL_VALUE : collection.size();
   marshallSize(out, size);
   if (size <= 0) {
      return;
   }
   for (Integer value : collection) {
      out.writeInt(value);
   }
}
Marshalls a collection of integers .
22,148
/**
 * Unmarshalls a collection of integers written by the corresponding marshall
 * method; returns null when the null marker was written.
 *
 * @throws NullPointerException if {@code builder} is null and the collection is non-null
 */
public static <T extends Collection<Integer>> T unmarshallIntCollection(ObjectInput in, CollectionBuilder<Integer, T> builder) throws IOException {
   final int size = unmarshallSize(in);
   if (size == NULL_VALUE) {
      return null;
   }
   Objects.requireNonNull(builder, "CollectionBuilder must be non-null");
   T collection = builder.build(size);
   for (int i = 0; i < size; ++i) {
      collection.add(in.readInt());
   }
   return collection;
}
Unmarshalls a collection of integers .
22,149
/**
 * Returns true if the class name matches (via {@code find()}) any of the
 * whitelist regular expressions.
 */
public static boolean isSafeClass(String className, List<String> whitelist) {
   for (String regex : whitelist) {
      Matcher matcher = Pattern.compile(regex).matcher(className);
      if (matcher.find()) {
         if (log.isTraceEnabled()) {
            log.tracef("Whitelist match: '%s'", className);
         }
         return true;
      }
   }
   return false;
}
Checks whether class name is matched by the class name white list regular expressions provided .
22,150
/**
 * Initializes the weak counter value exactly once; subsequent calls are
 * no-ops. A null {@code currentValue} initializes from the configuration's
 * initial value, otherwise from the given value.
 */
private synchronized void initCounterState(Long currentValue) {
   if (weakCounter != null) {
      return;
   }
   weakCounter = currentValue == null
         ? newCounterValue(configuration)
         : newCounterValue(currentValue, configuration);
}
Initializes the weak value .
22,151
/**
 * Converts a textual time amount in the named unit to milliseconds.
 *
 * @param time     the amount, parsed as a long
 * @param timeUnit key into the TIMEUNITS lookup table
 */
public static long millis(final String time, final String timeUnit) {
   long amount = Long.parseLong(time);
   return TIMEUNITS.get(timeUnit).toMillis(amount);
}
Converts a time representation into milliseconds
22,152
/**
 * Constructs a ping operation directed to this particular node, using this
 * factory's codec, topology id, configuration, cache name and channel factory.
 *
 * @param releaseChannel whether the channel is released after the ping
 */
public PingOperation newPingOperation ( boolean releaseChannel ) { return new PingOperation ( codec , topologyId , cfg , cacheNameBytes , channelFactory , releaseChannel , this ) ; }
Construct a ping request directed to a particular node .
22,153
/**
 * Counts the entries present in the given segments of the segmented store,
 * clamped to Integer.MAX_VALUE. Blocks until the key publisher completes.
 */
public static int count(SegmentedAdvancedLoadWriteStore<?, ?> salws, IntSet segments) {
   long total = Flowable.fromPublisher(salws.publishKeys(segments, null)).count().blockingGet();
   return total > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) total;
}
Counts how many entries are present in the segmented store . Only the segments provided will have entries counted .
22,154
/**
 * Converts a Lucene document id from its string form back to the original key object.
 * The first character is a type discriminator (mirroring {@code keyToString}): e.g.
 * 'S' = String, 'I' = Integer, 'A' = Base64 byte[], 'U' = UUID. The payload starts at
 * index 2 (after the "X:" prefix). The 'T' case carries "T:<className>:<payload>" and
 * delegates decoding to the {@code Transformer} registered for that class, throwing
 * when none is registered; any unknown discriminator raises a CacheException.
 */
public Object stringToKey ( String s ) { char type = s . charAt ( 0 ) ; switch ( type ) { case 'S' : return s . substring ( 2 ) ; case 'I' : return Integer . valueOf ( s . substring ( 2 ) ) ; case 'Y' : return Byte . valueOf ( s . substring ( 2 ) ) ; case 'L' : return Long . valueOf ( s . substring ( 2 ) ) ; case 'X' : return Short . valueOf ( s . substring ( 2 ) ) ; case 'D' : return Double . valueOf ( s . substring ( 2 ) ) ; case 'F' : return Float . valueOf ( s . substring ( 2 ) ) ; case 'B' : return Boolean . valueOf ( s . substring ( 2 ) ) ; case 'C' : return Character . valueOf ( s . charAt ( 2 ) ) ; case 'U' : return UUID . fromString ( s . substring ( 2 ) ) ; case 'A' : return Base64 . getDecoder ( ) . decode ( s . substring ( 2 ) ) ; case 'T' : int indexOfSecondDelimiter = s . indexOf ( ':' , 2 ) ; String keyClassName = s . substring ( 2 , indexOfSecondDelimiter ) ; String keyAsString = s . substring ( indexOfSecondDelimiter + 1 ) ; Transformer t = getTransformer ( keyClassName ) ; if ( t != null ) { return t . fromString ( keyAsString ) ; } else { throw log . noTransformerForKey ( keyClassName ) ; } } throw new CacheException ( "Unknown key type metadata: " + type ) ; }
Converts a Lucene document id from string form back to the original object .
22,155
/**
 * Stringifies a key so Lucene can use it as a document id. Each supported type is
 * encoded as "<discriminator>:<payload>" (the inverse of {@code stringToKey}):
 * byte[] is Base64-encoded under 'A', primitives/UUID use their toString form.
 * Any other type requires a registered {@code Transformer} and is encoded as
 * "T:<className>:<transformed>"; without one, an exception is thrown.
 */
public String keyToString ( Object key ) { if ( key instanceof byte [ ] ) return "A:" + Base64 . getEncoder ( ) . encodeToString ( ( byte [ ] ) key ) ; if ( key instanceof String ) return "S:" + key ; else if ( key instanceof Integer ) return "I:" + key ; else if ( key instanceof Boolean ) return "B:" + key ; else if ( key instanceof Long ) return "L:" + key ; else if ( key instanceof Float ) return "F:" + key ; else if ( key instanceof Double ) return "D:" + key ; else if ( key instanceof Short ) return "X:" + key ; else if ( key instanceof Byte ) return "Y:" + key ; else if ( key instanceof Character ) return "C:" + key ; else if ( key instanceof UUID ) return "U:" + key ; else { Transformer t = getTransformer ( key . getClass ( ) ) ; if ( t != null ) { return "T:" + key . getClass ( ) . getName ( ) + ":" + t . toString ( key ) ; } else { throw log . noTransformerForKey ( key . getClass ( ) . getName ( ) ) ; } } }
Stringify a key so Lucene can use it as document id .
22,156
/**
 * Invokes the next interceptor in the chain, blocking on the result when the next
 * interceptor returned an async stage. Note the instanceof test targets
 * SimpleAsyncInvocationStage specifically but the cast is to the broader
 * InvocationStage interface; only that concrete type triggers the blocking get().
 */
public final Object invokeNextInterceptor ( InvocationContext ctx , VisitableCommand command ) throws Throwable { Object maybeStage = nextInterceptor . visitCommand ( ctx , command ) ; if ( maybeStage instanceof SimpleAsyncInvocationStage ) { return ( ( InvocationStage ) maybeStage ) . get ( ) ; } else { return maybeStage ; } }
Invokes the next interceptor in the chain . This is how interceptor implementations should pass a call up the chain to the next interceptor .
22,157
/**
 * Default behaviour for all visitXXX methods: do nothing locally and pass the
 * call up to the next interceptor in the chain.
 *
 * @param ctx     current invocation context
 * @param command command being visited
 * @return whatever the rest of the chain produces
 * @throws Throwable propagated from downstream interceptors
 */
protected Object handleDefault(InvocationContext ctx, VisitableCommand command) throws Throwable {
   // No special handling here — delegate unchanged.
   return invokeNextInterceptor(ctx, command);
}
The default behaviour of the visitXXX methods which is to ignore the call and pass the call up to the next interceptor in the chain .
22,158
/**
 * Returns the shared function that extracts the key from a map entry.
 *
 * @param <K> entry key type
 * @param <V> entry value type
 * @return a singleton key-extracting function
 */
public static <K, V> Function<Map.Entry<K, V>, K> entryToKeyFunction() {
   // Singleton instance — the function is stateless.
   return EntryToKeyFunction.getInstance();
}
Provides a function that returns the key of the entry when invoked .
22,159
/**
 * Returns the shared function that extracts the value from a map entry.
 *
 * @param <K> entry key type
 * @param <V> entry value type
 * @return a singleton value-extracting function
 */
public static <K, V> Function<Map.Entry<K, V>, V> entryToValueFunction() {
   // Singleton instance — the function is stateless.
   return EntryToValueFunction.getInstance();
}
Provides a function that returns the value of the entry when invoked .
22,160
/**
 * Applies a supplemental bit-mixing hash to a hashCode, defending against poor
 * user-supplied hash functions. Needed because the table length is a power of two,
 * so without mixing, hashCodes differing only in high bits would always collide.
 * NOTE(review): the exact shift/add constants appear hand-tuned (similar in spirit
 * to the mixers used in java.util concurrent maps) — do not alter them casually,
 * as any change silently redistributes every bucket.
 */
private static int hash ( int h ) { h += h << 15 ^ 0xffffcd7d ; h ^= h >>> 10 ; h += h << 3 ; h ^= h >>> 6 ; h += ( h << 2 ) + ( h << 14 ) ; return h ^ h >>> 16 ; }
Applies a supplemental hash function to a given hashCode which defends against poor quality hash functions . This is critical because ConcurrentReferenceHashMap uses power - of - two length hash tables that otherwise encounter collisions for hashCodes that do not differ in lower or upper bits .
22,161
/**
 * Looks up metadata for a component class, walking up the superclass chain until
 * a registration is found. Returns {@code null} when neither the class nor any
 * ancestor is registered. Metadata is (re)initialized against the class where it
 * was actually found before being returned.
 */
public ComponentMetadata getComponentMetadata(Class<?> componentClass) {
   Class<?> current = componentClass;
   ComponentMetadata md = componentMetadataMap.get(current.getName());
   while (md == null) {
      current = current.getSuperclass();
      if (current == null) {
         return null; // reached the top of the hierarchy without a match
      }
      md = componentMetadataMap.get(current.getName());
   }
   initMetadata(current, md);
   return md;
}
Look up metadata for a component class .
22,162
/**
 * Registers a factory class for a given component type, keyed by class name.
 *
 * @param componentType the component the factory produces
 * @param factoryType   the factory implementation
 */
public void injectFactoryForComponent(Class<?> componentType, Class<?> factoryType) {
   String componentName = componentType.getName();
   String factoryName = factoryType.getName();
   factories.put(componentName, factoryName);
}
Inject a factory for a given component type .
22,163
/**
 * Used internally to wire up the interceptor chain. Caches a pre-cast reference
 * when the next interceptor supports double-dispatch, so the hot invocation path
 * can skip the instanceof check.
 */
public final void setNextInterceptor(AsyncInterceptor nextInterceptor) {
   this.nextInterceptor = nextInterceptor;
   if (nextInterceptor instanceof DDAsyncInterceptor) {
      this.nextDDInterceptor = (DDAsyncInterceptor) nextInterceptor;
   } else {
      this.nextDDInterceptor = null;
   }
}
Used internally to set up the interceptor .
22,164
/**
 * Invokes the next interceptor, preferring the double-dispatch path when available.
 * Any exception thrown synchronously by the chain is captured and returned as an
 * exceptional SimpleAsyncInvocationStage rather than propagated — callers therefore
 * never see a synchronous throw from this method.
 */
public final Object invokeNext ( InvocationContext ctx , VisitableCommand command ) { try { if ( nextDDInterceptor != null ) { return command . acceptVisitor ( ctx , nextDDInterceptor ) ; } else { return nextInterceptor . visitCommand ( ctx , command ) ; } } catch ( Throwable throwable ) { return new SimpleAsyncInvocationStage ( throwable ) ; } }
Invoke the next interceptor possibly with a new command .
22,165
/**
 * Returns an async InvocationStage when the given stage is still incomplete or
 * completed exceptionally; otherwise (stage is null or completed normally) the
 * sync value is returned directly, avoiding stage allocation on the fast path.
 * Note the exceptional case returns asyncValue(stage) itself, so the exception —
 * not the sync value — is what propagates.
 */
public static Object delayedValue ( CompletionStage < Void > stage , Object syncValue ) { if ( stage != null ) { CompletableFuture < ? > future = stage . toCompletableFuture ( ) ; if ( ! future . isDone ( ) ) { return asyncValue ( stage . thenApply ( v -> syncValue ) ) ; } if ( future . isCompletedExceptionally ( ) ) { return asyncValue ( stage ) ; } } return syncValue ; }
Returns an InvocationStage if the provided CompletionStage is null not completed or completed via exception . If these are not true the sync value is returned directly .
22,166
/**
 * Hook invoked after a successful remote invocation. The default implementation
 * passes the response map through unchanged; subclasses may override to
 * post-process responses.
 *
 * @param command     the command that was invoked remotely
 * @param responseMap responses keyed by responding node address
 * @return the (possibly transformed) response map
 */
protected Map<Address, Response> afterInvokeRemotely(ReplicableCommand command, Map<Address, Response> responseMap) {
   // Identity by default — override for custom post-processing.
   return responseMap;
}
Method invoked after a successful remote invocation.
22,167
/**
 * Stops every map slot. Priority 9999 runs this after the clear phase
 * (currently priority 999).
 */
@Stop(priority = 9999)
public void stop() {
   final int total = maps.length();
   for (int idx = 0; idx < total; idx++) {
      stopMap(idx, false);
   }
}
Priority has to be higher than the clear priority - which is currently 999
22,168
/**
 * Maps a listener annotation class to its corresponding event type.
 * Identity comparison is sufficient because annotation Class objects are singletons
 * per classloader. Returns {@code null} for unrecognized annotations.
 */
private Event.Type getEventTypeFromAnnotation(Class<? extends Annotation> annotation) {
   if (annotation == CacheEntryCreated.class) {
      return Event.Type.CACHE_ENTRY_CREATED;
   } else if (annotation == CacheEntryModified.class) {
      return Event.Type.CACHE_ENTRY_MODIFIED;
   } else if (annotation == CacheEntryRemoved.class) {
      return Event.Type.CACHE_ENTRY_REMOVED;
   } else if (annotation == CacheEntryActivated.class) {
      return Event.Type.CACHE_ENTRY_ACTIVATED;
   } else if (annotation == CacheEntryInvalidated.class) {
      return Event.Type.CACHE_ENTRY_INVALIDATED;
   } else if (annotation == CacheEntryLoaded.class) {
      return Event.Type.CACHE_ENTRY_LOADED;
   } else if (annotation == CacheEntryPassivated.class) {
      return Event.Type.CACHE_ENTRY_PASSIVATED;
   } else if (annotation == CacheEntryVisited.class) {
      return Event.Type.CACHE_ENTRY_VISITED;
   } else if (annotation == CacheEntriesEvicted.class) {
      return Event.Type.CACHE_ENTRY_EVICTED;
   } else if (annotation == CacheEntryExpired.class) {
      return Event.Type.CACHE_ENTRY_EXPIRED;
   }
   return null; // unknown annotation — caller must handle
}
Obtains the event type that corresponds to the given event annotation .
22,169
/**
 * Returns the node value as an Enum constant, or {@code null} when the node is
 * undefined. Convenience overload that supplies a {@code null} default.
 *
 * @param value       the model node to read
 * @param targetClass the enum type to resolve against
 * @return the enum constant, or {@code null} if undefined
 */
public static <E extends Enum<E>> E asEnum(ModelNode value, Class<E> targetClass) {
   // Delegate with a null default value.
   return asEnum(value, targetClass, null);
}
Returns the value of the node as an Enum value or null if the node is undefined .
22,170
/**
 * Returns the node value parsed as a module identifier, or the supplied default
 * when the node is undefined.
 */
public static ModuleIdentifier asModuleIdentifier(ModelNode value, ModuleIdentifier defaultValue) {
   if (!value.isDefined()) {
      return defaultValue;
   }
   return ModuleIdentifier.fromString(value.asString());
}
Returns the value of the node as a module identifier or the specified default if the node is undefined .
22,171
/**
 * Chooses a default marshaller when none was configured. Probes the classpath
 * (without initializing the class) for JBoss Marshalling's RiverMarshaller: if
 * present, the generic JBoss marshaller class is selected; otherwise falls back
 * to BytesOnlyMarshaller, which can only handle byte[] payloads.
 * NOTE: the success path sets marshallerClass while the fallback sets marshaller —
 * the two fields are resolved by the caller.
 */
private void handleNullMarshaller ( ) { try { Class . forName ( "org.jboss.marshalling.river.RiverMarshaller" , false , Util . class . getClassLoader ( ) ) ; marshallerClass = GenericJBossMarshaller . class ; } catch ( ClassNotFoundException e ) { log . tracef ( "JBoss Marshalling is not on the class path - Only byte[] instances can be marshalled" ) ; marshaller = BytesOnlyMarshaller . INSTANCE ; } }
Method that handles default marshaller - needed as a placeholder
22,172
/**
 * Transactional write path. No replication happens here — that is deferred to
 * commit time. Remote-origin commands are dropped early when this node does not
 * own the command's segment for writes. When the context has no entry for the key:
 * either wrap a null external entry (local-mode / SKIP_REMOTE_LOOKUP / previous
 * value not needed) or fetch the previous value remotely and chain the invocation
 * after it. Every completion path updates the command's value matcher so a retry
 * after topology change behaves correctly — including the exceptional path.
 */
private Object handleTxWriteCommand ( InvocationContext ctx , AbstractDataWriteCommand command , Object key ) throws Throwable { try { if ( ! ctx . isOriginLocal ( ) ) { LocalizedCacheTopology cacheTopology = checkTopologyId ( command ) ; if ( ! cacheTopology . isSegmentWriteOwner ( command . getSegment ( ) ) ) { return null ; } } CacheEntry entry = ctx . lookupEntry ( command . getKey ( ) ) ; if ( entry == null ) { if ( isLocalModeForced ( command ) || command . hasAnyFlag ( FlagBitSets . SKIP_REMOTE_LOOKUP ) || ! needsPreviousValue ( ctx , command ) ) { entryFactory . wrapExternalEntry ( ctx , key , null , false , true ) ; } else { Object result = asyncInvokeNext ( ctx , command , remoteGetSingleKey ( ctx , command , command . getKey ( ) , true ) ) ; return makeStage ( result ) . andFinally ( ctx , command , ( rCtx , rCommand , rv , t ) -> updateMatcherForRetry ( ( WriteCommand ) rCommand ) ) ; } } return invokeNextAndFinally ( ctx , command , ( rCtx , rCommand , rv , t ) -> updateMatcherForRetry ( ( WriteCommand ) rCommand ) ) ; } catch ( Throwable t ) { updateMatcherForRetry ( command ) ; throw t ; } }
If we are within one transaction we won t do any replication as replication would only be performed at commit time . If the operation didn t originate locally we won t do any replication either .
22,173
/**
 * Builds a Lucene-backed CacheQuery from an already-parsed Ickle query.
 * Fails fast when the cache is not indexed. The parsed query is optionally
 * narrowed by entity type (makeTypeQuery — a no-op in embedded mode).
 * Sort and projection from the parse result are applied only for non-BROADCAST
 * query modes; presumably the broadcast path applies them elsewhere — confirm
 * before changing. startOffset >= 0 and maxResults > 0 act as paging bounds;
 * note startOffset is narrowed from long to int when applied.
 */
protected < E > CacheQuery < E > buildLuceneQuery ( IckleParsingResult < TypeMetadata > ickleParsingResult , Map < String , Object > namedParameters , long startOffset , int maxResults , IndexedQueryMode queryMode ) { if ( log . isDebugEnabled ( ) ) { log . debugf ( "Building Lucene query for : %s" , ickleParsingResult . getQueryString ( ) ) ; } if ( ! isIndexed ) { throw log . cannotRunLuceneQueriesIfNotIndexed ( cache . getName ( ) ) ; } LuceneQueryParsingResult luceneParsingResult = transformParsingResult ( ickleParsingResult , namedParameters ) ; org . apache . lucene . search . Query luceneQuery = makeTypeQuery ( luceneParsingResult . getQuery ( ) , luceneParsingResult . getTargetEntityName ( ) ) ; if ( log . isDebugEnabled ( ) ) { log . debugf ( "The resulting Lucene query is : %s" , luceneQuery . toString ( ) ) ; } CacheQuery < ? > cacheQuery = makeCacheQuery ( ickleParsingResult , luceneQuery , queryMode , namedParameters ) ; if ( queryMode != IndexedQueryMode . BROADCAST ) { if ( luceneParsingResult . getSort ( ) != null ) { cacheQuery = cacheQuery . sort ( luceneParsingResult . getSort ( ) ) ; } if ( luceneParsingResult . getProjections ( ) != null ) { cacheQuery = cacheQuery . projection ( luceneParsingResult . getProjections ( ) ) ; } } if ( startOffset >= 0 ) { cacheQuery = cacheQuery . firstResult ( ( int ) startOffset ) ; } if ( maxResults > 0 ) { cacheQuery = cacheQuery . maxResults ( maxResults ) ; } return ( CacheQuery < E > ) cacheQuery ; }
Build a Lucene index query .
22,174
/**
 * Enhances the given query with an extra condition discriminating on entity type.
 * This base implementation is a no-op (embedded mode stores one type per index);
 * subclasses override it when multiple types share the same index.
 *
 * @param query            the query to (potentially) narrow
 * @param targetEntityName entity type to discriminate on; unused here
 * @return the query, unchanged in this implementation
 */
protected org.apache.lucene.search.Query makeTypeQuery(org.apache.lucene.search.Query query, String targetEntityName) {
   // Intentionally a pass-through; see subclasses for real discrimination.
   return query;
}
Enhances the give query with an extra condition to discriminate on entity type . This is a no - op in embedded mode but other query engines could use it to discriminate if more types are stored in the same index . To be overridden by subclasses as needed .
22,175
/**
 * Loads the entry for the given key from the cache loader into the current
 * context, unless the command/context combination says loading should be skipped.
 *
 * @return {@code null} when loading was skipped entirely, otherwise the stage
 *         completing when the load finishes
 */
protected final CompletionStage<Void> loadIfNeeded(final InvocationContext ctx, Object key, final FlagAffectedCommand cmd) {
   return skipLoad(cmd, key, ctx) ? null : loadInContext(ctx, key, cmd);
}
Loads from the cache loader the entry for the given key . A found value is loaded into the current context . The method reports whether the value was found , and whether the cache loader was checked at all .
22,176
/**
 * Unchecked-cast helper: coerces a raw SerializableSupplier into the nested-generic
 * form {@code SerializableSupplier<Collector<T, ?, R>>}. This is a deliberate hack —
 * Java's inference cannot express this nesting directly — and performs no runtime
 * work; type safety is the caller's responsibility.
 */
@ SuppressWarnings ( "unchecked" ) public static < T , R > SerializableSupplier < Collector < T , ? , R > > toSerialSupplierCollect ( SerializableSupplier supplier ) { return supplier ; }
This is a hack to allow the cast to work properly , since Java doesn't work as well with nested generics
22,177
/**
 * Unchecked-cast helper: coerces a raw Supplier into the nested-generic form
 * {@code Supplier<Collector<T, ?, R>>}. Mirrors toSerialSupplierCollect for the
 * non-serializable case; no runtime work, type safety is the caller's concern.
 */
@ SuppressWarnings ( "unchecked" ) public static < T , R > Supplier < Collector < T , ? , R > > toSupplierCollect ( Supplier supplier ) { return supplier ; }
This is a hack to allow for cast to work properly since Java doesn t work as well with nested generics
22,178
/**
 * Removes an alias from a LIST ModelNode of existing aliases by rebuilding the
 * list without the matching element.
 *
 * @param list  the LIST node of aliases; must be defined unless alias is blank
 * @param alias the alias to remove; null/empty returns the list unchanged
 * @return a new list node without the alias
 * @throws OperationFailedException when a non-blank alias is removed from an undefined list
 */
private ModelNode removeAliasFromList(ModelNode list, String alias) throws OperationFailedException {
   // Idiom fix: use isEmpty() rather than equals("").
   if (alias == null || alias.isEmpty()) {
      return list;
   }
   if (!list.isDefined()) {
      throw InfinispanMessages.MESSAGES.cannotRemoveAliasFromEmptyList(alias);
   }
   ModelNode newList = new ModelNode();
   for (ModelNode element : list.asList()) {
      // alias is known non-null here, so compare from its side.
      if (!alias.equals(element.asString())) {
         newList.add().set(element);
      }
   }
   return newList;
}
Remove an alias from a LIST ModelNode of existing aliases .
22,179
/**
 * Checks whether the configured indexes can be shared. Logic: scanning all
 * directory-provider properties, the first non-RAM provider short-circuits to
 * true (shareable); if only RAM/local-heap providers were seen the result is
 * false; with no provider keys at all the result is true. RAM-based indexes are
 * the only kind that forbids sharing.
 */
public boolean indexShareable ( ) { TypedProperties properties = properties ( ) ; boolean hasRamDirectoryProvider = false ; for ( Object objKey : properties . keySet ( ) ) { String key = ( String ) objKey ; if ( key . endsWith ( DIRECTORY_PROVIDER_KEY ) ) { String directoryImplementationName = String . valueOf ( properties . get ( key ) ) . trim ( ) ; if ( LOCAL_HEAP_DIRECTORY_PROVIDER . equalsIgnoreCase ( directoryImplementationName ) || RAM_DIRECTORY_PROVIDER . equalsIgnoreCase ( directoryImplementationName ) || LOCAL_HEAP_DIRECTORY_PROVIDER_FQN . equals ( directoryImplementationName ) ) { hasRamDirectoryProvider = true ; } else { return true ; } } } return ! hasRamDirectoryProvider ; }
Check if the indexes can be shared . Currently only ram based indexes don t allow any sort of sharing .
22,180
/**
 * Runs an operation inside a transaction using the cache's transaction manager,
 * guaranteeing the transaction is always either committed or rolled back.
 *
 * @param cache cache whose transaction manager drives the transaction
 * @param c     the operation to execute
 * @return the operation's result
 * @throws Exception propagated from the operation or transaction machinery
 */
public static <T> T withinTx(AdvancedCache cache, Callable<T> c) throws Exception {
   TransactionManager tm = cache.getTransactionManager();
   return withinTx(tm, c);
}
Call an operation within a transaction . This method guarantees that the right pattern is used to make sure that the transaction is always either committed or rollback .
22,181
/**
 * Runs an operation inside a transaction, guaranteeing the transaction is always
 * either committed or rolled back. With a null transaction manager the operation
 * runs directly, untransacted.
 *
 * @param tm transaction manager, or {@code null} to run without a transaction
 * @param c  the operation to execute
 * @return the operation's result
 * @throws Exception propagated from the operation; the transaction is marked
 *         rollback-only first, and the finally block completes it either way
 */
public static <T> T withinTx(TransactionManager tm, Callable<T> c) throws Exception {
   if (tm == null) {
      // Fix: the original wrapped this call in a pointless catch-and-rethrow.
      return c.call();
   }
   tm.begin();
   try {
      return c.call();
   } catch (Exception e) {
      // Mark for rollback; the finally block performs the actual rollback.
      tm.setRollbackOnly();
      throw e;
   } finally {
      if (tm.getStatus() == Status.STATUS_ACTIVE) {
         tm.commit();
      } else {
         tm.rollback();
      }
   }
}
Call an operation within a transaction . This method guarantees that the right pattern is used to make sure that the transaction is always either committed or rolled back .
22,182
/**
 * Decorates a cache so writes happen asynchronously (fire-and-forget), skipping
 * cache-load and remote lookups, with one caller-supplied extra flag appended.
 *
 * @param cache     cache to decorate
 * @param extraFlag additional flag to apply alongside the async-write flags
 * @return the flag-decorated cache
 */
public static AdvancedCache asyncWriteCache(AdvancedCache cache, Flag extraFlag) {
   // Flag order kept identical to the established convention.
   AdvancedCache decorated = cache.withFlags(Flag.SKIP_CACHE_LOAD, Flag.SKIP_REMOTE_LOOKUP, Flag.FORCE_ASYNCHRONOUS, extraFlag);
   return decorated;
}
Transform a given cache into a cache that writes cache entries without waiting for them to complete adding an extra flag .
22,183
/**
 * Decorates a cache so failed writes are swallowed silently: no lock-acquisition
 * wait, no cache-load, no remote lookup, failures suppressed.
 *
 * @param cache cache to decorate
 * @return the flag-decorated cache
 */
public static AdvancedCache failSilentWriteCache(AdvancedCache cache) {
   AdvancedCache decorated = cache.withFlags(Flag.FAIL_SILENTLY, Flag.ZERO_LOCK_ACQUISITION_TIMEOUT, Flag.SKIP_CACHE_LOAD, Flag.SKIP_REMOTE_LOOKUP);
   return decorated;
}
Transform a given cache into a cache that fails silently if cache writes fail .
22,184
/**
 * Decorates a cache so failed reads are swallowed silently, with no
 * lock-acquisition wait.
 *
 * @param cache cache to decorate
 * @return the flag-decorated cache
 */
public static AdvancedCache failSilentReadCache(AdvancedCache cache) {
   AdvancedCache decorated = cache.withFlags(Flag.FAIL_SILENTLY, Flag.ZERO_LOCK_ACQUISITION_TIMEOUT);
   return decorated;
}
Transform a given cache into a cache that fails silently if cache reads fail .
22,185
/**
 * Records the outcome (commit or rollback) on the statistics of the given
 * transaction. When no statistics exist for the transaction, the mismatch is
 * logged and the call is a no-op.
 *
 * @param commit            {@code true} for commit, {@code false} for rollback
 * @param globalTransaction the transaction whose outcome is recorded; may be null
 * @param local             whether to look up the local or the remote statistics
 */
public final void setTransactionOutcome(boolean commit, GlobalTransaction globalTransaction, boolean local) {
   TransactionStatistics stats = getTransactionStatistic(globalTransaction, local);
   if (stats != null) {
      stats.setOutcome(commit);
      return;
   }
   log.outcomeOnUnexistingTransaction(globalTransaction == null ? "null" : globalTransaction.globalId(), commit ? "COMMIT" : "ROLLBACK");
}
Sets the transaction outcome to commit or rollback depending if the transaction has commit successfully or not respectively .
22,186
/**
 * Signals the start of a transaction, creating its statistics holder if absent.
 * NOTE(review): the local-branch trace message says "already initialized" even
 * when the statistic was just created by createTransactionStatisticIfAbsent —
 * looks like a stale message; confirm intent before changing the log text.
 */
public final void beginTransaction ( GlobalTransaction globalTransaction , boolean local ) { if ( local ) { TransactionStatistics lts = createTransactionStatisticIfAbsent ( globalTransaction , true ) ; if ( trace ) { log . tracef ( "Local transaction statistic is already initialized: %s" , lts ) ; } } else { TransactionStatistics rts = createTransactionStatisticIfAbsent ( globalTransaction , false ) ; if ( trace ) { log . tracef ( "Using the remote transaction statistic %s for transaction %s" , rts , globalTransaction ) ; } } }
Signals the start of a transaction .
22,187
/**
 * Signals the end of a transaction: its statistics stop being updated and are
 * merged into the cache-wide statistics. Local and remote statistics are handled
 * independently — both may be terminated in the same call.
 */
public final void terminateTransaction(GlobalTransaction globalTransaction, boolean local, boolean remote) {
   if (local) {
      terminateAndMerge(removeTransactionStatistic(globalTransaction, true));
   }
   if (remote) {
      terminateAndMerge(removeTransactionStatistic(globalTransaction, false));
   }
}

/** Terminates the statistics (if any) and folds them into the cache-wide collector. */
private void terminateAndMerge(TransactionStatistics txs) {
   if (txs != null) {
      txs.terminateTransaction();
      cacheStatisticCollector.merge(txs);
   }
}
Signals the ending of a transaction . After this no more statistics are updated for this transaction and the values measured are merged with the cache statistics .
22,188
/**
 * Associates index segments to shards, one node at a time. Each node's primary
 * segments are split across its allotted shard count; shard ids are sequential
 * strings, and the three lookup maps (segment->shard, node->shards, shard->node)
 * are populated together. Nodes allotted zero shards contribute their segments to
 * a remainder set, which is finally dealt round-robin across all existing shards.
 * NOTE(review): n is advanced in two different branches — keep the increment
 * sites in sync when modifying.
 */
private void populateSegments ( int [ ] shardsNumPerServer , List < Set < Integer > > segmentsPerServer , List < Address > nodes ) { int shardId = 0 ; int n = 0 ; Set < Integer > remainingSegments = new HashSet < > ( ) ; for ( Address node : nodes ) { Collection < Integer > primarySegments = segmentsPerServer . get ( n ) ; int shardQuantity = shardsNumPerServer [ n ] ; if ( shardQuantity == 0 ) { remainingSegments . addAll ( segmentsPerServer . get ( n ++ ) ) ; continue ; } shardsPerAddressMap . computeIfAbsent ( node , a -> new HashSet < > ( shardQuantity ) ) ; List < Set < Integer > > segments = this . split ( primarySegments , shardsNumPerServer [ n ++ ] ) ; for ( Collection < Integer > shardSegments : segments ) { String id = String . valueOf ( shardId ++ ) ; shardSegments . forEach ( seg -> shardPerSegmentMap . put ( seg , id ) ) ; shardsPerAddressMap . get ( node ) . add ( id ) ; addressPerShardMap . put ( id , node ) ; } } if ( ! remainingSegments . isEmpty ( ) ) { Iterator < String > shardIterator = Stream . iterate ( 0 , i -> ( i + 1 ) % numShards ) . map ( String :: valueOf ) . iterator ( ) ; for ( Integer segment : remainingSegments ) { shardPerSegmentMap . put ( segment , shardIterator . next ( ) ) ; } } }
Associates segments to each shard .
22,189
/**
 * Allocates shards to nodes round-robin, skipping nodes with no segments
 * (empty weight sets). Visits slots 0..numNodes-1 cyclically, giving one shard
 * per eligible visit until all shards are placed.
 * NOTE: loops forever if numShards > 0 and every weight set is empty — same as
 * the original behavior.
 */
private static int[] allocateShardsToNodes(int numShards, int numNodes, List<Set<Integer>> weightPerServer) {
   int[] allocation = new int[numNodes];
   int remaining = numShards;
   int slot = 0;
   while (remaining > 0) {
      if (!weightPerServer.get(slot).isEmpty()) {
         allocation[slot]++;
         remaining--;
      }
      slot = (slot + 1) % numNodes;
   }
   return allocation;
}
Allocates shards in a round robin fashion for the servers ignoring those without segments .
22,190
/**
 * Sends a command to the entire cluster. The request is marshalled into a single
 * JGroups message; with TOTAL delivery order the destination is an (empty)
 * AnycastAddress so the total-order protocol handles it, otherwise the
 * destination is left unset, which JGroups treats as a broadcast.
 */
private void sendCommandToAll ( ReplicableCommand command , long requestId , DeliverOrder deliverOrder , boolean rsvp ) { Message message = new Message ( ) ; marshallRequest ( message , command , requestId ) ; setMessageFlags ( message , deliverOrder , rsvp , true ) ; if ( deliverOrder == DeliverOrder . TOTAL ) { message . dest ( new AnycastAddress ( ) ) ; } send ( message ) ; }
Send a command to the entire cluster .
22,191
/**
 * Sends a command to multiple targets. For TOTAL order the whole target list is
 * wrapped in one AnycastAddress message; otherwise the message is unicast to each
 * target individually, skipping self and (when checkView is set) addresses no
 * longer in the cluster view. The message is copied before each subsequent send
 * so per-target destination mutation never affects an in-flight message — the
 * last target reuses the current copy instead of making a redundant one.
 */
private void sendCommand ( Collection < Address > targets , ReplicableCommand command , long requestId , DeliverOrder deliverOrder , boolean rsvp , boolean checkView ) { Objects . requireNonNull ( targets ) ; Message message = new Message ( ) ; marshallRequest ( message , command , requestId ) ; setMessageFlags ( message , deliverOrder , rsvp , true ) ; if ( deliverOrder == DeliverOrder . TOTAL ) { message . dest ( new AnycastAddress ( toJGroupsAddressList ( targets ) ) ) ; send ( message ) ; } else { Message copy = message ; for ( Iterator < Address > it = targets . iterator ( ) ; it . hasNext ( ) ; ) { Address address = it . next ( ) ; if ( checkView && ! clusterView . contains ( address ) ) continue ; if ( address . equals ( getAddress ( ) ) ) continue ; copy . dest ( toJGroupsAddress ( address ) ) ; send ( copy ) ; if ( it . hasNext ( ) ) { copy = copy . copy ( true ) ; } } } }
Send a command to multiple targets .
22,192
/**
 * Builds the global classloader by aggregating, in insertion order: the cache
 * container module's loader (if configured), the loaders of any additional
 * modules, and — only when no container module was configured — the Infinispan
 * subsystem's own loader. A LinkedHashSet dedupes while preserving order.
 * Returns the single loader directly when only one remains; otherwise wraps them
 * in an AggregatedClassLoader.
 *
 * @throws ModuleLoadException when any listed module cannot be loaded
 */
private ClassLoader makeGlobalClassLoader ( ModuleLoader moduleLoader , ModuleIdentifier cacheContainerModule , List < ModuleIdentifier > additionalModules ) throws ModuleLoadException { Set < ClassLoader > classLoaders = new LinkedHashSet < > ( ) ; if ( cacheContainerModule != null ) { classLoaders . add ( moduleLoader . loadModule ( cacheContainerModule ) . getClassLoader ( ) ) ; } if ( additionalModules != null ) { for ( ModuleIdentifier additionalModule : additionalModules ) { classLoaders . add ( moduleLoader . loadModule ( additionalModule ) . getClassLoader ( ) ) ; } } ClassLoader infinispanSubsystemClassloader = CacheContainerConfiguration . class . getClassLoader ( ) ; if ( classLoaders . isEmpty ( ) ) { return infinispanSubsystemClassloader ; } if ( cacheContainerModule == null ) { classLoaders . add ( infinispanSubsystemClassloader ) ; } if ( classLoaders . size ( ) == 1 ) { return classLoaders . iterator ( ) . next ( ) ; } return new AggregatedClassLoader ( classLoaders ) ; }
Creates an aggregated ClassLoader using the loaders of the cache container module and the optional modules listed under the modules element .
22,193
/**
 * Recursively splits totalOwnedSegments segments across the given locations,
 * proportionally to capacity. Two clamping passes run first: a reverse pass pins
 * locations whose proportional share would exceed the per-location minimum/cap,
 * then a forward pass pins those at or above the computed max; both recurse into
 * the pinned location's children and remove it from further consideration. The
 * remainder either cannot satisfy the proportional split (children of all
 * remaining locations are pooled, sorted, and re-split together) or is divided
 * by the capacity fraction per remaining location.
 * NOTE(review): order-sensitive accounting (remainingCapacity/remainingOwned are
 * mutated inside both iterator loops) — do not reorder statements here.
 */
private void splitExpectedOwnedSegments ( Collection < ? extends Location > locations , float totalOwnedSegments , float totalCapacity ) { float remainingCapacity = totalCapacity ; float remainingOwned = totalOwnedSegments ; List < Location > remainingLocations = new ArrayList < > ( locations ) ; for ( ListIterator < Location > it = remainingLocations . listIterator ( locations . size ( ) ) ; it . hasPrevious ( ) ; ) { Location location = it . previous ( ) ; if ( remainingOwned < numSegments * remainingLocations . size ( ) ) break ; int minOwned = numSegments ; float locationOwned = remainingOwned * location . totalCapacity / remainingCapacity ; if ( locationOwned > minOwned ) break ; splitExpectedOwnedSegments2 ( location . getChildren ( ) , minOwned , location . totalCapacity ) ; remainingCapacity -= location . totalCapacity ; remainingOwned -= location . expectedOwnedSegments ; it . remove ( ) ; } for ( Iterator < ? extends Location > it = remainingLocations . iterator ( ) ; it . hasNext ( ) ; ) { Location location = it . next ( ) ; float maxOwned = computeMaxOwned ( remainingOwned , remainingLocations . size ( ) ) ; float locationOwned = remainingOwned * location . totalCapacity / remainingCapacity ; if ( locationOwned < maxOwned ) break ; splitExpectedOwnedSegments2 ( location . getChildren ( ) , maxOwned , location . totalCapacity ) ; remainingCapacity -= location . totalCapacity ; remainingOwned -= maxOwned ; it . remove ( ) ; } if ( remainingLocations . isEmpty ( ) ) return ; if ( remainingLocations . size ( ) * numSegments < remainingOwned ) { List < Location > childrenLocations = new ArrayList < > ( remainingLocations . size ( ) * 2 ) ; for ( Location location : remainingLocations ) { childrenLocations . addAll ( location . getChildren ( ) ) ; } Collections . 
sort ( childrenLocations ) ; splitExpectedOwnedSegments2 ( childrenLocations , remainingOwned , remainingCapacity ) ; } else { float fraction = remainingOwned / remainingCapacity ; for ( Location location : remainingLocations ) { float locationOwned = location . totalCapacity * fraction ; splitExpectedOwnedSegments2 ( location . getChildren ( ) , locationOwned , location . totalCapacity ) ; } } }
Split totalOwnedSegments segments into the given locations recursively .
22,194
/**
 * Registers a locally-created counter, pairing its event generator with its
 * topology-change listener.
 *
 * @param counterName            unique counter name
 * @param generator              generator used to produce counter events
 * @param topologyChangeListener listener notified on topology changes
 * @throws IllegalStateException when a counter with this name is already registered
 */
public void registerCounter(ByteString counterName, CounterEventGenerator generator, TopologyChangeListener topologyChangeListener) {
   Holder holder = new Holder(generator, topologyChangeListener);
   if (counters.putIfAbsent(counterName, holder) != null) {
      // Fix: the original threw a messageless IllegalStateException,
      // making duplicate registrations impossible to diagnose from logs.
      throw new IllegalStateException("Counter already registered: " + counterName);
   }
}
It registers a new counter created locally .
22,195
/**
 * Registers the cache listeners if they are not registered yet; synchronized so
 * concurrent callers cannot double-register. Two independent guards: the topology
 * listener (which also captures the cache reference on first registration) and
 * the value listener, filtered to counter keys only.
 *
 * @throws InterruptedException propagated from topology listener registration
 */
public synchronized void listenOn ( Cache < CounterKey , CounterValue > cache ) throws InterruptedException { if ( ! topologyListener . registered ) { this . cache = cache ; topologyListener . register ( cache ) ; } if ( ! listenersRegistered ) { this . cache . addListener ( valueListener , CounterKeyFilter . getInstance ( ) ) ; listenersRegistered = true ; } }
It registers the cache listeners if they aren t already registered .
22,196
/**
 * Loads entries into the collector until it holds maxEntries elements. Keys are
 * fetched first (up to the remaining quota), then each key's value is loaded and
 * wrapped as a MarshallableEntry; keys whose value resolves to null are skipped.
 * No-op when the collector already holds maxEntries or more.
 *
 * @param entriesCollector target set, possibly pre-populated
 * @param maxEntries       total cap on the collector's size
 * @param entryFactory     factory producing the cache entries
 */
protected <K, V> void loadAllEntries(final Set<MarshallableEntry<K, V>> entriesCollector, final int maxEntries, MarshallableEntryFactory<K, V> entryFactory) {
   int toLoadElements = maxEntries - entriesCollector.size();
   if (toLoadElements <= 0) {
      return;
   }
   HashSet<IndexScopedKey> keysCollector = new HashSet<>();
   // Fix: use the type-safe Collections.emptySet() instead of the raw EMPTY_SET field.
   loadSomeKeys(keysCollector, Collections.emptySet(), toLoadElements);
   for (IndexScopedKey key : keysCollector) {
      Object value = load(key);
      if (value != null) {
         entriesCollector.add(entryFactory.create(key, value));
      }
   }
}
Loads all entries from the CacheLoader ; since this is actually a Lucene index , segments are transformed into entries in a specific order : simplest entries first .
22,197
/**
 * Loads the value for a specific index-scoped key by dispatching to the load
 * visitor; any failure is wrapped in the cache-loader exception type.
 *
 * @param key the key to resolve
 * @return the loaded value, as produced by the visitor
 */
protected Object load(final IndexScopedKey key) {
   try {
      return key.accept(loadVisitor);
   } catch (Exception cause) {
      throw log.exceptionInCacheLoader(cause);
   }
}
Load the value for a specific key
22,198
/**
 * Loads one chunk of a segment file as a byte array. The chunk position is
 * chunkId * bufferSize; the read size is clamped so the final chunk of a file
 * may be shorter than bufferSize.
 *
 * @param key identifies file name, chunk index and chunk size
 * @return the chunk's bytes (length &lt;= bufferSize)
 * @throws IOException on any read failure
 */
private byte[] loadIntern(final ChunkCacheKey key) throws IOException {
   final String fileName = key.getFileName();
   final long chunkId = key.getChunkId();
   final int chunkSize = key.getBufferSize();
   final long seekTo = chunkId * chunkSize;
   // Fix: try-with-resources — the original called input.length() outside the
   // try block, leaking the IndexInput if length() threw.
   try (IndexInput input = directory.openInput(fileName, IOContext.READ)) {
      final long length = input.length();
      if (seekTo != 0) {
         input.seek(seekTo);
      }
      // Clamp: the last chunk may be shorter than the configured chunk size.
      final int toRead = (int) Math.min(length - seekTo, (long) chunkSize);
      final byte[] buffer = new byte[toRead];
      input.readBytes(buffer, 0, toRead);
      return buffer;
   }
}
Loads the actual byte array from a segment in the range of a specific chunkSize . Note that while the chunkSize is specified in this case it's likely derived from the invocations of other loading methods .
22,199
/**
 * Returns a list of all interceptors in the chain that are CommandInterceptor
 * instances, preserving chain order. An empty chain yields an empty list.
 */
public List<CommandInterceptor> asList() {
   ArrayList<CommandInterceptor> result = new ArrayList<>(asyncInterceptorChain.getInterceptors().size());
   asyncInterceptorChain.getInterceptors().stream()
         .filter(ci -> ci instanceof CommandInterceptor)
         .map(ci -> (CommandInterceptor) ci)
         .forEach(result::add);
   return result;
}
Returns an unmodifiable list with all the interceptors in sequence . If the first interceptor in the chain is null , an empty list is returned .